language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/check-if-digits-are-equal-in-string-after-operations-ii.py | {
"start": 1000,
"end": 2121
} | class ____(object):
def hasSameDigits(self, s):
"""
:type s: str
:rtype: bool
"""
def nCr(n, r):
if n-r < r:
r = n-r
if LOOKUP[n][r] == -1:
c = 1
for k in xrange(1, r+1):
c *= n-k+1
c //= k
LOOKUP[n][r] = c
return LOOKUP[n][r]
# https://en.wikipedia.org/wiki/Lucas%27s_theorem
def nCr_mod(n, r, mod):
result = 1
while n > 0 or r > 0:
n, ni = divmod(n, mod)
r, ri = divmod(r, mod)
if ni < ri:
return 0
result = (result*nCr(ni, ri))%mod
return result
def nC10(n, k):
return lookup[nCr_mod(n, k, 2)][nCr_mod(n, k, 5)]
lookup = [[0]*5 for _ in xrange(2)]
for i in xrange(10):
lookup[i%2][i%5] = i
total = 0
for i in xrange(len(s)-1):
total = (total+nC10(len(s)-2, i)*(ord(s[i])-ord(s[i+1])))%10
return total == 0
| Solution2 |
python | huggingface__transformers | src/transformers/models/hiera/modeling_hiera.py | {
"start": 55749,
"end": 59599
} | class ____(HieraPreTrainedModel, BackboneMixin):
def __init__(self, config: HieraConfig):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embed_dim] + [
int(config.embed_dim * config.embed_dim_multiplier**i) for i in range(len(config.depths))
]
self.embeddings = HieraEmbeddings(config, is_mae=False)
self.encoder = HieraEncoder(config)
# Add layer norms to hidden states of out_features
hidden_states_norms = {}
for stage, num_channels in zip(self._out_features, self.channels):
hidden_states_norms[stage] = nn.LayerNorm(num_channels)
self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def forward(
self,
pixel_values: torch.Tensor,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> BackboneOutput:
"""
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-hf")
>>> model = AutoBackbone.from_pretrained(
... "facebook/hiera-tiny-224-hf", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
embedding_output, _, _ = self.embeddings(pixel_values)
outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=True,
return_dict=return_dict,
)
hidden_states = outputs[-1]
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
batch_size, height, width, num_channels = hidden_state.shape
hidden_state = hidden_state.view(batch_size, height * width, num_channels)
hidden_state = self.hidden_states_norms[stage](hidden_state)
hidden_state = hidden_state.view(batch_size, height, width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps += (hidden_state,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs[1],)
if output_attentions:
output += (outputs[2],)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs[1] if output_hidden_states else None,
attentions=outputs[2] if output_attentions else None,
)
__all__ = ["HieraForImageClassification", "HieraForPreTraining", "HieraBackbone", "HieraModel", "HieraPreTrainedModel"]
| HieraBackbone |
python | pytorch__pytorch | torch/__init__.py | {
"start": 72541,
"end": 72775
} | class ____(_LegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal(stacklevel=3)
return self._dtype
@classproperty
def _dtype(self):
return torch.cdouble
| ComplexDoubleStorage |
python | joblib__joblib | joblib/_parallel_backends.py | {
"start": 23049,
"end": 28139
} | class ____(AutoBatchingMixin, ParallelBackendBase):
"""Managing pool of workers with loky instead of multiprocessing."""
supports_retrieve_callback = True
supports_inner_max_num_threads = True
def configure(
self,
n_jobs=1,
parallel=None,
prefer=None,
require=None,
idle_worker_timeout=None,
**memmapping_executor_kwargs,
):
"""Build a process executor and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
if n_jobs == 1:
raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))
memmapping_executor_kwargs = {
**self.backend_kwargs,
**memmapping_executor_kwargs,
}
# Prohibit the use of 'timeout' in the LokyBackend, as 'idle_worker_timeout'
# better describes the backend's behavior.
if "timeout" in memmapping_executor_kwargs:
raise ValueError(
"The 'timeout' parameter is not supported by the LokyBackend. "
"Please use the `idle_worker_timeout` parameter instead."
)
if idle_worker_timeout is None:
idle_worker_timeout = self.backend_kwargs.get("idle_worker_timeout", 300)
self._workers = get_memmapping_executor(
n_jobs,
timeout=idle_worker_timeout,
env=self._prepare_worker_env(n_jobs=n_jobs),
context_id=parallel._id,
**memmapping_executor_kwargs,
)
self.parallel = parallel
return n_jobs
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel"""
if n_jobs == 0:
raise ValueError("n_jobs == 0 in Parallel has no meaning")
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif mp.current_process().daemon:
# Daemonic processes cannot have children
if n_jobs != 1:
if inside_dask_worker():
msg = (
"Inside a Dask worker with daemon=True, "
"setting n_jobs=1.\nPossible work-arounds:\n"
"- dask.config.set("
"{'distributed.worker.daemon': False})\n"
"- set the environment variable "
"DASK_DISTRIBUTED__WORKER__DAEMON=False\n"
"before creating your Dask cluster."
)
else:
msg = (
"Loky-backed parallel loops cannot be called in a"
" multiprocessing, setting n_jobs=1"
)
warnings.warn(msg, stacklevel=3)
return 1
elif not (self.in_main_thread() or self.nesting_level == 0):
# Prevent posix fork inside in non-main posix threads
if n_jobs != 1:
warnings.warn(
"Loky-backed parallel loops cannot be nested below "
"threads, setting n_jobs=1",
stacklevel=3,
)
return 1
elif n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
return n_jobs
def submit(self, func, callback=None):
"""Schedule a func to be run"""
future = self._workers.submit(func)
if callback is not None:
future.add_done_callback(callback)
return future
def retrieve_result_callback(self, future):
"""Retrieve the result, here out is the future given by submit"""
try:
return future.result()
except ShutdownExecutorError:
raise RuntimeError(
"The executor underlying Parallel has been shutdown. "
"This is likely due to the garbage collection of a previous "
"generator from a call to Parallel with return_as='generator'."
" Make sure the generator is not garbage collected when "
"submitting a new job or that it is first properly exhausted."
)
def terminate(self):
if self._workers is not None:
# Don't terminate the workers as we want to reuse them in later
# calls, but cleanup the temporary resources that the Parallel call
# created. This 'hack' requires a private, low-level operation.
self._workers._temp_folder_manager._clean_temporary_resources(
context_id=self.parallel._id, force=False
)
self._workers = None
self.reset_batch_stats()
def abort_everything(self, ensure_ready=True):
"""Shutdown the workers and restart a new one with the same parameters"""
self._workers.terminate(kill_workers=True)
self._workers = None
if ensure_ready:
self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)
| LokyBackend |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/cumulative_vrange_bottom/package.py | {
"start": 216,
"end": 760
} | class ____(Package):
"""Test that creating cumulative version ranges of the
form X.Y:X works and allows for the selection of all the
versions >= X.Y with major == X
"""
homepage = "https://www.example.org"
url = "https://example.org/files/v3.4/cmake-3.4.3.tar.gz"
version("3.0", md5="4cb3ff35b2472aae70f542116d616e63")
version("2.2", md5="4cb3ff35b2472aae70f542116d616e63")
version("2.1", md5="4cb3ff35b2472aae70f542116d616e63")
version("2.0", md5="4cb3ff35b2472aae70f542116d616e63")
| CumulativeVrangeBottom |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/query_metrics/query_data_source_table/comparison_query_data_source_table.py | {
"start": 222,
"end": 543
} | class ____(QueryDataSourceTable):
metric_name = "comparison_query.data_source_table"
value_keys = ("comparison_query", "comparison_data_source_name")
query_param_name: ClassVar[str] = "comparison_query"
data_source_name_param_name: ClassVar[str] = "comparison_data_source_name"
| ComparisonQueryDataSourceTable |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_memusage.py | {
"start": 33088,
"end": 33144
} | class ____(DialectKWArgs, ClauseElement):
pass
| SomeFoo |
python | PrefectHQ__prefect | src/integrations/prefect-dask/prefect_dask/task_runners.py | {
"start": 2285,
"end": 3532
} | class ____(PrefectWrappedFuture[R, distributed.Future]):
"""
A Prefect future that wraps a distributed.Future. This future is used
when the task run is submitted to a DaskTaskRunner.
"""
def wait(self, timeout: Optional[float] = None) -> None:
try:
result = self._wrapped_future.result(timeout=timeout)
except Exception:
# either the task failed or the timeout was reached
return
if isinstance(result, State):
self._final_state = result
def result(
self,
timeout: Optional[float] = None,
raise_on_failure: bool = True,
) -> R:
if not self._final_state:
try:
future_result = self._wrapped_future.result(timeout=timeout)
except distributed.TimeoutError as exc:
raise TimeoutError(
f"Task run {self.task_run_id} did not complete within {timeout} seconds"
) from exc
if isinstance(future_result, State):
self._final_state = future_result
else:
return future_result
return self._final_state.result(raise_on_failure=raise_on_failure, _sync=True)
| PrefectDaskFuture |
python | altair-viz__altair | tools/codemod.py | {
"start": 7188,
"end": 7706
} | class ____(Protocol):
def __call__(self, *code: _Code) -> str:
"""
Transform some input into a single block of modified code.
Parameters
----------
*code
Arbitrarily nested code fragments.
"""
...
def _join(self, code: _Code, *, sep: str = "\n") -> str:
"""
Concatenate any number of code fragments.
All nested groups are unwrapped into a flat iterable.
"""
return sep.join(iter_flatten(code))
| CodeMod |
python | Textualize__textual | docs/examples/widgets/masked_input.py | {
"start": 92,
"end": 626
} | class ____(App):
# (1)!
CSS = """
MaskedInput.-valid {
border: tall $success 60%;
}
MaskedInput.-valid:focus {
border: tall $success;
}
MaskedInput {
margin: 1 1;
}
Label {
margin: 1 2;
}
"""
def compose(self) -> ComposeResult:
yield Label("Enter a valid credit card number.")
yield MaskedInput(
template="9999-9999-9999-9999;0", # (2)!
)
app = MaskedInputApp()
if __name__ == "__main__":
app.run()
| MaskedInputApp |
python | ipython__ipython | IPython/lib/display.py | {
"start": 11963,
"end": 12607
} | class ____(IFrame):
"""
Class for embedding a Scribd document in an IPython session
Use the start_page params to specify a starting point in the document
Use the view_mode params to specify display type one off scroll | slideshow | book
e.g to Display Wes' foundational paper about PANDAS in book mode from page 3
ScribdDocument(71048089, width=800, height=400, start_page=3, view_mode="book")
"""
def __init__(self, id, width=400, height=300, **kwargs):
src="https://www.scribd.com/embeds/{0}/content".format(id)
super(ScribdDocument, self).__init__(src, width, height, **kwargs)
| ScribdDocument |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/plugin.py | {
"start": 1362,
"end": 6100
} | class ____(Enum):
PARAMETRIZE = 1
SKIP = 2
FAIL = 3
def pytest_generate_tests(metafunc):
"""Hook function to customize test discovery and parametrization.
It parametrizes, skips or fails a discovered test according the test configuration.
"""
if "inputs" in metafunc.fixturenames:
test_config_key = metafunc.cls.config_key()
global_config = load_config(metafunc.config.getoption("--acceptance-test-config"))
test_configuration: GenericTestConfig = getattr(global_config.acceptance_tests, test_config_key, None)
test_action, reason = parametrize_skip_or_fail(
metafunc.cls, metafunc.function, global_config.test_strictness_level, test_configuration
)
if test_action == TestAction.PARAMETRIZE:
metafunc.parametrize("inputs", test_configuration.tests)
if test_action == TestAction.SKIP:
pytest.skip(reason)
if test_action == TestAction.FAIL:
pytest.fail(reason)
def parametrize_skip_or_fail(
TestClass: Type[BaseTest],
test_function: Callable,
global_test_mode: AcceptanceTestConfig.TestStrictnessLevel,
test_configuration: GenericTestConfig,
) -> Tuple[TestAction, str]:
"""Use the current test strictness level and test configuration to determine if the discovered test should be parametrized, skipped or failed.
We parametrize a test if:
- the configuration declares tests.
We skip a test if:
- the configuration does not declare tests and:
- the current test mode allows this test to be skipped.
- Or a bypass_reason is declared in the test configuration.
We fail a test if:
- the configuration does not declare the test but the discovered test is declared as mandatory for the current test strictness level.
Args:
TestClass (Type[BaseTest]): The discovered test class
test_function (Callable): The discovered test function
global_test_mode (AcceptanceTestConfig.TestStrictnessLevel): The global test strictness level (from the global configuration object)
test_configuration (GenericTestConfig): The current test configuration.
Returns:
Tuple[TestAction, str]: The test action the execution should take and the reason why.
"""
test_name = f"{TestClass.__name__}.{test_function.__name__}"
test_mode_can_skip_this_test = global_test_mode not in TestClass.MANDATORY_FOR_TEST_STRICTNESS_LEVELS
skipping_reason_prefix = f"Skipping {test_name}: "
default_skipping_reason = skipping_reason_prefix + "not found in the config."
if test_configuration is None:
if test_mode_can_skip_this_test:
return TestAction.SKIP, default_skipping_reason
else:
return (
TestAction.FAIL,
f"{test_name} failed: it was not configured but must be according to the current {global_test_mode} test strictness level.",
)
else:
if test_configuration.tests is not None:
return TestAction.PARAMETRIZE, f"Parametrize {test_name}: tests are configured."
else:
return TestAction.SKIP, skipping_reason_prefix + test_configuration.bypass_reason
def pytest_collection_modifyitems(config, items):
"""
Get prepared test items and wrap them with `pytest.mark.timeout(timeout_seconds)` decorator.
`timeout_seconds` may be received either from acceptance test config or `pytest.mark.default_timeout(timeout_seconds)`,
if `timeout_seconds` is not specified in the acceptance test config.
"""
config = load_config(config.getoption("--acceptance-test-config"))
i = 0
packed_items = []
while i < len(items):
inner_items = [item for item in items if item.originalname == items[i].originalname]
packed_items.append(inner_items)
i += len(inner_items)
for items in packed_items:
if not hasattr(items[0].cls, "config_key"):
# Skip user defined test classes from integration_tests/ directory.
continue
test_configs = getattr(config.acceptance_tests, items[0].cls.config_key())
for test_config, item in zip(test_configs.tests, items):
default_timeout = item.get_closest_marker("default_timeout")
if test_config.timeout_seconds:
item.add_marker(pytest.mark.timeout(test_config.timeout_seconds))
elif default_timeout:
item.add_marker(pytest.mark.timeout(*default_timeout.args))
def pytest_assertrepr_compare(config, op, left, right):
if op != "==":
return
use_markup = config.get_terminal_writer().hasmarkup
return diff_dicts(left, right, use_markup=use_markup)
| TestAction |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup_py38.py | {
"start": 2577,
"end": 2964
} | class ____(B):
# a is required, b is optional, c is required again
c: str
@given(from_type(C))
def test_typeddict_with_optional_then_required_again(value):
assert type(value) == dict
assert set(value).issubset({"a", "b", "c"})
assert isinstance(value["a"], int)
if "b" in value:
assert isinstance(value["b"], bool)
assert isinstance(value["c"], str)
| C |
python | numpy__numpy | numpy/distutils/_shell_utils.py | {
"start": 2130,
"end": 2538
} | class ____:
"""
The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
"""
@staticmethod
def join(argv):
return ' '.join(shlex.quote(arg) for arg in argv)
@staticmethod
def split(cmd):
return shlex.split(cmd, posix=True)
if os.name == 'nt':
NativeParser = WindowsParser
elif os.name == 'posix':
NativeParser = PosixParser
| PosixParser |
python | coleifer__peewee | tests/test_utils.py | {
"start": 292,
"end": 442
} | class ____(TestModel):
data = ForeignKeyField(Data, backref='items')
value = CharField()
class Meta:
order_by = ('value',)
| DataItem |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_header_footer.py | {
"start": 301,
"end": 1738
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_header_footer() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_header_footer_header_only(self):
"""Test the _write_header_footer() method header only"""
self.worksheet.set_header("Page &P of &N")
self.worksheet._write_header_footer()
exp = """<headerFooter><oddHeader>Page &P of &N</oddHeader></headerFooter>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_header_footer_footer_only(self):
"""Test the _write_header_footer() method footer only"""
self.worksheet.set_footer("&F")
self.worksheet._write_header_footer()
exp = """<headerFooter><oddFooter>&F</oddFooter></headerFooter>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_header_footer_both(self):
"""Test the _write_header_footer() method header and footer"""
self.worksheet.set_header("Page &P of &N")
self.worksheet.set_footer("&F")
self.worksheet._write_header_footer()
exp = """<headerFooter><oddHeader>Page &P of &N</oddHeader><oddFooter>&F</oddFooter></headerFooter>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteHeaderFooter |
python | milvus-io__pymilvus | tests/test_prepare.py | {
"start": 22988,
"end": 23292
} | class ____:
def test_load_collection_request(self):
kwargs = {'load_fields': ['pk', 'float_vector', 'string_load', 'int64_load']}
req = Prepare.load_collection('foo', **kwargs)
assert req.load_fields == ['pk', 'float_vector', 'string_load', 'int64_load']
| TestLoadCollectionRequest |
python | doocs__leetcode | lcof2/剑指 Offer II 002. 二进制加法/Solution.py | {
"start": 0,
"end": 386
} | class ____:
def addBinary(self, a: str, b: str) -> str:
ans = []
i, j, carry = len(a) - 1, len(b) - 1, 0
while i >= 0 or j >= 0 or carry:
carry += (0 if i < 0 else int(a[i])) + (0 if j < 0 else int(b[j]))
carry, v = divmod(carry, 2)
ans.append(str(v))
i, j = i - 1, j - 1
return "".join(ans[::-1])
| Solution |
python | jina-ai__jina | tests/integration/websocket_gateway/test_subprotocols.py | {
"start": 358,
"end": 3595
} | class ____(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
for d in docs:
d.text += f'{d.id} is fooed!'
def ws_flow(start_event, stop_event, gateway_port):
with Flow(protocol='websocket', port_expose=gateway_port).add(
uses=DummyExecutor
) as f:
start_event.set()
f.block(stop_event=stop_event)
def input_da_gen():
for i in range(5):
yield DocumentArray.empty(INPUT_DA_LEN)
time.sleep(1)
def json_requestify(da: DocumentArray, exec_endpoint='/foo'):
return {
'execEndpoint': exec_endpoint,
'data': {'docs': da.to_dict()},
}
def bytes_requestify(da: DocumentArray, exec_endpoint='/foo'):
r = DataRequest()
r._pb_body.header.exec_endpoint = exec_endpoint
r.data.docs_bytes = da.to_bytes()
return r.to_bytes()
@pytest.fixture
def flow_context(gateway_port):
start_event = Event()
stop_event = Event()
p = Process(
target=ws_flow,
args=(start_event, stop_event, gateway_port),
)
p.start()
start_event.wait()
yield
stop_event.set()
p.join()
async def json_sending_client(gateway_port):
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{gateway_port}/',
) as ws:
for da in input_da_gen():
request = json_requestify(da)
await ws.send_json(request)
response = await ws.receive_json()
assert isinstance(response, dict)
assert response['header']['exec_endpoint'] == '/foo'
assert len(response['data']) == INPUT_DA_LEN
for doc in response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
async def bytes_sending_client(gateway_port):
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{gateway_port}/',
protocols=('bytes',),
) as ws:
for da in input_da_gen():
request = bytes_requestify(da)
await ws.send_bytes(request)
response = await ws.receive_bytes()
assert isinstance(response, bytes)
dict_response = DataRequest(response).to_dict()
assert dict_response['header']['exec_endpoint'] == '/foo'
assert len(dict_response['data']) == INPUT_DA_LEN
for doc in dict_response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
@pytest.mark.asyncio
async def test_json_single_client(flow_context, gateway_port):
await json_sending_client(gateway_port)
@pytest.mark.asyncio
async def test_json_multiple_clients(flow_context, gateway_port):
await asyncio.wait([json_sending_client(gateway_port) for _ in range(NUM_CLIENTS)])
@pytest.mark.asyncio
async def test_bytes_single_client(flow_context, gateway_port):
await bytes_sending_client(gateway_port)
@pytest.mark.asyncio
async def test_bytes_multiple_clients(flow_context, gateway_port):
await asyncio.wait([bytes_sending_client(gateway_port) for _ in range(NUM_CLIENTS)])
| DummyExecutor |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_math_ops_test.py | {
"start": 17259,
"end": 17927
} | class ____(test_util.TensorFlowTestCase):
def testCastWithFullType(self):
@def_function.function
def test_fn():
ta = tensor_array_ops.TensorArray(dtypes.int32, size=1)
h = math_ops.cast(ta.flow, dtypes.variant)
t = full_type_pb2.FullTypeDef(
type_id=full_type_pb2.TFT_PRODUCT,
args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_ARRAY)])
h.op.experimental_set_type(t)
ta = tensor_array_ops.TensorArray(dtypes.int32, flow=h)
ta = ta.write(0, _get_weak_tensor(1))
return ta.stack()
self.assertAllEqual(self.evaluate(test_fn()), [1])
@test_util.run_all_in_graph_and_eager_modes
| CastTest |
python | django-compressor__django-compressor | compressor/filters/jsmin/__init__.py | {
"start": 336,
"end": 1356
} | class ____(FilterBase):
def __init__(self, *args, **kwargs):
try:
self._parser = kwargs.pop("parser")
except KeyError:
self._parser = None
try:
self._unparser = kwargs.pop("unparser")
except KeyError:
self._unparser = None
super().__init__(*args, **kwargs)
try:
import calmjs.parse
except ImportError:
raise ImproperlyConfigured(
"The module calmjs.parse couldn't be imported. "
"Make sure it is correctly installed."
)
if self._parser is None:
self._parser = calmjs.parse.es5
if self._unparser is None:
self._unparser = calmjs.parse.unparsers.es5.minify_printer(obfuscate=True)
def output(self, **kwargs):
program = self._parser(self.content)
minified = "".join(part.text for part in self._unparser(program))
assert isinstance(minified, str)
return minified
| CalmjsFilter |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-code-hierarchy/tests/test_code_hierarchy_with_skeleton.py | {
"start": 6492,
"end": 12241
} | class ____:
@bar
@barfoo
def bar() -> None:
# {CodeHierarchyNodeParser._get_comment_text(chunks[2])}"""
)
assert chunks[1].metadata["module"] == "example.foo"
assert chunks[1].metadata["inclusive_scopes"] == [
{"name": "Foo", "type": "class_definition", "signature": "class Foo:"}
]
assert isinstance(chunks[1].relationships[NodeRelationship.PARENT], RelatedNodeInfo)
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.PARENT]).node_id
== chunks[0].id_
)
assert [c.node_id for c in chunks[1].relationships[NodeRelationship.CHILD]] == [
chunks[2].id_,
]
assert isinstance(chunks[1].relationships[NodeRelationship.SOURCE], RelatedNodeInfo)
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[1].relationships
assert NodeRelationship.NEXT not in chunks[1].relationships
# This is the first method scope
assert (
chunks[2].text
== """\
def bar() -> None:
print("bar")"""
)
assert chunks[2].metadata["module"] == "example.foo"
assert chunks[2].metadata["inclusive_scopes"] == [
{"name": "Foo", "type": "class_definition", "signature": "class Foo:"},
{
"name": "bar",
"type": "function_definition",
"signature": "def bar() -> None:",
},
]
assert isinstance(chunks[2].relationships[NodeRelationship.PARENT], RelatedNodeInfo)
assert (
cast(RelatedNodeInfo, chunks[2].relationships[NodeRelationship.PARENT]).node_id
== chunks[1].id_
)
assert chunks[2].relationships[NodeRelationship.CHILD] == []
assert isinstance(chunks[2].relationships[NodeRelationship.SOURCE], RelatedNodeInfo)
assert (
cast(RelatedNodeInfo, chunks[2].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[2].relationships
assert NodeRelationship.NEXT not in chunks[2].relationships
def test_html_code_splitter() -> None:
"""Test case for code splitting using HTML."""
if "CI" in os.environ:
return
code_splitter = CodeHierarchyNodeParser(
language="html",
chunk_min_characters=len(" <title>My Example Page</title>") + 1,
skeleton=True,
)
text = """\
<!DOCTYPE html>
<html>
<head>
<title>My Example Page</title>
</head>
<body>
<h1>Welcome to My Example Page</h1>
<p>This is a basic HTML page example.</p>
<ul>
<li>Item 1</li>
<li>Item 2</li>
<li>Item 3</li>
</ul>
<img src="https://example.com/image.jpg" alt="Example Image">
</body>
</html>"""
text_node = TextNode(
text=text,
)
chunks = code_splitter.get_nodes_from_documents([text_node])
# This is the DOCTYPE scope
assert (
chunks[0].text
== f"""\
<!DOCTYPE html>
<html>
<!-- {CodeHierarchyNodeParser._get_comment_text(chunks[1])} -->
</html>"""
)
assert chunks[0].metadata["inclusive_scopes"] == []
assert NodeRelationship.PARENT not in chunks[0].relationships
assert [c.node_id for c in chunks[0].relationships[NodeRelationship.CHILD]] == [
chunks[1].id_
]
assert (
cast(RelatedNodeInfo, chunks[0].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[0].relationships
assert NodeRelationship.NEXT not in chunks[0].relationships
# This is the html scope
assert (
chunks[1].text
== f"""\
<html>
<head>
<!-- {CodeHierarchyNodeParser._get_comment_text(chunks[2])} -->
</head>
<body>
<!-- {CodeHierarchyNodeParser._get_comment_text(chunks[3])} -->
</body>
</html>"""
)
assert chunks[1].metadata["inclusive_scopes"] == [
{"name": "html", "type": "element", "signature": "<html>"}
]
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.PARENT]).node_id
== chunks[0].id_
)
assert [c.node_id for c in chunks[1].relationships[NodeRelationship.CHILD]] == [
chunks[2].id_,
chunks[3].id_,
]
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[1].relationships
assert NodeRelationship.NEXT not in chunks[1].relationships
# Head chunk
assert (
chunks[2].text
== """\
<head>
<title>My Example Page</title>
</head>"""
)
assert chunks[2].metadata["inclusive_scopes"] == [
{"name": "html", "type": "element", "signature": "<html>"},
{"name": "head", "type": "element", "signature": "<head>"},
]
assert (
cast(RelatedNodeInfo, chunks[2].relationships[NodeRelationship.PARENT]).node_id
== chunks[1].id_
) # Parent should be <html>
assert [
c.node_id for c in chunks[2].relationships[NodeRelationship.CHILD]
] == [] # Child should be <title>
assert (
cast(RelatedNodeInfo, chunks[2].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[2].relationships
assert NodeRelationship.NEXT not in chunks[2].relationships
def test_typescript_code_splitter() -> None:
"""Test case for code splitting using TypeScript."""
if "CI" in os.environ:
return
code_splitter = CodeHierarchyNodeParser(
language="typescript", skeleton=True, chunk_min_characters=0
)
text = """\
function foo() {
console.log("bar");
}
| Foo |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/gcs.py | {
"start": 10206,
"end": 14539
} | class ____(GoogleCloudBaseOperator):
"""
Deletes objects from a list or all objects matching a prefix from a Google Cloud Storage bucket.
:param bucket_name: The GCS bucket to delete from
:param objects: List of objects to delete. These should be the names
of objects in the bucket, not including gs://bucket/
:param prefix: String or list of strings, which filter objects whose name begin with
it/them. (templated)
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"bucket_name",
"prefix",
"objects",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
bucket_name: str,
objects: list[str] | None = None,
prefix: str | list[str] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.bucket_name = bucket_name
self.objects = objects
self.prefix = prefix
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
if objects is None and prefix is None:
err_message = "(Task {task_id}) Either objects or prefix should be set. Both are None.".format(
**kwargs
)
raise ValueError(err_message)
if objects is not None and prefix is not None:
err_message = "(Task {task_id}) Objects or prefix should be set. Both provided.".format(**kwargs)
raise ValueError(err_message)
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.objects is not None:
objects = self.objects
else:
objects = hook.list(bucket_name=self.bucket_name, prefix=self.prefix)
self.log.info("Deleting %s objects from %s", len(objects), self.bucket_name)
for object_name in objects:
hook.delete(bucket_name=self.bucket_name, object_name=object_name)
def get_openlineage_facets_on_start(self):
from airflow.providers.common.compat.openlineage.facet import (
Dataset,
LifecycleStateChange,
LifecycleStateChangeDatasetFacet,
PreviousIdentifier,
)
from airflow.providers.google.cloud.openlineage.utils import extract_ds_name_from_gcs_path
from airflow.providers.openlineage.extractors import OperatorLineage
objects = []
if self.objects is not None:
objects = self.objects
elif self.prefix is not None:
prefixes = [self.prefix] if isinstance(self.prefix, str) else self.prefix
objects = [extract_ds_name_from_gcs_path(pref) for pref in prefixes]
bucket_url = f"gs://{self.bucket_name}"
input_datasets = [
Dataset(
namespace=bucket_url,
name=object_name,
facets={
"lifecycleStateChange": LifecycleStateChangeDatasetFacet(
lifecycleStateChange=LifecycleStateChange.DROP.value,
previousIdentifier=PreviousIdentifier(
namespace=bucket_url,
name=object_name,
),
)
},
)
for object_name in objects
]
return OperatorLineage(inputs=input_datasets)
| GCSDeleteObjectsOperator |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 27401,
"end": 29060
} | class ____(Request):
"""
Deletes a queue. If the queue is not empty and force is not set to true, queue will not be deleted.
:param queue: Queue id
:type queue: str
:param force: Force delete of non-empty queue. Defaults to false
:type force: bool
"""
_service = "queues"
_action = "delete"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "Force delete of non-empty queue. Defaults to false",
"type": "boolean",
},
"queue": {"description": "Queue id", "type": "string"},
},
"required": ["queue"],
"type": "object",
}
def __init__(self, queue: str, force: Optional[bool] = False, **kwargs: Any) -> None:
super(DeleteRequest, self).__init__(**kwargs)
self.queue = queue
self.force = force
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| DeleteRequest |
python | PrefectHQ__prefect | tests/experimental/test_sla.py | {
"start": 3190,
"end": 4807
} | class ____:
async def test_create_sla_against_cloud(self):
account_id = uuid4()
workspace_id = uuid4()
deployment_id = uuid4()
prefect_api_url = f"https://api.prefect.cloud/api/accounts/{account_id}/workspaces/{workspace_id}/"
with temporary_settings(
updates={
PREFECT_API_URL: prefect_api_url,
}
):
with respx.mock(
assert_all_mocked=True,
assert_all_called=False,
base_url=prefect_api_url,
using="httpx",
) as router:
sla_name = "test-sla"
router.get("/csrf-token", params={"client": mock.ANY}).pass_through()
router.post(
f"/slas/apply-resource-slas/prefect.deployment.{deployment_id}",
).mock(
return_value=httpx.Response(
status_code=201,
json={
"created": [{"name": sla_name}],
"updated": [],
"deleted": [],
},
)
)
prefect_client = get_client()
sla = TimeToCompletionSla(
name=sla_name,
duration=timedelta(minutes=10).total_seconds(),
)
response = await prefect_client.apply_slas_for_deployment(
deployment_id, [sla]
)
assert response.created[0] == sla.name
| TestClientApplySla |
python | pytorch__pytorch | torch/distributed/tensor/_op_schema.py | {
"start": 10073,
"end": 11145
} | class ____:
"""
RuntimeSchemaInfo stores the operator schema related information for runtime (eager)
execution. This is mainly used for two ways: 1. to generate hash for args to determine
whether to re-run sharding prop or not 2. to determine if we need pytree
"""
# This static_argnum records static arg "starting index" for ops that have non-tensor
# args/kwargs which would affect sharding propagation results. All args starting from
# this index would be hashed to our sharding cache.
# Note that only a few ops need this information, e.g. view, transpose, var.dim, etc.
static_argnum: int = 100
# This static_kwargkey records static kwarg names which would affect sharding prop
static_kwargkey: list[str] | None = None
# each op can decide if it wants to use pytree flatten/unflatten during operator
# eager execution, by default we don't need to do flatten/unflatten, only if the
# op indicate it needs to, this is to accelerate eager performance.
needs_pytree: bool = False
@dataclass
| RuntimeSchemaInfo |
python | skorch-dev__skorch | skorch/tests/callbacks/test_lr_scheduler.py | {
"start": 9743,
"end": 15067
} | class ____:
def get_net_with_mock(
self, classifier_data, classifier_module, monitor='train_loss'):
"""Returns a net with a mocked lr policy that allows to check what
it's step method was called with.
"""
X, y = classifier_data
net = NeuralNetClassifier(
classifier_module,
callbacks=[
('scheduler', LRScheduler(ReduceLROnPlateau, monitor=monitor)),
],
max_epochs=1,
).fit(X, y)
# mock the policy
policy = dict(net.callbacks_)['scheduler'].lr_scheduler_
mock_step = Mock(side_effect=policy.step)
policy.step = mock_step
# make sure that mocked policy is set
scheduler = dict(net.callbacks_)['scheduler']
# pylint: disable=protected-access
scheduler._get_scheduler = lambda *args, **kwargs: policy
net.partial_fit(X, y)
return net, mock_step
@pytest.mark.parametrize('monitor', ['train_loss', 'valid_loss', 'epoch'])
def test_reduce_lr_monitor_with_string(
self, monitor, classifier_data, classifier_module):
# step should be called with the 2nd to last value from that
# history entry
net, mock_step = self.get_net_with_mock(
classifier_data, classifier_module, monitor=monitor)
score = mock_step.call_args_list[0][0][0]
np.isclose(score, net.history[-2, monitor])
def test_reduce_lr_monitor_with_callable(
self, classifier_data, classifier_module):
# step should always be called with the return value from the
# callable, 55
_, mock_step = self.get_net_with_mock(
classifier_data, classifier_module, monitor=lambda x: 55)
score = mock_step.call_args_list[0][0][0]
assert score == 55
@pytest.mark.parametrize('mode', ['min', 'max'])
def test_reduce_lr_monitor_passes_monitored_loss(
self, classifier_data, classifier_module, mode):
X, y = classifier_data
net = NeuralNetClassifier(
classifier_module,
callbacks=[
('scheduler', LRScheduler(
ReduceLROnPlateau, monitor='valid_loss', mode=mode)),
],
max_epochs=1,
)
net.fit(X, y)
expected = net.history_[-1, "valid_loss"]
policy = dict(net.callbacks_)['scheduler'].lr_scheduler_
assert policy.best == pytest.approx(expected)
def test_reduce_lr_raise_error_when_key_does_not_exist(
self, classifier_data, classifier_module):
X, y = classifier_data
net = NeuralNetClassifier(
classifier_module,
callbacks=[
('scheduler', LRScheduler(
ReduceLROnPlateau, monitor='bad_key')),
],
max_epochs=1,
)
msg = ("'bad_key' was not found in history. A Scoring "
"callback with name='bad_key' should be placed before the "
"LRScheduler callback")
with pytest.raises(ValueError, match=msg):
net.fit(X, y)
def test_reduce_lr_record_epoch_step(self, classifier_module, classifier_data):
epochs = 10 * 3 # patience = 10, get 3 full cycles of lr reduction
lr = 123.
net = NeuralNetClassifier(
classifier_module,
max_epochs=epochs,
lr=lr,
callbacks=[
('scheduler', LRScheduler(ReduceLROnPlateau, monitor='train_loss')),
],
)
net.fit(*classifier_data)
# We cannot compare lrs to simulation data, as ReduceLROnPlateau cannot be
# simulated. Instead we expect the lr to be reduced by a factor of 10 every
# 10+ epochs (as patience = 10), with the exact number depending on the training
# progress. Therefore, we can have at most 3 distinct lrs, but it could be less,
# so we need to slice the expected lrs.
lrs = net.history[:, 'event_lr']
lrs_unique = np.unique(lrs)
expected = np.unique([123., 12.3, 1.23])[-len(lrs_unique):]
assert np.allclose(lrs_unique, expected)
def test_reduce_lr_record_batch_step(self, classifier_module, classifier_data):
epochs = 3
lr = 123.
net = NeuralNetClassifier(
classifier_module,
max_epochs=epochs,
lr=lr,
callbacks=[
('scheduler', LRScheduler(
ReduceLROnPlateau, monitor='train_loss', step_every='batch'
)),
],
)
net.fit(*classifier_data)
# We cannot compare lrs to simulation data, as ReduceLROnPlateau cannot be
# simulated. Instead we expect the lr to be reduced by a factor of 10 every
# 10+ batches (as patience = 10), with the exact number depending on the
# training progress. Therefore, we can have at most 3 distinct lrs, but it
# could be less, so we need to slice the expected, lrs.
lrs_nested = net.history[:, 'batches', :, 'event_lr']
lrs_flat = sum(lrs_nested, [])
lrs_unique = np.unique(lrs_flat)
expected = np.unique([123., 12.3, 1.23])[-len(lrs_unique):]
assert np.allclose(lrs_unique, expected)
| TestReduceLROnPlateau |
python | eventlet__eventlet | tests/hub_test.py | {
"start": 5643,
"end": 6391
} | class ____(tests.LimitedTestCase):
def test_sleep(self):
# even if there was an error in the mainloop, the hub should continue
# to work
start = time.time()
eventlet.sleep(DELAY)
delay = time.time() - start
assert delay >= DELAY * \
0.9, 'sleep returned after %s seconds (was scheduled for %s)' % (
delay, DELAY)
def fail():
1 // 0
hubs.get_hub().schedule_call_global(0, fail)
start = time.time()
eventlet.sleep(DELAY)
delay = time.time() - start
assert delay >= DELAY * \
0.9, 'sleep returned after %s seconds (was scheduled for %s)' % (
delay, DELAY)
| TestExceptionInMainloop |
python | apache__avro | lang/py/avro/test/test_schema.py | {
"start": 36111,
"end": 37066
} | class ____(unittest.TestCase):
"""Ensure that Schema are hashable.
While hashability is implemented with parsing canonical form fingerprinting,
this test should be kept distinct to avoid coupling."""
def __init__(self, test_schema):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super().__init__("parse_and_hash")
self.test_schema = test_schema
def parse_and_hash(self):
"""Ensure that every schema can be hashed."""
try:
hash(self.test_schema.parse())
except TypeError as e:
if "unhashable type" in str(e):
self.fail(f"{self.test_schema} is not hashable")
raise
| HashableTestCase |
python | joke2k__faker | faker/providers/lorem/es_MX/__init__.py | {
"start": 50,
"end": 183
} | class ____(SpanishProvider):
"""Implement lorem provider for ``es_MX`` locale.
Using the same as in ```es_ES```.
"""
| Provider |
python | joke2k__faker | faker/providers/address/ko_KR/__init__.py | {
"start": 95,
"end": 13467
} | class ____(AddressProvider):
"""
Korean Address Provider
=======================
Korea has two address and postal code system.
Address:
- Address based on land parcel numbers
(지번 주소, OLD, but someone use consistently)
- Address based on road names and building numbers (도로명 주소, NEW)
:meth:`land_address` generate Address based on land parcel numbers and
:meth:`road_address` generate Address based on road names and building
numbers.
Postal code:
- Old postal code (6-digit, OLD and dead)
- New postal code (5-digit, New)
:meth:`old_postal_code` and :meth:`postcode` generate old 6-digit code
and :meth:`postal_code` generate newer 5-digit code.
Reference:
- `Official Confirmation Prividing that Old and New Addresses are Identical`__
(warn: cert error)
__ https://www.juso.go.kr/addridentity/AddrIdentityHelp.htm
"""
building_suffixes = (
"빌라",
"아파트",
"연립",
"마을",
"타운",
"타워",
)
road_suffixes = ("로", "길", "거리", "가")
town_suffixes = ("동", "읍", "면", "리", "마을")
postcode_formats = ("###-###",)
new_postal_code_formats = ("#####",)
metropolitan_cities = (
"서울특별시",
"부산광역시",
"대구광역시",
"인천광역시",
"광주광역시",
"대전광역시",
"울산광역시",
"세종특별자치시",
)
provinces = (
"경기도",
"강원도",
"충청북도",
"충청남도",
"전라북도",
"전라남도",
"경상북도",
"경상남도",
"제주특별자치도",
)
cities = (
"파주시",
"수원시",
"수원시 권선구",
"수원시 팔달구",
"수원시 영통구",
"성남시",
"성남시 수정구",
"성남시 중원구",
"화성시",
"성남시 분당구",
"안양시",
"안양시 만안구",
"안양시 동안구",
"부천시",
"부천시 원미구",
"부천시 소사구",
"부천시 오정구",
"광명시",
"평택시",
"이천시",
"동두천시",
"안산시",
"안산시 상록구",
"안산시 단원구",
"안성시",
"고양시",
"고양시 덕양구",
"고양시 일산동구",
"고양시 일산서구",
"과천시",
"구리시",
"남양주시",
"오산시",
"시흥시",
"군포시",
"의왕시",
"하남시",
"김포시",
"용인시",
"용인시 처인구",
"용인시 기흥구",
"용인시 수지구",
"연천군",
"가평군",
"양평군",
"광주시",
"포천시",
"양주시",
"수원시 장안구",
"의정부시",
"여주시",
"춘천시",
"원주시",
"강릉시",
"동해시",
"태백시",
"속초시",
"삼척시",
"홍천군",
"횡성군",
"영월군",
"평창군",
"정선군",
"철원군",
"화천군",
"양구군",
"인제군",
"고성군",
"양양군",
"천안시 동남구",
"천안시 서북구",
"공주시",
"보령시",
"아산시",
"서산시",
"논산시",
"계룡시",
"당진시",
"금산군",
"부여군",
"서천군",
"청양군",
"홍성군",
"예산군",
"태안군",
"청주시 상당구",
"청주시 서원구",
"청주시 흥덕구",
"청주시 청원구",
"충주시",
"제천시",
"보은군",
"옥천군",
"영동군",
"증평군",
"진천군",
"괴산군",
"음성군",
"단양군",
)
road_names = (
"압구정",
"도산대",
"학동",
"봉은사",
"테헤란",
"역삼",
"논현",
"언주",
"강남대",
"양재천",
"삼성",
"영동대",
"개포",
"선릉",
"반포대",
"서초중앙",
"서초대",
"잠실",
"석촌호수",
"백제고분",
"가락",
"오금",
)
boroughs = (
"종로구",
"중구",
"용산구",
"성동구",
"광진구",
"동대문구",
"중랑구",
"성북구",
"강북구",
"도봉구",
"노원구",
"은평구",
"서대문구",
"마포구",
"양천구",
"강서구",
"구로구",
"금천구",
"영등포구",
"동작구",
"관악구",
"서초구",
"강남구",
"송파구",
"강동구",
"동구",
"서구",
"남구",
"북구",
)
countries = (
"가나",
"가봉",
"가이아나",
"감비아",
"과테말라",
"그레나다",
"그리스",
"기니",
"기니비사우",
"나미비아",
"나우루",
"나이지리아",
"남수단",
"남아프리카 공화국",
"네덜란드 왕국",
"네팔",
"노르웨이",
"뉴질랜드",
"니제르",
"니카라과",
"대한민국",
"덴마크",
"도미니카 공화국",
"도미니카 연방",
"독일",
"동티모르",
"라오스",
"라이베리아",
"라트비아",
"러시아",
"레바논",
"레소토",
"루마니아",
"룩셈부르크",
"르완다",
"리비아",
"리투아니아",
"리히텐슈타인",
"마다가스카르",
"마셜 제도",
"북마케도니아 공화국",
"말라위",
"말레이시아",
"말리",
"멕시코",
"모나코",
"모로코",
"모리셔스",
"모리타니",
"모잠비크",
"몬테네그로",
"몰도바",
"몰디브",
"몰타",
"몽골",
"미국",
"미얀마",
"미크로네시아 연방",
"바누아투",
"바레인",
"바베이도스",
"바하마",
"방글라데시",
"베냉",
"베네수엘라",
"베트남",
"벨기에",
"벨라루스",
"벨리즈",
"보스니아 헤르체고비나",
"보츠와나",
"볼리비아",
"부룬디",
"부르키나파소",
"부탄",
"불가리아",
"브라질",
"브루나이",
"사모아",
"사우디아라비아",
"산마리노",
"상투메 프린시페",
"세네갈",
"세르비아",
"세이셸",
"세인트루시아",
"세인트빈센트 그레나딘",
"세인트키츠 네비스",
"소말리아",
"솔로몬 제도",
"수단",
"수리남",
"스리랑카",
"스와질란드",
"스웨덴",
"스위스",
"스페인",
"슬로바키아",
"슬로베니아",
"시리아",
"시에라리온 공화국",
"싱가포르",
"아랍에미리트",
"아르메니아",
"아르헨티나",
"아이슬란드",
"아이티",
"아일랜드",
"아제르바이잔",
"아프가니스탄",
"안도라",
"알바니아",
"알제리",
"앙골라",
"앤티가 바부다",
"에리트레아",
"에스토니아",
"에콰도르",
"에티오피아",
"엘살바도르",
"영국",
"예멘",
"오만",
"오스트레일리아",
"오스트리아",
"온두라스",
"요르단",
"우간다",
"우루과이",
"우즈베키스탄",
"우크라이나",
"이라크",
"이란",
"이스라엘",
"이집트",
"이탈리아",
"인도네시아",
"일본",
"자메이카",
"잠비아",
"적도 기니",
"조선민주주의인민공화국",
"조지아",
"중앙아프리카 공화국",
"중화인민공화국",
"지부티",
"짐바브웨",
"차드",
"체코",
"칠레",
"카메룬",
"카보베르데",
"카자흐스탄",
"카타르",
"캄보디아",
"캐나다",
"케냐",
"코모로",
"코스타리카",
"코트디부아르",
"콜롬비아",
"콩고 공화국",
"콩고 민주 공화국",
"쿠바",
"쿠웨이트",
"크로아티아",
"키르기스스탄",
"키리바시",
"키프로스",
"타이",
"타지키스탄",
"탄자니아",
"터키",
"토고",
"통가",
"투르크메니스탄",
"투발루",
"튀니지",
"트리니다드 토바고",
"파나마",
"파라과이",
"파키스탄",
"파푸아 뉴기니",
"팔라우",
"페루",
"포르투갈",
"폴란드",
"프랑스",
"피지",
"핀란드",
"필리핀",
"헝가리",
)
building_dongs = (
"가",
"나",
"다",
"라",
"마",
"바",
"##",
"###",
) + tuple(ALPHABET)
land_numbers = (
"###",
"###-#",
"###-##",
)
road_numbers = (
"#",
"##",
"###",
)
town_formats = (
"{{first_name}}{{last_name}}{{town_suffix}}",
"{{first_name}}{{last_name}}{{last_name}}{{town_suffix}}",
)
building_name_formats = (
"{{first_name}}{{last_name}}{{building_suffix}}",
"{{first_name}}{{last_name}}{{last_name}}{{building_suffix}}",
)
address_detail_formats = (
"{{building_name}}",
"{{building_name}} ###호",
"{{building_name}} {{building_dong}}동 ###호",
)
road_formats = (
"{{road_name}}{{road_suffix}} {{building_number}}",
"{{road_name}}{{road_number}}{{road_suffix}} {{building_number}}",
)
road_address_formats = (
"{{metropolitan_city}} {{borough}} {{road}}",
"{{province}} {{city}} {{road}}",
"{{metropolitan_city}} {{borough}} {{road}} ({{town}})",
"{{province}} {{city}} {{road}} ({{town}})",
)
land_address_formats = (
"{{metropolitan_city}} {{borough}} {{town}} {{land_number}}",
"{{province}} {{city}} {{town}} {{land_number}}",
)
# Keep backward compatibility
city_suffixes = ("시",)
street_suffixes = road_suffixes
street_name_formats = ("{{road_name}}",)
street_address_formats = road_address_formats
address_formats = road_address_formats
def land_number(self) -> str:
"""
:example: 507
"""
return self.bothify(self.random_element(self.land_numbers))
def land_address(self) -> str:
"""
:example: 세종특별자치시 어진동 507
"""
pattern: str = self.random_element(self.land_address_formats)
return self.generator.parse(pattern)
def road_number(self) -> str:
"""
:example: 24
"""
return self.bothify(self.random_element(self.road_numbers))
def road_address(self) -> str:
"""
:example: 세종특별자치시 도움5로 19 (어진동)
"""
pattern: str = self.random_element(self.road_address_formats)
return self.generator.parse(pattern)
def address_detail(self) -> str:
"""
:example: 가나아파트 가동 102호
"""
pattern: str = self.bothify(self.random_element(self.address_detail_formats))
return self.generator.parse(pattern)
def road(self) -> str:
"""
:example: 도움5로
"""
pattern: str = self.random_element(self.road_formats)
return self.generator.parse(pattern)
def road_name(self) -> str:
"""
:example: 압구정
"""
return self.random_element(self.road_names)
def road_suffix(self) -> str:
"""
:example: 길
"""
return self.random_element(self.road_suffixes)
def building_number(self) -> str:
"""
:returns: A random building number
Generates building number(건물 번호). There are 3 types of building number with current ROK addressing system.
(1) 19: A typical format. Only marks one building.
(2) 지하11: The building entrance is underground.
(3) 132-1: Several buildings are distinguished with sub-building-number(가지 번호).
Generating probability is arbitrarily.
:example: 19, 지하11, 143-1
"""
if self.random_int() % 9 < 1:
return self.building_number_underground()
elif self.random_int() % 9 < 4:
return self.building_number_segregated()
else:
return "%d" % self.generator.random.randint(1, 999)
def building_number_underground(self) -> str:
"""
:returns: A random building number with undergrond entrances
:example: 지하11
"""
return "지하%d" % (self.generator.random.randint(1, 999))
def building_number_segregated(self) -> str:
"""
:returns: A random building number distinguished with sub-building-number(가지 번호)
:example: 143-1
"""
main_building_number = self.generator.random.randint(1, 999)
sub_building_number = self.generator.random.randint(1, 99)
return "%d-%d" % (main_building_number, sub_building_number)
def metropolitan_city(self) -> str:
"""
:example: 서울특별시
"""
return self.random_element(self.metropolitan_cities)
def administrative_unit(self) -> str:
"""
:example: 경기도
"""
return self.random_element(self.provinces)
province = administrative_unit
def city(self) -> str:
"""
:example: 고양시
"""
pattern: str = self.random_element(self.cities)
return self.generator.parse(pattern)
def borough(self) -> str:
"""
:example: 중구
"""
return self.random_element(self.boroughs)
def town(self) -> str:
"""
:example: 가나동
"""
pattern: str = self.random_element(self.town_formats)
return self.generator.parse(pattern)
def town_suffix(self) -> str:
"""
:example: 동
"""
return self.random_element(self.town_suffixes)
def building_name(self) -> str:
"""
:example: 김구아파트
"""
pattern: str = self.random_element(self.building_name_formats)
return self.generator.parse(pattern)
def building_suffix(self) -> str:
"""
:example: 아파트
"""
return self.random_element(self.building_suffixes)
def building_dong(self) -> str:
"""
:example: 가
"""
return self.bothify(self.random_element(self.building_dongs))
def old_postal_code(self) -> str:
"""
:example: 123-456
"""
return self.bothify(self.random_element(self.postcode_formats))
def postcode(self) -> str:
"""
:example: 12345
"""
return self.bothify(self.random_element(self.new_postal_code_formats))
def postal_code(self) -> str:
"""
:example: 12345
"""
return self.postcode()
| Provider |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/commons/models.py | {
"start": 1241,
"end": 4049
} | class ____(_collections_abc.MutableMapping): # type: ignore
# Start by filling-out the abstract methods
def __init__(self, _dict: Optional[MutableMapping] = None, **kwargs: Any):
self.data: MutableMapping = {}
if _dict is not None:
self.update(_dict)
if kwargs:
self.update(kwargs)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: Any) -> Any:
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key: Any, item: Any) -> None:
self.data[key] = item
def __delitem__(self, key: Any) -> None:
del self.data[key]
def __iter__(self) -> Iterator:
return iter(self.data)
# Modify __contains__ to work correctly when __missing__ is present
def __contains__(self, key: Any) -> bool:
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
def __repr__(self) -> str:
return repr(self.data)
def __or__(self, other: UserDict | dict) -> UserDict:
if isinstance(other, UserDict):
return self.__class__(self.data | other.data) # type: ignore
if isinstance(other, dict):
return self.__class__(self.data | other) # type: ignore
return NotImplemented
def __ror__(self, other: UserDict | dict) -> UserDict:
if isinstance(other, UserDict):
return self.__class__(other.data | self.data) # type: ignore
if isinstance(other, dict):
return self.__class__(other | self.data) # type: ignore
return NotImplemented
def __ior__(self, other: UserDict | dict) -> UserDict:
if isinstance(other, UserDict):
self.data |= other.data # type: ignore
else:
self.data |= other # type: ignore
return self
def __copy__(self) -> UserDict:
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
# Create a copy and avoid triggering descriptors
inst.__dict__["data"] = self.__dict__["data"].copy()
return inst
def copy(self) -> UserDict:
if self.__class__ is UserDict:
return UserDict(self.data.copy()) # type: ignore
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
@classmethod
def fromkeys(cls, iterable: Iterable, value: Optional[Any] = None) -> UserDict:
d = cls()
for key in iterable:
d[key] = value
return d
| UserDict |
python | gevent__gevent | src/gevent/tests/test__socket.py | {
"start": 1364,
"end": 2056
} | class ____(BaseThread):
def __init__(self, target=None, args=()):
BaseThread.__init__(self, target)
self.glet = gevent.spawn(self.target, *args)
def join(self, *args, **kwargs):
return self.glet.join(*args, **kwargs)
def is_alive(self):
return not self.glet.ready()
if not monkey.is_module_patched('threading'):
class ThreadThread(BaseThread, _Thread):
def __init__(self, **kwargs):
target = kwargs.pop('target')
BaseThread.__init__(self, target)
_Thread.__init__(self, target=self.target, **kwargs)
self.start()
Thread = ThreadThread
else:
Thread = GreenletThread
| GreenletThread |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 17611,
"end": 18402
} | class ____(Sky2PixProjection, Zenithal):
r"""
Airy - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
\frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(
default=90.0,
description="The latitude at which to minimize the error,in degrees",
)
| Sky2Pix_Airy |
python | google__jax | jax/experimental/mosaic/gpu/examples/flash_attention.py | {
"start": 1481,
"end": 22011
} | class ____(enum.Enum):
TWO_COMPUTE_WG = enum.auto()
TWO_COMPUTE_ONE_TMA_WG = enum.auto()
def build_kernel(
batch_size: int,
q_heads: int,
kv_heads: int,
q_seq_len: int,
kv_seq_len: int,
head_dim: int,
blocks: BlockSizes,
prof_spec: profiler.ProfilerSpec | None = None,
exp_impl: ExpImplementation = ExpImplementation.EXACT,
impl: Implementation = Implementation.TWO_COMPUTE_WG,
):
compute_wgs_per_block = 2
match impl:
case Implementation.TWO_COMPUTE_WG:
wgs_per_block = 2
case Implementation.TWO_COMPUTE_ONE_TMA_WG:
wgs_per_block = 3
if batch_size != 1:
raise NotImplementedError
if blocks.stages < 2:
raise ValueError("Kernel requires at least 2 stages.")
if q_heads % kv_heads:
raise ValueError("kv_heads must divide q_heads.")
if q_seq_len % (blocks.q * compute_wgs_per_block):
raise ValueError
if kv_seq_len % blocks.kv:
raise ValueError
if blocks.q % 64:
raise NotImplementedError
if blocks.kv % 64:
raise NotImplementedError
if head_dim % 64:
raise NotImplementedError
if blocks.stages * blocks.kv > kv_seq_len:
raise NotImplementedError
q_shape = jax.ShapeDtypeStruct(
(q_heads, q_seq_len, head_dim), jnp.float16
)
kv_shape = jax.ShapeDtypeStruct(
(kv_heads, kv_seq_len, head_dim), jnp.float16
)
q_heads_per_kv_head = q_heads // kv_heads
def exp(x: FragmentedArray) -> FragmentedArray:
return x.exp(approx=exp_impl == ExpImplementation.APPROX)
block_partition = Partition(
elements=(batch_size, q_seq_len, q_heads),
partition=(0, 1, 2),
chunk_size=(1, blocks.q * compute_wgs_per_block, 1),
)
index = ir.IndexType.get()
i32 = ir.IntegerType.get_signless(32)
f16 = ir.F16Type.get()
f32 = ir.F32Type.get()
grid = block_partition.num_chunks
block = (wgs_per_block * 128, 1, 1)
tiling = (64, 64)
qo_scratch = jax.ShapeDtypeStruct(
(compute_wgs_per_block, *tile_shape((blocks.q, head_dim), tiling)),
jnp.float16,
)
k_scratch = jax.ShapeDtypeStruct(
tile_shape((blocks.stages, head_dim, blocks.kv), tiling), jnp.float16
)
v_scratch = jax.ShapeDtypeStruct(
tile_shape((blocks.stages, blocks.kv, head_dim), tiling), jnp.float16
)
smem_buffers_shape = [
qo_scratch,
k_scratch,
v_scratch,
]
in_shape = (q_shape, kv_shape, kv_shape)
out_shape = q_shape
def c(value, ty=index):
return _utils_c(value, ty)
def tma_wg_kernel(
ctx: LaunchContext,
q_gmem,
k_gmem,
v_gmem,
out_gmem,
smem,
):
wg_idx = warpgroup_idx(sync=True)
smem_buffers, buffer_barriers, consumed_barriers, schedule_barrier = smem
qo_smem, k_smem, v_smem = smem_buffers
k_barriers, v_barriers, q_barriers = buffer_barriers
k_consumed_barrier, v_consumed_barrier = consumed_barriers
@ctx.named_region("Schedule barrier")
def perform_schedule_barrier():
schedule_barrier.arrive()
schedule_barrier.wait()
qo_smem = memref_slice(qo_smem, arith.index_cast(index, wg_idx))
@contextlib.contextmanager
def only_wg(idx):
is_wg = arith.cmpi(arith.CmpIPredicate.eq, wg_idx, c(idx, i32))
with ir.InsertionPoint(scf.IfOp(is_wg).then_block):
yield
scf.yield_([])
batch_idx, q_seq_base, q_head_idx = block_partition.get_base(
gpu.block_id(gpu.Dimension.x),
gpu.block_id(gpu.Dimension.y),
gpu.block_id(gpu.Dimension.z),
)
q_seq_base = arith.addi(
q_seq_base, arith.muli(arith.index_cast(index, wg_idx), c(blocks.q))
)
del batch_idx
loop_partition = Partition1D(kv_seq_len, chunk_size=blocks.kv)
if_compute = scf.IfOp(
arith.cmpi(arith.CmpIPredicate.ne, wg_idx, c(2, i32)), hasElse=True
)
with ir.InsertionPoint(if_compute.then_block):
nvvm.setmaxregister(232, nvvm.SetMaxRegisterAction.increase)
with ctx.named_region("Q TMA start"):
ctx.async_copy(
src_ref=q_gmem,
gmem_slice=(q_head_idx, ds(q_seq_base, blocks.q)),
gmem_transform=TileTransform(tiling),
dst_ref=qo_smem,
barrier=q_barriers[wg_idx],
swizzle=128,
)
with ctx.named_region("Q TMA wait"):
q_barriers[wg_idx].wait()
m_i = FragmentedArray.splat(
c(-jnp.inf, f32), shape=(blocks.q,), layout=WGMMA_ROW_LAYOUT
)
l_i = FragmentedArray.splat(
c(0, f32), shape=(blocks.q,), layout=WGMMA_ROW_LAYOUT
)
acc = FragmentedArray.splat(
c(0, f32), shape=(blocks.q, head_dim), layout=WGMMA_LAYOUT
)
k_barriers[c(0)].wait()
with only_wg(1):
perform_schedule_barrier()
@fori(c(loop_partition.num_chunks), (acc, m_i, l_i))
def kv_loop(kv_step, carry):
acc, m_i, l_i = carry
slot = arith.remui(kv_step, c(blocks.stages))
with ctx.named_region("QK issue"):
# TODO(apaszke): Support WGMMA without an initial accumulator.
qk_acc = WGMMAAccumulator.zero(blocks.q, blocks.kv)
q, k = qo_smem, memref_slice(k_smem, slot)
qk_acc = wgmma(qk_acc, q, memref_transpose(k, (0, 1, 3, 2)))
nvvm.wgmma_commit_group_sync_aligned()
perform_schedule_barrier()
with ctx.named_region("QK wait"):
nvvm.wgmma_wait_group_sync_aligned(0)
k_consumed_barrier.arrive()
qk = qk_acc.value
with ctx.named_region("Softmax"):
m_ij = m_i.max(qk.reduce(arith.maximumf, axis=1))
alpha = exp(m_i - m_ij)
m_i = m_ij
p = exp(qk - m_ij.broadcast_minor(blocks.kv))
acc *= alpha.broadcast_minor(head_dim)
l_i *= alpha
p16 = p.astype(f16)
with ctx.named_region("V TMA wait"):
v_barriers[slot].wait()
perform_schedule_barrier()
# This is quite surprising, but it seems like warp shuffles cannot
# run simultaneously with the WGMMA. For that reason we include it as
# part of the TensorCore critical section and not the ALU section.
with ctx.named_region("Softmax reduction"):
l_i += p.reduce(arith.addf, axis=1)
with ctx.named_region("PV issue"):
v = memref_slice(v_smem, slot)
acc_update = WGMMAAccumulator.from_registers(acc)
acc_update = wgmma(acc_update, p16, v)
nvvm.wgmma_commit_group_sync_aligned()
# We hide the barrier overhead by overlapping it with the PV matmul.
with ctx.named_region("K TMA wait"):
wait_step = arith.addi(kv_step, c(1))
wait_slot = arith.remui(wait_step, c(blocks.stages))
wait_step_in_bounds = arith.cmpi(
arith.CmpIPredicate.slt, wait_step, c(loop_partition.num_chunks)
)
with ir.InsertionPoint(scf.IfOp(wait_step_in_bounds).then_block):
k_barriers[wait_slot].wait()
scf.yield_([])
with ctx.named_region("PV wait"):
nvvm.wgmma_wait_group_sync_aligned(0)
v_consumed_barrier.arrive()
acc = acc_update.value
return acc, m_i, l_i
with only_wg(0):
perform_schedule_barrier()
acc, m_i, l_i = kv_loop.results
del m_i
# TODO(apaszke): Invert and multiply to avoid expensive divisions.
acc /= l_i.broadcast_minor(head_dim)
with ctx.named_region("Acc store"):
acc.astype(f16).store_tiled(qo_smem, swizzle=128)
commit_shared() # Make sure the store is visible to the TMA.
with ctx.named_region("GMEM store"):
ctx.async_copy(
src_ref=qo_smem,
dst_ref=out_gmem,
gmem_slice=(q_head_idx, ds(q_seq_base, blocks.q)),
gmem_transform=TileTransform(tiling),
swizzle=128,
)
ctx.await_async_copy(0)
scf.yield_([])
with ir.InsertionPoint(if_compute.else_block):
nvvm.setmaxregister(40, nvvm.SetMaxRegisterAction.decrease)
with single_thread(scope=ThreadSubset.WARPGROUP):
k_tr = (TileTransform(tiling), TransposeTransform((1, 0, 2, 3)))
v_tr = TileTransform(tiling)
kv_head_idx = arith.divui(q_head_idx, c(q_heads_per_kv_head))
def start_kv_copy(slot, kv_seq_base, smem, gmem, barrier, transform):
ctx.async_copy(
dst_ref=memref_slice(smem, slot),
src_ref=gmem,
gmem_slice=(kv_head_idx, ds(kv_seq_base, blocks.kv)),
gmem_transform=transform,
barrier=barrier,
predicate=None,
swizzle=128,
)
def start_k_copy(slot, kv_seq_base):
return start_kv_copy(
slot, kv_seq_base, k_smem, k_gmem, k_barriers[slot], k_tr
)
def start_v_copy(slot, kv_seq_base):
return start_kv_copy(
slot, kv_seq_base, v_smem, v_gmem, v_barriers[slot], v_tr
)
with ctx.named_region("KV TMA warmup"):
for i in range(blocks.stages):
start_k_copy(c(i), loop_partition.get_base(c(i)))
start_v_copy(c(i), loop_partition.get_base(c(i)))
@fori(c(loop_partition.num_chunks - blocks.stages), None)
def _kv_loop_memory(kv_step, _):
tma_step = arith.addi(kv_step, c(blocks.stages))
tma_slot = arith.remui(kv_step, c(blocks.stages))
with ctx.named_region("K consumed barrier"):
k_consumed_barrier.wait()
start_k_copy(tma_slot, loop_partition.get_base(tma_step))
with ctx.named_region("V consumed barrier"):
v_consumed_barrier.wait()
start_v_copy(tma_slot, loop_partition.get_base(tma_step))
@fori(c(blocks.stages), None)
def _kv_loop_memory(i, _):
k_consumed_barrier.wait()
v_consumed_barrier.wait()
scf.yield_([])
def compute_only_kernel(
ctx: LaunchContext,
q_gmem,
k_gmem,
v_gmem,
out_gmem,
smem_scratch,
):
wg_idx = warpgroup_idx(sync=True)
(qo_smem, k_smem, v_smem), barriers, schedule_barrier = smem_scratch
def perform_schedule_barrier():
schedule_barrier.arrive()
schedule_barrier.wait()
qo_smem = memref_slice(qo_smem, arith.index_cast(index, wg_idx))
@contextlib.contextmanager
def only_wg(idx):
i32 = ir.IntegerType.get_signless(32)
is_wg = arith.cmpi(arith.CmpIPredicate.eq, wg_idx, c(idx, i32))
with ir.InsertionPoint(scf.IfOp(is_wg).then_block):
yield
scf.yield_([])
batch_idx, q_seq_base, q_head_idx = block_partition.get_base(
gpu.block_id(gpu.Dimension.x),
gpu.block_id(gpu.Dimension.y),
gpu.block_id(gpu.Dimension.z),
)
q_seq_base = arith.addi(
q_seq_base, arith.muli(arith.index_cast(index, wg_idx), c(blocks.q))
)
del batch_idx
q_barrier = arith.addi(c(blocks.stages), arith.index_cast(index, wg_idx))
with ctx.named_region("Q TMA start"):
ctx.async_copy(
src_ref=q_gmem,
gmem_slice=(q_head_idx, ds(q_seq_base, blocks.q)),
gmem_transform=TileTransform(tiling),
dst_ref=qo_smem,
barrier=barriers[q_barrier],
swizzle=128,
)
kv_head_idx = arith.divui(q_head_idx, c(q_heads_per_kv_head))
def kv_copy_init(slot, kv_seq_base):
with single_thread(ThreadSubset.WARPGROUP):
txcount = 2 * blocks.kv * head_dim * bytewidth(f16)
barriers[slot].arrive_expect_tx(txcount)
k_tr = (TileTransform(tiling), TransposeTransform((1, 0, 2, 3)))
v_tr = TileTransform(tiling)
for smem, gmem, t in ((k_smem, k_gmem, k_tr), (v_smem, v_gmem, v_tr)):
ctx.async_copy(
dst_ref=memref_slice(smem, slot),
src_ref=gmem,
gmem_slice=(kv_head_idx, ds(kv_seq_base, blocks.kv)),
gmem_transform=t,
barrier=barriers[slot],
arrive=False,
predicate=None,
swizzle=128,
)
loop_partition = Partition1D(kv_seq_len, chunk_size=blocks.kv)
with only_wg(1), ctx.named_region("KV TMA warmup"):
for i in range(blocks.stages - 1):
kv_copy_init(c(i), loop_partition.get_base(c(i)))
with ctx.named_region("Q TMA wait"):
barriers[q_barrier].wait()
m_i = FragmentedArray.splat(
c(-jnp.inf, f32), shape=(blocks.q,), layout=WGMMA_ROW_LAYOUT
)
l_i = FragmentedArray.splat(
c(0, f32), shape=(blocks.q,), layout=WGMMA_ROW_LAYOUT
)
acc = FragmentedArray.splat(
c(0, f32), shape=(blocks.q, head_dim), layout=WGMMA_LAYOUT
)
with only_wg(1):
perform_schedule_barrier()
with only_wg(0):
barriers[c(0)].wait()
@fori(c(loop_partition.num_chunks), (acc, m_i, l_i))
def kv_loop(kv_step, carry):
acc, m_i, l_i = carry
slot = arith.remui(kv_step, c(blocks.stages))
with ctx.named_region("QK issue"):
# TODO(apaszke): Support WGMMA without an initial accumulator.
qk_acc = WGMMAAccumulator.zero(blocks.q, blocks.kv)
q, k = qo_smem, memref_slice(k_smem, slot)
qk_acc = wgmma(qk_acc, q, memref_transpose(k, (0, 1, 3, 2)))
nvvm.wgmma_commit_group_sync_aligned()
# We hide the TMA overhead by overlapping it with the QK matmul.
with only_wg(1), ctx.named_region("KV TMA start"):
tma_step = arith.addi(kv_step, c(blocks.stages - 1))
tma_slot = arith.remui(tma_step, c(blocks.stages))
tma_step_in_bounds = arith.cmpi(
arith.CmpIPredicate.slt, tma_step, c(loop_partition.num_chunks)
)
if_op = scf.IfOp(tma_step_in_bounds)
with ir.InsertionPoint(if_op.then_block):
kv_copy_init(tma_slot, loop_partition.get_base(tma_step))
scf.yield_([])
perform_schedule_barrier()
with ctx.named_region("QK wait"):
nvvm.wgmma_wait_group_sync_aligned(0)
qk = qk_acc.value
with ctx.named_region("Softmax"):
m_ij = m_i.max(qk.reduce(arith.maximumf, axis=1))
alpha = exp(m_i - m_ij)
m_i = m_ij
p = exp(qk - m_ij.broadcast_minor(blocks.kv))
acc *= alpha.broadcast_minor(head_dim)
l_i *= alpha
l_i += p.reduce(arith.addf, axis=1)
p = p.astype(f16)
perform_schedule_barrier()
with ctx.named_region("PV issue"):
v = memref_slice(v_smem, slot)
acc_update = WGMMAAccumulator.from_registers(acc)
acc_update = wgmma(acc_update, p, v)
nvvm.wgmma_commit_group_sync_aligned()
# We hide the barrier overhead by overlapping it with the PV matmul.
with only_wg(0), ctx.named_region("KV TMA wait"):
wait_step = arith.addi(kv_step, c(1))
wait_slot = arith.remui(wait_step, c(blocks.stages))
wait_step_in_bounds = arith.cmpi(
arith.CmpIPredicate.slt, wait_step, c(loop_partition.num_chunks)
)
with ir.InsertionPoint(scf.IfOp(wait_step_in_bounds).then_block):
barriers[wait_slot].wait()
scf.yield_([])
with ctx.named_region("PV wait"):
nvvm.wgmma_wait_group_sync_aligned(0)
acc = acc_update.value
return acc, m_i, l_i
with only_wg(0):
perform_schedule_barrier()
acc, m_i, l_i = kv_loop.results
del m_i
# TODO(apaszke): Invert and multiply to avoid expensive divisions.
acc /= l_i.broadcast_minor(head_dim)
with ctx.named_region("Acc store"):
acc.astype(f16).store_tiled(qo_smem, swizzle=128)
gpu.barrier()
nvvm.fence_proxy(
nvvm.ProxyKind.async_shared, space=nvvm.SharedSpace.shared_cta
) # Make sure the store is visible to the TMA.
with ctx.named_region("GMEM store"):
ctx.async_copy(
src_ref=qo_smem,
dst_ref=out_gmem,
gmem_slice=(q_head_idx, ds(q_seq_base, blocks.q)),
gmem_transform=TileTransform(tiling),
swizzle=128,
)
ctx.await_async_copy(0)
match impl:
case Implementation.TWO_COMPUTE_WG:
kernel = compute_only_kernel
smem_scratch_shape = (
smem_buffers_shape,
TMABarrier(blocks.stages + compute_wgs_per_block),
Barrier(arrival_count=256, num_barriers=1),
)
case Implementation.TWO_COMPUTE_ONE_TMA_WG:
kernel = tma_wg_kernel
smem_scratch_shape = (
smem_buffers_shape,
(
TMABarrier(blocks.stages),
TMABarrier(blocks.stages),
TMABarrier(compute_wgs_per_block),
),
Barrier(arrival_count=256, num_barriers=2),
Barrier(arrival_count=256, num_barriers=1),
)
return as_gpu_kernel(
kernel, grid, block, in_shape, out_shape, smem_scratch_shape, prof_spec
)
def benchmark_and_verify(
batch_size,
q_seq_len,
kv_seq_len,
num_q_heads,
num_kv_heads,
head_dim,
**kwargs,
) -> float:
with mlir.make_ir_context(), ir.Location.unknown():
kq, kk, kv = random.split(random.key(1234), 3)
q = random.normal(
kq, (batch_size, num_q_heads, q_seq_len, head_dim), dtype=jnp.float16
)
k = random.normal(
kk, (batch_size, num_kv_heads, kv_seq_len, head_dim), dtype=jnp.float16
)
v = random.normal(
kv, (batch_size, num_kv_heads, kv_seq_len, head_dim), dtype=jnp.float16
)
f = build_kernel(
batch_size=batch_size,
q_heads=num_q_heads,
kv_heads=num_kv_heads,
q_seq_len=q_seq_len,
kv_seq_len=kv_seq_len,
head_dim=head_dim,
**kwargs,
)
out, runtime = profiler.measure(f)(q[0], k[0], v[0])
out = out[None]
@jax.jit
def ref(q, k, v):
q = q.astype(jnp.float32)
k = k.astype(jnp.float32)
v = v.astype(jnp.float32)
q_reshaped = q.reshape(
batch_size, num_kv_heads, num_q_heads // num_kv_heads, q_seq_len,
head_dim)
logits = jnp.einsum("bxhqc,bxkc->bxhqk", q_reshaped, k)
m = logits.max(axis=-1)
unnormalized = jnp.exp(logits - m[..., None])
l = unnormalized.sum(axis=-1)
weights = unnormalized / l[..., None]
return jnp.einsum("bxhqk,bxkc->bxhqc", weights, v).reshape(*q.shape)
expected = ref(q, k, v)
np.testing.assert_allclose(out, expected, atol=2e-3, rtol=2e-3)
return runtime
if __name__ == "__main__":
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_equal("9.0")):
print(
"Mosaic GPU Flash Attention requires compute capability 9.0a to run, "
"skipping.")
exit(0)
batch_size = 1
num_q_heads = 2
num_kv_heads = 1
prof_spec = None
seq_lens = (4096, 32768)
problem_it = itertools.product(seq_lens, (64, 128, 256,))
for seq_len, head_dim in problem_it:
q_seq_len = kv_seq_len = seq_len
print(
"===="
f" {kv_seq_len=:<6} {q_seq_len=:<6} {num_q_heads=:<4} {head_dim=:<6} ===="
)
param_it = itertools.product(
(ExpImplementation.APPROX,), Implementation, (64,), (64, 128, 256),
)
best = None
for exp_impl, impl, block_q, block_kv in param_it:
try:
runtime_ms = benchmark_and_verify(
batch_size,
q_seq_len,
kv_seq_len,
num_q_heads,
num_kv_heads,
head_dim,
prof_spec=prof_spec,
exp_impl=exp_impl,
blocks=BlockSizes(q=block_q, kv=block_kv, stages=2),
impl=impl,
)
except ValueError as e:
if "exceeds available shared memory" in e.args[0]:
continue
raise
runtime_us = runtime_ms * 1e3
matmul_flops = (
4 * q_seq_len * kv_seq_len * head_dim * num_q_heads * batch_size
)
# Table 1 in
# https://resources.nvidia.com/en-us-tensor-core/gtc22-whitepaper-hopper
peak_flops = 989.4 * 1e12 # f16 TensorCore peak
optimal_time = matmul_flops / peak_flops * 1e6 # us
achieved_tc_util = optimal_time / runtime_us * 100
has_tma_warp = impl == Implementation.TWO_COMPUTE_ONE_TMA_WG
print(
f"exp_impl={exp_impl.name:<6} block_q={block_q:<4}block_kv={block_kv:<4}tma_warp={has_tma_warp:<1}: {runtime_us:<7.1f}us"
f" = {achieved_tc_util:4.1f}% TC utilization"
)
if best is None or runtime_us < best[0]:
best = (runtime_us, achieved_tc_util)
if best is not None:
print(f"Best: {best[0]:<7.1f}us = {best[1]:4.1f}% TC utilization")
| Implementation |
python | cython__cython | tests/run/py3k_super.py | {
"start": 63,
"end": 352
} | class ____(object):
def method(self):
return 1
@classmethod
def class_method(cls):
return 2
@staticmethod
def static_method():
return 3
def generator_test(self):
return [1, 2, 3]
def super_class(self):
return __class__
| A |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_increasing.py | {
"start": 673,
"end": 4322
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.increasing"
condition_value_keys = ("strictly",)
default_kwarg_values = {
"strictly": False,
}
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(
cls,
column,
**kwargs,
):
temp_column = column
series_diff = temp_column.diff()
# The first element is null, so it gets a bye and is always treated as True
series_diff[series_diff.isnull()] = 1
strictly: bool = kwargs.get("strictly") or False
if strictly:
return series_diff > 0
return series_diff >= 0
@metric_partial(
engine=SparkDFExecutionEngine,
partial_fn_type=MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
domain_type=MetricDomainTypes.COLUMN,
)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
# check if column is any type that could have na (numeric types)
column_name = metric_domain_kwargs["column"]
table_columns = metrics["table.column_types"]
column_metadata = [col for col in table_columns if col["name"] == column_name][0]
if isinstance(
column_metadata["type"],
(
pyspark.types.LongType,
pyspark.types.DoubleType,
pyspark.types.IntegerType,
),
):
# if column is any type that could have NA values, remove them (not filtered by .isNotNull()) # noqa: E501 # FIXME CoP
compute_domain_kwargs = execution_engine.add_column_row_condition(
metric_domain_kwargs,
filter_null=cls.filter_column_isnull,
filter_nan=True,
)
else:
compute_domain_kwargs = metric_domain_kwargs
(
_df,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
compute_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
# instead detect types naturally
column = F.col(column_name)
if isinstance(
column_metadata["type"],
(
pyspark.types.TimestampType,
pyspark.types.DateType,
),
):
diff = F.datediff(
column,
F.lag(column).over(pyspark.Window.orderBy(F.lit("constant"))),
)
else:
diff = column - F.lag(column).over(pyspark.Window.orderBy(F.lit("constant")))
diff = F.when(diff.isNull(), 1).otherwise(diff)
# NOTE: because in spark we are implementing the window function directly,
# we have to return the *unexpected* condition.
# If we expect values to be *strictly* increasing then unexpected values are those
# that are flat or decreasing
if metric_value_kwargs["strictly"] is True:
return (
F.when(diff <= 0, F.lit(True)).otherwise(F.lit(False)),
compute_domain_kwargs,
accessor_domain_kwargs,
)
# If we expect values to be flat or increasing then unexpected values are those
# that are decreasing
else:
return (
F.when(diff < 0, F.lit(True)).otherwise(F.lit(False)),
compute_domain_kwargs,
accessor_domain_kwargs,
)
| ColumnValuesIncreasing |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 18083,
"end": 18292
} | class ____(APIView):
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
def get(self, *args, **kwargs):
pass
def post(self, request, *args, **kwargs):
pass
| ExampleListView |
python | getsentry__sentry | tests/sentry/deletions/test_pullrequest.py | {
"start": 413,
"end": 8249
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.repo = self.create_repo(project=self.project, name="test-repo")
self.author = self.create_commit_author(project=self.project, email="test@example.com")
self.now = timezone.now()
self.old_date = self.now - timedelta(days=100)
self.recent_date = self.now - timedelta(days=10)
self.task = PullRequestDeletionTask(manager=get_manager(), model=PullRequest, query={})
def create_pr(self, key: str, date_added: datetime | None = None) -> PullRequest:
if date_added is None:
date_added = self.old_date
pr = PullRequest.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key=key,
title="Test PR",
author=self.author,
)
PullRequest.objects.filter(id=pr.id).update(date_added=date_added)
pr.refresh_from_db()
return pr
def create_old_commit(self) -> Commit:
commit = self.create_commit(
project=self.project,
repo=self.repo,
author=self.author,
)
Commit.objects.filter(id=commit.id).update(date_added=self.old_date)
return commit
def create_pull_request_comment(
self,
pull_request: PullRequest,
created_at: datetime | None = None,
updated_at: datetime | None = None,
group_ids: list[int] | None = None,
external_id: int | None = None,
) -> PullRequestComment:
if external_id is None:
external_id = PullRequestComment.objects.filter(pull_request=pull_request).count() + 1
return PullRequestComment.objects.create(
pull_request=pull_request,
external_id=external_id,
created_at=created_at or self.old_date,
updated_at=updated_at or self.old_date,
group_ids=group_ids or [],
)
def test_query_filter_removes_unused_prs(self) -> None:
pr_old_unused = self.create_pr("pr1", self.old_date)
self.create_pr("pr2", self.recent_date)
pr_with_recent_comment = self.create_pr("pr3", self.old_date)
self.create_pull_request_comment(
pull_request=pr_with_recent_comment,
created_at=self.recent_date,
updated_at=self.old_date,
)
filtered = list(PullRequest.objects.filter(self.task.get_query_filter()))
assert len(filtered) == 1
assert filtered[0].id == pr_old_unused.id
def test_query_filter_keeps_pr_with_release_commit(self) -> None:
pr = self.create_pr("pr_release", self.old_date)
commit = self.create_old_commit()
self.create_pull_request_commit(pr, commit)
release = self.create_release(project=self.project)
self.create_release_commit(release, commit)
filtered = list(PullRequest.objects.filter(self.task.get_query_filter()))
assert len(filtered) == 0
def test_query_filter_keeps_pr_with_valid_group_link(self) -> None:
pr = self.create_pr("pr_group", self.old_date)
group = self.create_group(project=self.project)
GroupLink.objects.create(
group=group,
project=self.project,
linked_type=GroupLink.LinkedType.pull_request,
linked_id=pr.id,
relationship=GroupLink.Relationship.resolves,
)
filtered = list(PullRequest.objects.filter(self.task.get_query_filter()))
assert len(filtered) == 0
def test_query_filter_deletes_pr_with_invalid_group_link(self) -> None:
pr = self.create_pr("pr_invalid_group", self.old_date)
GroupLink.objects.create(
group_id=999999, # Non-existent group
project=self.project,
linked_type=GroupLink.LinkedType.pull_request,
linked_id=pr.id,
relationship=GroupLink.Relationship.resolves,
)
filtered = list(PullRequest.objects.filter(self.task.get_query_filter()))
assert len(filtered) == 1
assert filtered[0].id == pr.id
def test_query_filter_with_comment_group_ids(self) -> None:
pr_valid_group = self.create_pr("pr_valid", self.old_date)
group = self.create_group(project=self.project)
self.create_pull_request_comment(
pull_request=pr_valid_group,
group_ids=[group.id],
)
pr_invalid_group = self.create_pr("pr_invalid", self.old_date)
self.create_pull_request_comment(
pull_request=pr_invalid_group,
group_ids=[999999], # Non-existent
)
filtered = list(PullRequest.objects.filter(self.task.get_query_filter()))
assert len(filtered) == 1
assert filtered[0].id == pr_invalid_group.id
def test_get_child_relations_includes_comments_and_commits(self) -> None:
pr = self.create_pr("pr_children", self.old_date)
self.create_pull_request_comment(pr)
commit = self.create_old_commit()
self.create_pull_request_commit(pr, commit)
relations = self.task.get_child_relations(pr)
assert len(relations) == 2
relation_models = {r.params["model"] for r in relations}
assert PullRequestComment in relation_models
assert PullRequestCommit in relation_models
for relation in relations:
assert relation.params["query"] == {"pull_request_id": pr.id}
def test_deletion_cascades_to_children(self) -> None:
pr = self.create_pr("pr_cascade", self.old_date)
comment = self.create_pull_request_comment(pr)
commit = self.create_old_commit()
pr_commit = self.create_pull_request_commit(pr, commit)
pr.delete()
assert not PullRequestComment.objects.filter(id=comment.id).exists()
assert not PullRequestCommit.objects.filter(id=pr_commit.id).exists()
assert Commit.objects.filter(id=commit.id).exists()
def test_query_filter_with_no_prs(self) -> None:
filtered = list(PullRequest.objects.filter(self.task.get_query_filter()))
assert filtered == []
def test_cutoff_date_is_90_days(self) -> None:
self.create_pr("pr_89", self.now - timedelta(days=89))
pr_91_days = self.create_pr("pr_91", self.now - timedelta(days=91))
filtered = list(PullRequest.objects.filter(self.task.get_query_filter()))
assert len(filtered) == 1
assert filtered[0].id == pr_91_days.id
def test_actual_deletion_execution(self) -> None:
# Create a mix of PRs that should and shouldn't be deleted
pr_old_unused = self.create_pr("old_unused", self.old_date)
pr_recent = self.create_pr("recent", self.recent_date)
pr_with_release = self.create_pr("with_release", self.old_date)
comment = self.create_pull_request_comment(pr_old_unused)
commit = self.create_old_commit()
pr_commit = self.create_pull_request_commit(pr_old_unused, commit)
release_commit = self.create_old_commit()
self.create_pull_request_commit(pr_with_release, release_commit)
release = self.create_release(project=self.project)
self.create_release_commit(release, release_commit)
self.task.chunk(apply_filter=True)
assert not PullRequest.objects.filter(id=pr_old_unused.id).exists()
assert not PullRequestComment.objects.filter(id=comment.id).exists()
assert not PullRequestCommit.objects.filter(id=pr_commit.id).exists()
assert PullRequest.objects.filter(id=pr_recent.id).exists()
assert PullRequest.objects.filter(id=pr_with_release.id).exists()
assert Commit.objects.filter(id=commit.id).exists()
assert Commit.objects.filter(id=release_commit.id).exists()
| PullRequestDeletionTaskTest |
python | apache__airflow | providers/ftp/src/airflow/providers/ftp/operators/ftp.py | {
"start": 1181,
"end": 1283
} | class ____:
"""Operation that can be used with FTP."""
PUT = "put"
GET = "get"
| FTPOperation |
python | huggingface__transformers | src/transformers/models/afmoe/modular_afmoe.py | {
"start": 14585,
"end": 18290
} | class ____(AfmoePreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`AfmoeDecoderLayer`]
Args:
config: AfmoeConfig
"""
def __init__(self, config: AfmoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[AfmoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = AfmoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@auto_docstring
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache()
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens,
past_seen_tokens + inputs_embeds.shape[1],
device=inputs_embeds.device,
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
}
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
hidden_states = inputs_embeds
# Apply muP input scaling if enabled
if self.config.mup_enabled:
hidden_states = hidden_states * (self.config.hidden_size**0.5)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_ids=position_ids,
past_key_value=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| AfmoeModel |
python | spack__spack | lib/spack/spack/error.py | {
"start": 3472,
"end": 3580
} | class ____(SpackError):
"""Raised when package headers are requested but cannot be found"""
| NoHeadersError |
python | dask__distributed | distributed/shuffle/tests/test_disk_buffer.py | {
"start": 2768,
"end": 4131
} | class ____(DiskShardsBuffer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.counter = 0
async def _process(self, *args: Any, **kwargs: Any) -> None:
# We only want to raise if this was queued up before
if self.counter > self.concurrency_limit:
raise Exception(123)
self.counter += 1
return await super()._process(*args, **kwargs)
@gen_test()
async def test_high_pressure_flush_with_exception(tmp_path):
payload = {f"shard-{ix}": [f"shard-{ix}".encode() * 100] for ix in range(100)}
async with EventuallyBrokenDiskShardsBuffer(
directory=tmp_path, read=read_bytes, memory_limiter=ResourceLimiter(None)
) as mf:
tasks = []
for _ in range(10):
tasks.append(asyncio.create_task(mf.write(payload)))
# Wait until things are actually queued up.
# This is when there is no slot on the queue available anymore
# but there are still shards around
while not mf.shards:
# Disks are fast, don't give it time to unload the queue...
# There may only be a few ticks atm so keep this at zero
await asyncio.sleep(0)
with pytest.raises(Exception, match="123"):
await mf.flush()
mf.raise_on_exception()
| EventuallyBrokenDiskShardsBuffer |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F401_13.py | {
"start": 163,
"end": 244
} | class ____:
List: TypeAlias = List
def bar(self) -> List:
pass
| Class |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 15825,
"end": 16357
} | class ____(TypedDict):
where: Optional[Where]
where_document: Optional[WhereDocument]
def validate_filter_set(filter_set: FilterSet) -> None:
if filter_set["where"] is not None:
validate_where(filter_set["where"])
if filter_set["where_document"] is not None:
validate_where_document(filter_set["where_document"])
Embeddable = Union[Documents, Images]
D = TypeVar("D", bound=Embeddable, contravariant=True)
Loadable = List[Optional[Image]]
L = TypeVar("L", covariant=True, bound=Loadable)
| FilterSet |
python | dagster-io__dagster | docs/sphinx/_ext/sphinx-mdx-builder/tests/dummy_module.py | {
"start": 1884,
"end": 2020
} | class ____(Enum):
"""Color enumeration for testing enum documentation."""
RED = "red"
GREEN = "green"
BLUE = "blue"
| Color |
python | Pylons__pyramid | tests/test_scripts/test_pdistreport.py | {
"start": 39,
"end": 1667
} | class ____(unittest.TestCase):
def _callFUT(self, **kw):
argv = []
from pyramid.scripts.pdistreport import main
return main(argv, **kw)
def test_no_dists(self):
def platform():
return 'myplatform'
importlib_metadata = DummyImportlibMetadata()
L = []
def out(*args):
L.extend(args)
result = self._callFUT(
importlib_metadata=importlib_metadata, platform=platform, out=out
)
self.assertEqual(result, None)
self.assertEqual(
L,
['Pyramid version:', '1', 'Platform:', 'myplatform', 'Packages:'],
)
def test_with_dists(self):
def platform():
return 'myplatform'
working_set = (DummyDistribution('abc'), DummyDistribution('def'))
importlib_metadata = DummyImportlibMetadata(working_set)
L = []
def out(*args):
L.extend(args)
result = self._callFUT(
importlib_metadata=importlib_metadata, platform=platform, out=out
)
self.assertEqual(result, None)
self.assertEqual(
L,
[
'Pyramid version:',
'1',
'Platform:',
'myplatform',
'Packages:',
' ',
'abc',
'1',
' ',
'summary for name=\'abc\'',
' ',
'def',
'1',
' ',
'summary for name=\'def\'',
],
)
| TestPDistReportCommand |
python | PrefectHQ__prefect | tests/server/schemas/test_states.py | {
"start": 1716,
"end": 3010
} | class ____:
@pytest.mark.parametrize("state_type", StateType)
def test_is_scheduled(self, state_type):
state = State(type=state_type)
assert state.is_scheduled() == (state_type == StateType.SCHEDULED)
@pytest.mark.parametrize("state_type", StateType)
def test_is_pending(self, state_type):
state = State(type=state_type)
assert state.is_pending() == (state_type == StateType.PENDING)
@pytest.mark.parametrize("state_type", StateType)
def test_is_running(self, state_type):
state = State(type=state_type)
assert state.is_running() == (state_type == StateType.RUNNING)
@pytest.mark.parametrize("state_type", StateType)
def test_is_completed(self, state_type):
state = State(type=state_type)
assert state.is_completed() == (state_type == StateType.COMPLETED)
@pytest.mark.parametrize("state_type", StateType)
def test_is_failed(self, state_type):
state = State(type=state_type)
assert state.is_failed() == (state_type == StateType.FAILED)
@pytest.mark.parametrize("state_type", StateType)
def test_is_cancelled(self, state_type):
state = State(type=state_type)
assert state.is_cancelled() == (state_type == StateType.CANCELLED)
| TestStateTypeFunctions |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/template_test.py | {
"start": 2640,
"end": 28351
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def test_end_to_end(self):
"""This test shows a very simple line model with test_loss.
The template is used to share parameters between a training and test model.
"""
# y = 2x + 1
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
random_seed.set_random_seed(1234)
def test_line(x):
m = variable_scope.get_variable(
"w", shape=[], initializer=init_ops.truncated_normal_initializer())
b = variable_scope.get_variable(
"b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
train_prediction = line_template(training_input)
test_prediction = line_template(test_input)
train_loss = math_ops.reduce_mean(
math_ops.square(train_prediction - training_output))
test_loss = math_ops.reduce_mean(
math_ops.square(test_prediction - test_output))
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train_op = optimizer.minimize(train_loss)
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
initial_test_loss = self.evaluate(test_loss)
self.evaluate(train_op)
final_test_loss = self.evaluate(test_loss)
# Parameters are tied, so the loss should have gone down when we trained it.
self.assertLess(final_test_loss, initial_test_loss)
def test_end_to_end_eager(self):
"""This test shows a very simple line model with test_loss in eager mode.
The template is used to share parameters between a training and test model.
"""
with context.eager_mode():
# y = 2x + 1
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
random_seed.set_random_seed(1234)
def test_line(x):
m = variable_scope.get_variable(
"w", shape=[], initializer=init_ops.truncated_normal_initializer())
b = variable_scope.get_variable(
"b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
def train_loss():
train_prediction = line_template(training_input)
return math_ops.reduce_mean(
math_ops.square(train_prediction - training_output))
def test_loss():
test_prediction = line_template(test_input)
return math_ops.reduce_mean(
math_ops.square(test_prediction - test_output))
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
initial_test_loss = test_loss()
optimizer.minimize(train_loss)
final_test_loss = test_loss()
# Parameters are tied, so the loss should have gone down after training.
self.assertLess(final_test_loss.numpy(), initial_test_loss.numpy())
def test_eager_delayed_store_pickup(self):
"""This test shows a very simple line model with test_loss in eager mode.
The template is used to share parameters between a training and test model.
This test also shows that it can pick up explicitly set variable stores
even if they are only set before the first template usage.
"""
with context.eager_mode():
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
random_seed.set_random_seed(1234)
def test_line(x):
m = variable_scope.get_variable(
"w", shape=[], initializer=init_ops.truncated_normal_initializer())
b = variable_scope.get_variable(
"b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
def train_loss():
train_prediction = line_template(training_input)
return math_ops.reduce_mean(
math_ops.square(train_prediction - training_output))
def test_loss():
test_prediction = line_template(test_input)
return math_ops.reduce_mean(
math_ops.square(test_prediction - test_output))
store = variable_scope._VariableStore()
store._store_eager_variables = True
with variable_scope.with_variable_store(store):
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
initial_test_loss = test_loss()
optimizer.minimize(train_loss)
final_test_loss = test_loss()
# Parameters are tied, so the loss should have gone down after training.
self.assertLess(final_test_loss.numpy(), initial_test_loss.numpy())
# Verify that the explicitly set store is not empty
# and the make_template picked it up
self.assertEqual(set(store._vars.keys()), {"line/w", "line/b"})
# But the store should only get picked up once, so a second
# store will go unused:
second_store = variable_scope._VariableStore()
second_store._store_eager_variables = True
with variable_scope.with_variable_store(second_store):
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
test_loss()
optimizer.minimize(train_loss)
test_loss()
self.assertEmpty(second_store._vars)
@test_util.run_in_graph_and_eager_modes
def test_skip_stack_frames(self):
first = traceback.format_stack()
second = traceback.format_stack()
result = template._skip_common_stack_elements(first, second)
self.assertEqual(1, len(result))
self.assertNotEqual(len(first), len(result))
@test_util.run_in_graph_and_eager_modes
def test_template_with_empty_name(self):
tpl = template.make_template("", variable_scoped_function)
with variable_scope.variable_scope("outer"):
x = variable_scope.get_variable("x", [])
v = tpl()
self.assertEqual("outer/", tpl.variable_scope_name)
self.assertEqual("outer//dummy:0", v.name)
if context.executing_eagerly():
# In eager mode `x` is not visible to the template since the template does
# not rely on global collections.
self.assertEqual(1, len(tpl.variables))
self.assertIs(v, tpl.variables[0])
else:
self.assertEqual([x, v], tpl.variables)
@test_util.run_in_graph_and_eager_modes
def test_template_with_name(self):
tmpl1 = template.make_template("s1", variable_scoped_function)
tmpl2 = template.make_template("s1", variable_scoped_function)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("s1/dummy:0", v1.name)
self.assertEqual("s1_1/dummy:0", v3.name)
@test_util.run_deprecated_v1
def test_same_unique_name_raise_error(self):
tmpl1 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
tmpl1()
tmpl2 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
with self.assertRaisesRegex(
ValueError, "Variable s1/dummy already exists, disallowed.*"):
tmpl2()
def test_unique_name_raise_error_in_eager(self):
with context.eager_mode():
with self.assertRaisesRegex(
ValueError,
"unique_name_ cannot be used when eager execution is enabled."):
template.make_template(
"_", variable_scoped_function, unique_name_="s1")
@test_util.run_deprecated_v1
def test_unique_name_and_reuse(self):
tmpl1 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
v1 = tmpl1()
v2 = tmpl1()
variable_scope.get_variable_scope().reuse_variables()
tmpl2 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
v3 = tmpl2()
self.assertIs(v1, v2)
self.assertIs(v1, v3)
self.assertEqual("s1/dummy:0", v1.name)
@test_util.run_in_graph_and_eager_modes
def test_template_in_scope(self):
tmpl1 = template.make_template("s1", variable_scoped_function)
tmpl2 = template.make_template("s1", variable_scoped_function)
with variable_scope.variable_scope("scope"):
v1 = tmpl1()
v3 = tmpl2()
# The template contract requires the following to ignore scope2.
with variable_scope.variable_scope("scope2"):
v2 = tmpl1()
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("scope/s1/dummy:0", v1.name)
self.assertEqual("scope/s1_1/dummy:0", v3.name)
@test_util.run_in_graph_and_eager_modes
def test_template_with_internal_reuse(self):
tmpl1 = template.make_template("s1", internally_variable_scoped_function)
tmpl2 = template.make_template("s1", internally_variable_scoped_function)
v1 = tmpl1("test")
v2 = tmpl1("test")
v3 = tmpl2("test")
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("s1/test/dummy:0", v1.name)
self.assertEqual("s1_1/test/dummy:0", v3.name)
with self.assertRaises(ValueError):
tmpl1("not_test")
@test_util.run_in_graph_and_eager_modes
def test_template_without_name(self):
with self.assertRaisesRegex(ValueError, "name cannot be None."):
template.make_template(None, variable_scoped_function)
@test_util.run_in_graph_and_eager_modes
def test_make_template(self):
# Test both that we can call it with positional and keywords.
tmpl1 = template.make_template(
"s1", internally_variable_scoped_function, scope_name="test")
tmpl2 = template.make_template(
"s1", internally_variable_scoped_function, scope_name="test")
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("s1/test/dummy:0", v1.name)
self.assertEqual("s1_1/test/dummy:0", v3.name)
@test_util.run_deprecated_v1
def test_enforces_no_extra_trainable_variables(self):
tmpl = template.make_template("s", function_with_create, trainable=True)
tmpl()
with self.assertRaises(ValueError):
tmpl()
@test_util.run_in_graph_and_eager_modes
def test_enforces_no_extra_trainable_variables_eager(self):
tmpl = template.make_template("s",
function_with_side_create,
trainable=True)
tmpl(name="1")
with self.assertRaises(ValueError):
tmpl(name="2")
def test_permits_extra_non_trainable_variables(self):
tmpl = template.make_template("s", function_with_create, trainable=False)
self.assertIs(tmpl(), tmpl())
def test_permits_extra_non_trainable_variables_eager(self):
with context.eager_mode():
tmpl = template.make_template("s",
function_with_side_create,
trainable=False)
self.assertIs(tmpl(name="1"), tmpl(name="2"))
@test_util.run_in_graph_and_eager_modes
def test_internal_variable_reuse(self):
def nested():
with variable_scope.variable_scope("nested") as vs:
v1 = variable_scope.get_variable(
"x", initializer=init_ops.zeros_initializer(), shape=[])
with variable_scope.variable_scope(vs, reuse=True):
v2 = variable_scope.get_variable("x")
self.assertIs(v1, v2)
return v1
tmpl1 = template.make_template("s1", nested)
tmpl2 = template.make_template("s1", nested)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("s1/nested/x:0", v1.name)
self.assertEqual("s1_1/nested/x:0", v3.name)
@test_util.run_in_graph_and_eager_modes
def test_nested_templates(self):
def nested_template():
nested1 = template.make_template("nested", variable_scoped_function)
nested2 = template.make_template("nested", variable_scoped_function)
v1 = nested1()
v2 = nested2()
# nested1 and nested2 should not share variables
self.assertIsNot(v1, v2)
# Variables created by nested1 should be isolated from variables
# created by nested2.
self.assertEqual(1, len(nested1.variables))
self.assertEqual(1, len(nested2.variables))
self.assertIs(nested1.variables[0], v1)
self.assertIs(nested2.variables[0], v2)
self.assertEqual(1, len(nested1.trainable_variables))
self.assertEqual(1, len(nested2.trainable_variables))
self.assertIs(nested1.trainable_variables[0], v1)
self.assertIs(nested2.trainable_variables[0], v2)
self.assertEqual(len(nested1.non_trainable_variables), 0)
self.assertEqual(len(nested2.non_trainable_variables), 0)
return v1, v2
tmpl1 = template.make_template("s1", nested_template)
tmpl2 = template.make_template("s1", nested_template)
v1, v2 = tmpl1()
v3, v4 = tmpl1()
v5, v6 = tmpl2()
# The second invocation of tmpl1 should reuse the variables
# created in the first invocation.
self.assertIs(v1, v3)
self.assertIs(v2, v4)
for v, w in zip(tmpl1.variables, [v1, v2]):
self.assertIs(v, w)
for v, w in zip(tmpl1.trainable_variables, [v1, v2]):
self.assertIs(v, w)
self.assertEqual(len(tmpl1.non_trainable_variables), 0)
# tmpl1 and tmpl2 should not share variables.
self.assertIsNot(v1, v5)
self.assertIsNot(v2, v6)
for v, w in zip(tmpl2.variables, [v5, v6]):
self.assertIs(v, w)
for v, w in zip(tmpl2.trainable_variables, [v5, v6]):
self.assertIs(v, w)
self.assertEqual(len(tmpl2.non_trainable_variables), 0)
self.assertEqual("s1/nested/dummy:0", v1.name)
self.assertEqual("s1/nested_1/dummy:0", v2.name)
self.assertEqual("s1_1/nested/dummy:0", v5.name)
self.assertEqual("s1_1/nested_1/dummy:0", v6.name)
self.assertEqual(["nested", "nested_1"], list(tmpl1._trackable_children()))
def test_graph_function_no_name(self):
with context.eager_mode():
def f(_, y):
return y + 1
partial = functools.partial(f, 1.0)
tmpl = template.make_template_internal(
"a", partial, create_graph_function_=True)
self.assertAllEqual(tmpl(ops.convert_to_tensor(1.0)), 2.0)
@test_util.run_in_graph_and_eager_modes
def test_immediate_scope_creation(self):
# Create templates in scope a then call in scope b. make_template should
# capture the scope the first time it is called, and make_immediate_template
# should capture the scope at construction time.
with variable_scope.variable_scope("ctor_scope"):
# Create scope here:
tmpl_immed = template.make_template("a", variable_scoped_function,
True)
# default: create scope at __call__
tmpl_defer = template.make_template(
"b", variable_scoped_function, False)
with variable_scope.variable_scope("call_scope"):
inner_imm_var = tmpl_immed()
inner_defer_var = tmpl_defer()
outer_imm_var = tmpl_immed()
outer_defer_var = tmpl_defer()
self.assertIsNot(inner_imm_var, inner_defer_var)
self.assertIs(outer_imm_var, inner_imm_var)
self.assertIs(outer_defer_var, inner_defer_var)
self.assertEqual("ctor_scope/a/dummy:0", inner_imm_var.name)
self.assertEqual("call_scope/b/dummy:0", inner_defer_var.name)
@test_util.run_in_graph_and_eager_modes
def test_scope_access(self):
# Ensure that we can access the scope inside the template, because the name
# of that scope may be different from the name we pass to make_template, due
# to having been made unique by variable_scope.
with variable_scope.variable_scope("foo"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar", variable_scoped_function, True)
# Ensure we can get the scopes before either template is actually called.
self.assertEqual(ta.variable_scope.name, "foo/bar")
self.assertEqual(tb.variable_scope.name, "foo/bar_1")
with variable_scope.variable_scope("foo_2"):
# Create a template which defers scope creation.
tc = template.make_template("blah", variable_scoped_function, False)
# Before we call the template, the scope property will be set to None.
self.assertEqual(tc.variable_scope, None)
tc()
# Template is called at the top level, so there is no preceding "foo_2".
self.assertEqual(tc.variable_scope.name, "blah")
@test_util.run_in_graph_and_eager_modes
def test_custom_getter(self):
# Custom getter that maintains call count and forwards to true getter
custom_getter_count = [0]
def custom_getter(getter, name, *args, **kwargs):
custom_getter_count[0] += 1
return getter(name, *args, **kwargs)
# Test that custom getter is called both when variables are created and
# subsequently accessed
tmpl1 = template.make_template(
"s1", variable_scoped_function, custom_getter_=custom_getter)
self.assertEqual(custom_getter_count[0], 0)
tmpl1()
self.assertEqual(custom_getter_count[0], 1)
tmpl1()
self.assertEqual(custom_getter_count[0], 2)
# Test that custom getter is called when the variable scope is created
# during construction
custom_getter_count[0] = 0
tmpl2 = template.make_template(
"s2",
variable_scoped_function,
custom_getter_=custom_getter,
create_scope_now_=True)
self.assertEqual(custom_getter_count[0], 0)
tmpl2()
self.assertEqual(custom_getter_count[0], 1)
tmpl2()
self.assertEqual(custom_getter_count[0], 2)
@test_util.run_in_graph_and_eager_modes
def test_fails_gracefully(self):
for create_scope_now in [True, False]:
def module_function_with_one_arg(inputs):
w = variable_scope.get_variable(
"w", shape=[1], initializer=init_ops.zeros_initializer())
return inputs * w
templatized_function = template.make_template(
"f1", module_function_with_one_arg,
create_scope_now_=create_scope_now)
data = array_ops.zeros([1])
try:
# Try to connect with a kwarg which is unsupported.
templatized_function(data, is_training=True)
except TypeError:
pass
# The failed __call__ hasn't modified the inner state.
self.assertFalse(templatized_function._variables_created)
templatized_function(data)
self.assertTrue(templatized_function._variables_created)
@test_util.run_in_graph_and_eager_modes
def test_name_scopes_for_variable_scopes(self):
# Test that name scopes are not unnecessarily uniquified (but are
# still uniquified when necessary).
def linear_module(x, output_size):
w = variable_scope.get_variable(
"w", shape=[x.get_shape()[1], output_size],
initializer=init_ops.zeros_initializer())
b = variable_scope.get_variable(
"b", shape=[output_size],
initializer=init_ops.zeros_initializer())
return (math_ops.matmul(x, w) + b), w
def make_linear_module(output_size, name):
return template.make_template(
name,
linear_module,
output_size=output_size,
create_scope_now_=True)
inputs = array_ops.ones((3, 4))
linear1 = make_linear_module(output_size=2, name="foo")
outputs_a, w1 = linear1(inputs)
outputs_b, _ = linear1(inputs)
self.assertEqual("foo", linear1.variable_scope.name)
self.assertEqual("foo/w:0", w1.name)
if not context.executing_eagerly():
self.assertEqual(
"foo/add:0", outputs_a.name,
"First application of template should get "
"same name scope as variables.")
self.assertEqual(
"foo_1/add:0", outputs_b.name,
"Second application of template should get "
"a freshly uniquified name scope.")
linear2 = make_linear_module(output_size=2, name="foo")
outputs_c, w2 = linear2(inputs)
outputs_d, _ = linear2(inputs)
self.assertEqual(
"foo_1", linear2.variable_scope.name,
"New template gets a freshly uniquified variable scope "
"because 'foo' is already taken.")
self.assertEqual("foo_1/w:0", w2.name)
if not context.executing_eagerly():
self.assertEqual(
"foo_1_1/add:0", outputs_c.name,
"First application of template would get "
"same name scope as variables, but 'foo_1' is already "
"a name scope.")
self.assertEqual(
"foo_1_2/add:0", outputs_d.name,
"Second application of template should also get "
"a freshly uniquified name scope.")
@test_util.run_in_graph_and_eager_modes
def test_global_variables(self):
# Make sure global_variables are created.
with variable_scope.variable_scope("foo"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
if context.executing_eagerly():
tb = template.make_template("s", function_with_side_create,
trainable=False)
else:
tb = template.make_template("s", function_with_create, trainable=False)
# Initially there are not variables created.
self.assertEqual([], list(ta.global_variables))
self.assertEqual([], list(tb.global_variables))
# After calling there are variables created.
ta()
tb()
# Ensure we can get the scopes before either template is actually called.
self.assertEqual(1, len(ta.global_variables))
self.assertEqual(2, len(tb.global_variables))
@test_util.run_in_graph_and_eager_modes
def test_trainable_variables(self):
# Make sure trainable_variables are created.
with variable_scope.variable_scope("foo2"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar", variable_scoped_function, True)
# Initially there are not variables created.
self.assertEqual([], list(ta.trainable_variables))
self.assertEqual([], list(tb.trainable_variables))
# After calling there are variables created.
ta()
tb()
# Ensure we can get the scopes before either template is actually called.
self.assertEqual(1, len(ta.trainable_variables))
self.assertEqual(1, len(tb.trainable_variables))
# None non-trainable variable was created.
self.assertEqual([], list(ta.non_trainable_variables))
self.assertEqual([], list(tb.non_trainable_variables))
# Ensure variables returns all the variables.
self.assertEqual(1, len(ta.variables))
self.assertEqual(1, len(tb.variables))
@test_util.run_in_graph_and_eager_modes
def test_non_trainable_variables(self):
# Make sure non_trainable_variables are created.
with variable_scope.variable_scope("foo2"):
ta = template.make_template("a", variable_scoped_function,
trainable=True)
tb = template.make_template("b", variable_scoped_function,
trainable=False)
# Initially there are not variables created.
self.assertEqual([], list(ta.variables))
self.assertEqual([], list(tb.variables))
# After calling there are variables created.
ta()
tb()
# Check the trainable and non_trainable variables.
self.assertEqual(1, len(ta.trainable_variables))
self.assertEqual([], list(ta.non_trainable_variables))
self.assertEqual([], list(tb.trainable_variables))
self.assertEqual(1, len(tb.non_trainable_variables))
# Ensure variables returns all the variables.
self.assertEqual(1, len(ta.variables))
self.assertEqual(1, len(tb.variables))
# TODO(apassos) handle local variables in Eager
@test_util.run_deprecated_v1
def test_local_variables(self):
# Make sure trainable_variables are created.
with variable_scope.variable_scope("foo3"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar",
variable_scoped_function_with_local_variable)
# Initially there are not variables created.
self.assertEqual([], list(ta.local_variables))
self.assertEqual([], list(tb.local_variables))
# After calling there are variables created.
ta()
tb()
# Ensure we can get the scopes before either template is actually called.
self.assertEqual(0, len(ta.local_variables))
self.assertEqual(1, len(tb.local_variables))
@test_util.run_in_graph_and_eager_modes
def test_make_template_with_defun(self):
def variable_scoped_function_no_return_value(scope_name):
# defun cannot compile functions that return non-Tensor objects
with variable_scope.variable_scope(
scope_name,
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
tmpl = template.make_template_internal(
"s1",
variable_scoped_function_no_return_value,
create_graph_function_=True,
scope_name="test")
# The first invocation of tmpl1 creates variables, the second should
# be executed as a graph function.
tmpl()
v1 = tmpl.variables
tmpl()
v2 = tmpl.variables
self.assertEqual(len(v1), len(v2))
for v, w in zip(v1, v2):
self.assertIs(v, w)
self.assertEqual("s1/test/dummy:0", v1[0].name)
if __name__ == "__main__":
test.main()
| TemplateTest |
python | django__django | django/middleware/common.py | {
"start": 5101,
"end": 7700
} | class ____(MiddlewareMixin):
def process_response(self, request, response):
"""Send broken link emails for relevant 404 NOT FOUND responses."""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = request.META.get("HTTP_REFERER", "")
if not self.is_ignorable_request(request, path, domain, referer):
ua = request.META.get("HTTP_USER_AGENT", "<none>")
ip = request.META.get("REMOTE_ADDR", "<none>")
mail_managers(
"Broken %slink on %s"
% (
(
"INTERNAL "
if self.is_internal_request(domain, referer)
else ""
),
domain,
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True,
)
return response
def is_internal_request(self, domain, referer):
"""
Return True if the referring URL is the same domain as the current
request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Return True if the given request *shouldn't* notify the site managers
according to project settings or in situations outlined by the inline
comments.
"""
# The referer is empty.
if not referer:
return True
# APPEND_SLASH is enabled and the referer is equal to the current URL
# without a trailing slash indicating an internal redirect.
if settings.APPEND_SLASH and uri.endswith("/") and referer == uri[:-1]:
return True
# A '?' in referer is identified as a search engine source.
if not self.is_internal_request(domain, referer) and "?" in referer:
return True
# The referer is equal to the current URL, ignoring the scheme (assumed
# to be a poorly implemented bot).
parsed_referer = urlsplit(referer)
if parsed_referer.netloc in ["", domain] and parsed_referer.path == uri:
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| BrokenLinkEmailsMiddleware |
python | apache__airflow | providers/databricks/tests/unit/databricks/operators/test_databricks_repos.py | {
"start": 3820,
"end": 5897
} | class ____:
@mock.patch("airflow.providers.databricks.operators.databricks_repos.DatabricksHook")
def test_delete_with_id(self, db_mock_class):
"""
Test the execute function using Repo ID.
"""
op = DatabricksReposDeleteOperator(task_id=TASK_ID, repo_id="123")
db_mock = db_mock_class.return_value
db_mock.delete_repo.return_value = None
op.execute(None)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
caller="DatabricksReposDeleteOperator",
)
db_mock.delete_repo.assert_called_once_with("123")
@mock.patch("airflow.providers.databricks.operators.databricks_repos.DatabricksHook")
def test_delete_with_path(self, db_mock_class):
"""
Test the execute function using Repo path.
"""
op = DatabricksReposDeleteOperator(task_id=TASK_ID, repo_path="/Repos/user@domain.com/test-repo")
db_mock = db_mock_class.return_value
db_mock.get_repo_by_path.return_value = "123"
db_mock.delete_repo.return_value = None
op.execute(None)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
caller="DatabricksReposDeleteOperator",
)
db_mock.delete_repo.assert_called_once_with("123")
def test_init_exception(self):
"""
Tests handling of incorrect parameters passed to ``__init__``
"""
with pytest.raises(
AirflowException, match="Only one of repo_id or repo_path should be provided, but not both"
):
DatabricksReposDeleteOperator(task_id=TASK_ID, repo_id="abc", repo_path="path")
with pytest.raises(AirflowException, match="One of repo_id repo_path tag should be provided"):
DatabricksReposDeleteOperator(task_id=TASK_ID)
| TestDatabricksReposDeleteOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-faker/source_faker/streams.py | {
"start": 2059,
"end": 4310
} | class ____(Stream, IncrementalMixin):
primary_key = "id"
cursor_field = "updated_at"
def __init__(self, count: int, seed: int, parallelism: int, records_per_slice: int, always_updated: bool, **kwargs):
super().__init__(**kwargs)
self.count = count
self.seed = seed
self.records_per_slice = records_per_slice
self.parallelism = parallelism
self.always_updated = always_updated
self.generator = UserGenerator(self.name, self.seed)
@property
def state_checkpoint_interval(self) -> Optional[int]:
return self.records_per_slice
@property
def state(self) -> Mapping[str, Any]:
if hasattr(self, "_state"):
return self._state
else:
return {}
@state.setter
def state(self, value: Mapping[str, Any]):
self._state = value
def read_records(self, **kwargs) -> Iterable[Mapping[str, Any]]:
"""
This is a multi-process implementation of read_records.
We make N workers (where N is the number of available CPUs) and spread out the CPU-bound work of generating records and serializing them to JSON
"""
if "updated_at" in self.state and not self.always_updated:
return iter([])
updated_at = ""
median_record_byte_size = 450
yield generate_estimate(self.name, self.count, median_record_byte_size)
loop_offset = 0
with Pool(initializer=self.generator.prepare, processes=self.parallelism) as pool:
while loop_offset < self.count:
records_remaining_this_loop = min(self.records_per_slice, (self.count - loop_offset))
users = pool.map(self.generator.generate, range(loop_offset, loop_offset + records_remaining_this_loop))
for user in users:
updated_at = user.record.data["updated_at"]
loop_offset += 1
yield user
if records_remaining_this_loop == 0:
break
self.state = {"seed": self.seed, "updated_at": updated_at, "loop_offset": loop_offset}
self.state = {"seed": self.seed, "updated_at": updated_at, "loop_offset": loop_offset}
| Users |
python | Netflix__metaflow | metaflow/_vendor/click/_winconsole.py | {
"start": 3262,
"end": 4260
} | class ____(_WindowsConsoleRawIOBase):
def readable(self):
return True
def readinto(self, b):
bytes_to_be_read = len(b)
if not bytes_to_be_read:
return 0
elif bytes_to_be_read % 2:
raise ValueError(
"cannot read odd number of bytes from UTF-16-LE encoded console"
)
buffer = get_buffer(b, writable=True)
code_units_to_be_read = bytes_to_be_read // 2
code_units_read = c_ulong()
rv = ReadConsoleW(
HANDLE(self.handle),
buffer,
code_units_to_be_read,
byref(code_units_read),
None,
)
if GetLastError() == ERROR_OPERATION_ABORTED:
# wait for KeyboardInterrupt
time.sleep(0.1)
if not rv:
raise OSError("Windows error: {}".format(GetLastError()))
if buffer[0] == EOF:
return 0
return 2 * code_units_read.value
| _WindowsConsoleReader |
python | requests__requests-oauthlib | tests/test_oauth2_session.py | {
"start": 20895,
"end": 21881
} | class ____(OAuth2SessionTest):
"""Ensure that there is no magic auth handling.
By default, requests sessions have magic handling of netrc files,
which is undesirable for this library because it will take
precedence over manually set authentication headers.
"""
def setUp(self):
# Set up a temporary home directory
self.homedir = tempfile.mkdtemp()
self.prehome = os.environ.get("HOME", None)
os.environ["HOME"] = self.homedir
# Write a .netrc file that will cause problems
netrc_loc = os.path.expanduser("~/.netrc")
with open(netrc_loc, "w") as f:
f.write("machine i.b\n" " password abc123\n" " login spam@eggs.co\n")
super(OAuth2SessionNetrcTest, self).setUp()
def tearDown(self):
super(OAuth2SessionNetrcTest, self).tearDown()
if self.prehome is not None:
os.environ["HOME"] = self.prehome
shutil.rmtree(self.homedir)
| OAuth2SessionNetrcTest |
python | run-llama__llama_index | llama-index-instrumentation/src/llama_index_instrumentation/span_handlers/null.py | {
"start": 151,
"end": 1770
} | class ____(BaseSpanHandler[BaseSpan]):
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "NullSpanHandler"
def span_enter(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Logic for entering a span."""
return
def span_exit(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Logic for exiting a span."""
return
def new_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_span_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Create a span."""
return
def prepare_to_exit_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Logic for exiting a span."""
return
def prepare_to_drop_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
err: Optional[BaseException] = None,
**kwargs: Any,
) -> None:
"""Logic for droppping a span."""
return
| NullSpanHandler |
python | walkccc__LeetCode | solutions/2846. Minimum Edge Weight Equilibrium Queries in a Tree/2846.py | {
"start": 0,
"end": 2362
} | class ____:
def minOperationsQueries(
self,
n: int,
edges: list[list[int]],
queries: list[list[int]],
) -> list[int]:
MAX = 26
m = math.ceil(math.log2(n))
graph = [[] for _ in range(n)]
# jump[i][j] := the 2^j-th ancestor of i
jump = [[0] * m for _ in range(n)]
# depth[i] := the depth of i
depth = [0] * n
# count[i][j] := the count of j from root to i, where 1 <= j <= 26
count = [[] for _ in range(n)]
for u, v, w in edges:
graph[u].append((v, w))
graph[v].append((u, w))
count[0] = [0] * (MAX + 1)
self._dfs(graph, 0, -1, jump, depth, count)
for j in range(1, m):
for i in range(n):
jump[i][j] = jump[jump[i][j - 1]][j - 1]
def getMinOperations(u: int, v: int) -> int:
"""
Returns the minimum number of operations to make the edge weight
equilibrium between (u, v).
"""
lca = self._getLCA(u, v, jump, depth)
# the number of edges between (u, v).
numEdges = depth[u] + depth[v] - 2 * depth[lca]
# the maximum frequency of edges between (u, v)
maxFreq = max(count[u][j] + count[v][j] - 2 * count[lca][j]
for j in range(1, MAX + 1))
return numEdges - maxFreq
return [getMinOperations(u, v) for u, v in queries]
def _dfs(
self,
graph: list[list[tuple[int, int]]],
u: int,
prev: int,
jump: list[list[int]],
depth: list[int],
count: list[list[int]]
) -> None:
for v, w in graph[u]:
if v == prev:
continue
jump[v][0] = u
depth[v] = depth[u] + 1
count[v] = count[u][:]
count[v][w] += 1
self._dfs(graph, v, u, jump, depth, count)
def _getLCA(
self,
u: int,
v: int,
jump: list[list[int]],
depth: list[int]
) -> int:
"""Returns the lca(u, v) by binary jump."""
# v is always deeper than u.
if depth[u] > depth[v]:
return self._getLCA(v, u, jump, depth)
# Jump v to the same height of u.
for j in range(len(jump[0])):
if depth[v] - depth[u] >> j & 1:
v = jump[v][j]
if u == v:
return u
# Jump u and v to the node right below the lca.
for j in range(len(jump[0]) - 1, -1, -1):
if jump[u][j] != jump[v][j]:
u = jump[u][j]
v = jump[v][j]
return jump[u][0]
| Solution |
python | donnemartin__system-design-primer | solutions/object_oriented_design/lru_cache/lru_cache.py | {
"start": 112,
"end": 361
} | class ____(object):
def __init__(self):
self.head = None
self.tail = None
def move_to_front(self, node):
pass
def append_to_front(self, node):
pass
def remove_from_tail(self):
pass
| LinkedList |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 101179,
"end": 101632
} | class ____:
engine: T_NetcdfEngine
def test_file_remains_open(self) -> None:
data = Dataset({"foo": ("x", [1, 2, 3])})
f = BytesIO()
data.to_netcdf(f, engine=self.engine)
assert not f.closed
restored = open_dataset(f, engine=self.engine)
assert not f.closed
assert_identical(restored, data)
restored.close()
assert not f.closed
@requires_h5netcdf_or_netCDF4
| FileObjectNetCDF |
python | sympy__sympy | sympy/tensor/tensor.py | {
"start": 168944,
"end": 171413
} | class ____(TensorIndex):
"""
A wild object that matches TensorIndex instances.
Examples
========
>>> from sympy.tensor.tensor import TensorIndex, TensorIndexType, WildTensorIndex
>>> R3 = TensorIndexType('R3', dim=3)
>>> p = TensorIndex("p", R3)
By default, covariant indices only match with covariant indices (and
similarly for contravariant)
>>> q = WildTensorIndex("q", R3)
>>> (q).matches(p)
{q: p}
>>> (q).matches(-p)
If you want matching to ignore whether the index is co/contra-variant, set
ignore_updown=True
>>> r = WildTensorIndex("r", R3, ignore_updown=True)
>>> (r).matches(-p)
{r: -p}
>>> (r).matches(p)
{r: p}
Parameters
==========
name : name of the index (string), or ``True`` if you want it to be
automatically assigned
tensor_index_type : ``TensorIndexType`` of the index
is_up : flag for contravariant index (is_up=True by default)
ignore_updown : bool, Whether this should match both co- and contra-variant
indices (default:False)
"""
def __new__(cls, name, tensor_index_type, is_up=True, ignore_updown=False):
if isinstance(name, str):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = "_i{}".format(len(tensor_index_type._autogenerated))
name_symbol = Symbol(name)
tensor_index_type._autogenerated.append(name_symbol)
else:
raise ValueError("invalid name")
is_up = sympify(is_up)
ignore_updown = sympify(ignore_updown)
return Basic.__new__(cls, name_symbol, tensor_index_type, is_up, ignore_updown)
@property
def ignore_updown(self):
return self.args[3]
def __neg__(self):
t1 = WildTensorIndex(self.name, self.tensor_index_type,
(not self.is_up), self.ignore_updown)
return t1
def matches(self, expr, repl_dict=None, old=False):
if not isinstance(expr, TensorIndex):
return None
if self.tensor_index_type != expr.tensor_index_type:
return None
if not self.ignore_updown:
if self.is_up != expr.is_up:
return None
if repl_dict is None:
repl_dict = {}
else:
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
| WildTensorIndex |
python | getsentry__sentry | tests/sentry/utils/locking/backends/test_migration.py | {
"start": 240,
"end": 872
} | class ____(LockBackend):
path = "tests.sentry.utils.locking.backends.test_migration.DummyLockBackend"
def __init__(self):
self._locks = {}
def acquire(self, key: str, duration: int, routing_key: str | None = None) -> None:
if self.locked(key=key, routing_key=routing_key):
raise AssertionError(f"Could not acquire ({key}, {routing_key})")
self._locks[(key, routing_key)] = duration
def release(self, key, routing_key=None):
del self._locks[(key, routing_key)]
def locked(self, key, routing_key=None):
return (key, routing_key) in self._locks
| DummyLockBackend |
python | gevent__gevent | src/greentest/3.12/test_interpreters.py | {
"start": 9963,
"end": 14054
} | class ____(TestBase):
def test_basic(self):
main = interpreters.get_main()
interp1 = interpreters.create()
interp2 = interpreters.create()
interp3 = interpreters.create()
self.assertEqual(set(interpreters.list_all()),
{main, interp1, interp2, interp3})
interp2.close()
self.assertEqual(set(interpreters.list_all()),
{main, interp1, interp3})
def test_all(self):
before = set(interpreters.list_all())
interps = set()
for _ in range(3):
interp = interpreters.create()
interps.add(interp)
self.assertEqual(set(interpreters.list_all()), before | interps)
for interp in interps:
interp.close()
self.assertEqual(set(interpreters.list_all()), before)
def test_main(self):
main, = interpreters.list_all()
with self.assertRaises(RuntimeError):
main.close()
def f():
with self.assertRaises(RuntimeError):
main.close()
t = threading.Thread(target=f)
t.start()
t.join()
def test_already_destroyed(self):
interp = interpreters.create()
interp.close()
with self.assertRaises(RuntimeError):
interp.close()
def test_does_not_exist(self):
interp = interpreters.Interpreter(1_000_000)
with self.assertRaises(RuntimeError):
interp.close()
def test_bad_id(self):
interp = interpreters.Interpreter(-1)
with self.assertRaises(ValueError):
interp.close()
def test_from_current(self):
main, = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
from test.support import interpreters
interp = interpreters.Interpreter({int(interp.id)})
try:
interp.close()
except RuntimeError:
print('failed')
"""))
self.assertEqual(out.strip(), 'failed')
self.assertEqual(set(interpreters.list_all()), {main, interp})
def test_from_sibling(self):
main, = interpreters.list_all()
interp1 = interpreters.create()
interp2 = interpreters.create()
self.assertEqual(set(interpreters.list_all()),
{main, interp1, interp2})
interp1.run(dedent(f"""
from test.support import interpreters
interp2 = interpreters.Interpreter(int({interp2.id}))
interp2.close()
interp3 = interpreters.create()
interp3.close()
"""))
self.assertEqual(set(interpreters.list_all()), {main, interp1})
def test_from_other_thread(self):
interp = interpreters.create()
def f():
interp.close()
t = threading.Thread(target=f)
t.start()
t.join()
@unittest.skip('Fails on FreeBSD')
def test_still_running(self):
main, = interpreters.list_all()
interp = interpreters.create()
with _running(interp):
with self.assertRaises(RuntimeError):
interp.close()
self.assertTrue(interp.is_running())
def test_subthreads_still_running(self):
r_interp, w_interp = self.os_pipe()
r_thread, w_thread = self.os_pipe()
FINISHED = b'F'
interp = interpreters.create()
interp.run(f"""if True:
import os
import threading
import time
done = False
def notify_fini():
global done
done = True
t.join()
threading._register_atexit(notify_fini)
def task():
while not done:
time.sleep(0.1)
os.write({w_interp}, {FINISHED!r})
t = threading.Thread(target=task)
t.start()
""")
interp.close()
self.assertEqual(os.read(r_interp, 1), FINISHED)
| TestInterpreterClose |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 17533,
"end": 21721
} | class ____(django_test.TestCase):
def tearDown(self):
super().tearDown()
for path in os.listdir(models.WITHFILE_UPLOAD_DIR):
# Remove temporary files written during tests.
os.unlink(os.path.join(models.WITHFILE_UPLOAD_DIR, path))
def test_default_build(self):
o = WithFileFactory.build()
self.assertIsNone(o.pk)
self.assertEqual(b'', o.afile.read())
self.assertEqual('example.dat', o.afile.name)
o.save()
self.assertEqual('django/example.dat', o.afile.name)
def test_default_create(self):
o = WithFileFactory.create()
self.assertIsNotNone(o.pk)
with o.afile as f:
self.assertEqual(b'', f.read())
self.assertEqual('django/example.dat', o.afile.name)
def test_with_content(self):
o = WithFileFactory.build(afile__data='foo')
self.assertIsNone(o.pk)
# Django only allocates the full path on save()
o.save()
with o.afile as f:
self.assertEqual(b'foo', f.read())
self.assertEqual('django/example.dat', o.afile.name)
def test_with_file(self):
with open(testdata.TESTFILE_PATH, 'rb') as f:
o = WithFileFactory.build(afile__from_file=f)
o.save()
with o.afile as f:
self.assertEqual(b'example_data\n', f.read())
self.assertEqual('django/example.data', o.afile.name)
def test_with_path(self):
o = WithFileFactory.build(afile__from_path=testdata.TESTFILE_PATH)
self.assertIsNone(o.pk)
with o.afile as f:
# Django only allocates the full path on save()
o.save()
f.seek(0)
self.assertEqual(b'example_data\n', f.read())
self.assertEqual('django/example.data', o.afile.name)
def test_with_file_empty_path(self):
with open(testdata.TESTFILE_PATH, 'rb') as f:
o = WithFileFactory.build(
afile__from_file=f,
afile__from_path=''
)
# Django only allocates the full path on save()
o.save()
with o.afile as f:
self.assertEqual(b'example_data\n', f.read())
self.assertEqual('django/example.data', o.afile.name)
def test_with_path_empty_file(self):
o = WithFileFactory.build(
afile__from_path=testdata.TESTFILE_PATH,
afile__from_file=None,
)
self.assertIsNone(o.pk)
with o.afile as f:
# Django only allocates the full path on save()
o.save()
f.seek(0)
self.assertEqual(b'example_data\n', f.read())
self.assertEqual('django/example.data', o.afile.name)
def test_error_both_file_and_path(self):
with self.assertRaises(ValueError):
WithFileFactory.build(
afile__from_file='fakefile',
afile__from_path=testdata.TESTFILE_PATH,
)
def test_override_filename_with_path(self):
o = WithFileFactory.build(
afile__from_path=testdata.TESTFILE_PATH,
afile__filename='example.foo',
)
self.assertIsNone(o.pk)
with o.afile as f:
# Django only allocates the full path on save()
o.save()
f.seek(0)
self.assertEqual(b'example_data\n', f.read())
self.assertEqual('django/example.foo', o.afile.name)
def test_existing_file(self):
o1 = WithFileFactory.build(afile__from_path=testdata.TESTFILE_PATH)
with o1.afile:
o1.save()
self.assertEqual('django/example.data', o1.afile.name)
o2 = WithFileFactory.build(afile__from_file=o1.afile)
self.assertIsNone(o2.pk)
with o2.afile as f:
o2.save()
f.seek(0)
self.assertEqual(b'example_data\n', f.read())
self.assertNotEqual('django/example.data', o2.afile.name)
self.assertRegex(o2.afile.name, r'django/example_\w+.data')
def test_no_file(self):
o = WithFileFactory.build(afile=None)
self.assertIsNone(o.pk)
self.assertFalse(o.afile)
| DjangoFileFieldTestCase |
python | huggingface__transformers | src/transformers/models/dpr/modeling_dpr.py | {
"start": 3619,
"end": 3690
} | class ____(PreTrainedModel):
_supports_sdpa = True
| DPRPreTrainedModel |
python | faif__python-patterns | patterns/creational/abstract_factory.py | {
"start": 1568,
"end": 1703
} | class ____(Pet):
def speak(self) -> None:
print("meow")
def __str__(self) -> str:
return f"Cat<{self.name}>"
| Cat |
python | RaRe-Technologies__gensim | gensim/test/test_phrases.py | {
"start": 12739,
"end": 16106
} | class ____(PhrasesData, unittest.TestCase):
def test_save_load_custom_scorer(self):
"""Test saving and loading a Phrases object with a custom scorer."""
bigram = Phrases(self.sentences, min_count=1, threshold=.001, scoring=dumb_scorer)
with temporary_file("test.pkl") as fpath:
bigram.save(fpath)
bigram_loaded = Phrases.load(fpath)
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = list(bigram_loaded.find_phrases(test_sentences).values())
assert all(score == 1 for score in seen_scores)
assert len(seen_scores) == 3 # 'graph minors' and 'survey human' and 'interface system'
def test_save_load(self):
"""Test saving and loading a Phrases object."""
bigram = Phrases(self.sentences, min_count=1, threshold=1)
with temporary_file("test.pkl") as fpath:
bigram.save(fpath)
bigram_loaded = Phrases.load(fpath)
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = set(round(score, 3) for score in bigram_loaded.find_phrases(test_sentences).values())
assert seen_scores == set([
5.167, # score for graph minors
3.444 # score for human interface
])
def test_save_load_with_connector_words(self):
"""Test saving and loading a Phrases object."""
connector_words = frozenset({'of'})
bigram = Phrases(self.sentences, min_count=1, threshold=1, connector_words=connector_words)
with temporary_file("test.pkl") as fpath:
bigram.save(fpath)
bigram_loaded = Phrases.load(fpath)
assert bigram_loaded.connector_words == connector_words
def test_save_load_string_scoring(self):
"""Test backwards compatibility with a previous version of Phrases with custom scoring."""
bigram_loaded = Phrases.load(datapath("phrases-scoring-str.pkl"))
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = set(round(score, 3) for score in bigram_loaded.find_phrases(test_sentences).values())
assert seen_scores == set([
5.167, # score for graph minors
3.444 # score for human interface
])
def test_save_load_no_scoring(self):
"""Test backwards compatibility with old versions of Phrases with no scoring parameter."""
bigram_loaded = Phrases.load(datapath("phrases-no-scoring.pkl"))
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = set(round(score, 3) for score in bigram_loaded.find_phrases(test_sentences).values())
assert seen_scores == set([
5.167, # score for graph minors
3.444 # score for human interface
])
def test_save_load_no_common_terms(self):
"""Ensure backwards compatibility with old versions of Phrases, before connector_words."""
bigram_loaded = Phrases.load(datapath("phrases-no-common-terms.pkl"))
self.assertEqual(bigram_loaded.connector_words, frozenset())
# can make a phraser, cf #1751
phraser = FrozenPhrases(bigram_loaded) # does not raise
phraser[["human", "interface", "survey"]] # does not raise
| TestPhrasesPersistence |
python | django__django | django/contrib/gis/db/models/lookups.py | {
"start": 7008,
"end": 7104
} | class ____(GISLookup):
lookup_name = "crosses"
@BaseSpatialField.register_lookup
| CrossesLookup |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/schedule_definition.py | {
"start": 6028,
"end": 14786
} | class ____:
"""The context object available as the first argument to various functions defined on a :py:class:`dagster.ScheduleDefinition`.
A ``ScheduleEvaluationContext`` object is passed as the first argument to ``run_config_fn``, ``tags_fn``,
and ``should_execute``.
**Users should not instantiate this object directly**. To construct a ``ScheduleEvaluationContext`` for testing purposes, use :py:func:`dagster.build_schedule_context`.
Example:
.. code-block:: python
from dagster import schedule, ScheduleEvaluationContext
@schedule
def the_schedule(context: ScheduleEvaluationContext):
...
"""
__slots__ = [
"_cm_scope_entered",
"_exit_stack",
"_instance",
"_instance_ref",
"_log_key",
"_logger",
"_repository_def",
"_repository_name",
"_resource_defs",
"_resources",
"_resources_cm",
"_schedule_name",
"_scheduled_execution_time",
]
def __init__(
self,
instance_ref: Optional[InstanceRef],
scheduled_execution_time: Optional[datetime],
log_key: Optional[Sequence[str]] = None,
repository_name: Optional[str] = None,
schedule_name: Optional[str] = None,
resources: Optional[Mapping[str, "ResourceDefinition"]] = None,
repository_def: Optional["RepositoryDefinition"] = None,
):
from dagster._core.definitions.repository_definition import RepositoryDefinition
self._exit_stack = ExitStack()
self._instance = None
self._instance_ref = check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)
self._scheduled_execution_time = check.opt_inst_param(
scheduled_execution_time, "scheduled_execution_time", datetime
)
self._log_key = log_key
# Kept for backwards compatibility if the schedule log key is not passed into the
# schedule evaluation.
if not self._log_key and repository_name and schedule_name and scheduled_execution_time:
self._log_key = [
repository_name,
schedule_name,
scheduled_execution_time.strftime("%Y%m%d_%H%M%S"),
]
self._logger = None
self._repository_name = repository_name
self._schedule_name = schedule_name
# Wait to set resources unless they're accessed
self._resource_defs = resources
self._resources = None
self._cm_scope_entered = False
self._repository_def = check.opt_inst_param(
repository_def, "repository_def", RepositoryDefinition
)
def __enter__(self) -> "ScheduleEvaluationContext":
self._cm_scope_entered = True
return self
def __exit__(self, *exc) -> None:
self._exit_stack.close()
self._logger = None
@property
def resource_defs(self) -> Optional[Mapping[str, "ResourceDefinition"]]:
return self._resource_defs
@public
@property
def resources(self) -> Resources:
"""Mapping of resource key to resource definition to be made available
during schedule execution.
"""
from dagster._core.definitions.scoped_resources_builder import IContainsGenerator
from dagster._core.execution.build_resources import build_resources
if not self._resources:
# Early exit if no resources are defined. This skips unnecessary initialization
# entirely. This allows users to run user code servers in cases where they
# do not have access to the instance if they use a subset of features do
# that do not require instance access. In this case, if they do not use
# resources on schedules they do not require the instance, so we do not
# instantiate it
#
# Tracking at https://github.com/dagster-io/dagster/issues/14345
if not self._resource_defs:
self._resources = ScopedResourcesBuilder.build_empty()
return self._resources
instance = self.instance if self._instance or self._instance_ref else None
resources_cm = build_resources(resources=self._resource_defs, instance=instance)
self._resources = self._exit_stack.enter_context(resources_cm)
if isinstance(self._resources, IContainsGenerator) and not self._cm_scope_entered:
self._exit_stack.close()
raise DagsterInvariantViolationError(
"At least one provided resource is a generator, but attempting to access"
" resources outside of context manager scope. You can use the following syntax"
" to open a context manager: `with build_schedule_context(...) as context:`"
)
return self._resources
def merge_resources(self, resources_dict: Mapping[str, Any]) -> "ScheduleEvaluationContext":
"""Merge the specified resources into this context.
This method is intended to be used by the Dagster framework, and should not be called by user code.
Args:
resources_dict (Mapping[str, Any]): The resources to replace in the context.
"""
check.invariant(
self._resources is None, "Cannot merge resources in context that has been initialized."
)
from dagster._core.execution.build_resources import wrap_resources_for_execution
return ScheduleEvaluationContext(
instance_ref=self._instance_ref,
scheduled_execution_time=self._scheduled_execution_time,
repository_name=self._repository_name,
schedule_name=self._schedule_name,
resources={
**(self._resource_defs or {}),
**wrap_resources_for_execution(resources_dict),
},
repository_def=self._repository_def,
)
@public
@property
def instance(self) -> "DagsterInstance":
"""DagsterInstance: The current :py:class:`~dagster.DagsterInstance`."""
# self._instance_ref should only ever be None when this ScheduleEvaluationContext was
# constructed under test.
if not self._instance_ref:
raise DagsterInvariantViolationError(
"Attempted to initialize dagster instance, but no instance reference was provided."
)
if not self._instance:
self._instance = self._exit_stack.enter_context(
DagsterInstance.from_ref(self._instance_ref)
)
return cast("DagsterInstance", self._instance)
@property
def instance_ref(self) -> Optional[InstanceRef]:
"""The serialized instance configured to run the schedule."""
return self._instance_ref
@public
@property
def scheduled_execution_time(self) -> datetime:
"""The time in which the execution was scheduled to happen. May differ slightly
from both the actual execution time and the time at which the run config is computed.
"""
if self._scheduled_execution_time is None:
check.failed(
"Attempting to access scheduled_execution_time, but no scheduled_execution_time was"
" set on this context"
)
return self._scheduled_execution_time
@property
def log(self) -> logging.Logger:
if self._logger is None:
if not self._instance_ref:
self._logger = self._exit_stack.enter_context(
InstigationLogger(
self._log_key,
repository_name=self._repository_name,
instigator_name=self._schedule_name,
)
)
else:
self._logger = self._exit_stack.enter_context(
InstigationLogger(
self._log_key,
self.instance,
repository_name=self._repository_name,
instigator_name=self._schedule_name,
)
)
return self._logger
def has_captured_logs(self):
return self._logger and self._logger.has_captured_logs()
@property
def log_key(self) -> Optional[Sequence[str]]:
return self._log_key
@property
def repository_def(self) -> "RepositoryDefinition":
if not self._repository_def:
raise DagsterInvariantViolationError(
"Attempted to access repository_def, but no repository_def was provided."
)
return self._repository_def
| ScheduleEvaluationContext |
python | pytorch__pytorch | test/test_ops.py | {
"start": 122703,
"end": 124813
} | class ____(TestCase):
@ops(
[op for op in op_db if op.name in ["mul", "add", "div"]],
allowed_dtypes=(torch.float32,),
)
def test_0d_tensor_with_python_scalar(self, device, dtype, op):
"""Test that forward AD preserves dtype when combining 0D tensors with Python scalars."""
if torch.float not in op.supported_backward_dtypes(device):
raise unittest.SkipTest("Does not support autograd")
# skip if operator doesn't support forward AD
if not op.supports_forward_ad:
raise unittest.SkipTest("Does not support forward_ad")
# create 0D tensors
primal0d = torch.ones((), device=device, dtype=dtype)
tangent0d = torch.ones((), device=device, dtype=dtype)
with torch.autograd.forward_ad.dual_level():
dual0d = torch.autograd.forward_ad.make_dual(primal0d, tangent0d)
# Test with scalar on RHS
if op.supports_rhs_python_scalar:
result = op(dual0d, 2.0)
p, t = torch.autograd.forward_ad.unpack_dual(result)
self.assertEqual(
p.dtype, t.dtype, f"{op.name} and scalar on RHS - dtype mismatch"
)
# Test with scalar on LHS
if op.supports_one_python_scalar:
result = op(2.0, dual0d)
p, t = torch.autograd.forward_ad.unpack_dual(result)
self.assertEqual(
p.dtype, t.dtype, f"{op.name} and scalar on LHS - dtype mismatch"
)
instantiate_device_type_tests(TestCommon, globals(), allow_xpu=True)
instantiate_device_type_tests(TestCompositeCompliance, globals())
instantiate_device_type_tests(TestMathBits, globals())
instantiate_device_type_tests(TestRefsOpsInfo, globals(), only_for="cpu")
instantiate_device_type_tests(TestFakeTensor, globals())
instantiate_device_type_tests(TestTags, globals())
instantiate_device_type_tests(TestForwardADWithScalars, globals())
if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
| TestForwardADWithScalars |
python | scikit-learn__scikit-learn | sklearn/base.py | {
"start": 25834,
"end": 28977
} | class ____:
"""Mixin class for all bicluster estimators in scikit-learn.
This mixin defines the following functionality:
- `biclusters_` property that returns the row and column indicators;
- `get_indices` method that returns the row and column indices of a bicluster;
- `get_shape` method that returns the shape of a bicluster;
- `get_submatrix` method that returns the submatrix corresponding to a bicluster.
Examples
--------
>>> import numpy as np
>>> from sklearn.base import BaseEstimator, BiclusterMixin
>>> class DummyBiClustering(BiclusterMixin, BaseEstimator):
... def fit(self, X, y=None):
... self.rows_ = np.ones(shape=(1, X.shape[0]), dtype=bool)
... self.columns_ = np.ones(shape=(1, X.shape[1]), dtype=bool)
... return self
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> bicluster = DummyBiClustering().fit(X)
>>> hasattr(bicluster, "biclusters_")
True
>>> bicluster.get_indices(0)
(array([0, 1, 2, 3, 4, 5]), array([0, 1]))
"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the `i`'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : ndarray, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : ndarray, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the `i`'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
n_rows : int
Number of rows in the bicluster.
n_cols : int
Number of columns in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Return the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array-like of shape (n_samples, n_features)
The data.
Returns
-------
submatrix : ndarray of shape (n_rows, n_cols)
The submatrix corresponding to bicluster `i`.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
data = check_array(data, accept_sparse="csr")
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
| BiclusterMixin |
python | google__jax | jax/_src/tree_util.py | {
"start": 15830,
"end": 16328
} | class ____:
"""Object that delegates __call__, __hash__, and __eq__ to another object."""
def __init__(self, fun):
self.fun = fun
def __call__(self, *args, **kw):
return self.fun(*args, **kw)
def __hash__(self):
return hash(self.fun)
def __eq__(self, other):
if isinstance(other, _HashableCallableShim):
return self.fun == other.fun
return self.fun == other
def __repr__(self):
return f'_HashableCallableShim({self.fun!r})'
@export
| _HashableCallableShim |
python | doocs__leetcode | solution/0300-0399/0385.Mini Parser/Solution.py | {
"start": 1328,
"end": 1889
} | class ____:
def deserialize(self, s: str) -> NestedInteger:
if not s or s == '[]':
return NestedInteger()
if s[0] != '[':
return NestedInteger(int(s))
ans = NestedInteger()
depth, j = 0, 1
for i in range(1, len(s)):
if depth == 0 and (s[i] == ',' or i == len(s) - 1):
ans.add(self.deserialize(s[j:i]))
j = i + 1
elif s[i] == '[':
depth += 1
elif s[i] == ']':
depth -= 1
return ans
| Solution |
python | doocs__leetcode | solution/1800-1899/1816.Truncate Sentence/Solution.py | {
"start": 0,
"end": 110
} | class ____:
def truncateSentence(self, s: str, k: int) -> str:
return ' '.join(s.split()[:k])
| Solution |
python | scipy__scipy | scipy/linalg/tests/test_fblas.py | {
"start": 18521,
"end": 18664
} | class ____(BaseGerComplex):
blas_func = fblas.zgerc
dtype = complex128
def transform(self,x):
return conjugate(x)
"""
| TestZgerc |
python | RaRe-Technologies__gensim | gensim/corpora/svmlightcorpus.py | {
"start": 403,
"end": 6083
} | class ____(IndexedCorpus):
"""Corpus in SVMlight format.
Quoting http://svmlight.joachims.org/:
The input file contains the training examples. The first lines may contain comments and are ignored
if they start with #. Each of the following lines represents one training example
and is of the following format::
<line> .=. <target> <feature>:<value> <feature>:<value> ... <feature>:<value> # <info>
<target> .=. +1 | -1 | 0 | <float>
<feature> .=. <integer> | "qid"
<value> .=. <float>
<info> .=. <string>
The "qid" feature (used for SVMlight ranking), if present, is ignored.
Notes
-----
Although not mentioned in the specification above, SVMlight also expect its feature ids to be 1-based
(counting starts at 1). We convert features to 0-base internally by decrementing all ids when loading a SVMlight
input file, and increment them again when saving as SVMlight.
"""
def __init__(self, fname, store_labels=True):
"""
Parameters
----------
fname: str
Path to corpus.
store_labels : bool, optional
Whether to store labels (~SVM target class). They currently have no application but stored
in `self.labels` for convenience by default.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
self.fname = fname # input file, see class doc for format
self.length = None
self.store_labels = store_labels
self.labels = []
def __iter__(self):
""" Iterate over the corpus, returning one sparse (BoW) vector at a time.
Yields
------
list of (int, float)
Document in BoW format.
"""
lineno = -1
self.labels = []
with utils.open(self.fname, 'rb') as fin:
for lineno, line in enumerate(fin):
doc = self.line2doc(line)
if doc is not None:
if self.store_labels:
self.labels.append(doc[1])
yield doc[0]
self.length = lineno + 1
@staticmethod
def save_corpus(fname, corpus, id2word=None, labels=False, metadata=False):
"""Save a corpus in the SVMlight format.
The SVMlight `<target>` class tag is taken from the `labels` array, or set to 0 for all documents
if `labels` is not supplied.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, float)
Corpus in BoW format.
id2word : dict of (str, str), optional
Mapping id -> word.
labels : list or False
An SVMlight `<target>` class tags or False if not present.
metadata : bool
ARGUMENT WILL BE IGNORED.
Returns
-------
list of int
Offsets for each line in file (in bytes).
"""
logger.info("converting corpus to SVMlight format: %s", fname)
if labels is not False:
# Cast any sequence (incl. a numpy array) to a list, to simplify the processing below.
labels = list(labels)
offsets = []
with utils.open(fname, 'wb') as fout:
for docno, doc in enumerate(corpus):
label = labels[docno] if labels else 0 # target class is 0 by default
offsets.append(fout.tell())
fout.write(utils.to_utf8(SvmLightCorpus.doc2line(doc, label)))
return offsets
def docbyoffset(self, offset):
"""Get the document stored at file position `offset`.
Parameters
----------
offset : int
Document's position.
Returns
-------
tuple of (int, float)
"""
with utils.open(self.fname, 'rb') as f:
f.seek(offset)
return self.line2doc(f.readline())[0]
# TODO: it brakes if gets None from line2doc
def line2doc(self, line):
"""Get a document from a single line in SVMlight format.
This method inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.doc2line`.
Parameters
----------
line : str
Line in SVMLight format.
Returns
-------
(list of (int, float), str)
Document in BoW format and target class label.
"""
line = utils.to_unicode(line)
line = line[: line.find('#')].strip()
if not line:
return None # ignore comments and empty lines
parts = line.split()
if not parts:
raise ValueError('invalid line format in %s' % self.fname)
target, fields = parts[0], [part.rsplit(':', 1) for part in parts[1:]]
# ignore 'qid' features, convert 1-based feature ids to 0-based
doc = [(int(p1) - 1, float(p2)) for p1, p2 in fields if p1 != 'qid']
return doc, target
@staticmethod
def doc2line(doc, label=0):
"""Convert BoW representation of document in SVMlight format.
This method inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.line2doc`.
Parameters
----------
doc : list of (int, float)
Document in BoW format.
label : int, optional
Document label (if provided).
Returns
-------
str
`doc` in SVMlight format.
"""
pairs = ' '.join("%i:%s" % (termid + 1, termval) for termid, termval in doc) # +1 to convert 0-base to 1-base
return "%s %s\n" % (label, pairs)
| SvmLightCorpus |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_ctor.py | {
"start": 3156,
"end": 3345
} | class ____:
goal_time = 0.2
def setup(self):
N = 1_000_000
self.data = range(N)
def time_frame_from_range(self):
self.df = DataFrame(self.data)
| FromRange |
python | huggingface__transformers | tests/models/speech_to_text/test_feature_extraction_speech_to_text.py | {
"start": 15645,
"end": 16225
} | class ____(Speech2TextFeatureExtractionTest):
def test_using_audio_utils(self):
# Tests that it uses audio_utils instead of torchaudio
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
self.assertTrue(hasattr(feat_extract, "window"))
self.assertTrue(hasattr(feat_extract, "mel_filters"))
from transformers.models.speech_to_text.feature_extraction_speech_to_text import is_speech_available
self.assertFalse(is_speech_available())
| Speech2TextFeatureExtractionWithoutTorchaudioTest |
python | scikit-learn__scikit-learn | sklearn/base.py | {
"start": 5514,
"end": 18312
} | class ____(ReprHTMLMixin, _HTMLDocumentationLinkMixin, _MetadataRequester):
"""Base class for all estimators in scikit-learn.
Inheriting from this class provides default implementations of:
- setting and getting parameters used by `GridSearchCV` and friends;
- textual and HTML representation displayed in terminals and IDEs;
- estimator serialization;
- parameters validation;
- data validation;
- feature names validation.
Read more in the :ref:`User Guide <rolling_your_own_estimator>`.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
Examples
--------
>>> import numpy as np
>>> from sklearn.base import BaseEstimator
>>> class MyEstimator(BaseEstimator):
... def __init__(self, *, param=1):
... self.param = param
... def fit(self, X, y=None):
... self.is_fitted_ = True
... return self
... def predict(self, X):
... return np.full(shape=X.shape[0], fill_value=self.param)
>>> estimator = MyEstimator(param=2)
>>> estimator.get_params()
{'param': 2}
>>> X = np.array([[1, 2], [2, 3], [3, 4]])
>>> y = np.array([1, 0, 1])
>>> estimator.fit(X, y).predict(X)
array([2, 2, 2])
>>> estimator.set_params(param=3).fit(X, y).predict(X)
array([3, 3, 3])
"""
def __dir__(self):
# Filters conditional methods that should be hidden based
# on the `available_if` decorator
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
return [attr for attr in super().__dir__() if hasattr(self, attr)]
_html_repr = estimator_html_repr
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [
p
for p in init_signature.parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError(
"scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention." % (cls, init_signature)
)
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
value = getattr(self, key)
if deep and hasattr(value, "get_params") and not isinstance(value, type):
deep_items = value.get_params().items()
out.update((key + "__" + k, val) for k, val in deep_items)
out[key] = value
return out
    def _get_params_html(self, deep=True, doc_link=""):
        """
        Get parameters for this estimator with a specific HTML representation.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        doc_link : str
            URL to the estimator documentation.
            Used for linking to the estimator's parameters documentation
            available in HTML displays.

        Returns
        -------
        params : ParamsDict
            Parameter names mapped to their values. We return a `ParamsDict`
            dictionary, which renders a specific HTML representation in table
            form.
        """
        out = self.get_params(deep=deep)

        # Recover the constructor defaults (from the possibly
        # deprecation-wrapped constructor) so that user-set parameters can
        # be highlighted in the HTML display.
        init_func = getattr(self.__init__, "deprecated_original", self.__init__)
        init_default_params = inspect.signature(init_func).parameters
        init_default_params = {
            name: param.default for name, param in init_default_params.items()
        }

        def is_non_default(param_name, param_value):
            """Finds the parameters that have been set by the user."""
            if param_name not in init_default_params:
                # happens if k is part of a **kwargs
                return True

            if init_default_params[param_name] == inspect._empty:
                # k has no default value
                return True

            # avoid calling repr on nested estimators
            if isinstance(param_value, BaseEstimator) and type(param_value) is not type(
                init_default_params[param_name]
            ):
                return True

            # special-case pandas NA values before the generic equality
            # check below
            if is_pandas_na(param_value) and not is_pandas_na(
                init_default_params[param_name]
            ):
                return True

            # `np.array_equal` covers array-valued parameters; the extra
            # clause treats a NaN value as equal to a NaN default
            if not np.array_equal(
                param_value, init_default_params[param_name]
            ) and not (
                is_scalar_nan(init_default_params[param_name])
                and is_scalar_nan(param_value)
            ):
                return True

            return False

        # reorder the parameters from `self.get_params` using the `__init__`
        # signature
        remaining_params = [name for name in out if name not in init_default_params]
        ordered_out = {name: out[name] for name in init_default_params if name in out}
        ordered_out.update({name: out[name] for name in remaining_params})

        non_default_ls = tuple(
            [name for name, value in ordered_out.items() if is_non_default(name, value)]
        )

        return ParamsDict(
            params=ordered_out,
            non_default=non_default_ls,
            estimator_class=self.__class__,
            doc_link=doc_link,
        )
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as :class:`~sklearn.pipeline.Pipeline`). The latter have
parameters of the form ``<component>__<parameter>`` so that it's
possible to update each component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition("__")
if key not in valid_params:
local_valid_params = self._get_param_names()
raise ValueError(
f"Invalid parameter {key!r} for estimator {self}. "
f"Valid parameters are: {local_valid_params!r}."
)
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
    def __sklearn_clone__(self):
        # Hook picked up by `sklearn.base.clone`; delegates to the default
        # parameter-based clone implementation.
        return _clone_parametrized(self)
    def __repr__(self, N_CHAR_MAX=700):
        """Return a pretty-printed repr, truncated to roughly ``N_CHAR_MAX``
        non-blank characters with a bruteforce ``...`` ellipsis in the middle."""
        # N_CHAR_MAX is the (approximate) maximum number of non-blank
        # characters to render. We pass it as an optional parameter to ease
        # the tests.

        from sklearn.utils._pprint import _EstimatorPrettyPrinter

        N_MAX_ELEMENTS_TO_SHOW = 30  # number of elements to show in sequences

        # use ellipsis for sequences with a lot of elements
        pp = _EstimatorPrettyPrinter(
            compact=True,
            indent=1,
            indent_at_name=True,
            n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,
        )

        repr_ = pp.pformat(self)

        # Use bruteforce ellipsis when there are a lot of non-blank characters
        n_nonblank = len("".join(repr_.split()))
        if n_nonblank > N_CHAR_MAX:
            lim = N_CHAR_MAX // 2  # apprx number of chars to keep on both ends
            regex = r"^(\s*\S){%d}" % lim
            # The regex '^(\s*\S){%d}' % n
            # matches from the start of the string until the nth non-blank
            # character:
            # - ^ matches the start of string
            # - (pattern){n} matches n repetitions of pattern
            # - \s*\S matches a non-blank char following zero or more blanks
            left_lim = re.match(regex, repr_).end()
            # Matching on the reversed string finds the cut point from the end.
            right_lim = re.match(regex, repr_[::-1]).end()

            if "\n" in repr_[left_lim:-right_lim]:
                # The left side and right side aren't on the same line.
                # To avoid weird cuts, e.g.:
                # categoric...ore',
                # we need to start the right side with an appropriate newline
                # character so that it renders properly as:
                # categoric...
                # handle_unknown='ignore',
                # so we add [^\n]*\n which matches until the next \n
                regex += r"[^\n]*\n"
                right_lim = re.match(regex, repr_[::-1]).end()

            ellipsis = "..."
            if left_lim + len(ellipsis) < len(repr_) - right_lim:
                # Only add ellipsis if it results in a shorter repr
                repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:]

        return repr_
    def __getstate__(self):
        """Pickling hook: return ``__dict__``-based state, tagging
        sklearn-native estimators with the current sklearn version."""
        if getattr(self, "__slots__", None):
            # `__slots__` subclasses would silently lose state on pickling;
            # refuse them outright.
            raise TypeError(
                "You cannot use `__slots__` in objects inheriting from "
                "`sklearn.base.BaseEstimator`."
            )

        try:
            state = super().__getstate__()
            if state is None:
                # For Python 3.11+, empty instance (no `__slots__`,
                # and `__dict__`) will return a state equal to `None`.
                state = self.__dict__.copy()
        except AttributeError:
            # Python < 3.11
            state = self.__dict__.copy()

        if type(self).__module__.startswith("sklearn."):
            # Record the library version so unpickling can warn on mismatch.
            return dict(state.items(), _sklearn_version=__version__)
        else:
            return state
    def __setstate__(self, state):
        """Unpickling hook: warn when the pickle was written by a different
        sklearn version, then restore the instance state."""
        if type(self).__module__.startswith("sklearn."):
            # Pickles predating the version tag are labelled "pre-0.18".
            pickle_version = state.pop("_sklearn_version", "pre-0.18")
            if pickle_version != __version__:
                warnings.warn(
                    InconsistentVersionWarning(
                        estimator_name=self.__class__.__name__,
                        current_sklearn_version=__version__,
                        original_sklearn_version=pickle_version,
                    ),
                )
        try:
            super().__setstate__(state)
        except AttributeError:
            # No `__setstate__` further up the MRO (e.g. Python < 3.11
            # default): restore the instance dict directly.
            self.__dict__.update(state)
    def __sklearn_tags__(self):
        # Default tag set: estimator type undetermined, target not required,
        # and no transformer/regressor/classifier-specific tags. Subclasses
        # override this to declare their capabilities.
        return Tags(
            estimator_type=None,
            target_tags=TargetTags(required=False),
            transformer_tags=None,
            regressor_tags=None,
            classifier_tags=None,
        )
    def _validate_params(self):
        """Validate types and values of constructor parameters

        The expected type and values must be defined in the `_parameter_constraints`
        class attribute, which is a dictionary `param_name: list of constraints`. See
        the docstring of `validate_parameter_constraints` for a description of the
        accepted constraints.
        """
        # deep=False: only this estimator's own parameters are validated here.
        validate_parameter_constraints(
            self._parameter_constraints,
            self.get_params(deep=False),
            caller_name=self.__class__.__name__,
        )
| BaseEstimator |
python | pennersr__django-allauth | allauth/headless/account/inputs.py | {
"start": 8825,
"end": 8890
} | class ____(SelectEmailInput):
pass
| ResendEmailVerificationInput |
python | PrefectHQ__prefect | src/prefect/flow_engine.py | {
"start": 4271,
"end": 7313
} | class ____(Generic[P, R]):
flow: Union[Flow[P, R], Flow[P, Coroutine[Any, Any, R]]]
parameters: Optional[Dict[str, Any]] = None
flow_run: Optional[FlowRun] = None
flow_run_id: Optional[UUID] = None
logger: logging.Logger = field(default_factory=lambda: get_logger("engine"))
wait_for: Optional[Iterable[PrefectFuture[Any]]] = None
context: Optional[dict[str, Any]] = None
# holds the return value from the user code
_return_value: Union[R, Type[NotSet]] = NotSet
# holds the exception raised by the user code, if any
_raised: Union[Exception, Type[NotSet]] = NotSet
_is_started: bool = False
short_circuit: bool = False
_flow_run_name_set: bool = False
_telemetry: RunTelemetry = field(default_factory=RunTelemetry)
def __post_init__(self) -> None:
if self.flow is None and self.flow_run_id is None:
raise ValueError("Either a flow or a flow_run_id must be provided.")
if self.parameters is None:
self.parameters = {}
@property
def state(self) -> State:
return self.flow_run.state # type: ignore
def is_running(self) -> bool:
if getattr(self, "flow_run", None) is None:
return False
return getattr(self, "flow_run").state.is_running()
def is_pending(self) -> bool:
if getattr(self, "flow_run", None) is None:
return False # TODO: handle this differently?
return getattr(self, "flow_run").state.is_pending()
def cancel_all_tasks(self) -> None:
if hasattr(self.flow.task_runner, "cancel_all"):
self.flow.task_runner.cancel_all() # type: ignore
def _update_otel_labels(
self, span: trace.Span, client: Union[SyncPrefectClient, PrefectClient]
):
parent_flow_run_ctx = FlowRunContext.get()
if parent_flow_run_ctx and parent_flow_run_ctx.flow_run:
if traceparent := parent_flow_run_ctx.flow_run.labels.get(
LABELS_TRACEPARENT_KEY
):
carrier: KeyValueLabels = {TRACEPARENT_KEY: traceparent}
propagate.get_global_textmap().inject(
carrier={TRACEPARENT_KEY: traceparent},
setter=OTELSetter(),
)
else:
carrier: KeyValueLabels = {}
propagate.get_global_textmap().inject(
carrier,
context=trace.set_span_in_context(span),
setter=OTELSetter(),
)
if carrier.get(TRACEPARENT_KEY):
if self.flow_run:
client.update_flow_run_labels(
flow_run_id=self.flow_run.id,
labels={LABELS_TRACEPARENT_KEY: carrier[TRACEPARENT_KEY]},
)
else:
self.logger.info(
f"Tried to set traceparent {carrier[TRACEPARENT_KEY]} for flow run, but None was found"
)
@dataclass
| BaseFlowRunEngine |
python | pennersr__django-allauth | allauth/headless/mfa/inputs.py | {
"start": 1886,
"end": 1956
} | class ____(LoginWebAuthnForm, inputs.Input):
pass
| LoginWebAuthnInput |
python | conda__conda | conda/core/path_actions.py | {
"start": 38213,
"end": 39619
} | class ____(RemoveFromPrefixPathAction):
def __init__(
self,
transaction_context,
linked_package_data,
target_prefix,
target_short_path,
link_type=LinkType.hardlink,
):
super().__init__(
transaction_context, linked_package_data, target_prefix, target_short_path
)
self.holding_short_path = self.target_short_path + CONDA_TEMP_EXTENSION
self.holding_full_path = self.target_full_path + CONDA_TEMP_EXTENSION
self.link_type = link_type
def execute(self):
if self.link_type != LinkType.directory:
log.log(
TRACE,
"renaming %s => %s",
self.target_short_path,
self.holding_short_path,
)
backoff_rename(self.target_full_path, self.holding_full_path, force=True)
def reverse(self):
if self.link_type != LinkType.directory and lexists(self.holding_full_path):
log.log(
TRACE,
"reversing rename %s => %s",
self.holding_short_path,
self.target_short_path,
)
backoff_rename(self.holding_full_path, self.target_full_path, force=True)
def cleanup(self):
if not isdir(self.holding_full_path):
rm_rf(self.holding_full_path, clean_empty_parents=True)
| UnlinkPathAction |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py | {
"start": 3467,
"end": 3605
} | class ____:
def x(self: _NotATypeVar) -> _NotATypeVar: ...
@classmethod
def y(self: type[_NotATypeVar]) -> _NotATypeVar: ...
| Foo |
python | pytorch__pytorch | torch/nn/modules/rnn.py | {
"start": 60810,
"end": 62713
} | class ____(Module):
__constants__ = ["input_size", "hidden_size", "bias"]
input_size: int
hidden_size: int
bias: bool
weight_ih: Tensor
weight_hh: Tensor
# WARNING: bias_ih and bias_hh purposely not defined here.
# See https://github.com/pytorch/pytorch/issues/39670
def __init__(
self,
input_size: int,
hidden_size: int,
bias: bool,
num_chunks: int,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = Parameter(
torch.empty((num_chunks * hidden_size, input_size), **factory_kwargs)
)
self.weight_hh = Parameter(
torch.empty((num_chunks * hidden_size, hidden_size), **factory_kwargs)
)
if bias:
self.bias_ih = Parameter(
torch.empty(num_chunks * hidden_size, **factory_kwargs)
)
self.bias_hh = Parameter(
torch.empty(num_chunks * hidden_size, **factory_kwargs)
)
else:
self.register_parameter("bias_ih", None)
self.register_parameter("bias_hh", None)
self.reset_parameters()
def extra_repr(self) -> str:
s = "{input_size}, {hidden_size}"
if "bias" in self.__dict__ and self.bias is not True:
s += ", bias={bias}"
if "nonlinearity" in self.__dict__ and self.nonlinearity != "tanh":
s += ", nonlinearity={nonlinearity}"
return s.format(**self.__dict__)
def reset_parameters(self) -> None:
stdv = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
for weight in self.parameters():
init.uniform_(weight, -stdv, stdv)
| RNNCellBase |
python | django__django | tests/expressions_case/models.py | {
"start": 1799,
"end": 1955
} | class ____(models.Model):
fk = models.ForeignKey(CaseTestModel, models.CASCADE, related_name="fk_rel")
integer = models.IntegerField()
| FKCaseTestModel |
python | doocs__leetcode | solution/1800-1899/1893.Check if All the Integers in a Range Are Covered/Solution.py | {
"start": 0,
"end": 368
} | class ____:
def isCovered(self, ranges: List[List[int]], left: int, right: int) -> bool:
diff = [0] * 52
for l, r in ranges:
diff[l] += 1
diff[r + 1] -= 1
s = 0
for i, x in enumerate(diff):
s += x
if s <= 0 and left <= i <= right:
return False
return True
| Solution |
python | ray-project__ray | python/ray/serve/tests/unit/test_cli.py | {
"start": 4246,
"end": 6387
} | class ____:
"""Test that enum representer correctly serializes enums in YAML dumps."""
def test_build_command_with_enum_serialization(self):
"""Test that serve build correctly serializes AggregationFunction enum."""
runner = CliRunner()
with NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
f.write(
"from ray import serve\n"
"from ray.serve.config import AggregationFunction, AutoscalingConfig\n\n"
"@serve.deployment(\n"
" autoscaling_config=AutoscalingConfig(\n"
" min_replicas=1,\n"
" max_replicas=2,\n"
" aggregation_function=AggregationFunction.MEAN,\n"
" )\n"
")\n"
"def test_deployment():\n"
" return 'ok'\n\n"
"app = test_deployment.bind()\n"
)
temp_path = f.name
output_path = None
try:
import_path = f"{pathlib.Path(temp_path).stem}:app"
with NamedTemporaryFile(
mode="w", suffix=".yaml", delete=False
) as output_file:
output_path = output_file.name
result = runner.invoke(
build,
[
import_path,
"--app-dir",
str(pathlib.Path(temp_path).parent),
"--output-path",
output_path,
],
)
assert result.exit_code == 0, result.output
with open(output_path, "r") as f:
config = yaml.safe_load(f)
agg_func = config["applications"][0]["deployments"][0][
"autoscaling_config"
]["aggregation_function"]
assert agg_func == "mean"
assert isinstance(agg_func, str)
finally:
os.unlink(temp_path)
if output_path:
os.unlink(output_path)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestEnumSerialization |
python | keras-team__keras | keras/src/backend/tensorflow/trackable.py | {
"start": 64,
"end": 1993
} | class ____(tf.__internal__.tracking.AutoTrackable):
"""Manages dependencies on other objects with Keras tracking.
Similar to TF AutoTrackable, but disabling tracking is based
on tracking within Keras.
This serves as an interface between Keras tracking and TF tracking.
"""
def __setattr__(self, name, value):
"""Support self.foo = trackable syntax."""
try:
if getattr(self, name) is value:
# Short circuit for `self.$x = self.$x`.
return
except AttributeError:
pass
if getattr(self, "_self_setattr_tracking", True):
value = sticky_attribute_assignment(
trackable=self, value=value, name=name
)
super().__setattr__(name, value)
def sticky_attribute_assignment(trackable, name, value):
"""Adds dependencies, called from __setattr__.
Args:
trackable: The object to add dependencies to (generally the one having
an attribute assigned).
name: The attribute name being assigned.
value: The value being assigned. Not necessarily a trackable object.
Returns:
The value which should be stored in the attribute.
"""
if isinstance(
value, (tracking.TrackedList, tracking.TrackedDict, tracking.TrackedSet)
) and hasattr(trackable, "_tracked"):
trackable._tracked.append(name)
if not tracking.is_tracking_enabled():
return value
if isinstance(value, tf.__internal__.tracking.Trackable):
trackable._track_trackable( # pylint: disable=protected-access
value,
name=name,
# Allow the user to switch the Trackable which is tracked by this
# name, since assigning a new variable to an attribute has
# historically been fine (e.g. Adam did this).
overwrite=True,
)
return value
| KerasAutoTrackable |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 1771,
"end": 15571
} | class ____(core.SingletonExpr):
"""Primary class for all Expressions
This mostly includes Dask protocols and various Pandas-like method
definitions to make us look more like a DataFrame.
"""
_is_length_preserving = False
_filter_passthrough = False
def _filter_passthrough_available(self, parent, dependents):
return self._filter_passthrough and is_filter_pushdown_available(
self, parent, dependents
)
@functools.cached_property
def ndim(self):
meta = self._meta
try:
return meta.ndim
except AttributeError:
return 0
def __dask_keys__(self):
return [(self._name, i) for i in range(self.npartitions)]
def optimize(self, **kwargs):
return optimize(self, **kwargs)
def __hash__(self):
return hash(self._name)
@property
def index(self):
return Index(self)
@property
def size(self):
return Size(self)
@property
def nbytes(self):
return NBytes(self)
def _tree_repr_lines(self, indent=0, recursive=True):
header = funcname(type(self)) + ":"
lines = []
for i, op in enumerate(self.operands):
if isinstance(op, Expr):
if recursive:
lines.extend(op._tree_repr_lines(2))
else:
if isinstance(op, _BackendData):
op = op._data
# TODO: this stuff is pandas-specific
if isinstance(op, pd.core.base.PandasObject):
op = "<pandas>"
elif is_dataframe_like(op):
op = "<dataframe>"
elif is_index_like(op):
op = "<index>"
elif is_series_like(op):
op = "<series>"
elif is_arraylike(op):
op = "<array>"
header = self._tree_repr_argument_construction(i, op, header)
lines = [header] + lines
lines = [" " * indent + line for line in lines]
return lines
def _operands_for_repr(self):
to_include = []
for param, operand in zip(self._parameters, self.operands):
if isinstance(operand, Expr) or (
not isinstance(operand, (pd.Series, pd.DataFrame))
and operand != self._defaults.get(param)
):
to_include.append(f"{param}={operand!r}")
return to_include
def __getattr__(self, key):
try:
return super().__getattr__(key)
except AttributeError:
if is_dataframe_like(self._meta) and key in self._meta.columns:
return self[key]
raise
def __getitem__(self, other):
if isinstance(other, Expr):
return Filter(self, other)
else:
return Projection(self, other) # df[["a", "b", "c"]]
def __bool__(self):
raise ValueError(
f"The truth value of a {self.__class__.__name__} is ambiguous. "
"Use a.any() or a.all()."
)
def __add__(self, other):
return Add(self, other)
def __radd__(self, other):
return Add(other, self)
def __sub__(self, other):
return Sub(self, other)
def __rsub__(self, other):
return Sub(other, self)
def __mul__(self, other):
return Mul(self, other)
def __rmul__(self, other):
return Mul(other, self)
def __pow__(self, power):
return Pow(self, power)
def __rpow__(self, power):
return Pow(power, self)
def __truediv__(self, other):
return Div(self, other)
def __rtruediv__(self, other):
return Div(other, self)
def __lt__(self, other):
return LT(self, other)
def __rlt__(self, other):
return LT(other, self)
def __gt__(self, other):
return GT(self, other)
def __rgt__(self, other):
return GT(other, self)
def __le__(self, other):
return LE(self, other)
def __rle__(self, other):
return LE(other, self)
def __ge__(self, other):
return GE(self, other)
def __rge__(self, other):
return GE(other, self)
def __eq__(self, other):
return EQ(self, other)
def __ne__(self, other):
return NE(self, other)
def __and__(self, other):
return And(self, other)
def __rand__(self, other):
return And(other, self)
def __or__(self, other):
return Or(self, other)
def __ror__(self, other):
return Or(other, self)
def __xor__(self, other):
return XOr(self, other)
def __rxor__(self, other):
return XOr(other, self)
def __invert__(self):
return Invert(self)
def __neg__(self):
return Neg(self)
def __pos__(self):
return Pos(self)
def __mod__(self, other):
return Mod(self, other)
def __rmod__(self, other):
return Mod(other, self)
def __floordiv__(self, other):
return FloorDiv(self, other)
def __rfloordiv__(self, other):
return FloorDiv(other, self)
def __divmod__(self, other):
res1 = self // other
res2 = self % other
return res1, res2
def __rdivmod__(self, other):
res1 = other // self
res2 = other % self
return res1, res2
def sum(self, skipna=True, numeric_only=False, split_every=False, axis=0):
return Sum(self, skipna, numeric_only, split_every, axis)
def prod(self, skipna=True, numeric_only=False, split_every=False, axis=0):
return Prod(self, skipna, numeric_only, split_every, axis)
def var(self, axis=0, skipna=True, ddof=1, numeric_only=False, split_every=False):
if axis == 0:
return Var(self, skipna, ddof, numeric_only, split_every)
elif axis == 1:
return VarColumns(self, skipna, ddof, numeric_only)
else:
raise ValueError(f"axis={axis} not supported. Please specify 0 or 1")
def std(self, axis=0, skipna=True, ddof=1, numeric_only=False, split_every=False):
return Sqrt(self.var(axis, skipna, ddof, numeric_only, split_every=split_every))
def mean(self, skipna=True, numeric_only=False, split_every=False, axis=0):
return Mean(
self,
skipna=skipna,
numeric_only=numeric_only,
split_every=split_every,
axis=axis,
)
def max(self, skipna=True, numeric_only=False, split_every=False, axis=0):
return Max(self, skipna, numeric_only, split_every, axis)
def any(self, skipna=True, split_every=False):
return Any(self, skipna=skipna, split_every=split_every)
def all(self, skipna=True, split_every=False):
return All(self, skipna=skipna, split_every=split_every)
def idxmin(self, skipna=True, numeric_only=False, split_every=False):
return IdxMin(
self, skipna=skipna, numeric_only=numeric_only, split_every=split_every
)
def idxmax(self, skipna=True, numeric_only=False, split_every=False):
return IdxMax(
self, skipna=skipna, numeric_only=numeric_only, split_every=split_every
)
def mode(self, dropna=True, split_every=False):
return Mode(self, dropna=dropna, split_every=split_every)
def min(self, skipna=True, numeric_only=False, split_every=False, axis=0):
return Min(self, skipna, numeric_only, split_every=split_every, axis=axis)
def count(self, numeric_only=False, split_every=False):
return Count(self, numeric_only, split_every)
def cumsum(self, skipna=True):
from dask.dataframe.dask_expr._cumulative import CumSum
return CumSum(self, skipna=skipna)
def cumprod(self, skipna=True):
from dask.dataframe.dask_expr._cumulative import CumProd
return CumProd(self, skipna=skipna)
def cummax(self, skipna=True):
from dask.dataframe.dask_expr._cumulative import CumMax
return CumMax(self, skipna=skipna)
def cummin(self, skipna=True):
from dask.dataframe.dask_expr._cumulative import CumMin
return CumMin(self, skipna=skipna)
def abs(self):
return Abs(self)
def astype(self, dtypes):
return AsType(self, dtypes)
def clip(self, lower=None, upper=None, axis=None):
return Clip(self, lower=lower, upper=upper, axis=axis)
def combine_first(self, other):
if are_co_aligned(self, other):
return CombineFirst(self, other=other)
else:
return CombineFirstAlign(self, other)
def to_timestamp(self, freq=None, how="start"):
return ToTimestamp(self, freq=freq, how=how)
def isna(self):
return IsNa(self)
def isnull(self):
# These are the same anyway
return IsNa(self)
def round(self, decimals=0):
return Round(self, decimals=decimals)
def where(self, cond, other=np.nan):
if not are_co_aligned(self, *[c for c in [cond, other] if isinstance(c, Expr)]):
return WhereAlign(self, cond=cond, other=other)
return Where(self, cond=cond, other=other)
def mask(self, cond, other=np.nan):
if not are_co_aligned(self, *[c for c in [cond, other] if isinstance(c, Expr)]):
return MaskAlign(self, cond=cond, other=other)
return Mask(self, cond=cond, other=other)
def apply(self, function, *args, meta=None, **kwargs):
return Apply(self, function, args, meta, kwargs)
def replace(self, to_replace=None, value=no_default, regex=False):
return Replace(self, to_replace=to_replace, value=value, regex=regex)
def fillna(self, value=None):
if isinstance(value, Expr) and not are_co_aligned(self, value):
return FillnaAlign(self, value=value)
return Fillna(self, value=value)
def rename_axis(
self, mapper=no_default, index=no_default, columns=no_default, axis=0
):
return RenameAxis(self, mapper=mapper, index=index, columns=columns, axis=axis)
def align(self, other, join="outer", axis=None, fill_value=None):
from dask.dataframe.dask_expr._collection import new_collection
if not are_co_aligned(self, other):
aligned = AlignAlignPartitions(self, other, join, axis, fill_value)
else:
aligned = _Align(self, other, join, axis=axis, fill_value=fill_value)
return new_collection(AlignGetitem(aligned, position=0)), new_collection(
AlignGetitem(aligned, position=1)
)
def nunique_approx(self, split_every=None):
return NuniqueApprox(self, b=16, split_every=split_every)
def memory_usage_per_partition(self, index=True, deep=False):
return MemoryUsagePerPartition(self, index, deep)
@functools.cached_property
def divisions(self):
return tuple(self._divisions())
def _divisions(self):
raise NotImplementedError()
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
@property
def npartitions(self):
if "npartitions" in self._parameters:
return self.operand("npartitions")
else:
return len(self.divisions) - 1
@property
def columns(self) -> list:
try:
return list(self._meta.columns)
except AttributeError:
if self.ndim == 1:
return [self.name]
return []
except Exception:
raise
@functools.cached_property
def unique_partition_mapping_columns_from_shuffle(self) -> set:
"""Preserves the columns defining the partition mapping from shuffles.
This property specifies if a column or a set of columns have a unique
partition mapping that was defined by a shuffle operation. The mapping
is created by hashing the values and the separating them onto partitions.
It is important that this property is only propagated if the values
in those columns did not change in this expression. The property is
only populated if the mapping was created by the ``partitioning_index``
function.
Simply knowing that every value is in only one partition is not a
satisfying condition, because we also use this property on merge
operations, where we need these values to be in matching partitions.
This is also the reason why set_index or sort_values can't set the
property, they fulfill a weaker condition than what this property enforces.
Normally, this set contains one tuple of either one or multiple columns.
It can contain 2, when the operation shuffles multiple columns of the
result, i.e. a merge operation and the left and right join columns.
Returns
-------
A set of column groups that have a unique partition mapping as
defined by a shuffle.
"""
return set()
@property
def _projection_columns(self):
return self.columns
@property
def name(self):
return self._meta.name
@property
def dtypes(self):
return self._meta.dtypes
def _filter_simplification(self, parent, predicate=None):
if predicate is None:
predicate = parent.predicate.substitute(self, self.frame)
if are_co_aligned(self.frame, predicate):
# Only do this if we are aligned
return type(self)(self.frame[predicate], *self.operands[1:])
def fuse(self):
return optimize_blockwise_fusion(self)
def finalize_compute(self):
return FinalizeComputeDF(self)
| Expr |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braze/components.py | {
"start": 792,
"end": 1460
} | class ____(AddFields):
def transform(
self,
record: Record,
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> Record:
"""
Transforms incoming string to a dictionary record.
"""
_record = {}
kwargs = {"record": record, "stream_state": stream_state, "stream_slice": stream_slice}
for parsed_field in self._parsed_fields:
value = parsed_field.value.eval(config, **kwargs)
dpath.new(_record, parsed_field.path, value)
return _record
@dataclass
| TransformToRecordComponent |
python | getsentry__sentry | tests/sentry/web/frontend/test_oauth_token.py | {
"start": 1300,
"end": 17600
} | class ____(TestCase):
@cached_property
def path(self) -> str:
return "/oauth/token/"
def setUp(self) -> None:
super().setUp()
self.application = ApiApplication.objects.create(
owner=self.user, redirect_uris="https://example.com"
)
self.client_secret = self.application.client_secret
self.grant = ApiGrant.objects.create(
user=self.user, application=self.application, redirect_uri="https://example.com"
)
def _basic_auth_value(self) -> str:
import base64
creds = f"{self.application.client_id}:{self.client_secret}".encode()
return f"Basic {base64.b64encode(creds).decode('ascii')}"
def test_basic_auth_header_too_large(self) -> None:
self.login_as(self.user)
oversized = "A" * 5001 # valid base64 chars, exceeds limit
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
},
HTTP_AUTHORIZATION=f"Basic {oversized}",
)
assert resp.status_code == 401
assert resp.json() == {"error": "invalid_client"}
def test_basic_auth_success(self) -> None:
self.login_as(self.user)
auth_value = self._basic_auth_value()
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
},
HTTP_AUTHORIZATION=auth_value,
)
assert resp.status_code == 200
data = resp.json()
assert isinstance(data["expires_in"], int)
assert data["token_type"] == "Bearer"
assert "no-store" in resp["Cache-Control"]
def test_basic_auth_invalid_base64_character(self) -> None:
self.login_as(self.user)
invalid_value = f"{self._basic_auth_value()}$"
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
},
HTTP_AUTHORIZATION=invalid_value,
)
assert resp.status_code == 401
assert resp.json() == {"error": "invalid_client"}
assert resp["WWW-Authenticate"].startswith("Basic ")
assert "no-store" in resp["Cache-Control"]
def test_basic_and_body_conflict(self) -> None:
self.login_as(self.user)
auth_value = self._basic_auth_value()
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
HTTP_AUTHORIZATION=auth_value,
)
assert resp.status_code == 400
assert resp.json() == {"error": "invalid_request"}
assert "no-store" in resp["Cache-Control"]
def test_missing_client_id(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 401
assert json.loads(resp.content) == {"error": "invalid_client"}
assert resp["WWW-Authenticate"].startswith("Basic ")
assert "no-store" in resp["Cache-Control"]
def test_invalid_client_id(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
"client_id": "def",
"client_secret": self.client_secret,
},
)
assert resp.status_code == 401
assert json.loads(resp.content) == {"error": "invalid_client"}
def test_missing_client_secret(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"client_id": self.application.client_id,
"code": self.grant.code,
},
)
assert resp.status_code == 401
assert json.loads(resp.content) == {"error": "invalid_client"}
def test_invalid_client_secret(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
"client_id": self.application.client_id,
"client_secret": "rodrick_rules",
},
)
assert resp.status_code == 401
assert json.loads(resp.content) == {"error": "invalid_client"}
def test_missing_code(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
assert json.loads(resp.content) == {"error": "invalid_grant"}
def test_invalid_code(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": "abc",
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
assert json.loads(resp.content) == {"error": "invalid_grant"}
def test_expired_grant(self) -> None:
self.login_as(self.user)
expired_grant = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
expires_at="2022-01-01 11:11+00:00",
)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": expired_grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
assert json.loads(resp.content) == {"error": "invalid_grant"}
def test_one_time_use_grant(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 200
# attempt to re-use the same grant code
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
def test_grant_lock(self) -> None:
self.login_as(self.user)
# Simulate a concurrent request by using an existing grant
# that has its grant lock taken out.
lock = locks.get(ApiGrant.get_lock_key(self.grant.id), duration=10, name="api_grant")
lock.acquire()
# Attempt to create a token with the same grant
# This should fail because the lock is held by the previous request
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"code": self.grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
assert resp.json() == {"error": "invalid_grant"}
def test_invalid_redirect_uri(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"code": self.grant.code,
"client_id": self.application.client_id,
"redirect_uri": "cheese.org",
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
assert json.loads(resp.content) == {"error": "invalid_grant"}
def test_no_open_id_token(self) -> None:
"""
Checks that the OIDC token is not returned unless the right scope is approved.
"""
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"code": self.grant.code,
"redirect_uri": self.application.get_default_redirect_uri(),
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 200
data = json.loads(resp.content)
assert "id_token" not in data
def test_missing_redirect_uri_when_bound(self) -> None:
"""
When the grant stored a redirect_uri, the token request must include
the exact same redirect_uri.
"""
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"code": self.grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
assert json.loads(resp.content) == {"error": "invalid_grant"}
def test_valid_params(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 200
data = json.loads(resp.content)
token = ApiToken.objects.get(token=data["access_token"])
assert token.application == self.application
assert token.user == self.grant.user
assert token.get_scopes() == self.grant.get_scopes()
assert data["access_token"] == token.token
assert data["refresh_token"] == token.refresh_token
assert isinstance(data["expires_in"], int)
assert data["token_type"] == "Bearer"
assert "no-store" in resp["Cache-Control"]
assert data["user"]["id"] == str(token.user_id)
def test_expires_in_value(self) -> None:
"""
Verify that expires_in correctly represents seconds until expiry.
The old code incorrectly calculated (now - expires_at) instead of
(expires_at - now), producing negative values for valid tokens.
"""
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": self.grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 200
data = json.loads(resp.content)
# Default token expiration is 30 days (2,592,000 seconds)
# expires_in should be positive and close to 30 days
expires_in = data["expires_in"]
assert isinstance(expires_in, int)
assert expires_in > 0, "expires_in should be positive (seconds until expiry)"
# Allow for a few seconds of test execution time, but should be close to 30 days
expected_seconds = 30 * 24 * 60 * 60 # 2,592,000 seconds
assert expires_in >= expected_seconds - 60, "expires_in should be close to 30 days"
assert expires_in <= expected_seconds, "expires_in should not exceed 30 days"
def test_valid_params_id_token(self) -> None:
self.login_as(self.user)
open_id_grant = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
scope_list=["openid"],
)
with self.options({"codecov.signing_secret": "signing_secret"}):
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": open_id_grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 200
data = json.loads(resp.content)
token = ApiToken.objects.get(token=data["access_token"])
assert token.get_scopes() == ["openid"]
assert data["refresh_token"] == token.refresh_token
assert data["access_token"] == token.token
assert isinstance(data["expires_in"], int)
assert data["token_type"] == "Bearer"
assert data["user"]["id"] == str(token.user_id)
assert data["id_token"].count(".") == 2
def test_valid_params_id_token_additional_scopes(self) -> None:
self.login_as(self.user)
open_id_grant = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
scope_list=["openid", "profile", "email"],
)
with self.options({"codecov.signing_secret": "signing_secret"}):
resp = self.client.post(
self.path,
{
"grant_type": "authorization_code",
"redirect_uri": self.application.get_default_redirect_uri(),
"code": open_id_grant.code,
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 200
data = json.loads(resp.content)
token = ApiToken.objects.get(token=data["access_token"])
assert token.get_scopes() == ["email", "openid", "profile"]
assert data["refresh_token"] == token.refresh_token
assert data["access_token"] == token.token
assert isinstance(data["expires_in"], int)
assert data["token_type"] == "Bearer"
assert data["user"]["id"] == str(token.user_id)
assert data["id_token"].count(".") == 2
@control_silo_test
| OAuthTokenCodeTest |
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 12406,
"end": 12665
} | class ____:
def __init__(self, meth_name, action, handle):
self.meth_name = meth_name
self.handle = handle
self.action = action
def __call__(self, *args):
return self.handle(self.meth_name, self.action, *args)
| FakeMethod |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 128609,
"end": 130967
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
super().__init__()
self.self_attn = Qwen3OmniMoeThinkerTextAttention(config, layer_idx)
if (layer_idx not in config.mlp_only_layers) and (
config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
):
self.mlp = Qwen3OmniMoeThinkerTextSparseMoeBlock(config)
else:
self.mlp = Qwen3OmniMoeThinkerTextMLP(config, intermediate_size=config.intermediate_size)
self.input_layernorm = Qwen3OmniMoeThinkerTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen3OmniMoeThinkerTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.hidden_size = config.hidden_size
self.mlp = Qwen3OmniMoeTalkerTextSparseMoeBlock(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring(
custom_intro=(
"Text part of Qwen3OmniMoe, "
"not a pure text-only model, as DeepStack integrates visual features into the early hidden states."
)
)
| Qwen3OmniMoeTalkerDecoderLayer |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-moves-in-a-grid.py | {
"start": 1587,
"end": 2254
} | class ____(object):
def maxMoves(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
q = set(xrange(len(grid)))
for c in xrange(len(grid[0])-1):
new_q = set()
for r in q:
if grid[r][c] < grid[r][c+1]:
new_q.add(r)
if r-1 >= 0 and grid[r][c] < grid[r-1][c+1]:
new_q.add(r-1)
if r+1 < len(grid) and grid[r][c] < grid[r+1][c+1]:
new_q.add(r+1)
q = new_q
if not q:
break
else:
c = len(grid[0])-1
return c
| Solution3 |
python | getsentry__sentry | tests/sentry/integrations/slack/notifications/test_resolved.py | {
"start": 502,
"end": 5303
} | class ____(SlackActivityNotificationTest, PerformanceIssueTestCase):
def create_notification(self, group):
return ResolvedActivityNotification(
Activity(
project=self.project,
group=group,
user_id=self.user.id,
type=ActivityType.SET_RESOLVED,
data={"assignee": ""},
)
)
def test_resolved_block(self) -> None:
"""
Test that a Slack message is sent with the expected payload when an issue is resolved
and block kit is enabled.
"""
with self.tasks():
self.create_notification(self.group).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
issue_link = (
f"http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}"
)
assert (
fallback_text
== f"{self.name} marked <{issue_link}/?referrer=activity_notification¬ification_uuid={notification_uuid}|{self.short_id}> as resolved"
)
assert blocks[0]["text"]["text"] == fallback_text
assert (
blocks[1]["text"]["text"]
== f":red_circle: <{issue_link}/?referrer=resolved_activity-slack¬ification_uuid={notification_uuid}|*{self.group.title}*>"
)
assert (
blocks[3]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_PERF_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_resolved_performance_issue_block_with_culprit_blocks(
self, occurrence: mock.MagicMock
) -> None:
"""
Test that a Slack message is sent with the expected payload when a performance issue is resolved
and block kit is enabled.
"""
event = self.create_performance_issue()
assert event.group is not None
with self.tasks():
self.create_notification(event.group).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = self.get_notification_uuid(blocks[0]["text"]["text"])
assert (
fallback_text
== f"{self.name} marked <http://testserver/organizations/{self.organization.slug}/issues/{event.group.id}/?referrer=activity_notification¬ification_uuid={notification_uuid}|{self.project.slug.upper()}-{event.group.short_id}> as resolved"
)
assert blocks[0]["text"]["text"] == fallback_text
self.assert_performance_issue_blocks_with_culprit_blocks(
blocks,
event.organization,
event.project.slug,
event.group,
"resolved_activity-slack",
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_resolved_generic_issue_block(self, occurrence: mock.MagicMock) -> None:
"""
Test that a Slack message is sent with the expected payload when a generic issue type is resolved
and block kit is enabled.
"""
event = self.store_event(
data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
with self.tasks():
self.create_notification(group_event.group).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = self.get_notification_uuid(blocks[0]["text"]["text"])
assert event.group
assert (
fallback_text
== f"{self.name} marked <http://testserver/organizations/{self.organization.slug}/issues/{event.group.id}/?referrer=activity_notification¬ification_uuid={notification_uuid}|{self.project.slug.upper()}-{event.group.short_id}> as resolved"
)
assert blocks[0]["text"]["text"] == fallback_text
self.assert_generic_issue_blocks(
blocks,
group_event.organization,
group_event.project.slug,
group_event.group,
"resolved_activity-slack",
)
| SlackResolvedNotificationTest |
python | astropy__astropy | astropy/utils/masked/tests/test_function_helpers.py | {
"start": 4984,
"end": 6643
} | class ____(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.0)
@pytest.mark.skipif(
not NUMPY_LT_2_1, reason="support for 0d arrays was removed in numpy 2.1"
)
@pytest.mark.filterwarnings("ignore:Calling nonzero on 0d arrays is deprecated")
def test_nonzero_0d_np_lt_2_1(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], 0)
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], 0)
@pytest.mark.skipif(
NUMPY_LT_2_1, reason="support for 0d arrays was removed in numpy 2.1"
)
def test_nonzero_0d_np_ge_2_1(self):
with pytest.raises(ValueError):
Masked(1, mask=False).nonzero()
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.0)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.0)
| TestArgFunctions |
python | apache__airflow | providers/airbyte/src/airflow/providers/airbyte/operators/airbyte.py | {
"start": 1324,
"end": 6360
} | class ____(BaseOperator):
"""
Submits a job to an Airbyte server to run a integration process between your source and destination.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AirbyteTriggerSyncOperator`
:param airbyte_conn_id: Optional. The name of the Airflow connection to get connection
information for Airbyte. Defaults to "airbyte_default".
:param connection_id: Required. The Airbyte ConnectionId UUID between a source and destination.
:param asynchronous: Optional. Flag to get job_id after submitting the job to the Airbyte API.
This is useful for submitting long running jobs and
waiting on them asynchronously using the AirbyteJobSensor. Defaults to False.
:param deferrable: Run operator in the deferrable mode.
:param api_version: Optional. Airbyte API version. Defaults to "v1".
:param wait_seconds: Optional. Number of seconds between checks. Only used when ``asynchronous`` is False.
Defaults to 3 seconds.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Only used when ``asynchronous`` is False. Defaults to 3600 seconds (or 1 hour).
"""
template_fields: Sequence[str] = ("connection_id",)
ui_color = "#6C51FD"
def __init__(
self,
connection_id: str,
airbyte_conn_id: str = "airbyte_default",
asynchronous: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
api_version: str = "v1",
wait_seconds: float = 3,
timeout: float = 3600,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.airbyte_conn_id = airbyte_conn_id
self.connection_id = connection_id
self.timeout = timeout
self.api_version = api_version
self.wait_seconds = wait_seconds
self.asynchronous = asynchronous
self.deferrable = deferrable
def execute(self, context: Context) -> None:
"""Create Airbyte Job and wait to finish."""
hook = AirbyteHook(airbyte_conn_id=self.airbyte_conn_id, api_version=self.api_version)
job_object = hook.submit_sync_connection(connection_id=self.connection_id)
self.job_id = job_object.job_id
state = job_object.status
end_time = time.time() + self.timeout
self.log.info("Job %s was submitted to Airbyte Server", self.job_id)
if self.asynchronous:
self.log.info("Async Task returning job_id %s", self.job_id)
return self.job_id
if not self.deferrable:
self.log.debug("Running in non-deferrable mode...")
hook.wait_for_job(job_id=self.job_id, wait_seconds=self.wait_seconds, timeout=self.timeout)
else:
self.log.debug("Running in defferable mode in job state %s...", state)
if state in (JobStatusEnum.RUNNING, JobStatusEnum.PENDING, JobStatusEnum.INCOMPLETE):
self.defer(
timeout=self.execution_timeout,
trigger=AirbyteSyncTrigger(
conn_id=self.airbyte_conn_id,
job_id=self.job_id,
end_time=end_time,
poll_interval=60,
),
method_name="execute_complete",
)
elif state == JobStatusEnum.SUCCEEDED:
self.log.info("Job %s completed successfully", self.job_id)
return
elif state == JobStatusEnum.FAILED:
raise AirflowException(f"Job failed:\n{self.job_id}")
elif state == JobStatusEnum.CANCELLED:
raise AirflowException(f"Job was cancelled:\n{self.job_id}")
else:
raise AirflowException(f"Encountered unexpected state `{state}` for job_id `{self.job_id}")
return self.job_id
def execute_complete(self, context: Context, event: Any = None) -> None:
"""
Invoke this callback when the trigger fires; return immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
if event["status"] == "error":
self.log.debug("Error occurred with context: %s", context)
raise AirflowException(event["message"])
self.log.info("%s completed successfully.", self.task_id)
return None
def on_kill(self):
"""Cancel the job if task is cancelled."""
hook = AirbyteHook(airbyte_conn_id=self.airbyte_conn_id, api_version=self.api_version)
self.log.debug(
"Job status for job_id %s prior to canceling is: %s",
self.job_id,
hook.get_job_status(self.job_id),
)
if self.job_id:
self.log.info("on_kill: cancel the airbyte Job %s", self.job_id)
hook.cancel_job(self.job_id)
| AirbyteTriggerSyncOperator |
python | spyder-ide__spyder | spyder/api/widgets/comboboxes.py | {
"start": 768,
"end": 1247
} | class ____(QProxyStyle):
"""Style proxy to adjust qdarkstyle issues."""
def styleHint(self, hint, option=None, widget=None, returnData=None):
if hint == QStyle.SH_ComboBox_Popup:
# Disable combobox popup top & bottom areas.
# See spyder-ide/spyder#9682.
# Taken from https://stackoverflow.com/a/21019371
return 0
return QProxyStyle.styleHint(self, hint, option, widget, returnData)
| _SpyderComboBoxProxyStyle |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax_error.py | {
"start": 1593,
"end": 1865
} | class ____(int):
pass
Alias2 = CustomCls | str # [unsupported-binary-operation]
var2 = CustomCls(1) | int(2)
# Check typing.NamedTuple
CustomNamedTuple = typing.NamedTuple(
"CustomNamedTuple", [("my_var", int | str)]) # [unsupported-binary-operation]
| CustomCls |
python | sympy__sympy | sympy/utilities/codegen.py | {
"start": 15809,
"end": 18557
} | class ____(Variable, ResultBase):
"""An expression for a return value.
The name result is used to avoid conflicts with the reserved word
"return" in the Python language. It is also shorter than ReturnValue.
These may or may not need a name in the destination (e.g., "return(x*y)"
might return a value without ever naming it).
"""
def __init__(self, expr, name=None, result_var=None, datatype=None,
dimensions=None, precision=None):
"""Initialize a return value.
Parameters
==========
expr : SymPy expression
name : Symbol, MatrixSymbol, optional
The name of this return variable. When used for code generation,
this might appear, for example, in the prototype of function in a
list of return values. A dummy name is generated if omitted.
result_var : Symbol, Indexed, optional
Something that can be used to assign a value to this variable.
Typically the same as `name` but for Indexed this should be e.g.,
"y[i]" whereas `name` should be the Symbol "y". Defaults to
`name` if omitted.
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the expr argument.
dimensions : sequence containing tuples, optional
If present, this variable is interpreted as an array,
where this sequence of tuples specifies (lower, upper)
bounds for each index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
# Basic because it is the base class for all types of expressions
if not isinstance(expr, (Basic, MatrixBase)):
raise TypeError("The first argument must be a SymPy expression.")
if name is None:
name = 'result_%d' % abs(hash(expr))
if datatype is None:
#try to infer data type from the expression
datatype = get_default_datatype(expr)
if isinstance(name, str):
if isinstance(expr, (MatrixBase, MatrixExpr)):
name = MatrixSymbol(name, *expr.shape)
else:
name = Symbol(name)
if result_var is None:
result_var = name
Variable.__init__(self, name, datatype=datatype,
dimensions=dimensions, precision=precision)
ResultBase.__init__(self, expr, result_var)
def __str__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.expr, self.name,
self.result_var)
__repr__ = __str__
#
# Transformation of routine objects into code
#
| Result |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/hooks/databricks_base.py | {
"start": 2611,
"end": 36112
} | class ____(BaseHook):
"""
Base for interaction with Databricks.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:param retry_limit: The number of times to retry the connection in case of
service outages.
:param retry_delay: The number of seconds to wait between retries (it
might be a floating point number).
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
:param caller: The name of the operator that is calling the hook.
"""
conn_name_attr: str = "databricks_conn_id"
default_conn_name = "databricks_default"
conn_type = "databricks"
extra_parameters = [
"token",
"host",
"use_azure_managed_identity",
DEFAULT_AZURE_CREDENTIAL_SETTING_KEY,
"azure_ad_endpoint",
"azure_resource_id",
"azure_tenant_id",
"service_principal_oauth",
]
def __init__(
self,
databricks_conn_id: str = default_conn_name,
timeout_seconds: int = 180,
retry_limit: int = 3,
retry_delay: float = 1.0,
retry_args: dict[Any, Any] | None = None,
caller: str = "Unknown",
) -> None:
super().__init__()
self.databricks_conn_id = databricks_conn_id
self.timeout_seconds = timeout_seconds
if retry_limit < 1:
raise ValueError("Retry limit must be greater than or equal to 1")
self.retry_limit = retry_limit
self.retry_delay = retry_delay
self.oauth_tokens: dict[str, dict] = {}
self.token_timeout_seconds = 10
self.caller = caller
self._metadata_cache: dict[str, Any] = {}
self._metadata_expiry: float = 0
self._metadata_ttl: int = 300
def my_after_func(retry_state):
self._log_request_error(retry_state.attempt_number, retry_state.outcome)
if retry_args:
self.retry_args = copy.copy(retry_args)
self.retry_args["retry"] = retry_if_exception(self._retryable_error)
self.retry_args["after"] = my_after_func
else:
self.retry_args = {
"stop": stop_after_attempt(self.retry_limit),
"wait": wait_exponential(min=self.retry_delay, max=(2**retry_limit)),
"retry": retry_if_exception(self._retryable_error),
"after": my_after_func,
}
@cached_property
def databricks_conn(self) -> Connection:
return self.get_connection(self.databricks_conn_id) # type: ignore[return-value]
def get_conn(self) -> Connection:
return self.databricks_conn
@cached_property
def user_agent_header(self) -> dict[str, str]:
return {"user-agent": self.user_agent_value}
@cached_property
def user_agent_value(self) -> str:
manager = ProvidersManager()
package_name = manager.hooks[BaseDatabricksHook.conn_type].package_name # type: ignore[union-attr]
provider = manager.providers[package_name]
version = provider.version
python_version = platform.python_version()
system = platform.system().lower()
ua_string = (
f"databricks-airflow/{version} _/0.0.0 python/{python_version} os/{system} "
f"airflow/{__version__} operator/{self.caller}"
)
return ua_string
@cached_property
def host(self) -> str | None:
host = None
if "host" in self.databricks_conn.extra_dejson:
host = self._parse_host(self.databricks_conn.extra_dejson["host"])
elif self.databricks_conn.host:
host = self._parse_host(self.databricks_conn.host)
return host
async def __aenter__(self):
self._session = aiohttp.ClientSession()
return self
async def __aexit__(self, *err):
await self._session.close()
self._session = None
@staticmethod
def _parse_host(host: str) -> str:
"""
Parse host field data; this function is resistant to incorrect connection settings provided by users.
For example -- when users supply ``https://xx.cloud.databricks.com`` as the
host, we must strip out the protocol to get the host.::
h = DatabricksHook()
assert h._parse_host('https://xx.cloud.databricks.com') == \
'xx.cloud.databricks.com'
In the case where users supply the correct ``xx.cloud.databricks.com`` as the
host, this function is a no-op.::
assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com'
"""
urlparse_host = urlsplit(host).hostname
if urlparse_host:
# In this case, host = https://xx.cloud.databricks.com
return urlparse_host
# In this case, host = xx.cloud.databricks.com
return host
def _get_connection_attr(self, attr_name: str) -> str:
if not (attr := getattr(self.databricks_conn, attr_name)):
raise ValueError(f"`{attr_name}` must be present in Connection")
return attr
def _get_retry_object(self) -> Retrying:
"""
Instantiate a retry object.
:return: instance of Retrying class
"""
return Retrying(**self.retry_args)
def _a_get_retry_object(self) -> AsyncRetrying:
"""
Instantiate an async retry object.
:return: instance of AsyncRetrying class
"""
return AsyncRetrying(**self.retry_args)
def _get_sp_token(self, resource: str) -> str:
"""Get Service Principal token."""
sp_token = self.oauth_tokens.get(resource)
if sp_token and self._is_oauth_token_valid(sp_token):
return sp_token["access_token"]
self.log.info("Existing Service Principal token is expired, or going to expire soon. Refreshing...")
try:
for attempt in self._get_retry_object():
with attempt:
resp = requests.post(
resource,
auth=HTTPBasicAuth(self._get_connection_attr("login"), self.databricks_conn.password),
data="grant_type=client_credentials&scope=all-apis",
headers={
**self.user_agent_header,
"Content-Type": "application/x-www-form-urlencoded",
},
timeout=self.token_timeout_seconds,
)
resp.raise_for_status()
jsn = resp.json()
jsn["expires_on"] = int(time.time() + jsn["expires_in"])
self._is_oauth_token_valid(jsn)
self.oauth_tokens[resource] = jsn
break
except RetryError:
raise AirflowException(f"API requests to Databricks failed {self.retry_limit} times. Giving up.")
except requests_exceptions.HTTPError as e:
msg = f"Response: {e.response.content.decode()}, Status Code: {e.response.status_code}"
raise AirflowException(msg)
return jsn["access_token"]
async def _a_get_sp_token(self, resource: str) -> str:
"""Async version of `_get_sp_token()`."""
sp_token = self.oauth_tokens.get(resource)
if sp_token and self._is_oauth_token_valid(sp_token):
return sp_token["access_token"]
self.log.info("Existing Service Principal token is expired, or going to expire soon. Refreshing...")
try:
async for attempt in self._a_get_retry_object():
with attempt:
async with self._session.post(
resource,
auth=aiohttp.BasicAuth(
self._get_connection_attr("login"), self.databricks_conn.password
),
data="grant_type=client_credentials&scope=all-apis",
headers={
**self.user_agent_header,
"Content-Type": "application/x-www-form-urlencoded",
},
timeout=self.token_timeout_seconds,
) as resp:
resp.raise_for_status()
jsn = await resp.json()
jsn["expires_on"] = int(time.time() + jsn["expires_in"])
self._is_oauth_token_valid(jsn)
self.oauth_tokens[resource] = jsn
break
except RetryError:
raise AirflowException(f"API requests to Databricks failed {self.retry_limit} times. Giving up.")
except requests_exceptions.HTTPError as e:
msg = f"Response: {e.response.content.decode()}, Status Code: {e.response.status_code}"
raise AirflowException(msg)
return jsn["access_token"]
def _get_aad_token(self, resource: str) -> str:
    """
    Get AAD token for given resource.

    Supports managed identity or service principal auth.

    :param resource: resource to issue token to
    :return: AAD token, or raise an exception
    """
    aad_token = self.oauth_tokens.get(resource)
    # Serve from cache while the token is still valid.
    if aad_token and self._is_oauth_token_valid(aad_token):
        return aad_token["access_token"]
    self.log.info("Existing AAD token is expired, or going to expire soon. Refreshing...")
    try:
        # Imported lazily: azure-identity is an optional provider dependency.
        from azure.identity import ClientSecretCredential, ManagedIdentityCredential

        for attempt in self._get_retry_object():
            with attempt:
                if self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
                    token = ManagedIdentityCredential().get_token(f"{resource}/.default")
                else:
                    credential = ClientSecretCredential(
                        client_id=self._get_connection_attr("login"),
                        client_secret=self.databricks_conn.password,
                        tenant_id=self.databricks_conn.extra_dejson["azure_tenant_id"],
                    )
                    token = credential.get_token(f"{resource}/.default")
                # Normalize the azure-identity AccessToken into the dict shape
                # expected by _is_oauth_token_valid and the token cache.
                jsn = {
                    "access_token": token.token,
                    "token_type": "Bearer",
                    "expires_on": token.expires_on,
                }
                self._is_oauth_token_valid(jsn)
                self.oauth_tokens[resource] = jsn
                break
    except ImportError as e:
        raise AirflowOptionalProviderFeatureException(e)
    except RetryError:
        raise AirflowException(f"API requests to Azure failed {self.retry_limit} times. Giving up.")
    except requests_exceptions.HTTPError as e:
        msg = f"Response: {e.response.content.decode()}, Status Code: {e.response.status_code}"
        raise AirflowException(msg)
    return jsn["access_token"]
async def _a_get_aad_token(self, resource: str) -> str:
    """
    Async version of `_get_aad_token()`.

    :param resource: resource to issue token to
    :return: AAD token, or raise an exception
    """
    aad_token = self.oauth_tokens.get(resource)
    # Serve from cache while the token is still valid.
    if aad_token and self._is_oauth_token_valid(aad_token):
        return aad_token["access_token"]
    self.log.info("Existing AAD token is expired, or going to expire soon. Refreshing...")
    try:
        # Imported lazily: azure-identity is an optional provider dependency.
        from azure.identity.aio import (
            ClientSecretCredential as AsyncClientSecretCredential,
            ManagedIdentityCredential as AsyncManagedIdentityCredential,
        )

        async for attempt in self._a_get_retry_object():
            with attempt:
                if self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
                    async with AsyncManagedIdentityCredential() as credential:
                        token = await credential.get_token(f"{resource}/.default")
                else:
                    async with AsyncClientSecretCredential(
                        client_id=self._get_connection_attr("login"),
                        client_secret=self.databricks_conn.password,
                        tenant_id=self.databricks_conn.extra_dejson["azure_tenant_id"],
                    ) as credential:
                        token = await credential.get_token(f"{resource}/.default")
                # Normalize the azure-identity AccessToken into the dict shape
                # expected by _is_oauth_token_valid and the token cache.
                jsn = {
                    "access_token": token.token,
                    "token_type": "Bearer",
                    "expires_on": token.expires_on,
                }
                self._is_oauth_token_valid(jsn)
                self.oauth_tokens[resource] = jsn
                break
    except ImportError as e:
        raise AirflowOptionalProviderFeatureException(e)
    except RetryError:
        raise AirflowException(f"API requests to Azure failed {self.retry_limit} times. Giving up.")
    except aiohttp.ClientResponseError as err:
        raise AirflowException(f"Response: {err.message}, Status Code: {err.status}")
    return jsn["access_token"]
def _get_aad_token_for_default_az_credential(self, resource: str) -> str:
    """
    Get AAD token for given resource for workload identity.

    Supports managed identity or service principal auth.

    :param resource: resource to issue token to
    :return: AAD token, or raise an exception
    """
    aad_token = self.oauth_tokens.get(resource)
    # Serve from cache while the token is still valid.
    if aad_token and self._is_oauth_token_valid(aad_token):
        return aad_token["access_token"]
    self.log.info("Existing AAD token is expired, or going to expire soon. Refreshing...")
    try:
        # Imported lazily: azure-identity is an optional provider dependency.
        from azure.identity import DefaultAzureCredential

        for attempt in self._get_retry_object():
            with attempt:
                # This only works in an Azure Kubernetes Service Cluster given the following environment variables:
                # AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE
                #
                # While there is a WorkloadIdentityCredential class, the below class is advised by Microsoft
                # https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview
                token = DefaultAzureCredential().get_token(f"{resource}/.default")
                # Normalize the azure-identity AccessToken into the dict shape
                # expected by _is_oauth_token_valid and the token cache.
                jsn = {
                    "access_token": token.token,
                    "token_type": "Bearer",
                    "expires_on": token.expires_on,
                }
                self._is_oauth_token_valid(jsn)
                self.oauth_tokens[resource] = jsn
                break
    except ImportError as e:
        raise AirflowOptionalProviderFeatureException(e)
    except RetryError:
        raise AirflowException(f"API requests to Azure failed {self.retry_limit} times. Giving up.")
    except requests_exceptions.HTTPError as e:
        msg = f"Response: {e.response.content.decode()}, Status Code: {e.response.status_code}"
        raise AirflowException(msg)
    return token.token
async def _a_get_aad_token_for_default_az_credential(self, resource: str) -> str:
    """
    Async version of `_get_aad_token_for_default_az_credential()`.

    Get AAD token for given resource for workload identity (e.g. AKS).

    :param resource: resource to issue token to
    :return: AAD token, or raise an exception
    """
    aad_token = self.oauth_tokens.get(resource)
    # Serve from cache while the token is still valid.
    if aad_token and self._is_oauth_token_valid(aad_token):
        return aad_token["access_token"]
    self.log.info("Existing AAD token is expired, or going to expire soon. Refreshing...")
    try:
        # Imported lazily: azure-identity is an optional provider dependency.
        from azure.identity.aio import (
            DefaultAzureCredential as AsyncDefaultAzureCredential,
        )

        # Use the async retry object so retry back-off does not block the event
        # loop (the sync self._get_retry_object() sleeps synchronously), matching
        # the other async token helpers in this hook.
        async for attempt in self._a_get_retry_object():
            with attempt:
                # This only works in an Azure Kubernetes Service Cluster given the following environment variables:
                # AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE
                #
                # While there is a WorkloadIdentityCredential class, the below class is advised by Microsoft
                # https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview
                token = await AsyncDefaultAzureCredential().get_token(f"{resource}/.default")
                # Normalize the azure-identity AccessToken into the dict shape
                # expected by _is_oauth_token_valid and the token cache.
                jsn = {
                    "access_token": token.token,
                    "token_type": "Bearer",
                    "expires_on": token.expires_on,
                }
                self._is_oauth_token_valid(jsn)
                self.oauth_tokens[resource] = jsn
                break
    except ImportError as e:
        raise AirflowOptionalProviderFeatureException(e)
    except RetryError:
        raise AirflowException(f"API requests to Azure failed {self.retry_limit} times. Giving up.")
    except requests_exceptions.HTTPError as e:
        msg = f"Response: {e.response.content.decode()}, Status Code: {e.response.status_code}"
        raise AirflowException(msg)
    return token.token
def _get_aad_headers(self) -> dict:
    """
    Fill AAD headers if necessary (SPN is outside of the workspace).

    :return: dictionary with filled AAD headers
    """
    extra = self.databricks_conn.extra_dejson
    # Only needed when the service principal lives outside the workspace,
    # signalled by an explicit Azure resource id on the connection.
    if "azure_resource_id" not in extra:
        return {}
    mgmt_token = self._get_aad_token(AZURE_MANAGEMENT_ENDPOINT)
    return {
        "X-Databricks-Azure-Workspace-Resource-Id": extra["azure_resource_id"],
        "X-Databricks-Azure-SP-Management-Token": mgmt_token,
    }
async def _a_get_aad_headers(self) -> dict:
    """
    Async version of `_get_aad_headers()`.

    :return: dictionary with filled AAD headers
    """
    extra = self.databricks_conn.extra_dejson
    # Only needed when the service principal lives outside the workspace,
    # signalled by an explicit Azure resource id on the connection.
    if "azure_resource_id" not in extra:
        return {}
    mgmt_token = await self._a_get_aad_token(AZURE_MANAGEMENT_ENDPOINT)
    return {
        "X-Databricks-Azure-Workspace-Resource-Id": extra["azure_resource_id"],
        "X-Databricks-Azure-SP-Management-Token": mgmt_token,
    }
@staticmethod
def _is_oauth_token_valid(token: dict, time_key="expires_on") -> bool:
    """
    Check if an OAuth token is valid and hasn't expired yet.

    :param token: dict with properties of OAuth token
    :param time_key: name of the key that holds the time of expiration
    :return: true if token is valid, false otherwise
    :raises AirflowException: if the token dict is missing required fields
    """
    if "access_token" not in token or token.get("token_type", "") != "Bearer" or time_key not in token:
        raise AirflowException(f"Can't get necessary data from OAuth token: {token}")
    # Treat tokens expiring within TOKEN_REFRESH_LEAD_TIME seconds as already
    # invalid so callers refresh before the token actually lapses mid-request.
    return int(token[time_key]) > (int(time.time()) + TOKEN_REFRESH_LEAD_TIME)
def _check_azure_metadata_service(self) -> None:
    """
    Check for Azure Metadata Service (with caching).

    https://docs.microsoft.com/en-us/azure/virtual-machines/linux/instance-metadata-service
    """
    # Skip the network round-trip while the cached metadata is still fresh.
    if self._metadata_cache and time.time() < self._metadata_expiry:
        return
    try:
        for attempt in self._get_retry_object():
            with attempt:
                response = requests.get(
                    AZURE_METADATA_SERVICE_INSTANCE_URL,
                    params={"api-version": "2021-02-01"},
                    headers={"Metadata": "true"},  # required by the IMDS endpoint
                    timeout=2,
                )
                response.raise_for_status()
                response_json = response.json()
                # Raises ValueError if the payload doesn't look like Azure metadata.
                self._validate_azure_metadata_service(response_json)
                self._metadata_cache = response_json
                self._metadata_expiry = time.time() + self._metadata_ttl
                break
    except RetryError:
        raise ConnectionError(f"Failed to reach Azure Metadata Service after {self.retry_limit} retries.")
    except (requests_exceptions.RequestException, ValueError) as e:
        raise ConnectionError(f"Can't reach Azure Metadata Service: {e}")
async def _a_check_azure_metadata_service(self):
    """Async version of `_check_azure_metadata_service()`."""
    # Skip the network round-trip while the cached metadata is still fresh.
    if self._metadata_cache and time.time() < self._metadata_expiry:
        return
    try:
        async for attempt in self._a_get_retry_object():
            with attempt:
                async with self._session.get(
                    url=AZURE_METADATA_SERVICE_INSTANCE_URL,
                    params={"api-version": "2021-02-01"},
                    headers={"Metadata": "true"},  # required by the IMDS endpoint
                    timeout=2,
                ) as resp:
                    resp.raise_for_status()
                    response_json = await resp.json()
                    # Raises ValueError if the payload doesn't look like Azure metadata.
                    self._validate_azure_metadata_service(response_json)
                    self._metadata_cache = response_json
                    self._metadata_expiry = time.time() + self._metadata_ttl
                    break
    except RetryError:
        raise ConnectionError(f"Failed to reach Azure Metadata Service after {self.retry_limit} retries.")
    except (aiohttp.ClientError, ValueError) as e:
        raise ConnectionError(f"Can't reach Azure Metadata Service: {e}")
def _validate_azure_metadata_service(self, response_json: dict) -> None:
if "compute" not in response_json or "azEnvironment" not in response_json["compute"]:
raise ValueError(
f"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {response_json}"
)
def _get_token(self, raise_error: bool = False) -> str | None:
    """
    Resolve an authentication token from the connection, trying each
    supported scheme in priority order. Returns ``None`` (or raises when
    *raise_error* is set) if no token-based scheme is configured.
    """
    conn = self.databricks_conn
    extra = conn.extra_dejson
    # 1. Explicit token stored in extras (discouraged, but supported).
    if "token" in extra:
        self.log.info(
            "Using token auth. For security reasons, please set token in Password field instead of extra"
        )
        return extra["token"]
    # 2. Token stored in the password field (no login set).
    if not conn.login and conn.password:
        self.log.debug("Using token auth.")
        return conn.password
    # 3. Azure service principal (client id/secret) via AAD.
    if "azure_tenant_id" in extra:
        if conn.login == "" or conn.password == "":
            raise AirflowException("Azure SPN credentials aren't provided")
        self.log.debug("Using AAD Token for SPN.")
        return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE)
    # 4. Azure managed identity (requires the instance metadata service).
    if extra.get("use_azure_managed_identity", False):
        self.log.debug("Using AAD Token for managed identity.")
        self._check_azure_metadata_service()
        return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE)
    # 5. DefaultAzureCredential (e.g. workload identity).
    if extra.get(DEFAULT_AZURE_CREDENTIAL_SETTING_KEY, False):
        self.log.debug("Using default Azure Credential authentication.")
        return self._get_aad_token_for_default_az_credential(DEFAULT_DATABRICKS_SCOPE)
    # 6. Databricks-native service principal OAuth.
    if extra.get("service_principal_oauth", False):
        if conn.login == "" or conn.password == "":
            raise AirflowException("Service Principal credentials aren't provided")
        self.log.debug("Using Service Principal Token.")
        return self._get_sp_token(OIDC_TOKEN_SERVICE_URL.format(conn.host))
    if raise_error:
        raise AirflowException("Token authentication isn't configured")
    return None
async def _a_get_token(self, raise_error: bool = False) -> str | None:
    """
    Async version of `_get_token()`: resolve an authentication token from
    the connection, trying each supported scheme in priority order.
    """
    conn = self.databricks_conn
    extra = conn.extra_dejson
    # 1. Explicit token stored in extras (discouraged, but supported).
    if "token" in extra:
        self.log.info(
            "Using token auth. For security reasons, please set token in Password field instead of extra"
        )
        return extra["token"]
    # 2. Token stored in the password field (no login set).
    if not conn.login and conn.password:
        self.log.debug("Using token auth.")
        return conn.password
    # 3. Azure service principal (client id/secret) via AAD.
    if "azure_tenant_id" in extra:
        if conn.login == "" or conn.password == "":
            raise AirflowException("Azure SPN credentials aren't provided")
        self.log.debug("Using AAD Token for SPN.")
        return await self._a_get_aad_token(DEFAULT_DATABRICKS_SCOPE)
    # 4. Azure managed identity (requires the instance metadata service).
    if extra.get("use_azure_managed_identity", False):
        self.log.debug("Using AAD Token for managed identity.")
        await self._a_check_azure_metadata_service()
        return await self._a_get_aad_token(DEFAULT_DATABRICKS_SCOPE)
    # 5. DefaultAzureCredential (e.g. workload identity).
    if extra.get(DEFAULT_AZURE_CREDENTIAL_SETTING_KEY, False):
        self.log.debug("Using AzureDefaultCredential for authentication.")
        return await self._a_get_aad_token_for_default_az_credential(DEFAULT_DATABRICKS_SCOPE)
    # 6. Databricks-native service principal OAuth.
    if extra.get("service_principal_oauth", False):
        if conn.login == "" or conn.password == "":
            raise AirflowException("Service Principal credentials aren't provided")
        self.log.debug("Using Service Principal Token.")
        return await self._a_get_sp_token(OIDC_TOKEN_SERVICE_URL.format(conn.host))
    if raise_error:
        raise AirflowException("Token authentication isn't configured")
    return None
def _log_request_error(self, attempt_num: int, error: str) -> None:
    """Log a single failed Databricks API request attempt with its reason."""
    self.log.error("Attempt %s API Request to Databricks failed with reason: %s", attempt_num, error)
def _endpoint_url(self, endpoint):
port = f":{self.databricks_conn.port}" if self.databricks_conn.port else ""
schema = self.databricks_conn.schema or "https"
return f"{schema}://{self.host}{port}/{endpoint}"
def _do_api_call(
    self,
    endpoint_info: tuple[str, str],
    json: dict[str, Any] | None = None,
    wrap_http_errors: bool = True,
):
    """
    Perform an API call with retries.

    :param endpoint_info: Tuple of method and endpoint
    :param json: Parameters for this API call.
    :param wrap_http_errors: when True, re-raise HTTP errors as AirflowException
        with the response body included; when False, let them propagate as-is.
    :return: If the api call returns a OK status code,
        this function returns the response in JSON. Otherwise,
        we throw an AirflowException.
    """
    method, endpoint = endpoint_info

    # Automatically prepend 'api/' prefix to all endpoint paths
    full_endpoint = f"api/{endpoint}"
    url = self._endpoint_url(full_endpoint)

    aad_headers = self._get_aad_headers()
    headers = {**self.user_agent_header, **aad_headers}

    auth: AuthBase
    token = self._get_token()
    if token:
        auth = _TokenAuth(token)
    else:
        # No token-based scheme configured; fall back to login/password.
        self.log.info("Using basic auth.")
        auth = HTTPBasicAuth(self._get_connection_attr("login"), self.databricks_conn.password)

    request_func: Any
    if method == "GET":
        request_func = requests.get
    elif method == "POST":
        request_func = requests.post
    elif method == "PATCH":
        request_func = requests.patch
    elif method == "DELETE":
        request_func = requests.delete
    else:
        raise AirflowException("Unexpected HTTP Method: " + method)
    try:
        for attempt in self._get_retry_object():
            with attempt:
                self.log.debug(
                    "Initiating %s request to %s with payload: %s, headers: %s",
                    method,
                    url,
                    json,
                    headers,
                )
                # GET sends `json` as query parameters; POST/PATCH send it as
                # the request body; DELETE sends neither.
                response = request_func(
                    url,
                    json=json if method in ("POST", "PATCH") else None,
                    params=json if method == "GET" else None,
                    auth=auth,
                    headers=headers,
                    timeout=self.timeout_seconds,
                )
                self.log.debug("Response Status Code: %s", response.status_code)
                self.log.debug("Response text: %s", response.text)
                response.raise_for_status()
                return response.json()
    except RetryError:
        raise AirflowException(f"API requests to Databricks failed {self.retry_limit} times. Giving up.")
    except requests_exceptions.HTTPError as e:
        if wrap_http_errors:
            msg = f"Response: {e.response.content.decode()}, Status Code: {e.response.status_code}"
            raise AirflowException(msg)
        raise
async def _a_do_api_call(self, endpoint_info: tuple[str, str], json: dict[str, Any] | None = None):
    """
    Async version of `_do_api_call()`.

    :param endpoint_info: Tuple of method and endpoint
    :param json: Parameters for this API call.
    :return: If the api call returns a OK status code,
        this function returns the response in JSON. Otherwise, throw an AirflowException.
    """
    method, endpoint = endpoint_info

    # Automatically prepend 'api/' prefix to all endpoint paths
    full_endpoint = f"api/{endpoint}"
    url = self._endpoint_url(full_endpoint)

    aad_headers = await self._a_get_aad_headers()
    headers = {**self.user_agent_header, **aad_headers}

    auth: aiohttp.BasicAuth
    token = await self._a_get_token()
    if token:
        auth = BearerAuth(token)
    else:
        # No token-based scheme configured; fall back to login/password.
        self.log.info("Using basic auth.")
        auth = aiohttp.BasicAuth(self._get_connection_attr("login"), self.databricks_conn.password)

    request_func: Any
    if method == "GET":
        request_func = self._session.get
    elif method == "POST":
        request_func = self._session.post
    elif method == "PATCH":
        request_func = self._session.patch
    elif method == "DELETE":
        # Supported for parity with the synchronous `_do_api_call()`.
        request_func = self._session.delete
    else:
        raise AirflowException("Unexpected HTTP Method: " + method)
    try:
        async for attempt in self._a_get_retry_object():
            with attempt:
                self.log.debug(
                    "Initiating %s request to %s with payload: %s, headers: %s",
                    method,
                    url,
                    json,
                    headers,
                )
                async with request_func(
                    url,
                    json=json,
                    auth=auth,
                    # `headers` already includes the user-agent header.
                    headers=headers,
                    timeout=self.timeout_seconds,
                ) as response:
                    self.log.debug("Response Status Code: %s", response.status)
                    # aiohttp exposes the body via the async `.text()` method;
                    # without awaiting it we would log the bound method object
                    # instead of the response body. The body is cached, so the
                    # subsequent `.json()` call does not re-read the stream.
                    self.log.debug("Response text: %s", await response.text())
                    response.raise_for_status()
                    return await response.json()
    except RetryError:
        raise AirflowException(f"API requests to Databricks failed {self.retry_limit} times. Giving up.")
    except aiohttp.ClientResponseError as err:
        raise AirflowException(f"Response: {err.message}, Status Code: {err.status}")
@staticmethod
def _get_error_code(exception: BaseException) -> str:
    """Extract the Databricks ``error_code`` from an HTTP error response, or ``""``."""
    if not isinstance(exception, requests_exceptions.HTTPError):
        return ""
    try:
        return exception.response.json().get("error_code", "")
    except JSONDecodeError:
        # Non-JSON error body; treat as no error code.
        return ""
@staticmethod
def _retryable_error(exception: BaseException) -> bool:
    """
    Decide whether a failed request is worth retrying.

    Retries on connection problems and timeouts, HTTP 5xx/429, and the
    Databricks 400 "COULD_NOT_ACQUIRE_LOCK" response, for both the
    `requests` (sync) and `aiohttp` (async) code paths.
    """
    if isinstance(exception, (ClientConnectorError, TimeoutError)):
        return True
    if isinstance(exception, aiohttp.ClientResponseError):
        return exception.status >= 500 or exception.status == 429
    if isinstance(exception, requests_exceptions.RequestException):
        if isinstance(exception, (requests_exceptions.ConnectionError, requests_exceptions.Timeout)):
            return True
        response = exception.response
        if response is None:
            return False
        if response.status_code >= 500 or response.status_code == 429:
            return True
        return (
            response.status_code == 400
            and BaseDatabricksHook._get_error_code(exception) == "COULD_NOT_ACQUIRE_LOCK"
        )
    return False
| BaseDatabricksHook |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.