language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/dagster-webserver/dagster_webserver/webserver.py | {
"start": 14574,
"end": 15323
} | class ____:
"""Middleware for counting traced dagster calls.
Args:
app (ASGI application): ASGI application
"""
def __init__(self, app):
self.app = app
async def __call__(self, scope, receive, send):
traced_counter.set(Counter())
def send_wrapper(message: Message):
if message["type"] == "http.response.start":
counter = traced_counter.get()
if counter and isinstance(counter, Counter):
headers = MutableHeaders(scope=message)
headers.append("x-dagster-call-counts", json.dumps(counter.counts()))
return send(message)
await self.app(scope, receive, send_wrapper)
| DagsterTracedCounterMiddleware |
python | aimacode__aima-python | probability4e.py | {
"start": 2730,
"end": 6908
} | class ____(ProbDist):
"""A discrete probability distribute over a set of variables.
>>> P = JointProbDist(['X', 'Y']); P[1, 1] = 0.25
>>> P[1, 1]
0.25
>>> P[dict(X=0, Y=1)] = 0.5
>>> P[dict(X=0, Y=1)]
0.5"""
def __init__(self, variables):
self.prob = {}
self.variables = variables
self.vals = defaultdict(list)
def __getitem__(self, values):
"""Given a tuple or dict of values, return P(values)."""
values = event_values(values, self.variables)
return ProbDist.__getitem__(self, values)
def __setitem__(self, values, p):
"""Set P(values) = p. Values can be a tuple or a dict; it must
have a value for each of the variables in the joint. Also keep track
of the values we have seen so far for each variable."""
values = event_values(values, self.variables)
self.prob[values] = p
for var, val in zip(self.variables, values):
if val not in self.vals[var]:
self.vals[var].append(val)
def values(self, var):
"""Return the set of possible values for a variable."""
return self.vals[var]
def __repr__(self):
return "P({})".format(self.variables)
def event_values(event, variables):
"""Return a tuple of the values of variables in event.
>>> event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
(8, 10)
>>> event_values ((1, 2), ['C', 'A'])
(1, 2)
"""
if isinstance(event, tuple) and len(event) == len(variables):
return event
else:
return tuple([event[var] for var in variables])
def enumerate_joint_ask(X, e, P):
"""Return a probability distribution over the values of the variable X,
given the {var:val} observations e, in the JointProbDist P. [Section 12.3]
>>> P = JointProbDist(['X', 'Y'])
>>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125
>>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()
'0: 0.667, 1: 0.167, 2: 0.167'
"""
assert X not in e, "Query variable must be distinct from evidence"
Q = ProbDist(X) # probability distribution for X, initially empty
Y = [v for v in P.variables if v != X and v not in e] # hidden variables.
for xi in P.values(X):
Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)
return Q.normalize()
def enumerate_joint(variables, e, P):
"""Return the sum of those entries in P consistent with e,
provided variables is P's remaining variables (the ones not in e)."""
if not variables:
return P[e]
Y, rest = variables[0], variables[1:]
return sum([enumerate_joint(rest, extend(e, Y, y), P)
for y in P.values(Y)])
# ______________________________________________________________________________
# 12.4 Independence
def is_independent(variables, P):
"""
Return whether a list of variables are independent given their distribution P
P is an instance of JoinProbDist
>>> P = JointProbDist(['X', 'Y'])
>>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[1,0] = 0.125
>>> is_independent(['X', 'Y'], P)
False
"""
for var in variables:
event_vars = variables[:]
event_vars.remove(var)
event = {}
distribution = enumerate_joint_ask(var, event, P)
events = gen_possible_events(event_vars, P)
for e in events:
conditional_distr = enumerate_joint_ask(var, e, P)
if conditional_distr.prob != distribution.prob:
return False
return True
def gen_possible_events(vars, P):
"""Generate all possible events of a collection of vars according to distribution of P"""
events = []
def backtrack(vars, P, temp):
if not vars:
events.append(temp)
return
var = vars[0]
for val in P.values(var):
temp[var] = val
backtrack([v for v in vars if v != var], P, copy.copy(temp))
backtrack(vars, P, {})
return events
# ______________________________________________________________________________
# Chapter 13 Probabilistic Reasoning
# 13.1 Representing Knowledge in an Uncertain Domain
| JointProbDist |
python | ansible__ansible | test/integration/targets/ansible-test-sanity-pylint/ansible_collections/ns/col/plugins/action/do_deprecated_stuff.py | {
"start": 323,
"end": 1981
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
deprecator = deprecator_from_collection_name('ns.col')
# ansible-deprecated-version - only ansible-core can encounter this
_display.deprecated(msg='ansible-deprecated-no-version')
# ansible-invalid-deprecated-version - only ansible-core can encounter this
_display.deprecated(msg='collection-deprecated-version', version='1.0.0')
_display.deprecated(msg='collection-invalid-deprecated-version', version='not-a-version')
# ansible-deprecated-no-collection-name - only a module_utils can encounter this
_display.deprecated(msg='wrong-collection-deprecated', collection_name='ns.wrong', version='3.0.0')
_display.deprecated(msg='ansible-expired-deprecated-date', date='2000-01-01')
_display.deprecated(msg='ansible-invalid-deprecated-date', date='not-a-date')
_display.deprecated(msg='ansible-deprecated-both-version-and-date', version='3.0.0', date='2099-01-01')
_display.deprecated(msg='removal-version-must-be-major', version='3.1.0')
# ansible-deprecated-date-not-permitted - only ansible-core can encounter this
_display.deprecated(msg='ansible-deprecated-unnecessary-collection-name', deprecator=deprecator, version='3.0.0')
# ansible-deprecated-collection-name-not-permitted - only ansible-core can encounter this
_display.deprecated(msg='ansible-deprecated-both-collection-name-and-deprecator', collection_name='ns.col', deprecator=deprecator, version='3.0.0')
return result
| ActionModule |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 62414,
"end": 62563
} | class ____(dtypes):
def __init__(self, *args):
super().__init__(*args, device_type="cpu")
# Overrides specified dtypes on CUDA.
| dtypesIfCPU |
python | huggingface__transformers | src/transformers/models/olmo/modular_olmo.py | {
"start": 7674,
"end": 7982
} | class ____(LlamaModel):
def __init__(self, config: OlmoConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[OlmoDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = OlmoLayerNorm(config.hidden_size)
| OlmoModel |
python | neetcode-gh__leetcode | python/1984-minimum-difference-between-highest-and-lowest-of-k-scores.py | {
"start": 0,
"end": 287
} | class ____:
def minimumDifference(self, nums: List[int], k: int) -> int:
nums.sort()
l, r = 0, k - 1
res = float("inf")
while r < len(nums):
res = min(res, nums[r] - nums[l])
l, r = l + 1, r + 1
return res
| Solution |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 25226,
"end": 26124
} | class ____(ChainedSource):
index: int
def __post_init__(self) -> None:
from .variables import ConstantVariable
assert ConstantVariable.is_literal(self.index)
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(
lambda: codegen.load_import_from(utils.__name__, "set_getitem")
)
codegen(self.base)
codegen.append_output(codegen.create_load_const(self.index))
codegen.extend_output(create_call_function(2, False))
def name(self) -> str:
# set ordering might not be stable
return f"list({self.base.name()})[{self.index!r}]"
def is_dict_key(self) -> bool:
return False
# Used to access an item from the dictionary
@dataclasses.dataclass(frozen=True)
| NonSerializableSetGetItemSource |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_ps.py | {
"start": 51625,
"end": 51720
} | class ____(_Backend):
backend_version = 'Level II'
FigureCanvas = FigureCanvasPS
| _BackendPS |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 53460,
"end": 54328
} | class ____:
@skip_xp_backends("cupy", reason="XXX: can_cast in cupy <= 13.2")
def test_basic(self, xp):
g = xp.asarray([[5, 6, 4, 3],
[3, 5, 6, 2],
[2, 3, 5, 6],
[1, 6, 9, 7]], dtype=xp.float64)
h = xp.asarray([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
[2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
[2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
[1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
assert_array_almost_equal(signal.wiener(g), h, decimal=6)
assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6)
padtype_options = ["mean", "median", "minimum", "maximum", "line"]
padtype_options += _upfirdn_modes
| TestWiener |
python | tensorflow__tensorflow | tensorflow/python/framework/extension_type_field_test.py | {
"start": 5637,
"end": 8077
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
# Simple types
dict(tp=int),
dict(tp=float),
dict(tp=str),
dict(tp=bytes),
dict(tp=bool),
dict(tp=None),
dict(tp=type(None)),
dict(tp=dtypes.DType),
dict(tp=tensor_shape.TensorShape),
dict(tp=tensor.Tensor),
dict(tp='A', allow_forward_references=True),
# Generic types
dict(tp=typing.Union[int, float]),
dict(tp=typing.Tuple[int, ...]),
dict(tp=typing.Tuple[int, int]),
dict(tp=_TUPLE[int, ...]),
dict(tp=_TUPLE[int, int]),
dict(tp=typing.Mapping[int, int]),
dict(tp=typing.Mapping[str, int]),
dict(tp=typing.Union[int, 'A'], allow_forward_references=True),
dict(tp=typing.Mapping['A', int], allow_forward_references=True),
dict(tp=typing.Union[int, typing.Tuple[typing.Tuple[int, int], ...]]),
dict(tp=typing.Union[int, _TUPLE[_TUPLE[int, int], ...]]),
dict(tp=typing.Union[int, _TUPLE[typing.Tuple[int, int], ...]]),
dict(tp=typing.Union[int, typing.Tuple[_TUPLE[int, int], ...]]),
])
def testValidPytype(self, tp, allow_forward_references=False):
extension_type_field.validate_field_value_type(
tp, allow_forward_references=allow_forward_references)
@parameterized.parameters([
dict(tp=dict, error="Unsupported type annotation 'dict'"),
dict(tp=list, error="Unsupported type annotation 'list'"),
dict(
tp=typing.Union[int, list],
error="Unsupported type annotation 'list'"),
dict(
tp=typing.Tuple[typing.Tuple[int, int, dict], ...],
error="Unsupported type annotation 'dict'"),
dict(
tp=_TUPLE[_TUPLE[int, int, dict], ...],
error="Unsupported type annotation 'dict'"),
dict(tp='A', error='Unresolved forward reference .*'),
dict(tp=typing.Union[int, 'A'], error='Unresolved forward reference .*'),
dict(tp=typing.Mapping[tensor.Tensor, int],
error="Mapping had a key 'Tensor' with type 'type'"),
dict(
tp=typing.Mapping[tensor_shape.TensorShape, int],
error="Mapping had a key 'TensorShape' with type 'ABCMeta'"),
])
def testInvalidPytype(self, tp, error):
with self.assertRaisesRegex(TypeError, error):
extension_type_field.validate_field_value_type(tp)
| ValidateFieldPyTypeTest |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 81816,
"end": 83035
} | class ____(Response):
"""
Response of events.get_task_metrics endpoint.
:param metrics: List of task with their metrics
:type metrics: Sequence[dict]
"""
_service = "events"
_action = "get_task_metrics"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"metrics": {
"description": "List of task with their metrics",
"items": {"type": "object"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, metrics: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(GetTaskMetricsResponse, self).__init__(**kwargs)
self.metrics = metrics
@schema_property("metrics")
def metrics(self) -> Optional[List[dict]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
self.assert_isinstance(value, "metrics", (dict,), is_array=True)
self._property_metrics = value
| GetTaskMetricsResponse |
python | numpy__numpy | numpy/matrixlib/tests/test_masked_matrix.py | {
"start": 6566,
"end": 8116
} | class ____:
# Test suite for masked subclasses of ndarray.
def _create_data(self):
x = np.arange(5, dtype='float')
mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
return x, mx
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
mx = self._create_data()[1]
assert_(isinstance(mx._data, np.matrix))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
x, mx = self._create_data()
with np.errstate(divide='ignore'):
assert_(isinstance(log(mx), MMatrix))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
x, mx = self._create_data()
# Result should be a MMatrix
assert_(isinstance(add(mx, mx), MMatrix))
assert_(isinstance(add(mx, x), MMatrix))
# Result should work
assert_equal(add(mx, x), mx + x)
assert_(isinstance(add(mx, mx)._data, np.matrix))
with assert_raises(TypeError):
add.outer(mx, mx)
assert_(isinstance(hypot(mx, mx), MMatrix))
assert_(isinstance(hypot(mx, x), MMatrix))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
x, mx = self._create_data()
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
assert_(isinstance(divide(mx, mx), MMatrix))
assert_(isinstance(divide(mx, x), MMatrix))
assert_equal(divide(mx, mx), divide(xmx, xmx))
| TestSubclassing |
python | pola-rs__polars | py-polars/src/polars/io/cloud/credential_provider/_builder.py | {
"start": 4671,
"end": 5225
} | class ____(abc.ABC):
@abc.abstractmethod
def __call__(self) -> CredentialProviderFunction | None:
pass
@property
@abc.abstractmethod
def provider_repr(self) -> str:
"""Used for logging."""
def __repr__(self) -> str:
provider_repr = self.provider_repr
builder_name = type(self).__name__
return f"{provider_repr} @ {builder_name}"
# Wraps an already initialized credential provider into the builder interface.
# Used for e.g. user-provided credential providers.
| CredentialProviderBuilderImpl |
python | pennersr__django-allauth | allauth/socialaccount/providers/dwolla/provider.py | {
"start": 295,
"end": 706
} | class ____(OAuth2Provider):
"""Provider for Dwolla"""
id = "dwolla"
name = "Dwolla"
account_class = DwollaAccount
oauth2_adapter_class = DwollaOAuth2Adapter
def extract_uid(self, data):
return str(data.get("id", None))
def extract_common_fields(self, data):
return dict(
name=data.get("name"),
)
provider_classes = [DwollaProvider]
| DwollaProvider |
python | getsentry__sentry | tests/sentry/seer/explorer/test_tools.py | {
"start": 29031,
"end": 29245
} | class ____(BaseModel):
"""
Required fields for the serialized events used by Seer Explorer.
"""
title: str
entries: list[dict]
tags: list[dict[str, str | None]] | None = None
| _SentryEventData |
python | simonw__datasette | datasette/views/database.py | {
"start": 15545,
"end": 34469
} | class ____(View):
async def post(self, request, datasette):
from datasette.app import TableNotFound
db = await datasette.resolve_database(request)
# We must be a canned query
table_found = False
try:
await datasette.resolve_table(request)
table_found = True
except TableNotFound as table_not_found:
canned_query = await datasette.get_canned_query(
table_not_found.database_name, table_not_found.table, request.actor
)
if canned_query is None:
raise
if table_found:
# That should not have happened
raise DatasetteError("Unexpected table found on POST", status=404)
# If database is immutable, return an error
if not db.is_mutable:
raise Forbidden("Database is immutable")
# Process the POST
body = await request.post_body()
body = body.decode("utf-8").strip()
if body.startswith("{") and body.endswith("}"):
params = json.loads(body)
# But we want key=value strings
for key, value in params.items():
params[key] = str(value)
else:
params = dict(parse_qsl(body, keep_blank_values=True))
# Don't ever send csrftoken as a SQL parameter
params.pop("csrftoken", None)
# Should we return JSON?
should_return_json = (
request.headers.get("accept") == "application/json"
or request.args.get("_json")
or params.get("_json")
)
params_for_query = MagicParameters(
canned_query["sql"], params, request, datasette
)
await params_for_query.execute_params()
ok = None
redirect_url = None
try:
cursor = await db.execute_write(canned_query["sql"], params_for_query)
# success message can come from on_success_message or on_success_message_sql
message = None
message_type = datasette.INFO
on_success_message_sql = canned_query.get("on_success_message_sql")
if on_success_message_sql:
try:
message_result = (
await db.execute(on_success_message_sql, params_for_query)
).first()
if message_result:
message = message_result[0]
except Exception as ex:
message = "Error running on_success_message_sql: {}".format(ex)
message_type = datasette.ERROR
if not message:
message = canned_query.get(
"on_success_message"
) or "Query executed, {} row{} affected".format(
cursor.rowcount, "" if cursor.rowcount == 1 else "s"
)
redirect_url = canned_query.get("on_success_redirect")
ok = True
except Exception as ex:
message = canned_query.get("on_error_message") or str(ex)
message_type = datasette.ERROR
redirect_url = canned_query.get("on_error_redirect")
ok = False
if should_return_json:
return Response.json(
{
"ok": ok,
"message": message,
"redirect": redirect_url,
}
)
else:
datasette.add_message(request, message, message_type)
return Response.redirect(redirect_url or request.path)
async def get(self, request, datasette):
from datasette.app import TableNotFound
await datasette.refresh_schemas()
db = await datasette.resolve_database(request)
database = db.name
# Get all tables/views this actor can see in bulk with private flag
allowed_tables_page = await datasette.allowed_resources(
"view-table",
request.actor,
parent=database,
include_is_private=True,
limit=1000,
)
# Create lookup dict for quick access
allowed_dict = {r.child: r for r in allowed_tables_page.resources}
# Are we a canned query?
canned_query = None
canned_query_write = False
if "table" in request.url_vars:
try:
await datasette.resolve_table(request)
except TableNotFound as table_not_found:
# Was this actually a canned query?
canned_query = await datasette.get_canned_query(
table_not_found.database_name, table_not_found.table, request.actor
)
if canned_query is None:
raise
canned_query_write = bool(canned_query.get("write"))
private = False
if canned_query:
# Respect canned query permissions
visible, private = await datasette.check_visibility(
request.actor,
action="view-query",
resource=QueryResource(database=database, query=canned_query["name"]),
)
if not visible:
raise Forbidden("You do not have permission to view this query")
else:
await datasette.ensure_permission(
action="execute-sql",
resource=DatabaseResource(database=database),
actor=request.actor,
)
# Flattened because of ?sql=&name1=value1&name2=value2 feature
params = {key: request.args.get(key) for key in request.args}
sql = None
if canned_query:
sql = canned_query["sql"]
elif "sql" in params:
sql = params.pop("sql")
# Extract any :named parameters
named_parameters = []
if canned_query and canned_query.get("params"):
named_parameters = canned_query["params"]
if not named_parameters:
named_parameters = derive_named_parameters(sql)
named_parameter_values = {
named_parameter: params.get(named_parameter) or ""
for named_parameter in named_parameters
if not named_parameter.startswith("_")
}
# Set to blank string if missing from params
for named_parameter in named_parameters:
if named_parameter not in params and not named_parameter.startswith("_"):
params[named_parameter] = ""
extra_args = {}
if params.get("_timelimit"):
extra_args["custom_time_limit"] = int(params["_timelimit"])
format_ = request.url_vars.get("format") or "html"
query_error = None
results = None
rows = []
columns = []
params_for_query = params
if not canned_query_write:
try:
if not canned_query:
# For regular queries we only allow SELECT, plus other rules
validate_sql_select(sql)
else:
# Canned queries can run magic parameters
params_for_query = MagicParameters(sql, params, request, datasette)
await params_for_query.execute_params()
results = await datasette.execute(
database, sql, params_for_query, truncate=True, **extra_args
)
columns = results.columns
rows = results.rows
except QueryInterrupted as ex:
raise DatasetteError(
textwrap.dedent(
"""
<p>SQL query took too long. The time limit is controlled by the
<a href="https://docs.datasette.io/en/stable/settings.html#sql-time-limit-ms">sql_time_limit_ms</a>
configuration option.</p>
<textarea style="width: 90%">{}</textarea>
<script>
let ta = document.querySelector("textarea");
ta.style.height = ta.scrollHeight + "px";
</script>
""".format(
markupsafe.escape(ex.sql)
)
).strip(),
title="SQL Interrupted",
status=400,
message_is_html=True,
)
except sqlite3.DatabaseError as ex:
query_error = str(ex)
results = None
rows = []
columns = []
except (sqlite3.OperationalError, InvalidSql) as ex:
raise DatasetteError(str(ex), title="Invalid SQL", status=400)
except sqlite3.OperationalError as ex:
raise DatasetteError(str(ex))
except DatasetteError:
raise
# Handle formats from plugins
if format_ == "csv":
async def fetch_data_for_csv(request, _next=None):
results = await db.execute(sql, params, truncate=True)
data = {"rows": results.rows, "columns": results.columns}
return data, None, None
return await stream_csv(datasette, fetch_data_for_csv, request, db.name)
elif format_ in datasette.renderers.keys():
# Dispatch request to the correct output format renderer
# (CSV is not handled here due to streaming)
result = call_with_supported_arguments(
datasette.renderers[format_][0],
datasette=datasette,
columns=columns,
rows=rows,
sql=sql,
query_name=canned_query["name"] if canned_query else None,
database=database,
table=None,
request=request,
view_name="table",
truncated=results.truncated if results else False,
error=query_error,
# These will be deprecated in Datasette 1.0:
args=request.args,
data={"ok": True, "rows": rows, "columns": columns},
)
if asyncio.iscoroutine(result):
result = await result
if result is None:
raise NotFound("No data")
if isinstance(result, dict):
r = Response(
body=result.get("body"),
status=result.get("status_code") or 200,
content_type=result.get("content_type", "text/plain"),
headers=result.get("headers"),
)
elif isinstance(result, Response):
r = result
# if status_code is not None:
# # Over-ride the status code
# r.status = status_code
else:
assert False, f"{result} should be dict or Response"
elif format_ == "html":
headers = {}
templates = [f"query-{to_css_class(database)}.html", "query.html"]
if canned_query:
templates.insert(
0,
f"query-{to_css_class(database)}-{to_css_class(canned_query['name'])}.html",
)
environment = datasette.get_jinja_environment(request)
template = environment.select_template(templates)
alternate_url_json = datasette.absolute_url(
request,
datasette.urls.path(path_with_format(request=request, format="json")),
)
data = {}
headers.update(
{
"Link": '<{}>; rel="alternate"; type="application/json+datasette"'.format(
alternate_url_json
)
}
)
metadata = await datasette.get_database_metadata(database)
renderers = {}
for key, (_, can_render) in datasette.renderers.items():
it_can_render = call_with_supported_arguments(
can_render,
datasette=datasette,
columns=data.get("columns") or [],
rows=data.get("rows") or [],
sql=data.get("query", {}).get("sql", None),
query_name=data.get("query_name"),
database=database,
table=data.get("table"),
request=request,
view_name="database",
)
it_can_render = await await_me_maybe(it_can_render)
if it_can_render:
renderers[key] = datasette.urls.path(
path_with_format(request=request, format=key)
)
allow_execute_sql = await datasette.allowed(
action="execute-sql",
resource=DatabaseResource(database=database),
actor=request.actor,
)
show_hide_hidden = ""
if canned_query and canned_query.get("hide_sql"):
if bool(params.get("_show_sql")):
show_hide_link = path_with_removed_args(request, {"_show_sql"})
show_hide_text = "hide"
show_hide_hidden = (
'<input type="hidden" name="_show_sql" value="1">'
)
else:
show_hide_link = path_with_added_args(request, {"_show_sql": 1})
show_hide_text = "show"
else:
if bool(params.get("_hide_sql")):
show_hide_link = path_with_removed_args(request, {"_hide_sql"})
show_hide_text = "show"
show_hide_hidden = (
'<input type="hidden" name="_hide_sql" value="1">'
)
else:
show_hide_link = path_with_added_args(request, {"_hide_sql": 1})
show_hide_text = "hide"
hide_sql = show_hide_text == "show"
# Show 'Edit SQL' button only if:
# - User is allowed to execute SQL
# - SQL is an approved SELECT statement
# - No magic parameters, so no :_ in the SQL string
edit_sql_url = None
is_validated_sql = False
try:
validate_sql_select(sql)
is_validated_sql = True
except InvalidSql:
pass
if allow_execute_sql and is_validated_sql and ":_" not in sql:
edit_sql_url = (
datasette.urls.database(database)
+ "/-/query"
+ "?"
+ urlencode(
{
**{
"sql": sql,
},
**named_parameter_values,
}
)
)
async def query_actions():
query_actions = []
for hook in pm.hook.query_actions(
datasette=datasette,
actor=request.actor,
database=database,
query_name=canned_query["name"] if canned_query else None,
request=request,
sql=sql,
params=params,
):
extra_links = await await_me_maybe(hook)
if extra_links:
query_actions.extend(extra_links)
return query_actions
r = Response.html(
await datasette.render_template(
template,
QueryContext(
database=database,
database_color=db.color,
query={
"sql": sql,
"params": params,
},
canned_query=canned_query["name"] if canned_query else None,
private=private,
canned_query_write=canned_query_write,
db_is_immutable=not db.is_mutable,
error=query_error,
hide_sql=hide_sql,
show_hide_link=datasette.urls.path(show_hide_link),
show_hide_text=show_hide_text,
editable=not canned_query,
allow_execute_sql=allow_execute_sql,
tables=await get_tables(datasette, request, db, allowed_dict),
named_parameter_values=named_parameter_values,
edit_sql_url=edit_sql_url,
display_rows=await display_rows(
datasette, database, request, rows, columns
),
table_columns=(
await _table_columns(datasette, database)
if allow_execute_sql
else {}
),
columns=columns,
renderers=renderers,
url_csv=datasette.urls.path(
path_with_format(
request=request, format="csv", extra_qs={"_size": "max"}
)
),
show_hide_hidden=markupsafe.Markup(show_hide_hidden),
metadata=canned_query or metadata,
alternate_url_json=alternate_url_json,
select_templates=[
f"{'*' if template_name == template.name else ''}{template_name}"
for template_name in templates
],
top_query=make_slot_function(
"top_query", datasette, request, database=database, sql=sql
),
top_canned_query=make_slot_function(
"top_canned_query",
datasette,
request,
database=database,
query_name=canned_query["name"] if canned_query else None,
),
query_actions=query_actions,
),
request=request,
view_name="database",
),
headers=headers,
)
else:
assert False, "Invalid format: {}".format(format_)
if datasette.cors:
add_cors_headers(r.headers)
return r
| QueryView |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/core/test_runner.py | {
"start": 4063,
"end": 5865
} | class ____:
"""Test PrefectDbtRunner initialization and configuration."""
def test_initializes_with_defaults(self):
"""Test that runner initializes with sensible defaults."""
runner = PrefectDbtRunner()
assert runner.settings is not None
assert isinstance(runner.settings, PrefectDbtSettings)
assert runner.raise_on_failure is True
assert runner.client is not None
assert runner.include_compiled_code is False
assert runner.disable_assets is False
assert runner._force_nodes_as_tasks is False
def test_accepts_custom_configuration(
self, mock_manifest, mock_settings, mock_client
):
"""Test that runner accepts and uses custom configuration."""
runner = PrefectDbtRunner(
manifest=mock_manifest,
settings=mock_settings,
raise_on_failure=False,
client=mock_client,
include_compiled_code=True,
_force_nodes_as_tasks=True,
)
assert runner.settings == mock_settings
assert runner.raise_on_failure is False
assert runner.client == mock_client
assert runner.include_compiled_code is True
assert runner._force_nodes_as_tasks is True
def test_property_accessors_work_correctly(self, mock_settings):
"""Test that property accessors return expected values."""
runner = PrefectDbtRunner(settings=mock_settings)
# Test that properties access the underlying settings
assert runner.target_path == mock_settings.target_path
assert runner.profiles_dir == mock_settings.profiles_dir
assert runner.project_dir == mock_settings.project_dir
assert runner.log_level == mock_settings.log_level
| TestPrefectDbtRunnerInitialization |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/string_join_op_test.py | {
"start": 859,
"end": 1923
} | class ____(test.TestCase):
def testStringJoin(self):
input0 = ["a", "b"]
input1 = "a"
input2 = [["b"], ["c"]]
output = string_ops.string_join([input0, input1])
self.assertAllEqual(output, [b"aa", b"ba"])
output = string_ops.string_join([input0, input1], separator="--")
self.assertAllEqual(output, [b"a--a", b"b--a"])
output = string_ops.string_join([input0, input1, input0], separator="--")
self.assertAllEqual(output, [b"a--a--a", b"b--a--b"])
output = string_ops.string_join([input1] * 4, separator="!")
self.assertEqual(self.evaluate(output), b"a!a!a!a")
output = string_ops.string_join([input2] * 2, separator="")
self.assertAllEqual(output, [[b"bb"], [b"cc"]])
output = string_ops.string_join([])
self.assertAllEqual(output, b"")
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"shapes do not match|must be equal rank",
):
self.evaluate(string_ops.string_join([input0, input2]))
if __name__ == "__main__":
test.main()
| StringJoinOpTest |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_reflection.py | {
"start": 96643,
"end": 99042
} | class ____(fixtures.TablesTest):
__only_on__ = "postgresql"
__sparse_driver_backend__ = True
__requires__ = ("identity_columns",)
_names = ("t1", "T2", "MiXeDCaSe!")
@classmethod
def define_tables(cls, metadata):
for name in cls._names:
Table(
name,
metadata,
Column(
"id1",
Integer,
Identity(
always=True,
start=2,
increment=3,
minvalue=-2,
maxvalue=42,
cycle=True,
cache=4,
),
),
Column("id2", Integer, Identity()),
Column("id3", BigInteger, Identity()),
Column("id4", SmallInteger, Identity()),
)
@testing.combinations(*_names, argnames="name")
def test_reflect_identity(self, connection, name):
insp = inspect(connection)
default = dict(
always=False,
start=1,
increment=1,
minvalue=1,
cycle=False,
cache=1,
)
cols = insp.get_columns(name)
for col in cols:
if col["name"] == "id1":
is_true("identity" in col)
eq_(
col["identity"],
dict(
always=True,
start=2,
increment=3,
minvalue=-2,
maxvalue=42,
cycle=True,
cache=4,
),
)
elif col["name"] == "id2":
is_true("identity" in col)
exp = default.copy()
exp.update(maxvalue=2**31 - 1)
eq_(col["identity"], exp)
elif col["name"] == "id3":
is_true("identity" in col)
exp = default.copy()
exp.update(maxvalue=2**63 - 1)
eq_(col["identity"], exp)
elif col["name"] == "id4":
is_true("identity" in col)
exp = default.copy()
exp.update(maxvalue=2**15 - 1)
eq_(col["identity"], exp)
| IdentityReflectionTest |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 17968,
"end": 18692
} | class ____(DagRunStateResponse):
type: Literal["DagRunStateResult"] = "DagRunStateResult"
# TODO: Create a convert api_response to result classes so we don't need to do this
# for all the classes above
@classmethod
def from_api_response(cls, dr_state_response: DagRunStateResponse) -> DagRunStateResult:
"""
Create result class from API Response.
API Response is autogenerated from the API schema, so we need to convert it to Result
for communication between the Supervisor and the task process since it needs a
discriminator field.
"""
return cls(**dr_state_response.model_dump(exclude_defaults=True), type="DagRunStateResult")
| DagRunStateResult |
python | apache__airflow | airflow-core/tests/unit/serialization/serializers/test_serializers.py | {
"start": 1786,
"end": 2173
} | class ____(datetime.tzinfo):
name = "My/Custom"
def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta:
return datetime.timedelta(hours=2)
def dst(self, dt: datetime.datetime | None) -> datetime.timedelta | None:
return datetime.timedelta(0)
def tzname(self, dt: datetime.datetime | None) -> str | None:
return self.name
| CustomTZ |
python | pytorch__pytorch | test/test_sparse.py | {
"start": 7343,
"end": 7578
} | class ____(TestCase):
def run(self, result=None):
if TEST_WITH_CROSSREF:
with CrossRefSparseFakeMode():
return super().run(result)
else:
return super().run(result)
| TestSparseBase |
python | allegroai__clearml | clearml/backend_api/services/v2_23/workers.py | {
"start": 89327,
"end": 90386
} | class ____(Request):
"""
Unregister a worker in the system. Called by the Worker Daemon.
:param worker: Worker id. Must be unique in company.
:type worker: str
"""
_service = "workers"
_action = "unregister"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"worker": {
"description": "Worker id. Must be unique in company.",
"type": "string",
}
},
"required": ["worker"],
"type": "object",
}
def __init__(self, worker: str, **kwargs: Any) -> None:
super(UnregisterRequest, self).__init__(**kwargs)
self.worker = worker
@schema_property("worker")
def worker(self) -> str:
return self._property_worker
@worker.setter
def worker(self, value: str) -> None:
if value is None:
self._property_worker = None
return
self.assert_isinstance(value, "worker", six.string_types)
self._property_worker = value
| UnregisterRequest |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 95135,
"end": 97940
} | class ____:
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(ncu.PZERO, ncu.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(ncu.NZERO, ncu.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(ncu.PZERO, ncu.PZERO)
assert_arctan2_isnzero(ncu.NZERO, ncu.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(ncu.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(ncu.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(ncu.PZERO, 1)
assert_arctan2_isnzero(ncu.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, ncu.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, ncu.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, ncu.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, ncu.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, -np.inf), np.pi)
assert_almost_equal(ncu.arctan2(-1, -np.inf), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
| TestArctan2SpecialValues |
python | great-expectations__great_expectations | versioneer.py | {
"start": 12590,
"end": 15899
} | class ____:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg) as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
| VersioneerConfig |
python | GoogleCloudPlatform__python-docs-samples | compute/client_library/ingredients/instances/ip_address/get_vm_address.py | {
"start": 723,
"end": 2016
} | class ____(Enum):
INTERNAL = "internal"
EXTERNAL = "external"
IP_V6 = "ipv6"
def get_instance_ip_address(
instance: compute_v1.Instance, ip_type: IPType
) -> List[str]:
"""
Retrieves the specified type of IP address (ipv6, internal or external) of a specified Compute Engine instance.
Args:
instance (compute_v1.Instance): instance to get
ip_type (IPType): The type of IP address to retrieve (ipv6, internal or external).
Returns:
List[str]: Requested type IP addresses of the instance.
"""
ips = []
if not instance.network_interfaces:
return ips
for interface in instance.network_interfaces:
if ip_type == IPType.EXTERNAL:
for config in interface.access_configs:
if config.type_ == "ONE_TO_ONE_NAT":
ips.append(config.nat_i_p)
elif ip_type == IPType.IP_V6:
for ipv6_config in getattr(interface, "ipv6_access_configs", []):
if ipv6_config.type_ == "DIRECT_IPV6":
ips.append(ipv6_config.external_ipv6)
elif ip_type == IPType.INTERNAL:
# Internal IP is directly available in the network interface
ips.append(interface.network_i_p)
return ips
# </INGREDIENT>
| IPType |
python | langchain-ai__langchain | libs/core/langchain_core/indexing/api.py | {
"start": 4406,
"end": 7651
} | class ____(LangChainException):
"""Raised when an indexing operation fails."""
def _calculate_hash(
text: str, algorithm: Literal["sha1", "sha256", "sha512", "blake2b"]
) -> str:
"""Return a hexadecimal digest of *text* using *algorithm*."""
if algorithm == "sha1":
# Calculate the SHA-1 hash and return it as a UUID.
digest = hashlib.sha1(text.encode("utf-8"), usedforsecurity=False).hexdigest()
return str(uuid.uuid5(NAMESPACE_UUID, digest))
if algorithm == "blake2b":
return hashlib.blake2b(text.encode("utf-8")).hexdigest()
if algorithm == "sha256":
return hashlib.sha256(text.encode("utf-8")).hexdigest()
if algorithm == "sha512":
return hashlib.sha512(text.encode("utf-8")).hexdigest()
msg = f"Unsupported hashing algorithm: {algorithm}"
raise ValueError(msg)
def _get_document_with_hash(
document: Document,
*,
key_encoder: Callable[[Document], str]
| Literal["sha1", "sha256", "sha512", "blake2b"],
) -> Document:
"""Calculate a hash of the document, and assign it to the uid.
When using one of the predefined hashing algorithms, the hash is calculated
by hashing the content and the metadata of the document.
Args:
document: Document to hash.
key_encoder: Hashing algorithm to use for hashing the document.
If not provided, a default encoder using SHA-1 will be used.
SHA-1 is not collision-resistant, and a motivated attacker
could craft two different texts that hash to the
same cache key.
New applications should use one of the alternative encoders
or provide a custom and strong key encoder function to avoid this risk.
When changing the key encoder, you must change the
index as well to avoid duplicated documents in the cache.
Raises:
ValueError: If the metadata cannot be serialized using json.
Returns:
Document with a unique identifier based on the hash of the content and metadata.
"""
metadata: dict[str, Any] = dict(document.metadata or {})
if callable(key_encoder):
# If key_encoder is a callable, we use it to generate the hash.
hash_ = key_encoder(document)
else:
# The hashes are calculated separate for the content and the metadata.
content_hash = _calculate_hash(document.page_content, algorithm=key_encoder)
try:
serialized_meta = json.dumps(metadata, sort_keys=True)
except Exception as e:
msg = (
f"Failed to hash metadata: {e}. "
f"Please use a dict that can be serialized using json."
)
raise ValueError(msg) from e
metadata_hash = _calculate_hash(serialized_meta, algorithm=key_encoder)
hash_ = _calculate_hash(content_hash + metadata_hash, algorithm=key_encoder)
return Document(
# Assign a unique identifier based on the hash.
id=hash_,
page_content=document.page_content,
metadata=document.metadata,
)
# This internal abstraction was imported by the langchain package internally, so
# we keep it here for backwards compatibility.
| IndexingException |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 76006,
"end": 76738
} | class ____(PrefectFilterBaseModel):
"""Filter by `Artifact.type`."""
any_: Optional[list[str]] = Field(
default=None, description="A list of artifact types to include"
)
not_any_: Optional[list[str]] = Field(
default=None, description="A list of artifact types to exclude"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Artifact.type.in_(self.any_))
if self.not_any_ is not None:
filters.append(db.Artifact.type.notin_(self.not_any_))
return filters
| ArtifactFilterType |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 72273,
"end": 72517
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('value', c_ulonglong),
]
def __init__(self):
super(c_nvmlPdi_t, self).__init__(version=nvmlPdi_v1)
nvmlRepairStatus_v1 = 0x100000C
| c_nvmlPdi_t |
python | protocolbuffers__protobuf | python/google/protobuf/internal/wire_format_test.py | {
"start": 482,
"end": 9481
} | class ____(unittest.TestCase):
def testPackTag(self):
field_number = 0xabc
tag_type = 2
self.assertEqual((field_number << 3) | tag_type,
wire_format.PackTag(field_number, tag_type))
PackTag = wire_format.PackTag
# Number too high.
self.assertRaises(message.EncodeError, PackTag, field_number, 6)
# Number too low.
self.assertRaises(message.EncodeError, PackTag, field_number, -1)
def testUnpackTag(self):
# Test field numbers that will require various varint sizes.
for expected_field_number in (1, 15, 16, 2047, 2048):
for expected_wire_type in range(6): # Highest-numbered wiretype is 5.
field_number, wire_type = wire_format.UnpackTag(
wire_format.PackTag(expected_field_number, expected_wire_type))
self.assertEqual(expected_field_number, field_number)
self.assertEqual(expected_wire_type, wire_type)
self.assertRaises(TypeError, wire_format.UnpackTag, None)
self.assertRaises(TypeError, wire_format.UnpackTag, 'abc')
self.assertRaises(TypeError, wire_format.UnpackTag, 0.0)
self.assertRaises(TypeError, wire_format.UnpackTag, object())
def testZigZagEncode(self):
Z = wire_format.ZigZagEncode
self.assertEqual(0, Z(0))
self.assertEqual(1, Z(-1))
self.assertEqual(2, Z(1))
self.assertEqual(3, Z(-2))
self.assertEqual(4, Z(2))
self.assertEqual(0xfffffffe, Z(0x7fffffff))
self.assertEqual(0xffffffff, Z(-0x80000000))
self.assertEqual(0xfffffffffffffffe, Z(0x7fffffffffffffff))
self.assertEqual(0xffffffffffffffff, Z(-0x8000000000000000))
self.assertRaises(TypeError, Z, None)
self.assertRaises(TypeError, Z, 'abcd')
self.assertRaises(TypeError, Z, 0.0)
self.assertRaises(TypeError, Z, object())
def testZigZagDecode(self):
Z = wire_format.ZigZagDecode
self.assertEqual(0, Z(0))
self.assertEqual(-1, Z(1))
self.assertEqual(1, Z(2))
self.assertEqual(-2, Z(3))
self.assertEqual(2, Z(4))
self.assertEqual(0x7fffffff, Z(0xfffffffe))
self.assertEqual(-0x80000000, Z(0xffffffff))
self.assertEqual(0x7fffffffffffffff, Z(0xfffffffffffffffe))
self.assertEqual(-0x8000000000000000, Z(0xffffffffffffffff))
self.assertRaises(TypeError, Z, None)
self.assertRaises(TypeError, Z, 'abcd')
self.assertRaises(TypeError, Z, 0.0)
self.assertRaises(TypeError, Z, object())
def NumericByteSizeTestHelper(self, byte_size_fn, value, expected_value_size):
# Use field numbers that cause various byte sizes for the tag information.
for field_number, tag_bytes in ((15, 1), (16, 2), (2047, 2), (2048, 3)):
expected_size = expected_value_size + tag_bytes
actual_size = byte_size_fn(field_number, value)
self.assertEqual(expected_size, actual_size,
'byte_size_fn: %s, field_number: %d, value: %r\n'
'Expected: %d, Actual: %d'% (
byte_size_fn, field_number, value, expected_size, actual_size))
def testByteSizeFunctions(self):
# Test all numeric *ByteSize() functions.
NUMERIC_ARGS = [
# Int32ByteSize().
[wire_format.Int32ByteSize, 0, 1],
[wire_format.Int32ByteSize, 127, 1],
[wire_format.Int32ByteSize, 128, 2],
[wire_format.Int32ByteSize, -1, 10],
# Int64ByteSize().
[wire_format.Int64ByteSize, 0, 1],
[wire_format.Int64ByteSize, 127, 1],
[wire_format.Int64ByteSize, 128, 2],
[wire_format.Int64ByteSize, -1, 10],
# UInt32ByteSize().
[wire_format.UInt32ByteSize, 0, 1],
[wire_format.UInt32ByteSize, 127, 1],
[wire_format.UInt32ByteSize, 128, 2],
[wire_format.UInt32ByteSize, wire_format.UINT32_MAX, 5],
# UInt64ByteSize().
[wire_format.UInt64ByteSize, 0, 1],
[wire_format.UInt64ByteSize, 127, 1],
[wire_format.UInt64ByteSize, 128, 2],
[wire_format.UInt64ByteSize, wire_format.UINT64_MAX, 10],
# SInt32ByteSize().
[wire_format.SInt32ByteSize, 0, 1],
[wire_format.SInt32ByteSize, -1, 1],
[wire_format.SInt32ByteSize, 1, 1],
[wire_format.SInt32ByteSize, -63, 1],
[wire_format.SInt32ByteSize, 63, 1],
[wire_format.SInt32ByteSize, -64, 1],
[wire_format.SInt32ByteSize, 64, 2],
# SInt64ByteSize().
[wire_format.SInt64ByteSize, 0, 1],
[wire_format.SInt64ByteSize, -1, 1],
[wire_format.SInt64ByteSize, 1, 1],
[wire_format.SInt64ByteSize, -63, 1],
[wire_format.SInt64ByteSize, 63, 1],
[wire_format.SInt64ByteSize, -64, 1],
[wire_format.SInt64ByteSize, 64, 2],
# Fixed32ByteSize().
[wire_format.Fixed32ByteSize, 0, 4],
[wire_format.Fixed32ByteSize, wire_format.UINT32_MAX, 4],
# Fixed64ByteSize().
[wire_format.Fixed64ByteSize, 0, 8],
[wire_format.Fixed64ByteSize, wire_format.UINT64_MAX, 8],
# SFixed32ByteSize().
[wire_format.SFixed32ByteSize, 0, 4],
[wire_format.SFixed32ByteSize, wire_format.INT32_MIN, 4],
[wire_format.SFixed32ByteSize, wire_format.INT32_MAX, 4],
# SFixed64ByteSize().
[wire_format.SFixed64ByteSize, 0, 8],
[wire_format.SFixed64ByteSize, wire_format.INT64_MIN, 8],
[wire_format.SFixed64ByteSize, wire_format.INT64_MAX, 8],
# FloatByteSize().
[wire_format.FloatByteSize, 0.0, 4],
[wire_format.FloatByteSize, 1000000000.0, 4],
[wire_format.FloatByteSize, -1000000000.0, 4],
# DoubleByteSize().
[wire_format.DoubleByteSize, 0.0, 8],
[wire_format.DoubleByteSize, 1000000000.0, 8],
[wire_format.DoubleByteSize, -1000000000.0, 8],
# BoolByteSize().
[wire_format.BoolByteSize, False, 1],
[wire_format.BoolByteSize, True, 1],
# EnumByteSize().
[wire_format.EnumByteSize, 0, 1],
[wire_format.EnumByteSize, 127, 1],
[wire_format.EnumByteSize, 128, 2],
[wire_format.EnumByteSize, wire_format.UINT32_MAX, 5],
]
for args in NUMERIC_ARGS:
self.NumericByteSizeTestHelper(*args)
# Test strings and bytes.
for byte_size_fn in (wire_format.StringByteSize, wire_format.BytesByteSize):
# 1 byte for tag, 1 byte for length, 3 bytes for contents.
self.assertEqual(5, byte_size_fn(10, 'abc'))
# 2 bytes for tag, 1 byte for length, 3 bytes for contents.
self.assertEqual(6, byte_size_fn(16, 'abc'))
# 2 bytes for tag, 2 bytes for length, 128 bytes for contents.
self.assertEqual(132, byte_size_fn(16, 'a' * 128))
# Test UTF-8 string byte size calculation.
# 1 byte for tag, 1 byte for length, 8 bytes for content.
self.assertEqual(10, wire_format.StringByteSize(
5, b'\xd0\xa2\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8')))
class MockMessage(object):
def __init__(self, byte_size):
self.byte_size = byte_size
def ByteSize(self):
return self.byte_size
message_byte_size = 10
mock_message = MockMessage(byte_size=message_byte_size)
# Test groups.
# (2 * 1) bytes for begin and end tags, plus message_byte_size.
self.assertEqual(2 + message_byte_size,
wire_format.GroupByteSize(1, mock_message))
# (2 * 2) bytes for begin and end tags, plus message_byte_size.
self.assertEqual(4 + message_byte_size,
wire_format.GroupByteSize(16, mock_message))
# Test messages.
# 1 byte for tag, plus 1 byte for length, plus contents.
self.assertEqual(2 + mock_message.byte_size,
wire_format.MessageByteSize(1, mock_message))
# 2 bytes for tag, plus 1 byte for length, plus contents.
self.assertEqual(3 + mock_message.byte_size,
wire_format.MessageByteSize(16, mock_message))
# 2 bytes for tag, plus 2 bytes for length, plus contents.
mock_message.byte_size = 128
self.assertEqual(4 + mock_message.byte_size,
wire_format.MessageByteSize(16, mock_message))
# Test message set item byte size.
# 4 bytes for tags, plus 1 byte for length, plus 1 byte for type_id,
# plus contents.
mock_message.byte_size = 10
self.assertEqual(mock_message.byte_size + 6,
wire_format.MessageSetItemByteSize(1, mock_message))
# 4 bytes for tags, plus 2 bytes for length, plus 1 byte for type_id,
# plus contents.
mock_message.byte_size = 128
self.assertEqual(mock_message.byte_size + 7,
wire_format.MessageSetItemByteSize(1, mock_message))
# 4 bytes for tags, plus 2 bytes for length, plus 2 byte for type_id,
# plus contents.
self.assertEqual(mock_message.byte_size + 8,
wire_format.MessageSetItemByteSize(128, mock_message))
# Too-long varint.
self.assertRaises(message.EncodeError,
wire_format.UInt64ByteSize, 1, 1 << 128)
if __name__ == '__main__':
unittest.main()
| WireFormatTest |
python | anthropics__anthropic-sdk-python | tests/test_response.py | {
"start": 1657,
"end": 3654
} | class ____(pydantic.BaseModel): ...
def test_response_parse_mismatched_basemodel(client: Anthropic) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
with pytest.raises(
TypeError,
match="Pydantic models must subclass our base model type, e.g. `from anthropic import BaseModel`",
):
response.parse(to=PydanticModel)
@pytest.mark.asyncio
async def test_async_response_parse_mismatched_basemodel(async_client: AsyncAnthropic) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=async_client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
with pytest.raises(
TypeError,
match="Pydantic models must subclass our base model type, e.g. `from anthropic import BaseModel`",
):
await response.parse(to=PydanticModel)
def test_response_parse_custom_stream(client: Anthropic) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
stream=True,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
stream = response.parse(to=Stream[int])
assert stream._cast_to == int
@pytest.mark.asyncio
async def test_async_response_parse_custom_stream(async_client: AsyncAnthropic) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=async_client,
stream=True,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
stream = await response.parse(to=Stream[int])
assert stream._cast_to == int
| PydanticModel |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 28513,
"end": 28689
} | class ____(BaseModel):
dag_id: str
logical_date: AwareDatetime
state: str | None = None
type: Literal["GetPreviousDagRun"] = "GetPreviousDagRun"
| GetPreviousDagRun |
python | falconry__falcon | falcon/routing/compiled.py | {
"start": 40589,
"end": 41327
} | class ____(_CxParent):
def __init__(self, segment_idx: int, pattern_idx: int, pattern_text: str) -> None:
super().__init__()
self._segment_idx = segment_idx
self._pattern_idx = pattern_idx
self._pattern_text = pattern_text
def src(self, indentation: int) -> str:
lines = [
'{0}match = patterns[{1}].match(path[{2}]) # {3}'.format(
_TAB_STR * indentation,
self._pattern_idx,
self._segment_idx,
self._pattern_text,
),
'{0}if match is not None:'.format(_TAB_STR * indentation),
self._children_src(indentation + 1),
]
return '\n'.join(lines)
| _CxIfPathSegmentPattern |
python | matplotlib__matplotlib | lib/matplotlib/_type1font.py | {
"start": 34396,
"end": 40737
} | class ____:
__slots__ = ('font', 'buildchar_stack', 'postscript_stack', 'glyphs', 'subrs')
def __init__(self, font):
self.font = font
self.buildchar_stack = []
self.postscript_stack = []
self.glyphs = set()
self.subrs = set()
def run(self, glyph_or_subr):
"""Run the charstring interpreter on a glyph or subroutine.
This does not actually execute the code but simulates it to find out
which subroutines get called when executing the glyph or subroutine.
Parameters
----------
glyph_or_subr : str or int
The name of the glyph or the index of the subroutine to simulate.
Returns
-------
glyphs : set[str]
The set of glyph names called by the glyph or subroutine.
subrs : set[int]
The set of subroutines called by the glyph or subroutine.
"""
if isinstance(glyph_or_subr, str):
program = self.font.prop['CharStrings'][glyph_or_subr]
self.glyphs.add(glyph_or_subr)
else:
program = self.font.prop['Subrs'][glyph_or_subr]
self.subrs.add(glyph_or_subr)
for opcode in self.font._charstring_tokens(program):
if opcode in ('return', 'endchar'):
return self.glyphs, self.subrs
self._step(opcode)
else:
font_name = self.font.prop.get('FontName', '(unknown)')
_log.info(
f"Glyph or subr {glyph_or_subr} in font {font_name} does not end "
"with return or endchar"
)
return self.glyphs, self.subrs
def _step(self, opcode):
"""Run one step in the charstring interpreter."""
match opcode:
case int():
self.buildchar_stack.append(opcode)
case (
'hsbw' | 'sbw' | 'closepath' | 'hlineto' | 'hmoveto' | 'hcurveto' |
'hvcurveto' | 'rlineto' | 'rmoveto' | 'rrcurveto' | 'vhcurveto' |
'vlineto' | 'vmoveto' | 'dotsection' | 'hstem' | 'hstem3' |
'vstem' | 'vstem3' | 'setcurrentpoint'
):
self.buildchar_stack.clear()
case 'seac': # Standard Encoding Accented Character
codes = self.buildchar_stack[3:5]
self.glyphs.update(_StandardEncoding[int(x)] for x in codes)
self.buildchar_stack.clear()
case 'div':
num1, num2 = self.buildchar_stack[-2:]
if num2 == 0:
_log.warning(
f"Division by zero in font {self.font.prop['FontName']}"
)
self.buildchar_stack[-2:] = [0]
else:
self.buildchar_stack[-2:] = [num1/num2]
case 'callothersubr':
n, othersubr = self.buildchar_stack[-2:]
if not isinstance(n, int):
_log.warning(
f"callothersubr {othersubr} with non-integer argument "
f"count in font {self.font.prop['FontName']}"
)
n = int(n)
args = self.buildchar_stack[-2-n:-2]
if othersubr == 3:
self.postscript_stack.append(args[0])
else:
self.postscript_stack.extend(args[::-1])
self.buildchar_stack[-2-n:] = []
case 'callsubr':
subr = self.buildchar_stack.pop()
if not isinstance(subr, int):
_log.warning(
f"callsubr with non-integer argument {subr} in font "
f"{self.font.prop['FontName']}"
)
subr = int(subr)
self.run(subr)
case 'pop':
if not self.postscript_stack:
_log.warning(
f"pop with empty stack in font {self.font.prop['FontName']}"
)
self.postscript_stack.append(0)
self.buildchar_stack.append(self.postscript_stack.pop())
case _:
raise RuntimeError(f'opcode {opcode}')
_StandardEncoding = {
**{ord(letter): letter for letter in string.ascii_letters},
0: '.notdef',
32: 'space',
33: 'exclam',
34: 'quotedbl',
35: 'numbersign',
36: 'dollar',
37: 'percent',
38: 'ampersand',
39: 'quoteright',
40: 'parenleft',
41: 'parenright',
42: 'asterisk',
43: 'plus',
44: 'comma',
45: 'hyphen',
46: 'period',
47: 'slash',
48: 'zero',
49: 'one',
50: 'two',
51: 'three',
52: 'four',
53: 'five',
54: 'six',
55: 'seven',
56: 'eight',
57: 'nine',
58: 'colon',
59: 'semicolon',
60: 'less',
61: 'equal',
62: 'greater',
63: 'question',
64: 'at',
91: 'bracketleft',
92: 'backslash',
93: 'bracketright',
94: 'asciicircum',
95: 'underscore',
96: 'quoteleft',
123: 'braceleft',
124: 'bar',
125: 'braceright',
126: 'asciitilde',
161: 'exclamdown',
162: 'cent',
163: 'sterling',
164: 'fraction',
165: 'yen',
166: 'florin',
167: 'section',
168: 'currency',
169: 'quotesingle',
170: 'quotedblleft',
171: 'guillemotleft',
172: 'guilsinglleft',
173: 'guilsinglright',
174: 'fi',
175: 'fl',
177: 'endash',
178: 'dagger',
179: 'daggerdbl',
180: 'periodcentered',
182: 'paragraph',
183: 'bullet',
184: 'quotesinglbase',
185: 'quotedblbase',
186: 'quotedblright',
187: 'guillemotright',
188: 'ellipsis',
189: 'perthousand',
191: 'questiondown',
193: 'grave',
194: 'acute',
195: 'circumflex',
196: 'tilde',
197: 'macron',
198: 'breve',
199: 'dotaccent',
200: 'dieresis',
202: 'ring',
203: 'cedilla',
205: 'hungarumlaut',
206: 'ogonek',
207: 'caron',
208: 'emdash',
225: 'AE',
227: 'ordfeminine',
232: 'Lslash',
233: 'Oslash',
234: 'OE',
235: 'ordmasculine',
241: 'ae',
245: 'dotlessi',
248: 'lslash',
249: 'oslash',
250: 'oe',
251: 'germandbls',
}
| _CharstringSimulator |
python | fabric__fabric | tests/task.py | {
"start": 200,
"end": 1058
} | class ____:
def accepts_Invoke_level_init_kwargs(self):
# Arbitrarily selected list of invoke-level kwargs...
def body(c, parts):
"I am a docstring"
pass
t = fabric.Task(
body=body,
name="dadbod",
aliases=["heavenly", "check", "shop"],
default=True,
help={"parts": "See: the sum of"},
iterable=["parts"],
)
assert t.body is body
assert t.__doc__ == "I am a docstring"
assert t.name == "dadbod"
assert "heavenly" in t.aliases
assert t.is_default
assert "parts" in t.help
assert "parts" in t.iterable
def allows_hosts_kwarg(self):
# NOTE: most tests are below, in @task tests
assert fabric.Task(Mock(), hosts=["user@host"]).hosts == ["user@host"]
| Task_ |
python | doocs__leetcode | solution/0400-0499/0431.Encode N-ary Tree to Binary Tree/Solution.py | {
"start": 356,
"end": 1342
} | class ____:
# Encodes an n-ary tree to a binary tree.
def encode(self, root: "Optional[Node]") -> Optional[TreeNode]:
if root is None:
return None
node = TreeNode(root.val)
if not root.children:
return node
left = self.encode(root.children[0])
node.left = left
for child in root.children[1:]:
left.right = self.encode(child)
left = left.right
return node
# Decodes your binary tree to an n-ary tree.
def decode(self, data: Optional[TreeNode]) -> "Optional[Node]":
if data is None:
return None
node = Node(data.val, [])
if data.left is None:
return node
left = data.left
while left:
node.children.append(self.decode(left))
left = left.right
return node
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(root))
| Codec |
python | ray-project__ray | python/ray/tests/test_advanced_6.py | {
"start": 4484,
"end": 6968
} | class ____:
def __init(self):
self._pid = None
def put(self, pid):
self._pid = pid
return True
def get(self):
return self._pid
def _store_pid_helper():
try:
pid_store_actor = ray.get_actor("pid-store", "test")
except Exception:
pid_store_actor = PidStoreActor.options(
name="pid-store", lifetime="detached").remote()
assert ray.get(pid_store_actor.put.remote(os.getpid()))
@ray.remote
def normal_task(large1, large2):
# Record the pid of this normal task.
_store_pid_helper()
time.sleep(60 * 60)
return "normaltask"
large = ray.put(np.zeros(100 * 2**10, dtype=np.int8))
obj = normal_task.remote(large, large)
print(ray.get(obj))
"""
driver_script = driver_template.format(address=ray_start_regular["address"])
driver_proc = run_string_as_driver_nonblocking(driver_script)
try:
driver_proc.wait(10)
except Exception:
pass
def get_normal_task_pid():
try:
pid_store_actor = ray.get_actor("pid-store", "test")
return ray.get(pid_store_actor.get.remote())
except Exception:
return None
wait_for_condition(lambda: get_normal_task_pid() is not None, 10)
pid_store_actor = ray.get_actor("pid-store", "test")
normal_task_pid = ray.get(pid_store_actor.get.remote())
assert normal_task_pid is not None
normal_task_proc = psutil.Process(normal_task_pid)
print("killing normal task process, pid =", normal_task_pid)
normal_task_proc.send_signal(signal.SIGTERM)
def normal_task_was_reconstructed():
curr_pid = get_normal_task_pid()
return curr_pid is not None and curr_pid != normal_task_pid
wait_for_condition(lambda: normal_task_was_reconstructed(), 10)
driver_proc.send_signal(signal.SIGTERM)
# Sleep here to make sure raylet has triggered cleaning up
# the idle workers.
wait_for_condition(lambda: not psutil.pid_exists(normal_task_pid), 10)
@pytest.mark.skipif(platform.system() == "Windows", reason="Niceness is posix-only")
def test_worker_niceness(ray_start_regular):
@ray.remote
class PIDReporter:
def get(self):
return os.getpid()
reporter = PIDReporter.remote()
worker_pid = ray.get(reporter.get.remote())
worker_proc = psutil.Process(worker_pid)
assert worker_proc.nice() == 15, worker_proc
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| PidStoreActor |
python | pytorch__pytorch | torch/onnx/_internal/fx/_pass.py | {
"start": 1193,
"end": 5493
} | class ____:
package_info: PackageInfo
@contextlib.contextmanager
def _patch_difflib_sequence_matcher_init():
"""Context patching `difflib.SequenceMatcher` for fx readable graph.
Under this context, the `autojunk` argument of `difflib.SequenceMatcher` will always
be considered as `False`. This is to prevent `difflib.SequenceMatcher` recognizing
stacktrace messages in fx readable graph as junk, as these messages tend to be long (>200)
and repeat multiple times, which falls under the junk filter criteria.
`difflib.SequenceMatcher` is used underneath by all sorts of diffing functions
in `difflib`, including `difflib.unified_diff`, `difflib.ndiff`, `difflib.context_diff`.
Unfortunately, there is no way to pass `autojunk` argument to these functions, and
they all default to `True`. This context patching will affect all of them.
`Reference: Automatic junk heuristic <https://docs.python.org/3/library/difflib.html>`_
"""
original_init = difflib.SequenceMatcher.__init__
def patched_init(self, isjunk=None, a="", b="", autojunk=True) -> None:
original_init(self, isjunk, a, b, autojunk=False)
difflib.SequenceMatcher.__init__ = patched_init # type: ignore[assignment]
try:
yield
finally:
difflib.SequenceMatcher.__init__ = original_init # type: ignore[assignment]
def _unified_diff(a: str, b: str) -> str:
"""Return a string containing the unified diff of two strings.
This function calls a patched version of `difflib.unified_diff` with `autojunk` set
to `False` for `difflib.SequenceMatcher` class. More details can be found in
`_patch_difflib_sequence_matcher_init` function.
Args:
a: The first string.
b: The second string.
Returns:
The unified diff of the two strings. If there is no diff, return "<no diff>".
Example::
>>> a = '''class GraphModule(torch.nn.Module):
... def forward(self, input_ids : torch.Tensor, attention_mask : torch.Tensor):
... # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
... view = input_ids.view(-1, 3); input_ids = None
... '''
>>> b = '''class <lambda>(torch.nn.Module):
... def forward(self, input_ids: i64[1, 3], attention_mask: i64[1, 3]):
... # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
... view: i64[1, 3] = torch.ops.aten.view.default(input_ids, [-1, 3]); input_ids = None
... '''
>>> print(_unified_diff(a, b))
---
+++
@@ -1,4 +1,4 @@
-class GraphModule(torch.nn.Module):
- def forward(self, input_ids : torch.Tensor, attention_mask : torch.Tensor):
+class <lambda>(torch.nn.Module):
+ def forward(self, input_ids: i64[1, 3], attention_mask: i64[1, 3]):
# File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
- view = input_ids.view(-1, 3); input_ids = None
+ view: i64[1, 3] = torch.ops.aten.view.default(input_ids, [-1, 3]); input_ids = None
"""
a_list = a.splitlines(keepends=True)
b_list = b.splitlines(keepends=True)
with _patch_difflib_sequence_matcher_init():
# Set `n` to `sys.maxsize` to show entire graph when there is a diff.
diff = "".join(difflib.unified_diff(a_list, b_list, n=sys.maxsize))
if not diff:
return "<no diff>"
return diff
def _transform_diagnose_call_message_formatter(
run: Callable,
self: Transform,
*args: Any,
**kwargs: Any,
) -> str:
return f"Running {self.__class__.__name__} pass. "
def maybe_fx_graph_tabular(graph: torch.fx.Graph) -> str | None:
"""Return the Graph nodes in tabular format. Equivalent to stdout of `graph.print_tabular()`.
If `tabulate` is not installed, return `None`.
Args:
graph: The Graph to print.
Returns:
The Graph printed in a tabular format. None if `tabulate` is not installed.
"""
f = io.StringIO()
with contextlib.redirect_stdout(f):
try:
graph.print_tabular()
except ImportError:
return None
return f.getvalue()
| GraphModuleOnnxMeta |
python | crytic__slither | slither/slithir/operations/condition.py | {
"start": 150,
"end": 666
} | class ____(Operation):
"""
Condition
Only present as last operation in conditional node
"""
def __init__(
self,
value: RVALUE,
) -> None:
assert is_valid_rvalue(value)
super().__init__()
self._value = value
@property
def read(
self,
) -> List[RVALUE]:
return [self.value]
@property
def value(self) -> RVALUE:
return self._value
def __str__(self) -> str:
return f"CONDITION {self.value}"
| Condition |
python | getsentry__sentry | src/sentry/api/endpoints/rule_snooze.py | {
"start": 3050,
"end": 3481
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
result = {
"ownerId": obj.owner_id,
"userId": obj.user_id or "everyone",
"until": obj.until or "forever",
"dateAdded": obj.date_added,
"ruleId": obj.rule_id,
"alertRuleId": obj.alert_rule_id,
}
return result
T = TypeVar("T", bound=Model)
| RuleSnoozeSerializer |
python | django__django | tests/requests_tests/tests.py | {
"start": 53768,
"end": 54828
} | class ____(SimpleTestCase):
def test_basic(self):
environ = {
"CONTENT_TYPE": "text/html",
"CONTENT_LENGTH": "100",
"HTTP_HOST": "example.com",
}
headers = HttpHeaders(environ)
self.assertEqual(sorted(headers), ["Content-Length", "Content-Type", "Host"])
self.assertEqual(
headers,
{
"Content-Type": "text/html",
"Content-Length": "100",
"Host": "example.com",
},
)
def test_parse_header_name(self):
tests = (
("PATH_INFO", None),
("HTTP_ACCEPT", "Accept"),
("HTTP_USER_AGENT", "User-Agent"),
("HTTP_X_FORWARDED_PROTO", "X-Forwarded-Proto"),
("CONTENT_TYPE", "Content-Type"),
("CONTENT_LENGTH", "Content-Length"),
)
for header, expected in tests:
with self.subTest(header=header):
self.assertEqual(HttpHeaders.parse_header_name(header), expected)
| HttpHeadersTests |
python | astropy__astropy | astropy/extern/configobj/configobj.py | {
"start": 83744,
"end": 87652
} | class ____(object):
"""
A simple validator.
Can be used to check that all members expected are present.
To use it, provide a configspec with all your members in (the value given
will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
method of your ``ConfigObj``. ``validate`` will return ``True`` if all
members are present, or a dictionary with True/False meaning
present/missing. (Whole missing sections will be replaced with ``False``)
"""
def __init__(self):
self.baseErrorClass = ConfigObjError
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged."""
if missing:
raise self.baseErrorClass()
return member
def flatten_errors(cfg, res, levels=None, results=None):
"""
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
"""
if levels is None:
# first time called
levels = []
results = []
if res == True:
return sorted(results)
if res == False or isinstance(res, Exception):
results.append((levels[:], None, res))
if levels:
levels.pop()
return sorted(results)
for (key, val) in list(res.items()):
if val == True:
continue
if isinstance(cfg.get(key), Mapping):
# Go down one level
levels.append(key)
flatten_errors(cfg[key], val, levels, results)
continue
results.append((levels[:], key, val))
#
# Go up one level
if levels:
levels.pop()
#
return sorted(results)
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
"""
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out
"""*A programming language is a medium of expression.* - Paul Graham"""
| SimpleVal |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/nextcloud/tests.py | {
"start": 403,
"end": 1710
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = NextCloudProvider.id
def get_login_response_json(self, with_refresh_token=True):
return (
super(NextCloudTests, self)
.get_login_response_json(with_refresh_token=with_refresh_token)
.replace("uid", "user_id")
)
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"ocs": {
"meta": {
"status": "ok",
"statuscode": 100,
"message": "OK",
"totalitems": "",
"itemsperpage": ""
},
"data": {
"enabled": true,
"storageLocation": "\\/var\\/www\\/html\\/data\\/pennersr",
"id": "pennersr",
"lastLogin": 1730973409000,
"backend": "Database",
"subadmin": [],
"quota": {
"free": 9159623057408,
"used": 1585107741,
"total": 9161208165149,
"relative": 0.02,
"quota": -3
},
"email": "batman@wayne.com",
"displayname": "pennersr",
"phone": "",
"address": "",
"website": "",
"twitter": "",
"groups": [
"admin"
],
"language": "nl",
"locale": ""
}
}
}
""",
)
def get_expected_to_str(self):
return "batman@wayne.com"
| NextCloudTests |
python | pytorch__pytorch | torchgen/api/python.py | {
"start": 24610,
"end": 59687
} | class ____:
# The exprs that provide the binding for lambda arguments, e.g.:
#
# 'self' -> '_r.tensor(0)'
# 'min' -> 'out[0]' / 'min_indices' -> 'out[1]'
# 'options' -> 'options'
#
# It has 1-1 mapping with DispatchLambdaArgument.
exprs: Sequence[str]
# Special local inits, which might introduce new variables that
# the 'exprs' above reference, e.g.:
#
# 'auto out = _r.tensorlist_n<2>(2);'
#
inits: Sequence[str]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Helper Functions
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def _cpp_signature(f: NativeFunction, *, method: bool = False) -> CppSignature:
return CppSignatureGroup.from_native_function(f, method=method).signature
def has_tensor_options(f: NativeFunction) -> bool:
return f.func.arguments.tensor_options is not None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Python Signature
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# 'simple_type' was introduced by the old codegen, which is slightly
# different from the python schema type, e.g.: doesn't have '?' suffix
# for optional Tensor/TensorList; doesn't have '[size]' suffix for list type.
def argument_type_str(
t: Type, *, simple_type: bool = False, symint: bool = True
) -> str:
if isinstance(t, BaseType):
if t.name == BaseTy.int:
return "int64_t"
elif t.name == BaseTy.float:
return "double"
elif t.name == BaseTy.str:
return "c10::string_view"
elif t.name in [
BaseTy.Tensor,
BaseTy.bool,
BaseTy.QScheme,
BaseTy.Scalar,
BaseTy.ScalarType,
BaseTy.Generator,
BaseTy.Storage,
BaseTy.Layout,
BaseTy.Device,
BaseTy.DeviceIndex,
BaseTy.MemoryFormat,
BaseTy.Dimname,
BaseTy.Stream,
BaseTy.SymInt,
]:
# These python schema type names line up with their function schema names
return t.name.name
elif isinstance(t, OptionalType):
elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
return f"{elem}?"
elif isinstance(t, ListType):
size = t.size if not simple_type else None
if str(t.elem) == "bool":
assert t.size is not None
return f"::std::array<bool,{t.size}>"
elif str(t.elem) == "int":
return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
elif str(t.elem) == "SymInt":
if symint:
return (
f"SymIntArrayRef[{size}]" if size is not None else "SymIntArrayRef"
)
else:
return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
elif str(t.elem) == "Tensor":
return f"TensorList[{size}]" if size is not None else "TensorList"
elif str(t.elem) == "Scalar":
return f"ScalarList[{size}]" if size is not None else "ScalarList"
elif str(t.elem) == "Tensor?":
if simple_type:
return "c10::List<::std::optional<Tensor>>"
else:
return "const c10::List<::std::optional<Tensor>> &"
elif str(t.elem) == "Dimname":
return f"DimnameList[{size}]" if size is not None else "DimnameList"
elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
return f"ArrayRef<{elem}>"
raise RuntimeError(f"unrecognized type {repr(t)}")
def argument_type_size(t: Type) -> int | None:
l = t.is_list_like()
if l is not None and str(l.elem) != "bool":
return l.size
else:
return None
def argument(a: Argument) -> PythonArgument:
return PythonArgument(
name=a.name,
type=a.type,
# TODO: directly translate a.default to python default
default=(
str(pythonify_default(cpp.default_expr(a.default, a.type, symint=False)))
if a.default is not None
else None
),
default_init=None,
)
# Generates a PythonSignature that can be used for either .pyi or PythonArgParser codegen
def signature(
f: NativeFunction, *, method: bool = False, pyi: bool = False
) -> PythonSignature:
return signature_from_schema(
f.func, category_override=f.category_override, method=method, pyi=pyi
)
def signature_from_schema(
func: FunctionSchema,
*,
category_override: str | None,
method: bool = False,
pyi: bool = False,
) -> PythonSignature:
args: list[Argument] = []
args.extend(func.arguments.pre_self_positional)
# Skip SelfArgument if this is method.
if not method and func.arguments.self_arg is not None:
args.append(func.arguments.self_arg.argument)
args.extend(func.arguments.post_self_positional)
args.extend(func.arguments.pre_tensor_options_kwarg_only)
# Skip TensorOptionsArguments. Python side TensorOptions
# arguments are created based on different rules - see below.
args.extend(func.arguments.post_tensor_options_kwarg_only)
args.extend(func.arguments.out)
input_arg_set = {a.name for a in func.arguments.flat_positional}
kwarg_only_set = {a.name for a in func.arguments.flat_kwarg_only}
out_arg_set = {a.name for a in func.arguments.out}
input_args = tuple(map(argument, filter(lambda a: a.name in input_arg_set, args)))
input_kwargs = tuple(
map(argument, filter(lambda a: a.name in kwarg_only_set, args))
)
outputs = tuple(map(argument, filter(lambda a: a.name in out_arg_set, args)))
# Reintroduce the scattered fields of TensorOptions for Python.
# Compared to the cpp counterpart, the python arguments have new property
# (default_init) and a new argument 'requires_grad', which require some
# special handlings.
# [old codegen] TODO: because these aren't guaranteed to be 100% faithful
# to the original versions in the yaml, this recreation is a potential
# source of drift between eager and JIT. Pull this logic out to a shared place.
has_tensor_input_arg = any(
a.type.is_tensor_like() for a in func.arguments.flat_non_out
)
if any(a.name == "requires_grad" for a in func.schema_order_arguments()):
raise ValueError(
"argument named requires_grad is reserved, should not explicitly add it in the schema"
)
# [old codegen] this probably won't work if one of the returns is not a tensor,
# but it will produce a compile-time error that is obvious.
has_tensor_return = any(r.type.is_tensor_like() for r in func.returns)
name: str = cpp.name(func)
is_factory_function = category_override == "factory" or (
has_tensor_return and not has_tensor_input_arg
)
is_like_or_new_function = (
category_override in ("new", "like")
or name.startswith("new_")
or name.endswith("_like")
)
is_dummy_function = category_override == "dummy"
tensor_options_args: list[PythonArgument] = []
if (is_factory_function or is_like_or_new_function) and not is_dummy_function:
def topt_default_init(name: str) -> str | None:
topt_args = func.arguments.tensor_options
if topt_args is None:
return None
a = getattr(topt_args, name)
if a.default is None or a.default == "None":
return None
return cpp.default_expr(a.default, a.type, symint=False)
tensor_options_args.append(
PythonArgument(
name="dtype",
type=OptionalType(BaseType(BaseTy.ScalarType)),
default="None",
default_init=(
None if is_like_or_new_function else topt_default_init("dtype")
),
)
)
tensor_options_args.append(
PythonArgument(
name="layout",
type=OptionalType(BaseType(BaseTy.Layout)),
default="None",
default_init=(
None if is_like_or_new_function else topt_default_init("layout")
),
)
)
tensor_options_args.append(
PythonArgument(
name="device",
type=OptionalType(BaseType(BaseTy.Device)),
default="None",
default_init=(
None
if is_like_or_new_function
else (
topt_default_init("device")
or "torch::tensors::get_default_device()"
)
),
)
)
tensor_options_args.append(
PythonArgument(
name="pin_memory",
type=OptionalType(BaseType(BaseTy.bool)),
default="False",
default_init=None,
)
)
tensor_options_args.append(
PythonArgument(
name="requires_grad",
type=OptionalType(BaseType(BaseTy.bool)),
default="False",
default_init=None,
)
)
returns = PythonReturns(returns=func.returns)
return PythonSignature(
name=str(func.name.name),
input_args=input_args,
input_kwargs=input_kwargs,
output_args=PythonOutArgument.from_outputs(outputs),
tensor_options_args=tuple(tensor_options_args),
returns=returns,
method=method,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Python Interface
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def structseq_fieldnames(returns: tuple[Return, ...]) -> list[str]:
if len(returns) <= 1 or all(r.name is None for r in returns):
return []
else:
if any(r.name is None for r in returns):
# When building on Windows, `PyStructSequence_UnnamedField` could not be
# resolved by the linker for some reason, which cause error in building:
#
# python_nn_functions.cpp.obj : error LNK2001: unresolved external symbol
# PyStructSequence_UnnamedField
#
# Thus, at this point in time, we do not support unnamed
# fields in structseq; you must either name all fields,
# or none of them.
raise ValueError("Unnamed field is not supported by codegen")
return [str(r.name) for r in returns]
def argument_type_str_pyi(t: Type) -> str:
add_optional = False
if isinstance(t, OptionalType):
t = t.elem
add_optional = True
ret = ""
if isinstance(t, BaseType):
if t.name in [BaseTy.int, BaseTy.DeviceIndex]:
ret = "_int"
if t.name == BaseTy.SymInt:
ret = "_int | SymInt"
elif t.name == BaseTy.float:
ret = "_float"
elif t.name == BaseTy.str:
ret = "str"
elif t.name == BaseTy.Scalar:
ret = "Number | _complex"
elif t.name == BaseTy.ScalarType:
ret = "_dtype"
elif t.name == BaseTy.bool:
ret = "_bool"
elif t.name == BaseTy.QScheme:
ret = "_qscheme"
elif t.name == BaseTy.Layout:
ret = "_layout"
elif t.name == BaseTy.Device:
ret = "DeviceLikeType | None"
elif t.name == BaseTy.MemoryFormat:
ret = "memory_format"
elif t.name == BaseTy.Dimname:
ret = "str | EllipsisType | None"
elif t.name == BaseTy.Storage:
ret = "Storage | UntypedStorage"
elif t.name in [BaseTy.Tensor, BaseTy.Generator, BaseTy.Stream]:
# These python schema type names line up with their function schema names
ret = t.name.name
elif isinstance(t, ListType):
if str(t.elem) == "int":
ret = "_int | _size" if t.size is not None else "_size"
elif t.is_tensor_like():
# TODO: this doesn't seem right...
# Tensor?[] currently translates to tuple[Tensor, ...] | list[Tensor] | None
# It should probably translate to tuple[Tensor | None, ...] | list[Tensor | None]
add_optional = True
ret = (
"Tensor | tuple[Tensor, ...] | list[Tensor]"
if t.size is not None
else "tuple[Tensor, ...] | list[Tensor]"
)
elif str(t.elem) == "float":
ret = "Sequence[_float]"
elif str(t.elem) == "SymInt" and t.size is not None:
elem = argument_type_str_pyi(t.elem)
ret = f"{elem} | Sequence[{elem}]"
else:
elem = argument_type_str_pyi(t.elem)
ret = f"Sequence[{elem}]"
else:
raise RuntimeError(f"unrecognized type {repr(t)}")
if add_optional:
ret = f"{ret} | None".replace(" | None | None", " | None")
return ret
def return_type_str_pyi(t: Type) -> str:
# Where arguments are open to accepting Union, return types should return
# concrete types
if isinstance(t, OptionalType):
inner = return_type_str_pyi(t.elem)
return f"{inner} | None".replace(" | None | None", " | None")
if isinstance(t, BaseType):
if t.name == BaseTy.Device:
return "_device"
elif t.name == BaseTy.Dimname:
return "str | None"
else:
return argument_type_str_pyi(t)
if isinstance(t, ListType):
inner = return_type_str_pyi(t.elem)
return f"tuple[{inner}, ...]"
return argument_type_str_pyi(t)
def returns_structseq_pyi(signature: PythonSignature) -> tuple[str, str] | None:
python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
structseq_name = signature.name
field_names = structseq_fieldnames(signature.returns.returns)
if field_names:
# These types are structseq objects which act like named NamedTuples, but
# the constructor acts like the constructor of tuple. Using typing.NamedTuple
# does not allow us to override __init__.
seq_type = f"tuple[{', '.join(python_returns)}]"
structseq_def_lines = [
f"class {structseq_name}({seq_type}): # fmt: skip",
]
for name, ret_type in zip(field_names, python_returns):
structseq_def_lines.extend(
[
" @property",
f" def {name}(self) -> {ret_type}: ...",
]
)
structseq_def_lines.extend(
[
" def __new__(",
" cls,",
f" sequence: {seq_type},",
" ) -> Self: # fmt: skip",
" ...",
f" n_fields: Final[_int] = {len(field_names)}",
f" n_sequence_fields: Final[_int] = {len(field_names)}",
" n_unnamed_fields: Final[_int] = 0",
" def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing",
"", # add an extra newline
]
)
structseq_def = "\n".join(structseq_def_lines)
# Example:
# structseq_def = (
# "class max(tuple[Tensor, Tensor]): # fmt: skip\n"
# " @property\n"
# " def values(self) -> Tensor: ...\n"
# " @property\n"
# " def indices(self) -> Tensor: ...\n"
# " def __new__(\n"
# " cls,\n"
# " sequence: tuple[Tensor, Tensor],\n"
# " ) -> Self: # fmt: skip\n"
# " ...\n"
# " n_fields: Final[_int] = 2",
# " n_sequence_fields: Final[_int] = 2",
# " n_unnamed_fields: Final[_int] = 0",
# " def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing",
# )
return structseq_name, structseq_def
return None
def returns_str_pyi(signature: PythonSignature) -> str:
field_names = structseq_fieldnames(signature.returns.returns)
if field_names:
return f"torch.return_types.{signature.name}"
python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
if len(python_returns) > 1:
return "tuple[" + ", ".join(python_returns) + "]"
if len(python_returns) == 1:
return python_returns[0]
return "None"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# C++ Function Dispatch
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# This section provides APIs to generate the code that does C++ function
# dispatch. The C++ function call is wrapped by a lambda function.
# For example:
#
# // aten::selu_(Tensor(a!) self) -> Tensor(a!)
# auto dispatch_selu_ = [](Tensor self) -> Tensor {
# pybind11::gil_scoped_release no_gil;
# return at::selu_(self);
# };
#
# The lambda function's signature follows the C++ signature in common
# cases, e.g.:
#
# // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
# [](const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
#
# For out variant the 'out' argument's type is changed from 'Tensor &'
# to 'Tensor'. It's because when calling the lambda it passes in the
# PythonArgParser output '_r.tensor(3)', which is stack allocated object
# and needs to pass by value. Also see comments in 'dispatch_lambda_return_str()'.
#
# // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
# [](Tensor out, const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
#
# For multi-output case it can keep using reference type because the
# PythonArgParser output has been unpacked to local variables, e.g.:
#
# // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *,
# // Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
# [](Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) -> std::tuple<Tensor,Tensor>
#
# For deprecated python signature, it should follow deprecated python arg order.
# TODO: This is to keep same byte-for-byte result as the old codegen - maybe unnecessary?
def dispatch_lambda_args(
ps: PythonSignature, f: NativeFunction, symint: bool = True
) -> tuple[DispatchLambdaArgument, ...]:
if isinstance(ps, PythonSignatureDeprecated):
schema = ps.deprecated_schema
else:
schema = f.func
# Start with cpp arguments - dispatch lambda signature always include 'self'
cpp_args = cpp.arguments(
arguments=schema.arguments,
faithful=False,
symint=symint,
method=False,
cpp_no_default_args=f.cpp_no_default_args,
)
out_args: set[str] = {a.name for a in schema.arguments.out}
# Convert from cpp argument to lambda argument
def dispatch_lambda_arg(cpp_arg: Binding) -> DispatchLambdaArgument:
type_str = cpp_arg.type
is_out_arg = cpp_arg.name in out_args
if ps.method and cpp_arg.name == "self":
# For method's 'self', we can use 'const Tensor &' and simply ignore mutability!
type_str = "const at::Tensor &"
else:
# For other cases we need prevent dangling refs to temps (unless it's
# unpacked scattered output)
# The reason is explained in the comments above and in 'dispatch_lambda_return_str()'.
# TODO: avoid this special handling?
ensure_temp_safe = len(out_args) <= 1 or not is_out_arg
if ensure_temp_safe:
type_str = {
"at::Tensor &": "at::Tensor",
}.get(type_str, type_str)
return DispatchLambdaArgument(
name=cpp_arg.name,
type_str=type_str,
is_out_arg=is_out_arg,
)
return tuple(map(dispatch_lambda_arg, cpp_args))
# [old codegen] XXX: if you got here because of an assertion failure, it doesn't mean
# it's enough to just extend the list here. Before you do this, make sure
# to add an appropriate wrap() overload in torch/csrc/autograd/utils/wrap_outputs.h.
SUPPORTED_RETURN_TYPES = {
"at::Tensor",
"::std::tuple<at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,int64_t>",
"::std::tuple<at::Tensor,at::Tensor,double,int64_t>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t>",
"::std::tuple<at::Tensor,at::Tensor,double,at::Tensor,int64_t>",
"::std::tuple<double,int64_t>",
"::std::tuple<at::Tensor,::std::vector<at::Tensor>>",
"::std::vector<at::Tensor>",
# Needed for flash attention forw/backward
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor>",
"at::Scalar",
"bool",
"int64_t",
"void*",
"void",
"at::QScheme",
"double",
"at::IntArrayRef",
"at::ScalarType",
"at::Stream",
}
def dispatch_lambda_return_str(f: NativeFunction) -> str:
# [old codegen] Remove type annotation (e.g. 'Tensor' rather than 'Tensor &')
# because the dispatch lambdas take mutable arguments *by value*, not
# by reference. If you then return a reference to such an argument, you
# will now have a pointer to a dangling stack entry. Not good.
#
# You want:
#
# auto dispatch_selu_ = [](Tensor self) -> Tensor { ...; return at::selu_(self); };
# ^^^^^^
#
# *not*
#
# auto dispatch_selu_ = [](Tensor self) -> Tensor& { ...; return at::selu_(self); };
# ^^^^^^^
#
# (NB: We can't make dispatch_selu_ take Tensor&, because the enclosing
# codegen looks like dispatch_selu_(_r.tensor(0)), and you can't take a
# mutable reference to temporary. Maybe we could assign it to a
# variable itself.)
returns_without_annotation = tuple(
Return(r.name, r.type, None) for r in f.func.returns
)
return_str = cpp.returns_type(returns_without_annotation, symint=True).cpp_type()
if return_str not in SUPPORTED_RETURN_TYPES:
raise RuntimeError(f"{f.func.name} returns unsupported type {return_str}")
return return_str
def cpp_dispatch_target(f: NativeFunction) -> str:
symint = f.func.has_symint()
name = cpp.name(f.func, symint_overload=symint)
if Variant.method in f.variants:
return f"self.{name}"
if Variant.function in f.variants:
if has_tensor_options(f) or f.func.name.name.base.endswith("_like"):
namespace = "torch"
else:
namespace = "at"
return f"{namespace}::{name}"
raise RuntimeError(f"could not dispatch, neither function nor method: {f.func}")
def cpp_dispatch_exprs(
f: NativeFunction,
*,
python_signature: PythonSignature | None = None,
) -> tuple[str, ...]:
cpp_args: Sequence[Binding] = _cpp_signature(f, method=False).arguments()
exprs: tuple[str, ...] = ()
if not isinstance(python_signature, PythonSignatureDeprecated):
# By default the exprs are consistent with the C++ signature.
exprs = tuple(a.name for a in cpp_args)
else:
# For deprecated python signature we may need fill in some constants.
exprs = tuple(
filter(
lambda n: n != "out" or f.func.is_out_fn(),
python_signature.deprecated_args_exprs,
)
)
if Variant.method in f.variants:
exprs = tuple(filter("self".__ne__, exprs))
return exprs
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Python / C++ Args Binding
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# We explicitly enumerate the PythonArgParser unpacking methods for all
# supported types. This might be more verbose than necessary, partially
# because of the irregularity of unpacking method naming, partially
# because we want to mimic the old codegen behavior - to reject
# unexpected and/or unsupported cases which the old codegen rejects.
# For certain cases it is intentionally more restrictive than necessary,
# e.g.: it doesn't accepts doublelist with definite size.
def arg_parser_unpack_method(
t: Type, default: str | None, default_init: str | None, *, symint: bool = True
) -> str:
has_default_init = default_init is not None
if has_default_init and str(t) not in (
"ScalarType?",
"ScalarType",
"Device",
"Device?",
"Layout",
"Layout?",
"bool",
"bool?",
):
raise RuntimeError(f"type '{t}' does not supported unpacking with default")
if isinstance(t, BaseType):
if t.name in [
BaseTy.Tensor,
BaseTy.Stream,
BaseTy.Storage,
BaseTy.Scalar,
BaseTy.Dimname,
]:
# These unpack methods line up with their schema names
return t.name.name.lower()
elif t.name == BaseTy.ScalarType:
return "scalartypeWithDefault" if has_default_init else "scalartype"
elif t.name == BaseTy.Device:
return "deviceWithDefault" if has_default_init else "device"
elif t.name == BaseTy.DeviceIndex:
return "toInt64"
elif t.name == BaseTy.int:
return "toInt64"
elif t.name == BaseTy.SymInt:
return "toSymInt" if symint else "toInt64"
elif t.name == BaseTy.bool:
return "toBoolWithDefault" if has_default_init else "toBool"
elif t.name == BaseTy.float:
return "toDouble"
elif t.name == BaseTy.str:
return "stringView"
elif t.name == BaseTy.Layout:
return "layoutWithDefault" if has_default_init else "layout"
elif t.name == BaseTy.MemoryFormat:
return "memoryformat"
elif isinstance(t, OptionalType):
if str(t.elem) == "Tensor":
return "optionalTensor"
elif str(t.elem) == "Generator":
return "generator"
elif str(t.elem) == "Dimname[]":
return "toDimnameListOptional"
elif not has_default_init and default in (
None,
"None",
"::std::nullopt",
"std::nullopt",
):
# If default is None: append 'Optional' to elem's unpacking method
return (
arg_parser_unpack_method(t.elem, None, None, symint=symint) + "Optional"
)
else:
# Otherwise, load as underlying type with default
return arg_parser_unpack_method(
t.elem, default, default_init, symint=symint
)
elif isinstance(t, ListType):
if str(t.elem) == "Tensor":
# accept and use definite size
return f"tensorlist_n<{t.size}>" if t.size is not None else "tensorlist"
elif str(t.elem) == "Tensor?":
return "list_of_optional_tensors"
elif str(t.elem) == "Dimname":
# accept definite size
return "dimnamelist"
elif str(t.elem) == "int":
# accept definite size
return "intlist"
elif str(t.elem) == "float":
return "doublelist"
elif str(t.elem) == "SymInt":
# accept definite size
return "symintlist" if symint else "intlist"
elif str(t.elem) == "Scalar":
return "scalarlist"
raise RuntimeError(f"type '{t}' is not supported by PythonArgParser")
# Return RHS expression for python argument using PythonArgParser output.
# e.g. for arg name 'foo', arg type 'bool', arg_index = 2, returns '_r.toBool(2)'
def arg_parser_output_expr(
arg_index: int, a: PythonArgument, *, symint: bool = True
) -> PythonArgParserOutputExpr:
has_default = a.default_init is not None
unpack_method = arg_parser_unpack_method(
t=a.type, default=a.default, default_init=a.default_init, symint=symint
)
default = f", {a.default_init}" if has_default else ""
expr = f"_r.{unpack_method}({arg_index}{default})"
return PythonArgParserOutputExpr(
name=a.name,
expr=expr,
index=arg_index,
argument=a,
)
# Returns a map with key = arg_name and value = PythonArgParserOutputExpr.
def arg_parser_output_exprs(
ps: PythonSignature, f: NativeFunction, *, symint: bool = True
) -> dict[str, PythonArgParserOutputExpr]:
return {
e.name: e
for i, a in enumerate(ps.arguments())
for e in (arg_parser_output_expr(i, a, symint=symint),)
}
# argument name to type for scattered tensor options fields
TENSOR_OPTIONS_FIELDS = {
"dtype": "ScalarType?",
"device": "Device?",
"layout": "Layout?",
"pin_memory": "bool?",
"requires_grad": "bool?",
}
# bind arg parser outputs (python args) with dispatch lambda arguments (c++ args).
def dispatch_lambda_exprs(
ps: PythonSignature, f: NativeFunction, *, symint: bool = True
) -> DispatchLambdaArgumentExprs:
# This method is to bind 'arg_parser_outputs' and 'lambda_args' by producing
# 'inits' and 'lambda_args_exprs' for each lambda argument using arg parser
# outputs.
arg_parser_outputs = arg_parser_output_exprs(ps, f, symint=symint)
lambda_args = dispatch_lambda_args(ps, f, symint=symint)
inits: list[str] = []
lambda_args_exprs: dict[str, str] = {}
has_toptions = has_tensor_options(f)
# 1. special inits/unpacking to provide binding exprs for lambda arguments.
for a in ps.arguments(skip_tensor_options=True):
name = a.name
arg_parser_expr = arg_parser_outputs[a.name].expr
if has_toptions and name == "self":
# TODO: why this needs to be special case?
inits.extend(
[
f"auto self = {arg_parser_expr};",
]
)
lambda_args_exprs[name] = name
elif (
isinstance(a, PythonOutArgument)
and len(a.outputs) > 1
and f.func.is_out_fn()
):
inits.extend(
[
f"auto out = {arg_parser_expr};",
]
)
for i, out_arg in enumerate(a.outputs):
lambda_args_exprs[out_arg.name] = f"out[{i}]"
elif str(a.type) == "Dimname[]?":
# [old codegen]
# TODO: make this part of something more general, or get rid of it.
# optional<ArrayRef<T>> are special. The PythonArgParser returns an
# optional<vector<T>>, which cannot be implicitly converted to
# optional<ArrayRef<T>>. One needs to unwrap the optional and rewrap.
inits.extend(
[
f"auto __{name} = {arg_parser_expr};",
f"::std::optional<DimnameList> {name} = __{name} ? ::std::make_optional(DimnameList(__{name}.value())) : ::std::nullopt;", # noqa: B950
]
)
lambda_args_exprs[name] = name
else:
# default case - directly using PythonArgParser output expr
lambda_args_exprs[name] = arg_parser_expr
# method's self is passed directly to python binding, rather than parsed
if ps.method:
lambda_args_exprs["self"] = "self"
# 2. special packing/checking for TensorOptions.
tensor_options_args_names = [a.name for a in ps.tensor_options_args]
if has_toptions:
if f.func.is_out_fn():
raise RuntimeError(f"{f.func}: tensor options with output arg")
for a in ps.tensor_options_args:
if a.name not in TENSOR_OPTIONS_FIELDS:
raise RuntimeError(
f"{f.func}: unrecognized tensor options field '{a.name}' in python binding arguments"
)
if str(a.type) != TENSOR_OPTIONS_FIELDS.get(a.name):
raise RuntimeError(
f"{f.func}: unrecognized type '{str(a.type)}' for tensor options field '{a.name}'"
)
if not all(a in tensor_options_args_names for a in TENSOR_OPTIONS_FIELDS):
raise RuntimeError(
f"{f.func}: incomplete tensor options args: {tensor_options_args_names}"
)
inits.append(
f"""\
const auto options = TensorOptions()
.dtype({arg_parser_outputs["dtype"].expr})
.device({arg_parser_outputs["device"].expr})
.layout({arg_parser_outputs["layout"].expr})
.requires_grad({arg_parser_outputs["requires_grad"].expr})
.pinned_memory({arg_parser_outputs["pin_memory"].expr});
torch::utils::maybe_initialize_device(options);
"""
)
lambda_args_exprs["options"] = "options"
# 3. special case - access scattered TensorOptions fields without packing
# TODO: maybe move to the generator side as it's not related to binding.
if not has_toptions and tensor_options_args_names:
if "dtype" in tensor_options_args_names:
# we're an output-arg variant, check these args against output tensor
if not f.func.is_out_fn():
raise RuntimeError(
f"{f.func}: dtype in tensor_options_args without output arg, {ps} {ps.arguments}"
)
if not all(a in tensor_options_args_names for a in ("layout", "device")):
raise RuntimeError(
f"{f.func}: incomplete tensor options for output check"
)
inits.append(
f"""\
check_out_type_matches({arg_parser_outputs["out"].expr}, {arg_parser_outputs["dtype"].expr},
{arg_parser_outputs["dtype"].is_none_expr}, {arg_parser_outputs["layout"].expr},
{arg_parser_outputs["device"].expr}, {arg_parser_outputs["device"].is_none_expr});
"""
)
# we'll set requires_grad on outgoing tensor
if "requires_grad" not in tensor_options_args_names:
raise RuntimeError(
f'{f.func}: expected "requires_grad" in tensor_options_args absent, but found [{tensor_options_args_names}]'
)
return DispatchLambdaArgumentExprs(
exprs=tuple(lambda_args_exprs[a.name] for a in lambda_args),
inits=inits,
)
| DispatchLambdaArgumentExprs |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_dms.py | {
"start": 33802,
"end": 35185
} | class ____:
FILTER = [{"Name": "replication-type", "Values": ["cdc"]}]
def get_replications(self):
return {
"Replications": [
{
"Status": "test",
"ReplicationArn": "XXXXXXXXXXXXXXXXXXXXXXXXX",
"ReplicationIdentifier": "test-config",
"SourceEndpointArn": "XXXXXXXXXXXXXXXXXXXXXXXXX",
"TargetEndpointArn": "XXXXXXXXXXXXXXXXXXXXXXXXX",
}
]
}
@mock.patch.object(DmsHook, "conn")
def test_filter(self, mock_conn):
mock_conn.describe_replications.return_value = self.get_replications()
op = DmsDescribeReplicationsOperator(
task_id="test_task",
filter=self.FILTER,
)
res = op.execute({})
mock_conn.describe_replications.assert_called_once_with(Filters=self.FILTER)
assert isinstance(res, list)
@mock.patch.object(DmsHook, "conn")
def test_filter_none(self, mock_conn):
mock_conn.describe_replications.return_value = self.get_replications()
op = DmsDescribeReplicationsOperator(
task_id="test_task",
)
res = op.execute({})
mock_conn.describe_replications.assert_called_once_with(Filters=[])
assert isinstance(res, list)
| TestDmsDescribeReplicationsOperator |
python | PrefectHQ__prefect | src/prefect/server/api/clients.py | {
"start": 8190,
"end": 9033
} | class ____(BaseClient):
async def __aenter__(self) -> Self:
return self
async def read_work_pool(self, work_pool_name: str) -> WorkPool:
"""
Reads information for a given work pool
Args:
work_pool_name: The name of the work pool to for which to get
information.
Returns:
Information about the requested work pool.
"""
try:
response = await self._http_client.get(f"/work_pools/{work_pool_name}")
response.raise_for_status()
return WorkPool.model_validate(response.json())
except httpx.HTTPStatusError as e:
if e.response.status_code == status.HTTP_404_NOT_FOUND:
raise ObjectNotFound(http_exc=e) from e
else:
raise
| WorkPoolsOrchestrationClient |
python | ethereum__web3.py | web3/main.py | {
"start": 9533,
"end": 11446
} | class ____(BaseWeb3):
# mypy types
eth: Eth
net: Net
geth: Geth
# Providers
HTTPProvider = HTTPProvider
IPCProvider = IPCProvider
EthereumTesterProvider = EthereumTesterProvider
def __init__(
self,
provider: BaseProvider | None = None,
middleware: Sequence[Any] | None = None,
modules: dict[str, type[Module] | Sequence[Any]] | None = None,
external_modules: None | (dict[str, type[Module] | Sequence[Any]]) = None,
ens: Union[ENS, "Empty"] = empty,
) -> None:
_validate_provider(self, provider)
self.manager = self.RequestManager(self, provider, middleware)
self.codec = ABICodec(build_strict_registry())
if modules is None:
modules = get_default_modules()
self.attach_modules(modules)
if external_modules is not None:
self.attach_modules(external_modules)
self.ens = ens
def is_connected(self, show_traceback: bool = False) -> bool:
return self.provider.is_connected(show_traceback)
@property
def provider(self) -> BaseProvider:
return cast(BaseProvider, self.manager.provider)
@provider.setter
def provider(self, provider: BaseProvider) -> None:
self.manager.provider = provider
@property
def client_version(self) -> str:
return self.manager.request_blocking(RPC.web3_clientVersion, [])
@property
def ens(self) -> Union[ENS, "Empty"]:
if self._ens is empty:
ns = ENS.from_web3(self)
ns.w3 = self
return ns
return self._ens
@ens.setter
def ens(self, new_ens: Union[ENS, "Empty"]) -> None:
if new_ens:
new_ens.w3 = self # set self object reference for ``ENS.w3``
self._ens = new_ens
# -- async -- #
AsyncProviderT = TypeVar("AsyncProviderT", bound=AsyncBaseProvider)
| Web3 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 64603,
"end": 65046
} | class ____(sgqlc.types.Enum):
"""The display color of a single-select field option.
Enumeration Choices:
* `BLUE`: BLUE
* `GRAY`: GRAY
* `GREEN`: GREEN
* `ORANGE`: ORANGE
* `PINK`: PINK
* `PURPLE`: PURPLE
* `RED`: RED
* `YELLOW`: YELLOW
"""
__schema__ = github_schema
__choices__ = ("BLUE", "GRAY", "GREEN", "ORANGE", "PINK", "PURPLE", "RED", "YELLOW")
| ProjectV2SingleSelectFieldOptionColor |
python | pytorch__pytorch | test/torch_np/test_ndarray_methods.py | {
"start": 2945,
"end": 3846
} | class ____(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_transpose_function(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
arr = np.asarray(arr)
assert np.transpose(arr, (1, 0)).tensor._base is arr.tensor
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_transpose_method(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_equal(a.transpose(None), [[1, 3], [2, 4]])
assert_raises((RuntimeError, ValueError), lambda: a.transpose(0))
assert_raises((RuntimeError, ValueError), lambda: a.transpose(0, 0))
assert_raises((RuntimeError, ValueError), lambda: a.transpose(0, 1, 2))
assert a.transpose().tensor._base is a.tensor
| TestTranspose |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 29907,
"end": 30221
} | class ____(FieldValues):
"""
Valid and invalid values for `RegexField`.
"""
valid_inputs = {
'a9': 'a9',
}
invalid_inputs = {
'A9': ["This value does not match the required pattern."]
}
outputs = {}
field = serializers.RegexField(regex='[a-z][0-9]')
| TestRegexField |
python | streamlit__streamlit | lib/streamlit/runtime/state/session_state.py | {
"start": 2358,
"end": 10197
} | class ____(MutableMapping[str, Any]):
"""A mapping of widget IDs to values. Widget values can be stored in
serialized or deserialized form, but when values are retrieved from the
mapping, they'll always be deserialized.
"""
states: dict[str, WState] = field(default_factory=dict)
widget_metadata: dict[str, WidgetMetadata[Any]] = field(default_factory=dict)
def __repr__(self) -> str:
return util.repr_(self)
def __getitem__(self, k: str) -> Any:
"""Return the value of the widget with the given key.
If the widget's value is currently stored in serialized form, it
will be deserialized first.
"""
wstate = self.states.get(k)
if wstate is None:
raise KeyError(k)
if isinstance(wstate, Value):
# The widget's value is already deserialized - return it directly.
return wstate.value
# The widget's value is serialized. We deserialize it, and return
# the deserialized value.
metadata = self.widget_metadata.get(k)
if metadata is None:
# No deserializer, which should only happen if state is
# gotten from a reconnecting browser and the script is
# trying to access it. Pretend it doesn't exist.
raise KeyError(k)
value_field_name = cast(
"ValueFieldName",
wstate.value.WhichOneof("value"),
)
value = (
wstate.value.__getattribute__(value_field_name)
if value_field_name # Field name is None if the widget value was cleared
else None
)
if is_array_value_field_name(value_field_name):
# Array types are messages with data in a `data` field
value = cast("Any", value).data
elif value_field_name == "json_value":
value = json.loads(cast("str", value))
deserialized = metadata.deserializer(value)
# Update metadata to reflect information from WidgetState proto
self.set_widget_metadata(
replace(
metadata,
value_type=value_field_name,
)
)
self.states[k] = Value(deserialized)
return deserialized
def __setitem__(self, k: str, v: WState) -> None:
self.states[k] = v
def __delitem__(self, k: str) -> None:
del self.states[k]
def __len__(self) -> int:
return len(self.states)
def __iter__(self) -> Iterator[str]:
# For this and many other methods, we can't simply delegate to the
# states field, because we need to invoke `__getitem__` for any
# values, to handle deserialization and unwrapping of values.
yield from self.states
def keys(self) -> KeysView[str]:
return KeysView(self.states)
def items(self) -> set[tuple[str, Any]]: # type: ignore[override]
return {(k, self[k]) for k in self}
def values(self) -> set[Any]: # type: ignore[override]
return {self[wid] for wid in self}
def update(self, other: WStates) -> None: # type: ignore[override]
"""Copy all widget values and metadata from 'other' into this mapping,
overwriting any data in this mapping that's also present in 'other'.
"""
self.states.update(other.states)
self.widget_metadata.update(other.widget_metadata)
def set_widget_from_proto(self, widget_state: WidgetStateProto) -> None:
"""Set a widget's serialized value, overwriting any existing value it has."""
self[widget_state.id] = Serialized(widget_state)
def set_from_value(self, k: str, v: Any) -> None:
"""Set a widget's deserialized value, overwriting any existing value it has."""
self[k] = Value(v)
def set_widget_metadata(self, widget_meta: WidgetMetadata[Any]) -> None:
"""Set a widget's metadata, overwriting any existing metadata it has."""
self.widget_metadata[widget_meta.id] = widget_meta
def remove_stale_widgets(
self,
active_widget_ids: set[str],
fragment_ids_this_run: list[str] | None,
) -> None:
"""Remove widget state for stale widgets."""
self.states = {
k: v
for k, v in self.states.items()
if not _is_stale_widget(
self.widget_metadata.get(k),
active_widget_ids,
fragment_ids_this_run,
)
}
def get_serialized(self, k: str) -> WidgetStateProto | None:
"""Get the serialized value of the widget with the given id.
If the widget doesn't exist, return None. If the widget exists but
is not in serialized form, it will be serialized first.
"""
item = self.states.get(k)
if item is None:
# No such widget: return None.
return None
if isinstance(item, Serialized):
# Widget value is serialized: return it directly.
return item.value
# Widget value is not serialized: serialize it first!
metadata = self.widget_metadata.get(k)
if metadata is None:
# We're missing the widget's metadata. (Can this happen?)
return None
widget = WidgetStateProto()
widget.id = k
field = metadata.value_type
serialized = metadata.serializer(item.value)
if is_array_value_field_name(field):
arr = getattr(widget, field)
arr.data.extend(serialized)
elif field in {"json_value", "json_trigger_value"}:
setattr(widget, field, json.dumps(serialized))
elif field == "file_uploader_state_value":
widget.file_uploader_state_value.CopyFrom(serialized)
elif field == "string_trigger_value":
widget.string_trigger_value.CopyFrom(serialized)
elif field == "chat_input_value":
widget.chat_input_value.CopyFrom(serialized)
elif field is not None and serialized is not None:
# If the field is None, the widget value was cleared
# by the user and therefore is None. But we cannot
# set it to None here, since the proto properties are
# not nullable. So we just don't set it.
setattr(widget, field, serialized)
return widget
def as_widget_states(self) -> list[WidgetStateProto]:
"""Return a list of serialized widget values for each widget with a value."""
states = [
self.get_serialized(widget_id)
for widget_id in self.states
if self.get_serialized(widget_id)
]
return cast("list[WidgetStateProto]", states)
def call_callback(self, widget_id: str) -> None:
"""Call the given widget's callback and return the callback's
return value. If the widget has no callback, return None.
If the widget doesn't exist, raise an Exception.
"""
metadata = self.widget_metadata.get(widget_id)
if metadata is None:
raise RuntimeError(f"Widget {widget_id} not found.")
callback = metadata.callback
if callback is None:
return
args = metadata.callback_args or ()
kwargs = metadata.callback_kwargs or {}
ctx = get_script_run_ctx()
if ctx and metadata.fragment_id is not None:
ctx.in_fragment_callback = True
callback(*args, **kwargs)
ctx.in_fragment_callback = False
else:
callback(*args, **kwargs)
def _missing_key_error_message(key: str) -> str:
return (
f'st.session_state has no key "{key}". Did you forget to initialize it? '
f"More info: https://docs.streamlit.io/develop/concepts/architecture/session-state#initialization"
)
@dataclass
| WStates |
python | numba__numba | numba/core/typeinfer.py | {
"start": 28640,
"end": 29650
} | class ____(object):
"""A mixin class to provide the common refinement logic in setitem
and static setitem.
"""
def _refine_target_type(self, typeinfer, targetty, idxty, valty, sig):
"""Refine the target-type given the known index type and value type.
"""
# For array setitem, refine imprecise array dtype
if _is_array_not_precise(targetty):
typeinfer.add_type(self.target.name, sig.args[0], loc=self.loc)
# For Dict setitem
if isinstance(targetty, types.DictType):
if not targetty.is_precise():
refined = targetty.refine(idxty, valty)
typeinfer.add_type(
self.target.name, refined,
loc=self.loc,
)
elif isinstance(targetty, types.LiteralStrKeyDict):
typeinfer.add_type(
self.target.name, types.DictType(idxty, valty),
loc=self.loc,
)
| SetItemRefinement |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 17828,
"end": 18009
} | class ____(TestNoChunks10):
DEFAULT_EXTRA_CLIENT_HEADERS = {
'Connection': 'keep-alive',
}
EXPECT_CLOSE = False
EXPECT_KEEPALIVE = True
| TestNoChunks10KeepAlive |
python | cherrypy__cherrypy | cherrypy/_cpreqbody.py | {
"start": 34011,
"end": 37336
} | class ____(Entity):
"""The entity of the HTTP request."""
bufsize = 8 * 1024
"""The buffer size used when reading the socket."""
# Don't parse the request body at all if the client didn't provide
# a Content-Type header. See
# https://github.com/cherrypy/cherrypy/issues/790
default_content_type = ''
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
declares that a part with no Content-Type defaults to "text/plain"
(see :class:`Part<cherrypy._cpreqbody.Part>`).
"""
maxbytes = None
"""Raise ``MaxSizeExceeded`` if more bytes than this are read from
the socket.
"""
def __init__(self, fp, headers, params=None, request_params=None):
"""Initialize a request body entity."""
Entity.__init__(self, fp, headers, params)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
# When no explicit charset parameter is provided by the
# sender, media subtypes of the "text" type are defined
# to have a default charset value of "ISO-8859-1" when
# received via HTTP.
if self.content_type.value.startswith('text/'):
for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):
if c in self.attempt_charsets:
break
else:
self.attempt_charsets.append('ISO-8859-1')
# Temporary fix while deprecating passing .parts as .params.
self.processors['multipart'] = _old_process_multipart
if request_params is None:
request_params = {}
self.request_params = request_params
def process(self):
"""Process the request entity based on its Content-Type."""
# "The presence of a message-body in a request is signaled by the
# inclusion of a Content-Length or Transfer-Encoding header field in
# the request's message-headers."
# It is possible to send a POST request with no body, for example;
# however, app developers are responsible in that case to set
# cherrypy.request.process_body to False so this method isn't called.
h = cherrypy.serving.request.headers
if 'Content-Length' not in h and 'Transfer-Encoding' not in h:
raise cherrypy.HTTPError(411)
self.fp = SizedReader(
self.fp,
self.length,
self.maxbytes,
bufsize=self.bufsize,
has_trailers='Trailer' in h,
)
super(RequestBody, self).process()
# Body params should also be a part of the request_params
# add them in here.
request_params = self.request_params
for key, value in self.params.items():
if key in request_params:
if not isinstance(request_params[key], list):
request_params[key] = [request_params[key]]
request_params[key].append(value)
else:
request_params[key] = value
| RequestBody |
python | spack__spack | lib/spack/spack/binary_distribution.py | {
"start": 2504,
"end": 3496
} | class ____(spack.database.Database):
"""A database for binary buildcaches.
A database supports writing buildcache index files, in which case certain fields are not
needed in each install record, and no locking is required. To use this feature, it provides
``lock_cfg=NO_LOCK``, and override the list of ``record_fields``.
"""
record_fields = ("spec", "ref_count", "in_buildcache")
def __init__(self, root):
super().__init__(root, lock_cfg=spack.database.NO_LOCK, layout=None)
self._write_transaction_impl = spack.llnl.util.lang.nullcontext
self._read_transaction_impl = spack.llnl.util.lang.nullcontext
def _handle_old_db_versions_read(self, check, db, *, reindex: bool):
if not self.is_readable():
raise spack.database.DatabaseNotReadableError(
f"cannot read buildcache v{self.db_version} at {self.root}"
)
return self._handle_current_version_read(check, db)
| BuildCacheDatabase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constrainedTypeVar14.py | {
"start": 239,
"end": 283
} | class ____(A): ...
T = TypeVar("T", A, B)
| A2 |
python | pytorch__pytorch | torch/distributed/tensor/_redistribute.py | {
"start": 35876,
"end": 40349
} | class ____(torch.autograd.Function):
@staticmethod
def forward( # type: ignore[override]
# pyre-fixme[2]: Parameter must be annotated.
ctx,
input: "dtensor.DTensor",
device_mesh: DeviceMesh,
placements: tuple[Placement, ...],
async_op: bool = False,
forward_dtype: torch.dtype | None = None,
backward_dtype: torch.dtype | None = None,
):
ctx.async_op = async_op
ctx.backward_dtype = backward_dtype
ctx.original_dtype = input._local_tensor.dtype
if forward_dtype is not None and forward_dtype != input._local_tensor.dtype:
local_tensor = input._local_tensor.to(dtype=forward_dtype)
current_spec = DTensorSpec(
mesh=device_mesh,
placements=input._spec.placements,
tensor_meta=TensorMeta(
shape=input.shape,
stride=input.stride(),
dtype=forward_dtype,
),
)
else:
local_tensor = input._local_tensor
current_spec = input._spec
ctx.current_spec = current_spec
if current_spec.placements != placements:
target_spec = DTensorSpec(
device_mesh, placements, tensor_meta=current_spec.tensor_meta
)
output = redistribute_local_tensor(
local_tensor, current_spec, target_spec, async_op=async_op
)
else:
# use the same local tensor if placements are the same.
output = local_tensor
target_spec = current_spec
# pyrefly: ignore [bad-argument-type]
return dtensor.DTensor(
# pyrefly: ignore [bad-argument-count]
output,
target_spec,
# pyrefly: ignore [unexpected-keyword]
requires_grad=input.requires_grad,
)
@staticmethod
def backward(ctx, grad_output: "dtensor.DTensor"): # type: ignore[override]
previous_spec = ctx.current_spec
async_op = ctx.async_op
backward_dtype = ctx.backward_dtype or ctx.original_dtype
if backward_dtype != grad_output._local_tensor.dtype:
local_tensor = grad_output._local_tensor.to(dtype=backward_dtype)
current_spec = DTensorSpec(
mesh=grad_output._spec.device_mesh,
placements=grad_output._spec.placements,
tensor_meta=TensorMeta(
shape=grad_output.shape,
stride=grad_output.stride(),
dtype=backward_dtype,
),
)
previous_spec = DTensorSpec(
mesh=previous_spec.device_mesh,
placements=previous_spec.placements,
tensor_meta=current_spec.tensor_meta,
)
else:
local_tensor = grad_output._local_tensor
current_spec = grad_output._spec
output = redistribute_local_tensor(
local_tensor,
current_spec,
previous_spec,
async_op=async_op,
is_backward=True,
)
if output.dtype != ctx.original_dtype:
output = output.to(ctx.original_dtype)
# normalize the target placement to replicate if it is partial
normalized_placements: list[Placement] = []
for previous_placement in previous_spec.placements:
if previous_placement.is_partial():
# keep target placement to replicate instead of partial in this case
normalized_placements.append(Replicate())
else:
normalized_placements.append(previous_placement)
spec = DTensorSpec(
previous_spec.device_mesh,
tuple(normalized_placements),
tensor_meta=TensorMeta(
shape=grad_output.shape,
stride=grad_output.stride(),
dtype=output.dtype,
),
)
# pyrefly: ignore [bad-argument-type]
output_dtensor = dtensor.DTensor(
# pyrefly: ignore [bad-argument-count]
output,
spec,
# pyrefly: ignore [unexpected-keyword]
requires_grad=grad_output.requires_grad,
)
return (
output_dtensor,
None,
None,
None,
None,
None,
)
| Redistribute |
python | readthedocs__readthedocs.org | readthedocs/projects/models.py | {
"start": 62143,
"end": 65923
} | class ____(TimeStampedModel):
"""A custom domain name for a project."""
# TODO: Overridden from TimeStampedModel just to allow null values,
# remove after deploy.
created = CreationDateTimeField(
_("created"),
null=True,
blank=True,
)
project = models.ForeignKey(
Project,
related_name="domains",
on_delete=models.CASCADE,
)
domain = models.CharField(
_("Domain"),
unique=True,
max_length=255,
validators=[validate_domain_name, validate_no_ip],
)
machine = models.BooleanField(
default=False,
help_text=_("This domain was auto-created"),
)
cname = models.BooleanField(
default=False,
help_text=_("This domain is a CNAME for the project"),
)
canonical = models.BooleanField(
default=False,
help_text=_(
"This domain is the primary one where the documentation is served from",
),
)
https = models.BooleanField(
_("Use HTTPS"),
default=True,
help_text=_("Always use HTTPS for this domain"),
)
count = models.IntegerField(
default=0,
help_text=_("Number of times this domain has been hit"),
)
# This is used in readthedocsext.
ssl_status = models.CharField(
_("SSL certificate status"),
max_length=30,
choices=constants.SSL_STATUS_CHOICES,
default=constants.SSL_STATUS_UNKNOWN,
# Remove after deploy
null=True,
blank=True,
)
skip_validation = models.BooleanField(
_("Skip validation process."),
default=False,
)
validation_process_start = models.DateTimeField(
_("Start date of the validation process."),
auto_now_add=True,
)
# Strict-Transport-Security header options
# These are not exposed to users because it's easy to misconfigure things
# and hard to back out changes cleanly
hsts_max_age = models.PositiveIntegerField(
default=0,
help_text=_("Set a custom max-age (eg. 31536000) for the HSTS header"),
)
hsts_include_subdomains = models.BooleanField(
default=False,
help_text=_("If hsts_max_age > 0, set the includeSubDomains flag with the HSTS header"),
)
hsts_preload = models.BooleanField(
default=False,
help_text=_("If hsts_max_age > 0, set the preload flag with the HSTS header"),
)
objects = DomainQueryset.as_manager()
class Meta:
ordering = ("-canonical", "-machine", "domain")
def __str__(self):
return self.domain
@property
def is_valid(self):
return self.ssl_status == constants.SSL_STATUS_VALID
@property
def validation_process_expiration_date(self):
return self.validation_process_start.date() + timezone.timedelta(
days=settings.RTD_CUSTOM_DOMAINS_VALIDATION_PERIOD
)
@property
def validation_process_expired(self):
return timezone.now().date() >= self.validation_process_expiration_date
def restart_validation_process(self):
"""Restart the validation process if it has expired."""
if not self.is_valid and self.validation_process_expired:
self.validation_process_start = timezone.now()
self.save()
def clean(self):
# Only check the limit when creating a new domain,
# not when updating existing ones.
if not self.pk:
check_domains_limit(self.project)
def save(self, *args, **kwargs):
parsed = urlparse(self.domain)
if parsed.scheme or parsed.netloc:
self.domain = parsed.netloc
else:
self.domain = parsed.path
super().save(*args, **kwargs)
| Domain |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/custom_io_manager.py | {
"start": 1556,
"end": 2131
} | class ____(dg.IOManager):
def _get_path(self, context) -> str:
if context.has_partition_key:
return "/".join(context.asset_key.path + [context.asset_partition_key])
else:
return "/".join(context.asset_key.path)
def handle_output(self, context: dg.OutputContext, obj):
write_csv(self._get_path(context), obj)
def load_input(self, context: dg.InputContext):
return read_csv(self._get_path(context))
# end_partitioned_marker
# start_df_marker
from dagster import ConfigurableIOManager
| MyPartitionedIOManager |
python | google__jax | tests/jaxpr_effects_test.py | {
"start": 5023,
"end": 9982
} | class ____(jtu.JaxTestCase):
def test_core_call_primitive_inherits_effects(self):
def f(x):
def f_(x):
effect_p.bind(effect=foo_effect)
effect_p.bind(effect=bar_effect)
return [x]
dbg = api_util.debug_info("test", f_, (2.,), {})
return core.call(
lu.wrap_init(f_, debug_info=dbg), x)[0]
jaxpr = jax.make_jaxpr(f)(2.)
self.assertIn(foo_effect, jaxpr.jaxpr.effects)
self.assertIn(bar_effect, jaxpr.jaxpr.effects)
def test_jit_primitive_inherits_effects(self):
@jax.jit
def f(x):
effect_p.bind(effect=foo_effect)
effect_p.bind(effect=bar_effect)
return x
jax.make_jaxpr(f)(2.)
jaxpr = jax.make_jaxpr(f)(2.)
self.assertIn(foo_effect, jaxpr.jaxpr.effects)
self.assertIn(bar_effect, jaxpr.jaxpr.effects)
def test_remat_call_primitive_inherits_effects(self):
@jax.checkpoint
def f(x):
x, = effect_p.bind(x, effect=foo_effect)
x, = effect_p.bind(x, effect=bar_effect)
return x
jax.make_jaxpr(f)(2.)
with self.assertRaisesRegex(NotImplementedError, "Effects not supported"):
jax.make_jaxpr(lambda x: jax.linearize(f, x)[1](x))(2.)
def test_new_remat_allows_certain_effects(self):
remat_effect = RematEffect()
@ad_checkpoint.checkpoint
def f(x):
x, = effect_p.bind(x, effect=remat_effect)
return x
jaxpr = jax.make_jaxpr(f)(2.)
self.assertSetEqual(jaxpr.effects, {remat_effect})
def test_custom_jvp_primitive_inherits_effects(self):
@jax.custom_jvp
def f(x):
effect_p.bind(effect=foo_effect)
effect_p.bind(effect=bar_effect)
return x
f.defjvp(lambda x, t: (x, t))
with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
jax.make_jaxpr(f)(2.)
def test_custom_vjp_primitive_inherits_effects(self):
@jax.custom_vjp
def f(x):
effect_p.bind(effect=foo_effect)
effect_p.bind(effect=bar_effect)
return x
f.defvjp(
fwd=lambda x: (x, ()),
bwd=lambda _, g: g)
with self.assertRaisesRegex(NotImplementedError, 'Effects not supported'):
jax.make_jaxpr(f)(2.)
def test_pmap_inherits_effects(self):
if config.pmap_shmap_merge.value:
self.skipTest("Test does not raise under `pmap_shmap_merge=True`.")
@jax.pmap
def f(x):
effect_p.bind(effect=foo_effect)
effect_p.bind(effect=bar_effect)
return x
with self.assertRaisesRegex(
ValueError,
r"Ordered effects not supported for map primitives: \[.*\]"):
jax.make_jaxpr(f)(jnp.arange(jax.local_device_count()))
def test_jit_inherits_effects(self):
def f(x):
effect_p.bind(effect=foo_effect)
effect_p.bind(effect=bar_effect)
return x
mesh = jax.sharding.Mesh(np.array(jax.devices()), ['x'])
spec = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('x'))
f = jax.jit(f, in_shardings=spec, out_shardings=spec)
with jax.set_mesh(mesh):
jaxpr = jax.make_jaxpr(f)(np.arange(jax.local_device_count()))
self.assertSetEqual(jaxpr.effects, {foo_effect, bar_effect})
def test_pjit_const_input_effect_indexing(self):
# https://github.com/jax-ml/jax/issues/32399
@jax.jit
def bar(x, w):
def scan_fn(x, _):
c = jnp.array([])
o = w[...] @ x
x = jnp.concatenate([x, c], axis=-1)
return x, None
x, _ = jax.lax.scan(scan_fn, x, None, length=10)
return x
@jax.jit
def foo(w):
return bar(jnp.zeros((1,)), w)
foo(jax.new_ref(jnp.eye(1))) # don't crash
def test_jit_const_input_effect_indexing(self):
@jax.jit
def bar(w):
x = jnp.zeros((1,)) + jnp.array([0.])
x = jax.jit(lambda x: x + w[...])(x)
return x
@jax.jit
def foo(w):
return bar(w)
foo(jax.new_ref(jnp.ones((1,))))
jax.grad(jax.remat(lambda x: foo(jax.new_ref(x)).sum()))(jnp.ones((1,)))
def test_cond_const_input_effect_indexing(self):
@jax.custom_jvp
def weird(x):
return x
@weird.defjvp
def weird_jvp(primals, tangents):
(x,), (xdot,) = primals, tangents
return jnp.sum(np.ones(3)) * x, xdot
@jax.jit
def f(x):
x_ref = jax.new_ref(0.)
return jax.lax.cond(x < 0, lambda: x_ref[...], lambda: weird(x[...]))
jax.jvp(f, (1.,), (1.,))
def test_scan_const_input_effect_indexing(self):
@jax.custom_jvp
def weird(x):
return x
@weird.defjvp
def weird_jvp(primals, tangents):
(x,), (xdot,) = primals, tangents
return jnp.sum(np.ones(3)) * x, xdot
@jax.jit
def f(x):
x_ref = jax.new_ref(0.)
y, () = jax.lax.scan(lambda _, __: (weird(x_ref[...]), ()),
x_ref[...], length=1)
return y
jax.jvp(f, (1.,), (1.,))
jax.grad(jax.remat(f))(1.)
@jtu.thread_unsafe_test_class() # because of mlir.register_lowering calls
| HigherOrderPrimitiveTest |
python | pypa__pipenv | pipenv/vendor/tomlkit/items.py | {
"start": 7923,
"end": 8211
class ____(Enum):
    """TOML boolean literal kinds; members carry the literal text."""

    TRUE = "true"
    FALSE = "false"

    def __bool__(self):
        # Truthiness mirrors the literal: TRUE -> True, FALSE -> False.
        truth = {BoolType.TRUE: True, BoolType.FALSE: False}
        return truth[self]

    def __iter__(self):
        # Iterate the characters of the literal text ("true"/"false").
        return iter(self.value)

    def __len__(self):
        # Length of the literal text (4 or 5).
        return len(self.value)
@dataclasses.dataclass
| BoolType |
python | huggingface__transformers | src/transformers/models/deit/modeling_deit.py | {
"start": 5414,
"end": 8042
class ____(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        # Accept either a single int or an iterable (height, width) for both sizes.
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        # Number of non-overlapping patches tiling the image.
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        # A conv with stride == kernel == patch size implements the patch projection.
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # (B, C, H, W) -> (B, hidden, H/ph * W/pw) -> (B, num_patches, hidden)
        x = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return x
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: Optional[float] = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Plain (non-fused) scaled dot-product attention.

    Returns the attention output with heads moved back next to the sequence
    dimension, plus the (post-dropout) attention probabilities.
    """
    if scaling is None:
        # Default to 1/sqrt(head_dim).
        scaling = query.size(-1) ** -0.5

    # Raw attention logits: Q @ K^T, scaled.
    scores = torch.matmul(query, key.transpose(2, 3)) * scaling

    if attention_mask is not None:
        # Trim the additive mask to the key length before applying it.
        scores = scores + attention_mask[:, :, :, : key.shape[-2]]

    probs = nn.functional.softmax(scores, dim=-1)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)

    # Weighted sum over values; (B, H, S, D) -> (B, S, H, D), contiguous for reshape.
    context = torch.matmul(probs, value).transpose(1, 2).contiguous()
    return context, probs
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->DeiT
| DeiTPatchEmbeddings |
python | dagster-io__dagster | python_modules/libraries/dagster-snowflake/dagster_snowflake/resources.py | {
"start": 33633,
"end": 46731
class ____:
    """A connection to Snowflake that can execute queries. In general this class should not be
    directly instantiated, but rather used as a resource in an op or asset via the
    :py:func:`snowflake_resource`.

    Note that the SnowflakeConnection is only used by the snowflake_resource. The Pythonic SnowflakeResource does
    not use this SnowflakeConnection class.
    """

    def __init__(
        self, config: Mapping[str, str], log, snowflake_connection_resource: SnowflakeResource
    ):
        # NOTE: `config` is accepted for interface compatibility but is not
        # stored; all connection details live on the wrapped resource.
        self.snowflake_connection_resource = snowflake_connection_resource
        self.log = log

    @public
    @contextmanager
    def get_connection(
        self, raw_conn: bool = True
    ) -> Iterator[Union[SqlDbConnection, snowflake.connector.SnowflakeConnection]]:
        """Gets a connection to Snowflake as a context manager.

        If using the execute_query, execute_queries, or load_table_from_local_parquet methods,
        you do not need to create a connection using this context manager.

        Args:
            raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw
                connection. Defaults to True.

        Examples:
            .. code-block:: python

                @op(
                    required_resource_keys={"snowflake"}
                )
                def get_query_status(query_id):
                    with context.resources.snowflake.get_connection() as conn:
                        # conn is a Snowflake Connection object or a SQLAlchemy Connection if
                        # sqlalchemy is specified as the connector in the Snowflake Resource config
                        return conn.get_query_status(query_id)

        """
        with self.snowflake_connection_resource.get_connection(raw_conn=raw_conn) as conn:
            yield conn

    @public
    def execute_query(
        self,
        sql: str,
        parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,
        fetch_results: bool = False,
        use_pandas_result: bool = False,
    ):
        """Execute a query in Snowflake.

        Args:
            sql (str): the query to be executed
            parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to the query. See the
                `Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__
                for more information.
            fetch_results (bool): If True, will return the result of the query. Defaults to False. If True
                and use_pandas_result is also True, results will be returned as a Pandas DataFrame.
            use_pandas_result (bool): If True, will return the result of the query as a Pandas DataFrame.
                Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be
                raised.

        Returns:
            The result of the query if fetch_results or use_pandas_result is True, otherwise returns None

        Examples:
            .. code-block:: python

                @op
                def drop_database(snowflake: SnowflakeResource):
                    snowflake.execute_query(
                        "DROP DATABASE IF EXISTS MY_DATABASE"
                    )
        """
        check.str_param(sql, "sql")
        check.opt_inst_param(parameters, "parameters", (list, dict))
        check.bool_param(fetch_results, "fetch_results")
        if not fetch_results and use_pandas_result:
            check.failed("If use_pandas_result is True, fetch_results must also be True.")

        with self.get_connection() as conn:
            with closing(conn.cursor()) as cursor:
                self.log.info("Executing query: " + sql)
                # Copy mappings so parameter binding cannot mutate the caller's dict.
                parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters
                cursor.execute(sql, parameters)
                if use_pandas_result:
                    return cursor.fetch_pandas_all()
                if fetch_results:
                    return cursor.fetchall()

    @public
    def execute_queries(
        self,
        sql_queries: Sequence[str],
        parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,
        fetch_results: bool = False,
        use_pandas_result: bool = False,
    ) -> Optional[Sequence[Any]]:
        """Execute multiple queries in Snowflake.

        Args:
            sql_queries (str): List of queries to be executed in series
            parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to every query. See the
                `Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__
                for more information.
            fetch_results (bool): If True, will return the results of the queries as a list. Defaults to False. If True
                and use_pandas_result is also True, results will be returned as Pandas DataFrames.
            use_pandas_result (bool): If True, will return the results of the queries as a list of a Pandas DataFrames.
                Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be
                raised.

        Returns:
            The results of the queries as a list if fetch_results or use_pandas_result is True,
            otherwise returns None

        Examples:
            .. code-block:: python

                @op
                def create_fresh_database(snowflake: SnowflakeResource):
                    queries = ["DROP DATABASE IF EXISTS MY_DATABASE", "CREATE DATABASE MY_DATABASE"]
                    snowflake.execute_queries(
                        sql_queries=queries
                    )
        """
        check.sequence_param(sql_queries, "sql_queries", of_type=str)
        check.opt_inst_param(parameters, "parameters", (list, dict))
        check.bool_param(fetch_results, "fetch_results")
        if not fetch_results and use_pandas_result:
            check.failed("If use_pandas_result is True, fetch_results must also be True.")

        results: list[Any] = []
        with self.get_connection() as conn:
            with closing(conn.cursor()) as cursor:
                # Normalize mapping parameters once; the same parameters are
                # bound for every query in the batch.
                parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters
                for sql in sql_queries:
                    # (The dead Python-2 `raw_sql.encode("utf-8")` branch was
                    # removed; the connector accepts str directly on Python 3,
                    # and bytes would have broken the log concatenation below.)
                    self.log.info("Executing query: " + sql)
                    cursor.execute(sql, parameters)
                    if use_pandas_result:
                        # BUGFIX: list.append returns None, so the previous
                        # `results = results.append(...)` discarded the list
                        # and crashed on the second query.
                        results.append(cursor.fetch_pandas_all())
                    elif fetch_results:
                        results.append(cursor.fetchall())

        return results if len(results) > 0 else None

    @public
    def load_table_from_local_parquet(self, src: str, table: str):
        """Stores the content of a parquet file to a Snowflake table.

        Args:
            src (str): the name of the file to store in Snowflake
            table (str): the name of the table to store the data. If the table does not exist, it will
                be created. Otherwise the contents of the table will be replaced with the data in src

        Examples:
            .. code-block:: python

                import pandas as pd
                import pyarrow as pa
                import pyarrow.parquet as pq

                @op
                def write_parquet_file(snowflake: SnowflakeResource):
                    df = pd.DataFrame({"one": [1, 2, 3], "ten": [11, 12, 13]})
                    table = pa.Table.from_pandas(df)
                    pq.write_table(table, "example.parquet")
                    snowflake.load_table_from_local_parquet(
                        src="example.parquet",
                        table="MY_TABLE"
                    )
        """
        check.str_param(src, "src")
        check.str_param(table, "table")

        sql_queries = [
            f"CREATE OR REPLACE TABLE {table} ( data VARIANT DEFAULT NULL);",
            "CREATE OR REPLACE FILE FORMAT parquet_format TYPE = 'parquet';",
            f"PUT {src} @%{table};",
            f"COPY INTO {table} FROM @%{table} FILE_FORMAT = (FORMAT_NAME = 'parquet_format');",
        ]
        self.execute_queries(sql_queries)
@dagster_maintained_resource
@resource(
    config_schema=SnowflakeResource.to_config_schema(),
    description="This resource is for connecting to the Snowflake data warehouse",
)
def snowflake_resource(context) -> SnowflakeConnection:
    """A resource for connecting to the Snowflake data warehouse. The returned resource object is an
    instance of :py:class:`SnowflakeConnection`.

    A simple example of loading data into Snowflake and subsequently querying that data is shown below:

    Examples:
        .. code-block:: python

            from dagster import job, op
            from dagster_snowflake import snowflake_resource

            @op(required_resource_keys={'snowflake'})
            def get_one(context):
                context.resources.snowflake.execute_query('SELECT 1')

            @job(resource_defs={'snowflake': snowflake_resource})
            def my_snowflake_job():
                get_one()

            my_snowflake_job.execute_in_process(
                run_config={
                    'resources': {
                        'snowflake': {
                            'config': {
                                'account': {'env': 'SNOWFLAKE_ACCOUNT'},
                                'user': {'env': 'SNOWFLAKE_USER'},
                                'password': {'env': 'SNOWFLAKE_PASSWORD'},
                                'database': {'env': 'SNOWFLAKE_DATABASE'},
                                'schema': {'env': 'SNOWFLAKE_SCHEMA'},
                                'warehouse': {'env': 'SNOWFLAKE_WAREHOUSE'},
                            }
                        }
                    }
                }
            )
    """
    snowflake_resource = SnowflakeResource.from_resource_context(context)
    # The init context is passed as `config`; SnowflakeConnection.__init__ only
    # stores `log` and the wrapped resource, so the value is effectively unused.
    return SnowflakeConnection(
        config=context, log=context.log, snowflake_connection_resource=snowflake_resource
    )
def fetch_last_updated_timestamps(
    *,
    snowflake_connection: Union[SqlDbConnection, snowflake.connector.SnowflakeConnection],
    schema: str,
    tables: Sequence[str],
    database: Optional[str] = None,
    ignore_missing_tables: Optional[bool] = False,
) -> Mapping[str, datetime]:
    """Fetch the last updated times of a list of tables in Snowflake.

    If the underlying query to fetch the last updated time returns no results, a ValueError will be raised.

    Args:
        snowflake_connection (Union[SqlDbConnection, SnowflakeConnection]): A connection to Snowflake.
            Accepts either a SnowflakeConnection or a sqlalchemy connection object,
            which are the two types of connections emittable from the snowflake resource.
        schema (str): The schema of the tables to fetch the last updated time for.
        tables (Sequence[str]): A list of table names to fetch the last updated time for.
        database (Optional[str]): The database of the table. Only required if the connection
            has not been set with a database.
        ignore_missing_tables (Optional[bool]): If True, tables not found in Snowflake
            will be excluded from the result.

    Returns:
        Mapping[str, datetime]: A dictionary of table names to their last updated time in UTC.
    """
    check.invariant(len(tables) > 0, "Must provide at least one table name to query upon.")
    # Table names in snowflake's information schema are stored in uppercase
    uppercase_tables = [table.upper() for table in tables]
    tables_str = ", ".join([f"'{table_name}'" for table_name in uppercase_tables])
    fully_qualified_table_name = (
        f"{database}.information_schema.tables" if database else "information_schema.tables"
    )

    # NOTE(review): schema/table names are interpolated directly into the SQL
    # text rather than bound as parameters; callers must pass trusted names.
    query = f"""
    SELECT table_name, CONVERT_TIMEZONE('UTC', last_altered) AS last_altered
    FROM {fully_qualified_table_name}
    WHERE table_schema = '{schema}' AND table_name IN ({tables_str});
    """
    result = snowflake_connection.cursor().execute(query)
    if not result:
        raise ValueError("No results returned from Snowflake update time query.")

    # Map uppercase table name -> last_altered timestamp from the result rows.
    result_mapping = {table_name: last_altered for table_name, last_altered in result}
    result_correct_case = {}
    for table_name in tables:
        if table_name.upper() not in result_mapping:
            if ignore_missing_tables:
                continue
            raise ValueError(f"Table {table_name} could not be found.")
        last_altered = result_mapping[table_name.upper()]
        check.invariant(
            isinstance(last_altered, datetime),
            "Expected last_altered to be a datetime, but it was not.",
        )
        # Return timestamps keyed by the caller's original (possibly lowercase) names.
        result_correct_case[table_name] = last_altered

    return result_correct_case
| SnowflakeConnection |
python | getsentry__sentry | tests/sentry/sentry_apps/tasks/test_sentry_apps.py | {
"start": 63278,
"end": 68510
} | class ____(TestCase):
def setUp(self) -> None:
self.project = self.create_project()
self.user = self.create_user()
self.sentry_app = self.create_sentry_app(
organization=self.project.organization,
events=["issue.resolved", "issue.ignored", "issue.assigned"],
)
self.install = self.create_sentry_app_installation(
organization=self.project.organization, slug=self.sentry_app.slug
)
self.issue = self.create_group(project=self.project)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_sends_resolved_webhook(self, mock_record: MagicMock, safe_urlopen: MagicMock) -> None:
workflow_notification(self.install.id, self.issue.id, "resolved", self.user.id)
((_, kwargs),) = safe_urlopen.call_args_list
assert kwargs["url"] == self.sentry_app.webhook_url
assert kwargs["headers"]["Sentry-Hook-Resource"] == "issue"
data = json.loads(kwargs["data"])
assert data["action"] == "resolved"
assert data["data"]["issue"]["id"] == str(self.issue.id)
# SLO assertions
assert_success_metric(mock_record)
# PREPARE_WEBHOOK (success) -> SEND_WEBHOOK (success) -> SEND_WEBHOOK (success)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=3
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=3
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_sends_resolved_webhook_as_Sentry_without_user(
self, mock_record: MagicMock, safe_urlopen: MagicMock
) -> None:
workflow_notification(self.install.id, self.issue.id, "resolved", None)
((_, kwargs),) = safe_urlopen.call_args_list
data = json.loads(kwargs["data"])
assert data["actor"]["type"] == "application"
assert data["actor"]["id"] == "sentry"
assert data["actor"]["name"] == "Sentry"
# SLO assertions
assert_success_metric(mock_record)
# PREPARE_WEBHOOK (success) -> SEND_WEBHOOK (success) -> SEND_WEBHOOK (success)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=3
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=3
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_does_not_send_if_no_service_hook_exists(
self, mock_record: MagicMock, safe_urlopen: MagicMock
) -> None:
sentry_app = self.create_sentry_app(
name="Another App", organization=self.project.organization, events=[]
)
install = self.create_sentry_app_installation(
organization=self.project.organization, slug=sentry_app.slug
)
workflow_notification(install.id, self.issue.id, "assigned", self.user.id)
assert not safe_urlopen.called
# SLO assertions
assert_failure_metric(
mock_record, SentryAppSentryError(SentryAppWebhookFailureReason.MISSING_SERVICEHOOK)
)
# APP_CREATE (success) -> UPDATE_WEBHOOK (success) -> GRANT_EXCHANGER (success) -> PREPARE_WEBHOOK (success) -> send_webhook (error)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=5
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=4
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.FAILURE, outcome_count=1
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_does_not_send_if_event_not_in_app_events(
self, mock_record: MagicMock, safe_urlopen: MagicMock
) -> None:
sentry_app = self.create_sentry_app(
name="Another App",
organization=self.project.organization,
events=["issue.resolved", "issue.ignored"],
)
install = self.create_sentry_app_installation(
organization=self.project.organization, slug=sentry_app.slug
)
workflow_notification(install.id, self.issue.id, "assigned", self.user.id)
assert not safe_urlopen.called
# SLO assertions
assert_failure_metric(
mock_record, SentryAppSentryError(SentryAppWebhookFailureReason.EVENT_NOT_IN_SERVCEHOOK)
)
# APP_CREATE (success) -> UPDATE_WEBHOOK (success) -> GRANT_EXCHANGER (success) -> PREPARE_WEBHOOK (success) -> SEND_WEBHOOK (failure)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=5
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=4
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.FAILURE, outcome_count=1
)
| TestWorkflowNotification |
python | wandb__wandb | wandb/filesync/step_prepare.py | {
"start": 630,
"end": 2732
class ____(NamedTuple):
    """Parsed server reply for a single prepared artifact file (built by prepare_response)."""

    # Id of the artifact the file was created under.
    birth_artifact_id: str
    # Pre-signed single-request upload URL (may be None).
    upload_url: Optional[str]
    # Extra headers to send with the upload request.
    upload_headers: Sequence[str]
    # Multipart upload id, when the server returned multipart info.
    upload_id: Optional[str]
    # Server-side storage path, when reported.
    storage_path: Optional[str]
    # Part number -> pre-signed URL for multipart uploads; None otherwise.
    multipart_upload_urls: Optional[Dict[int, str]]


# Either a prepare request or the finish sentinel consumed by the batching loop.
Request = Union[RequestPrepare, RequestFinish]
def _clamp(x: float, low: float, high: float) -> float:
return max(low, min(x, high))
def gather_batch(
    request_queue: "queue.Queue[Request]",
    batch_time: float,
    inter_event_time: float,
    max_batch_size: int,
    clock: Callable[[], float] = time.monotonic,
) -> Tuple[bool, Sequence[RequestPrepare]]:
    """Collect a batch of prepare requests from the queue.

    Blocks until the first request arrives, then keeps pulling until
    `batch_time` seconds have elapsed since the batch started, the batch
    reaches `max_batch_size`, or no request arrives within `inter_event_time`
    seconds.

    Returns:
        (finish_seen, batch): `finish_seen` is True when a RequestFinish was
        dequeued (batching should stop); `batch` holds the RequestPrepare
        items gathered so far.
    """
    batch_start_time = clock()
    remaining_time = batch_time

    # The first get blocks indefinitely: there is no batch until one request exists.
    first_request = request_queue.get()
    if isinstance(first_request, RequestFinish):
        return True, []

    batch: List[RequestPrepare] = [first_request]
    while remaining_time > 0 and len(batch) < max_batch_size:
        try:
            request = request_queue.get(
                timeout=_clamp(
                    x=inter_event_time,
                    low=1e-12,  # 0 = "block forever", so just use something tiny
                    high=remaining_time,
                ),
            )
            if isinstance(request, RequestFinish):
                return True, batch
            batch.append(request)
            # Recompute the budget left for this batch after each arrival.
            remaining_time = batch_time - (clock() - batch_start_time)
        except queue.Empty:
            # No request within inter_event_time: close the batch early.
            break
    return False, batch
def prepare_response(response: "CreateArtifactFilesResponseFile") -> ResponsePrepare:
    """Convert a raw createArtifactFiles file entry into a ResponsePrepare.

    Multipart upload info is optional in the server response; when present,
    the per-part URLs are flattened into a {part_number: url} mapping.
    """
    multipart_resp = response.get("uploadMultipartUrls")
    part_list = multipart_resp["uploadUrlParts"] if multipart_resp else []
    # `or None` collapses an empty part mapping to None, matching the field type.
    multipart_parts = {u["partNumber"]: u["uploadUrl"] for u in part_list} or None

    return ResponsePrepare(
        birth_artifact_id=response["artifact"]["id"],
        upload_url=response["uploadUrl"],
        upload_headers=response["uploadHeaders"],
        # Short-circuits to the falsy multipart_resp value when multipart is absent.
        upload_id=multipart_resp and multipart_resp.get("uploadID"),
        storage_path=response.get("storagePath"),
        multipart_upload_urls=multipart_parts,
    )
| ResponsePrepare |
python | falconry__falcon | tests/test_validators.py | {
"start": 2637,
"end": 5269
class ____:
    """Stub response object whose `media` is valid or invalid fixture data."""

    def __init__(self, valid=True):
        # Valid fixture media by default; an empty dict fails schema validation.
        if valid:
            self.media = _VALID_MEDIA
        else:
            self.media = {}
def call_method(asgi, method_name, *args):
resource = ResourceAsync() if asgi else Resource()
if asgi:
return falcon.async_to_sync(getattr(resource, method_name), *args)
return getattr(resource, method_name)(*args)
def test_req_schema_validation_success(asgi, jsonschema):
data = MockResp()
assert call_method(asgi, 'request_validated', MockReq(asgi), data) is data
@pytest.mark.parametrize(
'exception_cls', [falcon.HTTPBadRequest, falcon.MediaValidationError]
)
def test_req_schema_validation_failure(asgi, exception_cls, jsonschema):
with pytest.raises(exception_cls) as excinfo:
call_method(asgi, 'request_validated', MockReq(asgi, False), None)
assert excinfo.value.description == "'message' is a required property"
def test_resp_schema_validation_success(asgi, jsonschema):
data = MockResp()
assert call_method(asgi, 'response_validated', MockReq(asgi), data) is data
def test_resp_schema_validation_failure(asgi, jsonschema):
with pytest.raises(falcon.HTTPInternalServerError) as excinfo:
call_method(asgi, 'response_validated', MockReq(asgi), MockResp(False))
assert excinfo.value.title == 'Response data failed validation'
def test_both_schemas_validation_success(asgi, util, jsonschema):
req = MockReq(asgi)
resp = MockResp()
result = call_method(asgi, 'both_validated', req, resp)
assert result[0] is req
assert result[1] is resp
client = testing.TestClient(util.create_app(asgi))
resource = ResourceAsync() if asgi else Resource()
client.app.add_route('/test', resource)
result = client.simulate_put('/test', json=_VALID_MEDIA)
assert result.json == resp.media
def test_both_schemas_validation_failure(asgi, util, jsonschema):
bad_resp = MockResp(False)
with pytest.raises(falcon.HTTPInternalServerError) as excinfo:
call_method(asgi, 'both_validated', MockReq(asgi), bad_resp)
assert excinfo.value.title == 'Response data failed validation'
with pytest.raises(falcon.HTTPBadRequest) as excinfo:
call_method(asgi, 'both_validated', MockReq(asgi, False), MockResp())
assert excinfo.value.title == 'Request data failed validation'
client = testing.TestClient(util.create_app(asgi))
resource = ResourceAsync() if asgi else Resource()
with util.disable_asgi_non_coroutine_wrapping():
client.app.add_route('/test', resource)
result = client.simulate_put('/test', json=_INVALID_MEDIA)
assert result.status_code == 400
| MockResp |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 20093,
"end": 21318
} | class ____(Response):
"""
Response of queues.add_or_update_metadata endpoint.
:param updated: Number of queues updated (0 or 1)
:type updated: int
"""
_service = "queues"
_action = "add_or_update_metadata"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of queues updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(AddOrUpdateMetadataResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| AddOrUpdateMetadataResponse |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_coordinator_test.py | {
"start": 4635,
"end": 14302
} | class ____(test.TestCase):
@classmethod
def setUpClass(cls):
# We have to create a global in-process cluster because once an in-process
# tensorflow server is created, there is no way to terminate it. Please see
# multi_worker_test_base.py for more details.
# TODO(yuefengz): use the utitliy from multi_worker_test_base.
cls._workers, cls._ps = test_util.create_local_cluster(
NUM_WORKERS, num_ps=NUM_PS)
cls._cluster_spec = {
WORKER: [
_strip_protocol(_bytes_to_str(w.target)) for w in cls._workers
],
PS: [_strip_protocol(_bytes_to_str(ps.target)) for ps in cls._ps]
}
def setUp(self):
self._result_correct = 0
self._lock = threading.Lock()
self._worker_context = {}
self._strategy_property = {}
self._std_servers = {}
self._barrier = distribute_coordinator._Barrier(NUM_WORKERS)
self._coord = coordinator.Coordinator()
@contextlib.contextmanager
def _test_session(self, target):
config = config_pb2.ConfigProto(allow_soft_placement=True)
config.graph_options.optimizer_options.opt_level = -1
with session.Session(graph=None, config=config, target=target) as sess:
yield sess
# TODO(yuefengz): use the utitliy from multi_worker_test_base.
def _create_cluster_spec(self,
has_chief=False,
num_workers=1,
num_ps=0,
has_eval=False):
cluster_spec = {}
if has_chief:
cluster_spec[CHIEF] = ["localhost:%s" % test_util.pick_unused_port()]
if num_workers:
cluster_spec[WORKER] = [
"localhost:%s" % test_util.pick_unused_port()
for _ in range(num_workers)
]
if num_ps:
cluster_spec[PS] = [
"localhost:%s" % test_util.pick_unused_port() for _ in range(num_ps)
]
if has_eval:
cluster_spec[EVALUATOR] = ["localhost:%s" % test_util.pick_unused_port()]
return cluster_spec
def _in_graph_worker_fn(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with self._test_session(target=context.master_target) as sess:
xs = []
expected = 0.0
for i in range(context.num_workers):
with ops.device("/job:worker/task:%d" % i):
x = variable_scope.get_variable("x_%d" % i, initializer=10.0)
x_add = x.assign_add(float(i))
xs.append(x_add)
expected += i + 10.0
with ops.device("/job:worker/task:0"):
result = math_ops.add_n(xs)
self.evaluate(variables.global_variables_initializer())
result_value = sess.run(result)
self.assertEqual(result_value, expected)
if result_value == expected:
self._result_correct += 1
def _wrapped_worker_fn(self, worker_fn):
def wrapped(*args, **kwargs):
with self._coord.stop_on_exception():
return worker_fn(*args, **kwargs)
return wrapped
def _run_coordinator_in_thread(self, worker_fn, strategy, **kwargs):
t = threading.Thread(
target=distribute_coordinator.run_distribute_coordinator,
args=(self._wrapped_worker_fn(worker_fn), strategy),
kwargs=kwargs)
t.start()
return t
def _run_multiple_coordinator_in_threads(self, worker_fn, strategy,
cluster_spec, **kwargs):
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_coordinator_in_thread(
worker_fn,
strategy,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
**kwargs)
threads[task_type].append(t)
return threads
def _join_threads(self, threads):
try:
self._coord.join(threads)
except errors.UnknownError as e:
if "Could not start gRPC server" in e.message:
self.skipTest("Cannot start std servers.")
else:
raise
def _between_graph_worker_fn(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with self._test_session(target=context.master_target) as sess:
with ops.device("/job:ps/task:0"):
# TODO(yuefengz): investigate why not using resource variable will make
# the test flaky.
x = variable_scope.get_variable(
"x", initializer=10.0, use_resource=True)
with ops.device("/job:ps/task:1"):
y = variable_scope.get_variable(
"y", initializer=20.0, use_resource=True)
x_add = x.assign_add(2.0)
y_sub = y.assign_sub(2.0)
train_op = control_flow_ops.group([x_add, y_sub])
if context.is_chief:
self.evaluate(variables.global_variables_initializer())
# Synchronize workers after initialization.
if context.has_barrier:
context.wait_for_other_workers()
else:
while True:
uninit_vars = sess.run(variables.report_uninitialized_variables())
# pylint: disable=g-explicit-length-test
if len(uninit_vars) == 0:
break
sess.run(train_op)
# Synchronize workers after one step to make sure they all have finished
# training.
if context.has_barrier:
context.wait_for_other_workers()
else:
self._barrier.wait()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 16.0)
self.assertEqual(y_val, 14.0)
if x_val == 16.0 and y_val == 14.0:
with self._lock:
self._result_correct += 1
def _between_graph_with_monitored_session(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with ops.device("/job:ps/task:0"):
# TODO(yuefengz): investigate why not using resource variable will make
# the test flaky.
x = variable_scope.get_variable("xx", initializer=10.0, use_resource=True)
with ops.device("/job:ps/task:1"):
y = variable_scope.get_variable("yy", initializer=20.0, use_resource=True)
x_add = x.assign_add(2.0)
y_sub = y.assign_sub(2.0)
train_op = control_flow_ops.group([x_add, y_sub])
# The monitored session will run init or ready ops.
with monitored_session.MonitoredSession() as sess:
sess.run(train_op)
# Synchronize workers after one step to make sure they all have finished
# training.
if context.has_barrier:
context.wait_for_other_workers()
else:
self._barrier.wait()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 16.0)
self.assertEqual(y_val, 14.0)
if x_val == 16.0 and y_val == 14.0:
with self._lock:
self._result_correct += 1
def _dump_worker_context(self, strategy):
"""Dumps the properties of each worker context.
It dumps the context properties to a dict mapping from task_type to a list
of tuples of master_target, num_workers, is_chief and distribute_mode, where
the list is indexed by the task_id.
Args:
strategy: a `DistributionStrategy` object.
"""
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._worker_context:
self._worker_context[task_type] = []
while len(self._worker_context[task_type]) <= task_id:
self._worker_context[task_type].append(None)
self._worker_context[task_type][task_id] = (context.master_target,
context.num_workers,
context.is_chief,
context.distributed_mode)
def _dump_strategy_property(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
self.assertEqual(context._strategy.extended.experimental_should_init,
strategy.extended.experimental_should_init)
self.assertEqual(context.should_checkpoint,
strategy.extended.should_checkpoint)
self.assertEqual(context.should_save_summary,
strategy.extended.should_save_summary)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._strategy_property:
self._strategy_property[task_type] = []
while len(self._strategy_property[task_type]) <= task_id:
self._strategy_property[task_type].append(None)
self._strategy_property[task_type][task_id] = (
context._strategy.extended.experimental_should_init,
context.should_checkpoint,
context.should_save_summary)
def _run_mock_std_server(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None,
rpc_layer=None,
environment=None):
task_type = str(task_type)
task_id = task_id or 0
with self._lock:
if task_type not in self._std_servers:
self._std_servers[task_type] = []
while len(self._std_servers[task_type]) <= task_id:
self._std_servers[task_type].append(None)
server = MockServer()
self._std_servers[task_type][task_id] = server
return server
| DistributeCoordinatorTestBase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum6.py | {
"start": 530,
"end": 681
} | class ____(NonEnum, Color):
pass
# This should generate an error because reassignment of enum
# values is not allowed.
Color.red = "new"
| ExtraColor |
python | huggingface__transformers | src/transformers/models/persimmon/modeling_persimmon.py | {
"start": 8685,
"end": 14545
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.rotary_ndims = int(self.head_dim * config.rope_parameters["partial_rotary_factor"])
self.is_causal = True
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
self.qk_layernorm = config.qk_layernorm
self.scaling = self.head_dim**-0.5
if self.qk_layernorm:
self.q_layernorm = nn.LayerNorm(
config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
)
self.k_layernorm = nn.LayerNorm(
config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
)
self.attention_dropout = nn.Dropout(config.attention_dropout)
def _split_heads(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
storage as `fused_qkv`
Args:
fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]
Returns:
query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
value: [batch_size, seq_length, num_heads, head_dim]
"""
batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
# [batch_size, seq_length, 3 x hidden_size]
fused_qkv = self.query_key_value(hidden_states)
# 3 x [batch_size, seq_length, num_heads, head_dim]
(query_states, key_states, value_states) = self._split_heads(fused_qkv)
if self.qk_layernorm:
query_states = self.q_layernorm(query_states)
key_states = self.k_layernorm(key_states)
# [batch_size, num_heads, seq_length, head_dim] -> [batch_size, seq_length, num_heads, head_dim]
query_states = query_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
cos, sin = position_embeddings
query_rot, query_pass = (
query_states[..., : self.rotary_ndims],
query_states[..., self.rotary_ndims :],
)
key_rot, key_pass = (
key_states[..., : self.rotary_ndims],
key_states[..., self.rotary_ndims :],
)
# [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor]
query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)
# [batch_size, seq_length, num_heads, head_dim]
query_states = torch.cat((query_rot, query_pass), dim=-1)
key_states = torch.cat((key_rot, key_pass), dim=-1)
if past_key_values is not None:
# Specific to RoPE models with partial rotation
cache_kwargs = {
"sin": sin,
"cos": cos,
"partial_rotation_size": self.rotary_ndims,
"cache_position": cache_position,
}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.config.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, q_len, -1)
attn_output = self.dense(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
| PersimmonAttention |
python | pypa__setuptools | setuptools/errors.py | {
"start": 1143,
"end": 1313
} | class ____(OptionError): # type: ignore[valid-type, misc] # distutils imports are `Any` on python 3.12+
"""Error used for invalid configurations."""
| InvalidConfigError |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_aliases.py | {
"start": 4309,
"end": 19644
} | class ____(NamedTuple):
values: Array
inverse_indices: Array
def _unique_kwargs(xp: Namespace) -> dict[str, bool]:
# Older versions of NumPy and CuPy do not have equal_nan. Rather than
# trying to parse version numbers, just check if equal_nan is in the
# signature.
s = inspect.signature(xp.unique)
if "equal_nan" in s.parameters:
return {"equal_nan": False}
return {}
def unique_all(x: Array, /, xp: Namespace) -> UniqueAllResult:
kwargs = _unique_kwargs(xp)
values, indices, inverse_indices, counts = xp.unique(
x,
return_counts=True,
return_index=True,
return_inverse=True,
**kwargs,
)
# np.unique() flattens inverse indices, but they need to share x's shape
# See https://github.com/numpy/numpy/issues/20638
inverse_indices = inverse_indices.reshape(x.shape)
return UniqueAllResult(
values,
indices,
inverse_indices,
counts,
)
def unique_counts(x: Array, /, xp: Namespace) -> UniqueCountsResult:
kwargs = _unique_kwargs(xp)
res = xp.unique(
x, return_counts=True, return_index=False, return_inverse=False, **kwargs
)
return UniqueCountsResult(*res)
def unique_inverse(x: Array, /, xp: Namespace) -> UniqueInverseResult:
kwargs = _unique_kwargs(xp)
values, inverse_indices = xp.unique(
x,
return_counts=False,
return_index=False,
return_inverse=True,
**kwargs,
)
# xp.unique() flattens inverse indices, but they need to share x's shape
# See https://github.com/numpy/numpy/issues/20638
inverse_indices = inverse_indices.reshape(x.shape)
return UniqueInverseResult(values, inverse_indices)
def unique_values(x: Array, /, xp: Namespace) -> Array:
kwargs = _unique_kwargs(xp)
return xp.unique(
x,
return_counts=False,
return_index=False,
return_inverse=False,
**kwargs,
)
# These functions have different keyword argument names
def std(
x: Array,
/,
xp: Namespace,
*,
axis: int | tuple[int, ...] | None = None,
correction: float = 0.0, # correction instead of ddof
keepdims: bool = False,
**kwargs: object,
) -> Array:
return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
def var(
x: Array,
/,
xp: Namespace,
*,
axis: int | tuple[int, ...] | None = None,
correction: float = 0.0, # correction instead of ddof
keepdims: bool = False,
**kwargs: object,
) -> Array:
return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
# cumulative_sum is renamed from cumsum, and adds the include_initial keyword
# argument
def cumulative_sum(
x: Array,
/,
xp: Namespace,
*,
axis: int | None = None,
dtype: DType | None = None,
include_initial: bool = False,
**kwargs: object,
) -> Array:
wrapped_xp = array_namespace(x)
# TODO: The standard is not clear about what should happen when x.ndim == 0.
if axis is None:
if x.ndim > 1:
raise ValueError(
"axis must be specified in cumulative_sum for more than one dimension"
)
axis = 0
res = xp.cumsum(x, axis=axis, dtype=dtype, **kwargs)
# np.cumsum does not support include_initial
if include_initial:
initial_shape = list(x.shape)
initial_shape[axis] = 1
res = xp.concatenate(
[
wrapped_xp.zeros(
shape=initial_shape, dtype=res.dtype, device=_get_device(res)
),
res,
],
axis=axis,
)
return res
def cumulative_prod(
x: Array,
/,
xp: Namespace,
*,
axis: int | None = None,
dtype: DType | None = None,
include_initial: bool = False,
**kwargs: object,
) -> Array:
wrapped_xp = array_namespace(x)
if axis is None:
if x.ndim > 1:
raise ValueError(
"axis must be specified in cumulative_prod for more than one dimension"
)
axis = 0
res = xp.cumprod(x, axis=axis, dtype=dtype, **kwargs)
# np.cumprod does not support include_initial
if include_initial:
initial_shape = list(x.shape)
initial_shape[axis] = 1
res = xp.concatenate(
[
wrapped_xp.ones(
shape=initial_shape, dtype=res.dtype, device=_get_device(res)
),
res,
],
axis=axis,
)
return res
# The min and max argument names in clip are different and not optional in numpy, and type
# promotion behavior is different.
def clip(
x: Array,
/,
min: float | Array | None = None,
max: float | Array | None = None,
*,
xp: Namespace,
# TODO: np.clip has other ufunc kwargs
out: Array | None = None,
) -> Array:
def _isscalar(a: object) -> TypeIs[int | float | None]:
return isinstance(a, (int, float, type(None)))
min_shape = () if _isscalar(min) else min.shape
max_shape = () if _isscalar(max) else max.shape
wrapped_xp = array_namespace(x)
result_shape = xp.broadcast_shapes(x.shape, min_shape, max_shape)
# np.clip does type promotion but the array API clip requires that the
# output have the same dtype as x. We do this instead of just downcasting
# the result of xp.clip() to handle some corner cases better (e.g.,
# avoiding uint64 -> float64 promotion).
# Note: cases where min or max overflow (integer) or round (float) in the
# wrong direction when downcasting to x.dtype are unspecified. This code
# just does whatever NumPy does when it downcasts in the assignment, but
# other behavior could be preferred, especially for integers. For example,
# this code produces:
# >>> clip(asarray(0, dtype=int8), asarray(128, dtype=int16), None)
# -128
# but an answer of 0 might be preferred. See
# https://github.com/numpy/numpy/issues/24976 for more discussion on this issue.
# At least handle the case of Python integers correctly (see
# https://github.com/numpy/numpy/pull/26892).
if wrapped_xp.isdtype(x.dtype, "integral"):
if type(min) is int and min <= wrapped_xp.iinfo(x.dtype).min:
min = None
if type(max) is int and max >= wrapped_xp.iinfo(x.dtype).max:
max = None
dev = _get_device(x)
if out is None:
out = wrapped_xp.empty(result_shape, dtype=x.dtype, device=dev)
assert out is not None # workaround for a type-narrowing issue in pyright
out[()] = x
if min is not None:
a = wrapped_xp.asarray(min, dtype=x.dtype, device=dev)
a = xp.broadcast_to(a, result_shape)
ia = (out < a) | xp.isnan(a)
out[ia] = a[ia]
if max is not None:
b = wrapped_xp.asarray(max, dtype=x.dtype, device=dev)
b = xp.broadcast_to(b, result_shape)
ib = (out > b) | xp.isnan(b)
out[ib] = b[ib]
# Return a scalar for 0-D
return out[()]
# Unlike transpose(), the axes argument to permute_dims() is required.
def permute_dims(x: Array, /, axes: tuple[int, ...], xp: Namespace) -> Array:
return xp.transpose(x, axes)
# np.reshape calls the keyword argument 'newshape' instead of 'shape'
def reshape(
x: Array,
/,
shape: tuple[int, ...],
xp: Namespace,
*,
copy: Optional[bool] = None,
**kwargs: object,
) -> Array:
if copy is True:
x = x.copy()
elif copy is False:
y = x.view()
y.shape = shape
return y
return xp.reshape(x, shape, **kwargs)
# The descending keyword is new in sort and argsort, and 'kind' replaced with
# 'stable'
def argsort(
x: Array,
/,
xp: Namespace,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
**kwargs: object,
) -> Array:
# Note: this keyword argument is different, and the default is different.
# We set it in kwargs like this because numpy.sort uses kind='quicksort'
# as the default whereas cupy.sort uses kind=None.
if stable:
kwargs["kind"] = "stable"
if not descending:
res = xp.argsort(x, axis=axis, **kwargs)
else:
# As NumPy has no native descending sort, we imitate it here. Note that
# simply flipping the results of xp.argsort(x, ...) would not
# respect the relative order like it would in native descending sorts.
res = xp.flip(
xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs),
axis=axis,
)
# Rely on flip()/argsort() to validate axis
normalised_axis = axis if axis >= 0 else x.ndim + axis
max_i = x.shape[normalised_axis] - 1
res = max_i - res
return res
def sort(
x: Array,
/,
xp: Namespace,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
**kwargs: object,
) -> Array:
# Note: this keyword argument is different, and the default is different.
# We set it in kwargs like this because numpy.sort uses kind='quicksort'
# as the default whereas cupy.sort uses kind=None.
if stable:
kwargs["kind"] = "stable"
res = xp.sort(x, axis=axis, **kwargs)
if descending:
res = xp.flip(res, axis=axis)
return res
# nonzero should error for zero-dimensional arrays
def nonzero(x: Array, /, xp: Namespace, **kwargs: object) -> tuple[Array, ...]:
if x.ndim == 0:
raise ValueError("nonzero() does not support zero-dimensional arrays")
return xp.nonzero(x, **kwargs)
# ceil, floor, and trunc return integers for integer inputs
def ceil(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
if xp.issubdtype(x.dtype, xp.integer):
return x
return xp.ceil(x, **kwargs)
def floor(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
if xp.issubdtype(x.dtype, xp.integer):
return x
return xp.floor(x, **kwargs)
def trunc(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
if xp.issubdtype(x.dtype, xp.integer):
return x
return xp.trunc(x, **kwargs)
# linear algebra functions
def matmul(x1: Array, x2: Array, /, xp: Namespace, **kwargs: object) -> Array:
return xp.matmul(x1, x2, **kwargs)
# Unlike transpose, matrix_transpose only transposes the last two axes.
def matrix_transpose(x: Array, /, xp: Namespace) -> Array:
if x.ndim < 2:
raise ValueError("x must be at least 2-dimensional for matrix_transpose")
return xp.swapaxes(x, -1, -2)
def tensordot(
x1: Array,
x2: Array,
/,
xp: Namespace,
*,
axes: int | tuple[Sequence[int], Sequence[int]] = 2,
**kwargs: object,
) -> Array:
return xp.tensordot(x1, x2, axes=axes, **kwargs)
def vecdot(x1: Array, x2: Array, /, xp: Namespace, *, axis: int = -1) -> Array:
if x1.shape[axis] != x2.shape[axis]:
raise ValueError("x1 and x2 must have the same size along the given axis")
if hasattr(xp, "broadcast_tensors"):
_broadcast = xp.broadcast_tensors
else:
_broadcast = xp.broadcast_arrays
x1_ = xp.moveaxis(x1, axis, -1)
x2_ = xp.moveaxis(x2, axis, -1)
x1_, x2_ = _broadcast(x1_, x2_)
res = xp.conj(x1_[..., None, :]) @ x2_[..., None]
return res[..., 0, 0]
# isdtype is a new function in the 2022.12 array API specification.
def isdtype(
dtype: DType,
kind: DType | str | tuple[DType | str, ...],
xp: Namespace,
*,
_tuple: bool = True, # Disallow nested tuples
) -> bool:
"""
Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
Note that outside of this function, this compat library does not yet fully
support complex numbers.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
for more details
"""
if isinstance(kind, tuple) and _tuple:
return any(
isdtype(dtype, k, xp, _tuple=False)
for k in cast("tuple[DType | str, ...]", kind)
)
elif isinstance(kind, str):
if kind == "bool":
return dtype == xp.bool_
elif kind == "signed integer":
return xp.issubdtype(dtype, xp.signedinteger)
elif kind == "unsigned integer":
return xp.issubdtype(dtype, xp.unsignedinteger)
elif kind == "integral":
return xp.issubdtype(dtype, xp.integer)
elif kind == "real floating":
return xp.issubdtype(dtype, xp.floating)
elif kind == "complex floating":
return xp.issubdtype(dtype, xp.complexfloating)
elif kind == "numeric":
return xp.issubdtype(dtype, xp.number)
else:
raise ValueError(f"Unrecognized data type kind: {kind!r}")
else:
# This will allow things that aren't required by the spec, like
# isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be
# more strict here to match the type annotation? Note that the
# array_api_strict implementation will be very strict.
return dtype == kind
# unstack is a new function in the 2023.12 array API standard
def unstack(x: Array, /, xp: Namespace, *, axis: int = 0) -> tuple[Array, ...]:
if x.ndim == 0:
raise ValueError("Input array must be at least 1-d.")
return tuple(xp.moveaxis(x, axis, 0))
# numpy 1.26 does not use the standard definition for sign on complex numbers
def sign(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
if isdtype(x.dtype, "complex floating", xp=xp):
out = (x / xp.abs(x, **kwargs))[...]
# sign(0) = 0 but the above formula would give nan
out[x == 0j] = 0j
else:
out = xp.sign(x, **kwargs)
# CuPy sign() does not propagate nans. See
# https://github.com/data-apis/array-api-compat/issues/136
if _is_cupy_namespace(xp) and isdtype(x.dtype, "real floating", xp=xp):
out[xp.isnan(x)] = xp.nan
return out[()]
def finfo(type_: DType | Array, /, xp: Namespace) -> Any:
# It is surprisingly difficult to recognize a dtype apart from an array.
# np.int64 is not the same as np.asarray(1).dtype!
try:
return xp.finfo(type_)
except (ValueError, TypeError):
return xp.finfo(type_.dtype)
def iinfo(type_: DType | Array, /, xp: Namespace) -> Any:
try:
return xp.iinfo(type_)
except (ValueError, TypeError):
return xp.iinfo(type_.dtype)
__all__ = [
"arange",
"empty",
"empty_like",
"eye",
"full",
"full_like",
"linspace",
"ones",
"ones_like",
"zeros",
"zeros_like",
"UniqueAllResult",
"UniqueCountsResult",
"UniqueInverseResult",
"unique_all",
"unique_counts",
"unique_inverse",
"unique_values",
"std",
"var",
"cumulative_sum",
"cumulative_prod",
"clip",
"permute_dims",
"reshape",
"argsort",
"sort",
"nonzero",
"ceil",
"floor",
"trunc",
"matmul",
"matrix_transpose",
"tensordot",
"vecdot",
"isdtype",
"unstack",
"sign",
"finfo",
"iinfo",
]
_all_ignore = ["inspect", "array_namespace", "NamedTuple"]
def __dir__() -> list[str]:
return __all__
| UniqueInverseResult |
python | pypa__virtualenv | src/virtualenv/app_data/na.py | {
"start": 1117,
"end": 1500
} | class ____(ContentStore):
def exists(self):
return False
def read(self):
"""Nothing to read."""
return
def write(self, content):
"""Nothing to write."""
def remove(self):
"""Nothing to remove."""
@contextmanager
def locked(self):
yield
__all__ = [
"AppDataDisabled",
"ContentStoreNA",
]
| ContentStoreNA |
python | walkccc__LeetCode | solutions/172. Factorial Trailing Zeroes/172.py | {
"start": 0,
"end": 124
} | class ____:
def trailingZeroes(self, n: int) -> int:
return 0 if n == 0 else n // 5 + self.trailingZeroes(n // 5)
| Solution |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 7140,
"end": 7225
} | class ____(PydanticTypeError):
msg_template = 'value is not a valid list'
| ListError |
python | getsentry__sentry | src/sentry/models/importchunk.py | {
"start": 331,
"end": 3163
} | class ____(DefaultFieldsModelExisting):
"""
Base class representing the map of import pks to final, post-import database pks.
"""
__relocation_scope__ = RelocationScope.Excluded
# Every import has a UUID assigned to it. If the import was triggered by a relocation, this UUID
# simply inherits from the `Relocation` model, and can be used to connect back to it. If it is
# not done via the `Relocation` pathway (that is, someone triggered it using a `sentry import`
# command via the CLI), it is randomly generated, but shared between all chunks of the same
# import.
import_uuid = UUIDField(db_index=True)
# The name of model that was imported.
model = models.CharField(db_index=True, max_length=64)
# The minimum ordinal (inclusive), relative to the source JSON, imported by this chunk.
min_ordinal = BoundedBigIntegerField()
# The maximum ordinal (inclusive), relative to the source JSON, imported by this chunk.
max_ordinal = BoundedBigIntegerField()
# The minimum (inclusive) original pks from the source blob seen by this chunk.
min_source_pk = BoundedBigIntegerField()
# The maximum (inclusive) original pks from the source blob seen by this chunk.
max_source_pk = BoundedBigIntegerField()
# The minimum assigned pk (inclusive) imported by this chunk. Nullable because it is possible
# that no insertions were performed, with all writes being merges or overwrites instead.
min_inserted_pk = BoundedBigIntegerField(null=True)
# The maximum assigned pk (inclusive) imported by this chunk. Nullable because it is possible
# that no insertions were performed, with all writes being merges or overwrites instead.
max_inserted_pk = BoundedBigIntegerField(null=True)
# A JSON object map from original pks in the source blob to the pks they were assigned when
# being inserted into the actual database.
inserted_map = models.JSONField(default=dict)
# A JSON object map from original pks in the source blob to the pks they were "merged" with (ie,
# existing database data that was kept in their stead).
existing_map = models.JSONField(default=dict)
# A JSON object map from original pks in the source blob to the pks they "overwrote" (ie, the
# data from the model was imported into an existing model, and that model's pk was retained).
overwrite_map = models.JSONField(default=dict)
# If the inserted model has a "slug" field, or some other similar globally unique string
# identifier, save it in a map from original pks in the source blob to that "slug" value.
inserted_identifiers = models.JSONField(default=dict)
__repr__ = sane_repr("import_uuid", "model", "min_ordinal", "max_ordinal")
class Meta:
abstract = True
@region_silo_model
| BaseImportChunk |
python | faif__python-patterns | patterns/creational/abstract_factory.py | {
"start": 1223,
"end": 1433
} | class ____:
def __init__(self, name: str) -> None:
self.name = name
def speak(self) -> None:
raise NotImplementedError
def __str__(self) -> str:
raise NotImplementedError
| Pet |
python | joke2k__faker | tests/providers/test_color.py | {
"start": 15523,
"end": 16102
} | class ____:
"""Test fa_IR color provider methods"""
def test_color_name(self, faker, num_samples):
for _ in range(num_samples):
color_name = faker.color_name()
assert isinstance(color_name, str)
assert color_name in FaIrColorProvider.all_colors.keys()
def test_safe_color_name(self, faker, num_samples):
for _ in range(num_samples):
safe_color_name = faker.safe_color_name()
assert isinstance(safe_color_name, str)
assert safe_color_name in FaIrColorProvider.safe_colors
| TestFaIr |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image55.py | {
"start": 315,
"end": 949
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image55.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"E9",
self.image_dir + "red.png",
{"url": "https://github.com/jmcnamara", "decorative": True},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess14.py | {
"start": 420,
"end": 1039
} | class ____(Generic[T_contra, V_co]):
def __init__(self, name: str, function: Callable[[T_contra], V_co]) -> None: ...
@overload
def __get__(
self: CachedSlotPropertyT, instance: None, owner: type[T_contra]
) -> CachedSlotPropertyT: ...
@overload
def __get__(self, instance: T_contra, owner: Any) -> V_co: ...
def __get__(
self: CachedSlotPropertyT, instance: T_contra | None, owner: Any
) -> CachedSlotPropertyT | V_co: ...
def cached_slot_property(
name: str,
) -> Callable[[Callable[[T_contra], V_co]], CachedSlotProperty[T_contra, V_co]]: ...
| CachedSlotProperty |
python | django__django | tests/template_tests/filter_tests/test_pluralize.py | {
"start": 930,
"end": 2288
} | class ____(SimpleTestCase):
def test_integers(self):
self.assertEqual(pluralize(1), "")
self.assertEqual(pluralize(0), "s")
self.assertEqual(pluralize(2), "s")
def test_floats(self):
self.assertEqual(pluralize(0.5), "s")
self.assertEqual(pluralize(1.5), "s")
def test_decimals(self):
self.assertEqual(pluralize(Decimal(1)), "")
self.assertEqual(pluralize(Decimal(0)), "s")
self.assertEqual(pluralize(Decimal(2)), "s")
def test_lists(self):
self.assertEqual(pluralize([1]), "")
self.assertEqual(pluralize([]), "s")
self.assertEqual(pluralize([1, 2, 3]), "s")
def test_suffixes(self):
self.assertEqual(pluralize(1, "es"), "")
self.assertEqual(pluralize(0, "es"), "es")
self.assertEqual(pluralize(2, "es"), "es")
self.assertEqual(pluralize(1, "y,ies"), "y")
self.assertEqual(pluralize(0, "y,ies"), "ies")
self.assertEqual(pluralize(2, "y,ies"), "ies")
self.assertEqual(pluralize(0, "y,ies,error"), "")
def test_no_len_type(self):
self.assertEqual(pluralize(object(), "y,es"), "")
self.assertEqual(pluralize(object(), "es"), "")
def test_value_error(self):
self.assertEqual(pluralize("", "y,es"), "")
self.assertEqual(pluralize("", "es"), "")
| FunctionTests |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 52151,
"end": 53121
} | class ____(ASTExpression):
def __init__(self, expr: str) -> None:
self.expr = expr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTFallbackExpr):
return NotImplemented
return self.expr == other.expr
def __hash__(self) -> int:
return hash(self.expr)
def _stringify(self, transform: StringifyTransform) -> str:
return self.expr
def get_id(self, version: int) -> str:
return str(self.expr)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += nodes.literal(self.expr, self.expr)
################################################################################
# Types
################################################################################
# Things for ASTNestedName
################################################################################
| ASTFallbackExpr |
python | doocs__leetcode | solution/2100-2199/2133.Check if Every Row and Column Contains All Numbers/Solution.py | {
"start": 0,
"end": 177
} | class ____:
def checkValid(self, matrix: List[List[int]]) -> bool:
n = len(matrix)
return all(len(set(row)) == n for row in chain(matrix, zip(*matrix)))
| Solution |
python | doocs__leetcode | lcci/01.04.Palindrome Permutation/Solution2.py | {
"start": 0,
"end": 236
} | class ____:
def canPermutePalindrome(self, s: str) -> bool:
vis = set()
for c in s:
if c in vis:
vis.remove(c)
else:
vis.add(c)
return len(vis) < 2
| Solution |
python | pytorch__pytorch | torch/testing/_internal/jit_utils.py | {
"start": 29554,
"end": 33768
} | class ____:
def __init__(self) -> None:
self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
self.old_profiling_mode = torch._C._get_graph_executor_optimize(True)
self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
torch._C._jit_set_texpr_fuser_enabled(True)
self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
torch._C._jit_set_te_must_use_llvm_cpu(False)
def restore(self):
torch._C._jit_set_profiling_executor(self.old_profiling_executor)
torch._C._get_graph_executor_optimize(self.old_profiling_mode)
torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)
torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state)
torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)
torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)
torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)
def clone_inputs(args):
inputs: list[Union[torch.Tensor, list[torch.Tensor]]] = []
for arg in args:
if isinstance(arg, torch.Tensor):
inputs.append(arg.detach().clone())
elif is_iterable_of_tensors(arg):
inputs.append([t.detach().clone() for t in arg])
else:
inputs.append(arg)
return inputs
def get_traced_sample_variant_pairs(device, dtype, op):
# tuples of (variant, sample)
outputs: list[tuple[Any, Any]] = []
samples = op.sample_inputs(device, dtype)
# Acquires variants to test
func = op.get_op()
method = op.get_method()
variants = {
# TODO: inplace tests currently fail, fix and add inplace variant
'function': func, 'method': method,
}
# TODO: find better way to standardize on op registration itself..
has_fake_function = op.name in ["resize_", 'resize_as_']
if has_fake_function:
variants = {'method': getattr(torch.Tensor, op.name)}
# In eager mode, these ops can take (Tensor, bool) args; but in
# JIT they can only take (Tensor, Scalar), and bool is not a
# scalar in the JIT type system. So to test these in JIT, the bool
# is converted to an int for the test.
ops_with_unsupported_bool_args = [
{
"name": "div_floor_rounding",
"arg_idx": [0],
},
{
"name": "div_no_rounding_mode",
"arg_idx": [0],
},
{
"name": "div_trunc_rounding",
"arg_idx": [0],
},
{
"name": "index_fill",
"arg_idx": [2],
},
{
"name": "full_like",
"arg_idx": [0],
},
{
"name": "mul",
"arg_idx": [0],
},
{
"name": "new_full",
"arg_idx": [1],
},
]
# doesn't support tracing
if has_fake_function:
return outputs
for sample in samples:
for variant in variants.values():
if variant is None:
continue
if is_lambda(variant):
continue
matching_ops = filter(lambda x: op.formatted_name == x["name"], ops_with_unsupported_bool_args)
for op_data in matching_ops:
for idx in op_data["arg_idx"]:
args = list(sample.args)
if len(sample.args) > idx and isinstance(sample.args[idx], bool):
args[idx] = int(args[idx])
sample.args = tuple(args)
outputs.append((variant, sample))
return outputs
# types.LambdaType gave false positives
def is_lambda(lamb):
LAMBDA = lambda: 0 # noqa: E731
return isinstance(lamb, type(LAMBDA)) and lamb.__name__ == LAMBDA.__name__
| TensorExprTestOptions |
python | walkccc__LeetCode | solutions/1940. Longest Common Subsequence Between Sorted Arrays/1940-2.py | {
"start": 0,
"end": 166
} | class ____:
def longestCommonSubsequence(self, arrays: list[list[int]]) -> list[int]:
return sorted(functools.reduce(lambda a, b: set(a) & set(b), arrays))
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-xata/destination_xata/destination.py | {
"start": 420,
"end": 3937
} | class ____(Destination):
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
xata = XataClient(api_key=config["api_key"], db_url=config["db_url"])
xata.set_header("user-agent", f"airbyte/destination-xata:{__version__}")
bp = BulkProcessor(xata)
count = 0
for message in input_messages:
if message.type == Type.RECORD:
# Put record to processing queue
bp.put_record(message.record.stream, message.record.data)
count += 1
if message.type == Type.STATE:
yield message
bp.flush_queue()
logger.info(bp.get_stats())
if count != bp.get_stats()["total"] or bp.get_stats()["failed_batches"] != 0:
raise Exception(
"inconsistency found, expected %d records pushed, actual: %d with %d failures."
% (count, bp.get_stats()["total"], bp.get_stats()["failed_batches"])
)
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
xata = XataClient(api_key=config["api_key"], db_url=config["db_url"])
xata.set_header("user-agent", f"airbyte/destination-xata:{__version__}")
r = xata.users().getUser()
if r.status_code != 200:
raise Exception("Invalid connection parameters.")
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
| DestinationXata |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 10250,
"end": 10309
} | class ____(BitwiseShiftOperation):
pass
| BitwiseRightShift |
python | joerick__pyinstrument | pyinstrument/renderers/pstatsrenderer.py | {
"start": 484,
"end": 3276
} | class ____(FrameRenderer):
"""
Outputs a marshaled dict, containing processed frames in pstat format,
suitable for processing by gprof2dot and snakeviz.
"""
output_file_extension = "pstats"
output_is_binary = True
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
def frame_key(self, frame: Frame) -> FrameKey:
return (frame.file_path or "", frame.line_no or 0, frame.function)
def render_frame(self, frame: Frame | None, stats: StatsDict) -> None:
if frame is None:
return
key = self.frame_key(frame)
if key not in stats:
# create a new entry
# being a statistical profiler, we don't know the exact call time or
# number of calls, they're stubbed out
call_time = -1
number_calls = -1
total_time = 0
cumulative_time = 0
callers: dict[FrameKey, CallerValue] = {}
else:
call_time, number_calls, total_time, cumulative_time, callers = stats[key]
# update the total time and cumulative time
total_time += frame.total_self_time
cumulative_time += frame.time
if frame.parent:
parent_key = self.frame_key(frame.parent)
if parent_key not in callers:
p_call_time = -1
p_number_calls = -1
p_total_time = 0
p_cumulative_time = 0
else:
p_call_time, p_number_calls, p_total_time, p_cumulative_time = callers[parent_key]
p_total_time += frame.total_self_time
p_cumulative_time += frame.time
callers[parent_key] = p_call_time, p_number_calls, p_total_time, p_cumulative_time
stats[key] = (call_time, number_calls, total_time, cumulative_time, callers)
for child in frame.children:
if not child.is_synthetic:
self.render_frame(child, stats)
def render(self, session: Session):
frame = self.preprocess(session.root_frame())
stats: StatsDict = {}
self.render_frame(frame, stats)
# marshal.dumps returns bytes, so we need to decode it to a string
# using surrogateescape
return marshal.dumps(stats).decode(encoding="utf-8", errors="surrogateescape")
def default_processors(self) -> ProcessorList:
return [
processors.remove_importlib,
processors.remove_tracebackhide,
processors.merge_consecutive_self_time,
processors.aggregate_repeated_calls,
processors.remove_irrelevant_nodes,
processors.remove_unnecessary_self_time_nodes,
processors.remove_first_pyinstrument_frames_processor,
]
| PstatsRenderer |
python | huggingface__transformers | src/transformers/models/lfm2_moe/modular_lfm2_moe.py | {
"start": 2422,
"end": 3835
} | class ____(Qwen2MoeExperts):
def __init__(self, config):
super().__init__(config)
del self.act_fn
def forward(
self,
hidden_states: torch.Tensor,
top_k_index: torch.Tensor,
top_k_weights: torch.Tensor,
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
expert_mask = expert_mask.permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_idx = expert_idx[0]
if expert_idx == self.num_experts:
continue
top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
current_state = hidden_states[token_idx]
gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
current_hidden_states = F.silu(gate) * up
current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
return final_hidden_states
| Lfm2MoeExperts |
python | joke2k__faker | faker/providers/address/da_DK/__init__.py | {
"start": 45,
"end": 50630
} | class ____(AddressProvider):
# Building numbers don't go higher than a 1000
building_number_formats = ("%##", "%#", "%")
street_name_formats = ("{{dk_street_name}}{{street_suffix}}",)
street_address_formats = ("{{street_name}} {{building_number}}",)
street_names = (
"Aberdeen",
"Dompap",
"Abildgaards",
"Skovhare",
"Svalehale",
"Abrikos",
"Absalons",
"Adel",
"Admiral",
"Adrians",
"Agerlands",
"Aggersborg",
"Aggersvold",
"Agger",
"Agnete",
"Ahlefeldts",
"Ahlmanns",
"Ahorns",
"Ahrenkildes",
"Albaniens",
"Aldersro",
"Allers",
"Alexandria",
"Alliance",
"Alperose",
"Als",
"Alsted",
"Amagerbro",
"Amagerfælled",
"Amager",
"Amagermotoren",
"Amager Strand",
"Amalie",
"Amalie Skrams",
"Amerika",
"Amsterdam",
"Angel",
"Anneberg",
"Anneke",
"Annex",
"Ansgars",
"Antoinette",
"Antoni",
"Apollo",
"Arabiens",
"Arendals",
"Arkona",
"Armeniens",
"Arne Jacobsens",
"Arnes",
"Arresø",
"Arsenal",
"Artilleri",
"Asger Jorns",
"Askø",
"Asminderød",
"Asnæs",
"Assens",
"Asters",
"Astrup",
"Asyl",
"Athens",
"Augusta",
"Australiens",
"Backers",
"Badens",
"Badstue",
"Bager",
"Baggesens",
"Bakke",
"Balders",
"Ballum",
"Baltika",
"Bandholm",
"Bangerts",
"Bangsbo",
"Bardenfleths",
"Søfly",
"Bartholins",
"Basnæs",
"Bastion",
"Bavnager",
"Bavnehøj",
"Beate",
"Bechgaards",
"Bedford",
"Beethovens",
"Beldringe",
"Belfast",
"Belgiens",
"Bellahøj",
"Belle de Boskoop",
"Bellida",
"Bellis",
"Bellmans",
"Bergens",
"Berggreens",
"Bergthoras",
"Bernstorffs",
"Bevtoft",
"Biens",
"Billed",
"Billesborg",
"Birkager",
"Birkedommer",
"Birke",
"Birkholm",
"Birma",
"Bisidder",
"Bispebjerg",
"Bispe",
"Bissens",
"Bjelkes",
"Bjergmarks",
"Bjergsted",
"Bjernede",
"Bjerregårds Side",
"Bjerregårds",
"Bjørneklo",
"Bjørnsons",
"Blanka",
"Blegdams",
"Blekinge",
"Blushøj",
"Blytækker",
"Blåbær",
"Blågårds",
"Blåmejse",
"Blåmunke",
"Bodils",
"Boeslunde",
"Bogense",
"Bogenæs",
"Bogfinke",
"Bogholder",
"Bogtrykker",
"Bohlendach",
"Bolands",
"Boldhus",
"Bolton",
"Bomhus",
"Bomslutter",
"Bomulds",
"Bordeaux",
"Borgbjergs",
"Borger",
"Borgmester Jensens",
"Borgskriver",
"Borneo",
"Bornholms",
"Borreby",
"Borthigs",
"Borups",
"Boserup",
"Botofte",
"Boyes",
"Brages",
"Bramminge",
"Bramslykke",
"Bratskov",
"Bredahls",
"Bredelands",
"Bred",
"Bregnerød",
"Breidablik",
"Bremens",
"Breslau",
"Brigården",
"Bri",
"Bristol",
"Broager",
"Brobergs",
"Brofoged",
"Brohus",
"Broksø",
"Brolægger",
"Brombær",
"Brorsons",
"Brydes",
"Brygger",
"Bryggeri",
"Brynhilde",
"Bryssel",
"Brøndkær",
"Brøndum",
"Brøndæble",
"Brønshøjgård",
"Brønshøjholms",
"Brønshøj Kirke",
"Brønshøj",
"Bulbjerg",
"Bulgariens",
"Buntmager",
"Burmeisters",
"Bustrup",
"Byager",
"Bygholm",
"Byglands",
"Bygmester",
"Bygård",
"Bykilde",
"Bymose",
"Bækkeskov",
"Bøhmens",
"Bøllegård",
"Bøllemosegårds",
"Børglum",
"Børs",
"Børskov",
"Bådehavns",
"Bådsmands",
"Calais",
"Capri",
"Carl Nielsens",
"Carls",
"Carstens",
"Castbergs",
"Ceylon",
"Christen Bergs",
"Christian II's",
"Christiansborg",
"Christianshavns Vold",
"Christiansminde",
"Classens",
"Clausholm",
"Clematis",
"Colbjørnsens",
"Collins",
"Container",
"Cox Orange",
"Cumberlands",
"Cyperns",
"Cæcilia",
"Dag Hammarskjölds",
"Dagmars",
"Dagø",
"Dahlerups",
"Dalby",
"Dalmose",
"Dalslands",
"Damager",
"Dampfærge",
"Dannebrogs",
"Danneskiold-Samsøes",
"Dannevirke",
"Danshøj",
"Danstrup",
"Degnemose",
"Degneæble",
"Delfin",
"Delos",
"Derby",
"Dige",
"Dirchs",
"Donau",
"Dorthea",
"Dovre",
"Dragsholm",
"Drechsels",
"Drejer",
"Drejø",
"Drogdens",
"Dronning Dagmars",
"Dronning Elisabeths",
"Dronningens",
"Dronningens Tvær",
"Dronninglund",
"Dublin",
"Dunhammer",
"Dunkerque",
"Dybbøls",
"Dybendals",
"Dybens",
"Dyvekes",
"Dønnerup",
"Ebbe Rodes",
"Eberts",
"Eckersbergs",
"Edel Sauntes",
"Edelsminde",
"Efterslægts",
"Egebæks",
"Ege",
"Egelykke",
"Egemarke",
"Egholm",
"Egils",
"Ehlers",
"Ejdersted",
"Ejler Billes",
"Ekvipagemester",
"Elba",
"Elias",
"Ellebjerg",
"Elme",
"Elmelunde",
"Elsdyrs",
"Elselille",
"Elstar",
"Elværks",
"Emblas",
"Emdrup Eng",
"Emdrupgårds",
"Emdrup Kær",
"Emdrup Mose",
"Emdrup",
"Enebær",
"Engblomme",
"Engdals",
"Engelholm",
"Engelsteds",
"Enghave",
"Englands",
"Engskifte",
"Eng",
"Enigheds",
"Enveloppe",
"Erantis",
"Eriks",
"Eriksholm",
"Eschrichts",
"Eskadre",
"Eskilds",
"Eskildstrup",
"Eskjær",
"Esrom",
"Esthers",
"Estlands",
"Eton",
"Ewalds",
"Fabrikmester",
"Fafners",
"Fajance",
"Fakse",
"Fakse Tvær",
"Faksinge",
"Falke",
"Fanø",
"Farum",
"Farver",
"Fehmerns",
"Femkløver",
"Fengers",
"Fenris",
"Fensmark",
"Ferring",
"Fersken",
"Finlands",
"Fiol",
"Firkløver",
"Fiskedams",
"Fjenneslev",
"Fladstjerne",
"Flaske",
"Flensborg",
"Flidsager",
"Flinterup",
"Floras",
"Florens",
"Florida",
"Flors",
"Folevads",
"Folke Bernadottes",
"Forbindelses",
"Fordresgård",
"Formosa",
"Fortun",
"Fossgårds",
"Fragt",
"Frankrigs",
"Fredensborg",
"Fredens",
"Fredenshøj",
"Fredericia",
"Frederiksberg",
"Frederiksborg",
"Frederiks",
"Frederiksgårds",
"Frederiksstads",
"Frederikssunds",
"Luftmarine",
"Frejas",
"Fremads",
"Freunds",
"Frilands",
"Frimester",
"Fruebjerg",
"Fuglager",
"Fuglefænger",
"Fuglsang",
"Funkia",
"Fussings",
"Fyens",
"Fyrbøder",
"Fyrtårn",
"Fælled",
"Fælledens Passage",
"Færgehavns",
"Følfod",
"Førslev",
"Fåborg",
"Gadekær",
"Gadstrup",
"Galions",
"Gamle Vasby",
"Gammel Jernbane",
"Gammel Konge",
"Gammel Køge Lande",
"Gammeltofts",
"Gartner",
"Gartneri",
"Gasværks",
"Gaunø",
"Gavlhus",
"Gearhals",
"Gefions",
"Geislers",
"Genua",
"Georgine",
"Gerbrands",
"Gerdas",
"Gerlev",
"Gerners",
"Gerts",
"Geysers",
"Gilbjerg",
"Gimles",
"Gislinge",
"Gitter",
"Gjorslev",
"Gladbo",
"Gladiolus",
"Glas",
"Glente",
"Glinkas",
"Glommens",
"Glucks",
"Glumsø",
"Glückstadts",
"Glænø",
"Godsbane",
"Godthåbs",
"Gorms",
"Gothers",
"Gransanger",
"Greis",
"Grenå",
"Grevinge",
"Gribskov",
"Griffenfelds",
"Grysgårds",
"Grækenlands",
"Grønager",
"Grøndals Park",
"Grøndalsvænge",
"Grønjords",
"Grønløkke",
"Grønne",
"Grønnehave",
"Grønnemose",
"Grønris",
"Gråbrødre",
"Gråbynke",
"Gråspurve",
"Gråstens",
"Gudenå",
"Guldager",
"Guldbergs",
"Guldstjerne",
"Gulkløver",
"Gullands",
"Gullfoss",
"Gunhilds",
"Gunløgs",
"Gyldenlak",
"Gyldenløves",
"Gyldenris",
"Gyrite",
"Gyrstinge",
"Gyvel",
"Gørtler",
"Gårdfæste",
"Gårdtofte",
"Gåsebæks",
"Gåse",
"Gåseurt",
"Haderslev",
"Hagbard",
"Hagested",
"Haifa",
"Haldager",
"Halfdans",
"Halgreens",
"Hallands",
"Hallins",
"Halsskov",
"Hambros",
"Hamlets",
"Hammelstrup",
"Hammerens",
"Hammerichs",
"Hammershus",
"Hannemanns",
"Hannover",
"Hans Bogbinders",
"Hanssted",
"Hanstholm",
"Haralds",
"Haraldsted",
"Harboøre",
"Hardanger",
"Hardenberg",
"Hare",
"Hareskov",
"Harrestrup",
"J.P.E. Hartmanns",
"Harwich",
"Hassel",
"Hastings",
"Hauser",
"Havdrup",
"Havkajak",
"Havne",
"Havre",
"Havsgårds",
"Haydns",
"Hedeby",
"Hedegaards",
"Hedemanns",
"Heibergs",
"Heils",
"Heimdals",
"Heines",
"Heises",
"Hejre",
"Heklas",
"Heldbo",
"Helgesens",
"Helgolands",
"Helikons",
"Hellas",
"Hellebæk",
"Helleliden",
"Hellested",
"Helsingborg",
"Helsingørmotoren",
"Hemsedals",
"Hendon",
"Henriks",
"Herbergen",
"Herfølge",
"Herholdts",
"Herjedal",
"Herlufsholm",
"Hermods",
"Herning",
"Herslev",
"Hesselø",
"Hessens",
"Hestemølle",
"Hildurs",
"Hillerød",
"Hillerødmotoren",
"Himmerlands",
"Hindbær",
"Hinde",
"Hindustan",
"Hirse",
"Hirtshals",
"Hjelms",
"Hjertensfryds",
"Hjerting",
"Hjortdals",
"Hjortholms",
"Hjortø",
"Hjørring",
"Hobro",
"Holbergs",
"Holbæk",
"Holbækmotoren",
"Hollands",
"Holmblads",
"Holstebro",
"Holsteinborg",
"Holsteins",
"Holte",
"Hornbæk",
"Hornemans",
"Horsekilde",
"Horsens",
"Horserød",
"Houmanns",
"Hovedvagts",
"Hovgaards",
"Hovmester",
"Hovmål",
"Hulgårds",
"Humlebæk",
"Hustofte",
"Husum",
"Hvalsø",
"Hvede",
"Hveens",
"Hvidbjerg",
"Hvidkilde",
"Hvidkløver",
"Hvidtjørne",
"Hyacint",
"Hyldebær",
"Hyltebjerg",
"Hysken",
"Hyttehus",
"Händels",
"Høffdings",
"Høgholt",
"Højbo",
"Højdevangs",
"Højde",
"Højmose",
"Højsager",
"Højstrup",
"Hørdums",
"Hørhus",
"Hørsholms",
"Hørtofte",
"Høsterkøb",
"Høstgilde",
"Høyens",
"Håbets",
"Ib Schønbergs",
"Ilford",
"India",
"Industri",
"Ingerslevs",
"Ingolfs",
"Ingrid Marie",
"Iran",
"Iris",
"Irlands",
"Irmingers",
"Isafjords",
"Islevhus",
"Istanbul",
"Isted",
"Italiens",
"Jagt",
"James Grieve",
"Jans",
"Japan",
"Java",
"Jellinge",
"Jemtelands",
"Jena",
"Jeppes",
"Jerichaus",
"Jernbane",
"Bilbao",
"Jernæble",
"Jolle",
"Jordbær",
"Joris",
"Judiths",
"Jupiter",
"Jyderup",
"Jyllinge",
"Jæger",
"Jægersborg",
"Jægerspris",
"Kabbeleje",
"Kaktus",
"Kaldæa",
"Kaliforniens",
"Kalkbrænderihavns",
"Kalø",
"Kampmanns",
"Kanada",
"Kanonbåds",
"Kansas",
"Kansler",
"Kapel",
"Kapsel",
"Kaprifolie",
"Karens",
"Karlskrona",
"Karlslunde",
"Karlstads",
"Kasemat",
"Kastanie",
"Kastels",
"Kastrup",
"Katholm",
"Katrinedals",
"Kattegat",
"Kattinge",
"Kejser",
"Keldsø",
"Kentia",
"Keplers",
"Kerteminde",
"Kildebrønde",
"Kildevælds",
"Kilholm",
"Kina",
"Kingos",
"Kingston",
"Kirkebjerg",
"Kirkegårds",
"Kirsteins",
"Kirstinedals",
"Kjeldsgårds",
"Kjærstrup",
"Klaipeda",
"Klaksvigs",
"Kleins",
"Klerke",
"Klingsey",
"Klinte",
"Klintholm",
"Klitmøller",
"Klostermarks",
"Klosterris",
"Kloster",
"Klubiens",
"Kløverblads",
"Kløvermarks",
"Knabro",
"Knabstrup",
"Knippelsbro",
"Knivholt",
"Knuthenborg",
"Kolding",
"Kompagni",
"Kongebro",
"Kongedybs",
"Kongelunds",
"Kongemarks",
"Kongeæble",
"Kongo",
"Kongsdal",
"Kongshøj",
"Kongsted",
"Korea",
"Korfu",
"Korinth",
"Kornblomst",
"Kornerup",
"Kornskyld",
"Korsager",
"Kors",
"Korsika",
"Korsør",
"Kortstilk",
"Krabbesholm",
"Kraftværks",
"Krauses",
"Kreta",
"Krims",
"Kristiania",
"Krogager",
"Krogerup",
"Kroghs",
"Krokodille",
"Kronborg",
"Kronprinsens",
"Kronprinsesse",
"Krudtløbs",
"Krudtmøllegårds",
"Krusemynte",
"Kruså",
"Krügers",
"Krystal",
"Kuglegårds",
"Kuhlaus",
"Kulbane",
"Kurlands",
"Kvintus",
"Kvægtorvs",
"Kvæsthus",
"Küchlers",
"Kyringe",
"Kæmner",
"Kærager",
"Kærsanger",
"Kærskifte",
"Købmager",
"Kålager",
"Kaalunds",
"Lager",
"Lakse",
"Landehjælp",
"Landfoged",
"Landgilde",
"Landlyst",
"Landsdommer",
"Landskrona",
"Landvindings",
"Langager",
"Langebro",
"Langelinie",
"Langhus",
"Langkær",
"Langø",
"Laplands",
"Larsbjørns",
"Larslejs",
"Laura",
"Lautrups",
"Lavendel",
"Ledager",
"Leifs",
"Lejre",
"Lemberg",
"Lemnos",
"Lerchenborg",
"Lerfos",
"Lergravs",
"Letlands",
"Lidemarks",
"Liflands",
"Lille Colbjørnsens",
"Lille Farimags",
"Lille Fredens",
"Lille",
"Lille Isted",
"Lille Kannike",
"Lille Kirke",
"Lille Kongens",
"Lille Strand",
"Lille Søndervold",
"Lille Thekla",
"Lilliendals",
"Limfjords",
"Linde",
"Lindenborg",
"Lindenovs",
"Lindgreens",
"Lindholms",
"Linnés",
"Lipkes",
"Liselund",
"Livjæger",
"Livorno",
"Livø",
"Lobelia",
"Lodi",
"Lombardi",
"Lotus",
"Lugano",
"Lukretia",
"Lundbyes",
"Lundeborg",
"Lundedals",
"Lundehus",
"Lundevangs",
"Lundings",
"Lundsfryd",
"Lunds",
"Lundtofte",
"Lupin",
"Lybæk",
"Helsinki",
"Lykkebo",
"Lyneborg",
"Lynette",
"Lyngby",
"Lyngholm",
"Lyngvig",
"Lynæs",
"Lyon",
"Lyrskov",
"Lysefjords",
"Lyshøj",
"Lyshøjgårds",
"Lystrup",
"Læder",
"Lærdals",
"Lærke",
"Læssøes",
"Cork",
"Løgstør",
"Løgæble",
"Løjtegårds",
"Lønborg",
"Løngang",
"Lønstrup",
"Løvetands",
"P.D. Løvs",
"Løv",
"Magdelone",
"Magister",
"Mag",
"Majrose",
"Malakka",
"Malmø",
"Malta",
"Mandals",
"Mandel",
"Mansas",
"Mantua",
"Manø",
"Marathon",
"Marbjerg",
"Marengo",
"Margretheholms",
"Maribo",
"Mariehamn",
"Markmands",
"Markskifte",
"Mark",
"Marmor",
"Marsala",
"Marskens",
"Marstals",
"Martha",
"Masnedø",
"Masteskurs",
"Matthæus",
"Meinungs",
"Meklenborg",
"Meldahls",
"Mellemforts",
"Mellemtofte",
"Merløse",
"Messina",
"Metro",
"Middelfart",
"Middelgrunds",
"Midgårds",
"Mikkel Skovs",
"Milano",
"Milos",
"Mimers",
"Mimosa",
"Mindstrup",
"Minør",
"Mirabelle",
"Mitchells",
"Mjøsens",
"Molbechs",
"Moldau",
"Monrads",
"Montagehals",
"Montagne",
"Morbær",
"Morgendug",
"Morsø",
"Mosedal",
"Mosel",
"Mozarts",
"Mullerup",
"Murcia",
"Murer",
"Musholm",
"Musvåge",
"Mutzu",
"Myggenæs",
"Mysunde",
"Møgeltønder",
"Mølle",
"Møllegårds",
"C.F. Møllers",
"Mønter",
"Møntmester",
"Mørkhøj",
"Måge",
"Mårum",
"Nakskov",
"Nannas",
"Nansens",
"Nattergale",
"Neapel",
"Nebraska",
"Nelson Mandelas",
"Nikolaj",
"Nivå",
"Njals",
"Nokken Forn",
"Nokken Hovedn",
"Nokken Strand",
"Nordbane",
"Nordborg",
"Nordby",
"Nordfeld",
"Skagerrak",
"Nordhavns",
"Nordlands",
"Nordmarks",
"Nordre",
"Nordre Dige",
"Nordre Fasan",
"Nordre Frihavns",
"Nordre Kongelunds",
"Nordrup",
"Nordsø",
"Norges",
"Norgesminde",
"Normandi",
"November",
"Ny Adel",
"Ny Blegdams",
"Nyborg",
"Nybo",
"Nybro",
"Ny",
"Nygårds",
"Ny Kongens",
"Nyminde",
"Nyrnberg",
"Nyrops",
"Nysted",
"Nysø",
"Ny Vester",
"Ny Øster",
"Nærum",
"Næsbyholm",
"Næstved",
"Nøddebo",
"Nøjsomheds",
"Nøkkerose",
"Nørager",
"Nørre",
"Nørrebro",
"Nørre Farimags",
"Nørre Sø",
"Nørretofte",
"Nørre Vold",
"Obdams",
"Ocean",
"Odense",
"Odins",
"Odins Tvær",
"Oehlenschlægers",
"Offenbachs",
"Oldermands",
"Oldfux",
"Oldenborg",
"Olieblads",
"Oliefabriks",
"Oliemølle",
"Olufs",
"Olympos",
"Omø",
"Orgelbygger",
"Orlogsværft",
"Ottilia",
"Otto Baches",
"Ourø",
"Overbys",
"Overdrevs",
"Overn Neden Vandet",
"Overn Oven Vandet",
"Overskous",
"Oxford",
"Padua",
"Pakhus",
"Palermo",
"Pakkeri",
"Palles",
"Palnatokes",
"Palæ",
"Panums",
"Parma",
"Parnas",
"Paros",
"Pasteurs",
"Peiters",
"Per Henrik Lings",
"Perlestikker",
"Pernille",
"Persiens",
"Persille",
"Peter Ipsens",
"Petersborg",
"Philip De Langes",
"Pile",
"Pindos",
"Pistol",
"Platan",
"Polens",
"Pommerns",
"Pomona",
"Poppel",
"Portlands",
"Portugals",
"Postholder",
"Pragtstjerne",
"Primula",
"Prinsesse",
"Prisholm",
"Provste",
"Præstegårds",
"Præstekær",
"Præstemarks",
"Præstø",
"Prøvestens",
"Puggaards",
"Thomas Koppels",
"Pæon",
"Radise",
"Rabarber",
"Raffinaderi",
"Ragna",
"Ragnhild",
"Rahbeks",
"Ramløse",
"Ramsings",
"Ramunds",
"Randbøl",
"Randers",
"Rantzaus",
"Raunstrup",
"Ravenna",
"Ravneholms",
"Ravnsborg",
"Ravnsborg Tvær",
"Rebekka",
"Reberbane",
"Rebild",
"Rebslager",
"Trelleborg",
"Gdansk",
"Reersø",
"Refshale",
"Refsnæs",
"Regitse",
"Reinette",
"Rejsby",
"Remise",
"Rentemester",
"Retort",
"Reventlows",
"Reverdils",
"Reykjaviks",
"Rialto",
"Ribe",
"Ridefoged",
"Riga",
"Rigens",
"Rindby",
"Ringholm",
"Ringkøbing",
"Ringsted",
"Risager",
"Risbyholm",
"Rismose",
"Rodos",
"Romsdals",
"Romsø",
"Rosbæks",
"Roselille",
"Rosenborg",
"Rosendals",
"Rosen",
"Rosenholms",
"Rosenlunds",
"Rosenvængets",
"Rosenvængets Hoved",
"Rosenørns",
"Roshage",
"Roskilde",
"Rosmarin",
"Rossinis",
"Rostgaards",
"Rostock",
"Rothes",
"Rovsings",
"Rubikon",
"Rubinola",
"Rubinsteins",
"Rugager",
"Rughave",
"Rug",
"Rumæniens",
"Rundholts",
"Ruths",
"Ryes",
"Rygårds",
"Rymarks",
"Rysensteens",
"Ryvangs",
"Ræve",
"Rødby",
"Rødding",
"Rødelands",
"Røde Mellem",
"Rødkilde",
"Rødkløver",
"Rødtjørne",
"Rømers",
"Rønnebær",
"Rønne",
"Rønnings",
"Rørholms",
"Rørmose",
"Rørsanger",
"Røså",
"Rådhus",
"Rådmands",
"Rådvads",
"Sadelmager",
"Sakskøbing",
"Salling",
"Saltholms",
"Saltø",
"Samos",
"Samsø",
"Sandbjerg",
"Sandbygård",
"Sandhus",
"Sankelmarks",
"Sankt Jørgens",
"Sassnitz",
"Saxhøj",
"Saxo",
"Saxtorphs",
"Scandia",
"Schacks",
"Scharlings",
"Scherfigs",
"Schleppegrells",
"Schuberts",
"Sejlklub",
"Sejrø",
"Seline",
"Selsø",
"Sele",
"Serbiens",
"Serridslev",
"Shetlands",
"Siam",
"Sibberns",
"Sibelius",
"Siciliens",
"Sigbrits",
"Sigersted",
"Signelil",
"Sigurds",
"Sigyns",
"Siljan",
"Silkeborg",
"Silke",
"Sions",
"Sixtus",
"Sjællands",
"Skaffer",
"Skanderborg",
"Skarø",
"Skelbæk",
"Skelmose",
"Skensved",
"Skibelund",
"Skinder",
"Skipper Clements",
"Skippinge",
"Skjulhøj",
"Skodsborg",
"Skole",
"Skoleholder",
"Flyhangar",
"Skotlands",
"Skotterup",
"Skoubo",
"Skovbogårds",
"Skovgaards",
"Skovløber",
"Skovstjerne",
"Skudehavns",
"Skydebane",
"Skyggelunds",
"Skytte",
"Skyttegård",
"Skåne",
"Slagelse",
"Slagtehus",
"Slangerup",
"Slejpners",
"Slesvigs",
"Slotsfoged",
"Slots",
"Slotsherrens",
"Slotsholms",
"Sluse",
"Slutteri",
"Slåen",
"Smede",
"Smyrna",
"Smørum",
"Smålands",
"Snare",
"H.C. Sneedorffs",
"Sneppe",
"Snertinge",
"Snorres",
"Sofie Brahes",
"Sofie",
"Sofienhøj",
"Sognefjords",
"Sokkelunds",
"Solitude",
"Solrød",
"Solsikke",
"Solskifte",
"Soltofte",
"Summerred",
"Sommersted",
"Sonnerup",
"Sorgenfri",
"Sorrento",
"Sorø",
"Southampton",
"Spanager",
"Spangbergs",
"Spaniens",
"Spanteloft",
"Sparresholm",
"Sparta",
"Speditør",
"Spinderi",
"Spiræa",
"Spontinis",
"Sporemager",
"Spøttrup",
"Stadfeldts",
"Stadil",
"Stald",
"Stampes",
"Statholder",
"Stavanger",
"Stavnstrup",
"Steenbergs",
"Stefans",
"Steins",
"Stemanns",
"Stenderup",
"Sten",
"Stenhugger",
"Stenkløver",
"Stenlands",
"Stenlille",
"Stenløse",
"Stenmagle",
"Stenos",
"Stenrose",
"Sternberg",
"Stevns",
"Stjerne",
"Stockholms",
"Stokhus",
"Stokrose",
"Stoltenbergs",
"Storegårds",
"Store Kannike",
"Store Kirke",
"Store Kongens",
"Store Regne",
"Store Strand",
"Store Søndervold",
"Storm",
"Stradellas",
"Strandager",
"Strand",
"Strandlods",
"Stranden",
"Stratford",
"Strauss",
"Strickers",
"Strindbergs",
"Struensee",
"Strynø",
"Strødam",
"Stubbeløb",
"Stubmølle",
"Studie",
"Studsgaards",
"Sturlas",
"Stære",
"Støberi",
"Støvnæs",
"Støvring",
"Suensons",
"Suhms",
"Sumatra",
"Sundbygårds",
"Sundby Park",
"Sundbyvester",
"Sundeveds",
"Sundholms",
"Sundkrogs",
"Svane",
"Svanemølle",
"Svankær",
"Svendborg",
"Svends",
"Svenstrup",
"Sverrigs",
"Svogerslev",
"Sværte",
"Sydhavns",
"Sydløbs",
"Sylvia",
"Syriens",
"Syvens",
"Syvstens",
"Sæby",
"Sæbyholms",
"Sætersdal",
"Søfort",
"Søllerød",
"Sølunds",
"Sølv",
"Sønderborg",
"Søndermarks",
"Søndervangs",
"Søndervig",
"Søndre",
"Søndre Fasan",
"Søren Norbys",
"Sørup",
"Saabyes",
"Taffelæble",
"Tagens",
"Takkelads",
"Takkelloft",
"Tallinn",
"Tartinis",
"Teglbrænder",
"Teglgård",
"Teglholm",
"Teglholms",
"Teglholm Tvær",
"Teglstrup",
"Teglværks",
"Telemarks",
"Tersløse",
"Theis",
"Thekla",
"Thingvalla",
"Thora",
"Thors",
"Thorshavns",
"Thorsminde",
"Thorupgård",
"Thorups",
"Thurebyholm",
"Thyras",
"Thyregods",
"Thy",
"Tibirke",
"Tietgens",
"Tiger",
"Tikøb",
"Timians",
"Tingskifte",
"Tingskriver",
"Ting",
"Tipsager",
"Tirsbæk",
"Titan",
"Tjæreby",
"Tjørne",
"Tjørnelunds",
"Todes",
"Toftager",
"Toftebakke",
"Toftegårds",
"Toftøje",
"Toldbod",
"Toldskriver",
"Tomat",
"Tomsgårds",
"Tonemester",
"Torbenfeldt",
"Torben Oxes",
"Tordenskjolds",
"Torfa",
"Tornebuske",
"Tornsanger",
"Torve",
"Toskifte",
"Tovelille",
"Tovværk",
"Tranehave",
"Trane",
"Trangravs",
"Traps",
"Trekløver",
"Trekroner",
"Trepkas",
"Troja",
"Tromsø",
"Trondhjems",
"Tryggevælde",
"Trøjborg",
"Tschernings",
"Tuborg",
"Tudseminde",
"Tudskær",
"Tuelands",
"Tulipan",
"Tullins",
"Turesens",
"Tustrup",
"Tuxens",
"Tværager",
"Kiel",
"Tybjerg",
"Tyborøn",
"Tycho Brahes",
"Tyrols",
"Tyttebær",
"Tøjhus",
"Tøjmester",
"Tølløse",
"Tømmergravs",
"Tømrer",
"Tøndebinder",
"Tønder",
"Tønnes",
"Tårnblæser",
"Tårnholms",
"Tårnhus",
"Tåsinge",
"Tåstrup",
"Udby",
"Uffes",
"Uggerløse",
"Ugle",
"Ullerup",
"Ulrik Birchs",
"Ulriksdal",
"Ulvefod",
"Ulvsund",
"Ungarns",
"Uplands",
"Upsala",
"Ural",
"Urbans",
"Utterslev",
"Wagners",
"Vagtmester",
"Valborg",
"Valbygårds",
"Valby Kirke",
"Valby Lang",
"Valby Maskinfabriks",
"Valby Torve",
"Valdemars",
"Valgårds",
"Valhals",
"Valhøj",
"Valkendorfs",
"Valkyrie",
"Vallekilde",
"Vallø",
"Valmue",
"Valnødde",
"Vangehus",
"Vangs",
"Vangså",
"Vanløse",
"Vanløse By",
"Varde",
"Vasby",
"Vatna",
"Webers",
"Vedbæk",
"Weidekamps",
"Weimar",
"Vejlands",
"Vejle",
"Vejrø",
"Veksø",
"Venders",
"Vendsyssel",
"Venedig",
"Vennely",
"Venneminde",
"Venø",
"Veras",
"Verdis",
"Vermlands",
"Vermunds",
"Verona",
"Wessels",
"Vestager",
"Vestbane",
"Vesterbro",
"Vester Farimags",
"Vesterfælled",
"Vester",
"Vestergårds",
"Vesterhavs",
"Vestermarks",
"Vester Sø",
"Vestervig",
"Vester Vold",
"Vestmanna",
"Vestre",
"Vestre Kirkegårds",
"Vestre Tegl",
"Weyses",
"Vibeke",
"Vibe",
"Viborg",
"Wibrandts",
"Wiedewelts",
"Vigerslev",
"Vigerslev Gods",
"Wiinblads",
"Viktoria",
"Vildande",
"Wilders",
"Vilhelm Thomsens",
"Willemoes",
"Willums",
"Vindebro",
"Vindinge",
"Vindmølle",
"Vindrue",
"Windsor",
"Vingård",
"Visby",
"Wittenberg",
"Vognborg",
"Vognmager",
"Vogter",
"Voldmester",
"Volos",
"Wolters",
"Vordingborg",
"Vulkan",
"Væbner",
"Værksted",
"Værnedams",
"Væver",
"Vølunds",
"Vånings",
"Yderlands",
"Yduns",
"Ystad",
"Zinns",
"Æbelø",
"Æble",
"Ægina",
"Ægirs",
"Ærtebjerg",
"Ærtelands",
"Ærte",
"Ætna",
"Ølands",
"Øresund Park",
"Øresundsmotoren",
"Øresunds",
"Ørevads",
"Ørhage",
"Ørholm",
"Ørne",
"Øsels",
"Østbane",
"Øster",
"Østerbro",
"Østerdals",
"Øster Farimags",
"Østergårds",
"Øster Sø",
"Østersø",
"Øster Vold",
"Østre Tegl",
"Østrigs",
"Åbakke",
"Åbjerg",
"Ådals",
"Å",
"Ågerup",
"Åkande",
"Ålands",
"Ålborg",
"Ålekiste",
"Ålholm",
"Ålstrup",
"Åløkke",
"Aarestrups",
"Århus",
"Mælkeen",
"Løvstikke",
"Murmansk",
"Antwerpen",
"Travemünde",
"Bohrs",
"Cylinder",
"Støbegods",
"Pladehals",
"Kul",
"Diesel",
"Gloster",
"Burgundy",
"Paradisæble",
)
street_suffixes = (
# gade and vej are the most common so they should be oversampled
"gade",
"gade",
"stræde",
"vej",
"vej",
"vej",
" Allé",
)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
# Postcode should be formatted as described in http://www.nr.dk/danmark.html
postcode_formats = ("%###",)
city_formats = ("{{city_name}}",)
cities = (
"Allinge",
"Allingåbro",
"Almind",
"Anholt",
"Ans by",
"Ansager",
"Arden",
"Askeby",
"Asnæs",
"Asperup",
"Assens",
"Asaa",
"Augustenborg",
"Aulum",
"Auning",
"Bagenkop",
"Bagsværd",
"Balle",
"Ballerup",
"Bandholm",
"Barrit",
"Beder",
"Bedsted Thy",
"Bevtoft",
"Billum",
"Billund",
"Bindslev",
"Birkerød",
"Bjerringbro",
"Bjert",
"Bjæverskov",
"Blokhus",
"Blommenslyst",
"Blåvand",
"Boeslunde",
"Bogense",
"Bogø By",
"Bolderslev",
"Bording",
"Borre",
"Borup",
"Brabrand",
"Bramming",
"Brande",
"Branderup",
"Bredebro",
"Bredsten",
"Brenderup",
"Broager",
"Broby",
"Brovst",
"Bryrup",
"Brædstrup",
"Brøndby",
"Brøndby Strand",
"Brønderslev",
"Brønshøj",
"Brørup",
"Bylderup-Bov",
"Bække",
"Bækmarksbro",
"Bælum",
"Børkop",
"Bøvlingbjerg",
"Charlottenlund",
"Christiansfeld",
"Dalby",
"Dalmose",
"Dannemare",
"Daugård",
"Dianalund",
"Dragør",
"Dronninglund",
"Dronningmølle",
"Dybvad",
"Ebberup",
"Ebeltoft",
"Egernsund",
"Egtved",
"Egå",
"Ejby",
"Ejstrupholm",
"Engesvang",
"Errindlev",
"Erslev",
"Esbjerg",
"Eskebjerg",
"Eskilstrup",
"Espergærde",
"Fakse",
"Fakse Ladeplads",
"Fanø",
"Farsø",
"Farum",
"Fejø",
"Ferritslev Fyn",
"Fjenneslev",
"Fjerritslev",
"Flemming",
"Fredensborg",
"Fredericia",
"Frederiksberg",
"Frederikshavn",
"Frederikssund",
"Frederiksværk",
"Frørup",
"Frøstrup",
"Fuglebjerg",
"Fur",
"Føllenslev",
"Føvling",
"Faaborg",
"Fårevejle",
"Fårup",
"Fårvang",
"Gadbjerg",
"Gadstrup",
"Galten",
"Gandrup",
"Gedser",
"Gedsted",
"Gedved",
"Gelsted",
"Gentofte",
"Gesten",
"Gilleleje",
"Gislev",
"Gislinge",
"Gistrup",
"Give",
"Gjerlev",
"Gjern",
"Glamsbjerg",
"Glejbjerg",
"Glesborg",
"Glostrup",
"Glumsø",
"Gram",
"Gredstedbro",
"Grenaa",
"Greve",
"Greve Strand",
"Grevinge",
"Grindsted",
"Græsted",
"Gråsten",
"Gudbjerg",
"Gudhjem",
"Gudme",
"Guldborg",
"Gørding",
"Gørlev",
"Gørløse",
"Haderslev",
"Haderup",
"Hadsten",
"Hadsund",
"Hagersten",
"Hals",
"Hammel",
"Hampen",
"Hanstholm",
"Harboøre",
"Harlev",
"Harndrup",
"Harpelunde",
"Hasle",
"Haslev",
"Hasselager",
"Havdrup",
"Havndal",
"Hedehusene",
"Hedensted",
"Hejls",
"Hejnsvig",
"Hellebæk",
"Hellerup",
"Helsinge",
"Helsingør",
"Hemmet",
"Henne",
"Herfølge",
"Herlev",
"Herlufmagle",
"Herning",
"Hesselager",
"Hillerød",
"Hinnerup",
"Hirtshals",
"Hjallerup",
"Hjerm",
"Hjortshøj",
"Hjørring",
"Hobro",
"Holbæk",
"Holeby",
"Holme-Olstrup",
"Holstebro",
"Holsted",
"Holte",
"Horbelev",
"Hornbæk",
"Hornslet",
"Hornsyld",
"Horsens",
"Horslunde",
"Hovborg",
"Hovedgård",
"Humble",
"Humlebæk",
"Hundested",
"Hundslund",
"Hurup Thy",
"Hvalsø",
"Hvide Sande",
"Hvidovre",
"Højbjerg",
"Højby",
"Højer",
"Højslev",
"Høng",
"Hørning",
"Hørsholm",
"Hørve",
"Haarby",
"Hårlev",
"Idestrup",
"Ikast",
"Ishøj",
"Janderup",
"Jelling",
"Jerslev",
"Jerslev",
"Jerup",
"Jordrup",
"Juelsminde",
"Jyderup",
"Jyllinge",
"Jystrup",
"Jægerspris",
"Kalundborg",
"Kalvehave",
"Karby",
"Karise",
"Karlslunde",
"Karrebæksminde",
"Karup",
"Kastrup",
"Kerteminde",
"Kettinge",
"Kibæk",
"Kirke Eskilstrup",
"Kirke Hyllinge",
"Kirke Såby",
"Kjellerup",
"Klampenborg",
"Klarup",
"Klemensker",
"Klippinge",
"Klovborg",
"Knebel",
"Kokkedal",
"Kolding",
"Kolind",
"Kongens Lyngby",
"Kongerslev",
"Korsør",
"Kruså",
"Kvistgård",
"Kværndrup",
"København",
"Køge",
"Langebæk",
"Langeskov",
"Langå",
"Lejre",
"Lem",
"Lemming",
"Lemvig",
"Lille Skensved",
"Lintrup",
"Liseleje",
"Lundby",
"Lunderskov",
"Lynge",
"Lystrup",
"Læsø",
"Løgstrup",
"Løgstør",
"Løgumkloster",
"Løkken",
"Løsning",
"Låsby",
"Malling",
"Mariager",
"Maribo",
"Marslev",
"Marstal",
"Martofte",
"Melby",
"Mern",
"Mesinge",
"Middelfart",
"Millinge",
"Morud",
"Munke Bjergby",
"Munkebo",
"Møldrup",
"Mørke",
"Mørkøv",
"Måløv",
"Mårslet",
"Nakskov",
"Nexø",
"Nibe",
"Nimtofte",
"Nivå",
"Nordborg",
"Nyborg",
"Nykøbing F",
"Nykøbing M",
"Nykøbing Sj",
"Nyrup",
"Nysted",
"Nærum",
"Næstved",
"Nørager",
"Nørre Alslev",
"Nørre Asmindrup",
"Nørre Nebel",
"Nørre Snede",
"Nørre Aaby",
"Nørreballe",
"Nørresundby",
"Odder",
"Odense",
"Oksbøl",
"Otterup",
"Oure",
"Outrup",
"Padborg",
"Pandrup",
"Præstø",
"Randbøl",
"Randers",
"Ranum",
"Rask Mølle",
"Redsted",
"Regstrup",
"Ribe",
"Ringe",
"Ringkøbing",
"Ringsted",
"Risskov",
"Roskilde",
"Roslev",
"Rude",
"Rudkøbing",
"Ruds Vedby",
"Rungsted Kyst",
"Ry",
"Rynkeby",
"Ryomgård",
"Ryslinge",
"Rødby",
"Rødding",
"Rødekro",
"Rødkærsbro",
"Rødovre",
"Rødvig Stevns",
"Rømø",
"Rønde",
"Rønne",
"Rønnede",
"Rørvig",
"Sabro",
"Sakskøbing",
"Saltum",
"Samsø",
"Sandved",
"Sejerø",
"Silkeborg",
"Sindal",
"Sjællands Odde",
"Sjølund",
"Skagen",
"Skals",
"Skamby",
"Skanderborg",
"Skibby",
"Skive",
"Skjern",
"Skodsborg",
"Skovlunde",
"Skælskør",
"Skærbæk",
"Skævinge",
"Skødstrup",
"Skørping",
"Skårup",
"Slagelse",
"Slangerup",
"Smørum",
"Snedsted",
"Snekkersten",
"Snertinge",
"Solbjerg",
"Solrød Strand",
"Sommersted",
"Sorring",
"Sorø",
"Spenstrup",
"Spjald",
"Sporup",
"Spøttrup",
"Stakroge",
"Stege",
"Stenderup",
"Stenlille",
"Stenløse",
"Stenstrup",
"Stensved",
"Stoholm",
"Stokkemarke",
"Store Fuglede",
"Store Heddinge",
"Store Merløse",
"Storvorde",
"Stouby",
"Strandby",
"Struer",
"Strøby",
"Stubbekøbing",
"Støvring",
"Suldrup",
"Sulsted",
"Sunds",
"Svaneke",
"Svebølle",
"Svendborg",
"Svenstrup",
"Svinninge",
"Sydals",
"Sæby",
"Søborg",
"Søby Ærø",
"Søllested",
"Sønder Felding",
"Sønder Omme",
"Sønder Stenderup",
"Sønderborg",
"Søndersø",
"Sørvad",
"Tappernøje",
"Tarm",
"Terndrup",
"Them",
"Thisted",
"Thorsø",
"Thyborøn",
"Thyholm",
"Tikøb",
"Tilst",
"Tim",
"Tinglev",
"Tistrup",
"Tisvildeleje",
"Tjele",
"Tjæreborg",
"Toftlund",
"Tommerup",
"Toreby",
"Torrig",
"Tranbjerg J",
"Tranekær",
"Trige",
"Trustrup",
"Tureby",
"Tylstrup",
"Tølløse",
"Tønder",
"Tørring",
"Tårs",
"Taastrup",
"Ugerløse",
"Uldum",
"Ulfborg",
"Ullerslev",
"Ulstrup",
"Vadum",
"Valby",
"Vallensbæk",
"Vallensbæk Strand",
"Vamdrup",
"Vandel",
"Vanløse",
"Varde",
"Vedbæk",
"Veflinge",
"Vejby",
"Vejen",
"Vejers Strand",
"Vejle",
"Vejle Øst",
"Vejstrup",
"Veksø Sjælland",
"Vemb",
"Vemmelev",
"Vesløs",
"Vestbjerg",
"Vester Skerninge",
"Vesterborg",
"Vestervig",
"Viborg",
"Viby J",
"Viby Sjælland",
"Videbæk",
"Vig",
"Vildbjerg",
"Vils",
"Vinderup",
"Vipperød",
"Virum",
"Vissenbjerg",
"Viuf",
"Vodskov",
"Vojens",
"Vonge",
"Vorbasse",
"Vordingborg",
"Vrå",
"Væggerløse",
"Værløse",
"Ærøskøbing",
"Ølgod",
"Ølsted",
"Ølstykke",
"Ørbæk",
"Ørnhøj",
"Ørsted",
"Ørum Djurs",
"Østbirk",
"Øster Assels",
"Øster Ulslev",
"Øster Vrå",
"Østermarie",
"Aabenraa",
"Aabybro",
"Åbyhøj",
"Aakirkeby",
"Aalborg",
"Ålbæk",
"Aalestrup",
"Ålsgårde",
"Århus",
"Årre",
"Aars",
"Årslev",
"Aarup",
)
# Country names, in Danish, used by the country() provider method.
countries = (
    "Afghanistan",
    "Albanien",
    "Algeriet",
    "Andorra",
    "Angola",
    "Antigua og Barbuda",
    "Argentina",
    "Armenien",
    "Aserbajdsjan",
    "Australien",
    "Bahamas",
    "Bahrain",
    "Bangladesh",
    "Barbados",
    "Belgien",
    "Belize",
    "Benin",
    "Bermuda",
    "Bhutan",
    "Bolivia",
    "Bosnien og Hercegovina",
    "Botswana",
    "Brasilien",
    "Brunei",
    "Bulgarien",
    "Burkina Faso",
    "Burma",
    "Burundi",
    "Cambodja",
    "Cameroun",
    "Canada",
    "Centralafrikanske Republik",
    "Chile",
    "Colombia",
    "Comorerne",
    "Republikken Congo",
    "Den Demokratiske Republik Congo",
    "Costa Rica",
    "Cuba",
    "Cypern",
    "Danmark",
    "Djibouti",
    "Dominica",
    "Dominikanske Republik",
    "Ecuador",
    "Egypten",
    "El Salvador",
    "Elfenbenskysten",
    "Eritrea",
    "Estland",
    "Etiopien",
    "Fiji",
    "Filippinerne",
    "Finland",
    "Forenede Arabiske Emirater",
    "Frankrig",
    "Færøerne",
    "Gabon",
    "Gambia",
    "Georgien",
    "Ghana",
    "Grenada",
    "Grækenland",
    "Grønland",
    "Guatemala",
    "Guinea",
    "Guinea-Bissau",
    "Guyana",
    "Fransk Guiana",
    "Haiti",
    "Holland",
    "Honduras",
    "Hviderusland",
    "Indien",
    "Indonesien",
    "Irak",
    "Iran",
    "Irland",
    "Island",
    "Israel",
    "Italien",
    "Jamaica",
    "Japan",
    "Jordan",
    "Kap Verde",
    "Kasakhstan",
    "Kenya",
    "Kina",
    "Kirgisistan",
    "Kiribati",
    "Kroatien",
    "Kuwait",
    "Laos",
    "Lesotho",
    "Letland",
    "Libanon",
    "Liberia",
    "Libyen",
    "Liechtenstein",
    "Litauen",
    "Luxembourg",
    "Madagaskar",
    "Malawi",
    "Malaysia",
    "Maldiverne",
    "Mali",
    "Malta",
    "Marokko",
    "Marshall-øerne",
    "Mauretanien",
    "Mauritius",
    "Mexico",
    "Mikronesien",
    "Moldova",
    "Monaco",
    "Mongoliet",
    "Montenegro",
    "Mozambique",
    "Myanmar",
    "Namibia",
    "Nauru",
    "Nederlandske Antiller",
    "Nepal",
    "New Zealand",
    "Nicaragua",
    "Niger",
    "Nigeria",
    "Niue",
    "Nordkorea",
    "Nordmakedonien",
    "Norge",
    "Oman",
    "Pakistan",
    "Palau",
    "Palæstinensisk Selvstyreområde",
    "Panama",
    "Papua Ny Guinea",
    "Paraguay",
    "Peru",
    "Pitcairn",
    "Polen",
    "Portugal",
    "Puerto Rico",
    "Qatar",
    "Rumænien",
    "Rusland",
    "Rwanda",
    "Saint Kitts and Nevis",
    "Saint Lucia",
    "Saint Vincent og Grenadinerne",
    "Salomonøerne",
    "Samoa",
    "San Marino",
    "São Tomé og Príncipe",
    "Saudi-Arabien",
    "Schweiz",
    "Senegal",
    "Serbien",
    "Seychellerne",
    "Sierra Leone",
    "Singapore",
    "Slovakiet",
    "Slovenien",
    "Somalia",
    "Spanien",
    "Sri Lanka",
    "Storbritannien",
    "Sudan",
    "Surinam",
    "Sverige",
    "Swaziland",
    "Sydafrika",
    "Sydkorea",
    "Syrien",
    "Tadsjikistan",
    "Taiwan",
    "Tanzania",
    "Tchad",
    "Thailand",
    "Tjekkiet",
    "Tjetjenien",
    "Togo",
    "Tonga",
    "Trinidad og Tobago",
    "Tunesien",
    "Turkmenistan",
    "Tuvalu",
    "Tyrkiet",
    "Tyskland",
    "Uganda",
    "Ukraine",
    "Ungarn",
    "Uruguay",
    "USA",
    "Usbekistan",
    "Vanuatu",
    "Vatikanstaten",
    "Venezuela",
    "Vestsahara",
    "Vietnam",
    "Yemen",
    "Zambia",
    "Zimbabwe",
    "Ækvatorialguinea",
    "Østrig",
    "Østtimor",
)
# Danish first-level administrative units. Denmark calls these "regions",
# but the attribute is named `states` to match the shared provider interface.
states = (
    "Hovedstaden",
    "Midtjylland",
    "Nordjylland",
    "Sjælland",
    "Syddanmark",
)
def dk_street_name(self) -> str:
    """Return a random Danish street name, without any suffix."""
    chosen: str = self.random_element(self.street_names)
    return chosen
def city_name(self) -> str:
    """Return a random city name drawn from the provider's city list."""
    chosen: str = self.random_element(self.cities)
    return chosen
def administrative_unit(self) -> str:
    """Return a random administrative unit (known as a region in Denmark)."""
    chosen: str = self.random_element(self.states)
    return chosen

# Alias kept so callers using the generic `state` name keep working.
state = administrative_unit
| Provider |
python | sphinx-doc__sphinx | sphinx/util/docfields.py | {
"start": 8310,
"end": 11645
} | class ____(GroupedField):
"""A doc field that is grouped and has type information for the arguments. It
always has an argument. The argument can be linked using the given
*rolename*, the type using the given *typerolename*.
Two uses are possible: either parameter and type description are given
separately, using a field from *names* and one from *typenames*,
respectively, or both are given using a field from *names*, see the example.
Example::
:param foo: description of parameter foo
:type foo: SomeClass
-- or --
:param SomeClass foo: description of parameter foo
"""
is_typed = True
def __init__(
self,
name: str,
names: tuple[str, ...] = (),
typenames: tuple[str, ...] = (),
label: str = '',
rolename: str = '',
typerolename: str = '',
can_collapse: bool = False,
) -> None:
super().__init__(name, names, label, rolename, can_collapse)
self.typenames = typenames
self.typerolename = typerolename
def make_field(
self,
types: _FieldTypes,
domain: str,
items: list[_FieldEntry], # type: ignore[override]
env: BuildEnvironment | None = None,
inliner: Inliner | None = None,
location: Element | None = None,
) -> nodes.field:
def handle_item(fieldarg: str, content: list[Node]) -> nodes.paragraph:
par = nodes.paragraph()
par.extend(
self.make_xrefs(
self.rolename, domain, fieldarg, addnodes.literal_strong, env=env
)
)
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = fieldtype[0].astext()
par.extend(
self.make_xrefs(
self.typerolename,
domain,
typename,
addnodes.literal_emphasis,
env=env,
inliner=inliner,
location=location,
)
)
else:
par += fieldtype
par += nodes.Text(')')
has_content = any(c.astext().strip() for c in content)
if has_content:
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode: Node = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
| TypedField |
python | celery__celery | t/unit/backends/test_cache.py | {
"start": 370,
"end": 445
} | class ____:
def __init__(self, data):
self.data = data
| SomeClass |
python | nedbat__coveragepy | tests/test_concurrency.py | {
"start": 5060,
"end": 12309
} | class ____(CoverageTest):
"""Tests of the concurrency support in coverage.py."""
QLIMIT = 1000
def try_some_code(
self,
code: str,
concurrency: str,
the_module: ModuleType,
expected_out: str | None = None,
) -> None:
"""Run some concurrency testing code and see that it was all covered.
`code` is the Python code to execute. `concurrency` is the name of
the concurrency regime to test it under. `the_module` is the imported
module that must be available for this to work at all. `expected_out`
is the text we expect the code to produce.
"""
self.make_file("try_it.py", code)
cmd = f"coverage run --concurrency={concurrency} try_it.py"
_, out = self.run_command_status(cmd)
expected_cant_trace = cant_trace_msg(concurrency, the_module)
if expected_cant_trace is not None:
assert expected_cant_trace in out
pytest.skip(f"Can't test: {expected_cant_trace}")
else:
# We can fully measure the code if we are using the C tracer, which
# can support all the concurrency, or if we are using threads.
if expected_out is None:
expected_out = "%d\n" % (sum(range(self.QLIMIT)))
print(code)
assert out == expected_out
# Read the coverage file and see that try_it.py has all its lines
# executed.
data = coverage.CoverageData(".coverage")
data.read()
# If the test fails, it's helpful to see this info:
fname = abs_file("try_it.py")
linenos = data.lines(fname)
assert linenos is not None
print(f"{len(linenos)}: {linenos}")
print_simple_annotation(code, linenos)
lines = line_count(code)
assert line_counts(data)["try_it.py"] == lines
@pytest.mark.skipif(
not testenv.CAN_MEASURE_THREADS, reason="Can't measure threads with this core."
)
def test_threads(self) -> None:
code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "thread", threading)
@pytest.mark.skipif(
not testenv.CAN_MEASURE_THREADS, reason="Can't measure threads with this core."
)
def test_threads_simple_code(self) -> None:
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "thread", threading)
def test_eventlet(self) -> None:
code = (EVENTLET + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "eventlet", eventlet)
def test_eventlet_simple_code(self) -> None:
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "eventlet", eventlet)
# https://github.com/coveragepy/coveragepy/issues/663
@pytest.mark.skipif(env.WINDOWS, reason="gevent has problems on Windows: #663")
def test_gevent(self) -> None:
code = (GEVENT + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "gevent", gevent)
def test_gevent_simple_code(self) -> None:
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "gevent", gevent)
def test_greenlet(self) -> None:
GREENLET = """\
from greenlet import greenlet
def test1(x, y):
z = gr2.switch(x+y)
print(z)
def test2(u):
print(u)
gr1.switch(42)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
gr1.switch("hello", " world")
"""
self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n")
def test_greenlet_simple_code(self) -> None:
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "greenlet", greenlet)
def test_bug_330(self) -> None:
BUG_330 = """\
from weakref import WeakKeyDictionary
import eventlet
def do():
eventlet.sleep(.01)
gts = WeakKeyDictionary()
for _ in range(100):
gts[eventlet.spawn(do)] = True
eventlet.sleep(.005)
eventlet.sleep(.1)
print(len(gts))
"""
self.try_some_code(BUG_330, "eventlet", eventlet, "0\n")
# Sometimes a test fails due to inherent randomness. Try more times.
@pytest.mark.skipif(
not testenv.CAN_MEASURE_THREADS, reason="Can't measure threads with this core."
)
@pytest.mark.flaky(max_runs=3)
def test_threads_with_gevent(self) -> None:
self.make_file(
"both.py",
"""\
import queue
import threading
import gevent
def work1(q):
q.put(1)
def gwork(q):
gevent.spawn(work1, q).join()
q.put(None)
print("done")
q = queue.Queue()
t = threading.Thread(target=gwork, args=(q,))
t.start()
t.join()
answer = q.get()
assert answer == 1
""",
)
_, out = self.run_command_status("coverage run --concurrency=thread,gevent both.py")
if gevent is None:
assert "Couldn't trace with concurrency=gevent, the module isn't installed.\n" in out
pytest.skip("Can't run test without gevent installed.")
if not testenv.C_TRACER:
assert testenv.PY_TRACER
assert out == (
"Can't support concurrency=gevent with PyTracer, only threads are supported.\n"
)
pytest.skip(f"Can't run gevent with {testenv.REQUESTED_TRACER_CLASS}.")
assert out == "done\n"
out = self.run_command("coverage report -m")
last_line = self.squeezed_lines(out)[-1]
assert re.search(r"TOTAL \d+ 0 100%", last_line)
def test_bad_concurrency(self) -> None:
with pytest.raises(ConfigError, match="Unknown concurrency choices: nothing"):
self.command_line("run --concurrency=nothing prog.py")
def test_bad_concurrency_in_config(self) -> None:
self.make_file(".coveragerc", "[run]\nconcurrency = nothing\n")
with pytest.raises(ConfigError, match="Unknown concurrency choices: nothing"):
self.command_line("run prog.py")
def test_no_multiple_light_concurrency(self) -> None:
with pytest.raises(ConfigError, match="Conflicting concurrency settings: eventlet, gevent"):
self.command_line("run --concurrency=gevent,eventlet prog.py")
def test_no_multiple_light_concurrency_in_config(self) -> None:
self.make_file(".coveragerc", "[run]\nconcurrency = gevent, eventlet\n")
with pytest.raises(ConfigError, match="Conflicting concurrency settings: eventlet, gevent"):
self.command_line("run prog.py")
def test_multiprocessing_needs_config_file(self) -> None:
with pytest.raises(ConfigError, match="multiprocessing requires a configuration file"):
self.command_line("run --concurrency=multiprocessing prog.py")
| ConcurrencyTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/dirichlet_multinomial_test.py | {
"start": 1188,
"end": 19483
} | class ____(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_deprecated_v1
def testSimpleShapes(self):
with self.cached_session():
alpha = np.random.rand(3)
dist = ds.DirichletMultinomial(1., alpha)
self.assertEqual(3, dist.event_shape_tensor().eval())
self.assertAllEqual([], dist.batch_shape_tensor())
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
@test_util.run_deprecated_v1
def testComplexShapes(self):
with self.cached_session():
alpha = np.random.rand(3, 2, 2)
n = [[3., 2], [4, 5], [6, 7]]
dist = ds.DirichletMultinomial(n, alpha)
self.assertEqual(2, dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2], dist.batch_shape_tensor())
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
@test_util.run_deprecated_v1
def testNproperty(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.cached_session():
dist = ds.DirichletMultinomial(n, alpha)
self.assertEqual([1, 1], dist.total_count.get_shape())
self.assertAllClose(n, dist.total_count)
@test_util.run_deprecated_v1
def testAlphaProperty(self):
alpha = [[1., 2, 3]]
with self.cached_session():
dist = ds.DirichletMultinomial(1, alpha)
self.assertEqual([1, 3], dist.concentration.get_shape())
self.assertAllClose(alpha, dist.concentration)
@test_util.run_deprecated_v1
def testPmfNandCountsAgree(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.cached_session():
dist = ds.DirichletMultinomial(n, alpha, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
with self.assertRaisesOpError("must be non-negative"):
dist.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError(
"last-dimension must sum to `self.total_count`"):
dist.prob([3., 3, 0]).eval()
@test_util.run_deprecated_v1
def testPmfNonIntegerCounts(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.cached_session():
dist = ds.DirichletMultinomial(n, alpha, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
dist.prob([3.0, 0, 2.0]).eval()
# Both equality and integer checking fail.
placeholder = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(
"cannot contain fractional components"):
dist.prob(placeholder).eval(feed_dict={placeholder: [1.0, 2.5, 1.5]})
dist = ds.DirichletMultinomial(n, alpha, validate_args=False)
dist.prob([1., 2., 3.]).eval()
# Non-integer arguments work.
dist.prob([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.cached_session():
# Both zero-batches. No broadcast
alpha = [1., 2]
counts = [1., 0]
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1 / 3., self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.cached_session():
# Both zero-batches. No broadcast
alpha = [1., 2]
counts = [3., 2]
dist = ds.DirichletMultinomial(5., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1 / 7., self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesMultidimensionalN(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.cached_session():
alpha = [1., 2]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
dist = ds.DirichletMultinomial(n, alpha)
pmf = dist.prob(counts)
self.assertAllClose([[1 / 7., 1 / 7., 1 / 7.]] * 4, self.evaluate(pmf))
self.assertEqual((4, 3), pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenSameRank(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.cached_session():
alpha = [[1., 2]]
counts = [[1., 0], [0., 1]]
dist = ds.DirichletMultinomial([1.], alpha)
pmf = dist.prob(counts)
self.assertAllClose([1 / 3., 2 / 3.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenLowerRank(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.cached_session():
alpha = [1., 2]
counts = [[1., 0], [0., 1]]
pmf = ds.DirichletMultinomial(1., alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 3.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.cached_session():
alpha = [[1., 2], [2., 3]]
counts = [[1., 0]]
pmf = ds.DirichletMultinomial([1., 1.], alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 5.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.cached_session():
alpha = [[1., 2], [2., 3]]
counts = [1., 0]
pmf = ds.DirichletMultinomial(1., alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 5.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
@test_util.run_deprecated_v1
def testPmfForOneVoteIsTheMeanWithOneRecordInput(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
alpha = [1., 2, 3]
with self.cached_session():
for class_num in range(3):
counts = np.zeros([3], dtype=np.float32)
counts[class_num] = 1
dist = ds.DirichletMultinomial(1., alpha)
mean = dist.mean().eval()
pmf = dist.prob(counts).eval()
self.assertAllClose(mean[class_num], pmf)
self.assertAllEqual([3], mean.shape)
self.assertAllEqual([], pmf.shape)
@test_util.run_deprecated_v1
def testMeanDoubleTwoVotes(self):
# The probabilities of two votes falling into class k for
# DirichletMultinomial(2, alpha) is twice as much as the probability of one
# vote falling into class k for DirichletMultinomial(1, alpha)
alpha = [1., 2, 3]
with self.cached_session():
for class_num in range(3):
counts_one = np.zeros([3], dtype=np.float32)
counts_one[class_num] = 1.
counts_two = np.zeros([3], dtype=np.float32)
counts_two[class_num] = 2
dist1 = ds.DirichletMultinomial(1., alpha)
dist2 = ds.DirichletMultinomial(2., alpha)
mean1 = dist1.mean().eval()
mean2 = dist2.mean().eval()
self.assertAllClose(mean2[class_num], 2 * mean1[class_num])
self.assertAllEqual([3], mean1.shape)
@test_util.run_deprecated_v1
def testCovarianceFromSampling(self):
# We will test mean, cov, var, stddev on a DirichletMultinomial constructed
# via broadcast between alpha, n.
alpha = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
# Ideally we'd be able to test broadcasting but, the multinomial sampler
# doesn't support different total counts.
n = np.float32(5)
with self.cached_session() as sess:
# batch_shape=[2], event_shape=[3]
dist = ds.DirichletMultinomial(n, alpha)
x = dist.sample(int(250e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[array_ops.newaxis, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., array_ops.newaxis],
x_centered[..., array_ops.newaxis, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = sess.run([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0.04, rtol=0.)
self.assertAllClose(sample_cov_, analytic_cov, atol=0.05, rtol=0.)
self.assertAllClose(sample_var_, analytic_var, atol=0.05, rtol=0.)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.02, rtol=0.)
@test_util.run_without_tensor_float_32(
"Tests DirichletMultinomial.covariance, which calls matmul")
def testCovariance(self):
# Shape [2]
alpha = [1., 2]
ns = [2., 3., 4., 5.]
alpha_0 = np.sum(alpha)
# Diagonal entries are of the form:
# Var(X_i) = n * alpha_i / alpha_sum * (1 - alpha_i / alpha_sum) *
# (alpha_sum + n) / (alpha_sum + 1)
variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)
# Off diagonal entries are of the form:
# Cov(X_i, X_j) = -n * alpha_i * alpha_j / (alpha_sum ** 2) *
# (alpha_sum + n) / (alpha_sum + 1)
covariance_entry = lambda a, b, a_sum: -a * b / a_sum**2
# Shape [2, 2].
shared_matrix = np.array([[
variance_entry(alpha[0], alpha_0),
covariance_entry(alpha[0], alpha[1], alpha_0)
], [
covariance_entry(alpha[1], alpha[0], alpha_0),
variance_entry(alpha[1], alpha_0)
]])
with self.cached_session():
for n in ns:
# n is shape [] and alpha is shape [2].
dist = ds.DirichletMultinomial(n, alpha)
covariance = dist.covariance()
expected_covariance = n * (n + alpha_0) / (1 + alpha_0) * shared_matrix
self.assertEqual([2, 2], covariance.get_shape())
self.assertAllClose(expected_covariance, self.evaluate(covariance))
def testCovarianceNAlphaBroadcast(self):
alpha_v = [1., 2, 3]
alpha_0 = 6.
# Shape [4, 3]
alpha = np.array(4 * [alpha_v], dtype=np.float32)
# Shape [4, 1]
ns = np.array([[2.], [3.], [4.], [5.]], dtype=np.float32)
variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)
covariance_entry = lambda a, b, a_sum: -a * b / a_sum**2
# Shape [4, 3, 3]
shared_matrix = np.array(
4 * [[[
variance_entry(alpha_v[0], alpha_0),
covariance_entry(alpha_v[0], alpha_v[1], alpha_0),
covariance_entry(alpha_v[0], alpha_v[2], alpha_0)
], [
covariance_entry(alpha_v[1], alpha_v[0], alpha_0),
variance_entry(alpha_v[1], alpha_0),
covariance_entry(alpha_v[1], alpha_v[2], alpha_0)
], [
covariance_entry(alpha_v[2], alpha_v[0], alpha_0),
covariance_entry(alpha_v[2], alpha_v[1], alpha_0),
variance_entry(alpha_v[2], alpha_0)
]]],
dtype=np.float32)
with self.cached_session():
# ns is shape [4, 1], and alpha is shape [4, 3].
dist = ds.DirichletMultinomial(ns, alpha)
covariance = dist.covariance()
expected_covariance = shared_matrix * (
ns * (ns + alpha_0) / (1 + alpha_0))[..., array_ops.newaxis]
self.assertEqual([4, 3, 3], covariance.get_shape())
self.assertAllClose(expected_covariance, self.evaluate(covariance))
def testCovarianceMultidimensional(self):
alpha = np.random.rand(3, 5, 4).astype(np.float32)
alpha2 = np.random.rand(6, 3, 3).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5, 1]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1, 1]).astype(np.float32)
with self.cached_session():
dist = ds.DirichletMultinomial(ns, alpha)
dist2 = ds.DirichletMultinomial(ns2, alpha2)
covariance = dist.covariance()
covariance2 = dist2.covariance()
self.assertEqual([3, 5, 4, 4], covariance.get_shape())
self.assertEqual([6, 3, 3, 3], covariance2.get_shape())
def testZeroCountsResultsInPmfEqualToOne(self):
# There is only one way for zero items to be selected, and this happens with
# probability 1.
alpha = [5, 0.5]
counts = [0., 0]
with self.cached_session():
dist = ds.DirichletMultinomial(0., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1.0, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testLargeTauGivesPreciseProbabilities(self):
# If tau is large, we are doing coin flips with probability mu.
mu = np.array([0.1, 0.1, 0.8], dtype=np.float32)
tau = np.array([100.], dtype=np.float32)
alpha = tau * mu
# One (three sided) coin flip. Prob[coin 3] = 0.8.
# Note that since it was one flip, value of tau didn't matter.
counts = [0., 0, 1]
with self.cached_session():
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.8, self.evaluate(pmf), atol=1e-4)
self.assertEqual((), pmf.get_shape())
# Two (three sided) coin flips. Prob[coin 3] = 0.8.
counts = [0., 0, 2]
with self.cached_session():
dist = ds.DirichletMultinomial(2., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.8**2, self.evaluate(pmf), atol=1e-2)
self.assertEqual((), pmf.get_shape())
# Three (three sided) coin flips.
counts = [1., 0, 2]
with self.cached_session():
dist = ds.DirichletMultinomial(3., alpha)
pmf = dist.prob(counts)
self.assertAllClose(3 * 0.1 * 0.8 * 0.8, self.evaluate(pmf), atol=1e-2)
self.assertEqual((), pmf.get_shape())
def testSmallTauPrefersCorrelatedResults(self):
# If tau is small, then correlation between draws is large, so draws that
# are both of the same class are more likely.
mu = np.array([0.5, 0.5], dtype=np.float32)
tau = np.array([0.1], dtype=np.float32)
alpha = tau * mu
# If there is only one draw, it is still a coin flip, even with small tau.
counts = [1., 0]
with self.cached_session():
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.5, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
# If there are two draws, it is much more likely that they are the same.
counts_same = [2., 0]
counts_different = [1, 1.]
with self.cached_session():
dist = ds.DirichletMultinomial(2., alpha)
pmf_same = dist.prob(counts_same)
pmf_different = dist.prob(counts_different)
self.assertLess(5 * self.evaluate(pmf_different), self.evaluate(pmf_same))
self.assertEqual((), pmf_same.get_shape())
@test_util.run_deprecated_v1
def testNonStrictTurnsOffAllChecks(self):
# Make totally invalid input.
with self.cached_session():
alpha = [[-1., 2]] # alpha should be positive.
counts = [[1., 0], [0., -1]] # counts should be non-negative.
n = [-5.3] # n should be a non negative integer equal to counts.sum.
dist = ds.DirichletMultinomial(n, alpha, validate_args=False)
dist.prob(counts).eval() # Should not raise.
@test_util.run_deprecated_v1
def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess:
dist = ds.DirichletMultinomial(
total_count=5.,
concentration=1. + 2. * self._rng.rand(4, 3, 2).astype(np.float32))
n = int(3e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
# Cyclically rotate event dims left.
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.20)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
@test_util.run_deprecated_v1
def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess:
dist = ds.DirichletMultinomial(
total_count=5.,
concentration=1. + 2. * self._rng.rand(4).astype(np.float32))
n = int(5e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean # Already transposed to [n, 2].
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.20)
self.assertAllEqual([4, 4], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
def testNotReparameterized(self):
total_count = constant_op.constant(5.0)
concentration = constant_op.constant([0.1, 0.1, 0.1])
with backprop.GradientTape() as tape:
tape.watch(total_count)
tape.watch(concentration)
dist = ds.DirichletMultinomial(
total_count=total_count,
concentration=concentration)
samples = dist.sample(100)
grad_total_count, grad_concentration = tape.gradient(
samples, [total_count, concentration])
self.assertIsNone(grad_total_count)
self.assertIsNone(grad_concentration)
if __name__ == "__main__":
test.main()
| DirichletMultinomialTest |
python | keras-team__keras | keras/src/layers/pooling/average_pooling_test.py | {
"start": 7860,
"end": 12786
} | class ____(testing.TestCase):
@parameterized.parameters(
(2, 1, "valid", "channels_last"),
(2, 1, "valid", "channels_first"),
((2,), (2,), "valid", "channels_last"),
((2,), (2,), "valid", "channels_first"),
)
def test_average_pooling1d(self, pool_size, strides, padding, data_format):
inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
layer = layers.AveragePooling1D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = np_avgpool1d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
(2, 1, "same", "channels_last"),
(2, 1, "same", "channels_first"),
((2,), (2,), "same", "channels_last"),
((2,), (2,), "same", "channels_first"),
)
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Same padding in Torch backend produces different results.",
)
def test_average_pooling1d_same_padding(
self, pool_size, strides, padding, data_format
):
inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
layer = layers.AveragePooling1D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = np_avgpool1d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
(2, 1, "valid", "channels_last"),
((2, 3), (2, 2), "valid", "channels_last"),
)
def test_average_pooling2d(self, pool_size, strides, padding, data_format):
inputs = np.arange(16, dtype="float32").reshape((1, 4, 4, 1))
layer = layers.AveragePooling2D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = np_avgpool2d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
(2, (2, 1), "same", "channels_last"),
(2, (2, 1), "same", "channels_first"),
((2, 2), (2, 2), "same", "channels_last"),
((2, 2), (2, 2), "same", "channels_first"),
)
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Same padding in Torch backend produces different results.",
)
def test_average_pooling2d_same_padding(
self, pool_size, strides, padding, data_format
):
inputs = np.arange(16, dtype="float32").reshape((1, 4, 4, 1))
layer = layers.AveragePooling2D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = np_avgpool2d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
(2, 1, "valid", "channels_last"),
(2, 1, "valid", "channels_first"),
((2, 3, 2), (2, 2, 1), "valid", "channels_last"),
((2, 3, 2), (2, 2, 1), "valid", "channels_first"),
)
def test_average_pooling3d(self, pool_size, strides, padding, data_format):
inputs = np.arange(240, dtype="float32").reshape((2, 3, 4, 5, 2))
layer = layers.AveragePooling3D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = np_avgpool3d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
(2, 1, "same", "channels_last"),
(2, 1, "same", "channels_first"),
((2, 2, 2), (2, 2, 1), "same", "channels_last"),
((2, 2, 2), (2, 2, 1), "same", "channels_first"),
)
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Same padding in Torch backend produces different results.",
)
def test_average_pooling3d_same_padding(
self, pool_size, strides, padding, data_format
):
inputs = np.arange(240, dtype="float32").reshape((2, 3, 4, 5, 2))
layer = layers.AveragePooling3D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = np_avgpool3d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
| AveragePoolingCorrectnessTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP039.py | {
"start": 31,
"end": 60
} | class ____() \
:
pass
| A |
python | getsentry__sentry | src/sentry/seer/endpoints/organization_seer_explorer_chat.py | {
"start": 1512,
"end": 4769
} | class ____(OrganizationEndpoint):
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
"GET": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ML_AI
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"POST": {
RateLimitCategory.IP: RateLimit(limit=25, window=60),
RateLimitCategory.USER: RateLimit(limit=25, window=60),
RateLimitCategory.ORGANIZATION: RateLimit(limit=100, window=60 * 60),
},
"GET": {
RateLimitCategory.IP: RateLimit(limit=100, window=60),
RateLimitCategory.USER: RateLimit(limit=100, window=60),
RateLimitCategory.ORGANIZATION: RateLimit(limit=1000, window=60),
},
}
)
permission_classes = (OrganizationSeerExplorerChatPermission,)
def get(
self, request: Request, organization: Organization, run_id: int | None = None
) -> Response:
"""
Get the current state of a Seer Explorer session.
"""
if not run_id:
return Response({"session": None}, status=404)
try:
client = SeerExplorerClient(organization, request.user)
state = client.get_run(run_id=int(run_id))
return Response({"session": state.dict()})
except SeerPermissionError as e:
raise PermissionDenied(e.message) from e
except ValueError:
return Response({"session": None}, status=404)
def post(
self, request: Request, organization: Organization, run_id: int | None = None
) -> Response:
"""
Start a new chat session or continue an existing one.
Parameters:
- run_id: Optional session ID to continue an existing session (from URL).
- query: The user's query.
- insert_index: Optional index to insert the message at.
- on_page_context: Optional context from the user's screen.
Returns:
- run_id: The run ID.
"""
serializer = SeerExplorerChatSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
validated_data = serializer.validated_data
query = validated_data["query"]
insert_index = validated_data.get("insert_index")
on_page_context = validated_data.get("on_page_context")
try:
client = SeerExplorerClient(organization, request.user, is_interactive=True)
if run_id:
# Continue existing conversation
result_run_id = client.continue_run(
run_id=int(run_id),
prompt=query,
insert_index=insert_index,
on_page_context=on_page_context,
)
else:
# Start new conversation
result_run_id = client.start_run(
prompt=query,
on_page_context=on_page_context,
)
return Response({"run_id": result_run_id})
except SeerPermissionError as e:
raise PermissionDenied(e.message) from e
| OrganizationSeerExplorerChatEndpoint |
python | great-expectations__great_expectations | tests/expectations/metrics/query_metrics/test_query_metrics.py | {
"start": 2553,
"end": 2734
} | class ____(QueryMultipleColumns):
metric_name = "my_query.multiple_columns"
value_keys = ("my_query",)
query_param_name: ClassVar[str] = "my_query"
| MyQueryMultipleColumns |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 6738,
"end": 7224
} | class ____(DataContextError):
"""The great_expectations dir could not be found."""
def __init__(self) -> None:
self.message = """Error: No gx directory was found here!
- Please check that you are in the correct directory or have specified the correct directory.
- If you have never run Great Expectations in this project, please run `great_expectations init` to get started.
""" # noqa: E501 # FIXME CoP
super().__init__(self.message)
| ConfigNotFoundError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.