language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | openai__openai-python | src/openai/types/graders/score_model_grader.py | {
"start": 1632,
"end": 2940
} | class ____(BaseModel):
max_completions_tokens: Optional[int] = None
"""The maximum number of tokens the grader model may generate in its response."""
reasoning_effort: Optional[ReasoningEffort] = None
"""
Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
"""
seed: Optional[int] = None
"""A seed value to initialize the randomness, during sampling."""
temperature: Optional[float] = None
"""A higher temperature increases randomness in the outputs."""
top_p: Optional[float] = None
"""An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
| SamplingParams |
python | pytorch__pytorch | torch/distributed/nn/functional.py | {
"start": 14303,
"end": 15357
} | class ____(Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, group, output, output_split_sizes, input_split_sizes, input):
ctx.group = group
ctx.input_size = input.size()
ctx.output_split_sizes = input_split_sizes
ctx.input_split_sizes = output_split_sizes
dist.all_to_all_single(
output,
input,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
)
return output
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
tensor = torch.empty(
ctx.input_size, device=grad_output.device, dtype=grad_output.dtype
)
return (None, None, None, None) + (
_AlltoAllSingle.apply(
ctx.group,
tensor,
ctx.output_split_sizes,
ctx.input_split_sizes,
grad_output.contiguous(),
),
)
| _AlltoAllSingle |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-python-file/llama_index/tools/python_file/base.py | {
"start": 111,
"end": 1987
} | class ____(BaseToolSpec):
spec_functions = ["function_definitions", "get_function", "get_functions"]
def __init__(self, file_name: str) -> None:
f = open(file_name).read()
self.tree = ast.parse(f)
def function_definitions(self, external: Optional[bool] = True) -> str:
"""
Use this function to get the name and arguments of all function definitions in the python file.
Args:
external (Optional[bool]): Defaults to true. If false, this function will also return functions that start with _
"""
functions = ""
for node in ast.walk(self.tree):
if isinstance(node, ast.FunctionDef):
if external and node.name.startswith("_"):
continue
functions += f"""
name: {node.name}
arguments: {ast.dump(node.args)}
"""
return functions
def get_function(self, name: str) -> str:
"""
Use this function to get the name and arguments of a single function definition in the python file.
Args:
name (str): The name of the function to retrieve
"""
for node in ast.walk(self.tree):
if isinstance(node, ast.FunctionDef):
if node.name == name:
return f"""
name: {node.name}
arguments: {ast.dump(node.args)}
docstring: {ast.get_docstring(node)}
"""
return None
def get_functions(self, names: List[str]) -> str:
"""
Use this function to get the name and arguments of a list of function definition in the python file.
Args:
name (List[str]): The names of the functions to retrieve
"""
functions = ""
for name in names:
functions += self.get_function(name) + "\n"
return functions
| PythonFileToolSpec |
python | huggingface__transformers | src/transformers/models/longt5/modeling_longt5.py | {
"start": 13391,
"end": 14157
} | class ____(nn.Module):
def __init__(self, config: LongT5Config):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = LongT5DenseGatedActDense(config)
else:
self.DenseReluDense = LongT5DenseActDense(config)
self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5
| LongT5LayerFF |
python | apache__airflow | providers/slack/tests/unit/slack/transfers/test_sql_to_slack_webhook.py | {
"start": 1441,
"end": 10238
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="slack_connection",
conn_type="slackwebhook",
password="xoxb-1234567890123-09876543210987-AbCdEfGhIjKlMnOpQrStUvWx",
)
)
def setup_method(self):
self.default_hook_parameters = {"timeout": None, "proxy": None, "retry_handlers": None}
@staticmethod
def _construct_operator(**kwargs):
operator = SqlToSlackWebhookOperator(task_id=TEST_TASK_ID, **kwargs)
return operator
@pytest.mark.parametrize(
("slack_op_kwargs", "hook_extra_kwargs"),
[
pytest.param(
{}, {"timeout": None, "proxy": None, "retry_handlers": None}, id="default-hook-parameters"
),
pytest.param(
{"slack_timeout": 42, "slack_proxy": "http://spam.egg", "slack_retry_handlers": []},
{"timeout": 42, "proxy": "http://spam.egg", "retry_handlers": []},
id="with-extra-hook-parameters",
),
],
)
def test_rendering_and_message_execution(self, slack_op_kwargs, hook_extra_kwargs, mocked_hook):
mock_dbapi_hook = mock.Mock()
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
get_df_mock = mock_dbapi_hook.return_value.get_df
get_df_mock.return_value = test_df
operator_args = {
"sql_conn_id": "snowflake_connection",
"slack_webhook_conn_id": "slack_connection",
"slack_message": "message: {{ ds }}, {{ results_df }}",
"slack_channel": "#test",
"sql": "sql {{ ds }}",
**slack_op_kwargs,
}
sql_to_slack_operator = self._construct_operator(**operator_args)
slack_webhook_hook = mocked_hook.return_value
sql_to_slack_operator._get_hook = mock_dbapi_hook
sql_to_slack_operator.render_template_fields({"ds": "2017-01-01"})
sql_to_slack_operator.execute({"ds": "2017-01-01"})
# Test that the Slack hook is instantiated with the right parameters
mocked_hook.assert_called_once_with(slack_webhook_conn_id="slack_connection", **hook_extra_kwargs)
# Test that the `SlackWebhookHook.send` method gets run once
slack_webhook_hook.send.assert_called_once_with(
text=f"message: 2017-01-01, {test_df}",
channel="#test",
)
def test_rendering_and_message_execution_with_slack_hook(self, mocked_hook):
mock_dbapi_hook = mock.Mock()
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
get_df_mock = mock_dbapi_hook.return_value.get_df
get_df_mock.return_value = test_df
operator_args = {
"sql_conn_id": "snowflake_connection",
"slack_webhook_conn_id": "slack_connection",
"slack_message": "message: {{ ds }}, {{ results_df }}",
"slack_channel": "#test",
"sql": "sql {{ ds }}",
}
sql_to_slack_operator = self._construct_operator(**operator_args)
slack_webhook_hook = mocked_hook.return_value
sql_to_slack_operator._get_hook = mock_dbapi_hook
sql_to_slack_operator.render_template_fields({"ds": "2017-01-01"})
sql_to_slack_operator.execute({"ds": "2017-01-01"})
# Test that the Slack hook is instantiated with the right parameters
mocked_hook.assert_called_once_with(
slack_webhook_conn_id="slack_connection", **self.default_hook_parameters
)
# Test that the `SlackWebhookHook.send` method gets run once
slack_webhook_hook.send.assert_called_once_with(
text=f"message: 2017-01-01, {test_df}",
channel="#test",
)
@pytest.mark.parametrize(
("slack_webhook_conn_id", "warning_expected", "expected_conn_id"),
[
pytest.param("foo", False, "foo", id="slack-webhook-conn-id"),
pytest.param("spam", True, "spam", id="mixin-conn-ids"),
],
)
def test_resolve_conn_ids(self, slack_webhook_conn_id, warning_expected, expected_conn_id):
operator_args = {
"sql_conn_id": "snowflake_connection",
"slack_message": "message: {{ ds }}, {{ xxxx }}",
"sql": "sql {{ ds }}",
}
if slack_webhook_conn_id:
operator_args["slack_webhook_conn_id"] = slack_webhook_conn_id
op = self._construct_operator(**operator_args)
assert op.slack_webhook_conn_id == expected_conn_id
def test_non_existing_slack_webhook_conn_id(self):
operator_args = {
"sql_conn_id": "snowflake_connection",
"slack_message": "message: {{ ds }}, {{ xxxx }}",
"sql": "sql {{ ds }}",
}
with pytest.raises(ValueError, match="Got an empty `slack_webhook_conn_id` value"):
self._construct_operator(**operator_args)
def test_rendering_custom_df_name_message_execution(self, mocked_hook):
mock_dbapi_hook = mock.Mock()
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
get_df_mock = mock_dbapi_hook.return_value.get_df
get_df_mock.return_value = test_df
operator_args = {
"sql_conn_id": "snowflake_connection",
"slack_webhook_conn_id": "slack_connection",
"slack_message": "message: {{ ds }}, {{ testing }}",
"slack_channel": "#test",
"sql": "sql {{ ds }}",
"results_df_name": "testing",
}
sql_to_slack_operator = self._construct_operator(**operator_args)
slack_webhook_hook = mocked_hook.return_value
sql_to_slack_operator._get_hook = mock_dbapi_hook
sql_to_slack_operator.render_template_fields({"ds": "2017-01-01"})
sql_to_slack_operator.execute({"ds": "2017-01-01"})
# Test that the Slack hook is instantiated with the right parameters
mocked_hook.assert_called_once_with(
slack_webhook_conn_id="slack_connection", **self.default_hook_parameters
)
# Test that the `SlackWebhookHook.send` method gets run once
slack_webhook_hook.send.assert_called_once_with(
text=f"message: 2017-01-01, {test_df}",
channel="#test",
)
def test_hook_params_building(self, mocked_get_connection):
mocked_get_connection.return_value = Connection(conn_id="snowflake_connection", conn_type="snowflake")
hook_params = {
"schema": "test_schema",
"role": "test_role",
"database": "test_database",
"warehouse": "test_warehouse",
}
operator_args = {
"sql_conn_id": "dummy_connection",
"sql": "sql {{ ds }}",
"results_df_name": "xxxx",
"sql_hook_params": hook_params,
"slack_webhook_conn_id": "slack_connection",
"parameters": ["1", "2", "3"],
"slack_message": "message: {{ ds }}, {{ xxxx }}",
}
sql_to_slack_operator = SqlToSlackWebhookOperator(task_id=TEST_TASK_ID, **operator_args)
assert sql_to_slack_operator.sql_hook_params == hook_params
def test_hook_params(self, mocked_get_connection):
mocked_get_connection.return_value = Connection(conn_id="postgres_test", conn_type="postgres")
op = SqlToSlackWebhookOperator(
task_id="sql_hook_params",
sql_conn_id="postgres_test",
slack_webhook_conn_id="slack_connection",
sql="SELECT 1",
slack_message="message: {{ ds }}, {{ xxxx }}",
sql_hook_params={
"log_sql": False,
},
)
hook = op._get_hook()
assert hook.log_sql == op.sql_hook_params["log_sql"]
def test_hook_params_snowflake(self, mocked_get_connection):
mocked_get_connection.return_value = Connection(conn_id="snowflake_default", conn_type="snowflake")
op = SqlToSlackWebhookOperator(
task_id="snowflake_hook_params",
sql_conn_id="snowflake_default",
slack_webhook_conn_id="slack_default",
results_df_name="xxxx",
sql="SELECT 1",
slack_message="message: {{ ds }}, {{ xxxx }}",
sql_hook_params={
"warehouse": "warehouse",
"database": "database",
"role": "role",
"schema": "schema",
},
)
hook = op._get_hook()
assert hook.warehouse == "warehouse"
assert hook.database == "database"
assert hook.role == "role"
assert hook.schema == "schema"
| TestSqlToSlackWebhookOperator |
python | viewflow__viewflow | viewflow/workflow/flow/views/actions.py | {
"start": 3362,
"end": 4203
} | class ____(
mixins.SuccessMessageMixin,
mixins.TaskViewTemplateNames,
generic.FormView,
):
"""
Default review view for flow task.
Get confirmation from user, and revives task
"""
form_class = forms.Form
template_filename = "task_revive.html"
success_message = _("Task {task} has been revived.")
def get_success_url(self):
if hasattr(self, "new_task"):
return self.new_task.flow_task.reverse(
"index", args=[self.new_task.process_id, self.new_task.pk]
)
return super().get_get_success_url()
def form_valid(self, *args, **kwargs):
"""If the form is valid, save the associated model and revives the task."""
self.new_task = self.request.activation.revive()
return super().form_valid(*args, **kwargs)
| ReviveTaskView |
python | huggingface__transformers | src/transformers/models/sam3/modeling_sam3.py | {
"start": 42734,
"end": 44285
} | class ____(nn.Module):
def __init__(self, config: Sam3GeometryEncoderConfig):
super().__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size)
self.self_attn = Sam3Attention(config)
self.dropout = nn.Dropout(config.dropout)
self.cross_attn = Sam3Attention(config)
self.layer_norm2 = nn.LayerNorm(config.hidden_size)
self.mlp = Sam3MLP(config)
self.layer_norm3 = nn.LayerNorm(config.hidden_size)
def forward(
self,
prompt_feats: Tensor,
vision_feats: Tensor,
vision_pos_encoding: Tensor,
prompt_mask: Tensor,
**kwargs: Unpack[TransformersKwargs],
):
residual = prompt_feats
hidden_states = self.layer_norm1(prompt_feats)
hidden_states, _ = self.self_attn(
query=hidden_states, key=hidden_states, value=hidden_states, attention_mask=prompt_mask, **kwargs
)
hidden_states = self.dropout(hidden_states) + residual
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
key = vision_feats + vision_pos_encoding
hidden_states, _ = self.cross_attn(query=hidden_states, key=key, value=vision_feats, **kwargs)
hidden_states = self.dropout(hidden_states) + residual
residual = hidden_states
hidden_states = self.layer_norm3(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.dropout(hidden_states) + residual
return hidden_states
| Sam3GeometryEncoderLayer |
python | pytorch__pytorch | tools/linter/adapters/test_has_main_linter.py | {
"start": 620,
"end": 1828
} | class ____(cst.CSTVisitor):
def __init__(self) -> None:
super().__init__()
self.found = False
def visit_Module(self, node: cst.Module) -> bool:
name = m.Name("__name__")
main = m.SimpleString('"__main__"') | m.SimpleString("'__main__'")
run_test_call = m.Call(
func=m.Name("run_tests") | m.Attribute(attr=m.Name("run_tests"))
)
# Distributed tests (i.e. MultiProcContinuousTest) calls `run_rank`
# instead of `run_tests` in main
run_rank_call = m.Call(
func=m.Name("run_rank") | m.Attribute(attr=m.Name("run_rank"))
)
raise_block = m.Raise()
# name == main or main == name
if_main1 = m.Comparison(
name,
[m.ComparisonTarget(m.Equal(), main)],
)
if_main2 = m.Comparison(
main,
[m.ComparisonTarget(m.Equal(), name)],
)
for child in node.children:
if m.matches(child, m.If(test=if_main1 | if_main2)):
if m.findall(child, raise_block | run_test_call | run_rank_call):
self.found = True
break
return False
| HasMainVisiter |
python | eventlet__eventlet | eventlet/green/http/cookiejar.py | {
"start": 46042,
"end": 66092
} | class ____:
"""Collection of HTTP cookies.
You may not need to know about this class: try
urllib.request.build_opener(HTTPCookieProcessor).open(url).
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII)
def __init__(self, policy=None):
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies_lock = _threading.RLock()
self._cookies = {}
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
_debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
_debug(" not returning cookie")
continue
_debug(" it's a match")
cookies.append(cookie)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
"""
# add cookies in order of most specific (ie. longest) path first
cookies.sort(key=lambda a: len(a.path), reverse=True)
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if k in standard:
# only first value is significant
continue
if k == "domain":
if v is None:
_debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
_debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
_debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age
# is a request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ("port", "comment", "commenturl")):
_debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
_debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
if rfc2109_as_ns is None:
rfc2109_as_ns = not self._policy.rfc2965
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_ns:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object."""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
ns_hdrs = headers.get_all("Set-Cookie", [])
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except Exception:
_warn_unhandled_exception()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except Exception:
_warn_unhandled_exception()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return key not in lookup
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so."""
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set."""
c = self._cookies
self._cookies_lock.acquire()
try:
if cookie.domain not in c: c[cookie.domain] = {}
c2 = c[cookie.domain]
if cookie.path not in c2: c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
finally:
self._cookies_lock.release()
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request."""
_debug("extract_cookies: %s", response.info())
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
_debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
"""
self._cookies_lock.acquire()
try:
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the
.save() method won't save expired cookies anyway (unless you ask
otherwise by passing a true ignore_expires argument).
"""
self._cookies_lock.acquire()
try:
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def __iter__(self):
return deepvalues(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
# derives from OSError for backwards-compatibility with Python 2.4.0
| CookieJar |
python | xlwings__xlwings | xlwings/_xlwindows.py | {
"start": 33767,
"end": 46262
} | class ____(base_classes.Range):
def __init__(self, xl):
if isinstance(xl, tuple):
self._coords = xl
self._xl = missing
else:
self._coords = missing
self._xl = xl
@property
def xl(self):
if self._xl is missing:
xl_sheet, row, col, nrows, ncols = self._coords
if nrows and ncols:
self._xl = xl_sheet.Range(
xl_sheet.Cells(row, col),
xl_sheet.Cells(row + nrows - 1, col + ncols - 1),
)
else:
self._xl = None
return self._xl
@property
def coords(self):
if self._coords is missing:
self._coords = (
self.xl.Worksheet,
self.xl.Row,
self.xl.Column,
self.xl.Rows.Count,
self.xl.Columns.Count,
)
return self._coords
@property
def api(self):
return self.xl
@property
def sheet(self):
return Sheet(xl=self.coords[0])
def __len__(self):
return (self.xl and self.xl.Count) or 0
@property
def row(self):
return self.coords[1]
@property
def column(self):
return self.coords[2]
@property
def shape(self):
return self.coords[3], self.coords[4]
@property
def raw_value(self):
if self.xl is not None:
return self.xl.Value
else:
return None
@raw_value.setter
def raw_value(self, data):
if self.xl is not None:
self.xl.Value = data
def clear_contents(self):
if self.xl is not None:
self.xl.ClearContents()
def clear_formats(self):
self.xl.ClearFormats()
def clear(self):
if self.xl is not None:
self.xl.Clear()
@property
def formula(self):
if self.xl is not None:
return self.xl.Formula
else:
return None
@formula.setter
def formula(self, value):
if self.xl is not None:
self.xl.Formula = value
@property
def formula2(self):
if self.xl is not None:
return self.xl.Formula2
else:
return None
@formula2.setter
def formula2(self, value):
if self.xl is not None:
self.xl.Formula2 = value
def end(self, direction):
direction = directions_s2i.get(direction, direction)
return Range(xl=self.xl.End(direction))
@property
def formula_array(self):
if self.xl is not None:
return self.xl.FormulaArray
else:
return None
@formula_array.setter
def formula_array(self, value):
if self.xl is not None:
self.xl.FormulaArray = value
@property
def font(self):
return Font(self, self.xl.Font)
@property
def column_width(self):
if self.xl is not None:
return self.xl.ColumnWidth
else:
return 0
@column_width.setter
def column_width(self, value):
if self.xl is not None:
self.xl.ColumnWidth = value
@property
def row_height(self):
if self.xl is not None:
return self.xl.RowHeight
else:
return 0
@row_height.setter
def row_height(self, value):
if self.xl is not None:
self.xl.RowHeight = value
@property
def width(self):
if self.xl is not None:
return self.xl.Width
else:
return 0
@property
def height(self):
if self.xl is not None:
return self.xl.Height
else:
return 0
@property
def left(self):
if self.xl is not None:
return self.xl.Left
else:
return 0
@property
def top(self):
if self.xl is not None:
return self.xl.Top
else:
return 0
@property
def number_format(self):
if self.xl is not None:
return self.xl.NumberFormat
else:
return ""
@number_format.setter
def number_format(self, value):
if self.xl is not None:
self.xl.NumberFormat = value
def get_address(self, row_absolute, col_absolute, external):
if self.xl is not None:
return self.xl.GetAddress(row_absolute, col_absolute, 1, external)
else:
raise NotImplementedError()
@property
def address(self):
if self.xl is not None:
return self.xl.Address
else:
_, row, col, nrows, ncols = self.coords
return "$%s$%s{%sx%s}" % (col_name(col), str(row), nrows, ncols)
@property
def current_region(self):
if self.xl is not None:
return Range(xl=self.xl.CurrentRegion)
else:
return self
def autofit(self, axis=None):
if self.xl is not None:
if axis == "rows" or axis == "r":
self.xl.Rows.AutoFit()
elif axis == "columns" or axis == "c":
self.xl.Columns.AutoFit()
elif axis is None:
self.xl.Columns.AutoFit()
self.xl.Rows.AutoFit()
def insert(self, shift=None, copy_origin=None):
shifts = {
"down": InsertShiftDirection.xlShiftDown,
"right": InsertShiftDirection.xlShiftToRight,
None: None,
}
copy_origins = {
"format_from_left_or_above": InsertFormatOrigin.xlFormatFromLeftOrAbove,
"format_from_right_or_below": InsertFormatOrigin.xlFormatFromRightOrBelow,
}
self.xl.Insert(Shift=shifts[shift], CopyOrigin=copy_origins[copy_origin])
def delete(self, shift=None):
shifts = {
"up": DeleteShiftDirection.xlShiftUp,
"left": DeleteShiftDirection.xlShiftToLeft,
None: None,
}
self.xl.Delete(Shift=shifts[shift])
def copy(self, destination=None):
self.xl.Copy(Destination=destination.api if destination else None)
def paste(self, paste=None, operation=None, skip_blanks=False, transpose=False):
pastes = {
"all": -4104,
None: -4104,
"all_except_borders": 7,
"all_merging_conditional_formats": 14,
"all_using_source_theme": 13,
"column_widths": 8,
"comments": -4144,
"formats": -4122,
"formulas": -4123,
"formulas_and_number_formats": 11,
"validation": 6,
"values": -4163,
"values_and_number_formats": 12,
}
operations = {
"add": 2,
"divide": 5,
"multiply": 4,
None: -4142,
"subtract": 3,
}
self.xl.PasteSpecial(
Paste=pastes[paste],
Operation=operations[operation],
SkipBlanks=skip_blanks,
Transpose=transpose,
)
@property
def hyperlink(self):
if self.xl is not None:
try:
return self.xl.Hyperlinks(1).Address
except pywintypes.com_error:
raise Exception("The cell doesn't seem to contain a hyperlink!")
else:
return ""
def add_hyperlink(self, address, text_to_display, screen_tip):
if self.xl is not None:
# Another one of these pywin32 bugs that only materialize under certain
# circumstances: https://stackoverflow.com/questions/
# 6284227/hyperlink-will-not-show-display-proper-text
link = self.xl.Hyperlinks.Add(Anchor=self.xl, Address=address)
link.TextToDisplay = text_to_display
link.ScreenTip = screen_tip
@property
def color(self):
if self.xl is not None:
if self.xl.Interior.ColorIndex == ColorIndex.xlColorIndexNone:
return None
else:
return int_to_rgb(self.xl.Interior.Color)
else:
return None
@color.setter
def color(self, color_or_rgb):
if isinstance(color_or_rgb, str):
color_or_rgb = hex_to_rgb(color_or_rgb)
if self.xl is not None:
if color_or_rgb is None:
self.xl.Interior.ColorIndex = ColorIndex.xlColorIndexNone
elif isinstance(color_or_rgb, int):
self.xl.Interior.Color = color_or_rgb
else:
self.xl.Interior.Color = rgb_to_int(color_or_rgb)
@property
def name(self):
if self.xl is not None:
try:
name = Name(xl=self.xl.Name)
except pywintypes.com_error:
name = None
return name
else:
return None
@property
def has_array(self):
if self.xl is not None:
try:
return self.xl.HasArray
except pywintypes.com_error:
return False
else:
return False
@name.setter
def name(self, value):
if self.xl is not None:
self.xl.Name = value
def __call__(self, *args):
if self.xl is not None:
if len(args) == 0:
raise ValueError("Invalid arguments")
return Range(xl=self.xl(*args))
else:
raise NotImplementedError()
@property
def rows(self):
return Range(xl=self.xl.Rows)
@property
def columns(self):
return Range(xl=self.xl.Columns)
def select(self):
return self.xl.Select()
@property
def merge_area(self):
return Range(xl=self.xl.MergeArea)
@property
def merge_cells(self):
return self.xl.MergeCells
def merge(self, across):
self.xl.Merge(across)
def unmerge(self):
self.xl.UnMerge()
@property
def table(self):
if self.xl.ListObject:
return Table(self.xl.ListObject)
@property
def characters(self):
return Characters(parent=self, xl=self.xl.GetCharacters)
@property
def wrap_text(self):
return self.xl.WrapText
@wrap_text.setter
def wrap_text(self, value):
self.xl.WrapText = value
@property
def note(self):
return Note(xl=self.xl.Comment) if self.xl.Comment else None
def copy_picture(self, appearance, format):
_appearance = {"screen": 1, "printer": 2}
_format = {"picture": -4147, "bitmap": 2}
self.xl.CopyPicture(Appearance=_appearance[appearance], Format=_format[format])
def to_png(self, path):
max_retries = 10
for retry in range(max_retries):
# https://stackoverflow.com/questions/
# 24740062/copypicture-method-of-range-class-failed-sometimes
try:
# appearance="printer" fails here, not sure why
self.copy_picture(appearance="screen", format="bitmap")
im = ImageGrab.grabclipboard()
im.save(path)
break
except (pywintypes.com_error, AttributeError):
if retry == max_retries - 1:
raise
def to_pdf(self, path, quality):
self.xl.ExportAsFixedFormat(
Type=FixedFormatType.xlTypePDF,
Filename=path,
Quality=quality_types[quality],
IncludeDocProperties=True,
IgnorePrintAreas=False,
OpenAfterPublish=False,
)
def autofill(self, destination, type_):
types = {
"fill_copy": constants.AutoFillType.xlFillCopy,
"fill_days": constants.AutoFillType.xlFillDays,
"fill_default": constants.AutoFillType.xlFillDefault,
"fill_formats": constants.AutoFillType.xlFillFormats,
"fill_months": constants.AutoFillType.xlFillMonths,
"fill_series": constants.AutoFillType.xlFillSeries,
"fill_values": constants.AutoFillType.xlFillValues,
"fill_weekdays": constants.AutoFillType.xlFillWeekdays,
"fill_years": constants.AutoFillType.xlFillYears,
"growth_trend": constants.AutoFillType.xlGrowthTrend,
"linear_trend": constants.AutoFillType.xlLinearTrend,
"flash_fill": constants.AutoFillType.xlFlashFill,
}
self.xl.AutoFill(Destination=destination.api, Type=types[type_])
| Range |
python | python-openxml__python-docx | tests/text/test_run.py | {
"start": 689,
"end": 14859
} | class ____:
"""Unit-test suite for `docx.text.run.Run`."""
@pytest.mark.parametrize(
("r_cxml", "bool_prop_name", "expected_value"),
[
("w:r/w:rPr", "bold", None),
("w:r/w:rPr/w:b", "bold", True),
("w:r/w:rPr/w:b{w:val=on}", "bold", True),
("w:r/w:rPr/w:b{w:val=off}", "bold", False),
("w:r/w:rPr/w:b{w:val=1}", "bold", True),
("w:r/w:rPr/w:i{w:val=0}", "italic", False),
],
)
def it_knows_its_bool_prop_states(
self, r_cxml: str, bool_prop_name: str, expected_value: bool | None, paragraph_: Mock
):
run = Run(cast(CT_R, element(r_cxml)), paragraph_)
assert getattr(run, bool_prop_name) == expected_value
@pytest.mark.parametrize(
("initial_r_cxml", "bool_prop_name", "value", "expected_cxml"),
[
# -- nothing to True, False, and None ---------------------------
("w:r", "bold", True, "w:r/w:rPr/w:b"),
("w:r", "bold", False, "w:r/w:rPr/w:b{w:val=0}"),
("w:r", "italic", None, "w:r/w:rPr"),
# -- default to True, False, and None ---------------------------
("w:r/w:rPr/w:b", "bold", True, "w:r/w:rPr/w:b"),
("w:r/w:rPr/w:b", "bold", False, "w:r/w:rPr/w:b{w:val=0}"),
("w:r/w:rPr/w:i", "italic", None, "w:r/w:rPr"),
# -- True to True, False, and None ------------------------------
("w:r/w:rPr/w:b{w:val=on}", "bold", True, "w:r/w:rPr/w:b"),
("w:r/w:rPr/w:b{w:val=1}", "bold", False, "w:r/w:rPr/w:b{w:val=0}"),
("w:r/w:rPr/w:b{w:val=1}", "bold", None, "w:r/w:rPr"),
# -- False to True, False, and None -----------------------------
("w:r/w:rPr/w:i{w:val=false}", "italic", True, "w:r/w:rPr/w:i"),
("w:r/w:rPr/w:i{w:val=0}", "italic", False, "w:r/w:rPr/w:i{w:val=0}"),
("w:r/w:rPr/w:i{w:val=off}", "italic", None, "w:r/w:rPr"),
],
)
def it_can_change_its_bool_prop_settings(
self,
initial_r_cxml: str,
bool_prop_name: str,
value: bool | None,
expected_cxml: str,
paragraph_: Mock,
):
run = Run(cast(CT_R, element(initial_r_cxml)), paragraph_)
setattr(run, bool_prop_name, value)
assert run._r.xml == xml(expected_cxml)
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", False),
('w:r/w:t"foobar"', False),
('w:r/(w:t"abc", w:lastRenderedPageBreak, w:t"def")', True),
("w:r/(w:lastRenderedPageBreak, w:lastRenderedPageBreak)", True),
],
)
def it_knows_whether_it_contains_a_page_break(
self, r_cxml: str, expected_value: bool, paragraph_: Mock
):
run = Run(cast(CT_R, element(r_cxml)), paragraph_)
assert run.contains_page_break == expected_value
@pytest.mark.parametrize(
("r_cxml", "expected"),
[
# -- no content produces an empty iterator --
("w:r", []),
# -- contiguous text content is condensed into a single str --
('w:r/(w:t"foo",w:cr,w:t"bar")', ["str"]),
# -- page-breaks are a form of inner-content --
(
'w:r/(w:t"abc",w:br,w:lastRenderedPageBreak,w:noBreakHyphen,w:t"def")',
["str", "RenderedPageBreak", "str"],
),
# -- as are drawings --
(
'w:r/(w:t"abc", w:lastRenderedPageBreak, w:drawing)',
["str", "RenderedPageBreak", "Drawing"],
),
],
)
def it_can_iterate_its_inner_content_items(
self, r_cxml: str, expected: List[str], fake_parent: t.ProvidesStoryPart
):
r = cast(CT_R, element(r_cxml))
run = Run(r, fake_parent)
inner_content = run.iter_inner_content()
actual = [type(item).__name__ for item in inner_content]
assert actual == expected, f"expected: {expected}, got: {actual}"
def it_can_mark_a_comment_reference_range(self, paragraph_: Mock):
p = cast(CT_P, element('w:p/w:r/w:t"referenced text"'))
run = last_run = Run(p.r_lst[0], paragraph_)
run.mark_comment_range(last_run, comment_id=42)
assert p.xml == xml(
'w:p/(w:commentRangeStart{w:id=42},w:r/w:t"referenced text"'
",w:commentRangeEnd{w:id=42}"
",w:r/(w:rPr/w:rStyle{w:val=CommentReference},w:commentReference{w:id=42}))"
)
def it_knows_its_character_style(
self, part_prop_: Mock, document_part_: Mock, paragraph_: Mock
):
style_ = document_part_.get_style.return_value
part_prop_.return_value = document_part_
style_id = "Barfoo"
run = Run(cast(CT_R, element(f"w:r/w:rPr/w:rStyle{{w:val={style_id}}}")), paragraph_)
style = run.style
document_part_.get_style.assert_called_once_with(style_id, WD_STYLE_TYPE.CHARACTER)
assert style is style_
@pytest.mark.parametrize(
("r_cxml", "value", "style_id", "expected_cxml"),
[
("w:r", "Foo Font", "FooFont", "w:r/w:rPr/w:rStyle{w:val=FooFont}"),
("w:r/w:rPr", "Foo Font", "FooFont", "w:r/w:rPr/w:rStyle{w:val=FooFont}"),
(
"w:r/w:rPr/w:rStyle{w:val=FooFont}",
"Bar Font",
"BarFont",
"w:r/w:rPr/w:rStyle{w:val=BarFont}",
),
("w:r/w:rPr/w:rStyle{w:val=FooFont}", None, None, "w:r/w:rPr"),
("w:r", None, None, "w:r/w:rPr"),
],
)
def it_can_change_its_character_style(
self,
r_cxml: str,
value: str | None,
style_id: str | None,
expected_cxml: str,
part_prop_: Mock,
paragraph_: Mock,
):
part_ = part_prop_.return_value
part_.get_style_id.return_value = style_id
run = Run(cast(CT_R, element(r_cxml)), paragraph_)
run.style = value
part_.get_style_id.assert_called_once_with(value, WD_STYLE_TYPE.CHARACTER)
assert run._r.xml == xml(expected_cxml)
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr/w:u", None),
("w:r/w:rPr/w:u{w:val=single}", True),
("w:r/w:rPr/w:u{w:val=none}", False),
("w:r/w:rPr/w:u{w:val=double}", WD_UNDERLINE.DOUBLE),
("w:r/w:rPr/w:u{w:val=wave}", WD_UNDERLINE.WAVY),
],
)
def it_knows_its_underline_type(
self, r_cxml: str, expected_value: bool | WD_UNDERLINE | None, paragraph_: Mock
):
run = Run(cast(CT_R, element(r_cxml)), paragraph_)
assert run.underline is expected_value
@pytest.mark.parametrize(
("initial_r_cxml", "new_underline", "expected_cxml"),
[
("w:r", True, "w:r/w:rPr/w:u{w:val=single}"),
("w:r", False, "w:r/w:rPr/w:u{w:val=none}"),
("w:r", None, "w:r/w:rPr"),
("w:r", WD_UNDERLINE.SINGLE, "w:r/w:rPr/w:u{w:val=single}"),
("w:r", WD_UNDERLINE.THICK, "w:r/w:rPr/w:u{w:val=thick}"),
("w:r/w:rPr/w:u{w:val=single}", True, "w:r/w:rPr/w:u{w:val=single}"),
("w:r/w:rPr/w:u{w:val=single}", False, "w:r/w:rPr/w:u{w:val=none}"),
("w:r/w:rPr/w:u{w:val=single}", None, "w:r/w:rPr"),
(
"w:r/w:rPr/w:u{w:val=single}",
WD_UNDERLINE.SINGLE,
"w:r/w:rPr/w:u{w:val=single}",
),
(
"w:r/w:rPr/w:u{w:val=single}",
WD_UNDERLINE.DOTTED,
"w:r/w:rPr/w:u{w:val=dotted}",
),
],
)
def it_can_change_its_underline_type(
self,
initial_r_cxml: str,
new_underline: bool | WD_UNDERLINE | None,
expected_cxml: str,
paragraph_: Mock,
):
run = Run(cast(CT_R, element(initial_r_cxml)), paragraph_)
run.underline = new_underline
assert run._r.xml == xml(expected_cxml)
@pytest.mark.parametrize("invalid_value", ["foobar", 42, "single"])
def it_raises_on_assign_invalid_underline_value(self, invalid_value: Any, paragraph_: Mock):
run = Run(cast(CT_R, element("w:r/w:rPr")), paragraph_)
with pytest.raises(ValueError, match=" is not a valid WD_UNDERLINE"):
run.underline = invalid_value
def it_provides_access_to_its_font(self, Font_: Mock, font_: Mock, paragraph_: Mock):
Font_.return_value = font_
run = Run(cast(CT_R, element("w:r")), paragraph_)
font = run.font
Font_.assert_called_once_with(run._element)
assert font is font_
@pytest.mark.parametrize(
("r_cxml", "new_text", "expected_cxml"),
[
("w:r", "foo", 'w:r/w:t"foo"'),
('w:r/w:t"foo"', "bar", 'w:r/(w:t"foo", w:t"bar")'),
("w:r", "fo ", 'w:r/w:t{xml:space=preserve}"fo "'),
("w:r", "f o", 'w:r/w:t"f o"'),
],
)
def it_can_add_text(
self, r_cxml: str, new_text: str, expected_cxml: str, Text_: Mock, paragraph_: Mock
):
run = Run(cast(CT_R, element(r_cxml)), paragraph_)
text = run.add_text(new_text)
assert run._r.xml == xml(expected_cxml)
assert text is Text_.return_value
@pytest.mark.parametrize(
("break_type", "expected_cxml"),
[
(WD_BREAK.LINE, "w:r/w:br"),
(WD_BREAK.PAGE, "w:r/w:br{w:type=page}"),
(WD_BREAK.COLUMN, "w:r/w:br{w:type=column}"),
(WD_BREAK.LINE_CLEAR_LEFT, "w:r/w:br{w:clear=left}"),
(WD_BREAK.LINE_CLEAR_RIGHT, "w:r/w:br{w:clear=right}"),
(WD_BREAK.LINE_CLEAR_ALL, "w:r/w:br{w:clear=all}"),
],
)
def it_can_add_a_break(self, break_type: WD_BREAK, expected_cxml: str, paragraph_: Mock):
run = Run(cast(CT_R, element("w:r")), paragraph_)
run.add_break(break_type)
assert run._r.xml == xml(expected_cxml)
@pytest.mark.parametrize(
("r_cxml", "expected_cxml"), [('w:r/w:t"foo"', 'w:r/(w:t"foo", w:tab)')]
)
def it_can_add_a_tab(self, r_cxml: str, expected_cxml: str, paragraph_: Mock):
run = Run(cast(CT_R, element(r_cxml)), paragraph_)
run.add_tab()
assert run._r.xml == xml(expected_cxml)
def it_can_add_a_picture(
self,
part_prop_: Mock,
document_part_: Mock,
InlineShape_: Mock,
picture_: Mock,
paragraph_: Mock,
):
part_prop_.return_value = document_part_
run = Run(cast(CT_R, element("w:r/wp:x")), paragraph_)
image = "foobar.png"
width, height, inline = 1111, 2222, element("wp:inline{id=42}")
document_part_.new_pic_inline.return_value = inline
InlineShape_.return_value = picture_
picture = run.add_picture(image, width, height)
document_part_.new_pic_inline.assert_called_once_with(image, width, height)
assert run._r.xml == xml("w:r/(wp:x,w:drawing/wp:inline{id=42})")
InlineShape_.assert_called_once_with(inline)
assert picture is picture_
@pytest.mark.parametrize(
("initial_r_cxml", "expected_cxml"),
[
("w:r", "w:r"),
('w:r/w:t"foo"', "w:r"),
("w:r/w:br", "w:r"),
("w:r/w:rPr", "w:r/w:rPr"),
('w:r/(w:rPr, w:t"foo")', "w:r/w:rPr"),
(
'w:r/(w:rPr/(w:b, w:i), w:t"foo", w:cr, w:t"bar")',
"w:r/w:rPr/(w:b, w:i)",
),
],
)
def it_can_remove_its_content_but_keep_formatting(
self, initial_r_cxml: str, expected_cxml: str, paragraph_: Mock
):
run = Run(cast(CT_R, element(initial_r_cxml)), paragraph_)
cleared_run = run.clear()
assert run._r.xml == xml(expected_cxml)
assert cleared_run is run
@pytest.mark.parametrize(
("r_cxml", "expected_text"),
[
("w:r", ""),
('w:r/w:t"foobar"', "foobar"),
('w:r/(w:t"abc", w:tab, w:t"def", w:cr)', "abc\tdef\n"),
('w:r/(w:br{w:type=page}, w:t"abc", w:t"def", w:tab)', "abcdef\t"),
],
)
def it_knows_the_text_it_contains(self, r_cxml: str, expected_text: str, paragraph_: Mock):
run = Run(cast(CT_R, element(r_cxml)), paragraph_)
assert run.text == expected_text
@pytest.mark.parametrize(
("new_text", "expected_cxml"),
[
("abc def", 'w:r/w:t"abc def"'),
("abc\tdef", 'w:r/(w:t"abc", w:tab, w:t"def")'),
("abc\ndef", 'w:r/(w:t"abc", w:br, w:t"def")'),
("abc\rdef", 'w:r/(w:t"abc", w:br, w:t"def")'),
],
)
def it_can_replace_the_text_it_contains(
self, new_text: str, expected_cxml: str, paragraph_: Mock
):
run = Run(cast(CT_R, element('w:r/w:t"should get deleted"')), paragraph_)
run.text = new_text
assert run._r.xml == xml(expected_cxml)
# -- fixtures --------------------------------------------------------------------------------
@pytest.fixture
def document_part_(self, request: FixtureRequest):
return instance_mock(request, DocumentPart)
@pytest.fixture
def Font_(self, request: FixtureRequest):
return class_mock(request, "docx.text.run.Font")
@pytest.fixture
def font_(self, request: FixtureRequest):
return instance_mock(request, Font)
@pytest.fixture
def InlineShape_(self, request: FixtureRequest):
return class_mock(request, "docx.text.run.InlineShape")
@pytest.fixture
def paragraph_(self, request: FixtureRequest):
return instance_mock(request, Paragraph)
@pytest.fixture
def part_prop_(self, request: FixtureRequest):
return property_mock(request, Run, "part")
@pytest.fixture
def picture_(self, request: FixtureRequest):
return instance_mock(request, InlineShape)
@pytest.fixture
def Text_(self, request: FixtureRequest):
return class_mock(request, "docx.text.run._Text")
| DescribeRun |
python | huggingface__transformers | src/transformers/data/processors/glue.py | {
"start": 18889,
"end": 21383
} | class ____(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{line[0]}"
text_a = line[1]
text_b = line[2]
label = None if set_type == "test" else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
glue_tasks_num_labels = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
glue_processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
}
glue_output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
}
| WnliProcessor |
python | neetcode-gh__leetcode | python/0138-copy-list-with-random-pointer.py | {
"start": 203,
"end": 661
} | class ____:
def copyRandomList(self, head: "Node") -> "Node":
oldToCopy = {None: None}
cur = head
while cur:
copy = Node(cur.val)
oldToCopy[cur] = copy
cur = cur.next
cur = head
while cur:
copy = oldToCopy[cur]
copy.next = oldToCopy[cur.next]
copy.random = oldToCopy[cur.random]
cur = cur.next
return oldToCopy[head]
| Solution |
python | agronholm__apscheduler | src/apscheduler/_exceptions.py | {
"start": 744,
"end": 858
} | class ____(LookupError):
"""Raised when the target callable for a job could not be found."""
| CallableLookupError |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/instance_tests/test_instance.py | {
"start": 31187,
"end": 34147
} | class ____(dg.DagsterInstance):
def __init__(self, *args, foo=None, baz=None, **kwargs):
self._foo = foo
self._baz = baz
super().__init__(*args, **kwargs)
def foo(self):
return self._foo
@property
def baz(self):
return self._baz
@classmethod
def config_schema(cls):
return {
"foo": dg.Field(str, is_required=True),
"baz": dg.Field(str, is_required=False),
}
@staticmethod
def config_defaults(base_dir):
defaults = InstanceRef.config_defaults(base_dir)
defaults["run_coordinator"] = ConfigurableClassData( # pyright: ignore[reportIndexIssue]
"dagster._core.run_coordinator.queued_run_coordinator",
"QueuedRunCoordinator",
yaml.dump({}),
)
return defaults
def test_instance_subclass():
with dg.instance_for_test(
overrides={
"instance_class": {
"module": "dagster_tests.core_tests.instance_tests.test_instance",
"class": "TestInstanceSubclass",
},
"foo": "bar",
}
) as subclass_instance:
assert isinstance(subclass_instance, dg.DagsterInstance)
# isinstance(subclass_instance, TestInstanceSubclass) does not pass
# Likely because the imported/dynamically loaded class is different from the local one
assert subclass_instance.__class__.__name__ == "TestInstanceSubclass"
assert subclass_instance.foo() == "bar" # pyright: ignore[reportAttributeAccessIssue]
assert subclass_instance.baz is None # pyright: ignore[reportAttributeAccessIssue]
assert isinstance(subclass_instance.run_coordinator, dg.QueuedRunCoordinator)
with dg.instance_for_test(
overrides={
"instance_class": {
"module": "dagster_tests.core_tests.instance_tests.test_instance",
"class": "TestInstanceSubclass",
},
"foo": "bar",
"baz": "quux",
}
) as subclass_instance:
assert isinstance(subclass_instance, dg.DagsterInstance)
assert subclass_instance.__class__.__name__ == "TestInstanceSubclass"
assert subclass_instance.foo() == "bar" # pyright: ignore[reportAttributeAccessIssue]
assert subclass_instance.baz == "quux" # pyright: ignore[reportAttributeAccessIssue]
# omitting foo leads to a config schema validation error
with pytest.raises(dg.DagsterInvalidConfigError):
with dg.instance_for_test(
overrides={
"instance_class": {
"module": "dagster_tests.core_tests.instance_tests.test_instance",
"class": "TestInstanceSubclass",
},
"baz": "quux",
}
) as subclass_instance:
pass
# class that doesn't implement needed methods on ConfigurableClass
| TestInstanceSubclass |
python | apache__airflow | providers/sftp/src/airflow/providers/sftp/hooks/sftp.py | {
"start": 2398,
"end": 27382
} | class ____(SSHHook):
"""
Interact with SFTP.
This hook inherits the SSH hook. Please refer to SSH hook for the input
arguments.
:Pitfalls::
- In contrast with FTPHook describe_directory only returns size, type and
modify. It doesn't return unix.owner, unix.mode, perm, unix.group and
unique.
- If no mode is passed to create_directory it will be created with 777
permissions.
Errors that may occur throughout but should be handled downstream.
For consistency reasons with SSHHook, the preferred parameter is "ssh_conn_id".
:param ssh_conn_id: The :ref:`sftp connection id<howto/connection:sftp>`
"""
conn_name_attr = "ssh_conn_id"
default_conn_name = "sftp_default"
conn_type = "sftp"
hook_name = "SFTP"
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
return {
"hidden_fields": ["schema"],
"relabeling": {
"login": "Username",
},
}
def __init__(
self,
ssh_conn_id: str | None = "sftp_default",
host_proxy_cmd: str | None = None,
use_managed_conn: bool = True,
*args,
**kwargs,
) -> None:
self.conn: SFTPClient | None = None
self.use_managed_conn = use_managed_conn
# TODO: remove support for ssh_hook when it is removed from SFTPOperator
if kwargs.get("ssh_hook") is not None:
warnings.warn(
"Parameter `ssh_hook` is deprecated and will be ignored.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
ftp_conn_id = kwargs.pop("ftp_conn_id", None)
if ftp_conn_id:
warnings.warn(
"Parameter `ftp_conn_id` is deprecated. Please use `ssh_conn_id` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
ssh_conn_id = ftp_conn_id
kwargs["ssh_conn_id"] = ssh_conn_id
kwargs["host_proxy_cmd"] = host_proxy_cmd
self.ssh_conn_id = ssh_conn_id
self._ssh_conn: SSHClient | None = None
self._sftp_conn: SFTPClient | None = None
self._conn_count = 0
super().__init__(*args, **kwargs)
def get_conn(self) -> SFTPClient: # type: ignore[override]
"""Open an SFTP connection to the remote host."""
if self.conn is None:
self.conn = super().get_conn().open_sftp()
return self.conn
def close_conn(self) -> None:
"""Close the SFTP connection."""
if self.conn is not None:
self.conn.close()
self.conn = None
@contextmanager
def get_managed_conn(self) -> Generator[SFTPClient, None, None]:
"""Context manager that closes the connection after use."""
if self._sftp_conn is None:
ssh_conn: SSHClient = super().get_conn()
self._ssh_conn = ssh_conn
self._sftp_conn = ssh_conn.open_sftp()
self._conn_count += 1
try:
yield self._sftp_conn
finally:
self._conn_count -= 1
if self._conn_count == 0 and self._ssh_conn is not None and self._sftp_conn is not None:
self._sftp_conn.close()
self._sftp_conn = None
self._ssh_conn.close()
self._ssh_conn = None
if hasattr(self, "host_proxy"):
del self.host_proxy
def get_conn_count(self) -> int:
"""Get the number of open connections."""
return self._conn_count
@handle_connection_management
def describe_directory(self, path: str) -> dict[str, dict[str, str | int | None]]:
"""
Get file information in a directory on the remote system.
The return format is ``{filename: {attributes}}``. The remote system
support the MLSD command.
:param path: full path to the remote directory
"""
return {
f.filename: {
"size": f.st_size,
"type": "dir" if stat.S_ISDIR(f.st_mode) else "file", # type: ignore[union-attr]
"modify": datetime.datetime.fromtimestamp(f.st_mtime or 0).strftime("%Y%m%d%H%M%S"),
}
for f in sorted(self.conn.listdir_attr(path), key=lambda f: f.filename) # type: ignore[union-attr]
}
@handle_connection_management
def list_directory(self, path: str) -> list[str]:
"""
List files in a directory on the remote system.
:param path: full path to the remote directory to list
"""
return sorted(self.conn.listdir(path)) # type: ignore[union-attr]
@handle_connection_management
def list_directory_with_attr(self, path: str) -> list[SFTPAttributes]:
"""
List files in a directory on the remote system including their SFTPAttributes.
:param path: full path to the remote directory to list
"""
return [file for file in self.conn.listdir_attr(path)] # type: ignore[union-attr]
@handle_connection_management
def mkdir(self, path: str, mode: int = 0o777) -> None:
"""
Create a directory on the remote system.
The default mode is ``0o777``, but on some systems, the current umask
value may be first masked out.
:param path: full path to the remote directory to create
:param mode: int permissions of octal mode for directory
"""
return self.conn.mkdir(path, mode) # type: ignore[union-attr,return-value]
@handle_connection_management
def isdir(self, path: str) -> bool:
"""
Check if the path provided is a directory.
:param path: full path to the remote directory to check
"""
try:
return stat.S_ISDIR(self.conn.stat(path).st_mode) # type: ignore[union-attr,arg-type]
except OSError:
return False
@handle_connection_management
def isfile(self, path: str) -> bool:
"""
Check if the path provided is a file.
:param path: full path to the remote file to check
"""
try:
return stat.S_ISREG(self.conn.stat(path).st_mode) # type: ignore[arg-type,union-attr]
except OSError:
return False
@handle_connection_management
def create_directory(self, path: str, mode: int = 0o777) -> None:
"""
Create a directory on the remote system.
The default mode is ``0o777``, but on some systems, the current umask
value may be first masked out. Different from :func:`.mkdir`, this
function attempts to create parent directories if needed, and returns
silently if the target directory already exists.
:param path: full path to the remote directory to create
:param mode: int permissions of octal mode for directory
"""
if self.isdir(path):
self.log.info("%s already exists", path)
return
if self.isfile(path):
raise AirflowException(f"{path} already exists and is a file")
dirname, basename = os.path.split(path)
if dirname and not self.isdir(dirname):
self.create_directory(dirname, mode)
if basename:
self.log.info("Creating %s", path)
self.conn.mkdir(path, mode=mode) # type: ignore
@handle_connection_management
def delete_directory(self, path: str, include_files: bool = False) -> None:
"""
Delete a directory on the remote system.
:param path: full path to the remote directory to delete
"""
files: list[str] = []
dirs: list[str] = []
if include_files is True:
files, dirs, _ = self.get_tree_map(path)
dirs = dirs[::-1] # reverse the order for deleting deepest directories first
for file_path in files:
self.conn.remove(file_path) # type: ignore
for dir_path in dirs:
self.conn.rmdir(dir_path) # type: ignore
self.conn.rmdir(path) # type: ignore
@handle_connection_management
def retrieve_file(self, remote_full_path: str, local_full_path: str, prefetch: bool = True) -> None:
"""
Transfer the remote file to a local location.
If local_full_path is a string path, the file will be put
at that location.
:param remote_full_path: full path to the remote file
:param local_full_path: full path to the local file or a file-like buffer
:param prefetch: controls whether prefetch is performed (default: True)
"""
if isinstance(local_full_path, BytesIO):
# It's a file-like object ( BytesIO), so use getfo().
self.log.info("Using streaming download for %s", remote_full_path)
self.conn.getfo(remote_full_path, local_full_path, prefetch=prefetch)
# We use hasattr checking for 'write' for cases like google.cloud.storage.fileio.BlobWriter
elif hasattr(local_full_path, "write"):
self.log.info("Using streaming download for %s", remote_full_path)
# We need to cast to pass prek hook checks
stream_full_path = cast("IO[bytes]", local_full_path)
self.conn.getfo(remote_full_path, stream_full_path, prefetch=prefetch) # type: ignore[union-attr]
elif isinstance(local_full_path, (str, bytes, os.PathLike)):
# It's a string path, so use get().
self.log.info("Using standard file download for %s", remote_full_path)
self.conn.get(remote_full_path, local_full_path, prefetch=prefetch) # type: ignore[union-attr]
# If it's neither, it's an unsupported type.
else:
raise TypeError(
f"Unsupported type for local_full_path: {type(local_full_path)}. "
"Expected a stream-like object or a path-like object."
)
@handle_connection_management
def store_file(self, remote_full_path: str, local_full_path: str, confirm: bool = True) -> None:
"""
Transfer a local file to the remote location.
If local_full_path_or_buffer is a string path, the file will be read
from that location.
:param remote_full_path: full path to the remote file
:param local_full_path: full path to the local file or a file-like buffer
"""
if isinstance(local_full_path, BytesIO):
self.conn.putfo(local_full_path, remote_full_path, confirm=confirm) # type: ignore
else:
self.conn.put(local_full_path, remote_full_path, confirm=confirm) # type: ignore
@handle_connection_management
def delete_file(self, path: str) -> None:
"""
Remove a file on the server.
:param path: full path to the remote file
"""
self.conn.remove(path) # type: ignore[arg-type, union-attr]
def retrieve_directory(self, remote_full_path: str, local_full_path: str, prefetch: bool = True) -> None:
"""
Transfer the remote directory to a local location.
If local_full_path is a string path, the directory will be put
at that location.
:param remote_full_path: full path to the remote directory
:param local_full_path: full path to the local directory
:param prefetch: controls whether prefetch is performed (default: True)
"""
if Path(local_full_path).exists():
raise AirflowException(f"{local_full_path} already exists")
Path(local_full_path).mkdir(parents=True)
files, dirs, _ = self.get_tree_map(remote_full_path)
for dir_path in dirs:
new_local_path = os.path.join(local_full_path, os.path.relpath(dir_path, remote_full_path))
Path(new_local_path).mkdir(parents=True, exist_ok=True)
for file_path in files:
new_local_path = os.path.join(local_full_path, os.path.relpath(file_path, remote_full_path))
self.retrieve_file(file_path, new_local_path, prefetch)
def retrieve_directory_concurrently(
self,
remote_full_path: str,
local_full_path: str,
workers: int = os.cpu_count() or 2,
prefetch: bool = True,
) -> None:
"""
Transfer the remote directory to a local location concurrently.
If local_full_path is a string path, the directory will be put
at that location.
:param remote_full_path: full path to the remote directory
:param local_full_path: full path to the local directory
:param prefetch: controls whether prefetch is performed (default: True)
:param workers: number of workers to use for concurrent transfer (default: number of CPUs or 2 if undetermined)
"""
def retrieve_file_chunk(
conn: SFTPClient, local_file_chunk: list[str], remote_file_chunk: list[str], prefetch: bool = True
):
for local_file, remote_file in zip(local_file_chunk, remote_file_chunk):
conn.get(remote_file, local_file, prefetch=prefetch)
with self.get_managed_conn():
if Path(local_full_path).exists():
raise AirflowException(f"{local_full_path} already exists")
Path(local_full_path).mkdir(parents=True)
new_local_file_paths, remote_file_paths = [], []
files, dirs, _ = self.get_tree_map(remote_full_path)
for dir_path in dirs:
new_local_path = os.path.join(local_full_path, os.path.relpath(dir_path, remote_full_path))
Path(new_local_path).mkdir(parents=True, exist_ok=True)
for file in files:
remote_file_paths.append(file)
new_local_file_paths.append(
os.path.join(local_full_path, os.path.relpath(file, remote_full_path))
)
remote_file_chunks = [remote_file_paths[i::workers] for i in range(workers)]
local_file_chunks = [new_local_file_paths[i::workers] for i in range(workers)]
self.log.info("Opening %s new SFTP connections", workers)
conns = [SFTPHook(ssh_conn_id=self.ssh_conn_id).get_conn() for _ in range(workers)]
try:
self.log.info("Retrieving files concurrently with %s threads", workers)
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
futures = [
executor.submit(
retrieve_file_chunk,
conns[i],
local_file_chunks[i],
remote_file_chunks[i],
prefetch,
)
for i in range(workers)
]
for future in concurrent.futures.as_completed(futures):
future.result()
finally:
for conn in conns:
conn.close()
@handle_connection_management
def store_directory(self, remote_full_path: str, local_full_path: str, confirm: bool = True) -> None:
"""
Transfer a local directory to the remote location.
If local_full_path is a string path, the directory will be read
from that location.
:param remote_full_path: full path to the remote directory
:param local_full_path: full path to the local directory
"""
if self.path_exists(remote_full_path):
raise AirflowException(f"{remote_full_path} already exists")
self.create_directory(remote_full_path)
for root, dirs, files in os.walk(local_full_path):
for dir_name in dirs:
dir_path = os.path.join(root, dir_name)
new_remote_path = os.path.join(remote_full_path, os.path.relpath(dir_path, local_full_path))
self.create_directory(new_remote_path)
for file_name in files:
file_path = os.path.join(root, file_name)
new_remote_path = os.path.join(remote_full_path, os.path.relpath(file_path, local_full_path))
self.store_file(new_remote_path, file_path, confirm)
def store_directory_concurrently(
self,
remote_full_path: str,
local_full_path: str,
confirm: bool = True,
workers: int = os.cpu_count() or 2,
) -> None:
"""
Transfer a local directory to the remote location concurrently.
If local_full_path is a string path, the directory will be read
from that location.
:param remote_full_path: full path to the remote directory
:param local_full_path: full path to the local directory
:param confirm: whether to confirm the file size after transfer (default: True)
:param workers: number of workers to use for concurrent transfer (default: number of CPUs or 2 if undetermined)
"""
def store_file_chunk(
conn: SFTPClient, local_file_chunk: list[str], remote_file_chunk: list[str], confirm: bool
):
for local_file, remote_file in zip(local_file_chunk, remote_file_chunk):
conn.put(local_file, remote_file, confirm=confirm)
with self.get_managed_conn():
if self.path_exists(remote_full_path):
raise AirflowException(f"{remote_full_path} already exists")
self.create_directory(remote_full_path)
local_file_paths, new_remote_file_paths = [], []
for root, dirs, files in os.walk(local_full_path):
for dir_name in dirs:
dir_path = os.path.join(root, dir_name)
new_remote_path = os.path.join(
remote_full_path, os.path.relpath(dir_path, local_full_path)
)
self.create_directory(new_remote_path)
for file_name in files:
file_path = os.path.join(root, file_name)
new_remote_path = os.path.join(
remote_full_path, os.path.relpath(file_path, local_full_path)
)
local_file_paths.append(file_path)
new_remote_file_paths.append(new_remote_path)
remote_file_chunks = [new_remote_file_paths[i::workers] for i in range(workers)]
local_file_chunks = [local_file_paths[i::workers] for i in range(workers)]
self.log.info("Opening %s new SFTP connections", workers)
conns = [SFTPHook(ssh_conn_id=self.ssh_conn_id).get_conn() for _ in range(workers)]
try:
self.log.info("Storing files concurrently with %s threads", workers)
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
futures = [
executor.submit(
store_file_chunk, conns[i], local_file_chunks[i], remote_file_chunks[i], confirm
)
for i in range(workers)
]
for future in concurrent.futures.as_completed(futures):
future.result()
finally:
for conn in conns:
conn.close()
@handle_connection_management
def get_mod_time(self, path: str) -> str:
"""
Get an entry's modification time.
:param path: full path to the remote file
"""
ftp_mdtm = self.conn.stat(path).st_mtime # type: ignore[union-attr]
return datetime.datetime.fromtimestamp(ftp_mdtm).strftime("%Y%m%d%H%M%S") # type: ignore
@handle_connection_management
def path_exists(self, path: str) -> bool:
"""
Whether a remote entity exists.
:param path: full path to the remote file or directory
"""
try:
self.conn.stat(path) # type: ignore[union-attr]
except OSError:
return False
return True
@staticmethod
def _is_path_match(path: str, prefix: str | None = None, delimiter: str | None = None) -> bool:
"""
Whether given path starts with ``prefix`` (if set) and ends with ``delimiter`` (if set).
:param path: path to be checked
:param prefix: if set path will be checked is starting with prefix
:param delimiter: if set path will be checked is ending with suffix
:return: bool
"""
if prefix is not None and not path.startswith(prefix):
return False
if delimiter is not None and not path.endswith(delimiter):
return False
return True
def walktree(
self,
path: str,
fcallback: Callable[[str], Any | None],
dcallback: Callable[[str], Any | None],
ucallback: Callable[[str], Any | None],
recurse: bool = True,
) -> None:
"""
Recursively descend, depth first, the directory tree at ``path``.
This calls discrete callback functions for each regular file, directory,
and unknown file type.
:param str path:
root of remote directory to descend, use '.' to start at
:attr:`.pwd`
:param callable fcallback:
callback function to invoke for a regular file.
(form: ``func(str)``)
:param callable dcallback:
callback function to invoke for a directory. (form: ``func(str)``)
:param callable ucallback:
callback function to invoke for an unknown file type.
(form: ``func(str)``)
:param bool recurse: *Default: True* - should it recurse
"""
for entry in self.list_directory_with_attr(path):
pathname = os.path.join(path, entry.filename)
mode = entry.st_mode
if stat.S_ISDIR(mode): # type: ignore
# It's a directory, call the dcallback function
dcallback(pathname)
if recurse:
# now, recurse into it
self.walktree(pathname, fcallback, dcallback, ucallback)
elif stat.S_ISREG(mode): # type: ignore
# It's a file, call the fcallback function
fcallback(pathname)
else:
# Unknown file type
ucallback(pathname)
def get_tree_map(
self, path: str, prefix: str | None = None, delimiter: str | None = None
) -> tuple[list[str], list[str], list[str]]:
"""
Get tuple with recursive lists of files, directories and unknown paths.
It is possible to filter results by giving prefix and/or delimiter parameters.
:param path: path from which tree will be built
:param prefix: if set paths will be added if start with prefix
:param delimiter: if set paths will be added if end with delimiter
:return: tuple with list of files, dirs and unknown items
"""
files: list[str] = []
dirs: list[str] = []
unknowns: list[str] = []
def append_matching_path_callback(list_: list[str]) -> Callable:
return lambda item: list_.append(item) if self._is_path_match(item, prefix, delimiter) else None
self.walktree(
path=path,
fcallback=append_matching_path_callback(files),
dcallback=append_matching_path_callback(dirs),
ucallback=append_matching_path_callback(unknowns),
recurse=True,
)
return files, dirs, unknowns
def test_connection(self) -> tuple[bool, str]:
"""Test the SFTP connection by calling path with directory."""
try:
with self.get_managed_conn() as conn:
conn.normalize(".")
return True, "Connection successfully tested"
except Exception as e:
return False, str(e)
def get_file_by_pattern(self, path, fnmatch_pattern) -> str:
"""
Get the first matching file based on the given fnmatch type pattern.
:param path: path to be checked
:param fnmatch_pattern: The pattern that will be matched with `fnmatch`
:return: string containing the first found file, or an empty string if none matched
"""
for file in self.list_directory(path):
if fnmatch(file, fnmatch_pattern):
return file
return ""
def get_files_by_pattern(self, path, fnmatch_pattern) -> list[str]:
"""
Get all matching files based on the given fnmatch type pattern.
:param path: path to be checked
:param fnmatch_pattern: The pattern that will be matched with `fnmatch`
:return: list of string containing the found files, or an empty list if none matched
"""
matched_files = []
for file in self.list_directory_with_attr(path):
if fnmatch(file.filename, fnmatch_pattern):
matched_files.append(file.filename)
return matched_files
| SFTPHook |
python | huggingface__transformers | src/transformers/models/pvt_v2/modeling_pvt_v2.py | {
"start": 13944,
"end": 15670
} | class ____(nn.Module):
def __init__(self, config: PvtV2Config):
super().__init__()
self.config = config
self.gradient_checkpointing = False
# encoder layers
self.layers = nn.ModuleList([PvtV2EncoderLayer(config, i) for i in range(config.num_encoder_blocks)])
def forward(
self,
pixel_values: torch.FloatTensor,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
batch_size = pixel_values.shape[0]
hidden_states = pixel_values
for idx, layer in enumerate(self.layers):
layer_output = layer(hidden_states, output_attentions)
outputs, height, width = layer_output
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
# reshape back to (batch_size, num_channels, height, width)
hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
| PvtV2Encoder |
python | doocs__leetcode | solution/2400-2499/2430.Maximum Deletions on a String/Solution2.py | {
"start": 0,
"end": 505
} | class ____:
def deleteString(self, s: str) -> int:
n = len(s)
g = [[0] * (n + 1) for _ in range(n + 1)]
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
if s[i] == s[j]:
g[i][j] = g[i + 1][j + 1] + 1
f = [1] * n
for i in range(n - 1, -1, -1):
for j in range(1, (n - i) // 2 + 1):
if g[i][i + j] >= j:
f[i] = max(f[i], f[i + j] + 1)
return f[0]
| Solution |
python | plotly__plotly.py | plotly/graph_objs/sunburst/legendgrouptitle/_font.py | {
"start": 233,
"end": 9932
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sunburst.legendgrouptitle"
_path_str = "sunburst.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sunburst.legen
dgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sunburst.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | ray-project__ray | python/ray/dashboard/dashboard.py | {
"start": 878,
"end": 10755
} | class ____:
"""A dashboard process for monitoring Ray nodes.
This dashboard is made up of a REST API which collates data published by
Reporter processes on nodes into a json structure, and a webserver
which polls said API for display purposes.
Args:
host: Host address of dashboard aiohttp server.
port: Port number of dashboard aiohttp server.
port_retries: The retry times to select a valid port.
gcs_address: GCS address of the cluster.
cluster_id_hex: Cluster ID hex string.
node_ip_address: The IP address of the dashboard.
serve_frontend: If configured, frontend HTML
is not served from the dashboard.
log_dir: Log directory of dashboard.
logging_level: The logging level (e.g. logging.INFO, logging.DEBUG)
logging_format: The format string for log messages
logging_filename: The name of the log file
logging_rotate_bytes: Max size in bytes before rotating log file
logging_rotate_backup_count: Number of backup files to keep when rotating
"""
def __init__(
self,
host: str,
port: int,
port_retries: int,
gcs_address: str,
cluster_id_hex: str,
node_ip_address: str,
log_dir: str,
logging_level: int,
logging_format: str,
logging_filename: str,
logging_rotate_bytes: int,
logging_rotate_backup_count: int,
temp_dir: str = None,
session_dir: str = None,
minimal: bool = False,
serve_frontend: bool = True,
modules_to_load: Optional[Set[str]] = None,
):
self.dashboard_head = dashboard_head.DashboardHead(
http_host=host,
http_port=port,
http_port_retries=port_retries,
gcs_address=gcs_address,
cluster_id_hex=cluster_id_hex,
node_ip_address=node_ip_address,
log_dir=log_dir,
logging_level=logging_level,
logging_format=logging_format,
logging_filename=logging_filename,
logging_rotate_bytes=logging_rotate_bytes,
logging_rotate_backup_count=logging_rotate_backup_count,
temp_dir=temp_dir,
session_dir=session_dir,
minimal=minimal,
serve_frontend=serve_frontend,
modules_to_load=modules_to_load,
)
async def run(self):
await self.dashboard_head.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Ray dashboard.")
parser.add_argument(
"--host", required=True, type=str, help="The host to use for the HTTP server."
)
parser.add_argument(
"--port", required=True, type=int, help="The port to use for the HTTP server."
)
parser.add_argument(
"--port-retries",
required=False,
type=int,
default=0,
help="The retry times to select a valid port.",
)
parser.add_argument(
"--gcs-address", required=True, type=str, help="The address (ip:port) of GCS."
)
parser.add_argument(
"--cluster-id-hex", required=True, type=str, help="The cluster ID in hex."
)
parser.add_argument(
"--node-ip-address",
required=True,
type=str,
help="The IP address of the node where this is running.",
)
parser.add_argument(
"--logging-level",
required=False,
type=lambda s: logging.getLevelName(s.upper()),
default=ray_constants.LOGGER_LEVEL,
choices=ray_constants.LOGGER_LEVEL_CHOICES,
help=ray_constants.LOGGER_LEVEL_HELP,
)
parser.add_argument(
"--logging-format",
required=False,
type=str,
default=ray_constants.LOGGER_FORMAT,
help=ray_constants.LOGGER_FORMAT_HELP,
)
parser.add_argument(
"--logging-filename",
required=False,
type=str,
default=dashboard_consts.DASHBOARD_LOG_FILENAME,
help="Specify the name of log file, "
'log to stdout if set empty, default is "{}"'.format(
dashboard_consts.DASHBOARD_LOG_FILENAME
),
)
parser.add_argument(
"--logging-rotate-bytes",
required=False,
type=int,
default=LOGGING_ROTATE_BYTES,
help="Specify the max bytes for rotating "
"log file, default is {} bytes.".format(LOGGING_ROTATE_BYTES),
)
parser.add_argument(
"--logging-rotate-backup-count",
required=False,
type=int,
default=LOGGING_ROTATE_BACKUP_COUNT,
help="Specify the backup count of rotated log file, default is {}.".format(
LOGGING_ROTATE_BACKUP_COUNT
),
)
parser.add_argument(
"--log-dir",
required=True,
type=str,
default=None,
help="Specify the path of log directory.",
)
parser.add_argument(
"--temp-dir",
required=True,
type=str,
default=None,
help="Specify the path of the temporary directory use by Ray process.",
)
parser.add_argument(
"--session-dir",
required=True,
type=str,
default=None,
help="Specify the path of the session directory of the cluster.",
)
parser.add_argument(
"--minimal",
action="store_true",
help=(
"Minimal dashboard only contains a subset of features that don't "
"require additional dependencies installed when ray is installed "
"by `pip install ray[default]`."
),
)
parser.add_argument(
"--modules-to-load",
required=False,
default=None,
help=(
"Specify the list of module names in [module_1],[module_2] format."
"E.g., JobHead,StateHead... "
"If nothing is specified, all modules are loaded."
),
)
parser.add_argument(
"--disable-frontend",
action="store_true",
help=("If configured, frontend html is not served from the server."),
)
parser.add_argument(
"--stdout-filepath",
required=False,
type=str,
default="",
help="The filepath to dump dashboard stdout.",
)
parser.add_argument(
"--stderr-filepath",
required=False,
type=str,
default="",
help="The filepath to dump dashboard stderr.",
)
args = parser.parse_args()
try:
# Disable log rotation for windows platform.
logging_rotation_bytes = (
args.logging_rotate_bytes if sys.platform != "win32" else 0
)
logging_rotation_backup_count = (
args.logging_rotate_backup_count if sys.platform != "win32" else 1
)
setup_component_logger(
logging_level=args.logging_level,
logging_format=args.logging_format,
log_dir=args.log_dir,
filename=args.logging_filename,
max_bytes=logging_rotation_bytes,
backup_count=logging_rotation_backup_count,
)
# Setup stdout/stderr redirect files if redirection enabled.
logging_utils.redirect_stdout_stderr_if_needed(
args.stdout_filepath,
args.stderr_filepath,
logging_rotation_bytes,
logging_rotation_backup_count,
)
if args.modules_to_load:
modules_to_load = set(args.modules_to_load.strip(" ,").split(","))
else:
# None == default.
modules_to_load = None
loop = get_or_create_event_loop()
dashboard = Dashboard(
host=args.host,
port=args.port,
port_retries=args.port_retries,
gcs_address=args.gcs_address,
cluster_id_hex=args.cluster_id_hex,
node_ip_address=args.node_ip_address,
log_dir=args.log_dir,
logging_level=args.logging_level,
logging_format=args.logging_format,
logging_filename=args.logging_filename,
logging_rotate_bytes=logging_rotation_bytes,
logging_rotate_backup_count=logging_rotation_backup_count,
temp_dir=args.temp_dir,
session_dir=args.session_dir,
minimal=args.minimal,
serve_frontend=(not args.disable_frontend),
modules_to_load=modules_to_load,
)
def sigterm_handler():
logger.warning("Exiting with SIGTERM immediately...")
os._exit(signal.SIGTERM)
if sys.platform != "win32":
# TODO(rickyyx): we currently do not have any logic for actual
# graceful termination in the dashboard. Most of the underlying
# async tasks run by the dashboard head doesn't handle CancelledError.
# So a truly graceful shutdown is not trivial w/o much refactoring.
# Re-open the issue: https://github.com/ray-project/ray/issues/25518
# if a truly graceful shutdown is required.
loop.add_signal_handler(signal.SIGTERM, sigterm_handler)
loop.run_until_complete(dashboard.run())
except Exception as e:
traceback_str = format_error_message(traceback.format_exc())
message = (
f"The dashboard on node {platform.uname()[1]} "
f"failed with the following "
f"error:\n{traceback_str}"
)
if isinstance(e, dashboard_utils.FrontendNotFoundError):
logger.warning(message)
else:
logger.error(message)
raise e
# Something went wrong, so push an error to all drivers.
publish_error_to_driver(
ray_constants.DASHBOARD_DIED_ERROR,
message,
gcs_client=ray._raylet.GcsClient(address=args.gcs_address),
)
| Dashboard |
python | openai__openai-python | tests/api_resources/beta/chatkit/test_sessions.py | {
"start": 4259,
"end": 8429
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
session = await async_client.beta.chatkit.sessions.create(
user="x",
workflow={"id": "id"},
)
assert_matches_type(ChatSession, session, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
session = await async_client.beta.chatkit.sessions.create(
user="x",
workflow={
"id": "id",
"state_variables": {"foo": "string"},
"tracing": {"enabled": True},
"version": "version",
},
chatkit_configuration={
"automatic_thread_titling": {"enabled": True},
"file_upload": {
"enabled": True,
"max_file_size": 1,
"max_files": 1,
},
"history": {
"enabled": True,
"recent_threads": 1,
},
},
expires_after={
"anchor": "created_at",
"seconds": 1,
},
rate_limits={"max_requests_per_1_minute": 1},
)
assert_matches_type(ChatSession, session, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.chatkit.sessions.with_raw_response.create(
user="x",
workflow={"id": "id"},
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
session = response.parse()
assert_matches_type(ChatSession, session, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.chatkit.sessions.with_streaming_response.create(
user="x",
workflow={"id": "id"},
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
session = await response.parse()
assert_matches_type(ChatSession, session, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
session = await async_client.beta.chatkit.sessions.cancel(
"cksess_123",
)
assert_matches_type(ChatSession, session, path=["response"])
@parametrize
async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.chatkit.sessions.with_raw_response.cancel(
"cksess_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
session = response.parse()
assert_matches_type(ChatSession, session, path=["response"])
@parametrize
async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.chatkit.sessions.with_streaming_response.cancel(
"cksess_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
session = await response.parse()
assert_matches_type(ChatSession, session, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"):
await async_client.beta.chatkit.sessions.with_raw_response.cancel(
"",
)
| TestAsyncSessions |
python | facebook__pyre-check | tools/typeshed_patcher/tests/buck_test.py | {
"start": 327,
"end": 1202
} | class ____(testslide.TestCase):
def test_generate_mapped_source(self) -> None:
sample_typeshed = MemoryBackedTypeshed(
{
Path("stdlib/math.pyi"): "",
Path("stdlib/os/path.pyi"): "",
Path("stubs/ujson/ujson.pyi"): "",
Path("stubs/mysqlclient/MySQLdb/__init__.pyi"): "",
}
)
mapped_source = generate_mapped_source(sample_typeshed)
self.assertDictEqual(
mapped_source.mapping,
{
Path("math.pyi"): Path("stdlib/math.pyi"),
Path("os/path.pyi"): Path("stdlib/os/path.pyi"),
Path("ujson.pyi"): Path("stubs/ujson/ujson.pyi"),
Path("MySQLdb/__init__.pyi"): Path(
"stubs/mysqlclient/MySQLdb/__init__.pyi"
),
},
)
| BuckTest |
python | Lightning-AI__lightning | src/lightning/pytorch/plugins/precision/double.py | {
"start": 1241,
"end": 2198
} | class ____(Precision):
"""Plugin for training with double (``torch.float64``) precision."""
precision: Literal["64-true"] = "64-true"
@override
def convert_module(self, module: nn.Module) -> nn.Module:
return module.double()
@override
def tensor_init_context(self) -> AbstractContextManager:
return _DtypeContextManager(torch.float64)
@override
def module_init_context(self) -> AbstractContextManager:
return self.tensor_init_context()
@override
@contextmanager
def forward_context(self) -> Generator[None, None, None]:
"""A context manager to change the default tensor type.
See: :func:`torch.set_default_dtype`
"""
with self.tensor_init_context():
yield
@override
def convert_input(self, data: Any) -> Any:
return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=torch.double)
| DoublePrecision |
python | aimacode__aima-python | logic.py | {
"start": 42795,
"end": 49530
} | class ____(PropKB):
"""
Create a Knowledge Base that contains the a temporal "Wumpus physics" and temporal rules with time zero.
"""
def __init__(self, dimrow):
super().__init__()
self.dimrow = dimrow
self.tell(~wumpus(1, 1))
self.tell(~pit(1, 1))
for y in range(1, dimrow + 1):
for x in range(1, dimrow + 1):
pits_in = list()
wumpus_in = list()
if x > 1: # West room exists
pits_in.append(pit(x - 1, y))
wumpus_in.append(wumpus(x - 1, y))
if y < dimrow: # North room exists
pits_in.append(pit(x, y + 1))
wumpus_in.append(wumpus(x, y + 1))
if x < dimrow: # East room exists
pits_in.append(pit(x + 1, y))
wumpus_in.append(wumpus(x + 1, y))
if y > 1: # South room exists
pits_in.append(pit(x, y - 1))
wumpus_in.append(wumpus(x, y - 1))
self.tell(equiv(breeze(x, y), new_disjunction(pits_in)))
self.tell(equiv(stench(x, y), new_disjunction(wumpus_in)))
# Rule that describes existence of at least one Wumpus
wumpus_at_least = list()
for x in range(1, dimrow + 1):
for y in range(1, dimrow + 1):
wumpus_at_least.append(wumpus(x, y))
self.tell(new_disjunction(wumpus_at_least))
# Rule that describes existence of at most one Wumpus
for i in range(1, dimrow + 1):
for j in range(1, dimrow + 1):
for u in range(1, dimrow + 1):
for v in range(1, dimrow + 1):
if i != u or j != v:
self.tell(~wumpus(i, j) | ~wumpus(u, v))
# Temporal rules at time zero
self.tell(location(1, 1, 0))
for i in range(1, dimrow + 1):
for j in range(1, dimrow + 1):
self.tell(implies(location(i, j, 0), equiv(percept_breeze(0), breeze(i, j))))
self.tell(implies(location(i, j, 0), equiv(percept_stench(0), stench(i, j))))
if i != 1 or j != 1:
self.tell(~location(i, j, 0))
self.tell(wumpus_alive(0))
self.tell(have_arrow(0))
self.tell(facing_east(0))
self.tell(~facing_north(0))
self.tell(~facing_south(0))
self.tell(~facing_west(0))
def make_action_sentence(self, action, time):
actions = [move_forward(time), shoot(time), turn_left(time), turn_right(time)]
for a in actions:
if action is a:
self.tell(action)
else:
self.tell(~a)
def make_percept_sentence(self, percept, time):
# Glitter, Bump, Stench, Breeze, Scream
flags = [0, 0, 0, 0, 0]
# Things perceived
if isinstance(percept, Glitter):
flags[0] = 1
self.tell(percept_glitter(time))
elif isinstance(percept, Bump):
flags[1] = 1
self.tell(percept_bump(time))
elif isinstance(percept, Stench):
flags[2] = 1
self.tell(percept_stench(time))
elif isinstance(percept, Breeze):
flags[3] = 1
self.tell(percept_breeze(time))
elif isinstance(percept, Scream):
flags[4] = 1
self.tell(percept_scream(time))
# Things not perceived
for i in range(len(flags)):
if flags[i] == 0:
if i == 0:
self.tell(~percept_glitter(time))
elif i == 1:
self.tell(~percept_bump(time))
elif i == 2:
self.tell(~percept_stench(time))
elif i == 3:
self.tell(~percept_breeze(time))
elif i == 4:
self.tell(~percept_scream(time))
def add_temporal_sentences(self, time):
if time == 0:
return
t = time - 1
# current location rules
for i in range(1, self.dimrow + 1):
for j in range(1, self.dimrow + 1):
self.tell(implies(location(i, j, time), equiv(percept_breeze(time), breeze(i, j))))
self.tell(implies(location(i, j, time), equiv(percept_stench(time), stench(i, j))))
s = list()
s.append(equiv(location(i, j, time), location(i, j, time) & ~move_forward(time) | percept_bump(time)))
if i != 1:
s.append(location(i - 1, j, t) & facing_east(t) & move_forward(t))
if i != self.dimrow:
s.append(location(i + 1, j, t) & facing_west(t) & move_forward(t))
if j != 1:
s.append(location(i, j - 1, t) & facing_north(t) & move_forward(t))
if j != self.dimrow:
s.append(location(i, j + 1, t) & facing_south(t) & move_forward(t))
# add sentence about location i,j
self.tell(new_disjunction(s))
# add sentence about safety of location i,j
self.tell(equiv(ok_to_move(i, j, time), ~pit(i, j) & ~wumpus(i, j) & wumpus_alive(time)))
# Rules about current orientation
a = facing_north(t) & turn_right(t)
b = facing_south(t) & turn_left(t)
c = facing_east(t) & ~turn_left(t) & ~turn_right(t)
s = equiv(facing_east(time), a | b | c)
self.tell(s)
a = facing_north(t) & turn_left(t)
b = facing_south(t) & turn_right(t)
c = facing_west(t) & ~turn_left(t) & ~turn_right(t)
s = equiv(facing_west(time), a | b | c)
self.tell(s)
a = facing_east(t) & turn_left(t)
b = facing_west(t) & turn_right(t)
c = facing_north(t) & ~turn_left(t) & ~turn_right(t)
s = equiv(facing_north(time), a | b | c)
self.tell(s)
a = facing_west(t) & turn_left(t)
b = facing_east(t) & turn_right(t)
c = facing_south(t) & ~turn_left(t) & ~turn_right(t)
s = equiv(facing_south(time), a | b | c)
self.tell(s)
# Rules about last action
self.tell(equiv(move_forward(t), ~turn_right(t) & ~turn_left(t)))
# Rule about the arrow
self.tell(equiv(have_arrow(time), have_arrow(t) & ~shoot(t)))
# Rule about Wumpus (dead or alive)
self.tell(equiv(wumpus_alive(time), wumpus_alive(t) & ~percept_scream(time)))
def ask_if_true(self, query):
return pl_resolution(self, query)
# ______________________________________________________________________________
| WumpusKB |
python | tensorflow__tensorflow | tensorflow/python/util/lazy_loader_test.py | {
"start": 1959,
"end": 2386
} | class ____(test.TestCase):
def testPickleLazyLoader(self):
name = PickleTest.__module__ # Try to pickle current module.
lazy_loader_module = lazy_loader.LazyLoader(
"lazy_loader_module", globals(), name)
restored = pickle.loads(pickle.dumps(lazy_loader_module))
self.assertEqual(restored.__name__, name)
self.assertIsNotNone(restored.PickleTest)
if __name__ == "__main__":
test.main()
| PickleTest |
python | encode__django-rest-framework | tests/models.py | {
"start": 1411,
"end": 1571
} | class ____(RESTFrameworkModel):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4)
name = models.CharField(max_length=100)
| UUIDForeignKeyTarget |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/cluster_coordinator_test.py | {
"start": 15922,
"end": 17257
} | class ____(test.TestCase):
@classmethod
def setUpClass(cls):
cls._threading_thread = threading.Thread
threading.Thread = ErrorReportingThread
super(TestCaseWithErrorReportingThread, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestCaseWithErrorReportingThread, cls).tearDownClass()
threading.Thread = cls._threading_thread
def setUp(self):
ErrorReportingThread.error = None
super(TestCaseWithErrorReportingThread, self).setUp()
def tearDown(self):
super(TestCaseWithErrorReportingThread, self).tearDown()
if ErrorReportingThread.error:
raise ErrorReportingThread.error # pylint: disable=raising-bad-type
def make_coordinator(num_workers, num_ps, partitioner=None):
# TODO(rchao): Test the internal rpc_layer version.
cluster_def = multi_worker_test_base.create_in_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer='grpc')
cluster_def['chief'] = [
'localhost:%d' % test_util.pick_unused_port()
]
cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(
ClusterSpec(cluster_def), rpc_layer='grpc')
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
cluster_resolver, variable_partitioner=partitioner)
return coordinator_lib.ClusterCoordinator(strategy)
| TestCaseWithErrorReportingThread |
python | numba__numba | numba/tests/support.py | {
"start": 37837,
"end": 43717
} | class ____(object):
@contextlib.contextmanager
def check_warnings(self, messages, category=RuntimeWarning):
with warnings.catch_warnings(record=True) as catch:
warnings.simplefilter("always")
yield
found = 0
for w in catch:
for m in messages:
if m in str(w.message):
self.assertEqual(w.category, category)
found += 1
self.assertEqual(found, len(messages))
def _format_jit_options(**jit_options):
if not jit_options:
return ''
out = []
for key, value in jit_options.items():
if isinstance(value, str):
value = '"{}"'.format(value)
out.append('{}={}'.format(key, value))
return ', '.join(out)
@contextlib.contextmanager
def create_temp_module(source_lines, **jit_options):
"""A context manager that creates and imports a temporary module
from sources provided in ``source_lines``.
Optionally it is possible to provide jit options for ``jit_module`` if it
is explicitly used in ``source_lines`` like ``jit_module({jit_options})``.
"""
# Use try/finally so cleanup happens even when an exception is raised
try:
tempdir = temp_directory('test_temp_module')
# Generate random module name
temp_module_name = 'test_temp_module_{}'.format(
str(uuid.uuid4()).replace('-', '_'))
temp_module_path = os.path.join(tempdir, temp_module_name + '.py')
jit_options = _format_jit_options(**jit_options)
with open(temp_module_path, 'w') as f:
lines = source_lines.format(jit_options=jit_options)
f.write(lines)
# Add test_module to sys.path so it can be imported
sys.path.insert(0, tempdir)
test_module = importlib.import_module(temp_module_name)
yield test_module
finally:
sys.modules.pop(temp_module_name, None)
sys.path.remove(tempdir)
shutil.rmtree(tempdir)
def run_in_subprocess(code, flags=None, env=None, timeout=30):
"""Run a snippet of Python code in a subprocess with flags, if any are
given. 'env' is passed to subprocess.Popen(). 'timeout' is passed to
popen.communicate().
Returns the stdout and stderr of the subprocess after its termination.
"""
if flags is None:
flags = []
cmd = [sys.executable,] + flags + ["-c", code]
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, err = popen.communicate(timeout=timeout)
if popen.returncode != 0:
msg = "process failed with code %s: stderr follows\n%s\n"
raise AssertionError(msg % (popen.returncode, err.decode()))
return out, err
def strace(work, syscalls, timeout=10):
"""Runs strace whilst executing the function work() in the current process,
captures the listed syscalls (list of strings). Takes an optional timeout in
seconds, default is 10, if this is exceeded the process will be sent a
SIGKILL. Returns a list of lines that are output by strace.
"""
# Open a tmpfile for strace to write into.
with tempfile.NamedTemporaryFile('w+t') as ntf:
parent_pid = os.getpid()
strace_binary = shutil.which('strace')
if strace_binary is None:
raise ValueError("No valid 'strace' binary could be found")
cmd = [strace_binary, # strace
'-q', # quietly (no attach/detach print out)
'-p', str(parent_pid), # this PID
'-e', ','.join(syscalls), # these syscalls
'-o', ntf.name] # put output into this file
# redirect stdout, stderr is handled by the `-o` flag to strace.
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,)
strace_pid = popen.pid
thread_timeout = threading.Timer(timeout, popen.kill)
thread_timeout.start()
def check_return(problem=''):
ret = popen.returncode
if ret != 0:
msg = ("strace exited non-zero, process return code was:"
f"{ret}. {problem}")
raise RuntimeError(msg)
try:
# push the communication onto a thread so it doesn't block.
# start comms thread
thread_comms = threading.Thread(target=popen.communicate)
thread_comms.start()
# do work
work()
# Flush the output buffer file
ntf.flush()
# interrupt the strace process to stop it if it's still running
if popen.poll() is None:
os.kill(strace_pid, signal.SIGINT)
else:
# it's not running, probably an issue, raise
problem="If this is SIGKILL, increase the timeout?"
check_return(problem)
# Make sure the return code is 0, SIGINT to detach is considered
# a successful exit.
popen.wait()
check_return()
# collect the data
strace_data = ntf.readlines()
finally:
# join communication, should be stopped now as process has
# exited
thread_comms.join()
# should be stopped already
thread_timeout.cancel()
return strace_data
def strace_supported():
"""Checks if strace is supported and working"""
# Only support this on linux where the `strace` binary is likely to be the
# strace needed.
if not sys.platform.startswith('linux'):
return False
def force_clone(): # subprocess triggers a clone
subprocess.run([sys.executable, '-c', 'exit()'])
syscall = 'clone'
try:
trace = strace(force_clone, [syscall,])
except Exception:
return False
return syscall in ''.join(trace)
| CheckWarningsMixin |
python | python-openxml__python-docx | src/docx/enum/text.py | {
"start": 6394,
"end": 7034
} | class ____(BaseXmlEnum):
"""Specifies the character to use as the leader with formatted tabs.
MS API name: `WdTabLeader`
URL: https://msdn.microsoft.com/en-us/library/office/ff845050.aspx
"""
SPACES = (0, "none", "Spaces. Default.")
"""Spaces. Default."""
DOTS = (1, "dot", "Dots.")
"""Dots."""
DASHES = (2, "hyphen", "Dashes.")
"""Dashes."""
LINES = (3, "underscore", "Double lines.")
"""Double lines."""
HEAVY = (4, "heavy", "A heavy line.")
"""A heavy line."""
MIDDLE_DOT = (5, "middleDot", "A vertically-centered dot.")
"""A vertically-centered dot."""
| WD_TAB_LEADER |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/test_security.py | {
"start": 3401,
"end": 3871
} | class ____(Model):
__tablename__ = "some_model"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
field_string: Mapped[str] = mapped_column(String(50), unique=True, nullable=False)
field_integer: Mapped[int | None] = mapped_column(Integer())
field_float: Mapped[float | None] = mapped_column(Float())
field_date: Mapped[datetime.date | None] = mapped_column(Date())
def __repr__(self):
return str(self.field_string)
| SomeModel |
python | joke2k__faker | faker/providers/color/color.py | {
"start": 2756,
"end": 11792
} | class ____:
"""Implement random color generation in a human-friendly way.
This helper class encapsulates the internal implementation and logic of the
:meth:`color() <faker.providers.color.Provider.color>` method.
"""
def __init__(self, generator: Optional["Generator"] = None, seed: Optional[SeedType] = None) -> None:
self.colormap = COLOR_MAP
# Option to specify a seed was not removed so this class
# can still be tested independently w/o generators
if generator:
self.random = generator.random
else:
self.seed = seed if seed else random.randint(0, sys.maxsize)
self.random = random.Random(int(self.seed))
def generate(
self,
hue: Optional[HueType] = None,
luminosity: Optional[str] = None,
color_format: ColorFormat = "hex",
) -> str:
"""Generate and format a color.
Whenever :meth:`color() <faker.providers.color.Provider.color>` is
called, the arguments used are simply passed into this method, and this
method handles the rest.
"""
# Generate HSV color tuple from picked hue and luminosity
hsv = self.generate_hsv(hue=hue, luminosity=luminosity)
# Return the HSB/V color in the desired string format
return self.set_format(hsv, color_format)
def generate_hsv(
self,
hue: Optional[HueType] = None,
luminosity: Optional[str] = None,
) -> Tuple[int, int, int]:
"""Generate a HSV color tuple."""
# First we pick a hue (H)
h = self.pick_hue(hue)
# Then use H to determine saturation (S)
s = self.pick_saturation(h, hue, luminosity)
# Then use S and H to determine brightness/value (B/V).
v = self.pick_brightness(h, s, luminosity)
return h, s, v
def generate_rgb(
self,
hue: Optional[HueType] = None,
luminosity: Optional[str] = None,
) -> Tuple[int, int, int]:
"""Generate a RGB color tuple of integers."""
return self.hsv_to_rgb(self.generate_hsv(hue=hue, luminosity=luminosity))
def generate_rgb_float(
self,
hue: Optional[HueType] = None,
luminosity: Optional[str] = None,
) -> Tuple[float, float, float]:
"""Generate a RGB color tuple of floats."""
return self.hsv_to_rgb_float(self.generate_hsv(hue=hue, luminosity=luminosity))
def generate_hsl(
self,
hue: Optional[HueType] = None,
luminosity: Optional[str] = None,
) -> Tuple[int, int, int]:
"""Generate a HSL color tuple."""
return self.hsv_to_hsl(self.generate_hsv(hue=hue, luminosity=luminosity))
def pick_hue(self, hue: Optional[HueType]) -> int:
"""Return a numerical hue value."""
hue_ = self.random_within(self.get_hue_range(hue))
# Instead of storing red as two separate ranges,
# we group them, using negative numbers
if hue_ < 0:
hue_ += 360
return hue_
def pick_saturation(self, hue: int, hue_name: Optional[HueType], luminosity: Optional[str]) -> int:
"""Return a numerical saturation value."""
if luminosity is None:
luminosity = ""
if luminosity == "random":
return self.random_within((0, 100))
if isinstance(hue_name, str) and hue_name == "monochrome":
return 0
s_min, s_max = self.get_saturation_range(hue)
if luminosity == "bright":
s_min = 55
elif luminosity == "dark":
s_min = s_max - 10
elif luminosity == "light":
s_max = 55
return self.random_within((s_min, s_max))
def pick_brightness(self, h: int, s: int, luminosity: Optional[str]) -> int:
"""Return a numerical brightness value."""
if luminosity is None:
luminosity = ""
b_min = self.get_minimum_brightness(h, s)
b_max = 100
if luminosity == "dark":
b_max = b_min + 20
elif luminosity == "light":
b_min = (b_max + b_min) // 2
elif luminosity == "random":
b_min = 0
b_max = 100
return self.random_within((b_min, b_max))
def set_format(self, hsv: Tuple[int, int, int], color_format: ColorFormat) -> str:
"""Handle conversion of HSV values into desired format."""
if color_format == "hsv":
color = f"hsv({hsv[0]}, {hsv[1]}, {hsv[2]})"
elif color_format == "hsl":
hsl = self.hsv_to_hsl(hsv)
color = f"hsl({hsl[0]}, {hsl[1]}, {hsl[2]})"
elif color_format == "rgb":
rgb = self.hsv_to_rgb(hsv)
color = f"rgb({rgb[0]}, {rgb[1]}, {rgb[2]})"
else:
rgb = self.hsv_to_rgb(hsv)
color = f"#{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}"
return color
def get_minimum_brightness(self, h: int, s: int) -> int:
"""Return the minimum allowed brightness for ``h`` and ``s``."""
lower_bounds: Sequence[Tuple[int, int]] = self.get_color_info(h)["lower_bounds"]
for i in range(len(lower_bounds) - 1):
s1, v1 = lower_bounds[i]
s2, v2 = lower_bounds[i + 1]
if s1 <= s <= s2:
m: float = (v2 - v1) / (s2 - s1)
b: float = v1 - m * s1
return int(m * s + b)
return 0
def _validate_color_input(self, color_input: HueType) -> Tuple[int, int]:
if (
not isinstance(color_input, (list, tuple))
or len(color_input) != 2
or any(not isinstance(c, (float, int)) for c in color_input)
):
raise TypeError("Hue must be a valid string, numeric type, or a tuple/list of 2 numeric types.")
return color_input[0], color_input[1]
def get_hue_range(self, color_input: Optional[HueType]) -> Tuple[int, int]:
"""Return the hue range for a given ``color_input``."""
if color_input is None:
return 0, 360
if isinstance(color_input, (int, float)) and 0 <= color_input <= 360:
color_input = int(color_input)
return color_input, color_input
if isinstance(color_input, str) and color_input in self.colormap:
return self.colormap[color_input]["hue_range"][0]
color_input = self._validate_color_input(color_input)
v1 = int(color_input[0])
v2 = int(color_input[1])
if v2 < v1:
v1, v2 = v2, v1
v1 = max(v1, 0)
v2 = min(v2, 360)
return v1, v2
def get_saturation_range(self, hue: int) -> Tuple[int, int]:
"""Return the saturation range for a given numerical ``hue`` value."""
saturation_bounds = [s for s, v in self.get_color_info(hue)["lower_bounds"]]
return min(saturation_bounds), max(saturation_bounds)
def get_color_info(self, hue: int) -> Dict[str, Sequence[Tuple[int, int]]]:
"""Return the color info for a given numerical ``hue`` value."""
# Maps red colors to make picking hue easier
if 334 <= hue <= 360:
hue -= 360
for color_name, color in self.colormap.items():
hue_range: Tuple[int, int] = color["hue_range"][0]
if hue_range[0] <= hue <= hue_range[1]:
return self.colormap[color_name]
else:
raise ValueError("Value of hue `%s` is invalid." % hue)
def random_within(self, r: Sequence[int]) -> int:
"""Return a random integer within the range ``r``."""
return self.random.randint(int(r[0]), int(r[1]))
@classmethod
def hsv_to_rgb_float(cls, hsv: Tuple[int, int, int]) -> Tuple[float, float, float]:
"""Convert HSV to RGB.
This method expects ``hsv`` to be a 3-tuple of H, S, and V values, and
it will return a 3-tuple of the equivalent R, G, and B float values.
"""
h, s, v = hsv
h = max(h, 1)
h = min(h, 359)
return colorsys.hsv_to_rgb(h / 360, s / 100, v / 100)
@classmethod
def hsv_to_rgb(cls, hsv: Tuple[int, int, int]) -> Tuple[int, int, int]:
"""Convert HSV to RGB.
This method expects ``hsv`` to be a 3-tuple of H, S, and V values, and
it will return a 3-tuple of the equivalent R, G, and B integer values.
"""
r, g, b = cls.hsv_to_rgb_float(hsv)
return int(r * 255), int(g * 255), int(b * 255)
@classmethod
def hsv_to_hsl(cls, hsv: Tuple[int, int, int]) -> Tuple[int, int, int]:
"""Convert HSV to HSL.
This method expects ``hsv`` to be a 3-tuple of H, S, and V values, and
it will return a 3-tuple of the equivalent H, S, and L values.
"""
h, s, v = hsv
s_: float = s / 100.0
v_: float = v / 100.0
l = 0.5 * v_ * (2 - s_) # noqa: E741
s_ = 0.0 if l in [0, 1] else v_ * s_ / (1 - math.fabs(2 * l - 1))
return int(h), int(s_ * 100), int(l * 100)
| RandomColor |
python | prakhar1989__Algorithms | tests/graph_algorithms_test.py | {
"start": 195,
"end": 5562
} | class ____(unittest.TestCase):
def setUp(self):
self.gr = graph()
self.gr.add_nodes(["s", "a", "b", "c", "d", "e", "f", "g", "h", "j", "k", "l"])
self.gr.add_edges([("s", "a"), ("s", "b"), ("a", "c"), ("c", "e")])
self.gr.add_edges([("e", "d"), ("d", "b"), ("a", "b"), ("c", "d")])
self.gr.add_edges([("g", "h"), ("f", "g")])
self.gr.add_edges([("j", "k"), ("j", "l")])
self.digr = digraph()
self.digr.add_nodes(['s', 'a', 'b', 'c', 'd', 'e', 'f'])
self.digr.add_edges([("s", "a"), ("a", "b"), ("b", "a"), ("c", "b")])
self.digr.add_edges([("b", "s"), ("s", "d"), ("d", "e"), ("e", "d")])
self.digr.add_edges([("b", "f"), ("e", "f")])
def test_bfs_undirected_graph(self):
self.assertEqual(len(BFS(self.gr, "s")), 6)
self.assertEqual(len(BFS(self.gr, "j")), 3)
self.assertEqual(len(BFS(self.gr, "g")), 3)
def test_bfs_directed_graph(self):
self.assertEqual(len(BFS(self.digr, "s")), 6)
self.assertEqual(len(BFS(self.digr, "c")), 7)
self.assertEqual(len(BFS(self.digr, "f")), 1)
def test_dfs_undirected_graph(self):
self.assertEqual(len(DFS(self.gr, "s")), 6)
self.assertEqual(len(DFS(self.gr, "j")), 3)
self.assertEqual(len(DFS(self.gr, "g")), 3)
def test_dfs_directed_graph(self):
self.assertEqual(len(DFS(self.digr, "s")), 6)
self.assertEqual(len(DFS(self.digr, "c")), 7)
self.assertEqual(len(DFS(self.digr, "f")), 1)
def test_shortest_hops_undirected_graph(self):
self.assertEqual(shortest_hops(self.gr, "s")["c"], 2)
self.assertEqual(shortest_hops(self.gr, "c")["s"], 2)
self.assertEqual(shortest_hops(self.gr, "s")["s"], 0)
self.assertEqual(shortest_hops(self.gr, "c")["j"], float('inf'))
def test_shortest_hops_directed_graph(self):
self.assertEqual(shortest_hops(self.digr, "s")["f"], 3)
self.assertEqual(shortest_hops(self.digr, "f")["s"], float('inf'))
self.assertEqual(shortest_hops(self.digr, "s")["s"], 0)
self.assertEqual(shortest_hops(self.digr, "s")["c"], float('inf'))
def test_undirected_connected_component(self):
self.assertEqual(len(undirected_connected_components(self.gr)), 3)
self.assertRaises(Exception, undirected_connected_components, self.digr)
def test_topological_ordering(self):
dag = digraph() # directed acyclic graph
dag.add_nodes(["a", "b", "c", "d", "e", "f", "g", "h"])
dag.add_edges([("a", "b"), ("a", "c"), ("a", "e"), ("d", "a")])
dag.add_edges([("g", "b"), ("g", "f"), ("f", "e"), ("h", "f"), ("h", "a")])
order = {o[0]: o[1] for o in topological_ordering(dag)}
self.assertEqual(sum([order[u] < order[v] for (u, v) in
dag.edges()]), len(dag.edges())) # all comparisons are True
def test_directed_connected_components(self):
digr = digraph()
digr.add_nodes(["a", "b", "c", "d", "e", "f", "g", "h", "i"])
digr.add_edges([("b", "a"), ("a", "c"), ("c", "b"), ("d", "b")])
digr.add_edges([("d", "f"), ("f", "e"), ("e", "d"), ("g", "e")])
digr.add_edges([("g", "h"), ("h", "i"), ("i", "g")])
self.assertEqual(len(directed_connected_components(digr)), 3)
digr2 = digraph()
digr2.add_nodes(["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"])
digr2.add_edges([("a", "b"), ("b", "c"), ("c", "a"), ("b", "d"), ("d", "e")])
digr2.add_edges([("e", "f"), ("f", "g"), ("g", "e"), ("d", "g"), ("i", "f")])
digr2.add_edges([("h", "g"), ("c", "h"), ("c", "k"), ("h", "i"), ("i", "j")])
digr2.add_edges([("h", "j"), ("j", "k"), ("k", "h")])
self.assertEqual(len(directed_connected_components(digr2)), 4)
def test_shortest_path_in_directed_graph(self):
digr = digraph()
digr.add_nodes(["a", "b", "c", "d", "e", "f"])
digr.add_edge(("a", "b"), 7)
digr.add_edge(("a", "c"), 9)
digr.add_edge(("a", "f"), 14)
digr.add_edge(("f", "e"), 9)
digr.add_edge(("c", "f"), 2)
digr.add_edge(("c", "d"), 11)
digr.add_edge(("b", "c"), 10)
digr.add_edge(("b", "d"), 15)
digr.add_edge(("d", "e"), 6)
self.assertEqual(shortest_path(digr, "a")["a"], 0)
self.assertEqual(shortest_path(digr, "a")["b"], 7)
self.assertEqual(shortest_path(digr, "a")["c"], 9)
self.assertEqual(shortest_path(digr, "a")["d"], 20)
self.assertEqual(shortest_path(digr, "a")["e"], 20)
self.assertEqual(shortest_path(digr, "a")["f"], 11)
def test_prims_minimum_spanning_tree(self):
gr = graph()
gr.add_nodes(["a", "b", "c", "d"])
gr.add_edge(("a", "b"), 4)
gr.add_edge(("b", "c"), 3)
gr.add_edge(("a", "c"), 1)
gr.add_edge(("c", "d"), 2)
min_cost = minimum_spanning_tree(gr)
self.assertEqual(min_cost, 6)
def test_kruskals_minimum_spanning_tree(self):
gr = graph()
gr.add_nodes(["a", "b", "c", "d"])
gr.add_edge(("a", "b"), 4)
gr.add_edge(("b", "c"), 3)
gr.add_edge(("a", "c"), 1)
gr.add_edge(("c", "d"), 2)
min_cost = kruskal_MST(gr)
self.assertEqual(min_cost, 6)
if __name__ == "__main__":
unittest.main()
| test_graph |
python | sqlalchemy__sqlalchemy | examples/custom_attributes/custom_management.py | {
"start": 1021,
"end": 1519
} | class ____(InstrumentationManager):
def get_instance_dict(self, class_, instance):
return instance._goofy_dict
def initialize_instance_dict(self, class_, instance):
instance.__dict__["_goofy_dict"] = {}
def install_state(self, class_, instance, state):
instance.__dict__["_goofy_dict"]["state"] = state
def state_getter(self, class_):
def find(instance):
return instance.__dict__["_goofy_dict"]["state"]
return find
| MyClassState |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call_param.py | {
"start": 4070,
"end": 4341
} | class ____(TypedDict, total=False):
id: Required[str]
"""The ID of the pending safety check."""
code: Optional[str]
"""The type of the pending safety check."""
message: Optional[str]
"""Details about the pending safety check."""
| PendingSafetyCheck |
python | facebookresearch__faiss | tests/test_autotune.py | {
"start": 212,
"end": 1643
} | class ____(unittest.TestCase):
def test_nprobe(self):
index = faiss.index_factory(32, "IVF32,Flat")
ps = faiss.ParameterSpace()
ps.set_index_parameter(index, "nprobe", 5)
self.assertEqual(index.nprobe, 5)
def test_nprobe_2(self):
index = faiss.index_factory(32, "IDMap,IVF32,Flat")
ps = faiss.ParameterSpace()
ps.set_index_parameter(index, "nprobe", 5)
index2 = faiss.downcast_index(index.index)
self.assertEqual(index2.nprobe, 5)
def test_nprobe_3(self):
index = faiss.index_factory(32, "IVF32,SQ8,RFlat")
ps = faiss.ParameterSpace()
ps.set_index_parameter(index, "nprobe", 5)
index2 = faiss.downcast_index(index.base_index)
self.assertEqual(index2.nprobe, 5)
def test_nprobe_4(self):
index = faiss.index_factory(32, "PCAR32,IVF32,SQ8,RFlat")
ps = faiss.ParameterSpace()
ps.set_index_parameter(index, "nprobe", 5)
index2 = faiss.downcast_index(index.base_index)
index2 = faiss.downcast_index(index2.index)
self.assertEqual(index2.nprobe, 5)
def test_efSearch(self):
index = faiss.index_factory(32, "IVF32_HNSW32,SQ8")
ps = faiss.ParameterSpace()
ps.set_index_parameter(index, "quantizer_efSearch", 5)
index2 = faiss.downcast_index(index.quantizer)
self.assertEqual(index2.hnsw.efSearch, 5)
| TestParameterSpace |
python | sqlalchemy__sqlalchemy | test/base/test_events.py | {
"start": 33661,
"end": 35063
} | class ____(TearDownLocalEventsFixture, fixtures.TestBase):
def setup_test(self):
class TargetEvents(event.Events):
def event_one(self, target, arg):
pass
class BaseTarget:
dispatch = event.dispatcher(TargetEvents)
class SubTarget(BaseTarget):
_sa_propagate_class_events = False
def __init__(self, parent):
self.dispatch = self.dispatch._join(parent.dispatch)
self.BaseTarget = BaseTarget
self.SubTarget = SubTarget
def test_listen_invoke_clslevel(self):
canary = Mock()
event.listen(self.BaseTarget, "event_one", canary)
s1 = self.SubTarget(self.BaseTarget())
s1.dispatch.event_one()
eq_(canary.mock_calls, [call.event_one()])
def test_insert_invoke_clslevel(self):
canary = Mock()
event.listen(self.BaseTarget, "event_one", canary, insert=True)
s1 = self.SubTarget(self.BaseTarget())
s1.dispatch.event_one()
eq_(canary.mock_calls, [call.event_one()])
def test_remove_invoke_clslevel(self):
canary = Mock()
event.listen(self.BaseTarget, "event_one", canary)
s1 = self.SubTarget(self.BaseTarget())
event.remove(self.BaseTarget, "event_one", canary)
s1.dispatch.event_one()
eq_(canary.mock_calls, [])
| DisableClsPropagateTest |
python | pypa__setuptools | _distutils_hack/__init__.py | {
"start": 6273,
"end": 6755
} | class ____:
def __enter__(self) -> None:
insert_shim()
def __exit__(self, exc: object, value: object, tb: object) -> None:
_remove_shim()
def insert_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
def _remove_shim():
try:
sys.meta_path.remove(DISTUTILS_FINDER)
except ValueError:
pass
if sys.version_info < (3, 12):
# DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632)
remove_shim = _remove_shim
| shim |
python | spack__spack | lib/spack/spack/test/error_messages.py | {
"start": 925,
"end": 1047
} | class ____(Package):
version("3.5")
version("3.4")
depends_on("x4@4.0")
""",
)
_pkgx4 = (
"x4",
"""\
| X3 |
python | redis__redis-py | redis/multidb/database.py | {
"start": 245,
"end": 991
} | class ____(ABC):
@property
@abstractmethod
def weight(self) -> float:
"""The weight of this database in compare to others. Used to determine the database failover to."""
pass
@weight.setter
@abstractmethod
def weight(self, weight: float):
"""Set the weight of this database in compare to others."""
pass
@property
@abstractmethod
def health_check_url(self) -> Optional[str]:
"""Health check URL associated with the current database."""
pass
@health_check_url.setter
@abstractmethod
def health_check_url(self, health_check_url: Optional[str]):
"""Set the health check URL associated with the current database."""
pass
| AbstractDatabase |
python | google__pytype | pytype/tests/test_errors1.py | {
"start": 34618,
"end": 34945
} | class ____(test_base.BaseTest):
"""Test in-place operations."""
def test_iadd(self):
errors = self.CheckWithErrors("""
def f(): v = []; v += 3 # unsupported-operands[e]
""")
self.assertErrorSequences(
errors, {"e": ["+=", "list", "int", "__iadd__ on list", "Iterable"]}
)
| InPlaceOperationsTest |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/projection/_x.py | {
"start": 233,
"end": 3460
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d.projection"
_path_str = "scatter3d.projection.x"
_valid_props = {"opacity", "scale", "show"}
@property
def opacity(self):
"""
Sets the projection color.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def scale(self):
"""
Sets the scale factor determining the size of the projection
marker points.
The 'scale' property is a number and may be specified as:
- An int or float in the interval [0, 10]
Returns
-------
int|float
"""
return self["scale"]
@scale.setter
def scale(self, val):
self["scale"] = val
@property
def show(self):
"""
Sets whether or not projections are shown along the x axis.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def _prop_descriptions(self):
return """\
opacity
Sets the projection color.
scale
Sets the scale factor determining the size of the
projection marker points.
show
Sets whether or not projections are shown along the x
axis.
"""
def __init__(self, arg=None, opacity=None, scale=None, show=None, **kwargs):
"""
Construct a new X object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.projection.X`
opacity
Sets the projection color.
scale
Sets the scale factor determining the size of the
projection marker points.
show
Sets whether or not projections are shown along the x
axis.
Returns
-------
X
"""
super().__init__("x")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.projection.X
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.projection.X`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("opacity", arg, opacity)
self._set_property("scale", arg, scale)
self._set_property("show", arg, show)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| X |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/video_intelligence.py | {
"start": 9965,
"end": 14206
} | class ____(GoogleCloudBaseOperator):
"""
Performs video annotation, annotating video shots.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVideoIntelligenceDetectVideoShotsOperator`
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only
Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined
based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to ``google_cloud_default``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_video_intelligence_detect_video_shots_template_fields]
template_fields: Sequence[str] = (
"input_uri",
"output_uri",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_video_intelligence_detect_video_shots_template_fields]
def __init__(
self,
*,
input_uri: str,
output_uri: str | None = None,
input_content: bytes | None = None,
video_context: dict | VideoContext | None = None,
location: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.input_uri = input_uri
self.output_uri = output_uri
self.input_content = input_content
self.video_context = video_context
self.location = location
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.timeout = timeout
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVideoIntelligenceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.annotate_video(
input_uri=self.input_uri,
input_content=self.input_content,
video_context=self.video_context,
location=self.location,
retry=self.retry,
features=[Feature.SHOT_CHANGE_DETECTION],
timeout=self.timeout,
)
self.log.info("Processing video for video shots annotations")
result = MessageToDict(operation.result()._pb)
self.log.info("Finished processing.")
return result
| CloudVideoIntelligenceDetectVideoShotsOperator |
python | keon__algorithms | tests/test_heap.py | {
"start": 1202,
"end": 2297
} | class ____(unittest.TestCase):
def test_get_skyline(self):
buildings = [[2, 9, 10], [3, 7, 15], [5, 12, 12],
[15, 20, 10], [19, 24, 8]]
# Expect output
output = [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10],
[20, 8], [24, 0]]
self.assertEqual(output, get_skyline(buildings))
def test_max_sliding_window(self):
nums = [1, 3, -1, -3, 5, 3, 6, 7]
self.assertEqual([3, 3, 5, 5, 6, 7], max_sliding_window(nums, 3))
def test_k_closest_points(self):
points = [(1, 0), (2, 3), (5, 2), (1, 1), (2, 8), (10, 2),
(-1, 0), (-2, -2)]
self.assertEqual([(-1, 0), (1, 0)], k_closest(points, 2))
self.assertEqual([(1, 1), (-1, 0), (1, 0)], k_closest(points, 3))
self.assertEqual([(-2, -2), (1, 1), (1, 0),
(-1, 0)], k_closest(points, 4))
self.assertEqual([(10, 2), (2, 8), (5, 2), (-2, -2), (2, 3),
(1, 0), (-1, 0), (1, 1)], k_closest(points, 8))
if __name__ == "__main__":
unittest.main()
| TestSuite |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 1636,
"end": 1816
} | class ____(factory.django.DjangoModelFactory):
class Meta:
model = models.NonIntegerPk
foo = factory.Sequence(lambda n: "foo%d" % n)
bar = ''
| NonIntegerPkFactory |
python | pytorch__pytorch | torch/storage.py | {
"start": 784,
"end": 14637
} | class ____:
_cdata: Any
is_sparse: _bool = False
is_sparse_csr: _bool = False
device: torch.device
# Used when
# (1) stashing FakeTensor device onto storage in torch.serialization.skip_data
# (2) stashing device onto storage to propagate to FakeTensor when torch.load under FakeTensorMode
_fake_device: _Optional[torch.device] = None
# Used when loading with FakeTensorMode to give information about offset of storage in torch.saved-file
_checkpoint_offset: _Optional[int] = None
def __init__(self, *args, **kwargs):
pass
def __len__(self) -> _int:
raise NotImplementedError
def __getitem__(self, idx):
raise NotImplementedError
def __setitem__(self, *args, **kwargs):
raise NotImplementedError
def copy_(self, source: T, non_blocking: _Optional[_bool] = None) -> T:
raise NotImplementedError
def new(self) -> Union[_StorageBase, TypedStorage]:
raise NotImplementedError
def nbytes(self) -> _int:
raise NotImplementedError
def size(self) -> _int:
return self.nbytes()
def type(
self, dtype: _Optional[str] = None, non_blocking: _bool = False
) -> Union[_StorageBase, TypedStorage]:
return _type(self, dtype, non_blocking)
def cuda(
self, device=None, non_blocking=False
) -> Union[_StorageBase, TypedStorage]:
"""Returns a copy of this object in CUDA memory.
If this object is already in CUDA memory and on the correct device, then
no copy is performed and the original object is returned.
Args:
device (int): The destination GPU id. Defaults to the current device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host. Otherwise,
the argument has no effect.
"""
device2 = torch.device("cuda", device) if device else torch.device("cuda")
return self.to(device=device2, non_blocking=non_blocking)
def hpu(self, device=None, non_blocking=False) -> Union[_StorageBase, TypedStorage]:
"""Returns a copy of this object in HPU memory.
If this object is already in HPU memory and on the correct device, then
no copy is performed and the original object is returned.
Args:
device (int): The destination HPU id. Defaults to the current device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host. Otherwise,
the argument has no effect.
"""
device2 = torch.device("hpu", device) if device else torch.device("hpu")
return self.to(device=device2, non_blocking=non_blocking)
def element_size(self) -> _int:
raise NotImplementedError
def get_device(self) -> _int:
return self.device.index
def data_ptr(self) -> _int:
raise NotImplementedError
def resizable(self) -> _bool:
raise NotImplementedError
# Defined in torch/csrc/generic/StorageSharing.cpp
def _share_filename_cpu_(self, *args, **kwargs):
raise NotImplementedError
def _share_fd_cpu_(self, *args, **kwargs):
raise NotImplementedError
@classmethod
def _new_using_filename_cpu(cls, size: _int) -> Self:
raise NotImplementedError
@classmethod
def _new_using_fd_cpu(cls, size: _int) -> Self:
raise NotImplementedError
@classmethod
def from_buffer(cls, *args, **kwargs) -> Self:
raise NotImplementedError
@classmethod
def _new_shared_filename_cpu(
cls,
manager,
obj,
size,
*,
device=None,
dtype=None,
) -> Self:
raise NotImplementedError
@classmethod
def _release_ipc_counter(cls, *args, device=None, **kwargs):
return cls._release_ipc_counter_cuda(*args, **kwargs)
@classmethod
def _release_ipc_counter_cuda(cls, *args, **kwargs) -> Self:
raise NotImplementedError
@classmethod
def _new_with_weak_ptr(cls, *args, **kwargs) -> Self:
raise NotImplementedError
def _shared_decref(self) -> Union[_StorageBase, TypedStorage]:
raise NotImplementedError
def _write_file(self, *args, **kwargs):
raise NotImplementedError
def resize_(self, size: _int):
raise NotImplementedError
def _weak_ref(self, *args, **kwargs) -> Union[_StorageBase, TypedStorage]:
raise NotImplementedError
def _set_from_file(self, *args, **kwargs):
raise NotImplementedError
def _set_cdata(self, *args, **kwargs):
raise NotImplementedError
def _share_cuda_(self, *args, **kwargs):
raise NotImplementedError
def is_shared(self) -> _bool:
raise NotImplementedError
@classmethod
def _new_shared_cuda(cls, *args, **kwargs) -> Self:
raise NotImplementedError
def _shared_incref(self, *args, **kwargs):
raise NotImplementedError
@classmethod
def _free_weak_ref(cls, *args, **kwargs):
raise NotImplementedError
@property
def is_cuda(self):
raise NotImplementedError
@property
def is_hpu(self):
raise NotImplementedError
@classmethod
def from_file(cls, filename, shared, nbytes) -> Union[_StorageBase, TypedStorage]:
raise NotImplementedError
@classmethod
def _expired(cls, *args, **kwargs) -> Union[_StorageBase, TypedStorage]:
raise NotImplementedError
def _byteswap(self, *args, **kwargs):
raise NotImplementedError
def _get_filename(self, *args, **kwargs) -> _Optional[str]:
raise NotImplementedError
def __repr__(self):
info_str = f"[{torch.typename(self)}(device={self.device}) of size {len(self)}]"
if self.device.type == "meta":
return "...\n" + info_str
data_str = " " + "\n ".join(str(self[i]) for i in range(self.size()))
return data_str + "\n" + info_str
def __iter__(self):
return iter(self[i] for i in range(self.size()))
def __copy__(self):
return self.clone()
def __deepcopy__(self, memo):
memo = memo.setdefault("torch", {})
if self._cdata in memo:
return memo[self._cdata]
new_storage = self.clone()
memo[self._cdata] = new_storage
return new_storage
def __reduce__(self):
b = io.BytesIO()
torch.save(self, b, _use_new_zipfile_serialization=False)
return (_load_from_bytes, (b.getvalue(),))
def __sizeof__(self):
return super().__sizeof__() + self.size()
def clone(self):
"""Return a copy of this storage."""
return type(self)(self.nbytes(), device=self.device).copy_(self)
def tolist(self):
"""Return a list containing the elements of this storage."""
return list(self)
def cpu(self):
"""Return a CPU copy of this storage if it's not already on the CPU."""
if self.device.type != "cpu":
return torch.UntypedStorage(self.size()).copy_(self, False)
return self
def mps(self):
"""Return a MPS copy of this storage if it's not already on the MPS."""
if self.device.type != "mps":
return torch.UntypedStorage(self.size(), device="mps").copy_(self, False)
return self
def _to(self, dtype):
if not isinstance(dtype, torch.dtype):
raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
storage = (
torch.tensor([], dtype=torch.uint8, device=self.device)
.set_(cast(Storage, self))
.to(dtype)
._typed_storage()
)
if storage.data_ptr() == self.data_ptr():
storage = storage.clone()
return storage
def to(self, *, device: DeviceLikeType, non_blocking: _bool = False):
if not isinstance(device, torch.device):
device = torch.device(device)
return _to(self, device, non_blocking)
def double(self):
"""Casts this storage to double type."""
return self._to(torch.double)
def float(self):
"""Casts this storage to float type."""
return self._to(torch.float)
def half(self):
"""Casts this storage to half type."""
return self._to(torch.half)
def long(self):
"""Casts this storage to long type."""
return self._to(torch.long)
def int(self):
"""Casts this storage to int type."""
return self._to(torch.int)
def short(self):
"""Casts this storage to short type."""
return self._to(torch.short)
def char(self):
"""Casts this storage to char type."""
return self._to(torch.int8)
def byte(self):
"""Casts this storage to byte type."""
return self._to(torch.uint8)
def bool(self):
"""Casts this storage to bool type."""
return self._to(torch.bool)
def bfloat16(self):
"""Casts this storage to bfloat16 type."""
return self._to(torch.bfloat16)
def complex_double(self):
"""Casts this storage to complex double type."""
return self._to(torch.cdouble)
def complex_float(self):
"""Casts this storage to complex float type."""
return self._to(torch.cfloat)
def float8_e5m2(self):
"""Casts this storage to float8_e5m2 type"""
return self._to(torch.float8_e5m2)
def float8_e4m3fn(self):
"""Casts this storage to float8_e4m3fn type"""
return self._to(torch.float8_e4m3fn)
def float8_e5m2fnuz(self):
"""Casts this storage to float8_e5m2fnuz type"""
return self._to(torch.float8_e5m2fnuz)
def float8_e4m3fnuz(self):
"""Casts this storage to float8_e4m3fnuz type"""
return self._to(torch.float8_e4m3fnuz)
def is_pinned(self, device: Union[str, torch.device] = "cuda"):
r"""Determine whether the CPU storage is already pinned on device.
Args:
device (str or torch.device): The device to pin memory on (default: ``'cuda'``).
This argument is discouraged and subject to deprecated.
Returns:
A boolean variable.
"""
return (
torch.tensor([], dtype=torch.uint8, device=self.device)
.set_(cast(Storage, self))
.is_pinned(device)
)
def pin_memory(self, device: Union[str, torch.device] = "cuda"):
r"""Copy the CPU storage to pinned memory, if it's not already pinned.
Args:
device (str or torch.device): The device to pin memory on (default: ``'cuda'``).
This argument is discouraged and subject to deprecated.
Returns:
A pinned CPU storage.
"""
if self.device.type != "cpu":
raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
pinned_tensor = (
torch.tensor([], dtype=torch.uint8, device=self.device)
.set_(cast(Storage, self))
.pin_memory(device)
)
return pinned_tensor.untyped_storage()
def share_memory_(self):
"""See :meth:`torch.UntypedStorage.share_memory_`"""
from torch.multiprocessing import get_sharing_strategy
if self.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
pass # CUDA or PrivateUse1 doesn't use POSIX shared memory
elif get_sharing_strategy() == "file_system":
self._share_filename_cpu_()
else:
self._share_fd_cpu_()
return self
@classmethod
def _new_shared(cls, size, *, device="cpu"):
"""Create a new storage in shared memory with the same data type."""
from torch.multiprocessing import get_sharing_strategy
device = torch.device(device)
if device.type in ["cuda", torch._C._get_privateuse1_backend_name(), "hpu"]:
return cls(size, device=device)
elif get_sharing_strategy() == "file_system":
return cls._new_using_filename_cpu(size)
else:
return cls._new_using_fd_cpu(size)
def untyped(self):
return self
def byteswap(self, dtype):
"""Swap bytes in underlying data."""
elem_size = torch._utils._element_size(dtype)
# for complex types, don't swap first and second numbers
if dtype.is_complex:
elem_size = max(int(elem_size / 2), 1)
self._byteswap(elem_size)
def _share_memory_lock_protected(fn):
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
to_free = None
to_wait = None
with _share_memory_lock:
key = self._cdata
if key in _share_memory_map:
to_wait = _share_memory_map[key]
else:
_share_memory_map[key] = threading.RLock()
_share_memory_map[key].acquire()
to_free = key
# If we're already in the process of sharing the storage, wait
# for it to be done.
if to_wait is not None:
with to_wait:
pass
try:
return fn(self, *args, **kwargs)
finally:
# If we acquired the storage lock here and we're done working on it
# we can now release it and free the entry.
if to_free is not None:
# Ensure that the cdata from the storage didn't change and only
# the data_ptr did.
assert self._cdata == to_free
with _share_memory_lock:
_share_memory_map[to_free].release()
del _share_memory_map[to_free]
return wrapper
| _StorageBase |
python | mamba-org__mamba | micromamba/tests/test_virtual_pkgs.py | {
"start": 55,
"end": 976
} | class ____:
def test_virtual_packages(self):
infos = info()
assert "virtual packages :" in infos
assert "__archspec=1=" in infos
if platform.system() == "Windows":
assert "__win" in infos
elif platform.system() == "Darwin":
assert "__unix=0=0" in infos
assert "__osx" in infos
elif platform.system() == "Linux":
assert "__unix=0=0" in infos
assert "__glibc" in infos
linux_ver = platform.release().split("-", 1)[0]
assert f"__linux={linux_ver}=0" in infos
def test_virtual_linux(self):
if platform.system() == "Linux":
infos = info()
assert "__linux=" in infos
assert "__linux=0=0" not in infos
else:
infos = info(env={**os.environ, "CONDA_SUBDIR": "linux-64"})
assert "__linux=0=0" in infos
| TestVirtualPkgs |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 358204,
"end": 359414
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("contributions", "repository")
contributions = sgqlc.types.Field(
sgqlc.types.non_null(CreatedIssueContributionConnection),
graphql_name="contributions",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
ContributionOrder,
graphql_name="orderBy",
default={"direction": "DESC"},
),
),
)
),
)
repository = sgqlc.types.Field(
sgqlc.types.non_null("Repository"), graphql_name="repository"
)
| IssueContributionsByRepository |
python | huggingface__transformers | src/transformers/models/starcoder2/modular_starcoder2.py | {
"start": 2721,
"end": 5630
} | class ____(MistralAttention):
def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None):
super().__init__(config=config, layer_idx=layer_idx)
self.residual_dropout = config.residual_dropout
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=getattr(self.config, "sliding_window", None), # diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
attn_output = nn.functional.dropout(
attn_output, p=self.residual_dropout, training=self.training
) # diff with Llama
return attn_output, attn_weights
| Starcoder2Attention |
python | scikit-learn__scikit-learn | sklearn/feature_extraction/text.py | {
"start": 62417,
"end": 77057
} | class ____(CountVectorizer):
r"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to :class:`CountVectorizer` followed by
:class:`TfidfTransformer`.
Read more in the :ref:`User Guide <tfidf>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'} or callable, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) means no character normalization is performed.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
stop_words : {'english'}, list, default=None
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. In this case, setting `max_df`
to a higher value, such as in the range (0.7, 1.0), can automatically detect
and filter stop words based on intra corpus document frequency of terms.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer`` is not callable.
max_df : float or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float in range [0.0, 1.0], the parameter represents a proportion of
documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float in range of [0.0, 1.0], the parameter represents a proportion
of documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only consider the top
`max_features` ordered by term frequency across the corpus.
Otherwise, all features are used.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : bool, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set `binary` to True, `use_idf` to False and
`norm` to None to get 0/1 outputs).
dtype : dtype, default=float64
Type of the matrix returned by fit_transform() or transform().
norm : {'l1', 'l2'} or None, default='l2'
Each output row will have unit norm, either:
- 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
- 'l1': Sum of absolute values of vector elements is 1.
See :func:`~sklearn.preprocessing.normalize`.
- None: No normalization.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
idf_ : array of shape (n_features,)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
See Also
--------
CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
TfidfTransformer : Performs the TF-IDF transformation from a provided
matrix of counts.
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = TfidfVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> vectorizer.get_feature_names_out()
array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
'this'], ...)
>>> print(X.shape)
(4, 9)
"""
_parameter_constraints: dict = {**CountVectorizer._parameter_constraints}
_parameter_constraints.update(
{
"norm": [StrOptions({"l1", "l2"}), None],
"use_idf": ["boolean"],
"smooth_idf": ["boolean"],
"sublinear_tf": ["boolean"],
}
)
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
analyzer="word",
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
max_df=1.0,
min_df=1,
max_features=None,
vocabulary=None,
binary=False,
dtype=np.float64,
norm="l2",
use_idf=True,
smooth_idf=True,
sublinear_tf=False,
):
super().__init__(
input=input,
encoding=encoding,
decode_error=decode_error,
strip_accents=strip_accents,
lowercase=lowercase,
preprocessor=preprocessor,
tokenizer=tokenizer,
analyzer=analyzer,
stop_words=stop_words,
token_pattern=token_pattern,
ngram_range=ngram_range,
max_df=max_df,
min_df=min_df,
max_features=max_features,
vocabulary=vocabulary,
binary=binary,
dtype=dtype,
)
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def idf_(self):
"""Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
"""
if not hasattr(self, "_tfidf"):
raise NotFittedError(
f"{self.__class__.__name__} is not fitted yet. Call 'fit' with "
"appropriate arguments before using this attribute."
)
return self._tfidf.idf_
@idf_.setter
def idf_(self, value):
if not self.use_idf:
raise ValueError("`idf_` cannot be set when `user_idf=False`.")
if not hasattr(self, "_tfidf"):
# We should support transferring `idf_` from another `TfidfTransformer`
# and therefore, we need to create the transformer instance it does not
# exist yet.
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
self._validate_vocabulary()
if hasattr(self, "vocabulary_"):
if len(self.vocabulary_) != len(value):
raise ValueError(
"idf length = %d must be equal to vocabulary size = %d"
% (len(value), len(self.vocabulary))
)
self._tfidf.idf_ = value
def _check_params(self):
if self.dtype not in FLOAT_DTYPES:
warnings.warn(
"Only {} 'dtype' should be used. {} 'dtype' will "
"be converted to np.float64.".format(FLOAT_DTYPES, self.dtype),
UserWarning,
)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
"""
self._check_params()
self._warn_for_unused_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
self._check_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, msg="The TF-IDF vectorizer is not fitted")
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.string = True
tags.input_tags.two_d_array = False
tags._skip_test = True
return tags
| TfidfVectorizer |
python | facebook__pyre-check | scripts/explore_pysa_models.py | {
"start": 12861,
"end": 17560
} | class ____:
format: str = "json"
show_sources: bool = True
show_sinks: bool = True
show_tito: bool = True
show_tito_positions: bool = True
show_class_intervals: bool = True
show_features: bool = True
show_leaf_names: bool = True
kind: Optional[str] = None
caller_port: Optional[str] = None
def apply_options(self, **kwargs: Union[bool, str]) -> "FormattingOptions":
options = copy.copy(self)
for name, value in kwargs.items():
if not hasattr(options, name):
raise AssertionError(f"Unknown formatting option `{name}`")
setattr(options, name, value)
return options
__default_formatting_options: FormattingOptions = FormattingOptions(
show_tito_positions=False,
show_class_intervals=False,
show_features=False,
show_leaf_names=False,
)
def set_formatting(**kwargs: Union[str, bool]) -> None:
"""
Set default formatting options.
Available options with their default values:
format = 'json' Display format ('json' or 'text')
kind = None Filter by taint kind.
caller_port = None Filter by caller port.
show_sources = True
show_sinks = True
show_tito = True
show_tito_positions = False
show_class_intervals = False
show_features = False
show_leaf_names = False
Most functions accept formatting options as optional arguments.
"""
global __default_formatting_options
__default_formatting_options = __default_formatting_options.apply_options(**kwargs)
def show_formatting() -> None:
"""Show default formatting options."""
print(__default_formatting_options)
def get_raw_model(
callable: str, cache: Optional[Dict[str, Dict[str, Any]]] = None
) -> Dict[str, Any]:
"""Get the model for the given callable."""
if cache is not None and callable in cache:
return cache[callable]
directory = _assert_loaded()
if callable not in directory.index_.models:
raise AssertionError(f"no model for callable `{callable}`.")
message = json.loads(_read(directory.index_.models[callable]))
assert message["kind"] == "model"
model = message["data"]
if cache is not None:
cache[callable] = model
return model
def get_model(
callable: str,
**kwargs: Union[str, bool],
) -> Dict[str, Any]:
"""Get the model for the given callable (with formatting options)"""
model = get_raw_model(callable)
options = __default_formatting_options.apply_options(**kwargs)
if not options.show_sources and "sources" in model:
del model["sources"]
if not options.show_sinks and "sinks" in model:
del model["sinks"]
if not options.show_tito and "tito" in model:
del model["tito"]
if options.kind is not None:
model = filter_model_kind(model, options.kind)
if options.caller_port is not None:
model = filter_model_caller_port(model, options.caller_port)
if not options.show_tito_positions:
model = model_remove_tito_positions(model)
if not options.show_class_intervals:
model = model_remove_class_intervals(model)
if not options.show_features:
model = model_remove_features(model)
if not options.show_leaf_names:
model = model_remove_leaf_names(model)
return model
def print_json(data: object) -> None:
"""Pretty print json objects with syntax highlighting."""
if isinstance(data, str):
data = json.loads(data)
try:
subprocess.run(["jq", "-C"], input=json.dumps(data).encode(), check=True)
except FileNotFoundError:
print(json.dumps(data, indent=" " * 2))
global __warned_missing_jq
if not __warned_missing_jq:
print(
"[HINT] Install `jq` to use syntax highlighting, https://stedolan.github.io/jq/"
)
__warned_missing_jq = True
def green(text: str | int) -> str:
return f"\033[32m{text}\033[0m"
def blue(text: str | int) -> str:
return f"\033[34m{text}\033[0m"
def feature_to_string(feature: Union[str, Dict[str, str]]) -> str:
if isinstance(feature, str):
return feature
elif isinstance(feature, dict):
if len(feature) == 1:
key, value = next(iter(feature.items()))
return f"{key}:{value}"
else:
raise AssertionError(f"unexpected feature: {feature}")
else:
raise AssertionError(f"unexpected feature: {feature}")
def leaf_name_to_string(leaf: Dict[str, str]) -> str:
name = leaf["name"]
if "port" in leaf:
name += f':{leaf["port"]}'
return name
| FormattingOptions |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/naming.py | {
"start": 788,
"end": 6855
} | class ____:
def __init__(self, const, table, convention):
self.const = const
self._is_fk = isinstance(const, ForeignKeyConstraint)
self.table = table
self.convention = convention
self._const_name = const.name
def _key_table_name(self):
return self.table.name
def _column_X(self, idx, attrname):
if self._is_fk:
try:
fk = self.const.elements[idx]
except IndexError:
return ""
else:
return getattr(fk.parent, attrname)
else:
cols = list(self.const.columns)
try:
col = cols[idx]
except IndexError:
return ""
else:
return getattr(col, attrname)
def _key_constraint_name(self):
if self._const_name in (None, _NONE_NAME):
raise exc.InvalidRequestError(
"Naming convention including "
"%(constraint_name)s token requires that "
"constraint is explicitly named."
)
if not isinstance(self._const_name, conv):
self.const.name = None
return self._const_name
def _key_column_X_key(self, idx):
# note this method was missing before
# [ticket:3989], meaning tokens like ``%(column_0_key)s`` weren't
# working even though documented.
return self._column_X(idx, "key")
def _key_column_X_name(self, idx):
return self._column_X(idx, "name")
def _key_column_X_label(self, idx):
return self._column_X(idx, "_ddl_label")
def _key_referred_table_name(self):
fk = self.const.elements[0]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return reftable
def _key_referred_column_X_name(self, idx):
fk = self.const.elements[idx]
# note that before [ticket:3989], this method was returning
# the specification for the :class:`.ForeignKey` itself, which normally
# would be using the ``.key`` of the column, not the name.
return fk.column.name
def __getitem__(self, key):
if key in self.convention:
return self.convention[key](self.const, self.table)
elif hasattr(self, "_key_%s" % key):
return getattr(self, "_key_%s" % key)()
else:
col_template = re.match(r".*_?column_(\d+)(_?N)?_.+", key)
if col_template:
idx = col_template.group(1)
multiples = col_template.group(2)
if multiples:
if self._is_fk:
elems = self.const.elements
else:
elems = list(self.const.columns)
tokens = []
for idx, elem in enumerate(elems):
attr = "_key_" + key.replace("0" + multiples, "X")
try:
tokens.append(getattr(self, attr)(idx))
except AttributeError:
raise KeyError(key)
sep = "_" if multiples.startswith("_") else ""
return sep.join(tokens)
else:
attr = "_key_" + key.replace(idx, "X")
idx = int(idx)
if hasattr(self, attr):
return getattr(self, attr)(idx)
raise KeyError(key)
_prefix_dict = {
Index: "ix",
PrimaryKeyConstraint: "pk",
CheckConstraint: "ck",
UniqueConstraint: "uq",
ForeignKeyConstraint: "fk",
}
def _get_convention(dict_, key):
for super_ in key.__mro__:
if super_ in _prefix_dict and _prefix_dict[super_] in dict_:
return dict_[_prefix_dict[super_]]
elif super_ in dict_:
return dict_[super_]
else:
return None
def _constraint_name_for_table(const, table):
metadata = table.metadata
convention = _get_convention(metadata.naming_convention, type(const))
if isinstance(const.name, conv):
return const.name
elif (
convention is not None
and not isinstance(const.name, conv)
and (
const.name is None
or "constraint_name" in convention
or const.name is _NONE_NAME
)
):
return conv(
convention
% ConventionDict(const, table, metadata.naming_convention)
)
elif convention is _NONE_NAME:
return None
@event.listens_for(
PrimaryKeyConstraint, "_sa_event_column_added_to_pk_constraint"
)
def _column_added_to_pk_constraint(pk_constraint, col):
if pk_constraint._implicit_generated:
# only operate upon the "implicit" pk constraint for now,
# as we have to force the name to None to reset it. the
# "implicit" constraint will only have a naming convention name
# if at all.
table = pk_constraint.table
pk_constraint.name = None
newname = _constraint_name_for_table(pk_constraint, table)
if newname:
pk_constraint.name = newname
@event.listens_for(Constraint, "after_parent_attach")
@event.listens_for(Index, "after_parent_attach")
def _constraint_name(const, table):
if isinstance(table, Column):
# this path occurs for a CheckConstraint linked to a Column
# for column-attached constraint, set another event
# to link the column attached to the table as this constraint
# associated with the table.
event.listen(
table,
"after_parent_attach",
lambda col, table: _constraint_name(const, table),
)
elif isinstance(table, Table):
if isinstance(const.name, conv) or const.name is _NONE_NAME:
return
newname = _constraint_name_for_table(const, table)
if newname:
const.name = newname
| ConventionDict |
python | pytorch__pytorch | test/inductor/test_perf.py | {
"start": 20828,
"end": 21454
} | class ____(TestCase):
def test_tiling_simple(self):
def f(a, b):
return a + b.t()
inp = (T(10, 10), T(10, 10))
self.assertExpectedInline(count_numel(f, *inp), """300""")
def f(a, b):
return a.t() + b
inp = (T(10, 10), T(10, 10))
self.assertExpectedInline(count_numel(f, *inp), """300""")
def test_tiling_three(self):
def f(a, b, c):
return a + b.permute(1, 2, 0) + c.permute(2, 0, 1)
inp = (T(10, 10, 10), T(10, 10, 10), T(10, 10, 10))
self.assertExpectedInline(count_numel(f, *inp), """4000""")
| TilingTests |
python | conda__conda | conda/exceptions.py | {
"start": 4587,
"end": 5292
} | class ____(ArgumentError):
def __init__(
self,
expected: int,
received: int,
offending_arguments: Iterable[str],
optional_message: str = "",
*args,
):
self.expected = expected
self.received = received
self.offending_arguments = offending_arguments
self.optional_message = optional_message
suffix = "s" if received - expected > 1 else ""
msg = "{} Got {} argument{} ({}) but expected {}.".format(
optional_message,
received,
suffix,
", ".join(offending_arguments),
expected,
)
super().__init__(msg, *args)
| TooManyArgumentsError |
python | pytransitions__transitions | transitions/extensions/factory.py | {
"start": 3658,
"end": 4590
} | class ____(GraphMachine, HierarchicalAsyncMachine):
"""A hierarchical machine that supports asynchronous event/callback processing with Graphviz support."""
transition_cls = NestedAsyncTransition
# 4d tuple (graph, nested, locked, async)
_CLASS_MAP = {
(False, False, False, False): Machine,
(False, False, True, False): LockedMachine,
(False, True, False, False): HierarchicalMachine,
(False, True, True, False): LockedHierarchicalMachine,
(True, False, False, False): GraphMachine,
(True, False, True, False): LockedGraphMachine,
(True, True, False, False): HierarchicalGraphMachine,
(True, True, True, False): LockedHierarchicalGraphMachine,
(False, False, False, True): AsyncMachine,
(True, False, False, True): AsyncGraphMachine,
(False, True, False, True): HierarchicalAsyncMachine,
(True, True, False, True): HierarchicalAsyncGraphMachine
}
| HierarchicalAsyncGraphMachine |
python | pytorch__pytorch | torch/_dynamo/package.py | {
"start": 40119,
"end": 42179
} | class ____(DiskDynamoStore):
"""
Special DiskDynamoStore which adds some helper functions for automatically
tracking paths of packages
"""
def save(self, package: CompilePackage) -> None:
"""
Saves a package to a given path. Grabs backends from PrecompileContext.
"""
key = package.source_id
logger.info("Saving CompilePackage for %s", package.source_id)
super().save_package(package, key)
def load(self, fn: Callable[..., Any]) -> Optional[PrecompileCacheEntry]:
"""
Loads a package from a given path and returns it plus a list of deserialized backends
"""
key = CompilePackage.source_id_from_fn(fn)
logger.info("Loading CompilePackage for %s", key)
path = os.path.join(self.path_prefix(), key)
if os.path.exists(path):
try:
result = super().load_cache_entry(key)
counters["dynamo_cache"]["dynamo_cache_hit"] += 1
return result
except Exception:
counters["dynamo_cache"]["dynamo_cache_error"] += 1
logger.warning("Failed to load package from path %s", exc_info=True)
return None
logger.info("No package found for %s", key)
counters["dynamo_cache"]["dynamo_cache_miss"] += 1
return None
def load_and_install_package(
self, fn: Callable[..., Any]
) -> Optional[CompilePackage]:
"""
Load directly into a package and install backends
"""
results = self.load(fn)
if results is None:
return None
else:
package = CompilePackage(fn, results.dynamo)
package.install(results.backends)
return package
def path_prefix(self) -> str:
return os.path.join(cache_dir(), "dynamo")
def cache_dir() -> str:
from torch._inductor.runtime.cache_dir_utils import cache_dir
return cache_dir()
DynamoCache = DiskDynamoCache(os.path.join(cache_dir(), "dynamo"))
| DiskDynamoCache |
python | chardet__chardet | chardet/sbcsgroupprober.py | {
"start": 1956,
"end": 4071
} | class ____(CharSetGroupProber):
def __init__(self) -> None:
super().__init__()
hebrew_prober = HebrewProber()
logical_hebrew_prober = SingleByteCharSetProber(
WINDOWS_1255_HEBREW_MODEL, is_reversed=False, name_prober=hebrew_prober
)
# TODO: See if using ISO-8859-8 Hebrew model works better here, since
# it's actually the visual one
visual_hebrew_prober = SingleByteCharSetProber(
WINDOWS_1255_HEBREW_MODEL, is_reversed=True, name_prober=hebrew_prober
)
hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
# TODO: ORDER MATTERS HERE. I changed the order vs what was in master
# and several tests failed that did not before. Some thought
# should be put into the ordering, and we should consider making
# order not matter here, because that is very counter-intuitive.
self.probers = [
SingleByteCharSetProber(WINDOWS_1251_RUSSIAN_MODEL),
SingleByteCharSetProber(KOI8_R_RUSSIAN_MODEL),
SingleByteCharSetProber(ISO_8859_5_RUSSIAN_MODEL),
SingleByteCharSetProber(MACCYRILLIC_RUSSIAN_MODEL),
SingleByteCharSetProber(IBM866_RUSSIAN_MODEL),
SingleByteCharSetProber(IBM855_RUSSIAN_MODEL),
SingleByteCharSetProber(ISO_8859_7_GREEK_MODEL),
SingleByteCharSetProber(WINDOWS_1253_GREEK_MODEL),
SingleByteCharSetProber(ISO_8859_5_BULGARIAN_MODEL),
SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL),
# TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
# after we retrain model.
# SingleByteCharSetProber(ISO_8859_2_HUNGARIAN_MODEL),
# SingleByteCharSetProber(WINDOWS_1250_HUNGARIAN_MODEL),
SingleByteCharSetProber(TIS_620_THAI_MODEL),
SingleByteCharSetProber(ISO_8859_9_TURKISH_MODEL),
hebrew_prober,
logical_hebrew_prober,
visual_hebrew_prober,
]
self.reset()
| SBCSGroupProber |
python | pytransitions__transitions | transitions/extensions/diagrams_base.py | {
"start": 332,
"end": 5991
} | class ____(object):
"""Provides the common foundation for graphs generated either with pygraphviz or graphviz. This abstract class
should not be instantiated directly. Use .(py)graphviz.(Nested)Graph instead.
Attributes:
machine (GraphMachine): The associated GraphMachine
fsm_graph (object): The AGraph-like object that holds the graphviz information
"""
def __init__(self, machine):
self.machine = machine
self.fsm_graph = None
self.generate()
@abc.abstractmethod
def generate(self):
"""Triggers the generation of a graph."""
@abc.abstractmethod
def set_previous_transition(self, src, dst):
"""Sets the styling of an edge to 'previous'
Args:
src (str): Name of the source state
dst (str): Name of the destination
"""
@abc.abstractmethod
def reset_styling(self):
"""Resets the styling of the currently generated graph."""
@abc.abstractmethod
def set_node_style(self, state, style):
"""Sets the style of nodes associated with a model state
Args:
state (str, Enum or list): Name of the state(s) or Enum(s)
style (str): Name of the style
"""
@abc.abstractmethod
def get_graph(self, title=None, roi_state=None):
"""Returns a graph object.
Args:
title (str): Title of the generated graph
roi_state (State): If not None, the returned graph will only contain edges and states connected to it.
Returns:
A graph instance with a `draw` that allows to render the graph.
"""
def _convert_state_attributes(self, state):
label = state.get("label", state["name"])
if self.machine.show_state_attributes:
if "tags" in state:
label += " [" + ", ".join(state["tags"]) + "]"
if "on_enter" in state:
label += r"\l- enter:\l + " + r"\l + ".join(state["on_enter"])
if "on_exit" in state:
label += r"\l- exit:\l + " + r"\l + ".join(state["on_exit"])
if "timeout" in state:
label += r'\l- timeout(' + state['timeout'] + 's) -> (' + ', '.join(state['on_timeout']) + ')'
# end each label with a left-aligned newline
return label + r"\l"
def _get_state_names(self, state):
if isinstance(state, (list, tuple, set)):
for res in state:
for inner in self._get_state_names(res):
yield inner
else:
yield self.machine.state_cls.separator.join(self.machine._get_enum_path(state))\
if hasattr(state, "name") else state
def _transition_label(self, tran):
edge_label = tran.get("label", tran["trigger"])
if "dest" not in tran:
edge_label += " [internal]"
if self.machine.show_conditions and any(prop in tran for prop in ["conditions", "unless"]):
edge_label = "{edge_label} [{conditions}]".format(
edge_label=edge_label,
conditions=" & ".join(
tran.get("conditions", []) + ["!" + u for u in tran.get("unless", [])]
),
)
return edge_label
def _get_global_name(self, path):
if path:
state = path.pop(0)
with self.machine(state):
return self._get_global_name(path)
else:
return self.machine.get_global_name()
def _flatten(self, *lists):
return (e for a in lists for e in
(self._flatten(*a)
if isinstance(a, (tuple, list))
else (a.name if hasattr(a, 'name') else a,)))
def _get_elements(self):
states = []
transitions = []
try:
markup = self.machine.get_markup_config()
queue = [([], markup)]
while queue:
prefix, scope = queue.pop(0)
for transition in scope.get("transitions", []):
if prefix:
tran = copy.copy(transition)
tran["source"] = self.machine.state_cls.separator.join(
prefix + [tran["source"]]
)
if "dest" in tran: # don't do this for internal transitions
tran["dest"] = self.machine.state_cls.separator.join(
prefix + [tran["dest"]]
)
else:
tran = transition
transitions.append(tran)
for state in scope.get("children", []) + scope.get("states", []):
if not prefix:
states.append(state)
ini = state.get("initial", [])
if not isinstance(ini, list):
ini = ini.name if hasattr(ini, "name") else ini
tran = dict(
trigger="",
source=self.machine.state_cls.separator.join(prefix + [state["name"]]),
dest=self.machine.state_cls.separator.join(
prefix + [state["name"], ini]
),
)
transitions.append(tran)
if state.get("children", []):
queue.append((prefix + [state["name"]], state))
except KeyError:
_LOGGER.error("Graph creation incomplete!")
return states, transitions
| BaseGraph |
python | PrefectHQ__prefect | tests/client/test_base_client.py | {
"start": 26881,
"end": 27748
} | class ____:
@pytest.fixture
def prefect_version(self, monkeypatch: pytest.MonkeyPatch) -> str:
v = "42.43.44"
monkeypatch.setattr(prefect, "__version__", v)
return v
@pytest.fixture
def prefect_api_version(self, monkeypatch: pytest.MonkeyPatch) -> str:
v = "45.46.47"
monkeypatch.setattr(prefect.client.constants, "SERVER_API_VERSION", v)
return v
async def test_passes_informative_user_agent(
self,
prefect_version: str,
prefect_api_version: str,
):
async with mocked_client(responses=[RESPONSE_200]) as (client, send):
await client.get(url="fake.url/fake/route")
request = send.call_args[0][1]
assert isinstance(request, httpx.Request)
assert request.headers["User-Agent"] == "prefect/42.43.44 (API 45.46.47)"
| TestUserAgent |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/common_tree/base.py | {
"start": 932,
"end": 8559
} | class ____:
"""
GPT tree index builder.
Helper class to build the tree-structured index,
or to synthesize an answer.
"""
def __init__(
self,
num_children: int,
summary_prompt: BasePromptTemplate,
llm: Optional[LLM] = None,
docstore: Optional[BaseDocumentStore] = None,
show_progress: bool = False,
use_async: bool = False,
) -> None:
"""Initialize with params."""
if num_children < 2:
raise ValueError("Invalid number of children.")
self.num_children = num_children
self.summary_prompt = summary_prompt
self._llm = llm or Settings.llm
self._prompt_helper = Settings._prompt_helper or PromptHelper.from_llm_metadata(
self._llm.metadata,
)
self._callback_manager = Settings.callback_manager
self._use_async = use_async
self._show_progress = show_progress
self._docstore = docstore or get_default_docstore()
@property
def docstore(self) -> BaseDocumentStore:
"""Return docstore."""
return self._docstore
def build_from_nodes(
self,
nodes: Sequence[BaseNode],
build_tree: bool = True,
) -> IndexGraph:
"""
Build from text.
Returns:
IndexGraph: graph object consisting of all_nodes, root_nodes
"""
index_graph = IndexGraph()
for node in nodes:
index_graph.insert(node)
if build_tree:
return self.build_index_from_nodes(
index_graph, index_graph.all_nodes, index_graph.all_nodes, level=0
)
else:
return index_graph
def _prepare_node_and_text_chunks(
self, cur_node_ids: Dict[int, str]
) -> Tuple[List[int], List[List[BaseNode]], List[str]]:
"""Prepare node and text chunks."""
cur_nodes = {
index: self._docstore.get_node(node_id)
for index, node_id in cur_node_ids.items()
}
cur_node_list = get_sorted_node_list(cur_nodes)
logger.info(
f"> Building index from nodes: {len(cur_nodes) // self.num_children} chunks"
)
indices, cur_nodes_chunks, text_chunks = [], [], []
for i in range(0, len(cur_node_list), self.num_children):
cur_nodes_chunk = cur_node_list[i : i + self.num_children]
truncated_chunks = self._prompt_helper.truncate(
prompt=self.summary_prompt,
text_chunks=[
node.get_content(metadata_mode=MetadataMode.LLM)
for node in cur_nodes_chunk
],
llm=self._llm,
)
text_chunk = "\n".join(truncated_chunks)
indices.append(i)
cur_nodes_chunks.append(cur_nodes_chunk)
text_chunks.append(text_chunk)
return indices, cur_nodes_chunks, text_chunks
def _construct_parent_nodes(
self,
index_graph: IndexGraph,
indices: List[int],
cur_nodes_chunks: List[List[BaseNode]],
summaries: List[str],
) -> Dict[int, str]:
"""
Construct parent nodes.
Save nodes to docstore.
"""
new_node_dict = {}
for i, cur_nodes_chunk, new_summary in zip(
indices, cur_nodes_chunks, summaries
):
logger.debug(
f"> {i}/{len(cur_nodes_chunk)}, "
f"summary: {truncate_text(new_summary, 50)}"
)
new_node = TextNode(text=new_summary)
index_graph.insert(new_node, children_nodes=cur_nodes_chunk)
index = index_graph.get_index(new_node)
new_node_dict[index] = new_node.node_id
self._docstore.add_documents([new_node], allow_update=False)
return new_node_dict
def build_index_from_nodes(
self,
index_graph: IndexGraph,
cur_node_ids: Dict[int, str],
all_node_ids: Dict[int, str],
level: int = 0,
) -> IndexGraph:
"""Consolidates chunks recursively, in a bottoms-up fashion."""
if len(cur_node_ids) <= self.num_children:
index_graph.root_nodes = cur_node_ids
return index_graph
indices, cur_nodes_chunks, text_chunks = self._prepare_node_and_text_chunks(
cur_node_ids
)
with self._callback_manager.event(
CBEventType.TREE, payload={EventPayload.CHUNKS: text_chunks}
) as event:
if self._use_async:
tasks = [
self._llm.apredict(self.summary_prompt, context_str=text_chunk)
for text_chunk in text_chunks
]
outputs: List[Tuple[str, str]] = run_async_tasks(
tasks,
show_progress=self._show_progress,
progress_bar_desc="Generating summaries",
)
summaries = [output[0] for output in outputs]
else:
text_chunks_progress = get_tqdm_iterable(
text_chunks,
show_progress=self._show_progress,
desc="Generating summaries",
)
summaries = [
self._llm.predict(self.summary_prompt, context_str=text_chunk)
for text_chunk in text_chunks_progress
]
event.on_end(payload={"summaries": summaries, "level": level})
new_node_dict = self._construct_parent_nodes(
index_graph, indices, cur_nodes_chunks, summaries
)
all_node_ids.update(new_node_dict)
index_graph.root_nodes = new_node_dict
if len(new_node_dict) <= self.num_children:
return index_graph
else:
return self.build_index_from_nodes(
index_graph, new_node_dict, all_node_ids, level=level + 1
)
async def abuild_index_from_nodes(
self,
index_graph: IndexGraph,
cur_node_ids: Dict[int, str],
all_node_ids: Dict[int, str],
level: int = 0,
) -> IndexGraph:
"""Consolidates chunks recursively, in a bottoms-up fashion."""
if len(cur_node_ids) <= self.num_children:
index_graph.root_nodes = cur_node_ids
return index_graph
indices, cur_nodes_chunks, text_chunks = self._prepare_node_and_text_chunks(
cur_node_ids
)
with self._callback_manager.event(
CBEventType.TREE, payload={EventPayload.CHUNKS: text_chunks}
) as event:
text_chunks_progress = get_tqdm_iterable(
text_chunks,
show_progress=self._show_progress,
desc="Generating summaries",
)
tasks = [
self._llm.apredict(self.summary_prompt, context_str=text_chunk)
for text_chunk in text_chunks_progress
]
summaries = await asyncio.gather(*tasks)
event.on_end(payload={"summaries": summaries, "level": level})
new_node_dict = self._construct_parent_nodes(
index_graph, indices, cur_nodes_chunks, summaries
)
all_node_ids.update(new_node_dict)
index_graph.root_nodes = new_node_dict
if len(new_node_dict) <= self.num_children:
return index_graph
else:
return await self.abuild_index_from_nodes(
index_graph, new_node_dict, all_node_ids, level=level + 1
)
| GPTTreeIndexBuilder |
python | kamyu104__LeetCode-Solutions | Python/check-if-an-array-is-consecutive.py | {
"start": 42,
"end": 285
} | class ____(object):
def isConsecutive(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
return max(nums)-min(nums)+1 == len(nums) == len(set(nums))
# Time: O(nlogn)
# Space: O(1)
# sort
| Solution |
python | celery__celery | celery/exceptions.py | {
"start": 7720,
"end": 7825
} | class ____(PendingDeprecationWarning):
"""Warning of pending deprecation."""
| CPendingDeprecationWarning |
python | getsentry__sentry | src/sentry/services/filestore/gcs.py | {
"start": 13444,
"end": 14741
} | class ____(GoogleCloudStorage):
"""Google cloud storage class with replay upload policy."""
# "try_get" inherits the default behavior. We don't want to exponentially wait in that
# context. We're maintaining the status-quo for now but in the future we can add policies for
# these methods or use no policy at all and implement retries at a higher, more contextual
# level.
#
# def try_get(self, callable: Callable[[], None]) -> None:
def create_retry_policy(self):
"""Retry an action with sigmoid delay for a maximum of five attempts."""
def should_retry(attempt: int, e: Exception) -> bool:
"""Retry gateway timeout exceptions up to the limit."""
return attempt <= REPLAY_GCS_RETRIES and isinstance(e, GCS_RETRYABLE_ERRORS)
# Retry cadence: After a brief period of fast retries the function will retry once
# per second for two minutes.
return ConditionalRetryPolicy(should_retry, sigmoid_delay())
def try_set(self, callable: Callable[[], None]) -> None:
policy = self.create_retry_policy()
policy(callable)
def try_del(self, callable: Callable[[], None]) -> None:
policy = self.create_retry_policy()
policy(callable)
| GoogleCloudStorageWithReplayUploadPolicy |
python | simplejson__simplejson | simplejson/tests/test_encode_basestring_ascii.py | {
"start": 1175,
"end": 2337
} | class ____(TestCase):
def test_py_encode_basestring_ascii(self):
self._test_encode_basestring_ascii(simplejson.encoder.py_encode_basestring_ascii)
def test_c_encode_basestring_ascii(self):
if not simplejson.encoder.c_encode_basestring_ascii:
return
self._test_encode_basestring_ascii(simplejson.encoder.c_encode_basestring_ascii)
def _test_encode_basestring_ascii(self, encode_basestring_ascii):
fname = encode_basestring_ascii.__name__
for input_string, expect in CASES:
result = encode_basestring_ascii(input_string)
#self.assertEqual(result, expect,
# '{0!r} != {1!r} for {2}({3!r})'.format(
# result, expect, fname, input_string))
self.assertEqual(result, expect,
'%r != %r for %s(%r)' % (result, expect, fname, input_string))
def test_sorted_dict(self):
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
s = simplejson.dumps(dict(items), sort_keys=True)
self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}')
| TestEncodeBaseStringAscii |
python | catalyst-team__catalyst | catalyst/contrib/datasets/mnist.py | {
"start": 746,
"end": 7482
} | class ____(Dataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset for testing purposes.
Args:
root: Root directory of dataset where
``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from
``training.pt``, otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from
the internet and puts it in root directory. If dataset
is already downloaded, it is not downloaded again.
normalize (tuple, optional): mean and std
for the MNIST dataset normalization.
numpy (bool, optional): boolean flag to return an np.ndarray,
rather than torch.tensor (default: False).
Raises:
RuntimeError: If ``download is False`` and the dataset not found.
"""
_repr_indent = 4
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
resources = [
(
"https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz", # noqa: E501, W505
"f68b3c2dcbeaaa9fbdd348bbdeb94873",
),
(
"https://storage.googleapis.com/cvdf-datasets/mnist/train-labels-idx1-ubyte.gz", # noqa: E501, W505
"d53e105ee54ea40749a09fcbcd1e9432",
),
(
"https://storage.googleapis.com/cvdf-datasets/mnist/t10k-images-idx3-ubyte.gz", # noqa: E501, W505
"9fb629c4189551a2d022fa330f9573f3",
),
(
"https://storage.googleapis.com/cvdf-datasets/mnist/t10k-labels-idx1-ubyte.gz", # noqa: E501, W505
"ec29112dd5afa0611ce80d1b7f02629c",
),
]
training_file = "training.pt"
test_file = "test.pt"
cache_folder = "MNIST"
classes = [
"0 - zero",
"1 - one",
"2 - two",
"3 - three",
"4 - four",
"5 - five",
"6 - six",
"7 - seven",
"8 - eight",
"9 - nine",
]
def __init__(
self,
root: str,
train: bool = True,
download: bool = True,
normalize: tuple = (0.1307, 0.3081),
numpy: bool = False,
):
"""Init."""
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
self.root = root
self.train = train # training set or test set
self.normalize = normalize
if self.normalize is not None:
assert len(self.normalize) == 2, "normalize should be (mean, variance)"
self.numpy = numpy
if download:
self.download()
if not self._check_exists():
raise RuntimeError(
"Dataset not found. You can use download=True to download it"
)
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(
os.path.join(self.processed_folder, data_file)
)
self.data = torch.tensor(self.data)
self.targets = torch.tensor(self.targets)
def __getitem__(self, index):
"""
Args:
index: Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index].float().unsqueeze(0), int(self.targets[index])
if self.normalize is not None:
img = self.normalize_tensor(img, *self.normalize)
if self.numpy:
img = img.cpu().numpy()[0]
return img, target
def __len__(self):
"""Length."""
return len(self.data)
def __repr__(self):
"""Repr."""
head = "Dataset " + self.cache_folder
body = ["Number of datapoints: {}".format(self.__len__())]
if self.root is not None:
body.append("Root location: {}".format(self.root))
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
@staticmethod
def normalize_tensor(
tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0
) -> torch.Tensor:
"""Internal tensor normalization."""
mean = torch.as_tensor(mean, dtype=tensor.dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=tensor.dtype, device=tensor.device)
return tensor.sub(mean).div(std)
@property
def raw_folder(self):
"""@TODO: Docs. Contribution is welcome."""
return os.path.join(self.root, self.cache_folder, "raw")
@property
def processed_folder(self):
"""@TODO: Docs. Contribution is welcome."""
return os.path.join(self.root, self.cache_folder, "processed")
@property
def class_to_idx(self):
"""@TODO: Docs. Contribution is welcome."""
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return os.path.exists(
os.path.join(self.processed_folder, self.training_file)
) and os.path.exists(os.path.join(self.processed_folder, self.test_file))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder."""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition("/")[2]
download_and_extract_archive(
url, download_root=self.raw_folder, filename=filename, md5=md5
)
# process and save as torch files
print("Processing...")
training_set = (
_read_image_file(os.path.join(self.raw_folder, "train-images-idx3-ubyte")),
_read_label_file(os.path.join(self.raw_folder, "train-labels-idx1-ubyte")),
)
test_set = (
_read_image_file(os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")),
_read_label_file(os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")),
)
with open(os.path.join(self.processed_folder, self.training_file), "wb") as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), "wb") as f:
torch.save(test_set, f)
print("Done!")
def extra_repr(self):
"""@TODO: Docs. Contribution is welcome."""
return "Split: {}".format("Train" if self.train is True else "Test")
| MNIST |
python | catalyst-team__catalyst | tests/catalyst/callbacks/test_control_flow.py | {
"start": 284,
"end": 390
} | class ____(Callback):
def __init__(self):
super().__init__(CallbackOrder.Internal)
| DummyCallback |
python | django__django | tests/admin_inlines/models.py | {
"start": 5274,
"end": 5322
} | class ____(models.Model):
pass
| TitleCollection |
python | scipy__scipy | scipy/optimize/tests/test_linprog.py | {
"start": 104532,
"end": 104925
} | class ____:
method = "interior-point"
LCT = LinprogCommonTests
# these are a few of the existing tests that have redundancy
test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility
test_bug_10349 = LCT.test_bug_10349
test_bug_7044 = LCT.test_bug_7044
test_NFLC = LCT.test_network_flow_limited_capacity
test_enzo_example_b = LCT.test_enzo_example_b
| RRTests |
python | PrefectHQ__prefect | src/prefect/variables.py | {
"start": 533,
"end": 8342
} | class ____(BaseModel):
"""
Variables are named, mutable JSON values that can be shared across tasks and flows.
Arguments:
name: A string identifying the variable.
value: A string that is the value of the variable.
tags: An optional list of strings to associate with the variable.
"""
name: str = Field(
default=...,
description="The name of the variable",
examples=["my_variable"],
max_length=MAX_VARIABLE_NAME_LENGTH,
)
value: StrictVariableValue = Field(
default=...,
description="The value of the variable",
examples=["my-value"],
)
tags: Optional[list[str]] = Field(default=None)
@classmethod
async def aset(
cls,
name: str,
value: StrictVariableValue,
tags: Optional[list[str]] = None,
overwrite: bool = False,
) -> "Variable":
"""
Asynchronously sets a new variable. If one exists with the same name, must pass `overwrite=True`
Returns the newly set variable object.
Args:
- name: The name of the variable to set.
- value: The value of the variable to set.
- tags: An optional list of strings to associate with the variable.
- overwrite: Whether to overwrite the variable if it already exists.
Example:
Set a new variable and overwrite it if it already exists.
```
from prefect.variables import Variable
@flow
async def my_flow():
await Variable.aset(name="my_var",value="test_value", tags=["hi", "there"], overwrite=True)
```
"""
client, _ = get_or_create_client()
variable_exists = await client.read_variable_by_name(name)
var_dict = {"name": name, "value": value, "tags": tags or []}
if variable_exists:
if not overwrite:
raise ValueError(
f"Variable {name!r} already exists. Use `overwrite=True` to update it."
)
await client.update_variable(
variable=VariableUpdate.model_validate(var_dict)
)
variable = await client.read_variable_by_name(name)
for key in var_dict.keys():
var_dict.update({key: getattr(variable, key)})
else:
await client.create_variable(
variable=VariableCreate.model_validate(var_dict)
)
return cls.model_validate(var_dict)
@classmethod
@async_dispatch(aset)
def set(
cls,
name: str,
value: StrictVariableValue,
tags: Optional[list[str]] = None,
overwrite: bool = False,
) -> "Variable":
"""
Sets a new variable. If one exists with the same name, must pass `overwrite=True`
Returns the newly set variable object.
Args:
- name: The name of the variable to set.
- value: The value of the variable to set.
- tags: An optional list of strings to associate with the variable.
- overwrite: Whether to overwrite the variable if it already exists.
Example:
Set a new variable and overwrite it if it already exists.
```
from prefect.variables import Variable
@flow
def my_flow():
Variable.set(name="my_var",value="test_value", tags=["hi", "there"], overwrite=True)
```
"""
with get_client(sync_client=True) as client:
variable_exists = client.read_variable_by_name(name)
var_dict = {"name": name, "value": value, "tags": tags or []}
if variable_exists:
if not overwrite:
raise ValueError(
f"Variable {name!r} already exists. Use `overwrite=True` to update it."
)
client.update_variable(variable=VariableUpdate.model_validate(var_dict))
variable = client.read_variable_by_name(name)
for key in var_dict.keys():
var_dict.update({key: getattr(variable, key)})
else:
client.create_variable(variable=VariableCreate.model_validate(var_dict))
return cls.model_validate(var_dict)
@classmethod
async def aget(
cls,
name: str,
default: StrictVariableValue = None,
) -> StrictVariableValue:
"""
Asynchronously get a variable's value by name.
If the variable does not exist, return the default value.
Args:
- name: The name of the variable value to get.
- default: The default value to return if the variable does not exist.
Example:
Get a variable's value by name.
```python
from prefect import flow
from prefect.variables import Variable
@flow
async def my_flow():
var = await Variable.aget("my_var")
```
"""
client, _ = get_or_create_client()
variable = await client.read_variable_by_name(name)
return variable.value if variable else default
@classmethod
@async_dispatch(aget)
def get(
cls,
name: str,
default: StrictVariableValue = None,
) -> StrictVariableValue:
"""
Get a variable's value by name.
If the variable does not exist, return the default value.
Args:
- name: The name of the variable value to get.
- default: The default value to return if the variable does not exist.
Example:
Get a variable's value by name.
```python
from prefect import flow
from prefect.variables import Variable
@flow
def my_flow():
var = Variable.get("my_var")
```
"""
with get_client(sync_client=True) as client:
variable = client.read_variable_by_name(name)
return variable.value if variable else default
@classmethod
async def aunset(cls, name: str) -> bool:
"""
Asynchronously unset a variable by name.
Args:
- name: The name of the variable to unset.
Returns `True` if the variable was deleted, `False` if the variable did not exist.
Example:
Unset a variable by name.
```python
from prefect import flow
from prefect.variables import Variable
@flow
async def my_flow():
await Variable.aunset("my_var")
```
"""
client, _ = get_or_create_client()
try:
await client.delete_variable_by_name(name=name)
return True
except ObjectNotFound:
return False
@classmethod
@async_dispatch(aunset)
def unset(cls, name: str) -> bool:
"""
Unset a variable by name.
Args:
- name: The name of the variable to unset.
Returns `True` if the variable was deleted, `False` if the variable did not exist.
Example:
Unset a variable by name.
```python
from prefect import flow
from prefect.variables import Variable
@flow
def my_flow():
Variable.unset("my_var")
```
"""
with get_client(sync_client=True) as client:
try:
client.delete_variable_by_name(name=name)
return True
except ObjectNotFound:
return False
__getattr__: Callable[[str], Any] = getattr_migration(__name__)
| Variable |
python | django-haystack__django-haystack | test_haystack/test_fields.py | {
"start": 10929,
"end": 11595
} | class ____(TestCase):
def test_init(self):
try:
foo = FloatField(model_attr="foo")
except:
self.fail()
def test_prepare(self):
mock = MockModel()
mock.floaty = 12.5
floaty = FloatField(model_attr="floaty")
self.assertEqual(floaty.prepare(mock), 12.5)
# Simulate default=1.5.
mock = MockModel()
default = FloatField(default=1.5)
self.assertEqual(default.prepare(mock), 1.5)
# Simulate null=True.
mock = MockModel()
floaty_none = FloatField(null=True)
self.assertEqual(floaty_none.prepare(mock), None)
| FloatFieldTestCase |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ranges.py | {
"start": 27511,
"end": 28710
} | class ____(AbstractRange[Range[_T]]):
"""Base for PostgreSQL RANGE types.
These are types that return a single :class:`_postgresql.Range` object.
.. seealso::
`PostgreSQL range functions <https://www.postgresql.org/docs/current/static/functions-range.html>`_
""" # noqa: E501
__abstract__ = True
def _resolve_for_literal(self, value: Range[Any]) -> Any:
spec = value.lower if value.lower is not None else value.upper
if isinstance(spec, int):
# pg is unreasonably picky here: the query
# "select 1::INTEGER <@ '[1, 4)'::INT8RANGE" raises
# "operator does not exist: integer <@ int8range" as of pg 16
if _is_int32(value):
return INT4RANGE()
else:
return INT8RANGE()
elif isinstance(spec, (Decimal, float)):
return NUMRANGE()
elif isinstance(spec, datetime):
return TSRANGE() if not spec.tzinfo else TSTZRANGE()
elif isinstance(spec, date):
return DATERANGE()
else:
# empty Range, SQL datatype can't be determined here
return sqltypes.NULLTYPE
| AbstractSingleRange |
python | apache__airflow | airflow-core/tests/unit/core/test_exceptions.py | {
"start": 834,
"end": 2528
} | class ____:
def setup_method(self):
self.old_modules = dict(sys.modules)
def teardown_method(self):
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
for mod in [m for m in sys.modules if m not in self.old_modules]:
del sys.modules[mod]
def test_pod_mutation_hook_exceptions_compatibility(
self,
):
from airflow.exceptions import (
PodMutationHookException as CoreMutationHookException,
)
from airflow.providers.cncf.kubernetes.exceptions import (
PodMutationHookException as ProviderMutationHookException,
)
from airflow.providers.cncf.kubernetes.pod_generator import (
PodMutationHookException as ProviderGeneratorMutationHookException,
)
assert ProviderMutationHookException == CoreMutationHookException
assert ProviderMutationHookException == ProviderGeneratorMutationHookException
def test_pod_reconciliation_error_exceptions_compatibility(
self,
):
from airflow.exceptions import (
PodReconciliationError as CoreReconciliationError,
)
from airflow.providers.cncf.kubernetes.exceptions import (
PodReconciliationError as ProviderReconciliationError,
)
from airflow.providers.cncf.kubernetes.pod_generator import (
PodReconciliationError as ProviderGeneratorReconciliationError,
)
assert ProviderReconciliationError == CoreReconciliationError
assert ProviderReconciliationError == ProviderGeneratorReconciliationError
| TestExceptions |
python | doocs__leetcode | solution/3300-3399/3355.Zero Array Transformation I/Solution.py | {
"start": 0,
"end": 343
} | class ____:
def isZeroArray(self, nums: List[int], queries: List[List[int]]) -> bool:
d = [0] * (len(nums) + 1)
for l, r in queries:
d[l] += 1
d[r + 1] -= 1
s = 0
for x, y in zip(nums, d):
s += y
if x > s:
return False
return True
| Solution |
python | ray-project__ray | python/ray/serve/_private/controller.py | {
"start": 3120,
"end": 53097
} | class ____:
"""Responsible for managing the state of the serving system.
The controller implements fault tolerance by persisting its state in
a new checkpoint each time a state change is made. If the actor crashes,
the latest checkpoint is loaded and the state is recovered. Checkpoints
are written/read using a provided KV-store interface.
All hard state in the system is maintained by this actor and persisted via
these checkpoints. Soft state required by other components is fetched by
those actors from this actor on startup and updates are pushed out from
this actor.
All other actors started by the controller are named, detached actors
so they will not fate share with the controller if it crashes.
The following guarantees are provided for state-changing calls to the
controller:
- If the call succeeds, the change was made and will be reflected in
the system even if the controller or other actors die unexpectedly.
- If the call fails, the change may have been made but isn't guaranteed
to have been. The client should retry in this case. Note that this
requires all implementations here to be idempotent.
"""
async def __init__(
self,
*,
http_options: HTTPOptions,
global_logging_config: LoggingConfig,
grpc_options: Optional[gRPCOptions] = None,
):
self._controller_node_id = ray.get_runtime_context().get_node_id()
assert (
self._controller_node_id == get_head_node_id()
), "Controller must be on the head node."
self.ray_worker_namespace = ray.get_runtime_context().namespace
self.gcs_client = GcsClient(address=ray.get_runtime_context().gcs_address)
kv_store_namespace = f"ray-serve-{self.ray_worker_namespace}"
self.kv_store = RayInternalKVStore(kv_store_namespace, self.gcs_client)
self.long_poll_host = LongPollHost()
self.done_recovering_event = asyncio.Event()
# Try to read config from checkpoint
# logging config from checkpoint take precedence over the one passed in
# the constructor.
self.global_logging_config = None
log_config_checkpoint = self.kv_store.get(LOGGING_CONFIG_CHECKPOINT_KEY)
if log_config_checkpoint is not None:
global_logging_config = pickle.loads(log_config_checkpoint)
self.reconfigure_global_logging_config(global_logging_config)
configure_component_memory_profiler(
component_name="controller", component_id=str(os.getpid())
)
if RAY_SERVE_CONTROLLER_CALLBACK_IMPORT_PATH:
logger.info(
"Calling user-provided callback from import path "
f"{RAY_SERVE_CONTROLLER_CALLBACK_IMPORT_PATH}."
)
call_function_from_import_path(RAY_SERVE_CONTROLLER_CALLBACK_IMPORT_PATH)
# Used to read/write checkpoints.
self.cluster_node_info_cache = create_cluster_node_info_cache(self.gcs_client)
self.cluster_node_info_cache.update()
# Configure proxy default HTTP and gRPC options.
self.proxy_state_manager = ProxyStateManager(
http_options=configure_http_options_with_defaults(http_options),
head_node_id=self._controller_node_id,
cluster_node_info_cache=self.cluster_node_info_cache,
logging_config=self.global_logging_config,
grpc_options=set_proxy_default_grpc_options(grpc_options),
)
# We modify the HTTP and gRPC options above, so delete them to avoid
del http_options, grpc_options
self.endpoint_state = EndpointState(self.kv_store, self.long_poll_host)
# Fetch all running actors in current cluster as source of current
# replica state for controller failure recovery
all_current_actors = ray.util.list_named_actors(all_namespaces=True)
all_serve_actor_names = [
actor["name"]
for actor in all_current_actors
if actor["namespace"] == SERVE_NAMESPACE
]
self.autoscaling_state_manager = AutoscalingStateManager()
self.deployment_state_manager = DeploymentStateManager(
self.kv_store,
self.long_poll_host,
all_serve_actor_names,
get_all_live_placement_group_names(),
self.cluster_node_info_cache,
self.autoscaling_state_manager,
)
# Manage all applications' state
self.application_state_manager = ApplicationStateManager(
self.deployment_state_manager,
self.autoscaling_state_manager,
self.endpoint_state,
self.kv_store,
self.global_logging_config,
)
# Controller actor details
self._actor_details = ServeActorDetails(
node_id=ray.get_runtime_context().get_node_id(),
node_ip=ray.util.get_node_ip_address(),
node_instance_id=ray.util.get_node_instance_id(),
actor_id=ray.get_runtime_context().get_actor_id(),
actor_name=SERVE_CONTROLLER_NAME,
worker_id=ray.get_runtime_context().get_worker_id(),
log_file_path=get_component_logger_file_path(),
)
self._shutting_down = False
self._shutdown_event = asyncio.Event()
self._shutdown_start_time = None
self._create_control_loop_metrics()
run_background_task(self.run_control_loop())
# The target capacity percentage for all deployments across the cluster.
self._target_capacity: Optional[float] = None
self._target_capacity_direction: Optional[TargetCapacityDirection] = None
self._recover_state_from_checkpoint()
# Nodes where proxy actors should run.
self._proxy_nodes = set()
self._update_proxy_nodes()
def reconfigure_global_logging_config(self, global_logging_config: LoggingConfig):
if (
self.global_logging_config
and self.global_logging_config == global_logging_config
):
return
self.kv_store.put(
LOGGING_CONFIG_CHECKPOINT_KEY, pickle.dumps(global_logging_config)
)
self.global_logging_config = global_logging_config
self.long_poll_host.notify_changed(
{LongPollNamespace.GLOBAL_LOGGING_CONFIG: global_logging_config}
)
configure_component_logger(
component_name="controller",
component_id=str(os.getpid()),
logging_config=global_logging_config,
)
logger.info(
f"Controller starting (version='{ray.__version__}').",
extra={"log_to_stderr": False},
)
logger.debug(
"Configure the serve controller logger "
f"with logging config: {self.global_logging_config}"
)
def check_alive(self) -> None:
"""No-op to check if this controller is alive."""
return
def get_pid(self) -> int:
return os.getpid()
def record_autoscaling_metrics_from_replica(
self, replica_metric_report: ReplicaMetricReport
):
latency = time.time() - replica_metric_report.timestamp
latency_ms = latency * 1000
if latency_ms > RAY_SERVE_RPC_LATENCY_WARNING_THRESHOLD_MS:
logger.warning(
f"Received autoscaling metrics from replica {replica_metric_report.replica_id} with timestamp {replica_metric_report.timestamp} "
f"which is {latency_ms}ms ago. "
f"This is greater than the warning threshold RPC latency of {RAY_SERVE_RPC_LATENCY_WARNING_THRESHOLD_MS}ms. "
"This may indicate a performance issue with the controller try increasing the RAY_SERVE_RPC_LATENCY_WARNING_THRESHOLD_MS environment variable."
)
self.autoscaling_state_manager.record_request_metrics_for_replica(
replica_metric_report
)
def record_autoscaling_metrics_from_handle(
self, handle_metric_report: HandleMetricReport
):
latency = time.time() - handle_metric_report.timestamp
latency_ms = latency * 1000
if latency_ms > RAY_SERVE_RPC_LATENCY_WARNING_THRESHOLD_MS:
logger.warning(
f"Received autoscaling metrics from handle {handle_metric_report.handle_id} for deployment {handle_metric_report.deployment_id} with timestamp {handle_metric_report.timestamp} "
f"which is {latency_ms}ms ago. "
f"This is greater than the warning threshold RPC latency of {RAY_SERVE_RPC_LATENCY_WARNING_THRESHOLD_MS}ms. "
"This may indicate a performance issue with the controller try increasing the RAY_SERVE_RPC_LATENCY_WARNING_THRESHOLD_MS environment variable."
)
self.autoscaling_state_manager.record_request_metrics_for_handle(
handle_metric_report
)
def _get_total_num_requests_for_deployment_for_testing(
self, deployment_id: DeploymentID
):
return self.autoscaling_state_manager.get_total_num_requests_for_deployment(
deployment_id
)
def _get_metrics_for_deployment_for_testing(self, deployment_id: DeploymentID):
return self.autoscaling_state_manager.get_metrics_for_deployment(deployment_id)
def _dump_replica_states_for_testing(self, deployment_id: DeploymentID):
return self.deployment_state_manager._deployment_states[deployment_id]._replicas
def _stop_one_running_replica_for_testing(self, deployment_id):
self.deployment_state_manager._deployment_states[
deployment_id
]._stop_one_running_replica_for_testing()
async def listen_for_change(self, keys_to_snapshot_ids: Dict[str, int]):
"""Proxy long pull client's listen request.
Args:
keys_to_snapshot_ids (Dict[str, int]): Snapshot IDs are used to
determine whether or not the host should immediately return the
data or wait for the value to be changed.
"""
if not self.done_recovering_event.is_set():
await self.done_recovering_event.wait()
return await self.long_poll_host.listen_for_change(keys_to_snapshot_ids)
async def listen_for_change_java(self, keys_to_snapshot_ids_bytes: bytes):
"""Proxy long pull client's listen request.
Args:
keys_to_snapshot_ids_bytes (Dict[str, int]): the protobuf bytes of
keys_to_snapshot_ids (Dict[str, int]).
"""
if not self.done_recovering_event.is_set():
await self.done_recovering_event.wait()
return await self.long_poll_host.listen_for_change_java(
keys_to_snapshot_ids_bytes
)
def get_all_endpoints(self) -> Dict[DeploymentID, Dict[str, Any]]:
"""Returns a dictionary of deployment name to config."""
return self.endpoint_state.get_endpoints()
def get_all_endpoints_java(self) -> bytes:
"""Returns a dictionary of deployment name to config."""
endpoints = self.get_all_endpoints()
# NOTE(zcin): Java only supports 1.x deployments, so only return
# a dictionary of deployment name -> endpoint info
data = {
endpoint_tag.name: EndpointInfoProto(route=endpoint_dict["route"])
for endpoint_tag, endpoint_dict in endpoints.items()
}
return EndpointSet(endpoints=data).SerializeToString()
def get_proxies(self) -> Dict[NodeId, ActorHandle]:
"""Returns a dictionary of node ID to proxy actor handles."""
if self.proxy_state_manager is None:
return {}
return self.proxy_state_manager.get_proxy_handles()
def get_proxy_names(self) -> bytes:
"""Returns the proxy actor name list serialized by protobuf."""
if self.proxy_state_manager is None:
return None
actor_name_list = ActorNameList(
names=self.proxy_state_manager.get_proxy_names().values()
)
return actor_name_list.SerializeToString()
def _update_proxy_nodes(self):
"""Update the nodes set where proxy actors should run.
Controller decides where proxy actors should run
(head node and nodes with deployment replicas).
"""
new_proxy_nodes = self.deployment_state_manager.get_active_node_ids()
new_proxy_nodes = new_proxy_nodes - set(
self.cluster_node_info_cache.get_draining_nodes()
)
new_proxy_nodes.add(self._controller_node_id)
self._proxy_nodes = new_proxy_nodes
async def run_control_loop(self) -> None:
# NOTE(edoakes): we catch all exceptions here and simply log them,
# because an unhandled exception would cause the main control loop to
# halt, which should *never* happen.
recovering_timeout = RECOVERING_LONG_POLL_BROADCAST_TIMEOUT_S
num_loops = 0
start_time = time.time()
while True:
loop_start_time = time.time()
try:
await self.run_control_loop_step(
start_time, recovering_timeout, num_loops
)
except Exception as e:
# we never expect this to happen, but adding this to be safe
logger.exception(f"There was an exception in the control loop: {e}")
await asyncio.sleep(1)
loop_duration = time.time() - loop_start_time
if loop_duration > 10:
logger.warning(
f"The last control loop was slow (took {loop_duration}s). "
"This is likely caused by running a large number of "
"replicas in a single Ray cluster. Consider using "
"multiple Ray clusters.",
extra={"log_to_stderr": False},
)
self.control_loop_duration_gauge_s.set(loop_duration)
num_loops += 1
self.num_control_loops_gauge.set(num_loops)
sleep_start_time = time.time()
await asyncio.sleep(CONTROL_LOOP_INTERVAL_S)
self.sleep_duration_gauge_s.set(time.time() - sleep_start_time)
async def run_control_loop_step(
self, start_time: float, recovering_timeout: float, num_loops: int
):
try:
self.cluster_node_info_cache.update()
except Exception:
logger.exception("Exception updating cluster node info cache.")
if self._shutting_down:
try:
self.shutdown()
except Exception:
logger.exception("Exception during shutdown.")
if (
not self.done_recovering_event.is_set()
and time.time() - start_time > recovering_timeout
):
logger.warning(
f"Replicas still recovering after {recovering_timeout}s, "
"setting done recovering event to broadcast long poll updates."
)
self.done_recovering_event.set()
# initialize any_recovering to None to indicate that we don't know if
# we've recovered anything yet
any_recovering: Optional[bool] = None
try:
dsm_update_start_time = time.time()
any_recovering = self.deployment_state_manager.update()
self.dsm_update_duration_gauge_s.set(time.time() - dsm_update_start_time)
if not self.done_recovering_event.is_set() and not any_recovering:
self.done_recovering_event.set()
if num_loops > 0:
# Only log if we actually needed to recover anything.
logger.info(
"Finished recovering deployments after "
f"{(time.time() - start_time):.2f}s.",
extra={"log_to_stderr": False},
)
except Exception:
logger.exception("Exception updating deployment state.")
try:
asm_update_start_time = time.time()
self.application_state_manager.update()
self.asm_update_duration_gauge_s.set(time.time() - asm_update_start_time)
except Exception:
logger.exception("Exception updating application state.")
# Update the proxy nodes set before updating the proxy states,
# so they are more consistent.
node_update_start_time = time.time()
self._update_proxy_nodes()
self.node_update_duration_gauge_s.set(time.time() - node_update_start_time)
# Don't update proxy_state until after the done recovering event is set,
# otherwise we may start a new proxy but not broadcast it any
# info about available deployments & their replicas.
if self.proxy_state_manager and self.done_recovering_event.is_set():
try:
proxy_update_start_time = time.time()
self.proxy_state_manager.update(proxy_nodes=self._proxy_nodes)
self.proxy_update_duration_gauge_s.set(
time.time() - proxy_update_start_time
)
except Exception:
logger.exception("Exception updating proxy state.")
# When the controller is done recovering, drop invalid handle metrics
# that may be stale for autoscaling
if any_recovering is False:
self.autoscaling_state_manager.drop_stale_handle_metrics(
self.deployment_state_manager.get_alive_replica_actor_ids()
| self.proxy_state_manager.get_alive_proxy_actor_ids()
)
def _create_control_loop_metrics(self):
self.node_update_duration_gauge_s = metrics.Gauge(
"serve_controller_node_update_duration_s",
description="The control loop time spent on collecting proxy node info.",
)
self.proxy_update_duration_gauge_s = metrics.Gauge(
"serve_controller_proxy_state_update_duration_s",
description="The control loop time spent on updating proxy state.",
)
self.dsm_update_duration_gauge_s = metrics.Gauge(
"serve_controller_deployment_state_update_duration_s",
description="The control loop time spent on updating deployment state.",
)
self.asm_update_duration_gauge_s = metrics.Gauge(
"serve_controller_application_state_update_duration_s",
description="The control loop time spent on updating application state.",
)
self.sleep_duration_gauge_s = metrics.Gauge(
"serve_controller_sleep_duration_s",
description="The duration of the last control loop's sleep.",
)
self.control_loop_duration_gauge_s = metrics.Gauge(
"serve_controller_control_loop_duration_s",
description="The duration of the last control loop.",
)
self.num_control_loops_gauge = metrics.Gauge(
"serve_controller_num_control_loops",
description=(
"The number of control loops performed by the controller. "
"Increases monotonically over the controller's lifetime."
),
tag_keys=("actor_id",),
)
self.num_control_loops_gauge.set_default_tags(
{"actor_id": ray.get_runtime_context().get_actor_id()}
)
def _recover_state_from_checkpoint(self):
(
deployment_time,
serve_config,
target_capacity_direction,
) = self._read_config_checkpoint()
self._target_capacity_direction = target_capacity_direction
if serve_config is not None:
logger.info(
"Recovered config from checkpoint.", extra={"log_to_stderr": False}
)
self.apply_config(serve_config, deployment_time=deployment_time)
def _read_config_checkpoint(
self,
) -> Tuple[float, Optional[ServeDeploySchema], Optional[TargetCapacityDirection]]:
"""Reads the current Serve config checkpoint.
The Serve config checkpoint stores active application configs and
other metadata.
Returns:
If the GCS contains a checkpoint, tuple of:
1. A deployment timestamp.
2. A Serve config. This Serve config is reconstructed from the
active application states. It may not exactly match the
submitted config (e.g. the top-level http options may be
different).
3. The target_capacity direction calculated after the Serve
was submitted.
If the GCS doesn't contain a checkpoint, returns (0, None, None).
"""
checkpoint = self.kv_store.get(CONFIG_CHECKPOINT_KEY)
if checkpoint is not None:
(
deployment_time,
target_capacity,
target_capacity_direction,
config_checkpoints_dict,
) = pickle.loads(checkpoint)
return (
deployment_time,
ServeDeploySchema(
applications=list(config_checkpoints_dict.values()),
target_capacity=target_capacity,
),
target_capacity_direction,
)
else:
return (0.0, None, None)
    def _all_running_replicas(self) -> Dict[DeploymentID, List[RunningReplicaInfo]]:
        """Return info for every running replica, keyed by deployment ID.

        Used for testing only.
        """
        # Delegates directly to the deployment state manager's live view.
        return self.deployment_state_manager.get_running_replica_infos()
    def get_actor_details(self) -> ServeActorDetails:
        """Return the actor details for this controller.

        Currently used for test only.
        """
        # _actor_details is populated elsewhere in the class; this is a
        # read-only accessor.
        return self._actor_details
def get_proxy_details(self, node_id: str) -> Optional[ProxyDetails]:
"""Returns the proxy details for the proxy on the given node.
Currently used for test only. Will return None if the proxy doesn't exist on
the given node.
"""
if self.proxy_state_manager is None:
return None
return self.proxy_state_manager.get_proxy_details().get(node_id)
def get_deployment_timestamps(self, app_name: str) -> float:
"""Returns the deployment timestamp for the given app.
Currently used for test only.
"""
for (
_app_name,
app_status_info,
) in self.application_state_manager.list_app_statuses().items():
if app_name == _app_name:
return app_status_info.deployment_timestamp
def get_deployment_details(
self, app_name: str, deployment_name: str
) -> DeploymentDetails:
"""Returns the deployment details for the app and deployment.
Currently used for test only.
"""
return self.application_state_manager.list_deployment_details(app_name)[
deployment_name
]
def get_http_config(self) -> HTTPOptions:
"""Return the HTTP proxy configuration."""
if self.proxy_state_manager is None:
return HTTPOptions()
return self.proxy_state_manager.get_config()
def get_grpc_config(self) -> gRPCOptions:
"""Return the gRPC proxy configuration."""
if self.proxy_state_manager is None:
return gRPCOptions()
return self.proxy_state_manager.get_grpc_config()
def get_root_url(self):
"""Return the root url for the serve instance."""
if self.proxy_state_manager is None:
return None
http_config = self.get_http_config()
if http_config.root_url == "":
if SERVE_ROOT_URL_ENV_KEY in os.environ:
return os.environ[SERVE_ROOT_URL_ENV_KEY]
else:
# HTTP is disabled
if http_config.host is None:
return ""
return (
f"http://{build_address(http_config.host, http_config.port)}"
f"{http_config.root_path}"
)
return http_config.root_url
def config_checkpoint_deleted(self) -> bool:
"""Returns whether the config checkpoint has been deleted.
Get the config checkpoint from the kv store. If it is None, then it has been
deleted.
"""
return self.kv_store.get(CONFIG_CHECKPOINT_KEY) is None
def shutdown(self):
"""Shuts down the serve instance completely.
This method will only be triggered when `self._shutting_down` is true. It
deletes the kv store for config checkpoints, sets application state to deleting,
delete all deployments, and shuts down all proxies. Once all these
resources are released, it then kills the controller actor.
"""
if not self._shutting_down:
return
if self._shutdown_start_time is None:
self._shutdown_start_time = time.time()
logger.info("Controller shutdown started.", extra={"log_to_stderr": False})
self.kv_store.delete(CONFIG_CHECKPOINT_KEY)
self.kv_store.delete(LOGGING_CONFIG_CHECKPOINT_KEY)
self.application_state_manager.shutdown()
self.deployment_state_manager.shutdown()
self.endpoint_state.shutdown()
if self.proxy_state_manager:
self.proxy_state_manager.shutdown()
config_checkpoint_deleted = self.config_checkpoint_deleted()
application_is_shutdown = self.application_state_manager.is_ready_for_shutdown()
deployment_is_shutdown = self.deployment_state_manager.is_ready_for_shutdown()
endpoint_is_shutdown = self.endpoint_state.is_ready_for_shutdown()
proxy_state_is_shutdown = (
self.proxy_state_manager is None
or self.proxy_state_manager.is_ready_for_shutdown()
)
if (
config_checkpoint_deleted
and application_is_shutdown
and deployment_is_shutdown
and endpoint_is_shutdown
and proxy_state_is_shutdown
):
logger.warning(
"All resources have shut down, controller exiting.",
extra={"log_to_stderr": False},
)
_controller_actor = ray.get_runtime_context().current_actor
ray.kill(_controller_actor, no_restart=True)
elif time.time() - self._shutdown_start_time > 10:
if not config_checkpoint_deleted:
logger.warning(
f"{CONFIG_CHECKPOINT_KEY} not yet deleted",
extra={"log_to_stderr": False},
)
if not application_is_shutdown:
logger.warning(
"application not yet shutdown",
extra={"log_to_stderr": False},
)
if not deployment_is_shutdown:
logger.warning(
"deployment not yet shutdown",
extra={"log_to_stderr": False},
)
if not endpoint_is_shutdown:
logger.warning(
"endpoint not yet shutdown",
extra={"log_to_stderr": False},
)
if not proxy_state_is_shutdown:
logger.warning(
"proxy_state not yet shutdown",
extra={"log_to_stderr": False},
)
def deploy_applications(
self,
name_to_deployment_args_list: Dict[str, List[bytes]],
name_to_application_args: Dict[str, bytes],
) -> None:
"""
Takes in a list of dictionaries that contain deployment arguments.
If same app name deployed, old application will be overwritten.
Args:
name: Application name.
deployment_args_list: List of serialized deployment information,
where each item in the list is bytes representing the serialized
protobuf `DeploymentArgs` object. `DeploymentArgs` contains all the
information for the single deployment.
name_to_application_args: Dictionary mapping application names to serialized
application arguments, where each item is bytes representing the serialized
protobuf `ApplicationArgs` object. `ApplicationArgs` contains the information
for the application.
"""
name_to_deployment_args = {}
for name, deployment_args_list in name_to_deployment_args_list.items():
deployment_args_deserialized = []
for deployment_args_bytes in deployment_args_list:
args = DeploymentArgs.FromString(deployment_args_bytes)
deployment_args_deserialized.append(
{
"deployment_name": args.deployment_name,
"deployment_config_proto_bytes": args.deployment_config,
"replica_config_proto_bytes": args.replica_config,
"deployer_job_id": args.deployer_job_id,
"ingress": args.ingress,
"route_prefix": (
args.route_prefix if args.HasField("route_prefix") else None
),
}
)
name_to_deployment_args[name] = deployment_args_deserialized
name_to_application_args_deserialized = {}
for name, application_args_bytes in name_to_application_args.items():
name_to_application_args_deserialized[name] = ApplicationArgs.FromString(
application_args_bytes
)
self.application_state_manager.deploy_apps(
name_to_deployment_args, name_to_application_args_deserialized
)
self.application_state_manager.save_checkpoint()
def deploy_application(
self,
name: str,
deployment_args_list: List[bytes],
application_args: bytes,
) -> None:
"""
Deploy a single application
(as deploy_applications(), but it only takes a single name and deployment args).
This primarily exists as a shim to avoid
changing Java code in https://github.com/ray-project/ray/pull/49168,
and could be removed if the Java code was refactored
to use the new bulk deploy_applications API.
"""
self.deploy_applications(
{name: deployment_args_list},
{name: application_args},
)
    def apply_config(
        self,
        config: ServeDeploySchema,
        deployment_time: float = 0.0,
    ) -> None:
        """Apply the config described in `ServeDeploySchema`.

        This will upgrade the applications to the goal state specified in the
        config. If `deployment_time` is not provided, `time.time()` is used.

        NOTE: statement order here matters — the target-capacity direction is
        computed against the *previous* checkpointed config before
        `self._target_capacity` is overwritten, and the new checkpoint is
        written to the KV store before the app configs are applied.
        """
        ServeUsageTag.API_VERSION.record("v2")
        if not deployment_time:
            deployment_time = time.time()
        new_config_checkpoint = {}
        # Compare against the previously checkpointed config to decide
        # whether target_capacity is scaling up or down.
        _, curr_config, _ = self._read_config_checkpoint()
        self._target_capacity_direction = calculate_target_capacity_direction(
            curr_config=curr_config,
            new_config=config,
            curr_target_capacity_direction=self._target_capacity_direction,
        )
        log_target_capacity_change(
            self._target_capacity,
            config.target_capacity,
            self._target_capacity_direction,
        )
        self._target_capacity = config.target_capacity
        for app_config in config.applications:
            # If the application logging config is not set, use the global logging
            # config. (Mutates app_config in place before checkpointing.)
            if app_config.logging_config is None and config.logging_config:
                app_config.logging_config = config.logging_config
            app_config_dict = app_config.dict(exclude_unset=True)
            new_config_checkpoint[app_config.name] = app_config_dict
        # Persist the new goal state before applying it — presumably so a
        # controller restart mid-apply recovers the new config; confirm.
        self.kv_store.put(
            CONFIG_CHECKPOINT_KEY,
            pickle.dumps(
                (
                    deployment_time,
                    self._target_capacity,
                    self._target_capacity_direction,
                    new_config_checkpoint,
                )
            ),
        )
        # Declaratively apply the new set of applications.
        # This will delete any applications no longer in the config that were
        # previously deployed via the REST API.
        self.application_state_manager.apply_app_configs(
            config.applications,
            deployment_time=deployment_time,
            target_capacity=self._target_capacity,
            target_capacity_direction=self._target_capacity_direction,
        )
        self.application_state_manager.save_checkpoint()
def get_deployment_info(self, name: str, app_name: str = "") -> bytes:
"""Get the current information about a deployment.
Args:
name: the name of the deployment.
Returns:
DeploymentRoute's protobuf serialized bytes
Raises:
KeyError: If the deployment doesn't exist.
"""
id = DeploymentID(name=name, app_name=app_name)
deployment_info = self.deployment_state_manager.get_deployment(id)
if deployment_info is None:
app_msg = f" in application '{app_name}'" if app_name else ""
raise KeyError(f"Deployment '{name}' does not exist{app_msg}.")
route = self.endpoint_state.get_endpoint_route(id)
deployment_route = DeploymentRoute(
deployment_info=deployment_info.to_proto(), route=route
)
return deployment_route.SerializeToString()
def list_deployments_internal(
self,
) -> Dict[DeploymentID, Tuple[DeploymentInfo, str]]:
"""Gets the current information about all deployments.
Returns:
Dict(deployment_id, (DeploymentInfo, route))
"""
return {
id: (info, self.endpoint_state.get_endpoint_route(id))
for id, info in self.deployment_state_manager.get_deployment_infos().items()
}
def get_deployment_config(
self, deployment_id: DeploymentID
) -> Optional[DeploymentConfig]:
"""Get the deployment config for the given deployment id.
Args:
deployment_id: The deployment id to get the config for.
Returns:
A deployment config object if the deployment id exist,
None otherwise.
"""
deployment_info = self.deployment_state_manager.get_deployment_infos().get(
deployment_id
)
return deployment_info.deployment_config if deployment_info else None
def list_deployment_ids(self) -> List[DeploymentID]:
"""Gets the current list of all deployments' identifiers."""
return self.deployment_state_manager._deployment_states.keys()
    def update_deployment_replicas(
        self, deployment_id: DeploymentID, target_num_replicas: int
    ) -> None:
        """Update the target number of replicas for a deployment.

        Part of the external scaling API: only allowed when the owning
        application was deployed with ``external_scaler_enabled: true``.

        Args:
            deployment_id: The deployment to update.
            target_num_replicas: The new target number of replicas.

        Raises:
            ValueError: If the application does not exist.
            ExternalScalerDisabledError: If external_scaler_enabled is set to False for the application.
        """
        # Check if external scaler is enabled for this application
        app_name = deployment_id.app_name
        if not self.application_state_manager.does_app_exist(app_name):
            raise ValueError(f"Application '{app_name}' not found")
        if not self.application_state_manager.get_external_scaler_enabled(app_name):
            raise ExternalScalerDisabledError(
                f"Cannot update replicas for deployment '{deployment_id.name}' in "
                f"application '{app_name}'. The external scaling API can only be used "
                f"when 'external_scaler_enabled' is set to true in the application "
                f"configuration. Current value: external_scaler_enabled=false. "
                f"To use this API, redeploy your application with "
                f"'external_scaler_enabled: true' in the config."
            )
        # Validations passed; hand the new target to the deployment state
        # manager, which drives the actual scaling.
        self.deployment_state_manager.set_target_num_replicas(
            deployment_id, target_num_replicas
        )
    def get_serve_instance_details(self, source: Optional[APIType] = None) -> Dict:
        """Gets details on all applications on the cluster and system-level info.

        The information includes application and deployment statuses, config
        options, error messages, etc.

        Args:
            source: If provided, returns application
                statuses for applications matching this API type.
                Defaults to None, which means all applications are returned.

        Returns:
            Dict that follows the format of the schema ServeInstanceDetails.
        """
        http_config = self.get_http_config()
        grpc_config = self.get_grpc_config()
        applications = {}
        app_statuses = self.application_state_manager.list_app_statuses(source=source)
        # If there are no app statuses, there's no point getting the app configs.
        # Moreover, there might be no app statuses because the GCS is down,
        # in which case getting the app configs would fail anyway,
        # since they're stored in the checkpoint in the GCS.
        app_configs = self.get_app_configs() if app_statuses else {}
        # Assemble per-application details from the various state managers.
        for (
            app_name,
            app_status_info,
        ) in app_statuses.items():
            applications[app_name] = ApplicationDetails(
                name=app_name,
                route_prefix=self.application_state_manager.get_route_prefix(app_name),
                docs_path=self.get_docs_path(app_name),
                status=app_status_info.status,
                message=app_status_info.message,
                last_deployed_time_s=app_status_info.deployment_timestamp,
                # This can be none if the app was deployed through
                # serve.run, the app is in deleting state,
                # or a checkpoint hasn't been set yet
                deployed_app_config=app_configs.get(app_name),
                source=self.application_state_manager.get_app_source(app_name),
                deployments=self.application_state_manager.list_deployment_details(
                    app_name
                ),
                external_scaler_enabled=self.application_state_manager.get_external_scaler_enabled(
                    app_name
                ),
                deployment_topology=self.application_state_manager.get_deployment_topology(
                    app_name
                ),
            )
        # NOTE(zcin): We use exclude_unset here because we explicitly and intentionally
        # fill in all info that should be shown to users.
        http_options = HTTPOptionsSchema.parse_obj(http_config.dict(exclude_unset=True))
        grpc_options = gRPCOptionsSchema.parse_obj(grpc_config.dict(exclude_unset=True))
        return ServeInstanceDetails(
            target_capacity=self._target_capacity,
            controller_info=self._actor_details,
            proxy_location=ProxyLocation._from_deployment_mode(http_config.location),
            http_options=http_options,
            grpc_options=grpc_options,
            proxies=(
                self.proxy_state_manager.get_proxy_details()
                if self.proxy_state_manager
                else None
            ),
            applications=applications,
            target_groups=self.get_target_groups(),
        )._get_user_facing_json_serializable_dict(exclude_unset=True)
def get_target_groups(
self,
app_name: Optional[str] = None,
from_proxy_manager: bool = False,
) -> List[TargetGroup]:
"""Target groups contains information about IP
addresses and ports of all proxies in the cluster.
This information is used to setup the load balancer.
"""
target_groups: List[TargetGroup] = []
if self.proxy_state_manager.get_proxy_details():
# setting prefix route to "/" because in ray serve, proxy
# accepts requests from the client and routes them to the
# correct application. This is true for both HTTP and gRPC proxies.
target_groups.append(
TargetGroup(
protocol=RequestProtocol.HTTP,
route_prefix="/",
targets=self.proxy_state_manager.get_targets(RequestProtocol.HTTP),
)
)
if is_grpc_enabled(self.get_grpc_config()):
target_groups.append(
TargetGroup(
protocol=RequestProtocol.GRPC,
route_prefix="/",
targets=self.proxy_state_manager.get_targets(
RequestProtocol.GRPC
),
)
)
return target_groups
def get_serve_status(self, name: str = SERVE_DEFAULT_APP_NAME) -> bytes:
"""Return application status
Args:
name: application name. If application name doesn't exist, app_status
is NOT_STARTED.
"""
app_status = self.application_state_manager.get_app_status_info(name)
deployment_statuses = self.application_state_manager.get_deployments_statuses(
name
)
status_info = StatusOverview(
name=name,
app_status=app_status,
deployment_statuses=deployment_statuses,
)
return status_info.to_proto().SerializeToString()
def get_serve_statuses(self, names: List[str]) -> List[bytes]:
statuses = []
for name in names:
statuses.append(self.get_serve_status(name))
return statuses
def list_serve_statuses(self) -> List[bytes]:
statuses = []
for name in self.application_state_manager.list_app_statuses():
statuses.append(self.get_serve_status(name))
return statuses
def get_app_configs(self) -> Dict[str, ServeApplicationSchema]:
checkpoint = self.kv_store.get(CONFIG_CHECKPOINT_KEY)
if checkpoint is None:
return {}
_, _, _, config_checkpoints_dict = pickle.loads(checkpoint)
return {
app: ServeApplicationSchema.parse_obj(config)
for app, config in config_checkpoints_dict.items()
}
    def get_external_scaler_enabled(self, app_name: str) -> bool:
        """Get the external_scaler_enabled flag value for an application.

        This is a helper method specifically for Java tests to verify the flag
        is correctly set, since Java cannot deserialize Python Pydantic objects.

        Args:
            app_name: Name of the application.

        Returns:
            True if external_scaler_enabled is set for the application, False otherwise.
        """
        # Pure delegation to the application state manager.
        return self.application_state_manager.get_external_scaler_enabled(app_name)
def get_all_deployment_statuses(self) -> List[bytes]:
"""Gets deployment status bytes for all live deployments."""
statuses = self.deployment_state_manager.get_deployment_statuses()
return [status.to_proto().SerializeToString() for status in statuses]
def get_deployment_status(
self, name: str, app_name: str = ""
) -> Union[None, bytes]:
"""Get deployment status by deployment name.
Args:
name: Deployment name.
app_name: Application name. Default is "" because 1.x
deployments go through this API.
"""
id = DeploymentID(name=name, app_name=app_name)
status = self.deployment_state_manager.get_deployment_statuses([id])
if not status:
return None
return status[0].to_proto().SerializeToString()
    def get_docs_path(self, name: str):
        """Return the docs path for an application.

        Currently, this is the OpenAPI docs path for FastAPI-integrated
        applications.
        """
        # Delegates to the application state manager; may be None for apps
        # without docs — confirm against the manager's contract.
        return self.application_state_manager.get_docs_path(name)
    def get_ingress_deployment_name(self, app_name: str) -> Optional[str]:
        """Return the name of the ingress deployment in an application.

        Returns:
            Ingress deployment name (str): if the application exists.
            None: if the application does not exist.
        """
        # Pure delegation to the application state manager.
        return self.application_state_manager.get_ingress_deployment_name(app_name)
def delete_apps(self, names: Iterable[str]):
"""Delete applications based on names
During deletion, the application status is DELETING
"""
for name in names:
self.application_state_manager.delete_app(name)
self.application_state_manager.save_checkpoint()
    def record_request_routing_info(self, info: RequestRoutingInfo):
        """Record replica routing information for a replica.

        Args:
            info: RequestRoutingInfo including deployment name, replica tag,
                multiplex model ids, and routing stats.
        """
        # Forwarded to the deployment state manager, which owns replica state.
        self.deployment_state_manager.record_request_routing_info(info)
    def _get_replica_ranks_mapping(self, deployment_id: DeploymentID) -> Dict[str, int]:
        """Get the current rank mapping for all replicas in a deployment.

        Args:
            deployment_id: The deployment ID to get ranks for.

        Returns:
            Dictionary mapping replica_id to rank.
        """
        # Thin pass-through to the deployment state manager's private helper.
        return self.deployment_state_manager._get_replica_ranks_mapping(deployment_id)
async def graceful_shutdown(self, wait: bool = True):
"""Set the shutting down flag on controller to signal shutdown in
run_control_loop().
This is used to signal to the controller that it should proceed with shutdown
process, so it can shut down gracefully. It also waits until the shutdown
event is triggered if wait is true.
Raises:
RayActorError: if wait is True, the caller waits until the controller
is killed, which raises a RayActorError.
"""
self._shutting_down = True
if not wait:
return
# This event never gets set. The caller waits indefinitely on this event
# until the controller is killed, which raises a RayActorError.
await self._shutdown_event.wait()
    def _get_logging_config(self) -> Tuple:
        """Get the logging configuration (for testing purposes).

        Returns:
            (global_logging_config, log_file_path) — log_file_path is None if
            no MemoryHandler is attached; if several are attached, the last
            one's target file wins.
        """
        log_file_path = None
        for handler in logger.handlers:
            if isinstance(handler, logging.handlers.MemoryHandler):
                # MemoryHandler wraps a file handler; read its target's path.
                log_file_path = handler.target.baseFilename
        return self.global_logging_config, log_file_path
    def _get_target_capacity_direction(self) -> Optional[TargetCapacityDirection]:
        """Gets the controller's scale direction (for testing purposes)."""
        # Read-only accessor over the cached direction computed in
        # apply_config() / recovered from checkpoint.
        return self._target_capacity_direction
def calculate_target_capacity_direction(
    curr_config: Optional[ServeDeploySchema],
    new_config: ServeDeploySchema,
    # Annotation fixed: callers pass the previous direction (see
    # _get_target_capacity_direction / apply_config), not a float.
    curr_target_capacity_direction: Optional[TargetCapacityDirection],
) -> Optional[TargetCapacityDirection]:
    """Compares two Serve configs to calculate the next scaling direction.

    When the app sets match, the direction follows the target_capacity delta
    (unchanged value keeps the previous direction; dropping to None clears
    it). When the app sets differ, any target_capacity in the new config is
    treated as the start of a scale-up.
    """
    curr_target_capacity = None
    next_target_capacity_direction = None
    if curr_config is not None and applications_match(curr_config, new_config):
        curr_target_capacity = curr_config.target_capacity
        next_target_capacity = new_config.target_capacity
        if curr_target_capacity == next_target_capacity:
            # No change: keep whatever direction was in effect.
            next_target_capacity_direction = curr_target_capacity_direction
        elif curr_target_capacity is None and next_target_capacity is not None:
            # target_capacity is scaling down from None to a number.
            next_target_capacity_direction = TargetCapacityDirection.DOWN
        elif next_target_capacity is None:
            next_target_capacity_direction = None
        elif curr_target_capacity < next_target_capacity:
            next_target_capacity_direction = TargetCapacityDirection.UP
        else:
            next_target_capacity_direction = TargetCapacityDirection.DOWN
    elif new_config.target_capacity is not None:
        # A config with different apps has been applied, and it contains a
        # target_capacity. Serve must start scaling this config up.
        next_target_capacity_direction = TargetCapacityDirection.UP
    else:
        next_target_capacity_direction = None
    return next_target_capacity_direction
def applications_match(config1: ServeDeploySchema, config2: ServeDeploySchema) -> bool:
    """Checks whether the applications in config1 and config2 match.

    Two configs match when they declare exactly the same set of application
    names, regardless of order or per-app settings.
    """

    def _app_names(config: ServeDeploySchema) -> set:
        return {app.name for app in config.applications}

    return _app_names(config1) == _app_names(config2)
def log_target_capacity_change(
    curr_target_capacity: Optional[float],
    next_target_capacity: Optional[float],
    next_target_capacity_direction: Optional[TargetCapacityDirection],
):
    """Logs changes in the target_capacity (no-op when it is unchanged)."""
    if curr_target_capacity == next_target_capacity:
        return
    if isinstance(next_target_capacity_direction, TargetCapacityDirection):
        logger.info(
            "Target capacity scaling "
            f"{next_target_capacity_direction.value.lower()} "
            f"from {curr_target_capacity} to {next_target_capacity}."
        )
    else:
        logger.info("Target capacity entering 100% at steady state.")
| ServeController |
python | gevent__gevent | src/gevent/testing/testcase.py | {
"start": 4197,
"end": 5144
} | class ____(object):
"""
Assertions dealing with strings.
"""
@LazyOnClass
def HEX_NUM_RE(self):
import re
return re.compile('-?0x[0123456789abcdef]+L?', re.I)
def normalize_addr(self, s, replace='X'):
# https://github.com/PyCQA/pylint/issues/1127
return self.HEX_NUM_RE.sub(replace, s) # pylint:disable=no-member
def normalize_module(self, s, module=None, replace='module'):
if module is None:
module = type(self).__module__
return s.replace(module, replace)
def normalize(self, s):
return self.normalize_module(self.normalize_addr(s))
def assert_nstr_endswith(self, o, val):
s = str(o)
n = self.normalize(s)
self.assertTrue(n.endswith(val), (s, n))
def assert_nstr_startswith(self, o, val):
s = str(o)
n = self.normalize(s)
self.assertTrue(n.startswith(val), (s, n))
| StringAssertMixin |
python | openai__openai-python | src/openai/types/beta/assistant_create_params.py | {
"start": 8239,
"end": 8377
} | class ____(TypedDict, total=False):
code_interpreter: ToolResourcesCodeInterpreter
file_search: ToolResourcesFileSearch
| ToolResources |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/operate/configuration/env_vars_and_secrets/resources_v2.py | {
"start": 23,
"end": 319
} | class ____(dg.ConfigurableResource): ...
@dg.definitions
def defs() -> dg.Definitions:
return dg.Definitions(
# highlight-start
resources={
"some_resource": SomeResource(access_token=dg.EnvVar("MY_ACCESS_TOKEN"))
},
# highlight-end
)
| SomeResource |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/redshift_datasource.py | {
"start": 1580,
"end": 3810
} | class ____(SQLDatasource):
"""Adds a Redshift datasource to the data context using psycopg2.
Args:
name: The name of this Redshift datasource.
connection_string: The SQLAlchemy connection string used to connect to the Redshift database
For example:
"redshift+psycopg2://user:password@host.amazonaws.com:5439/database?sslmode=sslmode".
assets: An optional dictionary whose keys are TableAsset or QueryAsset names and whose
values are TableAsset or QueryAsset objects.
"""
type: Literal["redshift"] = "redshift" # type: ignore[assignment] # This is a hardcoded constant
connection_string: Union[ConfigStr, dict, RedshiftConnectionDetails, RedshiftDsn] # type: ignore[assignment] # Deviation from parent class as individual args are supported for connection
@validator("connection_string", pre=True)
def _build_connection_string_from_connection_details(
cls, connection_string: dict | RedshiftConnectionDetails | str
) -> str:
"""
If a dict or RedshiftConnectionDetails object is provided, construct the formatted
connection string.
"""
if isinstance(connection_string, str):
return connection_string
if isinstance(connection_string, dict):
connection_details = RedshiftConnectionDetails(**connection_string)
elif isinstance(connection_string, RedshiftConnectionDetails):
connection_details = connection_string
else:
raise TypeError("Invalid connection_string type: ", type(connection_string)) # noqa: TRY003
connection_string = f"redshift+psycopg2://{connection_details.user}:{connection_details.password}@{connection_details.host}:{connection_details.port}/{connection_details.database}?sslmode={connection_details.sslmode.value}"
if connection_details.schema_:
connection_string += f"&options=-csearch_path%3D{connection_details.schema_}"
return connection_string
@property
@override
def execution_engine_type(self) -> Type[SqlAlchemyExecutionEngine]:
"""Returns the default execution engine type."""
return RedshiftExecutionEngine
| RedshiftDatasource |
python | jina-ai__jina | tests/integration/gateway_streamer/test_gateway_streamer.py | {
"start": 571,
"end": 6038
} | class ____(Executor):
@requests
def foo(self, docs, parameters, **kwargs):
text_to_add = parameters.get('text_to_add', 'default ')
for doc in docs:
doc.text += text_to_add
def _create_worker_runtime(port, uses, name=''):
args = _generate_pod_args()
args.port = [port]
args.name = name
args.uses = uses
with AsyncNewLoopRuntime(args, req_handler_cls=WorkerRequestHandler) as runtime:
runtime.run_forever()
def _setup(pod0_port, pod1_port):
pod0_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod0_port, 'StreamerTestExecutor')
)
pod0_process.start()
pod1_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod1_port, 'StreamerTestExecutor')
)
pod1_process.start()
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod0_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod1_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return pod0_process, pod1_process
@pytest.mark.parametrize(
'parameters, target_executor, expected_text',
[ # (None, None, 'default default '),
({'pod0__text_to_add': 'param_pod0 '}, None, 'param_pod0 default '),
(None, 'pod1', 'default '),
({'pod0__text_to_add': 'param_pod0 '}, 'pod0', 'param_pod0 '),
],
)
@pytest.mark.parametrize('results_in_order', [False, True])
@pytest.mark.asyncio
async def test_custom_gateway(
port_generator, parameters, target_executor, expected_text, results_in_order
):
pod0_port = port_generator()
pod1_port = port_generator()
pod0_process, pod1_process = _setup(pod0_port, pod1_port)
graph_description = {
"start-gateway": ["pod0"],
"pod0": ["pod1"],
"pod1": ["end-gateway"],
}
pod_addresses = {"pod0": [f"0.0.0.0:{pod0_port}"], "pod1": [f"0.0.0.0:{pod1_port}"]}
# send requests to the gateway
gateway_streamer = GatewayStreamer(
graph_representation=graph_description, executor_addresses=pod_addresses
)
try:
input_da = DocumentArray.empty(60)
resp = DocumentArray.empty(0)
num_resp = 0
async for r in gateway_streamer.stream_docs(
docs=input_da,
request_size=10,
parameters=parameters,
target_executor=target_executor,
results_in_order=results_in_order,
):
num_resp += 1
resp.extend(r)
assert num_resp == 6
assert len(resp) == 60
for doc in resp:
assert doc.text == expected_text
request = DataRequest()
request.data.docs = DocumentArray.empty(60)
unary_response = await gateway_streamer.process_single_data(request=request)
assert len(unary_response.docs) == 60
except Exception:
assert False
finally: # clean up runtimes
pod0_process.terminate()
pod1_process.terminate()
pod0_process.join()
pod1_process.join()
await gateway_streamer.close()
@pytest.mark.asyncio
@pytest.mark.parametrize('return_results', [False, True])
async def test_gateway_stream_executor_error(port_generator, return_results):
pod_port = port_generator()
da = DocumentArray(
[
Document(text='Request0'),
Document(text='Request1'),
Document(text='Request2'),
]
)
@dataclass
class TestExecutor(Executor):
counter = 0
@requests
def foo(self, docs, parameters, **kwargs):
self.counter += 1
if self.counter % 2 == 0:
raise ValueError('custom exception')
pod_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod_port, 'TestExecutor')
)
pod_process.start()
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
graph_description = {
"start-gateway": ["pod0"],
"pod0": ["end-gateway"],
}
pod_addresses = {"pod0": [f"0.0.0.0:{pod_port}"]}
# send requests to the gateway
gateway_streamer = GatewayStreamer(
graph_representation=graph_description, executor_addresses=pod_addresses
)
try:
responses = []
errors = []
async for response, error in gateway_streamer.stream(
docs=da, request_size=1, return_results=return_results
):
responses.append(response)
if error:
errors.append(error)
assert len(errors) == 1
error = errors[0]
assert type(error) == ExecutorError
assert error.name == 'ValueError'
assert error.args == ['custom exception']
assert error.executor == 'TestExecutor'
if return_results:
assert all([isinstance(response, Request) for response in responses])
else:
assert all([isinstance(response, DocumentArray) for response in responses])
for index, result_da in enumerate(responses):
assert da[index] == result_da[0]
finally:
pod_process.terminate()
pod_process.join()
await gateway_streamer.close()
| StreamerTestExecutor |
python | tensorflow__tensorflow | tensorflow/python/util/function_utils_test.py | {
"start": 8356,
"end": 9565
} | class ____(test.TestCase):
def testWithSimpleFunction(self):
code = function_utils.get_func_code(silly_example_function)
self.assertIsNotNone(code)
self.assertRegex(code.co_filename, 'function_utils_test.py')
def testWithClassMethod(self):
code = function_utils.get_func_code(self.testWithClassMethod)
self.assertIsNotNone(code)
self.assertRegex(code.co_filename, 'function_utils_test.py')
def testWithCallableClass(self):
callable_instance = SillyCallableClass()
code = function_utils.get_func_code(callable_instance)
self.assertIsNotNone(code)
self.assertRegex(code.co_filename, 'function_utils_test.py')
def testWithLambda(self):
anon_fn = lambda x: x
code = function_utils.get_func_code(anon_fn)
self.assertIsNotNone(code)
self.assertRegex(code.co_filename, 'function_utils_test.py')
def testWithFunctoolsPartial(self):
partial = functools.partial(silly_example_function)
code = function_utils.get_func_code(partial)
self.assertIsNone(code)
def testRaisesWithNonCallableObject(self):
with self.assertRaises(ValueError):
function_utils.get_func_code(None)
if __name__ == '__main__':
test.main()
| GetFuncCodeTest |
python | django__django | tests/defer/models.py | {
"start": 192,
"end": 418
} | class ____(models.Model):
name = models.CharField(max_length=50)
value = models.CharField(max_length=50)
related = models.ForeignKey(Secondary, models.CASCADE)
def __str__(self):
return self.name
| Primary |
python | realpython__materials | python-wav-files/waveio/metadata.py | {
"start": 101,
"end": 434
} | class ____:
encoding: PCMEncoding
frames_per_second: float
num_channels: int
num_frames: int | None = None
@property
def num_seconds(self):
if self.num_frames is None:
raise ValueError("indeterminate stream of audio frames")
return self.num_frames / self.frames_per_second
| WAVMetadata |
python | pydata__xarray | xarray/tests/test_accessor_dt.py | {
"start": 9192,
"end": 22958
} | class ____:
@pytest.fixture(autouse=True)
def setup(self):
nt = 100
data = np.random.rand(10, 10, nt)
lons = np.linspace(0, 11, 10)
lats = np.linspace(0, 20, 10)
self.times = pd.timedelta_range(start="1 day", freq="6h", periods=nt)
self.data = xr.DataArray(
data,
coords=[lons, lats, self.times],
dims=["lon", "lat", "time"],
name="data",
)
self.times_arr = np.random.choice(self.times, size=(10, 10, nt))
self.times_data = xr.DataArray(
self.times_arr,
coords=[lons, lats, self.times],
dims=["lon", "lat", "time"],
name="data",
)
def test_not_datetime_type(self) -> None:
nontime_data = self.data.copy()
int_data = np.arange(len(self.data.time)).astype("int8")
nontime_data = nontime_data.assign_coords(time=int_data)
with pytest.raises(AttributeError, match=r"dt"):
_ = nontime_data.time.dt
@pytest.mark.parametrize(
"field", ["days", "seconds", "microseconds", "nanoseconds"]
)
def test_field_access(self, field) -> None:
expected = xr.DataArray(
getattr(self.times, field), name=field, coords=[self.times], dims=["time"]
)
actual = getattr(self.data.time.dt, field)
assert_equal(expected, actual)
@pytest.mark.parametrize(
"method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")]
)
def test_accessor_methods(self, method, parameters) -> None:
dates = pd.timedelta_range(start="1 day", end="30 days", freq="6h")
xdates = xr.DataArray(dates, dims=["time"])
expected = getattr(dates, method)(parameters)
actual = getattr(xdates.dt, method)(parameters)
assert_array_equal(expected, actual)
@requires_dask
@pytest.mark.parametrize(
"field", ["days", "seconds", "microseconds", "nanoseconds"]
)
def test_dask_field_access(self, field) -> None:
import dask.array as da
expected = getattr(self.times_data.dt, field)
dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50))
dask_times_2d = xr.DataArray(
dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data"
)
with raise_if_dask_computes():
actual = getattr(dask_times_2d.dt, field)
assert isinstance(actual.data, da.Array)
assert_chunks_equal(actual, dask_times_2d)
assert_equal(actual, expected)
@requires_dask
@pytest.mark.parametrize(
"method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")]
)
def test_dask_accessor_method(self, method, parameters) -> None:
import dask.array as da
expected = getattr(self.times_data.dt, method)(parameters)
dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50))
dask_times_2d = xr.DataArray(
dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data"
)
with raise_if_dask_computes():
actual = getattr(dask_times_2d.dt, method)(parameters)
assert isinstance(actual.data, da.Array)
assert_chunks_equal(actual, dask_times_2d)
assert_equal(actual.compute(), expected.compute())
_NT = 100
@pytest.fixture(params=_CFTIME_CALENDARS)
def calendar(request):
return request.param
@pytest.fixture
def cftime_date_type(calendar):
if calendar == "standard":
calendar = "proleptic_gregorian"
return _all_cftime_date_types()[calendar]
@pytest.fixture
def times(calendar):
import cftime
return cftime.num2date(
np.arange(_NT),
units="hours since 2000-01-01",
calendar=calendar,
only_use_cftime_datetimes=True,
)
@pytest.fixture
def data(times):
data = np.random.rand(10, 10, _NT)
lons = np.linspace(0, 11, 10)
lats = np.linspace(0, 20, 10)
return xr.DataArray(
data, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data"
)
@pytest.fixture
def times_3d(times):
lons = np.linspace(0, 11, 10)
lats = np.linspace(0, 20, 10)
times_arr = np.random.choice(times, size=(10, 10, _NT))
return xr.DataArray(
times_arr, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data"
)
@requires_cftime
@pytest.mark.parametrize(
"field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
def test_field_access(data, field) -> None:
result = getattr(data.time.dt, field)
expected = xr.DataArray(
getattr(xr.coding.cftimeindex.CFTimeIndex(data.time.values), field),
name=field,
coords=data.time.coords,
dims=data.time.dims,
)
assert_equal(result, expected)
@requires_cftime
def test_calendar_cftime(data) -> None:
expected = data.time.values[0].calendar
assert data.time.dt.calendar == expected
def test_calendar_datetime64_2d() -> None:
data = xr.DataArray(np.zeros((4, 5), dtype="datetime64[ns]"), dims=("x", "y"))
assert data.dt.calendar == "proleptic_gregorian"
@requires_dask
def test_calendar_datetime64_3d_dask() -> None:
import dask.array as da
data = xr.DataArray(
da.zeros((4, 5, 6), dtype="datetime64[ns]"), dims=("x", "y", "z")
)
with raise_if_dask_computes():
assert data.dt.calendar == "proleptic_gregorian"
@requires_dask
@requires_cftime
def test_calendar_dask_cftime() -> None:
from cftime import num2date
# 3D lazy dask
data = xr.DataArray(
num2date(
np.random.randint(1, 1000000, size=(4, 5, 6)),
"hours since 1970-01-01T00:00",
calendar="noleap",
),
dims=("x", "y", "z"),
).chunk()
with raise_if_dask_computes(max_computes=2):
assert data.dt.calendar == "noleap"
@requires_cftime
def test_isocalendar_cftime(data) -> None:
with pytest.raises(
AttributeError, match=r"'CFTimeIndex' object has no attribute 'isocalendar'"
):
data.time.dt.isocalendar()
@requires_cftime
def test_date_cftime(data) -> None:
with pytest.raises(
AttributeError,
match=r"'CFTimeIndex' object has no attribute `date`. Consider using the floor method instead, for instance: `.time.dt.floor\('D'\)`.",
):
data.time.dt.date()
@requires_cftime
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_cftime_strftime_access(data) -> None:
"""compare cftime formatting against datetime formatting"""
date_format = "%Y%m%d%H"
result = data.time.dt.strftime(date_format)
datetime_array = xr.DataArray(
xr.coding.cftimeindex.CFTimeIndex(data.time.values).to_datetimeindex(
time_unit="ns"
),
name="stftime",
coords=data.time.coords,
dims=data.time.dims,
)
expected = datetime_array.dt.strftime(date_format)
assert_equal(result, expected)
@requires_cftime
@requires_dask
@pytest.mark.parametrize(
"field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
def test_dask_field_access_1d(data, field) -> None:
import dask.array as da
expected = xr.DataArray(
getattr(xr.coding.cftimeindex.CFTimeIndex(data.time.values), field),
name=field,
dims=["time"],
)
times = xr.DataArray(data.time.values, dims=["time"]).chunk({"time": 50})
result = getattr(times.dt, field)
assert isinstance(result.data, da.Array)
assert result.chunks == times.chunks
assert_equal(result.compute(), expected)
@requires_cftime
@requires_dask
@pytest.mark.parametrize(
"field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
def test_dask_field_access(times_3d, data, field) -> None:
import dask.array as da
expected = xr.DataArray(
getattr(
xr.coding.cftimeindex.CFTimeIndex(times_3d.values.ravel()), field
).reshape(times_3d.shape),
name=field,
coords=times_3d.coords,
dims=times_3d.dims,
)
times_3d = times_3d.chunk({"lon": 5, "lat": 5, "time": 50})
result = getattr(times_3d.dt, field)
assert isinstance(result.data, da.Array)
assert result.chunks == times_3d.chunks
assert_equal(result.compute(), expected)
@requires_cftime
def test_seasons(cftime_date_type) -> None:
dates = xr.DataArray(
np.array([cftime_date_type(2000, month, 15) for month in range(1, 13)])
)
seasons = xr.DataArray(
[
"DJF",
"DJF",
"MAM",
"MAM",
"MAM",
"JJA",
"JJA",
"JJA",
"SON",
"SON",
"SON",
"DJF",
]
)
assert_array_equal(seasons.values, dates.dt.season.values)
@pytest.fixture
def cftime_rounding_dataarray(cftime_date_type):
return xr.DataArray(
[
[cftime_date_type(1, 1, 1, 1), cftime_date_type(1, 1, 1, 15)],
[cftime_date_type(1, 1, 1, 23), cftime_date_type(1, 1, 2, 1)],
]
)
@requires_cftime
@requires_dask
@pytest.mark.parametrize("use_dask", [False, True])
def test_cftime_floor_accessor(
cftime_rounding_dataarray, cftime_date_type, use_dask
) -> None:
import dask.array as da
freq = "D"
expected = xr.DataArray(
[
[cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 1, 0)],
[cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 2, 0)],
],
name="floor",
)
if use_dask:
chunks = {"dim_0": 1}
# Currently a compute is done to inspect a single value of the array
# if it is of object dtype to check if it is a cftime.datetime (if not
# we raise an error when using the dt accessor).
with raise_if_dask_computes(max_computes=1):
result = cftime_rounding_dataarray.chunk(chunks).dt.floor(freq)
expected = expected.chunk(chunks)
assert isinstance(result.data, da.Array)
assert result.chunks == expected.chunks
else:
result = cftime_rounding_dataarray.dt.floor(freq)
assert_identical(result, expected)
@requires_cftime
@requires_dask
@pytest.mark.parametrize("use_dask", [False, True])
def test_cftime_ceil_accessor(
cftime_rounding_dataarray, cftime_date_type, use_dask
) -> None:
import dask.array as da
freq = "D"
expected = xr.DataArray(
[
[cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 2, 0)],
[cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 3, 0)],
],
name="ceil",
)
if use_dask:
chunks = {"dim_0": 1}
# Currently a compute is done to inspect a single value of the array
# if it is of object dtype to check if it is a cftime.datetime (if not
# we raise an error when using the dt accessor).
with raise_if_dask_computes(max_computes=1):
result = cftime_rounding_dataarray.chunk(chunks).dt.ceil(freq)
expected = expected.chunk(chunks)
assert isinstance(result.data, da.Array)
assert result.chunks == expected.chunks
else:
result = cftime_rounding_dataarray.dt.ceil(freq)
assert_identical(result, expected)
@requires_cftime
@requires_dask
@pytest.mark.parametrize("use_dask", [False, True])
def test_cftime_round_accessor(
cftime_rounding_dataarray, cftime_date_type, use_dask
) -> None:
import dask.array as da
freq = "D"
expected = xr.DataArray(
[
[cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 2, 0)],
[cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 2, 0)],
],
name="round",
)
if use_dask:
chunks = {"dim_0": 1}
# Currently a compute is done to inspect a single value of the array
# if it is of object dtype to check if it is a cftime.datetime (if not
# we raise an error when using the dt accessor).
with raise_if_dask_computes(max_computes=1):
result = cftime_rounding_dataarray.chunk(chunks).dt.round(freq)
expected = expected.chunk(chunks)
assert isinstance(result.data, da.Array)
assert result.chunks == expected.chunks
else:
result = cftime_rounding_dataarray.dt.round(freq)
assert_identical(result, expected)
@pytest.mark.parametrize(
"use_cftime",
[False, pytest.param(True, marks=requires_cftime)],
ids=lambda x: f"use_cftime={x}",
)
@pytest.mark.parametrize(
"use_dask",
[False, pytest.param(True, marks=requires_dask)],
ids=lambda x: f"use_dask={x}",
)
def test_decimal_year(use_cftime, use_dask) -> None:
year = 2000
periods = 10
freq = "h"
shape = (2, 5)
dims = ["x", "y"]
hours_in_year = 24 * 366
times = xr.date_range(f"{year}", periods=periods, freq=freq, use_cftime=use_cftime)
da = xr.DataArray(times.values.reshape(shape), dims=dims)
if use_dask:
da = da.chunk({"y": 2})
# Computing the decimal year for a cftime datetime array requires a
# number of small computes (6):
# - 4x one compute per .dt accessor call (requires inspecting one
# object-dtype array element to see if it is time-like)
# - 2x one compute per calendar inference (requires inspecting one
# array element to read off the calendar)
max_computes = 6 * use_cftime
with raise_if_dask_computes(max_computes=max_computes):
result = da.dt.decimal_year
else:
result = da.dt.decimal_year
expected = xr.DataArray(
year + np.arange(periods).reshape(shape) / hours_in_year, dims=dims
)
xr.testing.assert_equal(result, expected)
| TestTimedeltaAccessor |
python | doocs__leetcode | solution/3300-3399/3376.Minimum Time to Break Locks I/Solution.py | {
"start": 0,
"end": 480
} | class ____:
def findMinimumTime(self, strength: List[int], K: int) -> int:
@cache
def dfs(i: int) -> int:
if i == (1 << len(strength)) - 1:
return 0
cnt = i.bit_count()
x = 1 + cnt * K
ans = inf
for j, s in enumerate(strength):
if i >> j & 1 ^ 1:
ans = min(ans, dfs(i | 1 << j) + (s + x - 1) // x)
return ans
return dfs(0)
| Solution |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 102772,
"end": 104206
} | class ____(system_info):
section = 'agg2'
dir_env_var = 'AGG2'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['agg2*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
src_dir = d
break
if not src_dir:
return
if sys.platform == 'win32':
agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
'win32', 'agg_win32_bmp.cpp'))
else:
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
'X11',
'agg_platform_support.cpp')]
info = {'libraries':
[('agg2_src',
{'sources': agg2_srcs,
'include_dirs': [os.path.join(src_dir, 'include')],
}
)],
'include_dirs': [os.path.join(src_dir, 'include')],
}
if info:
self.set_info(**info)
return
| agg2_info |
python | neetcode-gh__leetcode | python/0044-wildcard-matching.py | {
"start": 0,
"end": 484
} | class ____(object):
def isMatch(self, s, p):
dp = [[False] * (len(p) + 1) for _ in range(len(s) + 1)]
dp[-1][-1] = True
for i in range(len(s), -1, -1):
for j in range(len(p) - 1, -1, -1):
if p[j] == '*':
dp[i][j] = dp[i][j + 1] or (i < len(s) and dp[i + 1][j])
else:
dp[i][j] = i < len(s) and (p[j] == s[i] or p[j] == '?') and dp[i + 1][j + 1]
return dp[0][0]
| Solution |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_privacy_urls.py | {
"start": 20271,
"end": 20835
} | class ____(PrivateUserProfileMixin, TestCase):
def setUp(self):
super().setUp()
self.response_data.update(
{
"/accounts/login/": {"status_code": 302},
# The test user doesn't have a GitHub account, so it's redirected to the home page.
"/accounts/migrate-to-github-app/": {"status_code": 302},
}
)
def login(self):
return self.client.login(username="tester", password="test")
def is_admin(self):
return False
| PrivateUserProfileUserAccessTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/outputs.py | {
"start": 2382,
"end": 3802
} | class ____(
NamedTuple(
"_StepOutput",
[
("node_handle", NodeHandle),
("name", str),
("dagster_type_key", str),
("properties", StepOutputProperties),
],
)
):
"""Holds the information for an ExecutionStep to process its outputs."""
def __new__(
cls,
node_handle: NodeHandle,
name: str,
dagster_type_key: str,
properties: StepOutputProperties,
):
return super().__new__(
cls,
node_handle=check.inst_param(node_handle, "node_handle", NodeHandle),
name=check.str_param(name, "name"),
dagster_type_key=check.str_param(dagster_type_key, "dagster_type_key"),
properties=check.inst_param(properties, "properties", StepOutputProperties),
)
@property
def is_required(self) -> bool:
return self.properties.is_required
@property
def is_dynamic(self) -> bool:
return self.properties.is_dynamic
@property
def is_asset(self) -> bool:
return self.properties.is_asset
@property
def asset_key(self) -> Optional[AssetKey]:
if not self.is_asset:
return None
return self.properties.asset_key
@whitelist_for_serdes(
storage_field_names={"metadata": "metadata_entries"},
field_serializers={"metadata": MetadataFieldSerializer},
)
| StepOutput |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qconv_test.py | {
"start": 1277,
"end": 2807
} | class ____(op_bench.TorchBenchmarkBase):
# def init(self, N, IC, OC, H, W, G, kernel, stride, pad):
def init(self, IC, OC, kernel, stride, N, H, W, G, pad, device):
# super().init(N, IC, OC, (H, W), G, (kernel, kernel), stride, pad)
self.scale = 1.0 / 255
self.zero_point = 0
X = torch.randn(N, IC, H, W, dtype=torch.float32)
qX = torch.quantize_per_tensor(
X, scale=self.scale, zero_point=self.zero_point, dtype=torch.quint8
)
# Convert the tensor to NHWC format
W = torch.randn(OC, IC // G, kernel, kernel, dtype=torch.float32)
self.qW = torch.quantize_per_tensor(
W, scale=self.scale, zero_point=0, dtype=torch.qint8
)
self.inputs = {"input": qX}
self.qconv2d = nnq.Conv2d(IC, OC, kernel, stride=stride, padding=pad, groups=G)
self.qconv2d.set_weight_bias(self.qW, None)
self.qconv2d.scale = torch.tensor(self.scale, dtype=torch.double)
self.qconv2d.zero_point = torch.tensor(self.zero_point, dtype=torch.int)
self.set_module_name("QConv2d")
def forward(self, input):
return self.qconv2d(input)
op_bench.generate_pt_test(
configs.remove_cuda(configs.conv_1d_configs_short + configs.conv_1d_configs_long),
QConv1dBenchmark,
)
op_bench.generate_pt_test(
configs.remove_cuda(configs.conv_2d_configs_short + configs.conv_2d_configs_long),
QConv2dBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| QConv2dBenchmark |
python | django__django | django/db/models/fields/related.py | {
"start": 36360,
"end": 50560
} | class ____(ForeignObject):
"""
Provide a many-to-one relation by adding a column to the local model
to hold the remote value.
By default ForeignKey will target the pk of the remote model but this
behavior can be changed by using the ``to_field`` argument.
"""
descriptor_class = ForeignKeyDeferredAttribute
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
rel_class = ManyToOneRel
empty_strings_allowed = False
default_error_messages = {
"invalid": _(
"%(model)s instance with %(field)s %(value)r is not a valid choice."
)
}
description = _("Foreign Key (type determined by related field)")
def __init__(
self,
to,
on_delete,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
to_field=None,
db_constraint=True,
**kwargs,
):
try:
to._meta.model_name
except AttributeError:
if not isinstance(to, str):
raise TypeError(
"%s(%r) is invalid. First parameter to ForeignKey must be "
"either a model, a model name, or the string %r"
% (
self.__class__.__name__,
to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
else:
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if not callable(on_delete):
raise TypeError("on_delete must be callable.")
kwargs["rel"] = self.rel_class(
self,
to,
to_field,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
kwargs.setdefault("db_index", True)
super().__init__(
to,
on_delete,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT],
to_fields=[to_field],
**kwargs,
)
self.db_constraint = db_constraint
def __class_getitem__(cls, *args, **kwargs):
return cls
def check(self, **kwargs):
databases = kwargs.get("databases") or []
return [
*super().check(**kwargs),
*self._check_on_delete(databases),
*self._check_unique(),
]
def _check_on_delete_db_support(self, on_delete, feature_flag, databases):
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if feature_flag in self.model._meta.required_db_features or getattr(
connection.features, feature_flag
):
continue
no_db_option_name = on_delete.__name__.removeprefix("DB_")
yield checks.Error(
f"{connection.display_name} does not support a {on_delete.__name__}.",
hint=f"Change the on_delete rule to {no_db_option_name}.",
obj=self,
id="fields.E324",
)
def _check_on_delete(self, databases):
on_delete = getattr(self.remote_field, "on_delete", None)
errors = []
if on_delete == DB_CASCADE:
errors.extend(
self._check_on_delete_db_support(
on_delete, "supports_on_delete_db_cascade", databases
)
)
if on_delete == DB_SET_NULL:
errors.extend(
self._check_on_delete_db_support(
on_delete, "supports_on_delete_db_null", databases
)
)
if on_delete in [DB_SET_NULL, SET_NULL] and not self.null:
errors.append(
checks.Error(
f"Field specifies on_delete={on_delete.__name__}, but cannot be "
"null.",
hint=(
"Set null=True argument on the field, or change the on_delete "
"rule."
),
obj=self,
id="fields.E320",
)
)
elif on_delete == SET_DEFAULT and not self.has_default():
errors.append(
checks.Error(
"Field specifies on_delete=SET_DEFAULT, but has no default value.",
hint="Set a default value, or change the on_delete rule.",
obj=self,
id="fields.E321",
)
)
elif on_delete == DB_SET_DEFAULT:
if self.db_default is NOT_PROVIDED:
errors.append(
checks.Error(
"Field specifies on_delete=DB_SET_DEFAULT, but has "
"no db_default value.",
hint="Set a db_default value, or change the on_delete rule.",
obj=self,
id="fields.E322",
)
)
errors.extend(
self._check_on_delete_db_support(
on_delete, "supports_on_delete_db_default", databases
)
)
if not isinstance(self.remote_field.model, str) and on_delete != DO_NOTHING:
# Database and Python variants cannot be mixed in a chain of
# model references.
is_db_on_delete = isinstance(on_delete, DatabaseOnDelete)
ref_model_related_fields = (
ref_model_field.remote_field
for ref_model_field in self.remote_field.model._meta.get_fields()
if ref_model_field.related_model
and hasattr(ref_model_field.remote_field, "on_delete")
)
for ref_remote_field in ref_model_related_fields:
if (
ref_remote_field.on_delete is not None
and ref_remote_field.on_delete != DO_NOTHING
and isinstance(ref_remote_field.on_delete, DatabaseOnDelete)
is not is_db_on_delete
):
on_delete_type = "database" if is_db_on_delete else "Python"
ref_on_delete_type = "Python" if is_db_on_delete else "database"
errors.append(
checks.Error(
f"Field specifies {on_delete_type}-level on_delete "
"variant, but referenced model uses "
f"{ref_on_delete_type}-level variant.",
hint=(
"Use either database or Python on_delete variants "
"uniformly in the references chain."
),
obj=self,
id="fields.E323",
)
)
break
return errors
def _check_unique(self, **kwargs):
return (
[
checks.Warning(
"Setting unique=True on a ForeignKey has the same effect as using "
"a OneToOneField.",
hint=(
"ForeignKey(unique=True) is usually better served by a "
"OneToOneField."
),
obj=self,
id="fields.W342",
)
]
if self.unique
else []
)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["to_fields"]
del kwargs["from_fields"]
# Handle the simpler arguments
if self.db_index:
del kwargs["db_index"]
else:
kwargs["db_index"] = False
if self.db_constraint is not True:
kwargs["db_constraint"] = self.db_constraint
# Rel needs more work.
to_meta = getattr(self.remote_field.model, "_meta", None)
if self.remote_field.field_name and (
not to_meta
or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)
):
kwargs["to_field"] = self.remote_field.field_name
return name, path, args, kwargs
def to_python(self, value):
return self.target_field.to_python(value)
@property
def target_field(self):
return self.foreign_related_fields[0]
def validate(self, value, model_instance):
if self.remote_field.parent_link:
return
super().validate(value, model_instance)
if value is None:
return
using = router.db_for_read(self.remote_field.model, instance=model_instance)
qs = self.remote_field.model._base_manager.using(using).filter(
**{self.remote_field.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={
"model": self.remote_field.model._meta.verbose_name,
"pk": value,
"field": self.remote_field.field_name,
"value": value,
}, # 'pk' is included for backwards compatibility
)
def resolve_related_fields(self):
related_fields = super().resolve_related_fields()
for from_field, to_field in related_fields:
if (
to_field
and to_field.model != self.remote_field.model._meta.concrete_model
):
raise exceptions.FieldError(
"'%s.%s' refers to field '%s' which is not local to model "
"'%s'."
% (
self.model._meta.label,
self.name,
to_field.name,
self.remote_field.model._meta.concrete_model._meta.label,
)
)
return related_fields
def get_attname(self):
return "%s_id" % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_default(self):
"""Return the to_field if the default value is an object."""
field_default = super().get_default()
if isinstance(field_default, self.remote_field.model):
return getattr(field_default, self.target_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value is None or (
value == ""
and (
not self.target_field.empty_strings_allowed
or connection.features.interprets_empty_strings_as_nulls
)
):
return None
else:
return self.target_field.get_db_prep_save(value, connection=connection)
def get_db_prep_value(self, value, connection, prepared=False):
return self.target_field.get_db_prep_value(value, connection, prepared)
def get_prep_value(self, value):
return self.target_field.get_prep_value(value)
def contribute_to_related_class(self, cls, related):
super().contribute_to_related_class(cls, related)
if self.remote_field.field_name is None:
self.remote_field.field_name = cls._meta.pk.name
def formfield(self, *, using=None, **kwargs):
if isinstance(self.remote_field.model, str):
raise ValueError(
"Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet"
% (self.name, self.remote_field.model)
)
return super().formfield(
**{
"form_class": forms.ModelChoiceField,
"queryset": self.remote_field.model._default_manager.using(using),
"to_field_name": self.remote_field.field_name,
**kwargs,
"blank": self.blank,
}
)
def db_check(self, connection):
return None
def db_type(self, connection):
return self.target_field.rel_db_type(connection=connection)
def cast_db_type(self, connection):
return self.target_field.cast_db_type(connection=connection)
def db_parameters(self, connection):
target_db_parameters = self.target_field.db_parameters(connection)
return {
"type": self.db_type(connection),
"check": self.db_check(connection),
"collation": target_db_parameters.get("collation"),
}
def convert_empty_strings(self, value, expression, connection):
if (not value) and isinstance(value, str):
return None
return value
def get_db_converters(self, connection):
converters = super().get_db_converters(connection)
if connection.features.interprets_empty_strings_as_nulls:
converters += [self.convert_empty_strings]
return converters
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self.target_field
while isinstance(output_field, ForeignKey):
output_field = output_field.target_field
if output_field is self:
raise ValueError("Cannot resolve output_field.")
return super().get_col(alias, output_field)
| ForeignKey |
python | getsentry__sentry | tests/sentry/utils/test_committers.py | {
"start": 9835,
"end": 20446
} | class ____(CommitTestCase):
"""Tests for the GroupOwner-based committers functionality."""
def setUp(self) -> None:
super().setUp()
self.group = self.create_group(project=self.project, message="Kaboom!")
def test_with_scm_based_groupowner(self) -> None:
"""Test that SCM-based GroupOwner returns expected commit data."""
event = self.store_event(
data={"message": "Kaboom!", "platform": "python"}, project_id=self.project.id
)
assert event.group is not None
# Create commit author and commit with SCM strategy
author = self.create_commit_author()
commit = self.create_commit(author=author)
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user.id,
context={
"commitId": commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
},
)
result = get_serialized_event_file_committers(self.project, event)
assert len(result) == 1
assert "commits" in result[0]
assert len(result[0]["commits"]) == 1
assert result[0]["commits"][0]["id"] == commit.key
assert result[0]["commits"][0]["suspectCommitType"] == "via SCM integration"
group_owner = GroupOwner.objects.get(
group_id=event.group.id, type=GroupOwnerType.SUSPECT_COMMIT.value
)
assert result[0]["group_owner_id"] == group_owner.id
def test_with_release_based_groupowner(self) -> None:
"""Test that release-based GroupOwner returns expected commit data."""
event = self.store_event(
data={"message": "Kaboom!", "platform": "python"}, project_id=self.project.id
)
assert event.group is not None
# Create commit author and commit with release strategy
author = self.create_commit_author()
commit = self.create_commit(author=author)
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user.id,
context={
"commitId": commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.RELEASE_BASED,
},
)
result = get_serialized_event_file_committers(self.project, event)
assert len(result) == 1
assert "commits" in result[0]
assert len(result[0]["commits"]) == 1
assert result[0]["commits"][0]["id"] == commit.key
assert result[0]["commits"][0]["suspectCommitType"] == "via commit in release"
group_owner = GroupOwner.objects.get(
group_id=event.group.id, type=GroupOwnerType.SUSPECT_COMMIT.value
)
assert result[0]["group_owner_id"] == group_owner.id
def test_with_multiple_groupowners(self) -> None:
"""Test that multiple GroupOwners return the most recent one only."""
event = self.store_event(
data={"message": "Kaboom!", "platform": "python"}, project_id=self.project.id
)
assert event.group is not None
# Create multiple commits with authors
author1 = self.create_commit_author()
author2 = self.create_commit_author()
commit1 = self.create_commit(author=author1)
commit2 = self.create_commit(author=author2)
# Create first GroupOwner (older)
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user.id,
context={"commitId": commit1.id},
date_added=timezone.now() - timedelta(hours=2),
)
# Create second GroupOwner (newer)
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user.id,
context={"commitId": commit2.id},
date_added=timezone.now() - timedelta(hours=1),
)
result = get_serialized_event_file_committers(self.project, event)
# Should return the most recent one only
assert len(result) == 1
assert result[0]["commits"][0]["id"] == commit2.key
# Check group_owner_id matches the most recent GroupOwner
most_recent_group_owner = (
GroupOwner.objects.filter(
group_id=event.group.id, type=GroupOwnerType.SUSPECT_COMMIT.value
)
.order_by("-date_added")
.first()
)
assert most_recent_group_owner is not None
assert result[0]["group_owner_id"] == most_recent_group_owner.id
def test_no_groupowners(self) -> None:
"""Test that no GroupOwners returns empty list."""
event = self.store_event(
data={"message": "Kaboom!", "platform": "python"}, project_id=self.project.id
)
result = get_serialized_event_file_committers(self.project, event)
assert len(result) == 0
def test_groupowner_without_commit_id(self) -> None:
"""Test that GroupOwner without commitId returns empty list."""
event = self.store_event(
data={"message": "Kaboom!", "platform": "python"}, project_id=self.project.id
)
assert event.group is not None
# Create GroupOwner without commitId in context
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user.id,
context={"someOtherData": "value"},
)
result = get_serialized_event_file_committers(self.project, event)
assert len(result) == 0
def test_groupowner_with_nonexistent_commit(self) -> None:
"""Test that GroupOwner with non-existent commit returns empty list."""
event = self.store_event(
data={"message": "Kaboom!", "platform": "python"}, project_id=self.project.id
)
assert event.group is not None
# Create GroupOwner with non-existent commit ID
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user.id,
context={"commitId": 99999}, # Non-existent commit ID
)
result = get_serialized_event_file_committers(self.project, event)
assert len(result) == 0
def test_groupowner_with_commit_without_author(self) -> None:
"""Test that GroupOwner with commit that has no author returns empty list."""
event = self.store_event(
data={"message": "Kaboom!", "platform": "python"}, project_id=self.project.id
)
assert event.group is not None
# Create commit without author
commit = self.create_commit(author=None)
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user.id,
context={"commitId": commit.id},
)
result = get_serialized_event_file_committers(self.project, event)
assert len(result) == 0
def test_event_without_group_id(self) -> None:
"""Test that event without group_id returns empty list."""
event = Mock()
event.group_id = None
result = get_serialized_event_file_committers(self.project, event)
assert len(result) == 0
def test_non_suspect_commit_groupowners_ignored(self) -> None:
"""Test that non-suspect-commit GroupOwners are ignored."""
event = self.store_event(
data={"message": "Kaboom!", "platform": "python"}, project_id=self.project.id
)
assert event.group is not None
# Create GroupOwner with different type (ownership rule)
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.OWNERSHIP_RULE.value,
user_id=self.user.id,
context={"rule": "path:*.py"},
)
result = get_serialized_event_file_committers(self.project, event)
assert len(result) == 0
def test_display_logic_with_no_user_groupowner(self) -> None:
"""Test _get_serialized_committers_from_group_owners handles user_id=None correctly."""
group = self.create_group(project=self.project)
# Create commit with external author (no Sentry user)
author = self.create_commit_author(name="External Dev", email="external@example.com")
commit = self.create_commit(author=author)
# Create GroupOwner with user_id=None
GroupOwner.objects.create(
group_id=group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=None, # No Sentry user mapping
context={
"commitId": commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
},
)
result = _get_serialized_committers_from_group_owners(self.project, group.id)
assert result is not None
assert len(result) == 1
# Should use commit author data, not Sentry user data
author = result[0]["author"]
assert author is not None
assert author["email"] == "external@example.com"
assert author["name"] == "External Dev"
assert "username" not in author # No Sentry user data
assert "id" not in author # No Sentry user data
assert result[0]["commits"][0]["id"] == commit.key
assert result[0]["commits"][0]["suspectCommitType"] == "via SCM integration"
group_owner = GroupOwner.objects.get(
group_id=group.id, type=GroupOwnerType.SUSPECT_COMMIT.value
)
assert result[0]["group_owner_id"] == group_owner.id
| GetSerializedEventFileCommitters |
python | pennersr__django-allauth | allauth/account/internal/flows/login_by_code.py | {
"start": 817,
"end": 4911
} | class ____(AbstractCodeVerificationProcess):
def __init__(self, stage):
self.stage = stage
self.request = stage.request
super().__init__(
state=stage.state,
timeout=app_settings.LOGIN_BY_CODE_TIMEOUT,
max_attempts=app_settings.LOGIN_BY_CODE_MAX_ATTEMPTS,
user=stage.login.user,
)
def finish(self, redirect_url: Optional[str]):
email = self.state.get("email")
phone = self.state.get("phone")
user = self.user
record_authentication(
self.request, user, method="code", email=email, phone=phone
)
if email:
verify_email_indirectly(self.request, user, email)
if phone:
verify_phone_indirectly(self.request, user, phone)
if self.state["initiated_by_user"]:
# Just requesting a login code does is not considered to be a real login,
# yet, is needed in order to make the stage machinery work. Now that we've
# completed the code, let's start a real login.
login = Login(
user=user,
redirect_url=redirect_url,
email=email,
)
return perform_login(self.request, login)
else:
return self.stage.exit()
def abort(self):
clear_login(self.request)
def persist(self):
stash_login(self.request, self.stage.login)
def send(self):
email = self.state.get("email")
phone = self.state.get("phone")
if email:
self.send_by_email(email)
elif phone:
self.send_by_phone(phone)
else:
raise ValueError()
def send_by_phone(self, phone):
adapter = get_adapter()
if self.user:
code = adapter._generate_phone_verification_code_compat(
user=self.user, phone=phone
)
adapter.send_verification_code_sms(user=self.user, phone=phone, code=code)
self.state["code"] = code
else:
if self.stage.login.signup:
adapter.send_account_already_exists_sms(phone)
else:
adapter.send_unknown_account_sms(phone)
self.add_sent_message({"recipient": phone, "phone": phone})
def send_by_email(self, email):
adapter = get_adapter()
if not self.user:
if self.stage.login.signup:
adapter.send_account_already_exists_mail(email)
else:
send_unknown_account_mail(self.request, email)
else:
code = adapter.generate_login_code()
context = {
"request": self.request,
"code": code,
}
adapter.send_mail("account/email/login_code", email, context)
self.state["code"] = code
self.add_sent_message({"email": email, "recipient": email})
def add_sent_message(self, context):
get_adapter().add_message(
self.request,
messages.SUCCESS,
"account/messages/login_code_sent.txt",
context,
)
@classmethod
def initiate(
cls,
*,
request,
user,
email: Optional[str] = None,
phone: Optional[str] = None,
stage=None,
):
initial_state = cls.initial_state(user=user, email=email, phone=phone)
initial_state["initiated_by_user"] = stage is None
if not stage:
login = Login(user=user, email=email)
login.state["stages"] = {"current": "login_by_code"}
stage = LoginByCodeStage(
LoginStageController(request, login), request, login
)
stage.state.update(initial_state)
process = LoginCodeVerificationProcess(stage=stage)
process.send()
process.persist()
return process
@classmethod
def resume(cls, stage):
process = LoginCodeVerificationProcess(stage=stage)
return process.abort_if_invalid()
| LoginCodeVerificationProcess |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 114670,
"end": 114723
} | class ____(MaskAlign):
_expr_cls = Where
| WhereAlign |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec10.py | {
"start": 203,
"end": 591
} | class ____(Protocol):
_lock: RLock
S = TypeVar("S", bound=HasLock)
P = ParamSpec("P")
R = TypeVar("R")
def with_lock(func: Callable[Concatenate[S, P], R]) -> Callable[Concatenate[S, P], R]:
@functools.wraps(func)
def wrapper(self: S, *args: P.args, **kwargs: P.kwargs) -> R:
with self._lock:
return func(self, *args, **kwargs)
return wrapper
| HasLock |
python | dask__distributed | distributed/actor.py | {
"start": 568,
"end": 7027
} | class ____(TaskRef):
"""Controls an object on a remote worker
An actor allows remote control of a stateful object living on a remote
worker. Method calls on this object trigger operations on the remote
object and return BaseActorFutures on which we can block to get results.
Examples
--------
>>> class Counter:
... def __init__(self):
... self.n = 0
... def increment(self):
... self.n += 1
... return self.n
>>> from dask.distributed import Client
>>> client = Client()
You can create an actor by submitting a class with the keyword
``actor=True``.
>>> future = client.submit(Counter, actor=True)
>>> counter = future.result()
>>> counter
<Actor: Counter, key=Counter-1234abcd>
Calling methods on this object immediately returns deferred ``BaseActorFuture``
objects. You can call ``.result()`` on these objects to block and get the
result of the function call.
>>> future = counter.increment()
>>> future.result()
1
>>> future = counter.increment()
>>> future.result()
2
"""
def __init__(self, cls, address, key, worker=None):
super().__init__(key)
self._cls = cls
self._address = address
self._key = key
self._future = None
self._worker = worker
self._client = None
self._try_bind_worker_client()
def _try_bind_worker_client(self):
if not self._worker:
try:
self._worker = get_worker()
except ValueError:
self._worker = None
if not self._client:
try:
self._client = get_client()
self._future = Future(self._key, self._client)
# ^ When running on a worker, only hold a weak reference to the key, otherwise the key could become unreleasable.
except ValueError:
self._client = None
def __repr__(self):
return f"<Actor: {self._cls.__name__}, key={self.key}>"
def __reduce__(self):
return (Actor, (self._cls, self._address, self.key))
@property
def _io_loop(self):
if self._worker is None and self._client is None:
self._try_bind_worker_client()
if self._worker:
return self._worker.loop
else:
return self._client.loop
@property
def _scheduler_rpc(self):
if self._worker is None and self._client is None:
self._try_bind_worker_client()
if self._worker:
return self._worker.scheduler
else:
return self._client.scheduler
@property
def _worker_rpc(self):
if self._worker is None and self._client is None:
self._try_bind_worker_client()
if self._worker:
return self._worker.rpc(self._address)
else:
if self._client.direct_to_workers:
return self._client.rpc(self._address)
else:
return ProxyRPC(self._client.scheduler, self._address)
@property
def _asynchronous(self):
if self._client:
return self._client.asynchronous
else:
return threading.get_ident() == self._worker.thread_id
def _sync(self, func, *args, **kwargs):
if self._client:
return self._client.sync(func, *args, **kwargs)
else:
if self._asynchronous:
return func(*args, **kwargs)
return sync(self._worker.loop, func, *args, **kwargs)
def __dir__(self):
o = set(dir(type(self)))
o.update(attr for attr in dir(self._cls) if not attr.startswith("_"))
return sorted(o)
def __getattr__(self, key):
if self._future and self._future.status not in ("finished", "pending"):
raise RuntimeError(
"Worker holding Actor was lost. Status: " + self._future.status
)
self._try_bind_worker_client()
if (
self._worker
and self._worker.address == self._address
and getattr(thread_state, "actor", False)
):
# actor calls actor on same worker
actor = self._worker.actors[self.key]
attr = getattr(actor, key)
if iscoroutinefunction(attr):
return attr
elif callable(attr):
return lambda *args, **kwargs: EagerActorFuture(attr(*args, **kwargs))
else:
return attr
attr = getattr(self._cls, key)
if callable(attr):
@functools.wraps(attr)
def func(*args, **kwargs):
async def run_actor_function_on_worker():
try:
result = await self._worker_rpc.actor_execute(
function=key,
actor=self.key,
args=[to_serialize(arg) for arg in args],
kwargs={k: to_serialize(v) for k, v in kwargs.items()},
)
except OSError:
if self._future and not self._future.done():
await self._future
return await run_actor_function_on_worker()
else:
exc = OSError("Unable to contact Actor's worker")
return _Error(exc)
if result["status"] == "OK":
return _OK(result["result"])
return _Error(result["exception"])
actor_future = ActorFuture(io_loop=self._io_loop)
async def wait_then_set_result():
actor_future._set_result(await run_actor_function_on_worker())
self._io_loop.add_callback(wait_then_set_result)
return actor_future
return func
else:
async def get_actor_attribute_from_worker():
x = await self._worker_rpc.actor_attribute(
attribute=key, actor=self.key
)
if x["status"] == "OK":
return x["result"]
else:
raise x["exception"]
return self._sync(get_actor_attribute_from_worker)
@property
def client(self):
return self._future.client
| Actor |
python | getsentry__sentry | tests/sentry/db/models/fields/test_jsonfield.py | {
"start": 782,
"end": 3282
} | class ____(models.Model):
json = JSONField(default=default)
class Meta:
app_label = "fixtures"
def test_json_field() -> None:
obj = JSONFieldTestModel(
json="""{
"spam": "eggs"
}"""
)
assert obj.json == {"spam": "eggs"}
def test_json_field_empty() -> None:
obj = JSONFieldTestModel(json="")
assert obj.json is None
def test_db_prep_value() -> None:
field = JSONField("test")
field.set_attributes_from_name("json")
assert field.get_db_prep_value(None, connection=None) is None
assert '{"spam":"eggs"}' == field.get_db_prep_value({"spam": "eggs"}, connection=None)
def test_formfield() -> None:
field = JSONField("test")
field.set_attributes_from_name("json")
formfield = field.formfield()
assert formfield is not None
assert type(formfield) is forms.CharField
assert type(formfield.widget) is forms.Textarea
def test_formfield_clean_blank() -> None:
field = JSONField("test")
formfield = field.formfield()
assert formfield is not None
with pytest.raises(forms.ValidationError) as excinfo:
formfield.clean(value="")
assert excinfo.value.message == formfield.error_messages["required"]
def test_formfield_clean_none() -> None:
field = JSONField("test")
formfield = field.formfield()
assert formfield is not None
with pytest.raises(forms.ValidationError) as excinfo:
formfield.clean(value=None)
assert excinfo.value.message == formfield.error_messages["required"]
def test_formfield_null_and_blank_clean_blank() -> None:
field = JSONField("test", null=True, blank=True)
formfield = field.formfield()
assert formfield is not None
assert formfield.clean(value="") == ""
def test_formfield_blank_clean_blank() -> None:
field = JSONField("test", null=False, blank=True)
formfield = field.formfield()
assert formfield is not None
assert formfield.clean(value="") == ""
def test_mutable_default_checking() -> None:
obj1 = JSONFieldWithDefaultTestModel()
obj2 = JSONFieldWithDefaultTestModel()
obj1.json["foo"] = "bar"
assert "foo" not in obj2.json
def test_invalid_json() -> None:
obj = JSONFieldTestModel()
obj.json = '{"foo": 2}'
assert "foo" in obj.json
with pytest.raises(forms.ValidationError):
obj.json = '{"foo"}'
def test_invalid_json_default() -> None:
with pytest.raises(ValueError):
JSONField("test", default='{"foo"}')
| CallableDefaultModel |
python | google__pytype | pytype/tools/analyze_project/pytype_runner_test.py | {
"start": 8002,
"end": 10873
} | class ____(TestBase):
"""Test PytypeRunner.set_custom_options."""
def setUp(self):
super().setUp()
self.conf = self.parser.config_from_defaults()
def assertFlags(self, flags, expected_flags):
# Add temporary flags that are set to true by default here, so that they are
# filtered out of tests.
temporary_flags = set()
self.assertEqual(flags - temporary_flags, expected_flags)
# --disable tests a flag with a string value.
def test_disable(self):
self.conf.disable = ['import-error', 'name-error']
runner = make_runner([], [], self.conf)
flags_with_values = {}
runner.set_custom_options(flags_with_values, set(), self.conf.report_errors)
self.assertEqual(flags_with_values['--disable'], 'import-error,name-error')
def test_no_disable(self):
self.conf.disable = []
runner = make_runner([], [], self.conf)
flags_with_values = {}
runner.set_custom_options(flags_with_values, set(), self.conf.report_errors)
self.assertFalse(flags_with_values)
# The purpose of the following --no-report-errors tests is to test a generic
# binary flag with a custom to_command_line. These tests do not reflect actual
# error-reporting behavior; for that, see TestGetRunCmd.test_error_reporting.
def test_report_errors(self):
self.conf.report_errors = True
runner = make_runner([], [], self.conf)
binary_flags = {'--no-report-errors'}
runner.set_custom_options({}, binary_flags, True)
self.assertFlags(binary_flags, set())
def test_no_report_errors(self):
self.conf.report_errors = False
runner = make_runner([], [], self.conf)
binary_flags = set()
runner.set_custom_options({}, binary_flags, True)
self.assertFlags(binary_flags, {'--no-report-errors'})
def test_report_errors_default(self):
self.conf.report_errors = True
runner = make_runner([], [], self.conf)
binary_flags = set()
runner.set_custom_options({}, binary_flags, True)
self.assertFlags(binary_flags, set())
# --protocols tests a binary flag whose value is passed through transparently.
def test_protocols(self):
self.conf.protocols = True
runner = make_runner([], [], self.conf)
binary_flags = set()
runner.set_custom_options({}, binary_flags, self.conf.report_errors)
self.assertFlags(binary_flags, {'--protocols'})
def test_no_protocols(self):
self.conf.protocols = False
runner = make_runner([], [], self.conf)
binary_flags = {'--protocols'}
runner.set_custom_options({}, binary_flags, self.conf.report_errors)
self.assertFlags(binary_flags, set())
def test_no_protocols_default(self):
self.conf.protocols = False
runner = make_runner([], [], self.conf)
binary_flags = set()
runner.set_custom_options({}, binary_flags, self.conf.report_errors)
self.assertFlags(binary_flags, set())
| TestCustomOptions |
python | doocs__leetcode | lcof2/剑指 Offer II 109. 开密码锁/Solution.py | {
"start": 0,
"end": 1061
} | class ____:
def openLock(self, deadends: List[str], target: str) -> int:
s = set(deadends)
if target in s or '0000' in s:
return -1
if target == '0000':
return 0
def prev(c):
return '9' if c == '0' else str(int(c) - 1)
def next(c):
return '0' if c == '9' else str(int(c) + 1)
def get(t):
res = []
t = list(t)
for i in range(4):
c = t[i]
t[i] = prev(c)
res.append(''.join(t))
t[i] = next(c)
res.append(''.join(t))
t[i] = c
return res
visited = set()
q = deque([('0000', 0)])
while q:
status, step = q.popleft()
for t in get(status):
if t in visited or t in s:
continue
if t == target:
return step + 1
q.append((t, step + 1))
visited.add(t)
return -1
| Solution |
python | getsentry__sentry | tests/sentry/dynamic_sampling/tasks/test_boost_low_volume_projects.py | {
"start": 11141,
"end": 14368
} | class ____(TestCase):
def test_partition_by_measure_with_spans_feature(self) -> None:
org = self.create_organization("test-org1")
with self.options(
{
"dynamic-sampling.check_span_feature_flag": True,
"dynamic-sampling.measure.spans": [org.id],
}
):
result = partition_by_measure([org.id])
assert SamplingMeasure.SPANS in result
assert SamplingMeasure.TRANSACTIONS in result
assert result[SamplingMeasure.SPANS] == [org.id]
assert result[SamplingMeasure.TRANSACTIONS] == []
def test_partition_by_measure_without_spans_feature(self) -> None:
org = self.create_organization("test-org1")
with self.options(
{
"dynamic-sampling.check_span_feature_flag": True,
"dynamic-sampling.measure.spans": [],
}
):
result = partition_by_measure([org.id])
assert SamplingMeasure.SPANS in result
assert SamplingMeasure.TRANSACTIONS in result
assert result[SamplingMeasure.SPANS] == []
assert result[SamplingMeasure.TRANSACTIONS] == [org.id]
def test_partition_by_measure_with_span_feature_flag_disabled(self) -> None:
org = self.create_organization("test-org1")
with self.options(
{
"dynamic-sampling.check_span_feature_flag": False,
"dynamic-sampling.measure.spans": [org.id],
}
):
result = partition_by_measure([org.id])
assert SamplingMeasure.TRANSACTIONS in result
assert SamplingMeasure.SPANS not in result
assert result[SamplingMeasure.TRANSACTIONS] == [org.id]
def test_partition_by_measure_returns_sorted_output_multiple_orgs(self) -> None:
orgs = [self.create_organization(f"test-org{i}") for i in range(10)]
org_ids = [org.id for org in reversed(orgs)]
with self.options(
{
"dynamic-sampling.check_span_feature_flag": True,
"dynamic-sampling.measure.spans": [orgs[2].id, orgs[7].id, orgs[5].id],
}
):
result = partition_by_measure(org_ids)
assert result[SamplingMeasure.SPANS] == sorted([orgs[2].id, orgs[7].id, orgs[5].id])
expected_transaction_orgs = sorted(
[org.id for org in orgs if org.id not in [orgs[2].id, orgs[7].id, orgs[5].id]]
)
assert result[SamplingMeasure.TRANSACTIONS] == expected_transaction_orgs
def test_partition_by_measure_returns_sorted_when_feature_disabled(self) -> None:
org1 = self.create_organization("test-org1")
org2 = self.create_organization("test-org2")
org3 = self.create_organization("test-org3")
org_ids = [org3.id, org1.id, org2.id]
with self.options(
{
"dynamic-sampling.check_span_feature_flag": False,
}
):
result = partition_by_measure(org_ids)
assert result[SamplingMeasure.TRANSACTIONS] == sorted(org_ids)
assert SamplingMeasure.SPANS not in result
| TestPartitionByMeasure |
python | getsentry__sentry | tests/sentry/web/frontend/test_js_sdk_loader.py | {
"start": 333,
"end": 15304
} | class ____(TestCase):
@pytest.fixture(autouse=True)
def set_settings(self) -> None:
settings.JS_SDK_LOADER_SDK_VERSION = "0.5.2"
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = (
"https://s3.amazonaws.com/getsentry-cdn/@sentry/browser/%s/bundle.min.js"
)
@cached_property
def path(self) -> str:
return reverse("sentry-js-sdk-loader", args=[self.projectkey.public_key])
def test_noop_no_pub_key(self) -> None:
resp = self.client.get(reverse("sentry-js-sdk-loader", args=["abc"]))
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader-noop.js.tmpl")
def test_noop(self) -> None:
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = ""
resp = self.client.get(reverse("sentry-js-sdk-loader", args=[self.projectkey.public_key]))
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader-noop.js.tmpl")
def test_no_replace(self) -> None:
settings.JS_SDK_LOADER_SDK_VERSION = "0.5.2"
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = (
"https://s3.amazonaws.com/getsentry-cdn/@sentry/browser/0.0.0/bundle.min.js"
)
resp = self.client.get(reverse("sentry-js-sdk-loader", args=[self.projectkey.public_key]))
assert resp.status_code == 200
assert settings.JS_SDK_LOADER_DEFAULT_SDK_URL.encode("utf-8") in resp.content
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
def test_renders_js_loader(self) -> None:
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert self.projectkey.public_key.encode("utf-8") in resp.content
assert b"bundle.min.js" in resp.content
def test_minified(self) -> None:
resp = self.client.get(self.path)
assert resp.status_code == 200
min_resp = self.client.get(
reverse("sentry-js-sdk-loader", args=[self.projectkey.public_key, ".min"])
)
assert min_resp.status_code == 200
self.assertTemplateUsed(min_resp, "sentry/js-sdk-loader.min.js.tmpl")
assert self.projectkey.public_key.encode("utf-8") in min_resp.content
assert b"bundle.min.js" in min_resp.content
assert len(resp.content) > len(min_resp.content)
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file", return_value=["6.19.7", "7.0.0"]
)
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="6.x"
)
def test_less_than_v7_returns_es6(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
self.projectkey.data = {}
self.projectkey.save()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/6.19.7/bundle.min.js" in resp.content
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file", return_value=["6.19.7", "7.0.0"]
)
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="7.x"
)
def test_equal_to_v7_returns_es5(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
self.projectkey.data = {}
self.projectkey.save()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/7.0.0/bundle.es5.min.js" in resp.content
@mock.patch("sentry.loader.browsersdkversion.load_version_from_file", return_value=["7.3.15"])
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="7.x"
)
def test_greater_than_v7_returns_es5(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
self.projectkey.data = {}
self.projectkey.save()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/7.3.15/bundle.es5.min.js" in resp.content
@mock.patch("sentry.loader.browsersdkversion.load_version_from_file", return_value=["7.37.0"])
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="7.x"
)
def test_returns_es6_with_defaults(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/7.37.0/bundle.tracing.replay.min.js" in resp.content
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file",
return_value=["8.1.0", "7.1.0", "7.0.1", "6.1.0"],
)
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="latest"
)
def test_returns_latest_pre_v8_version_when_latest_is_selected(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/7.1.0/bundle.tracing.replay.min.js" in resp.content
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file",
return_value=["9.1.0", "8.1.0", "6.1.0", "5.0.0"],
)
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="latest"
)
def test_returns_latest_pre_v8_version_when_latest_is_selected_with_no_available_v7_version(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/6.1.0/bundle.min.js" in resp.content
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file",
return_value=["8.1.0", "8.0.0", "8", "8.0.0-alpha.0", "7.100.0", "6.1.0", "5.0.0"],
)
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="latest"
)
def test_returns_latest_pre_v8_version_when_latest_is_selected_various_v8_versions_available(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/7.100.0/bundle.tracing.replay.min.js" in resp.content
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file",
return_value=["8.0.0"],
)
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="8.x"
)
def test_equal_to_v8_returns_default_bundle(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
self.projectkey.data = {}
self.projectkey.save()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/8.0.0/bundle.min.js" in resp.content
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file",
return_value=["8.1.0", "8.0.0", "8", "8.0.0-alpha.0"],
)
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="8.x"
)
def test_returns_latest_v8_version_when_various_v8_versions_available(
self, load_version_from_file, get_selected_browser_sdk_version
):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
self.projectkey.data = {}
self.projectkey.save()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert b"/8.1.0/bundle.min.js" in resp.content
@mock.patch("sentry.loader.browsersdkversion.load_version_from_file", return_value=["7.37.0"])
@mock.patch(
"sentry.loader.browsersdkversion.get_selected_browser_sdk_version", return_value="7.x"
)
def test_bundle_kind_modifiers(
self, load_version_from_file: MagicMock, get_selected_browser_sdk_version: MagicMock
) -> None:
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = "https://browser.sentry-cdn.com/%s/bundle%s.min.js"
settings.JS_SDK_LOADER_SDK_VERSION = "7.32.0"
dsn = self.projectkey.get_dsn(public=True)
for data, expected_bundle, expected_options in [
(
{
"dynamicSdkLoaderOptions": {
DynamicSdkLoaderOption.HAS_PERFORMANCE.value: True,
}
},
b"/7.37.0/bundle.tracing.es5.min.js",
{"dsn": dsn, "tracesSampleRate": 1},
),
(
{
"dynamicSdkLoaderOptions": {
DynamicSdkLoaderOption.HAS_DEBUG.value: True,
}
},
b"/7.37.0/bundle.es5.debug.min.js",
{"dsn": dsn, "debug": True},
),
(
{
"dynamicSdkLoaderOptions": {
DynamicSdkLoaderOption.HAS_REPLAY.value: True,
}
},
b"/7.37.0/bundle.replay.min.js",
{"dsn": dsn, "replaysSessionSampleRate": 0.1, "replaysOnErrorSampleRate": 1},
),
(
{
"dynamicSdkLoaderOptions": {
DynamicSdkLoaderOption.HAS_PERFORMANCE.value: True,
DynamicSdkLoaderOption.HAS_REPLAY.value: True,
}
},
b"/7.37.0/bundle.tracing.replay.min.js",
{
"dsn": dsn,
"tracesSampleRate": 1,
"replaysSessionSampleRate": 0.1,
"replaysOnErrorSampleRate": 1,
},
),
(
{
"dynamicSdkLoaderOptions": {
DynamicSdkLoaderOption.HAS_REPLAY.value: True,
DynamicSdkLoaderOption.HAS_DEBUG.value: True,
}
},
b"/7.37.0/bundle.replay.debug.min.js",
{
"dsn": dsn,
"replaysSessionSampleRate": 0.1,
"replaysOnErrorSampleRate": 1,
"debug": True,
},
),
(
{
"dynamicSdkLoaderOptions": {
DynamicSdkLoaderOption.HAS_PERFORMANCE.value: True,
DynamicSdkLoaderOption.HAS_DEBUG.value: True,
}
},
b"/7.37.0/bundle.tracing.es5.debug.min.js",
{"dsn": dsn, "tracesSampleRate": 1, "debug": True},
),
(
{
"dynamicSdkLoaderOptions": {
DynamicSdkLoaderOption.HAS_PERFORMANCE.value: True,
DynamicSdkLoaderOption.HAS_DEBUG.value: True,
DynamicSdkLoaderOption.HAS_REPLAY.value: True,
}
},
b"/7.37.0/bundle.tracing.replay.debug.min.js",
{
"dsn": dsn,
"tracesSampleRate": 1,
"replaysSessionSampleRate": 0.1,
"replaysOnErrorSampleRate": 1,
"debug": True,
},
),
]:
self.projectkey.data = data
self.projectkey.save()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/js-sdk-loader.js.tmpl")
assert expected_bundle in resp.content
for key in expected_options:
# Convert to e.g. "option_name": 0.1
single_option = {key: expected_options[key]}
assert json.dumps(single_option)[1:-1].encode() in resp.content
self.projectkey.data = {}
self.projectkey.save()
@patch("sentry.loader.browsersdkversion.load_version_from_file")
def test_headers(self, mock_load_version_from_file: MagicMock) -> None:
# We want to always load the major version here since otherwise we fall back to
# the default value which isn't correct.
mocked_version = "4.9.9"
mock_load_version_from_file.return_value = [mocked_version]
resp = self.client.get(self.path)
assert resp.status_code == 200, resp
assert "*" in resp["Access-Control-Allow-Origin"]
assert "stale-if-error" in resp["Cache-Control"]
assert "stale-while-revalidate" in resp["Cache-Control"]
assert "s-maxage" in resp["Cache-Control"]
assert "max-age" in resp["Cache-Control"]
assert "project/%s" % self.projectkey.project_id in resp["Surrogate-Key"]
assert "sdk/" in resp["Surrogate-Key"]
assert "sdk-loader" in resp["Surrogate-Key"]
assert "Content-Encoding" not in resp
assert "Set-Cookie" not in resp
assert "Vary" not in resp, f"Found Vary header: {resp['Vary']}"
def test_absolute_url(self) -> None:
assert (
reverse("sentry-js-sdk-loader", args=[self.projectkey.public_key, ".min"])
in self.projectkey.js_sdk_loader_cdn_url
)
settings.JS_SDK_LOADER_CDN_URL = "https://js.sentry-cdn.com/"
assert (
"https://js.sentry-cdn.com/%s.min.js" % self.projectkey.public_key
) == self.projectkey.js_sdk_loader_cdn_url
| JavaScriptSdkLoaderTest |
python | chroma-core__chroma | chromadb/errors.py | {
"start": 3078,
"end": 4018
} | class ____(ChromaError):
@overrides
def code(self) -> int:
return 400
@classmethod
@overrides
def name(cls) -> str:
return "QuotaError"
error_types: Dict[str, Type[ChromaError]] = {
"InvalidDimension": InvalidDimensionException,
"InvalidArgumentError": InvalidArgumentError,
"IDAlreadyExists": IDAlreadyExistsError,
"DuplicateID": DuplicateIDError,
"InvalidUUID": InvalidUUIDError,
"InvalidHTTPVersion": InvalidHTTPVersion,
"AuthorizationError": AuthorizationError,
"NotFoundError": NotFoundError,
"BatchSizeExceededError": BatchSizeExceededError,
"VersionMismatchError": VersionMismatchError,
"RateLimitError": RateLimitError,
"AuthError": ChromaAuthError,
"UniqueConstraintError": UniqueConstraintError,
"QuotaError": QuotaError,
"InternalError": InternalError,
# Catch-all for any other errors
"ChromaError": ChromaError,
}
| QuotaError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.