| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py
|
{
"start": 77132,
"end": 81059
}
|
class ____:
@mock.patch("google.cloud.aiplatform.datasets.VideoDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook, mock_dataset):
mock_hook.return_value.create_auto_ml_video_training_job.return_value = (None, "training_id")
with pytest.warns(AirflowProviderDeprecationWarning):
op = CreateAutoMLVideoTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
prediction_type="classification",
model_type="CLOUD",
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
parent_model=TEST_PARENT_MODEL,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_dataset.assert_called_once_with(dataset_name=TEST_DATASET_ID)
mock_hook.return_value.create_auto_ml_video_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
parent_model=TEST_PARENT_MODEL,
prediction_type="classification",
model_type="CLOUD",
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
test_fraction_split=None,
training_filter_split=None,
test_filter_split=None,
model_display_name=None,
model_labels=None,
sync=True,
is_default_version=None,
model_version_aliases=None,
model_version_description=None,
)
@mock.patch("google.cloud.aiplatform.datasets.VideoDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute__parent_model_version_index_is_removed(self, mock_hook, mock_dataset):
mock_hook.return_value.create_auto_ml_video_training_job.return_value = (None, "training_id")
with pytest.warns(AirflowProviderDeprecationWarning):
op = CreateAutoMLVideoTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
prediction_type="classification",
model_type="CLOUD",
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
parent_model=VERSIONED_TEST_PARENT_MODEL,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.return_value.create_auto_ml_video_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
parent_model=TEST_PARENT_MODEL,
prediction_type="classification",
model_type="CLOUD",
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
test_fraction_split=None,
training_filter_split=None,
test_filter_split=None,
model_display_name=None,
model_labels=None,
sync=True,
is_default_version=None,
model_version_aliases=None,
model_version_description=None,
)
|
TestVertexAICreateAutoMLVideoTrainingJobOperator
|
python
|
ipython__ipython
|
docs/autogen_shortcuts.py
|
{
"start": 3064,
"end": 6792
}
|
class ____:
"""Used as a buffer to get prompt_toolkit bindings"""
handle_return = None
input_transformer_manager = None
display_completions = None
editing_mode = "emacs"
auto_suggest = None
def bindings_from_prompt_toolkit(prompt_bindings: KeyBindingsBase) -> List[Binding]:
"""Collect bindings to a simple format that does not depend on prompt-toolkit internals"""
bindings: List[Binding] = []
for kb in prompt_bindings.bindings:
bindings.append(
Binding(
handler=Handler(
description=kb.handler.__doc__ or "",
identifier=create_identifier(kb.handler),
),
shortcut=Shortcut(
keys_sequence=[
str(k.value) if hasattr(k, "value") else k for k in kb.keys
],
filter=format_filter(kb.filter, skip={"has_focus_filter"}),
),
)
)
return bindings
INDISTINGUISHABLE_KEYS = {**KEY_ALIASES, **{v: k for k, v in KEY_ALIASES.items()}}
def format_prompt_keys(keys: str, add_alternatives=True) -> str:
"""Format prompt toolkit key with modifier into an RST representation."""
def to_rst(key):
escaped = key.replace("\\", "\\\\")
return f":kbd:`{escaped}`"
keys_to_press: List[str]
prefixes = {
"c-s-": [to_rst("ctrl"), to_rst("shift")],
"s-c-": [to_rst("ctrl"), to_rst("shift")],
"c-": [to_rst("ctrl")],
"s-": [to_rst("shift")],
}
for prefix, modifiers in prefixes.items():
if keys.startswith(prefix):
remainder = keys[len(prefix) :]
keys_to_press = [*modifiers, to_rst(remainder)]
break
else:
keys_to_press = [to_rst(keys)]
result = " + ".join(keys_to_press)
if keys in INDISTINGUISHABLE_KEYS and add_alternatives:
alternative = INDISTINGUISHABLE_KEYS[keys]
result = (
result
+ " (or "
+ format_prompt_keys(alternative, add_alternatives=False)
+ ")"
)
return result
if __name__ == "__main__":
here = Path(__file__).parent
dest = here / "source" / "config" / "shortcuts"
ipy_bindings = create_ipython_shortcuts(_DummyTerminal())
session = PromptSession(key_bindings=ipy_bindings)
prompt_bindings = session.app.key_bindings
assert prompt_bindings
# Ensure that we collected the default shortcuts
assert len(prompt_bindings.bindings) > len(ipy_bindings.bindings)
bindings = bindings_from_prompt_toolkit(prompt_bindings)
def sort_key(binding: Binding):
return binding.handler.identifier, binding.shortcut.filter
filters = []
with (dest / "table.tsv").open("w", encoding="utf-8") as csv:
for binding in sorted(bindings, key=sort_key):
sequence = ", ".join(
[format_prompt_keys(keys) for keys in binding.shortcut.keys_sequence]
)
if binding.shortcut.filter == "always":
condition_label = "-"
else:
# we cannot fit all the columns as the filters got too complex over time
condition_label = "ⓘ"
csv.write(
"\t".join(
[
sequence,
sentencize(binding.handler.description)
+ f" :raw-html:`<br>` `{binding.handler.identifier}`",
f':raw-html:`<span title="{html_escape(binding.shortcut.filter)}" style="cursor: help">{condition_label}</span>`',
]
)
+ "\n"
)
|
_DummyTerminal
|
python
|
davidhalter__parso
|
test/fuzz_diff_parser.py
|
{
"start": 3323,
"end": 7799
}
|
class ____:
@classmethod
def generate(cls, code_lines, change_count, previous_file_modification=None):
if previous_file_modification is not None and random.random() > 0.5:
# We want to keep the previous modifications in some cases to make
# more complex parser issues visible.
code_lines = previous_file_modification.apply(code_lines)
added_modifications = previous_file_modification.modification_list
else:
added_modifications = []
return cls(
added_modifications
+ list(cls._generate_line_modifications(code_lines, change_count)),
# work with changed trees more than with normal ones.
check_original=random.random() > 0.8,
)
@staticmethod
def _generate_line_modifications(lines, change_count):
def random_line(include_end=False):
return random.randint(0, len(lines) - (not include_end))
lines = list(lines)
for _ in range(change_count):
rand = random.randint(1, 4)
if rand == 1:
if len(lines) == 1:
# We cannot delete every line, that doesn't make sense to
# fuzz and it would be annoying to rewrite everything here.
continue
ld = LineDeletion(random_line())
elif rand == 2:
# Copy / Insertion
# Make it possible to insert into the first and the last line
ld = LineCopy(random_line(), random_line(include_end=True))
elif rand in (3, 4):
# Modify a line in some weird random ways.
line_nr = random_line()
line = lines[line_nr]
column = random.randint(0, len(line))
random_string = ''
for _ in range(random.randint(1, 3)):
if random.random() > 0.8:
# The lower characters cause way more issues.
unicode_range = 0x1f if random.randint(0, 1) else 0x3000
random_string += chr(random.randint(0, unicode_range))
else:
# These insertions let us understand how random
# keyword/operator insertions work. Theoretically this
# could also be done with unicode insertions, but the
# fuzzer is just way more effective here.
random_string += random.choice(_random_python_fragments)
if random.random() > 0.5:
# In this case we insert at a very random place that
# probably breaks syntax.
line = line[:column] + random_string + line[column:]
else:
# Here we have better chances to not break syntax, because
# we really replace the line with something that has
# indentation.
line = ' ' * random.randint(0, 12) + random_string + '\n'
ld = LineReplacement(line_nr, line)
ld.apply(lines)
yield ld
def __init__(self, modification_list, check_original):
self.modification_list = modification_list
self._check_original = check_original
def apply(self, code_lines):
changed_lines = list(code_lines)
for modification in self.modification_list:
modification.apply(changed_lines)
return changed_lines
def run(self, grammar, code_lines, print_code):
code = ''.join(code_lines)
modified_lines = self.apply(code_lines)
modified_code = ''.join(modified_lines)
if print_code:
if self._check_original:
print('Original:')
_print_copyable_lines(code_lines)
print('\nModified:')
_print_copyable_lines(modified_lines)
print()
if self._check_original:
m = grammar.parse(code, diff_cache=True)
start1 = _get_first_error_start_pos_or_none(m)
grammar.parse(modified_code, diff_cache=True)
if self._check_original:
# Also check if it's possible to "revert" the changes.
m = grammar.parse(code, diff_cache=True)
start2 = _get_first_error_start_pos_or_none(m)
assert start1 == start2, (start1, start2)
|
FileModification
|
python
|
pallets__jinja
|
src/jinja2/visitor.py
|
{
"start": 1733,
"end": 3550
}
|
class ____(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> list[Node]:
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
return [rv]
return rv
|
NodeTransformer
|
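The `NodeTransformer` docstring above spells out the replace/remove protocol: return `None` to drop a node, return a node to substitute it, or return the original node to keep it. A minimal, hypothetical sketch of that protocol in use (the `RenameVariable` subclass and the `old_name`/`new_name` identifiers are illustrative, not part of the source):

```python
# Hypothetical sketch of the NodeTransformer protocol described above.
from jinja2 import Environment, nodes
from jinja2.visitor import NodeTransformer


class RenameVariable(NodeTransformer):
    """Replace every reference to ``old_name`` with ``new_name``."""

    def visit_Name(self, node: nodes.Name) -> nodes.Name:
        if node.name == "old_name":
            # Returning a new node replaces the old one in place.
            return nodes.Name("new_name", node.ctx, lineno=node.lineno)
        # Returning the original node keeps it unchanged.
        return node


env = Environment()
tree = env.parse("Hello {{ old_name }}!")
tree = RenameVariable().visit(tree)
assert any(n.name == "new_name" for n in tree.find_all(nodes.Name))
```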
python
|
tiangolo__fastapi
|
fastapi/middleware/asyncexitstack.py
|
{
"start": 209,
"end": 637
}
|
class ____:
def __init__(
self, app: ASGIApp, context_name: str = "fastapi_middleware_astack"
) -> None:
self.app = app
self.context_name = context_name
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
async with AsyncExitStack() as stack:
scope[self.context_name] = stack
await self.app(scope, receive, send)
|
AsyncExitStackMiddleware
|
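`AsyncExitStackMiddleware` above only opens an `AsyncExitStack`, stashes it in the ASGI scope under `context_name` (default `"fastapi_middleware_astack"`), and awaits the wrapped app; any cleanup registered on the stack runs when the `async with` block exits. A minimal sketch, assuming a hypothetical `inner_app` and `resource` context manager:

```python
# Minimal, hypothetical sketch: the wrapped app registers async cleanup on the
# stack that the middleware placed in the ASGI scope.
import asyncio
from contextlib import asynccontextmanager

from fastapi.middleware.asyncexitstack import AsyncExitStackMiddleware


@asynccontextmanager
async def resource():
    print("acquired")
    try:
        yield "handle"
    finally:
        print("released")  # runs after inner_app returns, when the stack closes


async def inner_app(scope, receive, send):
    stack = scope["fastapi_middleware_astack"]  # the default context_name above
    await stack.enter_async_context(resource())
    print("handling request")


async def main():
    app = AsyncExitStackMiddleware(inner_app)
    await app({"type": "http"}, receive=None, send=None)


asyncio.run(main())  # prints: acquired, handling request, released
```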
python
|
scrapy__scrapy
|
scrapy/downloadermiddlewares/ajaxcrawl.py
|
{
"start": 565,
"end": 3805
}
|
class ____:
"""
Handle 'AJAX crawlable' pages marked as crawlable via meta tag.
"""
def __init__(self, settings: BaseSettings):
if not settings.getbool("AJAXCRAWL_ENABLED"):
raise NotConfigured
warn(
"scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware is deprecated"
" and will be removed in a future Scrapy version.",
ScrapyDeprecationWarning,
stacklevel=2,
)
# XXX: Google parses at least first 100k bytes; scrapy's redirect
# middleware parses first 4k. 4k turns out to be insufficient
# for this middleware, and parsing 100k could be slow.
# We use something in between (32K) by default.
self.lookup_bytes: int = settings.getint("AJAXCRAWL_MAXSIZE")
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls(crawler.settings)
def process_response(
self, request: Request, response: Response, spider: Spider
) -> Request | Response:
if not isinstance(response, HtmlResponse) or response.status != 200:
return response
if request.method != "GET":
# other HTTP methods are either not safe or don't have a body
return response
if "ajax_crawlable" in request.meta: # prevent loops
return response
if not self._has_ajax_crawlable_variant(response):
return response
ajax_crawl_request = request.replace(url=escape_ajax(request.url + "#!"))
logger.debug(
"Downloading AJAX crawlable %(ajax_crawl_request)s instead of %(request)s",
{"ajax_crawl_request": ajax_crawl_request, "request": request},
extra={"spider": spider},
)
ajax_crawl_request.meta["ajax_crawlable"] = True
return ajax_crawl_request
def _has_ajax_crawlable_variant(self, response: Response) -> bool:
"""
Return True if a page without hash fragment could be "AJAX crawlable".
"""
body = response.text[: self.lookup_bytes]
return _has_ajaxcrawlable_meta(body)
_ajax_crawlable_re: re.Pattern[str] = re.compile(
r'<meta\s+name=["\']fragment["\']\s+content=["\']!["\']/?>'
)
def _has_ajaxcrawlable_meta(text: str) -> bool:
"""
>>> _has_ajaxcrawlable_meta('<html><head><meta name="fragment" content="!"/></head><body></body></html>')
True
>>> _has_ajaxcrawlable_meta("<html><head><meta name='fragment' content='!'></head></html>")
True
>>> _has_ajaxcrawlable_meta('<html><head><!--<meta name="fragment" content="!"/>--></head><body></body></html>')
False
>>> _has_ajaxcrawlable_meta('<html></html>')
False
"""
# Stripping scripts and comments is slow (about 20x slower than
# just checking if a string is in text); this is a quick fail-fast
# path that should work for most pages.
if "fragment" not in text:
return False
if "content" not in text:
return False
text = html.remove_tags_with_content(text, ("script", "noscript"))
text = html.replace_entities(text)
text = html.remove_comments(text)
return _ajax_crawlable_re.search(text) is not None
|
AjaxCrawlMiddleware
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/optional2.py
|
{
"start": 200,
"end": 569
}
|
class ____:
def __eq__(self, other: "Optional[Cmp]") -> bool: ...
def __lt__(self, other: "Optional[Cmp]") -> bool: ...
def __gt__(self, other: "Cmp") -> bool: ...
def valid(value: Optional[Cmp], needed: Cmp):
x = value > needed
y = value == needed
# This should generate an error if reportOptionalOperand is enabled.
z = value < needed
|
Cmp
|
python
|
langchain-ai__langchain
|
libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py
|
{
"start": 2400,
"end": 10862
}
|
class ____:
"""Message to send to the TextGenInference API."""
role: str
content: str
tool_calls: list[dict]
def _lc_tool_call_to_hf_tool_call(tool_call: ToolCall) -> dict:
return {
"type": "function",
"id": tool_call["id"],
"function": {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"], ensure_ascii=False),
},
}
def _lc_invalid_tool_call_to_hf_tool_call(
invalid_tool_call: InvalidToolCall,
) -> dict:
return {
"type": "function",
"id": invalid_tool_call["id"],
"function": {
"name": invalid_tool_call["name"],
"arguments": invalid_tool_call["args"],
},
}
def _convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
if message.tool_calls or message.invalid_tool_calls:
message_dict["tool_calls"] = [
_lc_tool_call_to_hf_tool_call(tc) for tc in message.tool_calls
] + [
_lc_invalid_tool_call_to_hf_tool_call(tc)
for tc in message.invalid_tool_calls
]
elif "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
# If tool calls only, content is None not empty string
if "tool_calls" in message_dict and message_dict["content"] == "":
message_dict["content"] = None
else:
pass
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"content": message.content,
"tool_call_id": message.tool_call_id,
}
else:
msg = f"Got unknown type {message}"
raise TypeError(msg)
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
"""Convert a dictionary to a LangChain message.
Args:
_dict: The dictionary.
Returns:
The LangChain message.
"""
role = _dict.get("role")
if role == "user":
return HumanMessage(content=_dict.get("content", ""))
if role == "assistant":
content = _dict.get("content", "") or ""
additional_kwargs: dict = {}
if function_call := _dict.get("function_call"):
additional_kwargs["function_call"] = dict(function_call)
tool_calls = []
invalid_tool_calls = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
for raw_tool_call in raw_tool_calls:
try:
tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
except Exception as e:
invalid_tool_calls.append(
dict(make_invalid_tool_call(raw_tool_call, str(e)))
)
return AIMessage(
content=content,
additional_kwargs=additional_kwargs,
tool_calls=tool_calls,
invalid_tool_calls=invalid_tool_calls,
)
if role == "system":
return SystemMessage(content=_dict.get("content", ""))
if role == "function":
return FunctionMessage(
content=_dict.get("content", ""), name=_dict.get("name", "")
)
if role == "tool":
additional_kwargs = {}
if "name" in _dict:
additional_kwargs["name"] = _dict["name"]
return ToolMessage(
content=_dict.get("content", ""),
tool_call_id=_dict.get("tool_call_id", ""),
additional_kwargs=additional_kwargs,
)
return ChatMessage(content=_dict.get("content", ""), role=role or "")
def _is_huggingface_hub(llm: Any) -> bool:
try:
from langchain_community.llms.huggingface_hub import (
HuggingFaceHub, # type: ignore[import-not-found]
)
return isinstance(llm, HuggingFaceHub)
except ImportError:
# if no langchain community, it is not a HuggingFaceHub
return False
def _convert_chunk_to_message_chunk(
chunk: Mapping[str, Any], default_class: type[BaseMessageChunk]
) -> BaseMessageChunk:
choice = chunk["choices"][0]
_dict = choice["delta"]
role = cast(str, _dict.get("role"))
content = cast(str, _dict.get("content") or "")
additional_kwargs: dict = {}
tool_call_chunks: list[ToolCallChunk] = []
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
for rtc in raw_tool_calls:
with contextlib.suppress(KeyError):
tool_call_chunks.append(
create_tool_call_chunk(
name=rtc["function"].get("name"),
args=rtc["function"].get("arguments"),
id=rtc.get("id"),
index=rtc.get("index"),
)
)
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
if role == "assistant" or default_class == AIMessageChunk:
if usage := chunk.get("usage"):
input_tokens = usage.get("prompt_tokens", 0)
output_tokens = usage.get("completion_tokens", 0)
usage_metadata = {
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"total_tokens": usage.get("total_tokens", input_tokens + output_tokens),
}
else:
usage_metadata = None
return AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
tool_call_chunks=tool_call_chunks,
usage_metadata=usage_metadata, # type: ignore[arg-type]
)
if role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
if role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
if role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
if role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
return default_class(content=content) # type: ignore[call-arg]
def _is_huggingface_textgen_inference(llm: Any) -> bool:
try:
from langchain_community.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference, # type: ignore[import-not-found]
)
return isinstance(llm, HuggingFaceTextGenInference)
except ImportError:
# if no langchain community, it is not a HuggingFaceTextGenInference
return False
def _is_huggingface_endpoint(llm: Any) -> bool:
return isinstance(llm, HuggingFaceEndpoint)
def _is_huggingface_pipeline(llm: Any) -> bool:
return isinstance(llm, HuggingFacePipeline)
|
TGI_MESSAGE
|
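A quick, hedged illustration of the message-conversion helpers defined above (assuming the module is importable from the path shown and that `langchain_core` is installed; the example messages are illustrative):

```python
# Hypothetical usage sketch of the private conversion helpers shown above.
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_huggingface.chat_models.huggingface import _convert_message_to_dict

print(_convert_message_to_dict(HumanMessage(content="hi")))
# {'role': 'user', 'content': 'hi'}
print(_convert_message_to_dict(ToolMessage(content="42", tool_call_id="call_1")))
# {'role': 'tool', 'content': '42', 'tool_call_id': 'call_1'}
```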
python
|
doocs__leetcode
|
solution/0700-0799/0784.Letter Case Permutation/Solution.py
|
{
"start": 0,
"end": 393
}
|
class ____:
def letterCasePermutation(self, s: str) -> List[str]:
def dfs(i: int) -> None:
if i >= len(t):
ans.append("".join(t))
return
dfs(i + 1)
if t[i].isalpha():
t[i] = chr(ord(t[i]) ^ 32)
dfs(i + 1)
t = list(s)
ans = []
dfs(0)
return ans
|
Solution
|
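The `chr(ord(t[i]) ^ 32)` step works because ASCII upper- and lowercase letters differ only in bit 5 (value 32); a quick sanity check of that trick:

```python
# 'A' = 0b1000001, 'a' = 0b1100001: XOR with 32 toggles the case bit.
for ch in "aA":
    print(ch, "->", chr(ord(ch) ^ 32))
# a -> A
# A -> a
```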
python
|
getsentry__sentry
|
src/sentry/analytics/events/rule_reenable.py
|
{
"start": 67,
"end": 275
}
|
class ____(analytics.Event, abc.ABC):
"""Re-enable a rule that was disabled"""
rule_id: int
user_id: int | None
organization_id: int
@analytics.eventclass("rule_reenable.explicit")
|
RuleReenable
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_size.py
|
{
"start": 871,
"end": 1197
}
|
class ____(scale_size_ordinal):
"""
Discrete area size scale
"""
_aesthetics = ["size"]
def __post_init__(self, range):
warn(
"Using size for a discrete variable is not advised.",
PlotnineWarning,
)
super().__post_init__(range)
@dataclass
|
scale_size_discrete
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/remote_representation/handle.py
|
{
"start": 3163,
"end": 4063
}
|
class ____:
job_name: str
repository_handle: RepositoryHandle
def to_string(self):
return f"{self.location_name}.{self.repository_name}.{self.job_name}"
@property
def repository_name(self):
return self.repository_handle.repository_name
@property
def location_name(self):
return self.repository_handle.location_name
def get_remote_origin(self):
return self.repository_handle.get_remote_origin().get_job_origin(self.job_name)
def get_python_origin(self):
return self.repository_handle.get_python_origin().get_job_origin(self.job_name)
def to_selector(self) -> JobSubsetSelector:
return JobSubsetSelector(
location_name=self.location_name,
repository_name=self.repository_name,
job_name=self.job_name,
op_selection=None,
)
@record(kw_only=False)
|
JobHandle
|
python
|
python__mypy
|
mypyc/test/test_refcount.py
|
{
"start": 819,
"end": 2052
}
|
class ____(MypycDataSuite):
files = files
base_path = test_temp_dir
optional_out = True
def run_case(self, testcase: DataDrivenTestCase) -> None:
"""Perform a runtime checking transformation test case."""
options = infer_ir_build_options_from_test_name(testcase.name)
if options is None:
# Skipped test case
return
with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
expected_output = remove_comment_lines(testcase.output)
expected_output = replace_word_size(expected_output)
try:
ir = build_ir_for_single_file(testcase.input, options)
except CompileError as e:
actual = e.messages
else:
actual = []
for fn in ir:
if fn.name == TOP_LEVEL_NAME and not testcase.name.endswith("_toplevel"):
continue
insert_uninit_checks(fn)
insert_ref_count_opcodes(fn)
actual.extend(format_func(fn))
assert_test_output(testcase, actual, "Invalid source code output", expected_output)
|
TestRefCountTransform
|
python
|
viewflow__viewflow
|
viewflow/workflow/nodes/switch.py
|
{
"start": 307,
"end": 1082
}
|
class ____(Activation):
"""Switch gateway activation."""
next_task = None
@Activation.status.super()
def activate(self):
with transaction.atomic(savepoint=True), self.exception_guard():
for node, cond in self.flow_task._branches:
if cond:
if cond(self):
self.next_task = node
break
else:
self.next_task = node
if not self.next_task:
raise FlowRuntimeError(
"No next task available for {}".format(self.flow_task.name)
)
@Activation.status.super()
def create_next(self):
yield self.next_task._create(self, self.task.token)
|
SwitchActivation
|
python
|
boto__boto3
|
tests/unit/resources/test_collection.py
|
{
"start": 4687,
"end": 22027
}
|
class ____(BaseTestCase):
def setUp(self):
super().setUp()
# Minimal definition so things like repr work
self.collection_def = {
'request': {'operation': 'TestOperation'},
'resource': {'type': 'Frob'},
}
self.client = mock.Mock()
self.client.can_paginate.return_value = False
self.parent = mock.Mock()
self.parent.meta = ResourceMeta('test', client=self.client)
self.factory = ResourceFactory(mock.Mock())
self.service_model = ServiceModel({})
def get_collection(self):
resource_defs = {'Frob': {'identifiers': []}}
# Build up a resource def identifier list based on what
# the collection is expecting to be required from its
# definition. This saves a bunch of repetitive typing
# and lets you just define a collection in the tests
        # below. Any identifiers you expect to be available in
# the resource definition will automatically be there.
resource_def = self.collection_def.get('resource', {})
for identifier in resource_def.get('identifiers', []):
resource_defs['Frob']['identifiers'].append(
{'name': identifier['target']}
)
collection_model = Collection(
'test', self.collection_def, resource_defs
)
collection = CollectionManager(
collection_model=collection_model,
parent=self.parent,
factory=self.factory,
service_context=ServiceContext(
service_name='test',
service_model=self.service_model,
resource_json_definitions=resource_defs,
service_waiter_model=None,
),
)
return collection
def test_repr(self):
collection = self.get_collection()
assert 'CollectionManager' in repr(collection)
def test_iteration_manager(self):
# A collection manager is not iterable. You must first call
# .all or .filter or another method to get an iterable.
collection = self.get_collection()
with pytest.raises(TypeError):
list(collection)
def test_iteration_non_paginated(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.get_frobs.return_value = {
'Frobs': [
{'Id': 'one'},
{'Id': 'two'},
{'Id': 'three'},
{'Id': 'four'},
]
}
collection = self.get_collection()
items = list(collection.all())
assert len(items) == 4
assert items[0].id == 'one'
assert items[1].id == 'two'
assert items[2].id == 'three'
assert items[3].id == 'four'
def test_limit_param_non_paginated(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.get_frobs.return_value = {
'Frobs': [
{'Id': 'one'},
{'Id': 'two'},
{'Id': 'three'},
{'Id': 'four'},
]
}
collection = self.get_collection()
items = list(collection.all().limit(2))
assert len(items) == 2
# Only the first two should be present
assert items[0].id == 'one'
assert items[1].id == 'two'
def test_limit_method_non_paginated(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.get_frobs.return_value = {
'Frobs': [
{'Id': 'one'},
{'Id': 'two'},
{'Id': 'three'},
{'Id': 'four'},
]
}
collection = self.get_collection()
items = list(collection.limit(2))
assert len(items) == 2
# Only the first two should be present
assert items[0].id == 'one'
assert items[1].id == 'two'
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_filters_non_paginated(self, handler):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {'type': 'Frob', 'identifiers': []},
}
self.client.get_frobs.return_value = {}
handler.return_value.return_value = []
collection = self.get_collection()
list(collection.filter(Param1='foo', Param2=3).limit(2))
# Note - limit is not passed through to the low-level call
self.client.get_frobs.assert_called_with(Param1='foo', Param2=3)
def test_page_iterator_returns_pages_of_items(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = [
{'Frobs': [{'Id': 'one'}, {'Id': 'two'}]},
{'Frobs': [{'Id': 'three'}, {'Id': 'four'}]},
]
collection = self.get_collection()
pages = list(collection.limit(3).pages())
assert len(pages) == 2
assert len(pages[0]) == 2
assert len(pages[1]) == 1
def test_page_iterator_page_size(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.can_paginate.return_value = True
paginator = self.client.get_paginator.return_value
paginator.paginate.return_value = []
collection = self.get_collection()
list(collection.page_size(5).pages())
paginator.paginate.assert_called_with(
PaginationConfig={'PageSize': 5, 'MaxItems': None}
)
def test_iteration_paginated(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = [
{'Frobs': [{'Id': 'one'}, {'Id': 'two'}]},
{'Frobs': [{'Id': 'three'}, {'Id': 'four'}]},
]
collection = self.get_collection()
items = list(collection.all())
assert len(items) == 4
assert items[0].id == 'one'
assert items[1].id == 'two'
assert items[2].id == 'three'
assert items[3].id == 'four'
# Low-level pagination should have been called
self.client.get_paginator.assert_called_with('get_frobs')
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(
PaginationConfig={'PageSize': None, 'MaxItems': None}
)
def test_limit_param_paginated(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = [
{'Frobs': [{'Id': 'one'}, {'Id': 'two'}]},
{'Frobs': [{'Id': 'three'}, {'Id': 'four'}]},
]
collection = self.get_collection()
items = list(collection.all().limit(2))
assert len(items) == 2
# Only the first two should be present
assert items[0].id == 'one'
assert items[1].id == 'two'
def test_limit_method_paginated(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = [
{'Frobs': [{'Id': 'one'}, {'Id': 'two'}]},
{'Frobs': [{'Id': 'three'}, {'Id': 'four'}]},
]
collection = self.get_collection()
items = list(collection.all().limit(2))
assert len(items) == 2
# Only the first two should be present
assert items[0].id == 'one'
assert items[1].id == 'two'
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_filters_paginated(self, handler):
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = []
handler.return_value.return_value = []
collection = self.get_collection()
list(collection.filter(Param1='foo', Param2=3).limit(2))
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(
PaginationConfig={'PageSize': None, 'MaxItems': 2},
Param1='foo',
Param2=3,
)
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_filter_does_not_clobber_existing_list_values(self, handler):
self.collection_def = {
'request': {
'operation': 'GetFrobs',
"params": [
{
"target": "Filters[0].Name",
"source": "string",
"value": "frob-id",
},
{
"target": "Filters[0].Values[0]",
"source": "identifier",
"name": "Id",
},
],
},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = []
handler.return_value.return_value = []
collection = self.get_collection()
self.parent.id = 'my-id'
list(
collection.filter(
Filters=[{'Name': 'another-filter', 'Values': ['foo']}]
)
)
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(
PaginationConfig={'PageSize': None, 'MaxItems': None},
Filters=[
{'Values': ['my-id'], 'Name': 'frob-id'},
{'Values': ['foo'], 'Name': 'another-filter'},
],
)
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_page_size_param(self, handler):
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = []
handler.return_value.return_value = []
collection = self.get_collection()
list(collection.all().page_size(1))
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(
PaginationConfig={'PageSize': 1, 'MaxItems': None}
)
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_page_size_method(self, handler):
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = []
handler.return_value.return_value = []
collection = self.get_collection()
list(collection.page_size(1))
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(
PaginationConfig={'PageSize': 1, 'MaxItems': None}
)
def test_chaining(self):
self.collection_def = {
'request': {'operation': 'GetFrobs'},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.get_frobs.return_value = {
'Frobs': [
{'Id': 'one'},
{'Id': 'two'},
{'Id': 'three'},
{'Id': 'four'},
]
}
collection = self.get_collection()
items = list(collection.filter().all().all())
assert len(items) == 4
assert items[0].id == 'one'
assert items[1].id == 'two'
assert items[2].id == 'three'
assert items[3].id == 'four'
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_chaining_copies_parameters(self, handler):
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = []
handler.return_value.return_value = []
collection = self.get_collection()
list(collection.all().filter(CustomArg=1).limit(3).page_size(3))
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(
PaginationConfig={'PageSize': 3, 'MaxItems': 3}, CustomArg=1
)
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_chaining_filters_does_not_clobber_list_values(self, handler):
self.collection_def = {
'request': {
'operation': 'GetFrobs',
"params": [
{
"target": "Filters[0].Name",
"source": "string",
"value": "frob-id",
},
{
"target": "Filters[0].Values[0]",
"source": "identifier",
"name": "Id",
},
],
},
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'response',
'path': 'Frobs[].Id',
}
],
},
}
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = []
handler.return_value.return_value = []
collection = self.get_collection()
self.parent.id = 'my-id'
collection = collection.filter(
Filters=[{'Name': 'second-filter', 'Values': ['foo']}]
)
list(
collection.filter(
Filters=[{'Name': 'third-filter', 'Values': ['bar']}]
)
)
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(
PaginationConfig={'PageSize': None, 'MaxItems': None},
Filters=[
{'Values': ['my-id'], 'Name': 'frob-id'},
{'Values': ['foo'], 'Name': 'second-filter'},
{'Values': ['bar'], 'Name': 'third-filter'},
],
)
def test_chained_repr(self):
collection = self.get_collection()
assert 'ResourceCollection' in repr(collection.all())
|
TestResourceCollection
|
python
|
Delgan__loguru
|
loguru/_file_sink.py
|
{
"start": 2039,
"end": 2487
}
|
class ____:
@staticmethod
def retention_count(logs, number):
def key_log(log):
return (-os.stat(log).st_mtime, log)
for log in sorted(logs, key=key_log)[number:]:
os.remove(log)
@staticmethod
def retention_age(logs, seconds):
t = datetime.datetime.now().timestamp()
for log in logs:
if os.stat(log).st_mtime <= t - seconds:
os.remove(log)
|
Retention
|
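A hedged, self-contained check of `retention_count` above: with explicit mtimes, only the `number` most recently modified files survive. The file names and the direct import of the private `loguru._file_sink` module are illustrative:

```python
# Hypothetical sketch: retention_count keeps the 2 newest files and removes the rest.
import os
import tempfile

from loguru._file_sink import Retention

with tempfile.TemporaryDirectory() as d:
    logs = []
    for i, name in enumerate(("a.log", "b.log", "c.log")):
        path = os.path.join(d, name)
        open(path, "w").close()
        os.utime(path, (i, i))  # give each file a distinct, increasing mtime
        logs.append(path)
    Retention.retention_count(logs, number=2)
    print(sorted(os.listdir(d)))  # ['b.log', 'c.log'] -- the oldest file was removed
```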
python
|
astropy__astropy
|
astropy/modeling/tests/test_fitters.py
|
{
"start": 5725,
"end": 7545
}
|
class ____:
"""
Tests the joint fitting routine using 2 gaussian models
"""
def setup_class(self):
"""
Create 2 gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=0.4)
self.jf = JointFitter(
[self.g1, self.g2], {self.g1: ["amplitude"], self.g2: ["amplitude"]}, [9.8]
)
self.x = np.arange(10, 20, 0.1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
Tests the fitting routine with similar procedure.
Compares the fitted parameters.
"""
p1 = [14.9, 0.3]
p2 = [13, 0.4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(
np.r_[model(p[0], p[1:3], x1) - y1, model(p[0], p[3:], x2) - y2]
)
coeff, _ = optimize.leastsq(
errfunc, p, args=(self.x, self.ny1, self.x, self.ny2)
)
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
|
TestJointFitter
|
python
|
wandb__wandb
|
wandb/vendor/pygments/formatters/img.py
|
{
"start": 18817,
"end": 19131
}
|
class ____(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
|
GifImageFormatter
|
python
|
mozilla__bleach
|
bleach/_vendor/html5lib/treewalkers/genshi.py
|
{
"start": 313,
"end": 2309
}
|
class ____(base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, _ = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END or
next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if namespace != namespaces["html"] or name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
|
TreeWalker
|
python
|
imageio__imageio
|
imageio/plugins/dicom.py
|
{
"start": 3672,
"end": 12621
}
|
class ____(Format):
"""See :mod:`imageio.plugins.dicom`"""
def _can_read(self, request):
# If user URI was a directory, we check whether it has a DICOM file
if os.path.isdir(request.filename):
files = os.listdir(request.filename)
            for fname in sorted(files):  # Sorting makes it consistent
filename = os.path.join(request.filename, fname)
if os.path.isfile(filename) and "DICOMDIR" not in fname:
with open(filename, "rb") as f:
first_bytes = read_n_bytes(f, 140)
return first_bytes[128:132] == b"DICM"
else:
return False
# Check
return request.firstbytes[128:132] == b"DICM"
def _can_write(self, request):
        # We cannot save yet. This may become possible if we use pydicom
        # as a backend.
return False
# --
class Reader(Format.Reader):
_compressed_warning_dirs = set()
def _open(self, progress=True):
if not _dicom:
load_lib()
if os.path.isdir(self.request.filename):
# A dir can be given if the user used the format explicitly
self._info = {}
self._data = None
else:
# Read the given dataset now ...
try:
dcm = _dicom.SimpleDicomReader(self.request.get_file())
except _dicom.CompressedDicom as err:
# We cannot do this on our own. Perhaps with some help ...
cmd = get_gdcmconv_exe()
if not cmd and "JPEG" in str(err):
cmd = get_dcmdjpeg_exe()
if not cmd:
msg = err.args[0].replace("using", "installing")
msg = msg.replace("convert", "auto-convert")
err.args = (msg,)
raise
else:
fname1 = self.request.get_local_filename()
fname2 = fname1 + ".raw"
try:
subprocess.check_call(cmd + [fname1, fname2])
except Exception:
raise err
d = os.path.dirname(fname1)
if d not in self._compressed_warning_dirs:
self._compressed_warning_dirs.add(d)
logger.warning(
"DICOM file contained compressed data. "
+ "Autoconverting with "
+ cmd[0]
+ " (this warning is shown once for each directory)"
)
dcm = _dicom.SimpleDicomReader(fname2)
self._info = dcm._info
self._data = dcm.get_numpy_array()
# Initialize series, list of DicomSeries objects
self._series = None # only created if needed
# Set progress indicator
if isinstance(progress, BaseProgressIndicator):
self._progressIndicator = progress
elif progress is True:
p = StdoutProgressIndicator("Reading DICOM")
self._progressIndicator = p
elif progress in (None, False):
self._progressIndicator = BaseProgressIndicator("Dummy")
else:
raise ValueError("Invalid value for progress.")
def _close(self):
# Clean up
self._info = None
self._data = None
self._series = None
@property
def series(self):
if self._series is None:
pi = self._progressIndicator
self._series = _dicom.process_directory(self.request, pi)
return self._series
def _get_length(self):
if self._data is None:
dcm = self.series[0][0]
self._info = dcm._info
self._data = dcm.get_numpy_array()
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
if self.request.mode[1] == "i":
                # User expects one, but let's be honest about this file
return nslices
elif self.request.mode[1] == "I":
# User expects multiple, if this file has multiple slices, ok.
# Otherwise we have to check the series.
if nslices > 1:
return nslices
else:
return sum([len(serie) for serie in self.series])
elif self.request.mode[1] == "v":
# User expects a volume, if this file has one, ok.
# Otherwise we have to check the series
if nslices > 1:
return 1
else:
return len(self.series) # We assume one volume per series
elif self.request.mode[1] == "V":
# User expects multiple volumes. We have to check the series
return len(self.series) # We assume one volume per series
else:
raise RuntimeError("DICOM plugin should know what to expect.")
def _get_slice_data(self, index):
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
# Allow index >1 only if this file contains >1
if nslices > 1:
return self._data[index], self._info
elif index == 0:
return self._data, self._info
else:
raise IndexError("Dicom file contains only one slice.")
def _get_data(self, index):
if self._data is None:
dcm = self.series[0][0]
self._info = dcm._info
self._data = dcm.get_numpy_array()
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
if self.request.mode[1] == "i":
return self._get_slice_data(index)
elif self.request.mode[1] == "I":
# Return slice from volume, or return item from series
if index == 0 and nslices > 1:
return self._data[index], self._info
else:
L = []
for serie in self.series:
L.extend([dcm_ for dcm_ in serie])
return L[index].get_numpy_array(), L[index].info
elif self.request.mode[1] in "vV":
# Return volume or series
if index == 0 and nslices > 1:
return self._data, self._info
else:
return (
self.series[index].get_numpy_array(),
self.series[index].info,
)
            # mode is `?` (typically because we are using V3). If there is a
            # series (multiple files), index refers to the element of the
            # series and we read volumes. If there is no series, index refers
            # to the slice in the volume and we read "flat" images.
elif len(self.series) > 1:
# mode is `?` and there are multiple series. Each series is a ndimage.
return (
self.series[index].get_numpy_array(),
self.series[index].info,
)
else:
# mode is `?` and there is only one series. Each slice is an ndimage.
return self._get_slice_data(index)
def _get_meta_data(self, index):
if self._data is None:
dcm = self.series[0][0]
self._info = dcm._info
self._data = dcm.get_numpy_array()
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
# Default is the meta data of the given file, or the "first" file.
if index is None:
return self._info
if self.request.mode[1] == "i":
return self._info
elif self.request.mode[1] == "I":
# Return slice from volume, or return item from series
if index == 0 and nslices > 1:
return self._info
else:
L = []
for serie in self.series:
L.extend([dcm_ for dcm_ in serie])
return L[index].info
elif self.request.mode[1] in "vV":
# Return volume or series
if index == 0 and nslices > 1:
return self._info
else:
return self.series[index].info
else: # pragma: no cover
raise ValueError("DICOM plugin should know what to expect.")
|
DicomFormat
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/uninitializedVariable2.py
|
{
"start": 506,
"end": 572
}
|
class ____(Abstract1):
def __init__(self):
self.x = ""
|
D
|
python
|
walkccc__LeetCode
|
solutions/2363. Merge Similar Items/2363.py
|
{
"start": 0,
"end": 236
}
|
class ____:
def mergeSimilarItems(self, items1: list[list[int]],
items2: list[list[int]]) -> list[list[int]]:
return sorted(
(Counter(dict(items1)) + collections.Counter(dict(items2))).items())
|
Solution
|
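The one-liner above relies on `Counter` addition summing weights key-wise; a small worked example with illustrative data:

```python
from collections import Counter

# Each item is a [value, weight] pair; Counter + Counter sums weights per value.
items1 = [[1, 1], [4, 5], [3, 8]]
items2 = [[3, 1], [1, 5]]
print(sorted((Counter(dict(items1)) + Counter(dict(items2))).items()))
# [(1, 6), (3, 9), (4, 5)]
```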
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/_config.py
|
{
"start": 110768,
"end": 115953
}
|
class ____(TypedDict, total=False):
"""
:class:`altair.IntervalSelectionConfig` ``TypedDict`` wrapper.
Parameters
----------
type
Determines the default event processing and data query for the selection. Vega-Lite
currently supports two selection types:
* ``"point"`` -- to select multiple discrete data values; the first value is
selected on ``click`` and additional values toggled on shift-click.
* ``"interval"`` -- to select a continuous range of data values on ``drag``.
clear
Clears the selection, emptying it of all values. This property can be a `Event
Stream <https://vega.github.io/vega/docs/event-streams/>`__ or ``false`` to disable
clear.
**Default value:** ``dblclick``.
**See also:** `clear examples
<https://vega.github.io/vega-lite/docs/selection.html#clear>`__ in the
documentation.
encodings
An array of encoding channels. The corresponding data field values must match for a
data tuple to fall within the selection.
**See also:** The `projection with encodings and fields section
<https://vega.github.io/vega-lite/docs/selection.html#project>`__ in the
documentation.
fields
An array of field names whose values must match for a data tuple to fall within the
selection.
**See also:** The `projection with encodings and fields section
<https://vega.github.io/vega-lite/docs/selection.html#project>`__ in the
documentation.
mark
An interval selection also adds a rectangle mark to depict the extents of the
interval. The ``mark`` property can be used to customize the appearance of the mark.
**See also:** `mark examples
<https://vega.github.io/vega-lite/docs/selection.html#mark>`__ in the documentation.
on
A `Vega event stream <https://vega.github.io/vega/docs/event-streams/>`__ (object or
selector) that triggers the selection. For interval selections, the event stream
must specify a `start and end
<https://vega.github.io/vega/docs/event-streams/#between-filters>`__.
**See also:** `on examples
<https://vega.github.io/vega-lite/docs/selection.html#on>`__ in the documentation.
resolve
With layered and multi-view displays, a strategy that determines how selections'
data queries are resolved when applied in a filter transform, conditional encoding
rule, or scale domain.
One of:
* ``"global"`` -- only one brush exists for the entire SPLOM. When the user begins
to drag, any previous brushes are cleared, and a new one is constructed.
* ``"union"`` -- each cell contains its own brush, and points are highlighted if
they lie within *any* of these individual brushes.
* ``"intersect"`` -- each cell contains its own brush, and points are highlighted
only if they fall within *all* of these individual brushes.
**Default value:** ``global``.
**See also:** `resolve examples
<https://vega.github.io/vega-lite/docs/selection.html#resolve>`__ in the
documentation.
translate
When truthy, allows a user to interactively move an interval selection
back-and-forth. Can be ``true``, ``false`` (to disable panning), or a `Vega event
stream definition <https://vega.github.io/vega/docs/event-streams/>`__ which must
include a start and end event to trigger continuous panning. Discrete panning (e.g.,
pressing the left/right arrow keys) will be supported in future versions.
**Default value:** ``true``, which corresponds to ``[pointerdown, window:pointerup]
        > window:pointermove!``. This default allows users to click and drag within an
interval selection to reposition it.
**See also:** `translate examples
<https://vega.github.io/vega-lite/docs/selection.html#translate>`__ in the
documentation.
zoom
When truthy, allows a user to interactively resize an interval selection. Can be
``true``, ``false`` (to disable zooming), or a `Vega event stream definition
<https://vega.github.io/vega/docs/event-streams/>`__. Currently, only ``wheel``
events are supported, but custom event streams can still be used to specify filters,
debouncing, and throttling. Future versions will expand the set of events that can
trigger this transformation.
**Default value:** ``true``, which corresponds to ``wheel!``. This default allows
users to use the mouse wheel to resize an interval selection.
**See also:** `zoom examples
<https://vega.github.io/vega-lite/docs/selection.html#zoom>`__ in the documentation.
"""
type: Literal["interval"]
clear: str | bool | MergedStreamKwds | DerivedStreamKwds
encodings: Sequence[SingleDefUnitChannel_T]
fields: Sequence[str]
mark: BrushConfigKwds
on: str | MergedStreamKwds | DerivedStreamKwds
resolve: SelectionResolution_T
translate: str | bool
zoom: str | bool
|
IntervalSelectionConfigKwds
|
python
|
pypa__warehouse
|
tests/unit/admin/views/test_users.py
|
{
"start": 23053,
"end": 45623
}
|
class ____:
def test_user_recover_account_initiate(self, db_request, db_session):
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
project0 = ProjectFactory.create()
RoleFactory.create(user=user, project=project0)
release0 = ReleaseFactory.create(project=project0)
db_session.add(
ReleaseURL(
release=release0, name="Homepage", url="https://example.com/home0"
)
)
db_session.add(
ReleaseURL(
release=release0, name="Source Code", url="http://example.com/source0"
)
)
project1 = ProjectFactory.create()
RoleFactory.create(user=user, project=project1)
release1 = ReleaseFactory.create(project=project1)
db_session.add(
ReleaseURL(
release=release1, name="Homepage", url="https://example.com/home1"
)
)
project2 = ProjectFactory.create()
RoleFactory.create(user=user, project=project2)
release2 = ReleaseFactory.create(project=project2)
db_session.add(
ReleaseURL(release=release2, name="telnet", url="telnet://192.0.2.16:80/")
)
project3 = ProjectFactory.create()
RoleFactory.create(user=user, project=project3)
result = views.user_recover_account_initiate(user, db_request)
assert result == {
"user": user,
"repo_urls": {
project0.name: {
("Homepage", "https://example.com/home0"),
("Source Code", "http://example.com/source0"),
},
project1.name: {
("Homepage", "https://example.com/home1"),
},
},
}
def test_user_recover_account_initiate_only_one(self, db_request):
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
account_recovery0 = user.record_observation(
request=db_request,
kind=ObservationKind.AccountRecovery,
actor=admin_user,
summary="Account Recovery",
payload={"completed": None},
)
account_recovery0.additional = {"status": "initiated"}
result = views.user_recover_account_initiate(user, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
def test_user_recover_account_initiate_submit(
self, db_request, db_session, monkeypatch
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
release = ReleaseFactory.create(project=project)
db_session.add(
ReleaseURL(
release=release, name="Homepage", url="https://example.com/home0"
)
)
db_session.add(
ReleaseURL(
release=release, name="Source Code", url="http://example.com/source0"
)
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_account_recovery_initiated_email", send_email)
monkeypatch.setattr(views, "token_urlsafe", lambda: "deadbeef")
db_request.method = "POST"
db_request.user = admin_user
db_request.POST["project_name"] = project.name
db_request.POST["support_issue_link"] = (
"https://github.com/pypi/support/issues/666"
)
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
now = datetime.datetime.now(datetime.UTC)
with freezegun.freeze_time(now):
result = views.user_recover_account_initiate(user, db_request)
assert send_email.calls == [
pretend.call(
db_request,
(user, None),
project_name=project.name,
support_issue_link="https://github.com/pypi/support/issues/666",
token="deadbeef",
)
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert len(user.active_account_recoveries) == 1
account_recovery = user.active_account_recoveries[0]
assert account_recovery.payload == {
"initiated": str(now),
"completed": None,
"token": "deadbeef",
"project_name": project.name,
"repos": sorted(
[
("Source Code", "http://example.com/source0"),
("Homepage", "https://example.com/home0"),
]
),
"support_issue_link": "https://github.com/pypi/support/issues/666",
"override_to_email": None,
}
assert account_recovery.additional == {"status": "initiated"}
def test_user_recover_account_initiate_no_urls_submit(
self, db_request, db_session, monkeypatch
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
release = ReleaseFactory.create(project=project)
db_session.add(
ReleaseURL(release=release, name="telnet", url="telnet://192.0.2.16:80/")
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_account_recovery_initiated_email", send_email)
monkeypatch.setattr(views, "token_urlsafe", lambda: "deadbeef")
db_request.method = "POST"
db_request.user = admin_user
db_request.POST["project_name"] = ""
db_request.POST["support_issue_link"] = (
"https://github.com/pypi/support/issues/666"
)
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
now = datetime.datetime.now(datetime.UTC)
with freezegun.freeze_time(now):
result = views.user_recover_account_initiate(user, db_request)
assert send_email.calls == [
pretend.call(
db_request,
(user, None),
project_name="",
support_issue_link="https://github.com/pypi/support/issues/666",
token="deadbeef",
)
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert len(user.active_account_recoveries) == 1
account_recovery = user.active_account_recoveries[0]
assert account_recovery.payload == {
"initiated": str(now),
"completed": None,
"token": "deadbeef",
"project_name": "",
"repos": [],
"support_issue_link": "https://github.com/pypi/support/issues/666",
"override_to_email": None,
}
assert account_recovery.additional == {"status": "initiated"}
def test_user_recover_account_initiate_override_email(
self, db_request, db_session, monkeypatch
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
release = ReleaseFactory.create(project=project)
db_session.add(
ReleaseURL(release=release, name="telnet", url="telnet://192.0.2.16:80/")
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_account_recovery_initiated_email", send_email)
monkeypatch.setattr(views, "token_urlsafe", lambda: "deadbeef")
db_request.method = "POST"
db_request.user = admin_user
db_request.POST["project_name"] = ""
db_request.POST["support_issue_link"] = (
"https://github.com/pypi/support/issues/666"
)
db_request.POST["override_to_email"] = "foo@example.com"
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
now = datetime.datetime.now(datetime.UTC)
with freezegun.freeze_time(now):
result = views.user_recover_account_initiate(user, db_request)
_email = [e for e in user.emails if e.email == "foo@example.com"][0]
assert _email.verified is False
assert send_email.calls == [
pretend.call(
db_request,
(user, _email),
project_name="",
support_issue_link="https://github.com/pypi/support/issues/666",
token="deadbeef",
)
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert len(user.active_account_recoveries) == 1
account_recovery = user.active_account_recoveries[0]
assert account_recovery.payload == {
"initiated": str(now),
"completed": None,
"token": "deadbeef",
"project_name": "",
"repos": [],
"support_issue_link": "https://github.com/pypi/support/issues/666",
"override_to_email": "foo@example.com",
}
assert account_recovery.additional == {"status": "initiated"}
def test_user_recover_account_initiate_override_email_exists(
self, db_request, db_session, monkeypatch
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
EmailFactory.create(
user=user, email="foo@example.com", primary=False, verified=False
)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
release = ReleaseFactory.create(project=project)
db_session.add(
ReleaseURL(release=release, name="telnet", url="telnet://192.0.2.16:80/")
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_account_recovery_initiated_email", send_email)
monkeypatch.setattr(views, "token_urlsafe", lambda: "deadbeef")
db_request.method = "POST"
db_request.user = admin_user
db_request.POST["project_name"] = ""
db_request.POST["support_issue_link"] = (
"https://github.com/pypi/support/issues/666"
)
db_request.POST["override_to_email"] = "foo@example.com"
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
now = datetime.datetime.now(datetime.UTC)
with freezegun.freeze_time(now):
result = views.user_recover_account_initiate(user, db_request)
_email = [e for e in user.emails if e.email == "foo@example.com"][0]
assert _email.verified is False
assert send_email.calls == [
pretend.call(
db_request,
(user, _email),
project_name="",
support_issue_link="https://github.com/pypi/support/issues/666",
token="deadbeef",
)
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert len(user.active_account_recoveries) == 1
account_recovery = user.active_account_recoveries[0]
assert account_recovery.payload == {
"initiated": str(now),
"completed": None,
"token": "deadbeef",
"project_name": "",
"repos": [],
"support_issue_link": "https://github.com/pypi/support/issues/666",
"override_to_email": "foo@example.com",
}
assert account_recovery.additional == {"status": "initiated"}
def test_user_recover_account_initiate_override_email_exists_wrong_user(
self, db_request, db_session, monkeypatch
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
other_user = UserFactory.create()
EmailFactory.create(
user=other_user, email="foo@example.com", primary=False, verified=False
)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
release = ReleaseFactory.create(project=project)
db_session.add(
ReleaseURL(release=release, name="telnet", url="telnet://192.0.2.16:80/")
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_account_recovery_initiated_email", send_email)
monkeypatch.setattr(views, "token_urlsafe", lambda: "deadbeef")
db_request.method = "POST"
db_request.user = admin_user
db_request.POST["project_name"] = ""
db_request.POST["support_issue_link"] = (
"https://github.com/pypi/support/issues/666"
)
db_request.POST["override_to_email"] = "foo@example.com"
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.user_recover_account_initiate(user, db_request)
assert send_email.calls == []
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.account_recovery.initiate", username=user.username)
]
assert db_request.session.flash.calls == [
pretend.call("Email address already associated with a user", queue="error")
]
assert len(user.active_account_recoveries) == 0
def test_user_recover_account_initiate_no_support_issue_link_submit(
self, db_request, db_session
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
release = ReleaseFactory.create(project=project)
db_session.add(
ReleaseURL(release=release, name="telnet", url="telnet://192.0.2.16:80/")
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
db_request.method = "POST"
db_request.user = admin_user
db_request.POST["project_name"] = ""
db_request.POST["support_issue_link"] = ""
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.user_recover_account_initiate(user, db_request)
assert send_email.calls == []
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.account_recovery.initiate", username=user.username)
]
assert db_request.session.flash.calls == [
pretend.call("Provide a link to the pypi/support issue", queue="error")
]
assert len(user.active_account_recoveries) == 0
def test_user_recover_account_initiate_invalid_support_issue_link_submit(
self, db_request, db_session
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
release = ReleaseFactory.create(project=project)
db_session.add(
ReleaseURL(release=release, name="telnet", url="telnet://192.0.2.16:80/")
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
db_request.method = "POST"
db_request.user = admin_user
db_request.POST["project_name"] = ""
db_request.POST["support_issue_link"] = (
"https://github.com/pypi/warehouse/issues/420"
)
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.user_recover_account_initiate(user, db_request)
assert send_email.calls == []
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.account_recovery.initiate", username=user.username)
]
assert db_request.session.flash.calls == [
pretend.call("The pypi/support issue link is invalid", queue="error")
]
assert len(user.active_account_recoveries) == 0
def test_recover_account_initiate_invalid_project_name_with_available_urls_submit(
self, db_request, db_session
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
release = ReleaseFactory.create(project=project)
db_session.add(
ReleaseURL(release=release, name="Homepage", url="https://example.com/home")
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
db_request.method = "POST"
db_request.user = admin_user
db_request.POST["project_name"] = ""
db_request.POST["support_issue_link"] = (
"https://github.com/pypi/support/issues/420"
)
db_request.route_path = pretend.call_recorder(
lambda route_name, **kwargs: "/user/the-redirect/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.user_recover_account_initiate(user, db_request)
assert send_email.calls == []
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.account_recovery.initiate", username=user.username)
]
assert db_request.session.flash.calls == [
pretend.call("Select a project for verification", queue="error")
]
assert len(user.active_account_recoveries) == 0
|
TestUserRecoverAccountInitiate
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/frame_ctor.py
|
{
"start": 1927,
"end": 2310
}
|
class ____:
params = [Nano(1), Hour(1)]
param_names = ["offset"]
def setup(self, offset):
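        # df.to_dict() gives {column: {Timestamp: value}}; rebuilding a DataFrame from it is the benchmarked operation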
N = 10**3
idx = date_range(Timestamp("1/1/1900"), freq=offset, periods=N)
df = DataFrame(np.random.randn(N, 10), index=idx)
self.d = df.to_dict()
def time_dict_with_timestamp_offsets(self, offset):
DataFrame(self.d)
|
FromDictwithTimestamp
|
python
|
geekcomputers__Python
|
Checker_game_by_dz/modules/pieces.py
|
{
"start": 81,
"end": 1272
}
|
class ____:
padding = 17
outline = 2
def __init__(self, row, col, color):
self.row = row
self.col = col
self.color = color
self.king = False
"""if (self.color == yellow):
self.direction = -1
else:
self.direction = 1"""
self.x = self.y = 0
self.calculate_pos()
# calculate the positions
def calculate_pos(self):
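        # pixel coordinates of the centre of the square this piece occupies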
self.x = (sq_size * self.col) + (sq_size // 2)
self.y = (sq_size * self.row) + (sq_size // 2)
# for making king
def make_king(self):
self.king = True
def draw(self, window):
radd = (sq_size // 2) - self.padding
pg.draw.circle(window, gray, (self.x, self.y), radd + self.outline)
pg.draw.circle(window, self.color, (self.x, self.y), radd)
if self.king:
window.blit(
crown,
((self.x - crown.get_width() // 2), (self.y - crown.get_height() // 2)),
)
def move(self, row, col):
self.row = row
self.col = col
self.calculate_pos()
    # representation as a string
def __repr__(self):
return str(self.color)
|
pieces
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_common.py
|
{
"start": 2376,
"end": 4903
}
|
class ____:
def test_name_required(self):
with pytest.raises(TypeError):
DeploymentStatusInfo(
status=DeploymentStatus.HEALTHY,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
)
def test_deployment_status_required(self):
with pytest.raises(TypeError):
DeploymentStatusInfo(
name="test_name",
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
)
@pytest.mark.parametrize(
"status,status_trigger",
list(zip(list(DeploymentStatus), list(DeploymentStatusTrigger))),
)
def test_proto(self, status, status_trigger):
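        # Round-trip the status info through its protobuf representation and check it survives unchanged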
deployment_status_info = DeploymentStatusInfo(
name="test_name",
status=status,
status_trigger=status_trigger,
message="context about status",
)
serialized_proto = deployment_status_info.to_proto().SerializeToString()
deserialized_proto = DeploymentStatusInfoProto.FromString(serialized_proto)
reconstructed_info = DeploymentStatusInfo.from_proto(deserialized_proto)
assert deployment_status_info == reconstructed_info
def test_running_replica_info():
"""Test hash value of RunningReplicaInfo"""
replica_id = ReplicaID("asdf123", deployment_id=DeploymentID(name="my_deployment"))
actor_name = replica_id.to_full_id_str()
# Test that replicas with same attributes have same hash
replica1 = RunningReplicaInfo(
replica_id=replica_id,
node_id="node_id",
node_ip="node_ip",
availability_zone="some-az",
actor_name=actor_name,
max_ongoing_requests=1,
is_cross_language=False,
)
replica2 = RunningReplicaInfo(
replica_id=replica_id,
node_id="node_id",
node_ip="node_ip",
availability_zone="some-az",
actor_name=actor_name,
max_ongoing_requests=1,
is_cross_language=False,
)
# Test that cross-language setting affects hash
replica3 = RunningReplicaInfo(
replica_id=replica_id,
node_id="node_id",
node_ip="node_ip",
availability_zone="some-az",
actor_name=actor_name,
max_ongoing_requests=1,
is_cross_language=True,
)
assert replica1._hash == replica2._hash
assert replica3._hash != replica1._hash
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
TestDeploymentStatusInfo
|
python
|
huggingface__transformers
|
src/transformers/models/modernbert_decoder/modular_modernbert_decoder.py
|
{
"start": 1802,
"end": 12726
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ModernBertDecoderModel`]. It is used to instantiate a ModernBert
decoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ModernBERT-base decoder.
e.g. [blab-jhu/test-32m-dec](https://huggingface.co/blab-jhu/test-32m-dec)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50368):
Vocabulary size of the ModernBert decoder model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ModernBertDecoderModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 22):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer decoder.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the decoder. Will default to `"gelu"`
if not specified.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
norm_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the normalization layers.
pad_token_id (`int`, *optional*, defaults to 50283):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 50282):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 50281):
Beginning of stream token id.
cls_token_id (`int`, *optional*, defaults to 50281):
Classification token id.
sep_token_id (`int`, *optional*, defaults to 50282):
Separation token id.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
embedding_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the MLP layers.
mlp_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the MLP layers.
decoder_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the decoder layers.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the classifier.
classifier_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the classifier.
classifier_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function for the classifier.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
local_attention (`int`, *optional*, defaults to 128):
The sliding window size for local attention. Only used for layers that use local attention. Note that for
the decoder to match ModernBERT this is actually half of the sliding window size, so 128 => 64.
global_attn_every_n_layers (`int`, *optional*, defaults to 3):
Every `global_attn_every_n_layers` layers will use global attention instead of local attention.
layer_types (`list[str]`, *optional*):
List of layer types, one for each layer. If not specified, will be automatically generated based on
`global_attn_every_n_layers`. Should contain "full_attention" or "sliding_attention".
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
Examples:
```python
>>> from transformers import ModernBertDecoderModel, ModernBertDecoderConfig
>>> # Initializing a ModernBert decoder style configuration
>>> configuration = ModernBertDecoderConfig()
>>> # Initializing a model from the modernbert-base decoder style configuration
>>> model = ModernBertDecoderModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "modernbert-decoder"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = {"global": 160_000.0, "local": 10_000.0}
def __init__(
self,
vocab_size: Optional[int] = 50368,
hidden_size: Optional[int] = 768,
intermediate_size: Optional[int] = 1152,
num_hidden_layers: Optional[int] = 22,
num_attention_heads: Optional[int] = 12,
hidden_activation: Optional[str] = "gelu",
max_position_embeddings: Optional[int] = 8192,
initializer_range: Optional[float] = 0.02,
initializer_cutoff_factor: Optional[float] = 2.0,
        norm_eps: Optional[float] = 1e-5,
norm_bias: Optional[bool] = False,
pad_token_id: Optional[int] = 50283,
eos_token_id: Optional[int] = 50282,
bos_token_id: Optional[int] = 50281,
cls_token_id: Optional[int] = 50281,
sep_token_id: Optional[int] = 50282,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
embedding_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
mlp_dropout: Optional[float] = 0.0,
decoder_bias: Optional[bool] = True,
classifier_dropout: Optional[float] = 0.0,
classifier_bias: Optional[bool] = False,
classifier_activation: Optional[str] = "gelu",
use_cache: Optional[bool] = True,
local_attention: Optional[int] = 128,
global_attn_every_n_layers: Optional[int] = 3,
layer_types: Optional[list[str]] = None,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.initializer_range = initializer_range
self.initializer_cutoff_factor = initializer_cutoff_factor
self.norm_eps = norm_eps
self.norm_bias = norm_bias
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.hidden_activation = hidden_activation
self.embedding_dropout = embedding_dropout
self.mlp_bias = mlp_bias
self.mlp_dropout = mlp_dropout
self.decoder_bias = decoder_bias
self.classifier_dropout = classifier_dropout
self.classifier_bias = classifier_bias
self.classifier_activation = classifier_activation
self.use_cache = use_cache
self.global_attn_every_n_layers = global_attn_every_n_layers
# for consistency with ModernBert
self.reference_compile = False
# Set up layer_types for standardized layer type detection
self.layer_types = layer_types
if self.layer_types is None:
# Create layer_types based on the alternating pattern
self.layer_types = []
for layer_id in range(num_hidden_layers):
if layer_id % global_attn_every_n_layers != 0:
self.layer_types.append("sliding_attention")
else:
self.layer_types.append("full_attention")
        # NOTE: `local_attention` matches ModernBERT, but the effective sliding window here is half of it
self.sliding_window = local_attention // 2 if local_attention else -1
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
cls_token_id=cls_token_id,
sep_token_id=sep_token_id,
**kwargs,
)
def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation=None, **kwargs):
rope_scaling = kwargs.pop("rope_scaling", None)
# Try to set `rope_scaling` if available, otherwise use `rope_parameters`. If we find `rope_parameters`
# as arg in the inputs, we can safely assume that it is in the new format. New naming used -> new format
default_rope_params = {
"sliding_attention": {"rope_type": "default"},
"full_attention": {"rope_type": "default"},
}
self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else default_rope_params
if rope_scaling is not None:
self.rope_parameters["full_attention"].update(rope_scaling)
self.rope_parameters["sliding_attention"].update(rope_scaling)
self.rope_parameters["full_attention"].setdefault(
"rope_theta", kwargs.pop("global_rope_theta", self.default_theta["global"])
)
self.rope_parameters["sliding_attention"].setdefault(
"rope_theta", kwargs.pop("local_rope_theta", self.default_theta["local"])
)
# Standardize and validate the correctness of rotary position embeddings parameters
self.standardize_rope_params()
self.validate_rope(ignore_keys=ignore_keys_at_rope_validation)
return kwargs
|
ModernBertDecoderConfig
|
python
|
davidhalter__parso
|
parso/python/tree.py
|
{
"start": 22214,
"end": 22545
}
|
class ____(Flow):
type = 'for_stmt'
__slots__ = ()
def get_testlist(self):
"""
Returns the input node ``y`` from: ``for x in y:``.
"""
return self.children[3]
def get_defined_names(self, include_setitem=False):
return _defined_names(self.children[1], include_setitem)
|
ForStmt
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/lite_flex_test.py
|
{
"start": 12151,
"end": 13808
}
|
class ____(test_util.TensorFlowTestCase):
@test_util.run_v2_only
def testFlexResourceVariables(self):
class Model(tf.Module):
def __init__(self):
self.v = tf.Variable([[0.0, 0.0, 0.0, 0.0]])
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 4], dtype=tf.float32)])
def eval(self, x):
# Control flow is needed to generate "FlexReadVariableOp".
if tf.reduce_mean(x) > 1.0:
self.v.assign_add([[1.0, 1.0, 1.0, 1.0]])
return self.v + x
m = Model()
to_save = m.eval.get_concrete_function()
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
tf.saved_model.save(m, save_dir, to_save)
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS,
]
converter.experimental_enable_resource_variables = True
tflite_model = converter.convert()
# Check the model works with TensorFlow ops.
interpreter = Interpreter(model_content=tflite_model)
signature_runner = interpreter.get_signature_runner()
outputs = signature_runner(
x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))
expected_output = np.array([[2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
    self.assertTrue((expected_output == list(outputs.values())[0]).all())
# Second run.
outputs = signature_runner(
x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))
expected_output = np.array([[3.0, 4.0, 5.0, 6.0]], dtype=np.float32)
    self.assertTrue((expected_output == list(outputs.values())[0]).all())
|
FromSavedModelTest
|
python
|
huggingface__transformers
|
src/transformers/models/maskformer/image_processing_maskformer_fast.py
|
{
"start": 3100,
"end": 31629
}
|
class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": 800, "longest_edge": 1333}
default_to_square = False
do_resize = True
do_rescale = True
rescale_factor = 1 / 255
do_normalize = True
do_pad = True
model_input_names = ["pixel_values", "pixel_mask"]
size_divisor = 32
do_reduce_labels = False
valid_kwargs = MaskFormerImageProcessorKwargs
def __init__(self, **kwargs: Unpack[MaskFormerImageProcessorKwargs]) -> None:
size = kwargs.pop("size", None)
max_size = kwargs.pop("max_size", None)
if size is None and max_size is not None:
size = self.size
size["longest_edge"] = max_size
elif size is None:
size = self.size
self.size = get_size_dict(size, max_size=max_size, default_to_square=False)
super().__init__(**kwargs)
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
"""
image_processor_dict = super().to_dict()
image_processor_dict.pop("_max_size", None)
return image_processor_dict
def reduce_label(self, labels: list["torch.Tensor"]):
for idx in range(len(labels)):
label = labels[idx]
label = torch.where(label == 0, torch.tensor(255, dtype=label.dtype), label)
label = label - 1
label = torch.where(label == 254, torch.tensor(255, dtype=label.dtype), label)
labels[idx] = label
def resize(
self,
image: torch.Tensor,
size: SizeDict,
size_divisor: int = 0,
interpolation: Optional["F.InterpolationMode"] = None,
**kwargs,
) -> torch.Tensor:
"""
Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Size of the image's `(height, width)` dimensions after resizing. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
size_divisor (`int`, *optional*, defaults to 0):
If `size_divisor` is given, the output image size will be divisible by the number.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
Resampling filter to use if resizing the image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if size.shortest_edge and size.longest_edge:
# Resize the image so that the shortest edge or the longest edge is of the given size
# while maintaining the aspect ratio of the original image.
new_size = get_size_with_aspect_ratio(
image.size()[-2:],
size["shortest_edge"],
size["longest_edge"],
)
elif size.max_height and size.max_width:
new_size = get_image_size_for_max_height_width(image.size()[-2:], size["max_height"], size["max_width"])
elif size.height and size.width:
new_size = (size["height"], size["width"])
else:
raise ValueError(
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
f" {size.keys()}."
)
if size_divisor > 0:
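            # Round the target size up to the nearest multiple of `size_divisor`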
height, width = new_size
height = int(math.ceil(height / size_divisor) * size_divisor)
width = int(math.ceil(width / size_divisor) * size_divisor)
new_size = (height, width)
image = F.resize(
image,
size=new_size,
interpolation=interpolation,
**kwargs,
)
return image
def pad(
self,
images: torch.Tensor,
padded_size: tuple[int, int],
segmentation_maps: Optional[torch.Tensor] = None,
fill: int = 0,
ignore_index: int = 255,
) -> BatchFeature:
original_size = images.size()[-2:]
padding_bottom = padded_size[0] - original_size[0]
padding_right = padded_size[1] - original_size[1]
if padding_bottom < 0 or padding_right < 0:
raise ValueError(
f"Padding dimensions are negative. Please make sure that the padded size is larger than the "
f"original size. Got padded size: {padded_size}, original size: {original_size}."
)
if original_size != padded_size:
padding = [0, 0, padding_right, padding_bottom]
images = F.pad(images, padding, fill=fill)
if segmentation_maps is not None:
segmentation_maps = [F.pad(mask, padding, fill=ignore_index) for mask in segmentation_maps]
# Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
pixel_mask = torch.zeros((images.shape[0], *padded_size), dtype=torch.int64, device=images.device)
pixel_mask[:, : original_size[0], : original_size[1]] = 1
return images, pixel_mask, segmentation_maps
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]] = None,
**kwargs: Unpack[MaskFormerImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps.
instance_id_to_semantic_id (`Union[list[dict[int, int]], dict[int, int]]`, *optional*):
A mapping from instance IDs to semantic IDs.
"""
return super().preprocess(
images,
segmentation_maps,
instance_id_to_semantic_id,
**kwargs,
)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: ImageInput,
instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[MaskFormerImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
To be overridden by subclasses when image-like inputs other than images should be processed.
It can be used for segmentation maps, depth maps, etc.
"""
# Prepare input images
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
if segmentation_maps is not None:
segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
return self._preprocess(images, segmentation_maps, instance_id_to_semantic_id, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
segmentation_maps: Optional["torch.Tensor"],
instance_id_to_semantic_id: Optional[dict[int, int]],
do_resize: Optional[bool],
size: Optional[SizeDict],
pad_size: Optional[SizeDict],
size_divisor: Optional[int],
interpolation: Optional[Union["PILImageResampling", "F.InterpolationMode"]],
do_rescale: Optional[bool],
rescale_factor: Optional[float],
do_normalize: Optional[bool],
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
ignore_index: Optional[int],
do_reduce_labels: Optional[bool],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
if segmentation_maps is not None and len(images) != len(segmentation_maps):
raise ValueError("Images and segmentation maps must have the same length.")
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
if segmentation_maps is not None:
grouped_segmentation_maps, grouped_segmentation_maps_index = group_images_by_shape(
segmentation_maps, disable_grouping=disable_grouping
)
resized_segmentation_maps_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(
image=stacked_images, size=size, size_divisor=size_divisor, interpolation=interpolation
)
if segmentation_maps is not None:
stacked_segmentation_maps = grouped_segmentation_maps[shape]
if do_resize:
stacked_segmentation_maps = self.resize(
image=stacked_segmentation_maps,
size=size,
size_divisor=size_divisor,
interpolation=F.InterpolationMode.NEAREST_EXACT,
)
resized_images_grouped[shape] = stacked_images
if segmentation_maps is not None:
resized_segmentation_maps_grouped[shape] = stacked_segmentation_maps
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
if segmentation_maps is not None:
resized_segmentation_maps = reorder_images(
resized_segmentation_maps_grouped, grouped_segmentation_maps_index
)
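        # Pad every image to a common size: the user-provided `pad_size`, or the max height/width in the batch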
if pad_size is not None:
padded_size = (pad_size.height, pad_size.width)
else:
padded_size = get_max_height_width(resized_images)
if segmentation_maps is not None:
mask_labels = []
class_labels = []
# Convert to list of binary masks and labels
for idx, segmentation_map in enumerate(resized_segmentation_maps):
if isinstance(instance_id_to_semantic_id, list):
instance_id = instance_id_to_semantic_id[idx]
else:
instance_id = instance_id_to_semantic_id
# Use instance2class_id mapping per image
masks, classes = convert_segmentation_map_to_binary_masks_fast(
segmentation_map.squeeze(0),
instance_id,
ignore_index=ignore_index,
do_reduce_labels=do_reduce_labels,
)
mask_labels.append(masks)
class_labels.append(classes)
if segmentation_maps is not None:
# group mask_labels as paired inputs and not images so as not to stack them
grouped_images, grouped_segmentation_maps, grouped_images_index = group_images_by_shape(
resized_images, mask_labels, disable_grouping=disable_grouping
)
processed_segmentation_maps_grouped = {}
else:
grouped_images, grouped_images_index = group_images_by_shape(
resized_images, disable_grouping=disable_grouping
)
processed_images_grouped = {}
processed_pixel_masks_grouped = {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
padded_images, pixel_masks, padded_segmentation_maps = self.pad(
images=stacked_images,
segmentation_maps=grouped_segmentation_maps[shape] if segmentation_maps is not None else None,
padded_size=padded_size,
ignore_index=ignore_index,
)
processed_images_grouped[shape] = padded_images
processed_pixel_masks_grouped[shape] = pixel_masks
if segmentation_maps is not None:
processed_segmentation_maps_grouped[shape] = padded_segmentation_maps
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_pixel_masks = reorder_images(processed_pixel_masks_grouped, grouped_images_index)
encoded_inputs = BatchFeature(
data={
"pixel_values": torch.stack(processed_images, dim=0) if return_tensors else processed_images,
"pixel_mask": torch.stack(processed_pixel_masks, dim=0) if return_tensors else processed_pixel_masks,
},
tensor_type=return_tensors,
)
if segmentation_maps is not None:
mask_labels = reorder_images(processed_segmentation_maps_grouped, grouped_images_index)
# we cannot batch them since they don't share a common class size
encoded_inputs["mask_labels"] = mask_labels
encoded_inputs["class_labels"] = class_labels
return encoded_inputs
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_semantic_segmentation
def post_process_semantic_segmentation(
self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None
) -> "torch.Tensor":
"""
Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
# Remove the null class `[..., :-1]`
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Semantic segmentation logits of shape (batch_size, num_classes, height, width)
segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = torch.nn.functional.interpolate(
segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_instance_segmentation
def post_process_instance_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
target_sizes: Optional[list[tuple[int, int]]] = None,
return_coco_annotation: Optional[bool] = False,
return_binary_maps: Optional[bool] = False,
) -> list[dict]:
"""
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only
supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps
to `True` to get the correct segmentation result.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
return_coco_annotation (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
return_binary_maps (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
(one per detected instance).
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
`list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
`True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`.
              Set to `None` if no mask is found above `threshold`.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if return_coco_annotation and return_binary_maps:
raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.")
# [batch_size, num_queries, num_classes+1]
class_queries_logits = outputs.class_queries_logits
# [batch_size, num_queries, height, width]
masks_queries_logits = outputs.masks_queries_logits
device = masks_queries_logits.device
num_classes = class_queries_logits.shape[-1] - 1
num_queries = class_queries_logits.shape[-2]
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(class_queries_logits.shape[0]):
mask_pred = masks_queries_logits[i]
mask_cls = class_queries_logits[i]
scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1]
labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
labels_per_image = labels[topk_indices]
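            # Recover the query index from the flattened (query, class) index before selecting the corresponding masks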
topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor")
mask_pred = mask_pred[topk_indices]
pred_masks = (mask_pred > 0).float()
# Calculate average mask prob
mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (
pred_masks.flatten(1).sum(1) + 1e-6
)
pred_scores = scores_per_image * mask_scores_per_image
pred_classes = labels_per_image
segmentation = torch.zeros(masks_queries_logits.shape[2:]) - 1
if target_sizes is not None:
segmentation = torch.zeros(target_sizes[i]) - 1
pred_masks = torch.nn.functional.interpolate(
pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest"
)[0]
instance_maps, segments = [], []
current_segment_id = 0
for j in range(num_queries):
score = pred_scores[j].item()
if not torch.all(pred_masks[j] == 0) and score >= threshold:
segmentation[pred_masks[j] == 1] = current_segment_id
segments.append(
{
"id": current_segment_id,
"label_id": pred_classes[j].item(),
"was_fused": False,
"score": round(score, 6),
}
)
current_segment_id += 1
instance_maps.append(pred_masks[j])
# Return segmentation map in run-length encoding (RLE) format
if return_coco_annotation:
segmentation = convert_segmentation_to_rle(segmentation)
# Return a concatenated tensor of binary instance maps
if return_binary_maps and len(instance_maps) != 0:
segmentation = torch.stack(instance_maps, dim=0)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_panoptic_segmentation
def post_process_panoptic_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
label_ids_to_fuse: Optional[set[int]] = None,
target_sizes: Optional[list[tuple[int, int]]] = None,
) -> list[dict]:
"""
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
                The labels in this set will have all their instances fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
              to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning("`label_ids_to_fuse` unset. No instance will be fused.")
label_ids_to_fuse = set()
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=label_ids_to_fuse,
target_size=target_size,
)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
__all__ = ["MaskFormerImageProcessorFast"]
|
MaskFormerImageProcessorFast
|
python
|
scipy__scipy
|
scipy/stats/tests/test_contingency.py
|
{
"start": 1766,
"end": 10937
}
|
class ____:
def test_chi2_contingency_trivial(self):
# Some very simple tests for chi2_contingency.
# A trivial case
obs = np.array([[1, 2], [1, 2]])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 1)
assert_array_equal(obs, expected)
# A *really* trivial case: 1-D data.
obs = np.array([1, 2, 3])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 0)
assert_array_equal(obs, expected)
def test_chi2_contingency_R(self):
# Some test cases that were computed independently, using R.
# Rcode = \
# """
# # Data vector.
# data <- c(
# 12, 34, 23, 4, 47, 11,
# 35, 31, 11, 34, 10, 18,
# 12, 32, 9, 18, 13, 19,
# 12, 12, 14, 9, 33, 25
# )
#
# # Create factor tags:r=rows, c=columns, t=tiers
# r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
# c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3")))
# t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2")))
#
# # 3-way Chi squared test of independence
# s = summary(xtabs(data~r+c+t))
# print(s)
# """
# Routput = \
# """
# Call: xtabs(formula = data ~ r + c + t)
# Number of cases in table: 478
# Number of factors: 3
# Test for independence of all factors:
# Chisq = 102.17, df = 17, p-value = 3.514e-14
# """
obs = np.array(
[[[12, 34, 23],
[35, 31, 11],
[12, 32, 9],
[12, 12, 14]],
[[4, 47, 11],
[34, 10, 18],
[18, 13, 19],
[9, 33, 25]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 102.17, significant=5)
assert_approx_equal(p, 3.514e-14, significant=4)
assert_equal(dof, 17)
# Rcode = \
# """
# # Data vector.
# data <- c(
# #
# 12, 17,
# 11, 16,
# #
# 11, 12,
# 15, 16,
# #
# 23, 15,
# 30, 22,
# #
# 14, 17,
# 15, 16
# )
#
# # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers
# r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2")))
# c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2")))
# d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2")))
# t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2")))
#
# # 4-way Chi squared test of independence
# s = summary(xtabs(data~r+c+d+t))
# print(s)
# """
# Routput = \
# """
# Call: xtabs(formula = data ~ r + c + d + t)
# Number of cases in table: 262
# Number of factors: 4
# Test for independence of all factors:
# Chisq = 8.758, df = 11, p-value = 0.6442
# """
obs = np.array(
[[[[12, 17],
[11, 16]],
[[11, 12],
[15, 16]]],
[[[23, 15],
[30, 22]],
[[14, 17],
[15, 16]]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 8.758, significant=4)
assert_approx_equal(p, 0.6442, significant=4)
assert_equal(dof, 11)
def test_chi2_contingency_g(self):
c = np.array([[15, 60], [15, 90]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood',
correction=False)
assert_allclose(g, 2*xlogy(c, c/e).sum())
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood',
correction=True)
c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]])
assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum())
c = np.array([[10, 12, 10], [12, 10, 10]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood')
assert_allclose(g, 2*xlogy(c, c/e).sum())
def test_chi2_contingency_bad_args(self):
# Test that "bad" inputs raise a ValueError.
# Negative value in the array of observed frequencies.
obs = np.array([[-1, 10], [1, 2]])
assert_raises(ValueError, chi2_contingency, obs)
# The zeros in this will result in zeros in the array
# of expected frequencies.
obs = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, chi2_contingency, obs)
# A degenerate case: `observed` has size 0.
obs = np.empty((0, 8))
assert_raises(ValueError, chi2_contingency, obs)
def test_chi2_contingency_yates_gh13875(self):
# Magnitude of Yates' continuity correction should not exceed difference
# between expected and observed value of the statistic; see gh-13875
observed = np.array([[1573, 3], [4, 0]])
p = chi2_contingency(observed)[1]
assert_allclose(p, 1, rtol=1e-12)
@pytest.mark.parametrize("correction", [False, True])
def test_result(self, correction):
obs = np.array([[1, 2], [1, 2]])
res = chi2_contingency(obs, correction=correction)
assert_equal((res.statistic, res.pvalue, res.dof, res.expected_freq), res)
@pytest.mark.slow
def test_exact_permutation(self):
table = np.arange(4).reshape(2, 2)
ref_statistic = chi2_contingency(table, correction=False).statistic
ref_pvalue = stats.fisher_exact(table).pvalue
method = stats.PermutationMethod(n_resamples=50000)
res = chi2_contingency(table, correction=False, method=method)
assert_equal(res.statistic, ref_statistic)
assert_allclose(res.pvalue, ref_pvalue, rtol=1e-15)
@pytest.mark.slow
@pytest.mark.parametrize('method', (stats.PermutationMethod,
stats.MonteCarloMethod))
def test_resampling_randomized(self, method):
rng = np.random.default_rng(2592340925)
# need to have big sum for asymptotic approximation to be good
rows = [300, 1000, 800]
cols = [200, 400, 800, 700]
table = stats.random_table(rows, cols, seed=rng).rvs()
res = chi2_contingency(table, correction=False, method=method(rng=rng))
ref = chi2_contingency(table, correction=False)
assert_equal(res.statistic, ref.statistic)
assert_allclose(res.pvalue, ref.pvalue, atol=5e-3)
assert_equal(res.dof, np.nan)
assert_equal(res.expected_freq, ref.expected_freq)
def test_resampling_invalid_args(self):
table = np.arange(8).reshape(2, 2, 2)
method = stats.PermutationMethod()
message = "Use of `method` is only compatible with two-way tables."
with pytest.raises(ValueError, match=message):
chi2_contingency(table, correction=False, method=method)
table = np.arange(4).reshape(2, 2)
method = stats.PermutationMethod()
message = "`correction=True` is not compatible with..."
with pytest.raises(ValueError, match=message):
chi2_contingency(table, method=method)
method = stats.MonteCarloMethod()
message = "`lambda_=2` is not compatible with..."
with pytest.raises(ValueError, match=message):
chi2_contingency(table, correction=False, lambda_=2, method=method)
method = 'herring'
message = "`method='herring'` not recognized; if provided, `method`..."
with pytest.raises(ValueError, match=message):
chi2_contingency(table, correction=False, method=method)
method = stats.MonteCarloMethod(rvs=stats.norm.rvs)
message = "If the `method` argument of `chi2_contingency` is..."
with pytest.raises(ValueError, match=message):
chi2_contingency(table, correction=False, method=method)
def test_bad_association_args():
# Invalid Test Statistic
assert_raises(ValueError, association, [[1, 2], [3, 4]], "X")
# Invalid array shape
assert_raises(ValueError, association, [[[1, 2]], [[3, 4]]], "cramer")
# chi2_contingency exception
assert_raises(ValueError, association, [[-1, 10], [1, 2]], 'cramer')
# Invalid Array Item Data Type
assert_raises(ValueError, association,
np.array([[1, 2], ["dd", 4]], dtype=object), 'cramer')
@pytest.mark.parametrize('stat, expected',
[('cramer', 0.09222412010290792),
('tschuprow', 0.0775509319944633),
('pearson', 0.12932925727138758)])
def test_assoc(stat, expected):
# 2d Array
obs1 = np.array([[12, 13, 14, 15, 16],
[17, 16, 18, 19, 11],
[9, 15, 14, 12, 11]])
a = association(observed=obs1, method=stat)
assert_allclose(a, expected)
|
TestChi2Contingency
|
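For context on the API exercised by the tests above, a minimal sketch of calling scipy.stats.chi2_contingency on a contingency table (the table values are illustrative only; named result attributes require a recent SciPy):
import numpy as np
from scipy.stats import chi2_contingency
obs = np.array([[10, 10, 20], [20, 20, 20]])   # observed frequencies
res = chi2_contingency(obs)                    # defaults: correction=True, lambda_=None
print(res.statistic, res.pvalue, res.dof)      # chi-squared statistic, p-value, degrees of freedom
print(res.expected_freq)                       # expected frequencies under independence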
python
|
tensorflow__tensorflow
|
tensorflow/python/training/evaluation_test.py
|
{
"start": 1990,
"end": 9224
}
|
class ____(test.TestCase):
def setUp(self):
super(EvaluateOnceTest, self).setUp()
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _train_model(self, checkpoint_dir, num_steps):
"""Trains a simple classification model.
Note that the data has been configured such that after around 300 steps,
the model has memorized the dataset (e.g. we can expect 100% accuracy).
Args:
checkpoint_dir: The directory where the checkpoint is written to.
num_steps: The number of steps to train for.
"""
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss_op = losses.log_loss(labels=tf_labels, predictions=tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = optimizer.minimize(loss_op,
training.get_or_create_global_step())
with monitored_session.MonitoredTrainingSession(
checkpoint_dir=checkpoint_dir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)]) as session:
loss = None
while not session.should_stop():
_, loss = session.run([train_op, loss_op])
if num_steps >= 300:
assert loss < .015
def testEvaluatePerfectModel(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_perfect_model_once')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics_module.accuracy(labels, predictions)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=update_op,
final_ops={'accuracy': (accuracy, update_op)},
hooks=[
evaluation._StopAfterNEvalsHook(1),
])
self.assertGreater(final_ops_values['accuracy'], .99)
def testEvaluateWithFiniteInputs(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_with_finite_inputs')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run evaluation. Inputs are fed through input producer for one epoch.
all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
single_input, single_label = training.slice_input_producer(
[all_inputs, all_labels], num_epochs=1)
inputs, labels = training.batch([single_input, single_label], batch_size=6,
allow_smaller_final_batch=True)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics_module.accuracy(labels, predictions)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=update_op,
final_ops={
'accuracy': (accuracy, update_op),
'eval_steps': evaluation._get_or_create_eval_step()
},
hooks=[
evaluation._StopAfterNEvalsHook(None),
])
self.assertTrue(final_ops_values['accuracy'] > .99)
# Runs evaluation for 4 iterations. First 2 evaluate full batch of 6 inputs
# each; the 3rd iter evaluates the remaining 4 inputs, and the last one
# triggers an error which stops evaluation.
self.assertEqual(final_ops_values['eval_steps'], 4)
def testEvalOpAndFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
num_evals = 5
final_increment = 9.0
my_var = local_variable(0.0, name='MyVar')
eval_ops = state_ops.assign_add(my_var, 1.0)
final_ops = array_ops.identity(my_var) + final_increment
final_hooks = [evaluation._StopAfterNEvalsHook(num_evals),]
initial_hooks = list(final_hooks)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=eval_ops,
final_ops={'value': final_ops},
hooks=final_hooks)
self.assertEqual(final_ops_values['value'], num_evals + final_increment)
self.assertEqual(initial_hooks, final_hooks)
def testMultiEvalStepIncrements(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
num_evals = 6
my_var = local_variable(0.0, name='MyVar')
# In eval ops, we also increase the eval step one more time.
eval_ops = [state_ops.assign_add(my_var, 1.0),
state_ops.assign_add(
evaluation._get_or_create_eval_step(), 1, use_locking=True)]
expect_eval_update_counts = num_evals // 2
final_ops = array_ops.identity(my_var)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=eval_ops,
final_ops={'value': final_ops},
hooks=[evaluation._StopAfterNEvalsHook(num_evals),])
self.assertEqual(final_ops_values['value'], expect_eval_update_counts)
def testOnlyFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
final_increment = 9.0
my_var = local_variable(0.0, name='MyVar')
final_ops = array_ops.identity(my_var) + final_increment
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
self.assertEqual(final_ops_values['value'], final_increment)
if __name__ == '__main__':
test.main()
|
EvaluateOnceTest
|
python
|
huggingface__transformers
|
src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py
|
{
"start": 42537,
"end": 48242
}
|
class ____(nn.Module):
def __init__(self, config: MMGroundingDinoConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MMGroundingDinoMultiscaleDeformableAttention(
config, num_heads=config.encoder_attention_heads, n_points=config.encoder_n_points
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes of the backbone feature maps.
spatial_shapes_list (`list[tuple[int, int]]`, *optional*):
Spatial shapes of the backbone feature maps (but as list for export compatibility).
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Apply Multi-scale Deformable Attention Module on the multi-scale feature maps.
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return hidden_states, attn_weights
# Based on https://github.com/IDEA-Research/GroundingDINO/blob/2b62f419c292ca9c518daae55512fabc3fead4a4/groundingdino/models/GroundingDINO/utils.py#L24
def get_sine_pos_embed(
pos_tensor: torch.Tensor, num_pos_feats: int = 128, temperature: int = 10000, exchange_xy: bool = True
) -> Tensor:
"""
Generate sine position embeddings from a position tensor.
Args:
pos_tensor (torch.Tensor):
Tensor containing positions. Shape: [..., n].
num_pos_feats (`int`, *optional*, defaults to 128):
Projected shape for each float in the tensor.
temperature (`int`, *optional*, defaults to 10000):
Temperature in the sine/cosine function.
exchange_xy (`bool`, *optional*, defaults to `True`):
Exchange pos x and pos y. For example, input tensor is [x,y], the results will be [pos(y), pos(x)].
Returns:
position_embeddings (torch.Tensor): shape: [..., n * hidden_size].
"""
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
def sine_func(x: torch.Tensor):
sin_x = x * scale / dim_t
sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)
return sin_x
pos_tensor = pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)
position_embeddings = [sine_func(x) for x in pos_tensor]
if exchange_xy:
position_embeddings[0], position_embeddings[1] = position_embeddings[1], position_embeddings[0]
position_embeddings = torch.cat(position_embeddings, dim=-1)
return position_embeddings
|
MMGroundingDinoDeformableLayer
|
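A minimal shape check for the get_sine_pos_embed helper defined above (the batch and query sizes are arbitrary assumptions; the helper expects a 3-D position tensor):
import torch
reference_points = torch.rand(2, 100, 4)                      # (batch, queries, 4 box coordinates)
pos = get_sine_pos_embed(reference_points, num_pos_feats=128)
print(pos.shape)                                               # torch.Size([2, 100, 512]) = 4 coords * 128 features each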
python
|
neetcode-gh__leetcode
|
python/0745-prefix-and-suffix-search.py
|
{
"start": 0,
"end": 171
}
|
class ____:
def __init__(self):
self.children = {} # Dictionary to store child nodes
self.word = -1 # Store the index of the word at this node
|
TrieNode
|
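A small sketch (assumed usage, not part of the original file) of how prefix/suffix-search code might insert a word into a trie built from the TrieNode above:
root = TrieNode()
word, index = "apple", 0
node = root
for ch in word:
    if ch not in node.children:
        node.children[ch] = TrieNode()
    node = node.children[ch]
    node.word = index   # every node on the path remembers the latest word index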
python
|
pytorch__pytorch
|
torch/_dynamo/variables/misc.py
|
{
"start": 50292,
"end": 53717
}
|
class ____(VariableTracker):
def __init__(self, method_wrapper, **kwargs) -> None:
super().__init__(**kwargs)
self.method_wrapper = method_wrapper
self._builtin_fns = {}
def call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
if is_tensor_base_attr_getter(self.method_wrapper) and isinstance(
args[0], variables.TensorVariable
):
if not (len(args) == 1 and len(kwargs) == 0):
raise_type_error_exc(
tx, "tensor attribute getter takes exactly one argument"
)
return args[0].var_getattr(tx, self.method_wrapper.__self__.__name__)
# method-wrapper variables are common in __init__ calls. For example,
# str("foo").__init__ is a method-wrapper. These method wrappers point
# to C functions. Here we intercept if these method-wrappers are from
# builtins and then call the function counterpart directly by obtaining
# the self object.
self_obj = self.method_wrapper.__self__
wrapper_name = self.method_wrapper.__name__
# TODO(dynamo-team) - We can perhaps expand the scope to more names and
# more builtins.
if wrapper_name == "__init__":
fn_obj = type(self_obj).__init__
if fn_obj is object.__init__:
return variables.BuiltinVariable(object).call_method(
tx, wrapper_name, [self_obj, *args], kwargs
)
elif (
sys.version_info >= (3, 14)
# for some reason, even if the below check passes,
# self.method_wrapper may not be the same as type.__dict__["__annotations__"].__get__
and self_obj is type.__dict__["__annotations__"]
and wrapper_name == "__get__"
):
from .builder import SourcelessBuilder
if len(args) == 1 and not kwargs:
try:
return SourcelessBuilder.create(
tx, self.method_wrapper(args[0].as_python_constant())
)
except AttributeError:
raise_observed_exception(AttributeError, tx)
except AsPythonConstantNotImplementedError:
pass
unimplemented(
gb_type="unsupported type.__dict__['__annotations__'].__get__ call",
context=f"call_function {self}, args: {args}, kwargs: {kwargs}",
explanation="`torch.compile` only supports calling type.__dict__['__annotations__'].__get__ "
"on a single constant argument (i.e. a type).",
hints=[
"Make sure your call to type.__dict__['__annotations__'] only has "
"one positional argument (no keyword arguments).",
"Make sure the argument to type.__dict__['__annotations__'] is a constant "
"(i.e. type). For example, `object`, `int`, `MyCustomClass`.",
*graph_break_hints.SUPPORTABLE,
],
)
return super().call_function(tx, args, kwargs)
def is_python_constant(self):
return True
def as_python_constant(self):
return self.method_wrapper
|
MethodWrapperVariable
|
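A quick illustration of the "method-wrapper" objects the comments above refer to (CPython behaviour):
s = "foo"
mw = s.__init__                        # bound wrapper around a C slot
print(type(mw).__name__)               # method-wrapper
print(mw.__self__ is s, mw.__name__)   # True __init__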
python
|
google__pytype
|
pytype/tests/test_operators3.py
|
{
"start": 672,
"end": 892
}
|
class ____(test_base.BaseTest, test_utils.OperatorsTestMixin):
"""Tests for in-place operators."""
def test_div(self):
self.check_inplace("itruediv", "/=")
if __name__ == "__main__":
test_base.main()
|
InplaceTest
|
python
|
coleifer__peewee
|
tests/models.py
|
{
"start": 154200,
"end": 154326
}
|
class ____(TestModel):
user = ForeignKeyField(TUser, backref='transactions')
amount = FloatField(default=0.)
|
Transaction
|
python
|
run-llama__llama_index
|
llama-index-integrations/tools/llama-index-tools-cassandra/llama_index/tools/cassandra/base.py
|
{
"start": 345,
"end": 3086
}
|
class ____(BaseToolSpec):
"""Base tool for interacting with an Apache Cassandra database."""
db: CassandraDatabase = Field(exclude=True)
spec_functions = [
"cassandra_db_query",
"cassandra_db_schema",
"cassandra_db_select_table_data",
]
def __init__(self, db: CassandraDatabase) -> None:
"""DB session in context."""
self.db = db
def cassandra_db_query(self, query: str) -> List[Document]:
"""
Execute a CQL query and return the results as a list of Documents.
Args:
query (str): A CQL query to execute.
Returns:
List[Document]: A list of Document objects, each containing data from a row.
"""
documents = []
result = self.db.run_no_throw(query, fetch="Cursor")
for row in result:
doc_str = ", ".join([str(value) for value in row])
documents.append(Document(text=doc_str))
return documents
def cassandra_db_schema(self, keyspace: str) -> List[Document]:
"""
Input to this tool is a keyspace name, output is a table description
of Apache Cassandra tables.
If the query is not correct, an error message will be returned.
If an error is returned, report back to the user that the keyspace
doesn't exist and stop.
Args:
keyspace (str): The name of the keyspace for which to return the schema.
Returns:
List[Document]: A list of Document objects, each containing a table description.
"""
return [Document(text=self.db.get_keyspace_tables_str(keyspace))]
def cassandra_db_select_table_data(
self, keyspace: str, table: str, predicate: str, limit: int
) -> List[Document]:
"""
Tool for getting data from a table in an Apache Cassandra database.
Use the WHERE clause to specify the predicate for the query that uses the
primary key. A blank predicate will return all rows. Avoid this if possible.
Use the limit to specify the number of rows to return. A blank limit will
return all rows.
Args:
keyspace (str): The name of the keyspace containing the table.
table (str): The name of the table for which to return data.
predicate (str): The predicate for the query that uses the primary key.
limit (int): The maximum number of rows to return.
Returns:
List[Document]: A list of Document objects, each containing a row of data.
"""
return [
Document(text=self.db.get_table_data(keyspace, table, predicate, limit))
]
|
CassandraDatabaseToolSpec
|
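A hedged usage sketch for the tool spec above; how the CassandraDatabase object is constructed is an assumption and depends on the integration's own helpers:
# `CassandraDatabase(...)` arguments below are hypothetical placeholders
db = CassandraDatabase(session=session, keyspace="my_keyspace")
spec = CassandraDatabaseToolSpec(db=db)
schema_docs = spec.cassandra_db_schema("my_keyspace")
row_docs = spec.cassandra_db_select_table_data("my_keyspace", "users", "WHERE id = 1", 10)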
python
|
huggingface__transformers
|
src/transformers/models/gpt_neox/modeling_gpt_neox.py
|
{
"start": 31488,
"end": 33720
}
|
class ____(GPTNeoXPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.gpt_neox = GPTNeoXModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> QuestionAnsweringModelOutput:
outputs: BaseModelOutputWithPast = self.gpt_neox(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = outputs.last_hidden_state
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
loss = None
if start_positions is not None and end_positions is not None:
loss = self.loss_function(start_logits, end_logits, start_positions, end_positions)
return QuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
|
GPTNeoXForQuestionAnswering
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/asymmetric/dh.py
|
{
"start": 1258,
"end": 2422
}
|
class ____(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def key_size(self) -> int:
"""
The bit length of the prime modulus.
"""
@abc.abstractmethod
def parameters(self) -> DHParameters:
"""
The DHParameters object associated with this public key.
"""
@abc.abstractmethod
def public_numbers(self) -> DHPublicNumbers:
"""
Returns a DHPublicNumbers.
"""
@abc.abstractmethod
def public_bytes(
self,
encoding: _serialization.Encoding,
format: _serialization.PublicFormat,
) -> bytes:
"""
Returns the key serialized as bytes.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __copy__(self) -> DHPublicKey:
"""
Returns a copy.
"""
@abc.abstractmethod
def __deepcopy__(self, memo: dict) -> DHPublicKey:
"""
Returns a deep copy.
"""
DHPublicKeyWithSerialization = DHPublicKey
DHPublicKey.register(rust_openssl.dh.DHPublicKey)
|
DHPublicKey
|
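A short, concrete example of obtaining and serializing a DH public key through this interface (standard cryptography API; parameter generation can be slow for large key sizes):
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import dh
parameters = dh.generate_parameters(generator=2, key_size=2048)
public_key = parameters.generate_private_key().public_key()
print(public_key.key_size)   # 2048
pem = public_key.public_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PublicFormat.SubjectPublicKeyInfo,
)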
python
|
pytest-dev__pytest
|
src/_pytest/config/exceptions.py
|
{
"start": 175,
"end": 315
}
|
class ____(Exception):
"""Raised when pytest should print its help to skip the rest of the
argument parsing and validation."""
|
PrintHelp
|
python
|
huggingface__transformers
|
src/transformers/models/llama4/modeling_llama4.py
|
{
"start": 33564,
"end": 36216
}
|
class ____(nn.Module):
def __init__(self, config: Llama4VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = config.hidden_size // config.num_attention_heads
self.num_key_value_groups = 1
self.attention_dropout = config.attention_dropout
self.scaling = self.head_dim**-0.5
self.q_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=True)
self.k_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=True)
self.v_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=True)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.embed_dim, bias=True)
def forward(
self,
hidden_states: torch.Tensor,
freqs_ci: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape)
key_states = self.k_proj(hidden_states).view(hidden_shape)
value_states = self.v_proj(hidden_states).view(hidden_shape)
query_states, key_states = vision_apply_rotary_emb(query_states, key_states, freqs_ci=freqs_ci)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
attention_interface: Callable = vision_eager_attention_forward
# flex_attention is disabled because it breaks on TP 8: the embed dim is 88, which is not a power of 2
if self.config._attn_implementation not in ["eager", "flex_attention"]:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
None,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=None, # TODO Might be enforced here for TP compatibility as scaling is not just sqrt(head_dim)
is_causal=False, # HAS TO BE ENFORCED
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
Llama4VisionAttention
|
python
|
kamyu104__LeetCode-Solutions
|
Python/decode-the-slanted-ciphertext.py
|
{
"start": 29,
"end": 864
}
|
class ____(object):
def decodeCiphertext(self, encodedText, rows):
"""
:type encodedText: str
:type rows: int
:rtype: str
"""
cols = len(encodedText)//rows
k = len(encodedText)
for i in reversed(xrange(cols)):
for j in reversed(xrange(i, len(encodedText), cols+1)):
if encodedText[j] != ' ':
k = j
break
else:
continue
break
result = []
for i in xrange(cols):
for j in xrange(i, len(encodedText), cols+1):
result.append(encodedText[j])
if j == k:
break
else:
continue
break
return "".join(result)
# Time: O(n)
# Space: O(n)
|
Solution
|
python
|
python__mypy
|
mypy/checker.py
|
{
"start": 405183,
"end": 419521
}
|
class ____(Generic[TKey, TValue]):
"""An variation of the union-find algorithm/data structure where instead of keeping
track of just disjoint sets, we keep track of disjoint dicts -- keep track of multiple
Set[Key] -> Set[Value] mappings, where each mapping's keys are guaranteed to be disjoint.
This data structure is currently used exclusively by 'group_comparison_operands' below
to merge chains of '==' and 'is' comparisons when two or more chains use the same expression
in best-case O(n), where n is the number of operands.
Specifically, the `add_mapping()` function and `items()` functions will take on average
O(k + v) and O(n) respectively, where k and v are the number of keys and values we're adding
for a given chain. Note that k <= n and v <= n.
We hit these average/best-case scenarios for most user code: e.g. when the user has just
a single chain like 'a == b == c == d == ...' or multiple disjoint chains like
'a==b < c==d < e==f < ...'. (Note that a naive iterative merging would be O(n^2) for
the latter case).
In comparison, this data structure will make 'group_comparison_operands' have a worst-case
runtime of O(n*log(n)): 'add_mapping()' and 'items()' are worst-case O(k*log(n) + v) and
O(k*log(n)) respectively. This happens only in the rare case where the user keeps repeatedly
making disjoint mappings before merging them in a way that persistently dodges the path
compression optimization in '_lookup_root_id', which would end up constructing a single
tree of height log_2(n). This makes root lookups no longer amortized constant time when we
finally call 'items()'.
"""
def __init__(self) -> None:
# Each key maps to a unique ID
self._key_to_id: dict[TKey, int] = {}
# Each id points to the parent id, forming a forest of upwards-pointing trees. If the
# current id already is the root, it points to itself. We gradually flatten these trees
# as we perform root lookups: eventually all nodes point directly to its root.
self._id_to_parent_id: dict[int, int] = {}
# Each root id in turn maps to the set of values.
self._root_id_to_values: dict[int, set[TValue]] = {}
def add_mapping(self, keys: set[TKey], values: set[TValue]) -> None:
"""Adds a 'Set[TKey] -> Set[TValue]' mapping. If there already exists a mapping
containing one or more of the given keys, we merge the input mapping with the old one.
Note that the given set of keys must be non-empty -- otherwise, nothing happens.
"""
if not keys:
return
subtree_roots = [self._lookup_or_make_root_id(key) for key in keys]
new_root = subtree_roots[0]
root_values = self._root_id_to_values[new_root]
root_values.update(values)
for subtree_root in subtree_roots[1:]:
if subtree_root == new_root or subtree_root not in self._root_id_to_values:
continue
self._id_to_parent_id[subtree_root] = new_root
root_values.update(self._root_id_to_values.pop(subtree_root))
def items(self) -> list[tuple[set[TKey], set[TValue]]]:
"""Returns all disjoint mappings in key-value pairs."""
root_id_to_keys: dict[int, set[TKey]] = {}
for key in self._key_to_id:
root_id = self._lookup_root_id(key)
if root_id not in root_id_to_keys:
root_id_to_keys[root_id] = set()
root_id_to_keys[root_id].add(key)
output = []
for root_id, keys in root_id_to_keys.items():
output.append((keys, self._root_id_to_values[root_id]))
return output
def _lookup_or_make_root_id(self, key: TKey) -> int:
if key in self._key_to_id:
return self._lookup_root_id(key)
else:
new_id = len(self._key_to_id)
self._key_to_id[key] = new_id
self._id_to_parent_id[new_id] = new_id
self._root_id_to_values[new_id] = set()
return new_id
def _lookup_root_id(self, key: TKey) -> int:
i = self._key_to_id[key]
while i != self._id_to_parent_id[i]:
# Optimization: make keys directly point to their grandparents to speed up
# future traversals. This prevents degenerate trees of height n from forming.
new_parent = self._id_to_parent_id[self._id_to_parent_id[i]]
self._id_to_parent_id[i] = new_parent
i = new_parent
return i
def group_comparison_operands(
pairwise_comparisons: Iterable[tuple[str, Expression, Expression]],
operand_to_literal_hash: Mapping[int, Key],
operators_to_group: set[str],
) -> list[tuple[str, list[int]]]:
"""Group a series of comparison operands together chained by any operand
in the 'operators_to_group' set. All other pairwise operands are kept in
groups of size 2.
For example, suppose we have the input comparison expression:
x0 == x1 == x2 < x3 < x4 is x5 is x6 is not x7 is not x8
If we get these expressions in a pairwise way (e.g. by calling ComparisonExpr's
'pairwise()' method), we get the following as input:
[('==', x0, x1), ('==', x1, x2), ('<', x2, x3), ('<', x3, x4),
('is', x4, x5), ('is', x5, x6), ('is not', x6, x7), ('is not', x7, x8)]
If `operators_to_group` is the set {'==', 'is'}, this function will produce
the following "simplified operator list":
[("==", [0, 1, 2]), ("<", [2, 3]), ("<", [3, 4]),
("is", [4, 5, 6]), ("is not", [6, 7]), ("is not", [7, 8])]
Note that (a) we yield *indices* to the operands rather than the operand
expressions themselves and that (b) operands used in a consecutive chain
of '==' or 'is' are grouped together.
If two of these chains happen to contain operands with the same underlying
literal hash (e.g. are assignable and correspond to the same expression),
we combine those chains together. For example, if we had:
same == x < y == same
...and if 'operand_to_literal_hash' contained the same values for the indices
0 and 3, we'd produce the following output:
[("==", [0, 1, 2, 3]), ("<", [1, 2])]
But if the 'operand_to_literal_hash' did *not* contain an entry, we'd instead
default to returning:
[("==", [0, 1]), ("<", [1, 2]), ("==", [2, 3])]
This function is currently only used to assist with type-narrowing refinements
and is extracted out to a helper function so we can unit test it.
"""
groups: dict[str, DisjointDict[Key, int]] = {op: DisjointDict() for op in operators_to_group}
simplified_operator_list: list[tuple[str, list[int]]] = []
last_operator: str | None = None
current_indices: set[int] = set()
current_hashes: set[Key] = set()
for i, (operator, left_expr, right_expr) in enumerate(pairwise_comparisons):
if last_operator is None:
last_operator = operator
if current_indices and (operator != last_operator or operator not in operators_to_group):
# If some of the operands in the chain are assignable, defer adding it: we might
# end up needing to merge it with other chains that appear later.
if not current_hashes:
simplified_operator_list.append((last_operator, sorted(current_indices)))
else:
groups[last_operator].add_mapping(current_hashes, current_indices)
last_operator = operator
current_indices = set()
current_hashes = set()
# Note: 'i' corresponds to the left operand index, so 'i + 1' is the
# right operand.
current_indices.add(i)
current_indices.add(i + 1)
# We only ever want to combine operands/combine chains for these operators
if operator in operators_to_group:
left_hash = operand_to_literal_hash.get(i)
if left_hash is not None:
current_hashes.add(left_hash)
right_hash = operand_to_literal_hash.get(i + 1)
if right_hash is not None:
current_hashes.add(right_hash)
if last_operator is not None:
if not current_hashes:
simplified_operator_list.append((last_operator, sorted(current_indices)))
else:
groups[last_operator].add_mapping(current_hashes, current_indices)
# Now that we know which chains happen to contain the same underlying expressions
# and can be merged together, add in this info back to the output.
for operator, disjoint_dict in groups.items():
for keys, indices in disjoint_dict.items():
simplified_operator_list.append((operator, sorted(indices)))
# For stability, reorder list by the first operand index to appear
simplified_operator_list.sort(key=lambda item: item[1][0])
return simplified_operator_list
def is_typed_callable(c: Type | None) -> bool:
c = get_proper_type(c)
if not c or not isinstance(c, CallableType):
return False
return not all(
isinstance(t, AnyType) and t.type_of_any == TypeOfAny.unannotated
for t in get_proper_types(c.arg_types + [c.ret_type])
)
def is_untyped_decorator(typ: Type | None) -> bool:
typ = get_proper_type(typ)
if not typ:
return True
elif isinstance(typ, CallableType):
return not is_typed_callable(typ)
elif isinstance(typ, Instance):
method = typ.type.get_method("__call__")
if method:
if isinstance(method, Decorator):
return is_untyped_decorator(method.func.type) or is_untyped_decorator(
method.var.type
)
if isinstance(method.type, Overloaded):
return any(is_untyped_decorator(item) for item in method.type.items)
else:
return not is_typed_callable(method.type)
else:
return False
elif isinstance(typ, Overloaded):
return any(is_untyped_decorator(item) for item in typ.items)
return True
def is_static(func: FuncBase | Decorator) -> bool:
if isinstance(func, Decorator):
return is_static(func.func)
elif isinstance(func, FuncBase):
return func.is_static
assert False, f"Unexpected func type: {type(func)}"
def is_property(defn: SymbolNode) -> bool:
if isinstance(defn, FuncDef):
return defn.is_property
if isinstance(defn, Decorator):
return defn.func.is_property
if isinstance(defn, OverloadedFuncDef):
if defn.items and isinstance(defn.items[0], Decorator):
return defn.items[0].func.is_property
return False
def is_settable_property(defn: SymbolNode | None) -> TypeGuard[OverloadedFuncDef]:
if isinstance(defn, OverloadedFuncDef):
if defn.items and isinstance(defn.items[0], Decorator):
return defn.items[0].func.is_property
return False
def is_custom_settable_property(defn: SymbolNode | None) -> bool:
"""Check if a node is a settable property with a non-trivial setter type.
By non-trivial here we mean that it is known (i.e. definition was already type
checked), it is not Any, and it is different from the property getter type.
"""
if defn is None:
return False
if not is_settable_property(defn):
return False
first_item = defn.items[0]
assert isinstance(first_item, Decorator)
if not first_item.var.is_settable_property:
return False
var = first_item.var
if var.type is None or var.setter_type is None or isinstance(var.type, PartialType):
# The caller should defer in case of partial types or not ready variables.
return False
setter_type = var.setter_type.arg_types[1]
if isinstance(get_proper_type(setter_type), AnyType):
return False
return not is_same_type(get_property_type(get_proper_type(var.type)), setter_type)
def get_property_type(t: ProperType) -> ProperType:
if isinstance(t, CallableType):
return get_proper_type(t.ret_type)
if isinstance(t, Overloaded):
return get_proper_type(t.items[0].ret_type)
return t
def is_subset_no_promote(left: Type, right: Type) -> bool:
return is_subtype(left, right, ignore_promotions=True, always_covariant=True)
def is_overlapping_types_for_overload(left: Type, right: Type) -> bool:
# Note that among other effects 'overlap_for_overloads' flag will effectively
# ignore possible overlap between type variables and None. This is technically
# unsafe, but unsafety is tiny and this prevents some common use cases like:
# @overload
# def foo(x: None) -> None: ..
# @overload
# def foo(x: T) -> Foo[T]: ...
return is_overlapping_types(
left,
right,
ignore_promotions=True,
prohibit_none_typevar_overlap=True,
overlap_for_overloads=True,
)
def is_private(node_name: str) -> bool:
"""Check if node is private to class definition."""
return node_name.startswith("__") and not node_name.endswith("__")
def is_string_literal(typ: Type) -> bool:
strs = try_getting_str_literals_from_type(typ)
return strs is not None and len(strs) == 1
def has_bool_item(typ: ProperType) -> bool:
"""Return True if type is 'bool' or a union with a 'bool' item."""
if is_named_instance(typ, "builtins.bool"):
return True
if isinstance(typ, UnionType):
return any(is_named_instance(item, "builtins.bool") for item in typ.items)
return False
def collapse_walrus(e: Expression) -> Expression:
"""If an expression is an AssignmentExpr, pull out the assignment target.
We don't make any attempt to pull out all the targets in code like `x := (y := z)`.
We could support narrowing those if that sort of code turns out to be common.
"""
if isinstance(e, AssignmentExpr):
return e.target
return e
def find_last_var_assignment_line(n: Node, v: Var) -> int:
"""Find the highest line number of a potential assignment to variable within node.
This supports local and global variables.
Return -1 if no assignment was found.
"""
visitor = VarAssignVisitor(v)
n.accept(visitor)
return visitor.last_line
|
DisjointDict
|
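A minimal sketch of the merging behaviour the DisjointDict docstring describes (chains sharing a key are combined; set print order may vary):
d: DisjointDict[str, int] = DisjointDict()
d.add_mapping({"a", "b"}, {0, 1})
d.add_mapping({"b", "c"}, {2})   # shares "b", so it merges with the first mapping
d.add_mapping({"x"}, {9})
print(d.items())                 # [({'a', 'b', 'c'}, {0, 1, 2}), ({'x'}, {9})]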
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_str_returned.py
|
{
"start": 413,
"end": 522
}
|
class ____(type):
def __str__(cls):
return "some str"
@six.add_metaclass(StrMetaclass)
|
StrMetaclass
|
python
|
weaviate__weaviate-python-client
|
weaviate/connect/integrations.py
|
{
"start": 93,
"end": 458
}
|
class ____(BaseModel):
model_config = ConfigDict(strict=True)
def _to_header(self) -> Dict[str, str]:
# headers have to be strings
return_dict = cast(dict, self.model_dump(by_alias=True, exclude_none=True))
for key, value in return_dict.items():
return_dict[key] = str(value)
return return_dict
|
_IntegrationConfig
|
python
|
ray-project__ray
|
python/ray/serve/schema.py
|
{
"start": 32187,
"end": 32570
}
|
class ____(str, Enum):
"""The current status of the proxy."""
STARTING = "STARTING"
HEALTHY = "HEALTHY"
UNHEALTHY = "UNHEALTHY"
DRAINING = "DRAINING"
# The DRAINED status is a momentary state
# just before the proxy is removed
# so this status won't show up on the dashboard.
DRAINED = "DRAINED"
@PublicAPI(stability="alpha")
@dataclass
|
ProxyStatus
|
python
|
kamyu104__LeetCode-Solutions
|
Python/prime-arrangements.py
|
{
"start": 145,
"end": 1073
}
|
class ____(object):
def numPrimeArrangements(self, n):
"""
:type n: int
:rtype: int
"""
def count_primes(n):
if n <= 1:
return 0
is_prime = [True]*((n+1)//2)
cnt = len(is_prime)
for i in xrange(3, n+1, 2):
if i*i > n:
break
if not is_prime[i//2]:
continue
for j in xrange(i*i, n+1, 2*i):
if not is_prime[j//2]:
continue
cnt -= 1
is_prime[j//2] = False
return cnt
def factorial(n):
result = 1
for i in xrange(2, n+1):
result = (result*i)%MOD
return result
MOD = 10**9+7
cnt = count_primes(n)
return factorial(cnt) * factorial(n-cnt) % MOD
|
Solution
|
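A worked check of the counting above (the solution is written in Python 2 style, hence xrange): for n = 5 the primes {2, 3, 5} can only occupy the 3 prime-numbered positions, giving 3! * 2! = 12 arrangements.
print(Solution().numPrimeArrangements(5))   # 12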
python
|
pydata__xarray
|
asv_bench/benchmarks/dataset_io.py
|
{
"start": 11875,
"end": 13998
}
|
class ____(IOMultipleNetCDF):
def setup(self):
# TODO: Lazily skipped in CI as it is very demanding and slow.
# Improve times and remove errors.
_skip_slow()
requires_dask()
self.make_ds()
self.format = "NETCDF4"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
).load()
def time_open_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
)
def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
)
def time_open_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
)
def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
)
|
IOReadMultipleNetCDF4Dask
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/views/oidc.py
|
{
"start": 1248,
"end": 4784
}
|
class ____(OIDCOnlyMixin, View):
"""
View used to show oidc provider configuration information per
`OpenID Provider Metadata <https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata>`_
"""
def get(self, request, *args, **kwargs):
issuer_url = oauth2_settings.OIDC_ISS_ENDPOINT
if not issuer_url:
issuer_url = oauth2_settings.oidc_issuer(request)
authorization_endpoint = request.build_absolute_uri(reverse("oauth2_provider:authorize"))
token_endpoint = request.build_absolute_uri(reverse("oauth2_provider:token"))
userinfo_endpoint = oauth2_settings.OIDC_USERINFO_ENDPOINT or request.build_absolute_uri(
reverse("oauth2_provider:user-info")
)
jwks_uri = request.build_absolute_uri(reverse("oauth2_provider:jwks-info"))
if oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ENABLED:
end_session_endpoint = request.build_absolute_uri(
reverse("oauth2_provider:rp-initiated-logout")
)
else:
parsed_url = urlparse(oauth2_settings.OIDC_ISS_ENDPOINT)
host = parsed_url.scheme + "://" + parsed_url.netloc
authorization_endpoint = "{}{}".format(host, reverse("oauth2_provider:authorize"))
token_endpoint = "{}{}".format(host, reverse("oauth2_provider:token"))
userinfo_endpoint = oauth2_settings.OIDC_USERINFO_ENDPOINT or "{}{}".format(
host, reverse("oauth2_provider:user-info")
)
jwks_uri = "{}{}".format(host, reverse("oauth2_provider:jwks-info"))
if oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ENABLED:
end_session_endpoint = "{}{}".format(host, reverse("oauth2_provider:rp-initiated-logout"))
signing_algorithms = [Application.HS256_ALGORITHM]
if oauth2_settings.OIDC_RSA_PRIVATE_KEY:
signing_algorithms = [Application.RS256_ALGORITHM, Application.HS256_ALGORITHM]
validator_class = oauth2_settings.OAUTH2_VALIDATOR_CLASS
validator = validator_class()
oidc_claims = list(set(validator.get_discovery_claims(request)))
scopes_class = oauth2_settings.SCOPES_BACKEND_CLASS
scopes = scopes_class()
scopes_supported = [scope for scope in scopes.get_available_scopes()]
data = {
"issuer": issuer_url,
"authorization_endpoint": authorization_endpoint,
"token_endpoint": token_endpoint,
"userinfo_endpoint": userinfo_endpoint,
"jwks_uri": jwks_uri,
"scopes_supported": scopes_supported,
"response_types_supported": oauth2_settings.OIDC_RESPONSE_TYPES_SUPPORTED,
"subject_types_supported": oauth2_settings.OIDC_SUBJECT_TYPES_SUPPORTED,
"id_token_signing_alg_values_supported": signing_algorithms,
"token_endpoint_auth_methods_supported": (
oauth2_settings.OIDC_TOKEN_ENDPOINT_AUTH_METHODS_SUPPORTED
),
"code_challenge_methods_supported": [key for key, _ in AbstractGrant.CODE_CHALLENGE_METHODS],
"claims_supported": oidc_claims,
}
if oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ENABLED:
data["end_session_endpoint"] = end_session_endpoint
response = JsonResponse(data)
response["Access-Control-Allow-Origin"] = "*"
return response
@method_decorator(login_not_required, name="dispatch")
|
ConnectDiscoveryInfoView
|
python
|
h5py__h5py
|
h5py/_hl/dataset.py
|
{
"start": 9131,
"end": 10210
}
|
class ____(AbstractView):
"""Wrapper to decode strings on reading the dataset"""
def __init__(self, dset, encoding, errors='strict'):
super().__init__(dset)
self.encoding = encoding
self.errors = errors
@property
def dtype(self):
return numpy.dtype(object)
def __getitem__(self, idx):
bytes_arr = self._dset[idx]
# numpy.char.decode() seems like the obvious thing to use. But it only
# accepts numpy string arrays, not object arrays of bytes (which we
# return from HDF5 variable-length strings). And the numpy
# implementation is not faster than doing it with a loop; in fact, by
# not converting the result to a numpy unicode array, the
# naive way can be faster! (Comparing with numpy 1.18.4, June 2020)
if numpy.isscalar(bytes_arr):
return bytes_arr.decode(self.encoding, self.errors)
return numpy.array([
b.decode(self.encoding, self.errors) for b in bytes_arr.flat
], dtype=object).reshape(bytes_arr.shape)
|
AsStrView
|
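A wrapper like the one above is what h5py returns from Dataset.asstr(); a hedged usage sketch (file and dataset names are assumptions, and the wrapper class name may differ across h5py versions):
import h5py
with h5py.File("data.h5", "r") as f:
    ds = f["names"]                        # variable-length string dataset stored as bytes
    names = ds.asstr()[:]                  # decoded to Python str on read
    first = ds.asstr(encoding="utf-8")[0]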
python
|
huggingface__transformers
|
src/transformers/models/phi3/modular_phi3.py
|
{
"start": 9187,
"end": 10725
}
|
class ____(MistralForCausalLM):
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
# process
# The first time the input length crosses the long/short factor switching point, force the cache to be recomputed.
# This makes that single token position slower, but it is better than the current failure.
if (
past_key_values
and hasattr(self.config, "original_max_position_embeddings")
and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
):
past_length = cache_position[0]
if past_length <= self.config.original_max_position_embeddings:
past_key_values = None
model_inputs = GenerationMixin.prepare_inputs_for_generation(
self,
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
position_ids=position_ids,
use_cache=use_cache,
logits_to_keep=logits_to_keep,
**kwargs,
)
return model_inputs
|
Phi3ForCausalLM
|
python
|
django__django
|
django/utils/deprecation.py
|
{
"start": 10242,
"end": 12328
}
|
class ____:
sync_capable = True
async_capable = True
def __init__(self, get_response):
if get_response is None:
raise ValueError("get_response must be provided.")
self.get_response = get_response
# If get_response is a coroutine function, turns us into async mode so
# a thread is not consumed during a whole request.
self.async_mode = iscoroutinefunction(self.get_response)
if self.async_mode:
# Mark the class as async-capable, but do the actual switch inside
# __call__ to avoid swapping out dunder methods.
markcoroutinefunction(self)
super().__init__()
def __repr__(self):
return "<%s get_response=%s>" % (
self.__class__.__qualname__,
getattr(
self.get_response,
"__qualname__",
self.get_response.__class__.__name__,
),
)
def __call__(self, request):
# Exit out to async mode, if needed
if self.async_mode:
return self.__acall__(request)
response = None
if hasattr(self, "process_request"):
response = self.process_request(request)
response = response or self.get_response(request)
if hasattr(self, "process_response"):
response = self.process_response(request, response)
return response
async def __acall__(self, request):
"""
Async version of __call__ that is swapped in when an async request
is running.
"""
response = None
if hasattr(self, "process_request"):
response = await sync_to_async(
self.process_request,
thread_sensitive=True,
)(request)
response = response or await self.get_response(request)
if hasattr(self, "process_response"):
response = await sync_to_async(
self.process_response,
thread_sensitive=True,
)(request, response)
return response
|
MiddlewareMixin
|
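A small example of the style of middleware this mixin supports, using the process_request/process_response hooks it looks for (the header name is arbitrary):
import time
class TimingMiddleware(MiddlewareMixin):
    def process_request(self, request):
        request._start_time = time.monotonic()
    def process_response(self, request, response):
        response["X-Elapsed-Seconds"] = str(time.monotonic() - request._start_time)
        return response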
python
|
lazyprogrammer__machine_learning_examples
|
rl3/ddpg.py
|
{
"start": 1902,
"end": 10969
}
|
class ____:
def __init__(self, obs_dim, act_dim, size):
self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
self.rews_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr+1) % self.max_size
self.size = min(self.size+1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
return dict(s=self.obs1_buf[idxs],
s2=self.obs2_buf[idxs],
a=self.acts_buf[idxs],
r=self.rews_buf[idxs],
d=self.done_buf[idxs])
### Implement the DDPG algorithm ###
def ddpg(
env_fn,
ac_kwargs=dict(),
seed=0,
save_folder=None,
num_train_episodes=100,
test_agent_every=25,
replay_size=int(1e6),
gamma=0.99,
decay=0.995,
mu_lr=1e-3,
q_lr=1e-3,
batch_size=100,
start_steps=10000,
action_noise=0.1,
max_episode_length=1000):
tf.set_random_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(), env_fn()
# comment out this line if you don't want to record a video of the agent
if save_folder is not None:
test_env = gym.wrappers.Monitor(test_env, save_folder)
# get size of state space and action space
num_states = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
# Maximum value of action
# Assumes the action bounds are symmetric (low == -high) and identical for every action dimension,
# which may NOT be the case for all environments
action_max = env.action_space.high[0]
# Create Tensorflow placeholders (neural network inputs)
X = tf.placeholder(dtype=tf.float32, shape=(None, num_states)) # state
A = tf.placeholder(dtype=tf.float32, shape=(None, num_actions)) # action
X2 = tf.placeholder(dtype=tf.float32, shape=(None, num_states)) # next state
R = tf.placeholder(dtype=tf.float32, shape=(None,)) # reward
D = tf.placeholder(dtype=tf.float32, shape=(None,)) # done
# Main network outputs
with tf.variable_scope('main'):
mu, q, q_mu = CreateNetworks(X, A, num_actions, action_max, **ac_kwargs)
# Target networks
with tf.variable_scope('target'):
# We don't need the Q network output with arbitrary input action A
# because that's not actually used in our loss functions
# NOTE 1: The state input is X2, NOT X
# We only care about max_a{ Q(s', a) }
# Where this is equal to Q(s', mu(s'))
# This is because it's used in the target calculation: r + gamma * max_a{ Q(s',a) }
# Where s' = X2
# NOTE 2: We ignore the first 2 networks for the same reason
_, _, q_mu_targ = CreateNetworks(X2, A, num_actions, action_max, **ac_kwargs)
# Experience replay memory
replay_buffer = ReplayBuffer(obs_dim=num_states, act_dim=num_actions, size=replay_size)
# Target value for the Q-network loss
# We use stop_gradient to tell Tensorflow not to differentiate
# q_mu_targ wrt any params
# i.e. consider q_mu_targ values constant
q_target = tf.stop_gradient(R + gamma * (1 - D) * q_mu_targ)
# DDPG losses
mu_loss = -tf.reduce_mean(q_mu)
q_loss = tf.reduce_mean((q - q_target)**2)
# Train each network separately
mu_optimizer = tf.train.AdamOptimizer(learning_rate=mu_lr)
q_optimizer = tf.train.AdamOptimizer(learning_rate=q_lr)
mu_train_op = mu_optimizer.minimize(mu_loss, var_list=get_vars('main/mu'))
q_train_op = q_optimizer.minimize(q_loss, var_list=get_vars('main/q'))
# Use soft updates to update the target networks
target_update = tf.group(
[tf.assign(v_targ, decay*v_targ + (1 - decay)*v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))
]
)
# Copy main network params to target networks
target_init = tf.group(
[tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))
]
)
# boilerplate (and copy to the target networks!)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(target_init)
def get_action(s, noise_scale):
a = sess.run(mu, feed_dict={X: s.reshape(1,-1)})[0]
a += noise_scale * np.random.randn(num_actions)
return np.clip(a, -action_max, action_max)
test_returns = []
def test_agent(num_episodes=5):
t0 = datetime.now()
n_steps = 0
for j in range(num_episodes):
s, episode_return, episode_length, d = test_env.reset(), 0, 0, False
while not (d or (episode_length == max_episode_length)):
# Take deterministic actions at test time (noise_scale=0)
test_env.render()
s, r, d, _ = test_env.step(get_action(s, 0))
episode_return += r
episode_length += 1
n_steps += 1
print('test return:', episode_return, 'episode_length:', episode_length)
test_returns.append(episode_return)
# print("test steps per sec:", n_steps / (datetime.now() - t0).total_seconds())
# Main loop: play episode and train
returns = []
q_losses = []
mu_losses = []
num_steps = 0
for i_episode in range(num_train_episodes):
# reset env
s, episode_return, episode_length, d = env.reset(), 0, 0, False
while not (d or (episode_length == max_episode_length)):
# For the first `start_steps` steps, use randomly sampled actions
# in order to encourage exploration.
if num_steps > start_steps:
a = get_action(s, action_noise)
else:
a = env.action_space.sample()
# Keep track of the number of steps done
num_steps += 1
if num_steps == start_steps:
print("USING AGENT ACTIONS NOW")
# Step the env
s2, r, d, _ = env.step(a)
episode_return += r
episode_length += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d_store = False if episode_length == max_episode_length else d
# Store experience to replay buffer
replay_buffer.store(s, a, r, s2, d_store)
# Assign next state to be the current state on the next round
s = s2
# Perform the updates
for _ in range(episode_length):
batch = replay_buffer.sample_batch(batch_size)
feed_dict = {
X: batch['s'],
X2: batch['s2'],
A: batch['a'],
R: batch['r'],
D: batch['d']
}
# Q network update
# Note: plot the Q loss if you want
ql, _, _ = sess.run([q_loss, q, q_train_op], feed_dict)
q_losses.append(ql)
# Policy update
# (And target networks update)
# Note: plot the mu loss if you want
mul, _, _ = sess.run([mu_loss, mu_train_op, target_update], feed_dict)
mu_losses.append(mul)
print("Episode:", i_episode + 1, "Return:", episode_return, 'episode_length:', episode_length)
returns.append(episode_return)
# Test the agent
if i_episode > 0 and i_episode % test_agent_every == 0:
test_agent()
# on Mac, plotting results in an error, so just save the results for later
# if you're not on Mac, feel free to uncomment the below lines
np.savez('ddpg_results.npz', train=returns, test=test_returns, q_losses=q_losses, mu_losses=mu_losses)
# plt.plot(returns)
# plt.plot(smooth(np.array(returns)))
# plt.title("Train returns")
# plt.show()
# plt.plot(test_returns)
# plt.plot(smooth(np.array(test_returns)))
# plt.title("Test returns")
# plt.show()
# plt.plot(q_losses)
# plt.title('q_losses')
# plt.show()
# plt.plot(mu_losses)
# plt.title('mu_losses')
# plt.show()
def smooth(x):
# last 100
n = len(x)
y = np.zeros(n)
for i in range(n):
start = max(0, i - 99)
y[i] = float(x[start:(i+1)].sum()) / (i - start + 1)
return y
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument('--env', type=str, default='HalfCheetah-v2')
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--hidden_layer_sizes', type=int, default=300)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--num_train_episodes', type=int, default=200)
parser.add_argument('--save_folder', type=str, default='ddpg_monitor')
args = parser.parse_args()
ddpg(
lambda : gym.make(args.env),
ac_kwargs=dict(hidden_sizes=[args.hidden_layer_sizes]*args.num_layers),
gamma=args.gamma,
seed=args.seed,
save_folder=args.save_folder,
num_train_episodes=args.num_train_episodes,
)
|
ReplayBuffer
|
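A quick sanity sketch of the ReplayBuffer above (dimensions are arbitrary):
import numpy as np
buf = ReplayBuffer(obs_dim=3, act_dim=1, size=1000)
buf.store(np.zeros(3), np.array([0.1]), 1.0, np.ones(3), False)
batch = buf.sample_batch(batch_size=1)
print(batch['s'].shape, batch['a'].shape, batch['r'].shape)   # (1, 3) (1, 1) (1,)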
python
|
pandas-dev__pandas
|
pandas/core/internals/blocks.py
|
{
"start": 74017,
"end": 80782
}
|
class ____(NDArrayBackedExtensionBlock):
"""Block for datetime64[ns], timedelta64[ns]."""
__slots__ = ()
is_numeric = False
values: DatetimeArray | TimedeltaArray
# -----------------------------------------------------------------
# Constructor Helpers
def maybe_coerce_values(values: ArrayLike) -> ArrayLike:
"""
Input validation for values passed to __init__. Ensure that
any datetime64/timedelta64 dtypes are in nanoseconds. Ensure
that we do not have string dtypes.
Parameters
----------
values : np.ndarray or ExtensionArray
Returns
-------
values : np.ndarray or ExtensionArray
"""
# Caller is responsible for ensuring NumpyExtensionArray is already extracted.
if isinstance(values, np.ndarray):
values = ensure_wrapped_if_datetimelike(values)
if issubclass(values.dtype.type, str):
values = np.array(values, dtype=object)
if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None:
# freq is only stored in DatetimeIndex/TimedeltaIndex, not in Series/DataFrame
values = values._with_freq(None)
return values
def get_block_type(dtype: DtypeObj) -> type[Block]:
"""
Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block
"""
if isinstance(dtype, DatetimeTZDtype):
return DatetimeLikeBlock
elif isinstance(dtype, PeriodDtype):
return NDArrayBackedExtensionBlock
elif isinstance(dtype, ExtensionDtype):
# Note: need to be sure NumpyExtensionArray is unwrapped before we get here
return ExtensionBlock
# We use kind checks because it is much more performant
# than is_foo_dtype
kind = dtype.kind
if kind in "Mm":
return DatetimeLikeBlock
return NumpyBlock
def new_block_2d(
values: ArrayLike, placement: BlockPlacement, refs: BlockValuesRefs | None = None
) -> Block:
# new_block specialized to case with
# ndim=2
# isinstance(placement, BlockPlacement)
# check_ndim/ensure_block_shape already checked
klass = get_block_type(values.dtype)
values = maybe_coerce_values(values)
return klass(values, ndim=2, placement=placement, refs=refs)
def new_block(
values,
placement: BlockPlacement,
*,
ndim: int,
refs: BlockValuesRefs | None = None,
) -> Block:
# caller is responsible for ensuring:
# - values is NOT a NumpyExtensionArray
# - check_ndim/ensure_block_shape already checked
# - maybe_coerce_values already called/unnecessary
klass = get_block_type(values.dtype)
return klass(values, ndim=ndim, placement=placement, refs=refs)
def check_ndim(values, placement: BlockPlacement, ndim: int) -> None:
"""
ndim inference and validation.
Validates that values.ndim and ndim are consistent.
Validates that len(values) and len(placement) are consistent.
Parameters
----------
values : array-like
placement : BlockPlacement
ndim : int
Raises
------
ValueError : the number of dimensions do not match
"""
if values.ndim > ndim:
# Check for both np.ndarray and ExtensionArray
raise ValueError(
f"Wrong number of dimensions. values.ndim > ndim [{values.ndim} > {ndim}]"
)
if not is_1d_only_ea_dtype(values.dtype):
# TODO(EA2D): special case not needed with 2D EAs
if values.ndim != ndim:
raise ValueError(
"Wrong number of dimensions. "
f"values.ndim != ndim [{values.ndim} != {ndim}]"
)
if len(placement) != len(values):
raise ValueError(
f"Wrong number of items passed {len(values)}, "
f"placement implies {len(placement)}"
)
elif ndim == 2 and len(placement) != 1:
# TODO(EA2D): special case unnecessary with 2D EAs
raise ValueError("need to split")
def extract_pandas_array(
values: ArrayLike, dtype: DtypeObj | None, ndim: int
) -> tuple[ArrayLike, DtypeObj | None]:
"""
Ensure that we don't allow NumpyExtensionArray / NumpyEADtype in internals.
"""
# For now, blocks should be backed by ndarrays when possible.
if isinstance(values, ABCNumpyExtensionArray):
values = values.to_numpy()
if ndim and ndim > 1:
# TODO(EA2D): special case not needed with 2D EAs
values = np.atleast_2d(values)
if isinstance(dtype, NumpyEADtype):
dtype = dtype.numpy_dtype
return values, dtype
# -----------------------------------------------------------------
def extend_blocks(result, blocks=None) -> list[Block]:
"""return a new extended blocks, given the result"""
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
else:
assert isinstance(result, Block), type(result)
blocks.append(result)
return blocks
def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike:
"""
Reshape if possible to have values.ndim == ndim.
"""
if values.ndim < ndim:
if not is_1d_only_ea_dtype(values.dtype):
# TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023
# block.shape is incorrect for "2D" ExtensionArrays
# We can't, and don't need to, reshape.
values = cast("np.ndarray | DatetimeArray | TimedeltaArray", values)
values = values.reshape(1, -1)
return values
def external_values(values: ArrayLike) -> ArrayLike:
"""
The array that Series.values returns (public attribute).
This has some historical constraints, and is overridden in block
subclasses to return the correct array (e.g. period returns
object ndarray and datetimetz a datetime64[ns] ndarray instead of
proper extension array).
"""
if isinstance(values, (PeriodArray, IntervalArray)):
return values.astype(object)
elif isinstance(values, (DatetimeArray, TimedeltaArray)):
# NB: for datetime64tz this is different from np.asarray(values), since
# that returns an object-dtype ndarray of Timestamps.
# Avoid raising in .astype in casting from dt64tz to dt64
values = values._ndarray
if isinstance(values, np.ndarray):
values = values.view()
values.flags.writeable = False
else:
# ExtensionArrays
values = values.view()
values._readonly = True
return values
|
DatetimeLikeBlock
|
python
|
dagster-io__dagster
|
python_modules/dagster-pipes/dagster_pipes/__init__.py
|
{
"start": 2051,
"end": 2278
}
|
class ____(TypedDict):
"""A message sent from the external process to the orchestration process."""
__dagster_pipes_version: str
method: str
params: Optional[Mapping[str, Any]]
###### PIPES CONTEXT
|
PipesMessage
|
python
|
Textualize__textual
|
src/textual/widgets/_switch.py
|
{
"start": 492,
"end": 5904
}
|
class ____(Widget, can_focus=True):
"""A switch widget that represents a boolean value.
Can be toggled by clicking on it or through its [bindings][textual.widgets.Switch.BINDINGS].
The switch widget also contains [component classes][textual.widgets.Switch.COMPONENT_CLASSES]
that enable more customization.
"""
BINDINGS: ClassVar[list[BindingType]] = [
Binding("enter,space", "toggle_switch", "Toggle", show=False),
]
"""
| Key(s) | Description |
| :- | :- |
| enter,space | Toggle the switch state. |
"""
COMPONENT_CLASSES: ClassVar[set[str]] = {
"switch--slider",
}
"""
| Class | Description |
| :- | :- |
| `switch--slider` | Targets the slider of the switch. |
"""
DEFAULT_CSS = """
Switch {
border: tall $border-blurred;
background: $surface;
height: auto;
width: auto;
padding: 0 2;
&.-on .switch--slider {
color: $success;
}
& .switch--slider {
color: $panel;
background: $panel-darken-2;
}
&:hover {
& > .switch--slider {
color: $panel-lighten-1
}
&.-on > .switch--slider {
color: $success-lighten-1;
}
}
&:focus {
border: tall $border;
background-tint: $foreground 5%;
}
&:light {
&.-on .switch--slider {
color: $success;
}
& .switch--slider {
color: $primary 15%;
background: $panel-darken-2;
}
&:hover {
& > .switch--slider {
color: $primary 25%;
}
&.-on > .switch--slider {
color: $success-lighten-1;
}
}
}
}
"""
value: reactive[bool] = reactive(False, init=False)
"""The value of the switch; `True` for on and `False` for off."""
_slider_position = reactive(0.0)
"""The position of the slider."""
class Changed(Message):
"""Posted when the status of the switch changes.
Can be handled using `on_switch_changed` in a subclass of `Switch`
or in a parent widget in the DOM.
Attributes:
value: The value that the switch was changed to.
switch: The `Switch` widget that was changed.
"""
def __init__(self, switch: Switch, value: bool) -> None:
super().__init__()
self.value: bool = value
self.switch: Switch = switch
@property
def control(self) -> Switch:
"""Alias for self.switch."""
return self.switch
def __init__(
self,
value: bool = False,
*,
animate: bool = True,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
tooltip: RenderableType | None = None,
):
"""Initialise the switch.
Args:
value: The initial value of the switch.
animate: True if the switch should animate when toggled.
name: The name of the switch.
id: The ID of the switch in the DOM.
classes: The CSS classes of the switch.
disabled: Whether the switch is disabled or not.
tooltip: Optional tooltip.
"""
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
if value:
self._slider_position = 1.0
self.set_reactive(Switch.value, value)
self._should_animate = animate
if tooltip is not None:
self.tooltip = tooltip
def watch_value(self, value: bool) -> None:
target_slider_position = 1.0 if value else 0.0
if self._should_animate:
self.animate(
"_slider_position",
target_slider_position,
duration=0.3,
level="basic",
)
else:
self._slider_position = target_slider_position
self.post_message(self.Changed(self, self.value))
def watch__slider_position(self, slider_position: float) -> None:
self.set_class(slider_position == 1, "-on")
def render(self) -> RenderResult:
style = self.get_component_rich_style("switch--slider")
return ScrollBarRender(
virtual_size=100,
window_size=50,
position=self._slider_position * 50,
style=style,
vertical=False,
)
def get_content_width(self, container: Size, viewport: Size) -> int:
return 4
def get_content_height(self, container: Size, viewport: Size, width: int) -> int:
return 1
async def _on_click(self, event: Click) -> None:
"""Toggle the state of the switch."""
event.stop()
self.toggle()
def action_toggle_switch(self) -> None:
"""Toggle the state of the switch."""
self.toggle()
def toggle(self) -> Self:
"""Toggle the switch value.
As a result of the value changing, a `Switch.Changed` message will
be posted.
Returns:
The `Switch` instance.
"""
self.value = not self.value
return self
|
Switch
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-operations-to-move-all-balls-to-each-box.py
|
{
"start": 29,
"end": 473
}
|
class ____(object):
def minOperations(self, boxes):
"""
:type boxes: str
:rtype: List[int]
"""
result = [0]*len(boxes)
for direction in (lambda x:x, reversed):
cnt = accu = 0
for i in direction(xrange(len(boxes))):
result[i] += accu
if boxes[i] == '1':
cnt += 1
accu += cnt
return result
|
Solution
|
python
|
google__jax
|
tests/pmap_test.py
|
{
"start": 129348,
"end": 129465
}
|
class ____(EagerPmapMixin, VmapOfPmapTest):
pass
@jtu.pytest_mark_if_available('multiaccelerator')
|
VmapOfPmapEagerTest
|
python
|
scrapy__scrapy
|
scrapy/core/downloader/handlers/http11.py
|
{
"start": 22587,
"end": 27621
}
|
class ____(Protocol):
def __init__(
self,
finished: Deferred[_ResultT],
txresponse: TxResponse,
request: Request,
maxsize: int,
warnsize: int,
fail_on_dataloss: bool,
crawler: Crawler,
):
self._finished: Deferred[_ResultT] = finished
self._txresponse: TxResponse = txresponse
self._request: Request = request
self._bodybuf: BytesIO = BytesIO()
self._maxsize: int = maxsize
self._warnsize: int = warnsize
self._fail_on_dataloss: bool = fail_on_dataloss
self._fail_on_dataloss_warned: bool = False
self._reached_warnsize: bool = False
self._bytes_received: int = 0
self._certificate: ssl.Certificate | None = None
self._ip_address: ipaddress.IPv4Address | ipaddress.IPv6Address | None = None
self._crawler: Crawler = crawler
def _finish_response(
self, flags: list[str] | None = None, failure: Failure | None = None
) -> None:
self._finished.callback(
{
"txresponse": self._txresponse,
"body": self._bodybuf.getvalue(),
"flags": flags,
"certificate": self._certificate,
"ip_address": self._ip_address,
"failure": failure,
}
)
def connectionMade(self) -> None:
assert self.transport
if self._certificate is None:
with suppress(AttributeError):
self._certificate = ssl.Certificate(
self.transport._producer.getPeerCertificate()
)
if self._ip_address is None:
self._ip_address = ipaddress.ip_address(
self.transport._producer.getPeer().host
)
def dataReceived(self, bodyBytes: bytes) -> None:
# This may be called several times after cancel was called with buffered data.
if self._finished.called:
return
assert self.transport
self._bodybuf.write(bodyBytes)
self._bytes_received += len(bodyBytes)
bytes_received_result = self._crawler.signals.send_catch_log(
signal=signals.bytes_received,
data=bodyBytes,
request=self._request,
spider=self._crawler.spider,
)
for handler, result in bytes_received_result:
if isinstance(result, Failure) and isinstance(result.value, StopDownload):
logger.debug(
"Download stopped for %(request)s from signal handler %(handler)s",
{"request": self._request, "handler": handler.__qualname__},
)
self.transport.stopProducing()
self.transport.loseConnection()
failure = result if result.value.fail else None
self._finish_response(flags=["download_stopped"], failure=failure)
if self._maxsize and self._bytes_received > self._maxsize:
logger.warning(
"Received (%(bytes)s) bytes larger than download "
"max size (%(maxsize)s) in request %(request)s.",
{
"bytes": self._bytes_received,
"maxsize": self._maxsize,
"request": self._request,
},
)
# Clear buffer earlier to avoid keeping data in memory for a long time.
self._bodybuf.truncate(0)
self._finished.cancel()
if (
self._warnsize
and self._bytes_received > self._warnsize
and not self._reached_warnsize
):
self._reached_warnsize = True
logger.warning(
"Received more bytes than download "
"warn size (%(warnsize)s) in request %(request)s.",
{"warnsize": self._warnsize, "request": self._request},
)
def connectionLost(self, reason: Failure = connectionDone) -> None:
if self._finished.called:
return
if reason.check(ResponseDone):
self._finish_response()
return
if reason.check(PotentialDataLoss):
self._finish_response(flags=["partial"])
return
if reason.check(ResponseFailed) and any(
r.check(_DataLoss) for r in reason.value.reasons
):
if not self._fail_on_dataloss:
self._finish_response(flags=["dataloss"])
return
if not self._fail_on_dataloss_warned:
logger.warning(
"Got data loss in %s. If you want to process broken "
"responses set the setting DOWNLOAD_FAIL_ON_DATALOSS = False"
" -- This message won't be shown in further requests",
self._txresponse.request.absoluteURI.decode(),
)
self._fail_on_dataloss_warned = True
self._finished.errback(reason)
|
_ResponseReader
|
python
|
django__django
|
tests/test_utils/tests.py
|
{
"start": 85509,
"end": 86757
}
|
class ____(SimpleTestCase):
def setUp(self):
self.addCleanup(setattr, self.__class__, "databases", self.databases)
def test_no_close_match(self):
self.__class__.databases = {"void"}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'void' which is "
"not defined in settings.DATABASES."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_close_match(self):
self.__class__.databases = {"defualt"}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'defualt' which "
"is not defined in settings.DATABASES. Did you mean 'default'?"
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_match(self):
self.__class__.databases = {"default", "other"}
self.assertEqual(self._validate_databases(), frozenset({"default", "other"}))
def test_all(self):
self.__class__.databases = "__all__"
self.assertEqual(self._validate_databases(), frozenset(connections))
@isolate_apps("test_utils", attr_name="class_apps")
|
DatabaseAliasTests
|
python
|
pypa__warehouse
|
warehouse/captcha/__init__.py
|
{
"start": 81,
"end": 517
}
|
class ____(ValueError):
pass
def includeme(config):
# Register our Captcha service
captcha_class = config.maybe_dotted(config.registry.settings["captcha.backend"])
config.register_service_factory(
captcha_class.create_service,
ICaptchaService,
# Service requires a name for lookup in Jinja2 template,
# where the Interface object is not available.
name="captcha",
)
|
CaptchaError
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 28572,
"end": 29159
}
|
class ____(models.Model):
NORTH_AMERICA = "North America"
SOUTH_AMERICA = "South America"
EUROPE = "Europe"
ASIA = "Asia"
OCEANIA = "Oceania"
ANTARCTICA = "Antarctica"
CONTINENT_CHOICES = [
(NORTH_AMERICA, NORTH_AMERICA),
(SOUTH_AMERICA, SOUTH_AMERICA),
(EUROPE, EUROPE),
(ASIA, ASIA),
(OCEANIA, OCEANIA),
(ANTARCTICA, ANTARCTICA),
]
name = models.CharField(max_length=80)
continent = models.CharField(max_length=13, choices=CONTINENT_CHOICES)
def __str__(self):
return self.name
|
Country
|
python
|
huggingface__transformers
|
tests/models/vilt/test_image_processing_vilt.py
|
{
"start": 4629,
"end": 7494
}
|
class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = ViltImageProcessor if is_vision_available() else None
fast_image_processing_class = ViltImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = ViltImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "size_divisor"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "resample"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "model_input_names"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 30})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
def test_slow_fast_equivalence(self):
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
image_processor_slow = self.image_processing_class(**self.image_processor_dict, do_pad=True)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict, do_pad=True)
slow_outputs = image_processor_slow(image_inputs, return_tensors="pt")
slow_pixel_values = slow_outputs.pixel_values
slow_pixel_mask = slow_outputs.pixel_mask
fast_outputs = image_processor_fast(image_inputs, return_tensors="pt")
fast_pixel_values = fast_outputs.pixel_values
fast_pixel_mask = fast_outputs.pixel_mask
self.assertEqual(slow_pixel_values.shape, fast_pixel_values.shape)
self.assertTrue(torch.allclose(slow_pixel_values, fast_pixel_values, atol=1e-2))
self.assertEqual(slow_pixel_mask.shape, fast_pixel_mask.shape)
self.assertTrue(torch.equal(slow_pixel_mask, fast_pixel_mask))
|
ViltImageProcessingTest
|
python
|
django__django
|
tests/m2m_through/models.py
|
{
"start": 2222,
"end": 2512
}
|
class ____(models.Model):
first = models.ForeignKey(
PersonSelfRefM2M, models.CASCADE, related_name="rel_from_set"
)
second = models.ForeignKey(
PersonSelfRefM2M, models.CASCADE, related_name="rel_to_set"
)
date_friended = models.DateTimeField()
|
Friendship
|
python
|
explosion__spaCy
|
spacy/lang/tl/__init__.py
|
{
"start": 172,
"end": 320
}
|
class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
|
TagalogDefaults
|
python
|
dask__distributed
|
distributed/diagnostics/plugin.py
|
{
"start": 27551,
"end": 29273
}
|
class ____(logging.Handler):
"""
Handler class that gets installed inside workers by
:class:`ForwardLoggingPlugin`. Not intended to be instantiated by the user
directly.
In each affected worker, ``ForwardLoggingPlugin`` adds an instance of this
handler to one or more loggers (possibly the root logger). Tasks running on
the worker may then use the affected logger as normal, with the side effect
that any ``LogRecord``s handled by the logger (or by a logger below it in
the hierarchy) will be published to the dask client as a
``topic`` event.
"""
def __init__(self, worker, topic, level=logging.NOTSET):
super().__init__(level)
self.worker = worker
self.topic = topic
def prepare_record_attributes(self, record):
# Adapted from the CPython standard library's
# logging.handlers.SocketHandler.makePickle; see its source at:
# https://github.com/python/cpython/blob/main/Lib/logging/handlers.py
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
_ = self.format(record)
# If msg or args are objects, they may not be available on the receiving
# end. So we convert the msg % args to a string, save it as msg and zap
# the args.
d = dict(record.__dict__)
d["msg"] = record.getMessage()
d["args"] = None
d["exc_info"] = None
# delete 'message' if present: redundant with 'msg'
d.pop("message", None)
return d
def emit(self, record):
attributes = self.prepare_record_attributes(record)
self.worker.log_event(self.topic, attributes)
|
_ForwardingLogHandler
|
python
|
pyinstaller__pyinstaller
|
PyInstaller/depend/imphook.py
|
{
"start": 9732,
"end": 26690
}
|
class ____:
"""
Cached object encapsulating a lazy loadable hook script.
This object exposes public attributes (e.g., `datas`) of the underlying hook script as attributes of the same
name of this object. On the first access of any such attribute, this hook script is lazily loaded into an
in-memory private module reused on subsequent accesses. These dynamic attributes are referred to as "magic." All
other static attributes of this object (e.g., `hook_module_name`) are referred to as "non-magic."
Attributes (Magic)
----------
datas : set
Set of `TOC`-style 2-tuples `(target_file, source_file)` for all external non-executable files required by
the module being hooked, converted from the `datas` list of hook-style 2-tuples `(source_dir_or_glob,
target_dir)` defined by this hook script.
binaries : set
Set of `TOC`-style 2-tuples `(target_file, source_file)` for all external executable files required by the
module being hooked, converted from the `binaries` list of hook-style 2-tuples `(source_dir_or_glob,
target_dir)` defined by this hook script.
excludedimports : set
Set of the fully-qualified names of all modules imported by the module being hooked to be ignored rather than
imported from that module, converted from the `excludedimports` list defined by this hook script. These
modules will only be "locally" rather than "globally" ignored. These modules will remain importable from all
modules other than the module being hooked.
hiddenimports : set
Set of the fully-qualified names of all modules imported by the module being hooked that are _not_
automatically detectable by PyInstaller (usually due to being dynamically imported in that module),
converted from the `hiddenimports` list defined by this hook script.
warn_on_missing_hiddenimports : bool
Boolean flag indicating whether missing hidden imports from the hook should generate warnings or not. This
behavior is enabled by default, but individual hooks can opt out of it.
module_collection_mode : dict
A dictionary of package/module names and their corresponding collection mode strings ('pyz', 'pyc', 'py',
'pyz+py', 'py+pyz').
bindepend_symlink_suppression : set
A set of paths or path patterns corresponding to shared libraries for which binary dependency analysis should
not create symbolic links into top-level application directory.
Attributes (Non-magic)
----------
module_graph : ModuleGraph
Current module graph.
module_name : str
Name of the module hooked by this hook script.
hook_filename : str
Absolute or relative path of this hook script.
hook_module_name : str
Name of the in-memory module of this hook script's interpreted contents.
_hook_module : module
In-memory module of this hook script's interpreted contents, lazily loaded on the first call to the
`_load_hook_module()` method _or_ `None` if this method has yet to be accessed.
_default_priority : int
Default (location-based) priority for this hook.
priority : int
Actual priority for this hook. Might be different from `_default_priority` if hook file specifies the hook
priority override.
"""
#-- Magic --
def __init__(self, module_graph, module_name, hook_filename, hook_module_name_prefix, default_priority):
"""
Initialize this metadata.
Parameters
----------
module_graph : ModuleGraph
Current module graph.
module_name : str
Name of the module hooked by this hook script.
hook_filename : str
Absolute or relative path of this hook script.
hook_module_name_prefix : str
String prefixing the name of the in-memory module for this hook script. To avoid namespace clashes with
similar modules created by other `ModuleHook` objects in other `ModuleHookCache` containers, this string
_must_ be unique to the `ModuleHookCache` container containing this `ModuleHook` object. If this string
is non-unique, an existing in-memory module will be erroneously reused when lazily loading this hook
script, thus erroneously resanitizing previously sanitized hook script attributes (e.g., `datas`) with
the `format_binaries_and_datas()` helper.
default_priority : int
Default, location-based priority for this hook. Used to select active hook when multiple hooks are defined
for the same module.
"""
# Note that the passed module graph is already a weak reference, avoiding circular reference issues. See
# ModuleHookCache.__init__(). TODO: Add a failure message
assert isinstance(module_graph, weakref.ProxyTypes)
self.module_graph = module_graph
self.module_name = module_name
self.hook_filename = hook_filename
# Default priority; used as fall-back for dynamic `hook_priority` attribute.
self._default_priority = default_priority
# Name of the in-memory module fabricated to refer to this hook script.
self.hook_module_name = hook_module_name_prefix + self.module_name.replace('.', '_')
# Attributes subsequently defined by the _load_hook_module() method.
self._loaded = False
self._has_hook_function = False
self._hook_module = None
def __getattr__(self, attr_name):
"""
Get the magic attribute with the passed name (e.g., `datas`) from this lazily loaded hook script if any _or_
raise `AttributeError` otherwise.
This special method is called only for attributes _not_ already defined by this object. This includes
undefined attributes and the first attempt to access magic attributes.
This special method is _not_ called for subsequent attempts to access magic attributes. The first attempt to
access magic attributes defines corresponding instance variables accessible via the `self.__dict__` instance
dictionary (e.g., as `self.datas`) without calling this method. This approach also allows magic attributes to
be deleted from this object _without_ defining the `__delattr__()` special method.
See Also
----------
Class docstring for supported magic attributes.
"""
if attr_name == 'priority':
# If attribute is part of hook metadata, read metadata from hook script and return the attribute value.
self._load_hook_metadata()
return getattr(self, attr_name)
if attr_name in _MAGIC_MODULE_HOOK_ATTRS and not self._loaded:
# If attribute is hook's magic attribute, load and run the hook script, and return the attribute value.
self._load_hook_module()
return getattr(self, attr_name)
else:
# This is an undefined attribute. Raise an exception.
raise AttributeError(attr_name)
def __setattr__(self, attr_name, attr_value):
"""
Set the attribute with the passed name to the passed value.
If this is a magic attribute, this hook script will be lazily loaded before setting this attribute. Unlike
`__getattr__()`, this special method is called to set _any_ attribute -- including magic, non-magic,
and undefined attributes.
See Also
----------
Class docstring for supported magic attributes.
"""
# If this is a magic attribute, initialize this attribute by lazy loading this hook script before overwriting
# this attribute.
if attr_name in _MAGIC_MODULE_HOOK_ATTRS:
self._load_hook_module()
# Set this attribute to the passed value. To avoid recursion, the superclass method rather than setattr() is
# called.
return super().__setattr__(attr_name, attr_value)
#-- Loading --
def _load_hook_metadata(self):
"""
Load hook metadata from its source file.
"""
self.priority = self._default_priority
# Priority override pattern: `# $PyInstaller-Hook-Priority: <value>`
priority_pattern = re.compile(r"^\s*#\s*\$PyInstaller-Hook-Priority:\s*(?P<value>[\S]+)")
with open(self.hook_filename, "r", encoding="utf-8") as f:
for line in f:
# Attempt to match and parse hook priority directive
m = priority_pattern.match(line)
if m is not None:
try:
self.priority = int(m.group('value'))
except Exception:
logger.warning(
"Failed to parse hook priority value string: %r!", m.group('value'), exc_info=True
)
# Currently, this is our only line of interest, so we can stop the search here.
return
def _load_hook_module(self, keep_module_ref=False):
"""
Lazily load this hook script into an in-memory private module.
This method (and, indeed, this class) preserves all attributes and functions defined by this hook script as
is, ensuring sane behaviour in hook functions _not_ expecting unplanned external modification. Instead,
this method copies public attributes defined by this hook script (e.g., `binaries`) into private attributes
of this object, which the special `__getattr__()` and `__setattr__()` methods safely expose to external
callers. For public attributes _not_ defined by this hook script, the corresponding private attributes will
be assigned sane defaults. For some public attributes defined by this hook script, the corresponding private
attributes will be transformed into objects more readily and safely consumed elsewhere by external callers.
See Also
----------
Class docstring for supported attributes.
"""
# If this hook script module has already been loaded, noop.
if self._loaded and (self._hook_module is not None or not keep_module_ref):
return
# Load and execute the hook script. Even if mechanisms from the import machinery are used, this does not import
# the hook as the module.
hook_path, hook_basename = os.path.split(self.hook_filename)
logger.info('Processing standard module hook %r from %r', hook_basename, hook_path)
try:
self._hook_module = importlib_load_source(self.hook_module_name, self.hook_filename)
except ImportError:
logger.debug("Hook failed with:", exc_info=True)
raise ImportErrorWhenRunningHook(self.hook_module_name, self.hook_filename)
# Mark as loaded
self._loaded = True
# Check if module has hook() function.
self._has_hook_function = hasattr(self._hook_module, 'hook')
# Copy hook script attributes into magic attributes exposed as instance variables of the current "ModuleHook"
# instance.
for attr_name, (default_type, sanitizer_func) in _MAGIC_MODULE_HOOK_ATTRS.items():
# Unsanitized value of this attribute.
attr_value = getattr(self._hook_module, attr_name, None)
# If this attribute is undefined, expose a sane default instead.
if attr_value is None:
attr_value = default_type()
# Else if this attribute requires sanitization, do so.
elif sanitizer_func is not None:
attr_value = sanitizer_func(attr_value)
# Else, expose the unsanitized value of this attribute.
# Expose this attribute as an instance variable of the same name.
setattr(self, attr_name, attr_value)
# If module_collection_mode has an entry with None key, reassign it to the hooked module's name.
setattr(
self, 'module_collection_mode', {
key if key is not None else self.module_name: value
for key, value in getattr(self, 'module_collection_mode').items()
}
)
# Release the module if we do not need the reference. This is the case when hook is loaded during the analysis
# rather as part of the post-graph operations.
if not keep_module_ref:
self._hook_module = None
#-- Hooks --
def post_graph(self, analysis):
"""
Call the **post-graph hook** (i.e., `hook()` function) defined by this hook script, if any.
Parameters
----------
analysis: build_main.Analysis
Analysis that calls the hook
This method is intended to be called _after_ the module graph for this application is constructed.
"""
# Lazily load this hook script into an in-memory module.
# The script might have been loaded before during modulegraph analysis; in that case, it needs to be reloaded
# only if it provides a hook() function.
if not self._loaded or self._has_hook_function:
# Keep module reference when loading the hook, so we can call its hook function!
self._load_hook_module(keep_module_ref=True)
# Call this hook script's hook() function, which modifies attributes accessed by subsequent methods and
# hence must be called first.
self._process_hook_func(analysis)
# Order is insignificant here.
self._process_hidden_imports()
def _process_hook_func(self, analysis):
"""
Call this hook's `hook()` function if defined.
Parameters
----------
analysis: build_main.Analysis
Analysis that calls the hook
"""
# If this hook script defines no hook() function, noop.
if not hasattr(self._hook_module, 'hook'):
return
# Call this hook() function.
hook_api = PostGraphAPI(module_name=self.module_name, module_graph=self.module_graph, analysis=analysis)
try:
self._hook_module.hook(hook_api)
except ImportError:
logger.debug("Hook failed with:", exc_info=True)
raise ImportErrorWhenRunningHook(self.hook_module_name, self.hook_filename)
# Update all magic attributes modified by the prior call.
self.datas.update(set(hook_api._added_datas))
self.binaries.update(set(hook_api._added_binaries))
self.hiddenimports.extend(hook_api._added_imports)
self.module_collection_mode.update(hook_api._module_collection_mode)
self.bindepend_symlink_suppression.update(hook_api._bindepend_symlink_suppression)
# FIXME: `hook_api._deleted_imports` should be appended to `self.excludedimports` and used to suppress module
# import during the modulegraph construction rather than handled here. However, for that to work, the `hook()`
# function needs to be ran during modulegraph construction instead of in post-processing (and this in turn
# requires additional code refactoring in order to be able to pass `analysis` to `PostGraphAPI` object at
# that point). So once the modulegraph rewrite is complete, remove the code block below.
for deleted_module_name in hook_api._deleted_imports:
# Remove the graph link between the hooked module and item. This removes the 'item' node from the graph if
# no other links go to it (no other modules import it)
self.module_graph.removeReference(hook_api.node, deleted_module_name)
def _process_hidden_imports(self):
"""
Add all imports listed in this hook script's `hiddenimports` attribute to the module graph as if directly
imported by this hooked module.
These imports are typically _not_ implicitly detectable by PyInstaller and hence must be explicitly defined
by hook scripts.
"""
# For each hidden import required by the module being hooked...
for import_module_name in self.hiddenimports:
try:
# Graph node for this module. Do not implicitly create namespace packages for non-existent packages.
caller = self.module_graph.find_node(self.module_name, create_nspkg=False)
# Manually import this hidden import from this module.
self.module_graph.import_hook(import_module_name, caller)
# If this hidden import is unimportable, print a non-fatal warning. Hidden imports often become
# desynchronized from upstream packages and hence are only "soft" recommendations.
except ImportError:
if self.warn_on_missing_hiddenimports:
logger.warning('Hidden import "%s" not found!', import_module_name)
|
ModuleHook
|
python
|
getsentry__sentry
|
src/sentry/auth/access.py
|
{
"start": 1949,
"end": 5848
}
|
class ____(abc.ABC):
@property
@abc.abstractmethod
def sso_is_valid(self) -> bool:
pass
@property
@abc.abstractmethod
def requires_sso(self) -> bool:
pass
@property
@abc.abstractmethod
def has_open_membership(self) -> bool:
pass
@property
@abc.abstractmethod
def has_global_access(self) -> bool:
pass
@property
@abc.abstractmethod
def scopes(self) -> frozenset[str]:
pass
@property
@abc.abstractmethod
def permissions(self) -> frozenset[str]:
pass
# TODO(cathy): remove this
@property
@abc.abstractmethod
def role(self) -> str | None:
pass
@property
@abc.abstractmethod
def team_ids_with_membership(self) -> frozenset[int]:
pass
@property
@abc.abstractmethod
def accessible_team_ids(self) -> frozenset[int]:
pass
@property
@abc.abstractmethod
def project_ids_with_team_membership(self) -> frozenset[int]:
pass
@property
@abc.abstractmethod
def accessible_project_ids(self) -> frozenset[int]:
pass
@property
def is_integration_token(self) -> bool:
return False
def has_permission(self, permission: str) -> bool:
"""
Return bool representing if the user has the given permission.
>>> access.has_permission('broadcasts.admin')
"""
return permission in self.permissions
def has_scope(self, scope: str) -> bool:
"""
Return bool representing if the user has the given scope.
>>> access.has_scope('org:read')
"""
return scope in self.scopes
def get_organization_role(self) -> OrganizationRole | None:
if self.role is not None:
return organization_roles.get(self.role)
return None
@abc.abstractmethod
def has_role_in_organization(
self, role: str, organization: Organization, user_id: int | None
) -> bool:
pass
@abc.abstractmethod
def has_team_access(self, team: Team) -> bool:
"""
Return bool representing if a user should have access to information for the given team.
>>> access.has_team_access(team)
"""
@abc.abstractmethod
def has_team_scope(self, team: Team, scope: str) -> bool:
pass
def has_team_membership(self, team: Team) -> bool:
return team.id in self.team_ids_with_membership
@abc.abstractmethod
def get_team_role(self, team: Team) -> TeamRole | None:
pass
@abc.abstractmethod
def has_project_access(self, project: Project) -> bool:
"""
Return bool representing if a user should have access to information for the given project.
>>> access.has_project_access(project)
"""
raise NotImplementedError
def has_projects_access(self, projects: Iterable[Project]) -> bool:
"""
Returns bool representing if a user should have access to every requested project
"""
return all([self.has_project_access(project) for project in projects])
def has_project_membership(self, project: Project) -> bool:
"""
Return bool representing if a user has explicit membership for the given project.
>>> access.has_project_membership(project)
"""
return project.id in self.project_ids_with_team_membership
def has_project_scope(self, project: Project, scope: str) -> bool:
"""
Return bool representing if a user should have access with the given scope to information
for the given project.
>>> access.has_project_scope(project, 'project:read')
"""
return self.has_any_project_scope(project, [scope])
@abc.abstractmethod
def has_any_project_scope(self, project: Project, scopes: Collection[str]) -> bool:
pass
@dataclass
|
Access
|
python
|
PyCQA__pylint
|
tests/functional/s/super/super_init_not_called_py38.py
|
{
"start": 295,
"end": 411
}
|
class ____(MyProtocol):
"""An implementation."""
def __init__(self) -> None:
...
|
ProtocolImplimentation
|
python
|
scrapy__scrapy
|
tests/test_loader.py
|
{
"start": 698,
"end": 798
}
|
class ____:
name: list = dataclasses.field(default_factory=list)
# test item loaders
|
NameDataClass
|
python
|
huggingface__transformers
|
tests/quantization/eetq_integration/test_eetq.py
|
{
"start": 1158,
"end": 2253
}
|
class ____(unittest.TestCase):
def test_to_dict(self):
"""
Simple test that checks that converting a config to a dict yields the same values as the config object
"""
quantization_config = EetqConfig()
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
def test_from_dict(self):
"""
Simple test that checks that building a config object from a dict yields the same values as the dict
"""
dict = {"modules_to_not_convert": ["lm_head.weight"], "quant_method": "eetq", "weights": "int8"}
quantization_config = EetqConfig.from_dict(dict)
self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
self.assertEqual(dict["quant_method"], quantization_config.quant_method)
self.assertEqual(dict["weights"], quantization_config.weights)
@slow
@require_torch_gpu
@require_eetq
@require_accelerate
|
EetqConfigTest
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/lib/bedrock/_client.py
|
{
"start": 4495,
"end": 10172
}
|
class ____(BaseBedrockClient[httpx.Client, Stream[Any]], SyncAPIClient):
messages: Messages
completions: Completions
beta: Beta
def __init__(
self,
aws_secret_key: str | None = None,
aws_access_key: str | None = None,
aws_region: str | None = None,
aws_profile: str | None = None,
aws_session_token: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
# Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: httpx.Client | None = None,
# Enable or disable schema validation for data returned by the API.
# When enabled an error APIResponseValidationError is raised
# if the API responds with invalid data for the expected schema.
#
# This parameter may be removed or changed in the future.
# If you rely on this feature, please open a GitHub issue
# outlining your use-case to help us decide if it should be
# part of our public interface in the future.
_strict_response_validation: bool = False,
) -> None:
self.aws_secret_key = aws_secret_key
self.aws_access_key = aws_access_key
self.aws_region = _infer_region() if aws_region is None else aws_region
self.aws_profile = aws_profile
self.aws_session_token = aws_session_token
if base_url is None:
base_url = os.environ.get("ANTHROPIC_BEDROCK_BASE_URL")
if base_url is None:
base_url = f"https://bedrock-runtime.{self.aws_region}.amazonaws.com"
super().__init__(
version=__version__,
base_url=base_url,
timeout=timeout,
max_retries=max_retries,
custom_headers=default_headers,
custom_query=default_query,
http_client=http_client,
_strict_response_validation=_strict_response_validation,
)
self.beta = Beta(self)
self.messages = Messages(self)
self.completions = Completions(self)
@override
def _make_sse_decoder(self) -> AWSEventStreamDecoder:
return AWSEventStreamDecoder()
@override
def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
return _prepare_options(options)
@override
def _prepare_request(self, request: httpx.Request) -> None:
from ._auth import get_auth_headers
data = request.read().decode()
headers = get_auth_headers(
method=request.method,
url=str(request.url),
headers=request.headers,
aws_access_key=self.aws_access_key,
aws_secret_key=self.aws_secret_key,
aws_session_token=self.aws_session_token,
region=self.aws_region or "us-east-1",
profile=self.aws_profile,
data=data,
)
request.headers.update(headers)
def copy(
self,
*,
aws_secret_key: str | None = None,
aws_access_key: str | None = None,
aws_region: str | None = None,
aws_session_token: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
http_client: httpx.Client | None = None,
max_retries: int | NotGiven = NOT_GIVEN,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
if default_query is not None and set_default_query is not None:
raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
headers = self._custom_headers
if default_headers is not None:
headers = {**headers, **default_headers}
elif set_default_headers is not None:
headers = set_default_headers
params = self._custom_query
if default_query is not None:
params = {**params, **default_query}
elif set_default_query is not None:
params = set_default_query
return self.__class__(
aws_secret_key=aws_secret_key or self.aws_secret_key,
aws_access_key=aws_access_key or self.aws_access_key,
aws_region=aws_region or self.aws_region,
aws_session_token=aws_session_token or self.aws_session_token,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
max_retries=max_retries if is_given(max_retries) else self.max_retries,
default_headers=headers,
default_query=params,
**_extra_kwargs,
)
# Alias for `copy` for nicer inline usage, e.g.
# client.with_options(timeout=10).foo.create(...)
with_options = copy
|
AnthropicBedrock
|
python
|
arrow-py__arrow
|
tests/test_locales.py
|
{
"start": 105119,
"end": 106742
}
|
class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("now", 0) == "ابھی"
assert self.locale._format_timeframe("second", -1) == "ایک سیکنڈ"
assert self.locale._format_timeframe("second", 1) == "ایک سیکنڈ"
assert self.locale._format_timeframe("seconds", -3) == "3 سیکنڈ"
assert self.locale._format_timeframe("minute", 1) == "ایک منٹ"
assert self.locale._format_timeframe("minutes", -4) == "4 منٹ"
assert self.locale._format_timeframe("hour", 1) == "ایک گھنٹے"
assert self.locale._format_timeframe("hours", -23) == "23 گھنٹے"
assert self.locale._format_timeframe("day", 1) == "ایک دن"
assert self.locale._format_timeframe("days", -12) == "12 دن"
assert self.locale._format_timeframe("week", 1) == "ایک ہفتے"
assert self.locale._format_timeframe("weeks", -12) == "12 ہفتے"
assert self.locale._format_timeframe("month", 1) == "ایک مہینہ"
assert self.locale._format_timeframe("months", -2) == "2 ماہ"
assert self.locale._format_timeframe("year", 1) == "ایک سال"
assert self.locale._format_timeframe("years", -2) == "2 سال"
def test_weekday_and_month(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
# Saturday
assert self.locale.day_name(dt.isoweekday()) == "ہفتہ"
assert self.locale.day_abbreviation(dt.isoweekday()) == "ہفتہ"
# June
assert self.locale.month_name(dt.isoweekday()) == "جون"
assert self.locale.month_abbreviation(dt.isoweekday()) == "جون"
@pytest.mark.usefixtures("lang_locale")
|
TestUrduLocale
|
python
|
numba__numba
|
numba/testing/main.py
|
{
"start": 3654,
"end": 4356
}
|
class ____(object):
"""Simply list available tests rather than running them."""
def __init__(self, useslice):
self.useslice = parse_slice(useslice)
def run(self, test):
result = runner.TextTestResult(sys.stderr, descriptions=True,
verbosity=1)
self._test_list = _flatten_suite(test)
masked_list = list(filter(self.useslice, self._test_list))
self._test_list.sort(key=cuda_sensitive_mtime)
for t in masked_list:
print(t.id())
print('%d tests found. %s selected' % (len(self._test_list),
len(masked_list)))
return result
|
TestLister
|
python
|
kamyu104__LeetCode-Solutions
|
Python/depth-of-bst-given-insertion-order.py
|
{
"start": 59,
"end": 510
}
|
class ____(object):
def maxDepthBST(self, order):
"""
:type order: List[int]
:rtype: int
"""
depths = sortedcontainers.SortedDict({float("-inf"):0, float("inf"):0})
values_view = depths.values()
result = 0
for x in order:
i = depths.bisect_right(x)
depths[x] = max(values_view[i-1:i+1])+1
result = max(result, depths[x])
return result
|
Solution
|
python
|
astropy__astropy
|
astropy/modeling/fitting.py
|
{
"start": 2560,
"end": 4247
}
|
class ____:
"""Class for covariance matrix calculated by fitter."""
def __init__(self, cov_matrix, param_names):
self.cov_matrix = cov_matrix
self.param_names = param_names
def pprint(self, max_lines, round_val):
# Print and label lower triangle of covariance matrix
# Print rows for params up to `max_lines`, round floats to 'round_val'
longest_name = max(len(x) for x in self.param_names)
ret_str = "parameter variances / covariances \n"
fstring = f"{'': <{longest_name}}| {{0}}\n"
for i, row in enumerate(self.cov_matrix):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += fstring.replace(" " * len(param), param, 1).format(
repr(np.round(row[: i + 1], round_val))[7:-2]
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, params):
# index covariance matrix by parameter names or indices
if len(params) != 2:
raise ValueError("Covariance must be indexed by two values.")
if all(isinstance(item, str) for item in params):
i1, i2 = (
self.param_names.index(params[0]),
self.param_names.index(params[1]),
)
elif all(isinstance(item, int) for item in params):
i1, i2 = params
else:
raise TypeError(
"Covariance can be indexed by two parameter names or integer indices."
)
return self.cov_matrix[i1][i2]
|
Covariance
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_shape_base_.py
|
{
"start": 19576,
"end": 20296
}
|
class ____(TestCase):
"""Only testing for integer splits."""
def test_non_iterable(self):
assert_raises(ValueError, hsplit, 1, 1)
def test_0D_array(self):
a = np.array(1)
try:
hsplit(a, 2)
assert_(0)
except ValueError:
pass
def test_1D_array(self):
a = np.array([1, 2, 3, 4])
res = hsplit(a, 2)
desired = [np.array([1, 2]), np.array([3, 4])]
compare_results(res, desired)
def test_2D_array(self):
a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
res = hsplit(a, 2)
desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
compare_results(res, desired)
|
TestHsplit
|
python
|
huggingface__transformers
|
src/transformers/models/dpr/modeling_dpr.py
|
{
"start": 8536,
"end": 8989
}
|
class ____(DPRPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: DPRConfig
base_model_prefix = "span_predictor"
###############
# Actual Models
###############
@auto_docstring(
custom_intro="""
The bare DPRContextEncoder transformer outputting pooler outputs as context representations.
"""
)
|
DPRPretrainedReader
|
python
|
PrefectHQ__prefect
|
tests/server/api/test_server.py
|
{
"start": 9844,
"end": 14823
}
|
class ____:
@pytest.fixture(autouse=True)
def enable_memoization(self, tmp_path):
with temporary_settings(
{
PREFECT_MEMOIZE_BLOCK_AUTO_REGISTRATION: True,
PREFECT_MEMO_STORE_PATH: tmp_path / "memo_store.toml",
}
):
yield
@pytest.fixture
def memo_store_with_mismatched_key(self):
PREFECT_MEMO_STORE_PATH.value().write_text(
toml.dumps({"block_auto_registration": "not-a-real-key"})
)
@pytest.fixture
def current_block_registry_hash(self):
return "abcd1234"
@pytest.fixture
def memo_store_with_accurate_key(self, current_block_registry_hash):
PREFECT_MEMO_STORE_PATH.value().write_text(
toml.dumps({"block_auto_registration": current_block_registry_hash})
)
async def test_runs_wrapped_function_on_missing_key(
self, current_block_registry_hash
):
assert not PREFECT_MEMO_STORE_PATH.value().exists()
assert PREFECT_MEMOIZE_BLOCK_AUTO_REGISTRATION.value(), (
"Memoization is not enabled"
)
test_func = AsyncMock()
# hashing randomly fails when running the full test suite
# mocking the hash stabilizes this test
with patch("prefect.server.api.server.hash_objects") as mock:
mock.return_value = current_block_registry_hash
await _memoize_block_auto_registration(test_func)()
test_func.assert_called_once()
assert PREFECT_MEMO_STORE_PATH.value().exists(), "Memo store was not created"
assert (
toml.load(PREFECT_MEMO_STORE_PATH.value()).get("block_auto_registration")
== current_block_registry_hash
), "Key was not added to memo store"
async def test_runs_wrapped_function_on_mismatched_key(
self,
memo_store_with_mismatched_key,
current_block_registry_hash,
):
assert PREFECT_MEMOIZE_BLOCK_AUTO_REGISTRATION.value(), (
"Memoization is not enabled"
)
test_func = AsyncMock()
# hashing randomly fails when running the full test suite
# mocking the hash stabilizes this test
with patch("prefect.server.api.server.hash_objects") as mock:
mock.return_value = current_block_registry_hash
await _memoize_block_auto_registration(test_func)()
test_func.assert_called_once()
assert (
toml.load(PREFECT_MEMO_STORE_PATH.value()).get("block_auto_registration")
== current_block_registry_hash
), "Key was not updated in memo store"
async def test_runs_wrapped_function_when_memoization_disabled(
self, memo_store_with_accurate_key
):
with temporary_settings(
{
PREFECT_MEMOIZE_BLOCK_AUTO_REGISTRATION: False,
}
):
test_func = AsyncMock()
await _memoize_block_auto_registration(test_func)()
test_func.assert_called_once()
async def test_skips_wrapped_function_on_matching_key(
self, current_block_registry_hash, memo_store_with_accurate_key
):
test_func = AsyncMock()
# hashing randomly fails when running the full test suite
# mocking the hash stabilizes this test
with patch("prefect.server.api.server.hash_objects") as mock:
mock.return_value = current_block_registry_hash
await _memoize_block_auto_registration(test_func)()
test_func.assert_not_called()
async def test_runs_wrapped_function_when_hashing_fails(
self, memo_store_with_accurate_key
):
test_func = AsyncMock()
with patch("prefect.server.api.server.hash_objects") as mock:
mock.return_value = None
await _memoize_block_auto_registration(test_func)()
test_func.assert_called_once()
async def test_does_not_fail_on_read_only_filesystem(self, enable_memoization):
try:
PREFECT_MEMO_STORE_PATH.value().parent.chmod(744)
test_func = AsyncMock()
with patch("prefect.server.api.server.hash_objects") as mock:
mock.return_value = None
await _memoize_block_auto_registration(test_func)()
test_func.assert_called_once()
assert not PREFECT_MEMO_STORE_PATH.value().exists()
finally:
PREFECT_MEMO_STORE_PATH.value().parent.chmod(777)
async def test_changing_database_breaks_cache(self, enable_memoization):
test_func = AsyncMock()
await _memoize_block_auto_registration(test_func)()
assert test_func.call_count == 1
with temporary_settings(
{
PREFECT_API_DATABASE_CONNECTION_URL: "something else",
}
):
await _memoize_block_auto_registration(test_func)()
assert test_func.call_count == 2
|
TestMemoizeBlockAutoRegistration
|
python
|
sympy__sympy
|
sympy/matrices/expressions/factorizations.py
|
{
"start": 475,
"end": 507
}
|
class ____(UofLU): pass
|
UofCholesky
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_quotes/docstring_singles_mixed_quotes_class_var_2.py
|
{
"start": 0,
"end": 357
}
|
class ____():
'Do not'" start with empty string" ' and lint docstring safely'
''' Not a docstring '''
def foo(self, bar='''not a docstring'''):
'Do not'" start with empty string" ' and lint docstring safely'
pass
class Nested(foo()[:]): 'Do not'" start with empty string" ' and lint docstring safely'; pass
|
SingleLineDocstrings
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/secrets/test_secrets_manager.py
|
{
"start": 978,
"end": 10466
}
|
class ____:
@mock.patch("airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend.get_conn_value")
def test_aws_secrets_manager_get_connection(self, mock_get_value):
mock_get_value.return_value = "scheme://user:pass@host:100"
conn = SecretsManagerBackend().get_connection("fake_conn")
assert conn.host == "host"
@mock_aws
def test_get_conn_value_full_url_mode(self):
secret_id = "airflow/connections/test_postgres"
create_param = {
"Name": secret_id,
"SecretString": "postgresql://airflow:airflow@host:5432/airflow",
}
secrets_manager_backend = SecretsManagerBackend()
secrets_manager_backend.client.create_secret(**create_param)
returned_uri = secrets_manager_backend.get_conn_value(conn_id="test_postgres")
assert returned_uri == "postgresql://airflow:airflow@host:5432/airflow"
@mock_aws
def test_get_conn_value_non_existent_key(self):
"""
Test that if the key with connection ID is not present,
SecretsManagerBackend.get_connection should return None
"""
conn_id = "test_mysql"
secret_id = "airflow/connections/test_postgres"
create_param = {
"Name": secret_id,
"SecretString": "postgresql://airflow:airflow@host:5432/airflow",
}
secrets_manager_backend = SecretsManagerBackend()
secrets_manager_backend.client.create_secret(**create_param)
assert secrets_manager_backend.get_conn_value(conn_id=conn_id) is None
assert secrets_manager_backend.get_connection(conn_id=conn_id) is None
@mock_aws
def test_get_variable(self):
secret_id = "airflow/variables/hello"
create_param = {"Name": secret_id, "SecretString": "world"}
secrets_manager_backend = SecretsManagerBackend()
secrets_manager_backend.client.create_secret(**create_param)
returned_uri = secrets_manager_backend.get_variable("hello")
assert returned_uri == "world"
@mock_aws
def test_get_variable_non_existent_key(self):
"""
Test that if the Variable key is not present,
SecretsManagerBackend.get_variable should return None
"""
secret_id = "airflow/variables/hello"
create_param = {"Name": secret_id, "SecretString": "world"}
secrets_manager_backend = SecretsManagerBackend()
secrets_manager_backend.client.create_secret(**create_param)
assert secrets_manager_backend.get_variable("test_mysql") is None
@mock_aws
def test_get_config_non_existent_key(self):
"""
Test that if the Config key is not present,
SecretsManagerBackend.get_config should return None
"""
secret_id = "airflow/config/hello"
create_param = {"Name": secret_id, "SecretString": "world"}
secrets_manager_backend = SecretsManagerBackend()
secrets_manager_backend.client.create_secret(**create_param)
assert secrets_manager_backend.get_config("test") is None
@mock.patch("airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend._get_secret")
def test_connection_prefix_none_value(self, mock_get_secret):
"""
Test that if Connection ID is not present in AWS Secrets Manager,
SecretsManagerBackend.get_conn_value should return None,
SecretsManagerBackend._get_secret should not be called
"""
kwargs = {"connections_prefix": None}
secrets_manager_backend = SecretsManagerBackend(**kwargs)
assert secrets_manager_backend.get_conn_value("test_mysql") is None
mock_get_secret.assert_not_called()
@mock.patch("airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend._get_secret")
def test_variable_prefix_none_value(self, mock_get_secret):
"""
Test that if Variable key is not present in AWS Secrets Manager,
SecretsManagerBackend.get_variables should return None,
SecretsManagerBackend._get_secret should not be called
"""
kwargs = {"variables_prefix": None}
secrets_manager_backend = SecretsManagerBackend(**kwargs)
assert secrets_manager_backend.get_variable("hello") is None
mock_get_secret.assert_not_called()
@mock.patch("airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend._get_secret")
def test_config_prefix_none_value(self, mock_get_secret):
"""
Test that if Config key is not present in AWS Secrets Manager,
SecretsManagerBackend.get_config should return None,
SecretsManagerBackend._get_secret should not be called
"""
kwargs = {"config_prefix": None}
secrets_manager_backend = SecretsManagerBackend(**kwargs)
assert secrets_manager_backend.get_config("config") is None
mock_get_secret.assert_not_called()
@mock.patch(
"airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend.client",
new_callable=mock.PropertyMock,
)
@pytest.mark.parametrize(
("connection_id", "connections_lookup_pattern", "num_client_calls"),
[
("test", "test", 1),
("test", ".*", 1),
("test", "T.*", 1),
("test", "dummy-pattern", 0),
("test", None, 1),
],
)
def test_connection_lookup_pattern(
self, mock_client, connection_id, connections_lookup_pattern, num_client_calls
):
"""
Test that if Connection ID is looked up in AWS Secrets Manager
"""
mock_client().get_secret_value.return_value = {"SecretString": None}
kwargs = {"connections_lookup_pattern": connections_lookup_pattern}
secrets_manager_backend = SecretsManagerBackend(**kwargs)
secrets_manager_backend.get_conn_value(connection_id)
assert mock_client().get_secret_value.call_count == num_client_calls
@mock.patch(
"airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend.client",
new_callable=mock.PropertyMock,
)
@pytest.mark.parametrize(
("variable_key", "variables_lookup_pattern", "num_client_calls"),
[
("test", "test", 1),
("test", ".*", 1),
("test", "T.*", 1),
("test", "dummy-pattern", 0),
("test", None, 1),
],
)
def test_variable_lookup_pattern(
self, mock_client, variable_key, variables_lookup_pattern, num_client_calls
):
"""
Test that if Variable key is looked up in AWS Secrets Manager
"""
mock_client().get_secret_value.return_value = {"SecretString": None}
kwargs = {"variables_lookup_pattern": variables_lookup_pattern}
secrets_manager_backend = SecretsManagerBackend(**kwargs)
secrets_manager_backend.get_variable(variable_key)
assert mock_client().get_secret_value.call_count == num_client_calls
@mock.patch(
"airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend.client",
new_callable=mock.PropertyMock,
)
@pytest.mark.parametrize(
("config_key", "config_lookup_pattern", "num_client_calls"),
[
("test", "test", 1),
("test", ".*", 1),
("test", "T.*", 1),
("test", "dummy-pattern", 0),
("test", None, 1),
],
)
def test_config_lookup_pattern(self, mock_client, config_key, config_lookup_pattern, num_client_calls):
"""
Test that if Variable key is looked up in AWS Secrets Manager
"""
mock_client().get_secret_value.return_value = {"SecretString": None}
kwargs = {"config_lookup_pattern": config_lookup_pattern}
secrets_manager_backend = SecretsManagerBackend(**kwargs)
secrets_manager_backend.get_config(config_key)
assert mock_client().get_secret_value.call_count == num_client_calls
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.SessionFactory")
def test_passing_client_kwargs(self, mock_session_factory):
secrets_manager_backend = SecretsManagerBackend(
use_ssl=False, role_arn="arn:aws:iam::222222222222:role/awesome-role", region_name="eu-central-1"
)
# Mock SessionFactory, session and client
mock_session_factory_instance = mock_session_factory.return_value
mock_ssm_client = mock.MagicMock(return_value="mock-secretsmanager-client")
mock_session = mock.MagicMock()
mock_session.client = mock_ssm_client
mock_create_session = mock.MagicMock(return_value=mock_session)
mock_session_factory_instance.create_session = mock_create_session
secrets_manager_backend.client
assert mock_session_factory.call_count == 1
mock_session_factory_call_kwargs = mock_session_factory.call_args.kwargs
assert "conn" in mock_session_factory_call_kwargs
conn_wrapper = mock_session_factory_call_kwargs["conn"]
assert conn_wrapper.conn_id == "SecretsManagerBackend__connection"
assert conn_wrapper.role_arn == "arn:aws:iam::222222222222:role/awesome-role"
assert conn_wrapper.region_name == "eu-central-1"
mock_ssm_client.assert_called_once_with(
service_name="secretsmanager", region_name="eu-central-1", use_ssl=False
)
|
TestSecretsManagerBackend
|
python
|
networkx__networkx
|
networkx/classes/coreviews.py
|
{
"start": 506,
"end": 1531
}
|
class ____(Mapping):
"""An AtlasView is a Read-only Mapping of Mappings.
It is a View into a dict-of-dict data structure.
The inner level of dict is read-write. But the
outer level is read-only.
See Also
========
AdjacencyView: View into dict-of-dict-of-dict
MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ("_atlas",)
def __getstate__(self):
return {"_atlas": self._atlas}
def __setstate__(self, state):
self._atlas = state["_atlas"]
def __init__(self, d):
self._atlas = d
def __len__(self):
return len(self._atlas)
def __iter__(self):
return iter(self._atlas)
def __getitem__(self, key):
return self._atlas[key]
def copy(self):
return {n: self[n].copy() for n in self._atlas}
def __str__(self):
return str(self._atlas) # {nbr: self[nbr] for nbr in self})
def __repr__(self):
return f"{self.__class__.__name__}({self._atlas!r})"
|
AtlasView
|
python
|
google__pytype
|
pytype/tests/test_utils.py
|
{
"start": 8037,
"end": 8274
}
|
class ____:
"""Match a regex."""
def __init__(self, regex):
self.regex = regex
def match(self, message):
return re.search(self.regex, message, flags=re.DOTALL)
def __repr__(self):
return repr(self.regex)
|
RegexMatcher
|
python
|
scikit-learn__scikit-learn
|
sklearn/externals/_arff.py
|
{
"start": 13966,
"end": 14290
}
|
class ____(ArffException):
'''Error raised when the layout of the ARFF file has something wrong.'''
message = 'Invalid layout of the ARFF file, at line %d.'
def __init__(self, msg=''):
super().__init__()
if msg:
self.message = BadLayout.message + ' ' + msg.replace('%', '%%')
|
BadLayout
|
python
|
mlflow__mlflow
|
mlflow/types/responses_helpers.py
|
{
"start": 3805,
"end": 3947
}
|
class ____(Status):
id: str
arguments: str
name: str
server_label: str
type: str = "mcp_approval_request"
|
McpApprovalRequest
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_eks.py
|
{
"start": 27419,
"end": 29991
}
|
class ____:
def setup_method(self) -> None:
self.cluster_name: str = CLUSTER_NAME
self.fargate_profile_name: str = FARGATE_PROFILE_NAME
self.delete_fargate_profile_operator = EksDeleteFargateProfileOperator(
task_id=TASK_ID, cluster_name=self.cluster_name, fargate_profile_name=self.fargate_profile_name
)
@mock.patch.object(Waiter, "wait")
@mock.patch.object(EksHook, "delete_fargate_profile")
def test_existing_fargate_profile(self, mock_delete_fargate_profile, mock_waiter):
self.delete_fargate_profile_operator.execute({})
mock_delete_fargate_profile.assert_called_once_with(
clusterName=self.cluster_name, fargateProfileName=self.fargate_profile_name
)
mock_waiter.assert_not_called()
@mock.patch.object(Waiter, "wait")
@mock.patch.object(EksHook, "delete_fargate_profile")
def test_existing_fargate_profile_with_wait(self, mock_delete_fargate_profile, mock_waiter):
self.delete_fargate_profile_operator.wait_for_completion = True
self.delete_fargate_profile_operator.execute({})
mock_delete_fargate_profile.assert_called_once_with(
clusterName=self.cluster_name, fargateProfileName=self.fargate_profile_name
)
mock_waiter.assert_called_with(
mock.ANY,
clusterName=CLUSTER_NAME,
fargateProfileName=FARGATE_PROFILE_NAME,
WaiterConfig={"Delay": 30, "MaxAttempts": 60},
)
assert_expected_waiter_type(mock_waiter, "FargateProfileDeleted")
@mock.patch.object(EksHook, "delete_fargate_profile")
def test_delete_fargate_profile_deferrable(self, _):
self.delete_fargate_profile_operator.deferrable = True
with pytest.raises(TaskDeferred) as exc:
self.delete_fargate_profile_operator.execute({})
assert isinstance(exc.value.trigger, EksDeleteFargateProfileTrigger), (
"Trigger is not a EksDeleteFargateProfileTrigger"
)
def test_template_fields(self):
validate_template_fields(self.delete_fargate_profile_operator)
def test_init_with_region(self):
with pytest.warns(AirflowProviderDeprecationWarning) as m:
m.operator = EksDeleteFargateProfileOperator(
task_id=TASK_ID,
cluster_name=self.cluster_name,
fargate_profile_name=self.fargate_profile_name,
region="us-east-2",
)
assert m.operator.region_name == "us-east-2"
|
TestEksDeleteFargateProfileOperator
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/errors.py
|
{
"start": 11988,
"end": 12423
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "InvalidSubsetError"
pipeline = graphene.Field(
graphene.NonNull("dagster_graphql.schema.pipelines.pipeline.GraphenePipeline")
)
def __init__(self, message, pipeline):
super().__init__()
self.message = check.str_param(message, "message")
self.pipeline = pipeline
|
GrapheneInvalidSubsetError
|
python
|
google__pytype
|
pytype/pyi/parser.py
|
{
"start": 10090,
"end": 28981
}
|
class ____(visitor.BaseVisitor):
"""Converts an ast tree to a pytd tree."""
_NOOP_NODES = {
# Expression contexts are ignored.
astlib.Load,
astlib.Store,
astlib.Del,
# Appears as an operator in `__all__ += ...`.
astlib.Add,
# These nodes are passed through unchanged and processed by their parents.
astlib.arg,
astlib.arguments,
astlib.keyword,
types.Pyval,
}
_ANNOT_NODES = (
astlib.Attribute,
astlib.BinOp,
astlib.Name,
astlib.Subscript,
)
def __init__(self, src, filename, module_name, options):
super().__init__(filename=filename, src_code=src, visit_decorators=True)
defs = definitions.Definitions(modules.Module(filename, module_name))
self.defs = defs
self.module_name = module_name
self.options = options
self.level = 0
self.in_function = False # pyi will not have nested defs
self.annotation_visitor = _AnnotationVisitor(filename=filename, defs=defs)
self.class_stack = []
def show(self, node):
print(debug.dump(node, astlib, include_attributes=True))
def generic_visit(self, node):
node_type = type(node)
if node_type in self._NOOP_NODES:
return node
raise NotImplementedError(f"Unsupported node type: {node_type.__name__}")
def visit_Module(self, node):
node.body = _flatten_splices(node.body)
return self.defs.build_type_decl_unit(node.body)
def visit_Pass(self, node):
return self.defs.ELLIPSIS
def visit_Expr(self, node):
# Handle some special cases of expressions that can occur in class and
# module bodies.
if node.value == self.defs.ELLIPSIS:
# class x: ...
return node.value
elif types.Pyval.is_str(node.value):
# docstrings
return Splice([])
else:
raise ParseError(f"Unexpected expression: {node.value}")
def _extract_function_properties(self, node):
decorators = []
abstract = coroutine = final = overload = False
for d in node.decorator_list:
# Since we can't import other parts of the stdlib in builtins and typing,
# we treat the abstractmethod and coroutine decorators as pseudo-builtins.
if self.defs.matches_type(
d.name, ("builtins.abstractmethod", "abc.abstractmethod")
):
abstract = True
elif self.defs.matches_type(
d.name,
("typing.Coroutine", "asyncio.coroutine", "coroutines.coroutine"),
):
coroutine = True
elif self.defs.matches_type(d.name, "typing.final"):
final = True
elif self.defs.matches_type(d.name, "typing.overload"):
overload = True
else:
decorators.append(d)
return decorators, function.SigProperties(
abstract=abstract,
coroutine=coroutine,
final=final,
overload=overload,
is_async=isinstance(node, astlib.AsyncFunctionDef),
)
def visit_FunctionDef(self, node):
node.decorator_list, props = self._extract_function_properties(node)
node.body = _flatten_splices(node.body)
return function.NameAndSig.from_function(node, props)
def visit_AsyncFunctionDef(self, node):
return self.visit_FunctionDef(node)
def visit_AnnAssign(self, node):
return self._ann_assign(node.target, node.annotation, node.value)
def _ann_assign(self, name, typ, val):
is_alias = False
if name == "__match_args__" and isinstance(val, tuple):
typ = pytd.NamedType("tuple")
val = None
elif typ.name:
if self.defs.matches_type(typ.name, "typing.Final"):
if isinstance(val, types.Pyval):
# to_pytd_literal raises an exception if the value is a float, but
# checking upfront allows us to generate a nicer error message.
if isinstance(val.value, float):
raise ParseError(
f"Default value for {name}: Final can only be '...' or a legal "
f"Literal parameter, got {val}"
)
else:
typ = val.to_pytd_literal()
val = None
elif isinstance(val, pytd.NamedType):
typ = pytd.Literal(val)
val = None
elif self.defs.matches_type(typ.name, "typing.TypeAlias"):
if not val:
raise ParseError(f"Missing default value for {name}: {typ.name}")
typ = self.defs.new_type_from_value(val) or val
val = None
is_alias = True
elif (
self.module_name == "typing_extensions" and typ.name == "_SpecialForm"
):
def type_of(n):
return pytd.GenericType(
pytd.NamedType("builtins.type"), (pytd.NamedType(n),)
)
# We convert known special forms to their corresponding types and
# otherwise treat them as unknown types.
if name in {"Final", "Protocol", "Self", "TypeGuard", "TypeIs"}:
typ = type_of(f"typing.{name}")
elif name == "LiteralString":
# TODO(b/303083512): Support LiteralString.
typ = type_of("builtins.str")
else:
typ = pytd.AnythingType()
if val:
if isinstance(val, (types.Ellipsis, types.Pyval)):
val = pytd.AnythingType()
else:
raise ParseError(
f"Default value for {name}: {typ.name} can only be '...' or a "
f"literal constant, got {val}"
)
if is_alias:
assert not val
ret = pytd.Alias(name, typ)
else:
ret = pytd.Constant(name, typ, val)
if self.level == 0:
self.defs.add_alias_or_constant(ret)
return ret
def visit_AugAssign(self, node):
if node.target == "__all__":
# Ignore other assignments
self.defs.all += _read_str_list(node.target, node.value)
return Splice([])
def _bare_assign(self, name, typ, val):
if typ:
if val is self.defs.ELLIPSIS:
# `name = ... # type: typ` converts to `name: typ`, dropping `...`.
return self._ann_assign(name, typ, None)
else:
return self._ann_assign(name, typ, val)
# Record and erase TypeVar and ParamSpec definitions.
if isinstance(val, _TypeVariable):
self.defs.add_type_variable(name, val)
return Splice([])
if getattr(val, "name", None) == _UNKNOWN_IMPORT:
constant = pytd.Constant(name, pytd.AnythingType())
self.defs.add_alias_or_constant(constant)
return constant
if name == "__slots__":
if self.level == 0:
raise ParseError("__slots__ only allowed on the class level")
return types.SlotDecl(_read_str_list(name, val))
if name == "__all__" and isinstance(val, (list, tuple)):
self.defs.all = _read_str_list(name, val)
return Splice([])
ret = self.defs.new_alias_or_constant(name, val)
if self.in_function:
return function.Mutator(name, ret.type)
if self.level == 0:
self.defs.add_alias_or_constant(ret)
return ret
def visit_Assign(self, node):
out = []
value = node.value
for target in node.targets:
if isinstance(target, tuple):
count = len(target)
if not (isinstance(value, tuple) and count == len(value)):
msg = f"Cannot unpack {count} values for multiple assignment"
raise ParseError(msg)
for k, v in zip(target, value):
out.append(self._bare_assign(k, node.type_comment, v))
else:
out.append(self._bare_assign(target, node.type_comment, value))
return Splice(out)
def visit_ClassDef(self, node):
full_class_name = ".".join(self.class_stack)
self.defs.type_map[full_class_name] = pytd.NamedType(full_class_name)
defs = _flatten_splices(node.body)
return self.defs.build_class(
full_class_name, node.bases, node.keywords, node.decorator_list, defs
)
def enter_If(self, node):
# Evaluate the test and preemptively remove the invalid branch so we don't
# waste time traversing it.
node.test = conditions.evaluate(node.test, self.options)
if not isinstance(node.test, bool):
raise ParseError("Unexpected if statement " + debug.dump(node, astlib))
if node.test:
node.orelse = []
else:
node.body = []
def visit_If(self, node):
if not isinstance(node.test, bool):
raise ParseError("Unexpected if statement " + debug.dump(node, astlib))
if node.test:
return Splice(node.body)
else:
return Splice(node.orelse)
def visit_Import(self, node):
if self.level > 0:
raise ParseError("Import statements need to be at module level")
self.defs.add_import(None, node.names)
return Splice([])
def visit_ImportFrom(self, node):
if self.level > 0:
raise ParseError("Import statements need to be at module level")
module = _import_from_module(node.module, node.level)
self.defs.add_import(module, node.names)
return Splice([])
def visit_alias(self, node):
if node.asname is None:
return node.name
return node.name, node.asname
def visit_Name(self, node):
return _parseable_name_to_real_name(node.id)
def visit_Attribute(self, node):
return f"{node.value}.{node.attr}"
def visit_Tuple(self, node):
return tuple(node.elts)
def visit_List(self, node):
return list(node.elts)
def visit_Dict(self, node):
return dict(zip(node.keys, node.values))
def visit_Call(self, node):
func = node.func.name or ""
for tvar_kind in ("TypeVar", "ParamSpec"):
if self.defs.matches_type(func, f"typing.{tvar_kind}"):
if self.level > 0:
raise ParseError(f"{tvar_kind}s need to be defined at module level")
return _TypeVariable.from_call(tvar_kind, node)
if self.defs.matches_type(func, "typing.NamedTuple"):
if len(node.args) != 2:
msg = "Wrong args: expected NamedTuple(name, [(field, type), ...])"
raise ParseError(msg)
name, fields = node.args
return self.defs.new_named_tuple(
name.value, [(n.value, t) for n, t in fields]
)
elif self.defs.matches_type(func, "collections.namedtuple"):
if len(node.args) != 2:
msg = "Wrong args: expected namedtuple(name, [field, ...])"
raise ParseError(msg)
name, fields = node.args
typed_fields = [(n.value, pytd.AnythingType()) for n in fields]
return self.defs.new_named_tuple(name.value, typed_fields)
elif self.defs.matches_type(func, "typing.TypedDict"):
if len(node.args) != 2:
msg = "Wrong args: expected TypedDict(name, {field: type, ...})"
raise ParseError(msg)
name, fields = node.args
return self.defs.new_typed_dict(
name.value, {n.value: t for n, t in fields.items()}, node.keywords
)
elif self.defs.matches_type(func, "typing.NewType"):
if len(node.args) != 2:
msg = "Wrong args: expected NewType(name, type)"
raise ParseError(msg)
name, typ = node.args
return self.defs.new_new_type(name.value, typ)
elif self.defs.matches_type(func, "importlib.import_module"):
if self.level > 0:
raise ParseError("Import statements need to be at module level")
return pytd.NamedType(_UNKNOWN_IMPORT)
# Convert all other calls to their function names; for example, typing.pyi
# uses things like:
# List = _Alias()
return node.func
def visit_Raise(self, node):
return types.Raise(node.exc)
# We convert type comments and annotations in enter() because we want to
# convert an entire type at once rather than bottom-up. enter() and leave()
# are also used to track nesting level.
def _convert_value(self, node):
if isinstance(node.value, self._ANNOT_NODES):
node.value = self.annotation_visitor.visit(node.value)
elif isinstance(node.value, (astlib.Tuple, astlib.List)):
elts = [
self.annotation_visitor.visit(x)
if isinstance(x, self._ANNOT_NODES)
else x
for x in node.value.elts
]
node.value = type(node.value)(elts)
def enter_Assign(self, node):
if node.type_comment:
node.type_comment = self.annotation_visitor.visit(node.type_comment)
self._convert_value(node)
def enter_AnnAssign(self, node):
if node.annotation:
node.annotation = self.annotation_visitor.visit(node.annotation)
self._convert_value(node)
def enter_arg(self, node):
if node.annotation:
node.annotation = self.annotation_visitor.visit(node.annotation)
def _convert_list(self, lst, start=0):
lst[start:] = [self.annotation_visitor.visit(x) for x in lst[start:]]
def _convert_newtype_args(self, node: astlib.Call):
self._convert_list(node.args, start=1)
def _convert_typing_namedtuple_args(self, node: astlib.Call):
for fields in node.args[1:]:
for field in cast(astlib.List, fields).elts:
self._convert_list(cast(astlib.Tuple, field).elts, start=1)
def _convert_typevar_args(self, node: astlib.Call):
self._convert_list(node.args, start=1)
for kw in node.keywords:
if kw.arg == "bound":
kw.value = self.annotation_visitor.visit(kw.value)
elif kw.arg == "default":
kw.value = self.annotation_visitor.visit(kw.value)
def _convert_typed_dict_args(self, node: astlib.Call):
for fields in node.args[1:]:
self._convert_list(cast(astlib.Dict, fields).values)
def enter_Call(self, node):
node.func = self.annotation_visitor.visit(node.func)
func = node.func.name or ""
if self.defs.matches_type(
func, ("typing.TypeVar", "typing.ParamSpec", "typing.TypeVarTuple")
):
self._convert_typevar_args(node)
elif self.defs.matches_type(func, "typing.NamedTuple"):
self._convert_typing_namedtuple_args(node)
elif self.defs.matches_type(func, "typing.TypedDict"):
self._convert_typed_dict_args(node)
elif self.defs.matches_type(func, "typing.NewType"):
return self._convert_newtype_args(node)
def enter_Raise(self, node):
exc = node.exc.func if isinstance(node.exc, astlib.Call) else node.exc
node.exc = self.annotation_visitor.visit(exc)
def _convert_decorators(self, node):
decorators = []
for d in node.decorator_list:
base = d.func if isinstance(d, astlib.Call) else d
if isinstance(base, astlib.Attribute):
name = _attribute_to_name(base)
else:
name = base
typ = self.annotation_visitor.visit(name)
# Wrap as aliases so that we can reference functions as types.
decorators.append(pytd.Alias(name.id, typ))
node.decorator_list = decorators
def enter_FunctionDef(self, node):
self._convert_decorators(node)
if node.returns:
node.returns = self.annotation_visitor.visit(node.returns)
self.level += 1
self.in_function = True
def leave_FunctionDef(self, node):
self.level -= 1
self.in_function = False
def enter_AsyncFunctionDef(self, node):
self.enter_FunctionDef(node)
def leave_AsyncFunctionDef(self, node):
self.leave_FunctionDef(node)
def enter_ClassDef(self, node):
self._convert_decorators(node)
node.bases = [
self.annotation_visitor.visit(base)
if isinstance(base, self._ANNOT_NODES)
else base
for base in node.bases
]
for kw in node.keywords:
if kw.arg == "metaclass":
kw.value = self.annotation_visitor.visit(kw.value)
self.level += 1
self.class_stack.append(_parseable_name_to_real_name(node.name))
def leave_ClassDef(self, node):
self.level -= 1
self.class_stack.pop()
def post_process_ast(ast, src, name=None):
"""Post-process the parsed AST."""
ast = definitions.finalize_ast(ast)
ast = ast.Visit(pep484.ConvertTypingToNative(name))
if name:
ast = ast.Replace(name=name)
ast = ast.Visit(visitors.ResolveLocalNames())
else:
# If there's no unique name, hash the sourcecode.
ast = ast.Replace(name=hashlib.md5(src.encode("utf-8")).hexdigest())
ast = ast.Visit(visitors.StripExternalNamePrefix())
# Now that we have resolved external names, validate any class decorators that
# do code generation. (We will generate the class lazily, but we should check
# for errors at parse time so they can be reported early.)
try:
ast = ast.Visit(decorate.ValidateDecoratedClassVisitor())
except TypeError as e:
# Convert errors into ParseError. Unfortunately we no longer have location
# information if an error is raised during transformation of a class node.
raise ParseError.from_exc(e)
return ast
def _fix_src(src: str) -> str:
"""Attempts to fix syntax errors in the source code."""
# TODO(b/294445640): This is a hacky workaround to deal with invalid stubs
# produced by the protobuf pyi generator.
try:
tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))
except SyntaxError:
return src
num_tokens = len(tokens)
def _is_classname(i):
return i and tokens[i - 1].string == "class"
def _is_varname(i):
if i and tokens[i - 1].string.strip(): # not proceeded by whitespace
return False
return i < num_tokens - 1 and tokens[i + 1].type == tokenize.OP
lines = src.splitlines()
for i, token in enumerate(tokens):
if (
not keyword.iskeyword(token.string)
or not _is_classname(i)
and not _is_varname(i)
):
continue
start_line, start_col = token.start
end_line, end_col = token.end
if start_line != end_line:
continue
line = lines[start_line - 1]
new_line = (
line[:start_col]
+ _keyword_to_parseable_name(token.string)
+ line[end_col:]
)
lines[start_line - 1] = new_line
return "\n".join(lines)
def _parse(src: str, feature_version: int, filename: str = ""):
"""Call the ast parser with the appropriate feature version."""
kwargs = {"feature_version": feature_version, "type_comments": True}
try:
ast_root_node = astlib.parse(src, filename, **kwargs)
except SyntaxError as e:
# We only attempt to fix the source code if a syntax error is encountered
# because (1) this way, if the fixing fails, the error details will
# correctly reflect the original source, and (2) fixing is unnecessary most
# of the time, so always running it would be inefficient.
fixed_src = _fix_src(src)
try:
ast_root_node = astlib.parse(fixed_src, filename, **kwargs)
except SyntaxError:
raise ParseError(
e.msg, line=e.lineno, filename=filename, column=e.offset, text=e.text
) from e
return ast_root_node
def _feature_version(python_version: tuple[int, ...]) -> int:
"""Get the python feature version for the parser."""
if len(python_version) == 1:
return sys.version_info.minor
else:
return python_version[1]
# Options that will be copied from pytype.config.Options.
_TOPLEVEL_PYI_OPTIONS = (
"platform",
"python_version",
"strict_primitive_comparisons",
)
@dataclasses.dataclass
|
_GeneratePytdVisitor
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/bigquery.py
|
{
"start": 92695,
"end": 94020
}
|
class ____(GoogleBaseAsyncHook):
"""Async hook for BigQuery Table."""
sync_hook_class = BigQueryHook
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
async def get_table_client(
self, dataset: str, table_id: str, project_id: str, session: ClientSession
) -> Table_async:
"""
Get a Google Big Query Table object.
:param dataset: The name of the dataset in which to look for the table storage bucket.
:param table_id: The name of the table to check the existence of.
:param project_id: The Google cloud project in which to look for the table.
The connection supplied to the hook must provide
access to the specified project.
:param session: aiohttp ClientSession
"""
token = await self.get_token(session=session)
return Table_async(
dataset_name=dataset,
table_name=table_id,
project=project_id,
token=token,
session=cast("Session", session),
)
|
BigQueryTableAsyncHook
|
python
|
getsentry__sentry
|
src/sentry/issue_detection/detectors/query_injection_detector.py
|
{
"start": 645,
"end": 5721
}
|
class ____(PerformanceDetector):
__slots__ = "stored_problems"
type = DetectorType.QUERY_INJECTION
settings_key = DetectorType.QUERY_INJECTION
def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
super().__init__(settings, event)
self.stored_problems = {}
self.potential_unsafe_inputs: list[tuple[str, dict[str, Any]]] = []
self.extract_request_data(event)
def extract_request_data(self, event: dict[str, Any]) -> None:
self.request_data = event.get("request", {}).get("data", {})
self.request_url = event.get("request", {}).get("url", "")
if not isinstance(self.request_data, dict):
return
for query_pair in self.request_data.items():
query_value = query_pair[1]
# Any JSON-like values being passed as query parameters are potential unsafe inputs
if not isinstance(query_value, (str, int, float, bool)) and query_value is not None:
self.potential_unsafe_inputs.append(query_pair)
def visit_span(self, span: Span) -> None:
if not self._is_span_eligible(span):
return
if len(self.potential_unsafe_inputs) == 0:
return
description = span.get("description", None) or ""
op = span.get("op", None) or ""
spans_involved = [span["span_id"]]
unsafe_inputs = []
for input_key, input_value in self.potential_unsafe_inputs:
original_input_value = input_value.copy()
# Replace all operands in filter with "?" since the query description is sanitized
if input_value and isinstance(input_value, dict):
for dict_key, dict_value in input_value.items():
if dict_key and not isinstance(dict_value, dict):
input_value[dict_key] = "?"
input_dict = {input_key: input_value}
if json.dumps(input_dict) in description:
description = description.replace(json.dumps(input_value), "[UNTRUSTED_INPUT]")
unsafe_inputs.append((input_key, original_input_value))
if len(unsafe_inputs) == 0:
return
parameterized_description = span.get("sentry_tags", {}).get("description")
# If the query description is not parameterized, use the original description with replacements
if not parameterized_description:
parameterized_description = description
vulnerable_keys = [key for key, _ in unsafe_inputs]
fingerprint_description = f"{'-'.join(vulnerable_keys)}-{parameterized_description}"
fingerprint = self._fingerprint(fingerprint_description)
issue_description = (
f"Untrusted Inputs [{', '.join(vulnerable_keys)}] in `{parameterized_description}`"
)
self.stored_problems[fingerprint] = PerformanceProblem(
type=QueryInjectionVulnerabilityGroupType,
fingerprint=fingerprint,
op=op,
desc=issue_description[:MAX_EVIDENCE_VALUE_LENGTH],
cause_span_ids=[],
parent_span_ids=[],
offender_span_ids=spans_involved,
evidence_data={
"op": op,
"cause_span_ids": [],
"parent_span_ids": [],
"offender_span_ids": spans_involved,
"transaction_name": self._event.get("transaction", ""),
"vulnerable_parameters": unsafe_inputs,
"request_url": self.request_url,
},
evidence_display=[
IssueEvidence(
name="Offending Spans",
value=get_notification_attachment_body(
op,
description,
)[:MAX_EVIDENCE_VALUE_LENGTH],
# Has to be marked important to be displayed in the notifications
important=True,
)
],
)
def is_creation_allowed_for_organization(self, organization: Organization) -> bool:
return True
def is_creation_allowed_for_project(self, project: Project | None) -> bool:
return self.settings["detection_enabled"]
def _is_span_eligible(self, span: Span) -> bool:
if not span.get("span_id"):
return False
op = span.get("op", None)
if not op or not op.startswith("db") or op.startswith("db.redis"):
return False
description = span.get("description", None)
if not description:
return False
sql_keywords = ("SELECT", "UPDATE", "INSERT")
if any(description.upper().startswith(keyword) for keyword in sql_keywords):
return False
return True
def _fingerprint(self, description: str) -> str:
signature = description.encode("utf-8")
full_fingerprint = hashlib.sha1(signature).hexdigest()
return f"1-{QueryInjectionVulnerabilityGroupType.type_id}-{full_fingerprint}"
|
QueryInjectionDetector
|
python
|
getsentry__sentry
|
src/sentry/seer/breakpoints.py
|
{
"start": 1185,
"end": 1453
}
|
class ____(TypedDict):
data: "Mapping[str, BreakpointTransaction]"
sort: NotRequired[str]
allow_midpoint: NotRequired[str]
validate_tail_hours: NotRequired[int]
trend_percentage: NotRequired[float]
min_change: NotRequired[float]
|
BreakpointRequest
|
python
|
python-excel__xlwt
|
xlwt/BIFFRecords.py
|
{
"start": 13832,
"end": 14716
}
|
class ____(BiffRecord):
"""
This record stores two Windows country identifiers. The first
represents the user interface language of the Excel version that has
saved the file, and the second represents the system regional settings
at the time the file was saved.
Record COUNTRY, BIFF3-BIFF8:
Offset Size Contents
0 2 Windows country identifier of the user interface language of Excel
2 2 Windows country identifier of the system regional settings
The following table shows most of the used country identifiers. Most
of these identifiers are equal to the international country calling
codes.
1 USA
2 Canada
7 Russia
"""
_REC_ID = 0x008C
def __init__(self, ui_id, sys_settings_id):
self._rec_data = pack('<2H', ui_id, sys_settings_id)
|
CountryRecord
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/frame_ctor.py
|
{
"start": 1708,
"end": 1927
}
|
class ____:
def setup(self):
mi = MultiIndex.from_product([range(100), range(100)])
self.s = Series(np.random.randn(10000), index=mi)
def time_mi_series(self):
DataFrame(self.s)
|
FromSeries
|
python
|
sqlalchemy__sqlalchemy
|
examples/generic_associations/table_per_related.py
|
{
"start": 1844,
"end": 2427
}
|
class ____:
"""HasAddresses mixin, creates a new Address class
for each parent.
"""
@declared_attr
def addresses(cls):
cls.Address = type(
f"{cls.__name__}Address",
(Address, Base),
dict(
__tablename__=f"{cls.__tablename__}_address",
parent_id=mapped_column(
Integer, ForeignKey(f"{cls.__tablename__}.id")
),
parent=relationship(cls, overlaps="addresses"),
),
)
return relationship(cls.Address)
|
HasAddresses
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_str_returned.py
|
{
"start": 1002,
"end": 1097
}
|
class ____:
""" Uninferable return value """
__str__ = lambda self: Missing
|
AmbiguousStr
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/cfg.py
|
{
"start": 21836,
"end": 32351
}
|
class ____(gast.NodeVisitor):
"""Converts an AST to CFGs.
A separate CFG will be constructed for each function.
"""
def __init__(self):
super(AstToCfg, self).__init__()
self.builder_stack = []
self.builder = None
self.cfgs = {}
self.lexical_scopes = []
def _enter_lexical_scope(self, node):
self.lexical_scopes.append(node)
def _exit_lexical_scope(self, node):
leaving_node = self.lexical_scopes.pop()
assert node == leaving_node
def _get_enclosing_finally_scopes(self, stop_at):
included = []
for node in reversed(self.lexical_scopes):
if isinstance(node, gast.Try) and node.finalbody:
included.append(node)
if isinstance(node, stop_at):
return node, included
return None, included
def _get_enclosing_except_scopes(self, stop_at):
included = []
for node in reversed(self.lexical_scopes):
if isinstance(node, gast.Try) and node.handlers:
included.extend(node.handlers)
if isinstance(node, stop_at):
break
return included
def _process_basic_statement(self, node):
self.generic_visit(node)
self.builder.add_ordinary_node(node)
def _process_exit_statement(self,
node,
exits_nodes_of_type,
may_exit_via_except=False):
self.generic_visit(node)
# Note: this is safe because we process functions separately.
try_node, guards = self._get_enclosing_finally_scopes(exits_nodes_of_type)
assert try_node is not None, '{} that is not enclosed by any of {}'.format(
node, exits_nodes_of_type)
node = self.builder.add_exit_node(node, try_node, guards)
if may_exit_via_except:
except_guards = self._get_enclosing_except_scopes(exits_nodes_of_type)
self.builder.connect_raise_node(node, except_guards)
def _process_continue_statement(self, node, *loops_to_nodes_of_type):
# Note: this is safe because we process functions separately.
try_node, guards = self._get_enclosing_finally_scopes(
tuple(loops_to_nodes_of_type))
if try_node is None:
raise ValueError('%s that is not enclosed by any of %s' %
(node, loops_to_nodes_of_type))
self.builder.add_continue_node(node, try_node, guards)
def visit_ClassDef(self, node):
# We also keep the ClassDef node in the CFG, since it technically is a
# statement.
# For example, this is legal and allows executing user code:
#
# class Foo(bar()):
# pass
#
# It also has a scope:
#
# class Bar(object):
# a = 1
if self.builder is None:
self.generic_visit(node)
return
self.builder.add_ordinary_node(node)
self.builder_stack.append(self.builder)
self.builder = GraphBuilder(node)
self._enter_lexical_scope(node)
self._process_basic_statement(node)
self._exit_lexical_scope(node)
# TODO(mdan): Track the CFG local to the class definition as well?
self.builder = self.builder_stack.pop()
def _process_function_def(self, node, is_lambda):
# The function body is stored in a separate graph, because function
# definitions have effects very different from function calls.
if self.builder is not None:
self.builder.add_ordinary_node(node)
self.builder_stack.append(self.builder)
self.builder = GraphBuilder(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
self._process_basic_statement(node.args)
if is_lambda:
self._process_exit_statement(node.body, (gast.Lambda,))
else:
for stmt in node.body:
self.visit(stmt)
self.builder.exit_section(node)
self._exit_lexical_scope(node)
self.cfgs[node] = self.builder.build()
self.builder = self.builder_stack.pop()
def visit_FunctionDef(self, node):
self._process_function_def(node, is_lambda=False)
def visit_Lambda(self, node):
self._process_function_def(node, is_lambda=True)
def visit_Return(self, node):
self._process_exit_statement(node, (gast.FunctionDef,))
def visit_Import(self, node):
self._process_basic_statement(node)
def visit_ImportFrom(self, node):
self._process_basic_statement(node)
def visit_Expr(self, node):
self._process_basic_statement(node)
def visit_NamedExpr(self, node):
# TODO(yileiyang): Add a test case once we have a newer astunparse version.
# NamedExpr was introduced in Python 3.8 and supported in gast 0.5.1+.
self._process_basic_statement(node)
def visit_Assign(self, node):
self._process_basic_statement(node)
def visit_AnnAssign(self, node):
self._process_basic_statement(node)
def visit_AugAssign(self, node):
self._process_basic_statement(node)
def visit_Pass(self, node):
self._process_basic_statement(node)
def visit_Global(self, node):
self._process_basic_statement(node)
def visit_Nonlocal(self, node):
self._process_basic_statement(node)
def visit_Print(self, node):
self._process_basic_statement(node)
def visit_Raise(self, node):
self._process_exit_statement(
node, (gast.FunctionDef,), may_exit_via_except=True)
self.builder.errors.add(node)
def visit_Assert(self, node):
# Ignoring the effect of exceptions.
self._process_basic_statement(node)
def visit_Delete(self, node):
self._process_basic_statement(node)
def visit_If(self, node):
# No need to track ifs as lexical scopes, for now.
# Lexical scopes are generally tracked in order to be able to resolve the
# targets of jump statements like break/continue/etc. Since there is no
# statement that can interrupt a conditional, we don't need to track their
# lexical scope. That may change in the future.
self.builder.begin_statement(node)
self.builder.enter_cond_section(node)
self._process_basic_statement(node.test)
self.builder.new_cond_branch(node)
for stmt in node.body:
self.visit(stmt)
self.builder.new_cond_branch(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_cond_section(node)
self.builder.end_statement(node)
def visit_While(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
self.generic_visit(node.test)
self.builder.enter_loop_section(node, node.test)
for stmt in node.body:
self.visit(stmt)
self.builder.exit_loop_section(node)
# Note: although the orelse is technically part of the loop node,
# the statements inside it don't affect the loop itself. For example, a
# break in the loop's orelse will not affect the loop itself.
self._exit_lexical_scope(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_section(node)
self.builder.end_statement(node)
def visit_For(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
# Note: Strictly speaking, this should be node.target + node.iter.
# However, the activity analysis accounts for this inconsistency,
# so dataflow analysis produces the correct values.
self.generic_visit(node.iter)
self.builder.enter_loop_section(node, node.iter)
# Also include the "extra loop test" annotation, to capture things like the
# control variable for return and break in for loops.
if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
self._process_basic_statement(
anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST))
for stmt in node.body:
self.visit(stmt)
self.builder.exit_loop_section(node)
# Note: although the orelse is technically part of the loop node,
# they don't count as loop bodies. For example, a break in the loop's
# orelse will affect the parent loop, not the current one.
self._exit_lexical_scope(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_section(node)
self.builder.end_statement(node)
def visit_Break(self, node):
self._process_exit_statement(node, (
gast.While,
gast.For,
))
def visit_Continue(self, node):
self._process_continue_statement(node, (
gast.While,
gast.For,
))
def visit_ExceptHandler(self, node):
self.builder.begin_statement(node)
self.builder.enter_except_section(node)
if node.type is not None:
self.visit(node.type)
if node.name is not None:
self.visit(node.name)
for stmt in node.body:
self.visit(stmt)
self.builder.end_statement(node)
def visit_Try(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
# Note: the current simplification is that the try block fully executes
# regardless of whether an exception triggers or not. This is consistent
# with blocks free of try/except, which also don't account for the
# possibility of an exception being raised mid-block.
for stmt in node.body:
self.visit(stmt)
# The orelse is an optional continuation of the body.
if node.orelse:
block_representative = node.orelse[0]
self.builder.enter_cond_section(block_representative)
self.builder.new_cond_branch(block_representative)
for stmt in node.orelse:
self.visit(stmt)
self.builder.new_cond_branch(block_representative)
self.builder.exit_cond_section(block_representative)
self._exit_lexical_scope(node)
if node.handlers:
# Using node would be inconsistent. Using the first handler node is also
# inconsistent, but less so.
block_representative = node.handlers[0]
self.builder.enter_cond_section(block_representative)
for block in node.handlers:
self.builder.new_cond_branch(block_representative)
self.visit(block)
self.builder.new_cond_branch(block_representative)
self.builder.exit_cond_section(block_representative)
if node.finalbody:
self.builder.enter_finally_section(node)
for stmt in node.finalbody:
self.visit(stmt)
self.builder.exit_finally_section(node)
self.builder.end_statement(node)
def visit_With(self, node):
# TODO(mdan): Mark the context manager's exit call as exit guard.
for item in node.items:
self._process_basic_statement(item)
for stmt in node.body:
self.visit(stmt)
def build(node):
visitor = AstToCfg()
visitor.visit(node)
return visitor.cfgs
|
AstToCfg
|
python
|
dagster-io__dagster
|
integration_tests/python_modules/dagster-k8s-test-infra/dagster_k8s_test_infra/cluster.py
|
{
"start": 1173,
"end": 9733
}
|
class ____(namedtuple("_ClusterConfig", "name kubeconfig_file")):
"""Used to represent a cluster, returned by the cluster_provider fixture below."""
def __new__(cls, name, kubeconfig_file):
return super().__new__(
cls,
name=check.str_param(name, "name"),
kubeconfig_file=check.str_param(kubeconfig_file, "kubeconfig_file"),
)
def define_cluster_provider_fixture(additional_kind_images=None):
@pytest.fixture(scope="session")
def _cluster_provider(request):
from dagster_k8s_test_infra.kind import kind_cluster, kind_load_images
if IS_BUILDKITE:
print("Installing ECR credentials...")
check_output("aws ecr get-login --no-include-email --region us-west-2 | sh", shell=True)
provider = request.config.getoption("--cluster-provider")
# Use a kind cluster
if provider == "kind":
cluster_name = request.config.getoption("--kind-cluster")
# Cluster will be deleted afterwards unless this is set.
# This is to allow users to reuse an existing cluster in local test by running
# `pytest --kind-cluster my-cluster --no-cleanup` -- this avoids the per-test run
# overhead of cluster setup and teardown
should_cleanup = True if IS_BUILDKITE else not request.config.getoption("--no-cleanup")
with kind_cluster(cluster_name, should_cleanup=should_cleanup) as cluster_config:
if not IS_BUILDKITE:
docker_image = get_test_project_docker_image()
try:
client = docker.from_env()
client.images.get(docker_image)
print(
f"Found existing image tagged {docker_image}, skipping image build. To rebuild,"
f" first run: docker rmi {docker_image}"
)
except docker.errors.ImageNotFound: # pyright: ignore[reportAttributeAccessIssue]
build_and_tag_test_image(docker_image)
kind_load_images(
cluster_name=cluster_config.name,
local_dagster_test_image=docker_image,
additional_images=additional_kind_images,
)
yield cluster_config
# Use cluster from kubeconfig
elif provider == "kubeconfig":
kubeconfig_file = os.getenv("KUBECONFIG", os.path.expandvars("${HOME}/.kube/config"))
kubernetes.config.load_kube_config(config_file=kubeconfig_file)
yield ClusterConfig(name="from_system_kubeconfig", kubeconfig_file=kubeconfig_file)
else:
raise Exception(f"unknown cluster provider {provider}")
return _cluster_provider
@contextmanager
def local_port_forward_postgres(namespace):
print("Port-forwarding postgres")
postgres_pod_name = (
check_output(
[
"kubectl",
"get",
"pods",
"--namespace",
namespace,
"-l",
"app=postgresql,release=dagster",
"-o",
'jsonpath="{.items[0].metadata.name}"',
]
)
.decode("utf-8")
.strip('"')
)
forward_port = find_free_port()
DagsterKubernetesClient.production_client().wait_for_pod(postgres_pod_name, namespace=namespace)
p = None
try:
p = subprocess.Popen(
[
"kubectl",
"port-forward",
"--namespace",
namespace,
postgres_pod_name,
f"{forward_port}:5432",
],
# Squelch the verbose "Handling connection for..." messages
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
# Validate port forwarding works
start = time.time()
while True:
if time.time() - start > PG_PORT_FORWARDING_TIMEOUT:
raise Exception("Timed out while waiting for postgres port forwarding")
print(
"Waiting for port forwarding from k8s pod %s:5432 to localhost:%d to be" # noqa: UP031
" available..." % (postgres_pod_name, forward_port)
)
try:
conn = psycopg2.connect(
database="test",
user="test",
password="test",
host="localhost",
port=forward_port,
)
conn.close()
break
except:
time.sleep(1)
continue
yield forward_port
finally:
if p is not None:
print("Terminating port-forwarding")
p.terminate()
@pytest.fixture(scope="session")
def helm_postgres_url_for_k8s_run_launcher(system_namespace_for_k8s_run_launcher):
with local_port_forward_postgres(
namespace=system_namespace_for_k8s_run_launcher
) as local_forward_port:
postgres_url = f"postgresql://test:test@localhost:{local_forward_port}/test"
print("Local Postgres forwarding URL: ", postgres_url)
yield postgres_url
@pytest.fixture(scope="function")
def dagster_instance_for_k8s_run_launcher(
helm_postgres_url_for_k8s_run_launcher,
):
with tempfile.TemporaryDirectory() as tempdir:
instance_ref = InstanceRef.from_dir(tempdir)
with DagsterInstance(
instance_type=InstanceType.PERSISTENT,
local_artifact_storage=LocalArtifactStorage(tempdir),
run_storage=PostgresRunStorage(helm_postgres_url_for_k8s_run_launcher),
event_storage=PostgresEventLogStorage(helm_postgres_url_for_k8s_run_launcher),
schedule_storage=PostgresScheduleStorage(helm_postgres_url_for_k8s_run_launcher),
compute_log_manager=NoOpComputeLogManager(),
run_coordinator=DefaultRunCoordinator(),
run_launcher=ExplodingRunLauncher(),
ref=instance_ref,
) as instance:
yield instance
check_export_runs(instance)
@pytest.fixture(scope="session")
def helm_postgres_url(helm_namespace):
with local_port_forward_postgres(namespace=helm_namespace) as local_forward_port:
postgres_url = f"postgresql://test:test@localhost:{local_forward_port}/test"
print("Local Postgres forwarding URL: ", postgres_url)
yield postgres_url
@pytest.fixture(scope="function")
def dagster_instance(helm_postgres_url):
with tempfile.TemporaryDirectory() as tempdir:
with environ({"DAGSTER_HOME": tempdir}):
with DagsterInstance(
instance_type=InstanceType.PERSISTENT,
local_artifact_storage=LocalArtifactStorage(tempdir),
run_storage=PostgresRunStorage(helm_postgres_url),
event_storage=PostgresEventLogStorage(helm_postgres_url),
compute_log_manager=NoOpComputeLogManager(),
run_coordinator=DefaultRunCoordinator(),
run_launcher=ExplodingRunLauncher(), # use graphql to launch any runs
ref=InstanceRef.from_dir(tempdir),
) as instance:
yield instance
check_export_runs(instance)
def check_export_runs(instance):
if not IS_BUILDKITE:
return
# example PYTEST_CURRENT_TEST: test_user_code_deployments.py::test_execute_on_celery_k8s (teardown)
current_test = (
os.environ.get("PYTEST_CURRENT_TEST").split()[0].replace("::", "-").replace(".", "-") # pyright: ignore[reportOptionalMemberAccess]
)
for run in instance.get_runs():
output_file = f"{current_test}-{run.run_id}.dump"
try:
export_run(instance, run, output_file)
except Exception as e:
print(f"Hit an error exporting dagster-debug {output_file}: {e}")
continue
p = subprocess.Popen(
[
"buildkite-agent",
"artifact",
"upload",
output_file,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
print("Buildkite artifact added with stdout: ", stdout)
print("Buildkite artifact added with stderr: ", stderr)
assert p.returncode == 0
|
ClusterConfig