language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py | {
"start": 109198,
"end": 110104
} | class ____(Validator):
REJECTLIST_FILES = frozenset(('__pycache__',))
def __init__(self, path, reporter=None):
super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(path)
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def validate(self):
super(PythonPackageValidator, self).validate()
if self.basename in self.REJECTLIST_FILES:
return
init_file = os.path.join(self.path, '__init__.py')
if not os.path.exists(init_file):
self.reporter.error(
path=self.object_path,
code='subdirectory-missing-init',
msg='Ansible module subdirectories must contain an __init__.py'
)
| PythonPackageValidator |
python | celery__celery | t/unit/utils/test_saferepr.py | {
"start": 2165,
"end": 2245
} | class ____(dict):
def __repr__(self):
return super().__repr__()
| dict3 |
python | bokeh__bokeh | src/bokeh/models/transforms.py | {
"start": 4612,
"end": 5154
} | class ____(Transform):
''' Apply either fixed dodge amount to data.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
value = Float(default=0, help="""
The amount to dodge the input data.
""")
range = Nullable(Instance("bokeh.models.ranges.Range"), help="""
When applying ``Dodge`` to categorical data values, the corresponding
``FactorRange`` must be supplied as the ``range`` property.
""")
| Dodge |
python | pennersr__django-allauth | allauth/socialaccount/providers/battlenet/provider.py | {
"start": 418,
"end": 1066
} | class ____(OAuth2Provider):
id = "battlenet"
name = "Battle.net"
account_class = BattleNetAccount
oauth2_adapter_class = BattleNetOAuth2Adapter
def extract_uid(self, data):
uid = str(data["id"])
if data.get("region") == "cn":
# China is on a different account system. UIDs can clash with US.
return uid + "-cn"
return uid
def extract_common_fields(self, data):
return {"username": data.get("battletag")}
def get_default_scope(self):
# Optional scopes: "sc2.profile", "wow.profile"
return []
provider_classes = [BattleNetProvider]
| BattleNetProvider |
python | python-openxml__python-docx | tests/test_section.py | {
"start": 3759,
"end": 18375
} | class ____:
"""Unit-test suite for `docx.section.Section`."""
@pytest.mark.parametrize(
("sectPr_cxml", "expected_value"),
[
("w:sectPr", False),
("w:sectPr/w:titlePg", True),
("w:sectPr/w:titlePg{w:val=0}", False),
("w:sectPr/w:titlePg{w:val=1}", True),
("w:sectPr/w:titlePg{w:val=true}", True),
],
)
def it_knows_when_it_displays_a_distinct_first_page_header(
self, sectPr_cxml: str, expected_value: bool, document_part_: Mock
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
section = Section(sectPr, document_part_)
different_first_page_header_footer = section.different_first_page_header_footer
assert different_first_page_header_footer is expected_value
@pytest.mark.parametrize(
("sectPr_cxml", "value", "expected_cxml"),
[
("w:sectPr", True, "w:sectPr/w:titlePg"),
("w:sectPr/w:titlePg", False, "w:sectPr"),
("w:sectPr/w:titlePg{w:val=1}", True, "w:sectPr/w:titlePg"),
("w:sectPr/w:titlePg{w:val=off}", False, "w:sectPr"),
],
)
def it_can_change_whether_the_document_has_distinct_odd_and_even_headers(
self, sectPr_cxml: str, value: bool, expected_cxml: str, document_part_: Mock
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
expected_xml = xml(expected_cxml)
section = Section(sectPr, document_part_)
section.different_first_page_header_footer = value
assert sectPr.xml == expected_xml
def it_provides_access_to_its_even_page_footer(
self, document_part_: Mock, _Footer_: Mock, footer_: Mock
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
_Footer_.return_value = footer_
section = Section(sectPr, document_part_)
footer = section.even_page_footer
_Footer_.assert_called_once_with(sectPr, document_part_, WD_HEADER_FOOTER.EVEN_PAGE)
assert footer is footer_
def it_provides_access_to_its_even_page_header(
self, document_part_: Mock, _Header_: Mock, header_: Mock
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
_Header_.return_value = header_
section = Section(sectPr, document_part_)
header = section.even_page_header
_Header_.assert_called_once_with(sectPr, document_part_, WD_HEADER_FOOTER.EVEN_PAGE)
assert header is header_
def it_provides_access_to_its_first_page_footer(
self, document_part_: Mock, _Footer_: Mock, footer_: Mock
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
_Footer_.return_value = footer_
section = Section(sectPr, document_part_)
footer = section.first_page_footer
_Footer_.assert_called_once_with(sectPr, document_part_, WD_HEADER_FOOTER.FIRST_PAGE)
assert footer is footer_
def it_provides_access_to_its_first_page_header(
self, document_part_: Mock, _Header_: Mock, header_: Mock
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
_Header_.return_value = header_
section = Section(sectPr, document_part_)
header = section.first_page_header
_Header_.assert_called_once_with(sectPr, document_part_, WD_HEADER_FOOTER.FIRST_PAGE)
assert header is header_
def it_provides_access_to_its_default_footer(
self, document_part_: Mock, _Footer_: Mock, footer_: Mock
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
_Footer_.return_value = footer_
section = Section(sectPr, document_part_)
footer = section.footer
_Footer_.assert_called_once_with(sectPr, document_part_, WD_HEADER_FOOTER.PRIMARY)
assert footer is footer_
def it_provides_access_to_its_default_header(
self, document_part_: Mock, _Header_: Mock, header_: Mock
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
_Header_.return_value = header_
section = Section(sectPr, document_part_)
header = section.header
_Header_.assert_called_once_with(sectPr, document_part_, WD_HEADER_FOOTER.PRIMARY)
assert header is header_
def it_can_iterate_its_inner_content(self):
document = Document(test_file("sct-inner-content.docx"))
assert len(document.sections) == 3
inner_content = list(document.sections[0].iter_inner_content())
assert len(inner_content) == 3
p = inner_content[0]
assert isinstance(p, Paragraph)
assert p.text == "P1"
t = inner_content[1]
assert isinstance(t, Table)
assert t.rows[0].cells[0].text == "T2"
p = inner_content[2]
assert isinstance(p, Paragraph)
assert p.text == "P3"
inner_content = list(document.sections[1].iter_inner_content())
assert len(inner_content) == 3
t = inner_content[0]
assert isinstance(t, Table)
assert t.rows[0].cells[0].text == "T4"
p = inner_content[1]
assert isinstance(p, Paragraph)
assert p.text == "P5"
p = inner_content[2]
assert isinstance(p, Paragraph)
assert p.text == "P6"
inner_content = list(document.sections[2].iter_inner_content())
assert len(inner_content) == 3
p = inner_content[0]
assert isinstance(p, Paragraph)
assert p.text == "P7"
p = inner_content[1]
assert isinstance(p, Paragraph)
assert p.text == "P8"
p = inner_content[2]
assert isinstance(p, Paragraph)
assert p.text == "P9"
@pytest.mark.parametrize(
("sectPr_cxml", "expected_value"),
[
("w:sectPr", WD_SECTION.NEW_PAGE),
("w:sectPr/w:type", WD_SECTION.NEW_PAGE),
("w:sectPr/w:type{w:val=continuous}", WD_SECTION.CONTINUOUS),
("w:sectPr/w:type{w:val=nextPage}", WD_SECTION.NEW_PAGE),
("w:sectPr/w:type{w:val=oddPage}", WD_SECTION.ODD_PAGE),
("w:sectPr/w:type{w:val=evenPage}", WD_SECTION.EVEN_PAGE),
("w:sectPr/w:type{w:val=nextColumn}", WD_SECTION.NEW_COLUMN),
],
)
def it_knows_its_start_type(
self, sectPr_cxml: str, expected_value: WD_SECTION, document_part_: Mock
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
section = Section(sectPr, document_part_)
start_type = section.start_type
assert start_type is expected_value
@pytest.mark.parametrize(
("sectPr_cxml", "value", "expected_cxml"),
[
(
"w:sectPr/w:type{w:val=oddPage}",
WD_SECTION.EVEN_PAGE,
"w:sectPr/w:type{w:val=evenPage}",
),
("w:sectPr/w:type{w:val=nextPage}", None, "w:sectPr"),
("w:sectPr", None, "w:sectPr"),
("w:sectPr/w:type{w:val=continuous}", WD_SECTION.NEW_PAGE, "w:sectPr"),
("w:sectPr/w:type", WD_SECTION.NEW_PAGE, "w:sectPr"),
(
"w:sectPr/w:type",
WD_SECTION.NEW_COLUMN,
"w:sectPr/w:type{w:val=nextColumn}",
),
],
)
def it_can_change_its_start_type(
self,
sectPr_cxml: str,
value: WD_SECTION | None,
expected_cxml: str,
document_part_: Mock,
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
expected_xml = xml(expected_cxml)
section = Section(sectPr, document_part_)
section.start_type = value
assert section._sectPr.xml == expected_xml
@pytest.mark.parametrize(
("sectPr_cxml", "expected_value"),
[
("w:sectPr/w:pgSz{w:w=1440}", Inches(1)),
("w:sectPr/w:pgSz", None),
("w:sectPr", None),
],
)
def it_knows_its_page_width(
self, sectPr_cxml: str, expected_value: Length | None, document_part_: Mock
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
section = Section(sectPr, document_part_)
page_width = section.page_width
assert page_width == expected_value
@pytest.mark.parametrize(
("value", "expected_cxml"),
[
(None, "w:sectPr/w:pgSz"),
(Inches(4), "w:sectPr/w:pgSz{w:w=5760}"),
],
)
def it_can_change_its_page_width(
self,
value: Length | None,
expected_cxml: str,
document_part_: Mock,
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
expected_xml = xml(expected_cxml)
section = Section(sectPr, document_part_)
section.page_width = value
assert section._sectPr.xml == expected_xml
@pytest.mark.parametrize(
("sectPr_cxml", "expected_value"),
[
("w:sectPr/w:pgSz{w:h=2880}", Inches(2)),
("w:sectPr/w:pgSz", None),
("w:sectPr", None),
],
)
def it_knows_its_page_height(
self, sectPr_cxml: str, expected_value: Length | None, document_part_: Mock
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
section = Section(sectPr, document_part_)
page_height = section.page_height
assert page_height == expected_value
@pytest.mark.parametrize(
("value", "expected_cxml"),
[
(None, "w:sectPr/w:pgSz"),
(Inches(2), "w:sectPr/w:pgSz{w:h=2880}"),
],
)
def it_can_change_its_page_height(
self, value: Length | None, expected_cxml: str, document_part_: Mock
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
expected_xml = xml(expected_cxml)
section = Section(sectPr, document_part_)
section.page_height = value
assert section._sectPr.xml == expected_xml
@pytest.mark.parametrize(
("sectPr_cxml", "expected_value"),
[
("w:sectPr/w:pgSz{w:orient=landscape}", WD_ORIENTATION.LANDSCAPE),
("w:sectPr/w:pgSz{w:orient=portrait}", WD_ORIENTATION.PORTRAIT),
("w:sectPr/w:pgSz", WD_ORIENTATION.PORTRAIT),
("w:sectPr", WD_ORIENTATION.PORTRAIT),
],
)
def it_knows_its_page_orientation(
self, sectPr_cxml: str, expected_value: WD_ORIENTATION, document_part_: Mock
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
section = Section(sectPr, document_part_)
orientation = section.orientation
assert orientation is expected_value
@pytest.mark.parametrize(
("value", "expected_cxml"),
[
(WD_ORIENTATION.LANDSCAPE, "w:sectPr/w:pgSz{w:orient=landscape}"),
(WD_ORIENTATION.PORTRAIT, "w:sectPr/w:pgSz"),
(None, "w:sectPr/w:pgSz"),
],
)
def it_can_change_its_orientation(
self, value: WD_ORIENTATION | None, expected_cxml: str, document_part_: Mock
):
sectPr = cast(CT_SectPr, element("w:sectPr"))
expected_xml = xml(expected_cxml)
section = Section(sectPr, document_part_)
section.orientation = value
assert section._sectPr.xml == expected_xml
@pytest.mark.parametrize(
("sectPr_cxml", "margin_prop_name", "expected_value"),
[
("w:sectPr/w:pgMar{w:left=120}", "left_margin", 76200),
("w:sectPr/w:pgMar{w:right=240}", "right_margin", 152400),
("w:sectPr/w:pgMar{w:top=-360}", "top_margin", -228600),
("w:sectPr/w:pgMar{w:bottom=480}", "bottom_margin", 304800),
("w:sectPr/w:pgMar{w:gutter=600}", "gutter", 381000),
("w:sectPr/w:pgMar{w:header=720}", "header_distance", 457200),
("w:sectPr/w:pgMar{w:footer=840}", "footer_distance", 533400),
("w:sectPr/w:pgMar", "left_margin", None),
("w:sectPr", "top_margin", None),
],
)
def it_knows_its_page_margins(
self,
sectPr_cxml: str,
margin_prop_name: str,
expected_value: int | None,
document_part_: Mock,
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
section = Section(sectPr, document_part_)
value = getattr(section, margin_prop_name)
assert value == expected_value
@pytest.mark.parametrize(
("sectPr_cxml", "margin_prop_name", "value", "expected_cxml"),
[
("w:sectPr", "left_margin", Inches(1), "w:sectPr/w:pgMar{w:left=1440}"),
("w:sectPr", "right_margin", Inches(0.5), "w:sectPr/w:pgMar{w:right=720}"),
("w:sectPr", "top_margin", Inches(-0.25), "w:sectPr/w:pgMar{w:top=-360}"),
(
"w:sectPr",
"bottom_margin",
Inches(0.75),
"w:sectPr/w:pgMar{w:bottom=1080}",
),
("w:sectPr", "gutter", Inches(0.25), "w:sectPr/w:pgMar{w:gutter=360}"),
(
"w:sectPr",
"header_distance",
Inches(1.25),
"w:sectPr/w:pgMar{w:header=1800}",
),
(
"w:sectPr",
"footer_distance",
Inches(1.35),
"w:sectPr/w:pgMar{w:footer=1944}",
),
("w:sectPr", "left_margin", None, "w:sectPr/w:pgMar"),
(
"w:sectPr/w:pgMar{w:top=-360}",
"top_margin",
Inches(0.6),
"w:sectPr/w:pgMar{w:top=864}",
),
],
)
def it_can_change_its_page_margins(
self,
sectPr_cxml: str,
margin_prop_name: str,
value: Length | None,
expected_cxml: str,
document_part_: Mock,
):
sectPr = cast(CT_SectPr, element(sectPr_cxml))
expected_xml = xml(expected_cxml)
section = Section(sectPr, document_part_)
setattr(section, margin_prop_name, value)
assert section._sectPr.xml == expected_xml
# -- fixtures-----------------------------------------------------
@pytest.fixture
def document_part_(self, request: FixtureRequest):
return instance_mock(request, DocumentPart)
@pytest.fixture
def _Footer_(self, request: FixtureRequest):
return class_mock(request, "docx.section._Footer")
@pytest.fixture
def footer_(self, request: FixtureRequest):
return instance_mock(request, _Footer)
@pytest.fixture
def _Header_(self, request: FixtureRequest):
return class_mock(request, "docx.section._Header")
@pytest.fixture
def header_(self, request: FixtureRequest):
return instance_mock(request, _Header)
| DescribeSection |
python | spack__spack | lib/spack/spack/new_installer.py | {
"start": 26777,
"end": 39669
} | class ____:
"""Tracks the build status display for terminal output."""
def __init__(
self,
total: int,
stdout: io.TextIOWrapper = sys.stdout, # type: ignore[assignment]
get_terminal_size: Callable[[], Tuple[int, int]] = os.get_terminal_size,
get_time: Callable[[], float] = time.monotonic,
is_tty: Optional[bool] = None,
) -> None:
#: Ordered dict of build ID -> info
self.total = total
self.completed = 0
self.builds: Dict[str, BuildInfo] = {}
self.finished_builds: List[BuildInfo] = []
self.spinner_chars = ["|", "/", "-", "\\"]
self.spinner_index = 0
self.dirty = True # Start dirty to draw initial state
self.active_area_rows = 0
self.total_lines = 0
self.next_spinner_update = 0.0
self.next_update = 0.0
self.overview_mode = True # Whether to draw the package overview
self.tracked_build_id = "" # identifier of the package whose logs we follow
self.search_term = ""
self.search_mode = False
self.stdout = stdout
self.get_terminal_size = get_terminal_size
self.get_time = get_time
self.is_tty = is_tty if is_tty is not None else self.stdout.isatty()
def add_build(self, spec: spack.spec.Spec, explicit: bool, control_w_conn: Connection) -> None:
"""Add a new build to the display and mark the display as dirty."""
self.builds[spec.dag_hash()] = BuildInfo(spec, explicit, control_w_conn)
self.dirty = True
def toggle(self) -> None:
"""Toggle between overview mode and following a specific build."""
if self.overview_mode:
self.next()
else:
self.active_area_rows = 0
self.search_term = ""
self.search_mode = False
self.overview_mode = True
self.dirty = True
try:
os.write(self.builds[self.tracked_build_id].control_w_conn.fileno(), b"0")
except (KeyError, OSError):
pass
self.tracked_build_id = ""
def search_input(self, input: str) -> None:
"""Handle keyboard input when in search mode"""
if input in ("\r", "\n"):
self.next(1)
elif input == "\x1b": # Escape
self.search_mode = False
self.search_term = ""
self.dirty = True
elif input in ("\x7f", "\b"): # Backspace
self.search_term = self.search_term[:-1]
self.dirty = True
elif input.isprintable():
self.search_term += input
self.dirty = True
def enter_search(self) -> None:
self.search_mode = True
self.dirty = True
def _is_displayed(self, build: BuildInfo) -> bool:
"""Returns true if the build matches the search term, or when no search term is set."""
# When not in search mode, the search_term is "", which always evaluates to True below
return self.search_term in build.name or build.hash.startswith(self.search_term)
def _get_next(self, direction: int) -> Optional[str]:
"""Returns the next or previous unfinished build ID matching the search term, or None if
none found. Direction should be 1 for next, -1 for previous."""
matching = [
build_id
for build_id, build in self.builds.items()
if build.finished_time is None and self._is_displayed(build)
]
if not matching:
return None
try:
idx = matching.index(self.tracked_build_id)
except ValueError:
return matching[0] if direction == 1 else matching[-1]
return matching[(idx + direction) % len(matching)]
def next(self, direction: int = 1) -> None:
"""Follow the logs of the next build in the list."""
new_build_id = self._get_next(direction)
if not new_build_id or self.tracked_build_id == new_build_id:
return
new_build = self.builds[new_build_id]
if self.overview_mode:
self.overview_mode = False
# Stop following the previous and start following the new build.
if self.tracked_build_id:
try:
os.write(self.builds[self.tracked_build_id].control_w_conn.fileno(), b"0")
except (KeyError, OSError):
pass
self.tracked_build_id = new_build_id
# Tell the user we're following new logs, and instruct the child to start sending them.
self.stdout.write(
f"\n==> Following logs of {new_build.name}" f"\033[0;36m@{new_build.version}\033[0m\n"
)
self.stdout.flush()
try:
os.write(new_build.control_w_conn.fileno(), b"1")
except (KeyError, OSError):
pass
def update_state(self, build_id: str, state: str) -> None:
"""Update the state of a package and mark the display as dirty."""
build_info = self.builds[build_id]
build_info.state = state
build_info.progress_percent = None
if state in ("finished", "failed"):
self.completed += 1
build_info.finished_time = self.get_time() + CLEANUP_TIMEOUT
if build_id == self.tracked_build_id and not self.overview_mode:
self.toggle()
self.dirty = True
# For non-TTY output, print state changes immediately without colors
if not self.is_tty:
self.stdout.write(
f"{build_info.hash} {build_info.name}@{build_info.version}: {state}\n"
)
self.stdout.flush()
def update_progress(self, build_id: str, current: int, total: int) -> None:
"""Update the progress of a package and mark the display as dirty."""
percent = int((current / total) * 100)
build_info = self.builds[build_id]
if build_info.progress_percent != percent:
build_info.progress_percent = percent
self.dirty = True
def update(self, finalize: bool = False) -> None:
"""Redraw the interactive display."""
if not self.is_tty or not self.overview_mode:
return
now = self.get_time()
# Avoid excessive redraws
if not finalize and now < self.next_update:
return
# Only update the spinner if there are still running packages
if now >= self.next_spinner_update and any(
pkg.finished_time is None for pkg in self.builds.values()
):
self.spinner_index = (self.spinner_index + 1) % len(self.spinner_chars)
self.dirty = True
self.next_spinner_update = now + SPINNER_INTERVAL
for build_id in list(self.builds):
build_info = self.builds[build_id]
if build_info.state == "failed" or build_info.finished_time is None:
continue
if finalize or now >= build_info.finished_time:
self.finished_builds.append(build_info)
del self.builds[build_id]
self.dirty = True
if not self.dirty:
return
# Build the overview output in a buffer and print all at once to avoid flickering.
buffer = io.StringIO()
# Move cursor up to the start of the display area
if self.active_area_rows > 0:
buffer.write(f"\033[{self.active_area_rows}F")
max_width, max_height = self.get_terminal_size()
self.total_lines = 0
total_finished = len(self.finished_builds)
# First flush the finished builds. These are "persisted" in terminal history.
for build in self.finished_builds:
self._render_build(build, buffer, max_width)
self.finished_builds.clear()
# Then a header followed by the active builds. This is the "mutable" part of the display.
long_header_len = len(
f"Progress: {self.completed}/{self.total} /: filter v: logs n/p: next/prev"
)
if long_header_len < max_width:
self._println(
buffer,
f"\033[1mProgress:\033[0m {self.completed}/{self.total}"
" \033[36m/\033[0m: filter \033[36mv\033[0m: logs"
" \033[36mn\033[0m/\033[36mp\033[0m: next/prev",
)
else:
self._println(buffer, f"\033[1mProgress:\033[0m {self.completed}/{self.total}")
displayed_builds = (
[b for b in self.builds.values() if self._is_displayed(b)]
if self.search_term
else self.builds.values()
)
len_builds = len(displayed_builds)
# Truncate if we have more builds than fit on the screen. In that case we have to reserve
# an additional line for the "N more..." message.
truncate_at = max_height - 3 if len_builds + 2 > max_height else len_builds
for i, build in enumerate(displayed_builds, 1):
if i > truncate_at:
self._println(buffer, f"{len_builds - i + 1} more...")
break
self._render_build(build, buffer, max_width)
if self.search_mode:
buffer.write(f"filter> {self.search_term}\033[K")
# Clear any remaining lines from previous display
buffer.write("\033[0J")
# Print everything at once to avoid flickering
self.stdout.write(buffer.getvalue())
self.stdout.flush()
# Update the number of lines drawn for next time. It reflects the number of active builds.
self.active_area_rows = self.total_lines - total_finished
self.dirty = False
# Schedule next UI update
self.next_update = now + SPINNER_INTERVAL / 2
def _println(self, buffer: io.StringIO, line: str = "") -> None:
"""Print a line to the buffer, handling line clearing and cursor movement."""
self.total_lines += 1
if line:
buffer.write(line)
if self.total_lines > self.active_area_rows:
buffer.write("\033[0m\033[K\n") # reset, clear to EOL, newline
else:
buffer.write("\033[0m\033[K\033[1E") # reset, clear to EOL, move down 1 line
def print_logs(self, build_id: str, data: bytes) -> None:
# Discard logs we are not following. Generally this should not happen as we tell the child
# to only send logs when we are following it. It could maybe happen while transitioning
# between builds.
if self.overview_mode or build_id != self.tracked_build_id:
return
# TODO: drop initial bytes from data until first newline (?)
self.stdout.buffer.write(data)
self.stdout.flush()
def _render_build(self, build_info: BuildInfo, buffer: io.StringIO, max_width: int) -> None:
line_width = 0
for component in self._generate_line_components(build_info):
# ANSI escape sequence(s), does not contribute to width
if not component.startswith("\033"):
line_width += len(component)
if line_width > max_width:
break
buffer.write(component)
self._println(buffer)
def _generate_line_components(self, build_info: BuildInfo) -> Generator[str, None, None]:
"""Yield formatted line components for a package. Escape sequences are yielded as separate
strings so they do not contribute to the line width."""
if build_info.external:
indicator = "[e]"
elif build_info.state == "finished":
indicator = "[+]"
elif build_info.state == "failed":
indicator = "[x]"
else:
indicator = f"[{self.spinner_chars[self.spinner_index]}]"
if build_info.state == "failed":
yield "\033[31m" # red
elif build_info.state == "finished":
yield "\033[32m" # green
yield indicator
yield "\033[0m" # reset
yield " "
yield "\033[0;90m" # dark gray
yield build_info.hash
yield "\033[0m" # reset
yield " "
# Package name in bold white if explicit, default otherwise
if build_info.explicit:
yield "\033[1;37m" # bold white
yield build_info.name
yield "\033[0m" # reset
else:
yield build_info.name
yield "\033[0;36m" # cyan
yield f"@{build_info.version}"
yield "\033[0m" # reset
# progress or state
if build_info.progress_percent is not None:
yield " fetching"
yield f": {build_info.progress_percent}%"
elif build_info.state == "finished":
yield f" {build_info.prefix}"
else:
yield f" {build_info.state}"
Nodes = Dict[str, spack.spec.Spec]
Edges = Dict[str, Set[str]]
| BuildStatus |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dtlink2/package.py | {
"start": 217,
"end": 453
} | class ____(Package):
"""Simple package which acts as a link dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/dtlink2-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
| Dtlink2 |
python | numpy__numpy | numpy/f2py/tests/test_parameter.py | {
"start": 56,
"end": 4634
} | class ____(util.F2PyTest):
# Check that intent(in out) translates as intent(inout)
sources = [
util.getpath("tests", "src", "parameter", "constant_real.f90"),
util.getpath("tests", "src", "parameter", "constant_integer.f90"),
util.getpath("tests", "src", "parameter", "constant_both.f90"),
util.getpath("tests", "src", "parameter", "constant_compound.f90"),
util.getpath("tests", "src", "parameter", "constant_non_compound.f90"),
util.getpath("tests", "src", "parameter", "constant_array.f90"),
]
@pytest.mark.slow
def test_constant_real_single(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float32)[::2]
pytest.raises(ValueError, self.module.foo_single, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float32)
self.module.foo_single(x)
assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_real_double(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
pytest.raises(ValueError, self.module.foo_double, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_double(x)
assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_compound_int(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int32)[::2]
pytest.raises(ValueError, self.module.foo_compound_int, x)
# check values with contiguous array
x = np.arange(3, dtype=np.int32)
self.module.foo_compound_int(x)
assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2])
@pytest.mark.slow
def test_constant_non_compound_int(self):
# check values
x = np.arange(4, dtype=np.int32)
self.module.foo_non_compound_int(x)
assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3])
@pytest.mark.slow
def test_constant_integer_int(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int32)[::2]
pytest.raises(ValueError, self.module.foo_int, x)
# check values with contiguous array
x = np.arange(3, dtype=np.int32)
self.module.foo_int(x)
assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_integer_long(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int64)[::2]
pytest.raises(ValueError, self.module.foo_long, x)
# check values with contiguous array
x = np.arange(3, dtype=np.int64)
self.module.foo_long(x)
assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_both(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
pytest.raises(ValueError, self.module.foo, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo(x)
assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
@pytest.mark.slow
def test_constant_no(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
pytest.raises(ValueError, self.module.foo_no, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_no(x)
assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
@pytest.mark.slow
def test_constant_sum(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
pytest.raises(ValueError, self.module.foo_sum, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_sum(x)
assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
def test_constant_array(self):
x = np.arange(3, dtype=np.float64)
y = np.arange(5, dtype=np.float64)
z = self.module.foo_array(x, y)
assert np.allclose(x, [0.0, 1. / 10, 2. / 10])
assert np.allclose(y, [0.0, 1. * 10, 2. * 10, 3. * 10, 4. * 10])
assert np.allclose(z, 19.0)
def test_constant_array_any_index(self):
x = np.arange(6, dtype=np.float64)
y = self.module.foo_array_any_index(x)
assert np.allclose(y, x.reshape((2, 3), order='F'))
def test_constant_array_delims(self):
x = self.module.foo_array_delims()
assert x == 9
| TestParameters |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 117741,
"end": 117907
} | class ____:
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5 * np.pi), -90.0)
| TestDegrees |
python | numpy__numpy | numpy/_core/tests/test_scalarmath.py | {
"start": 1368,
"end": 5836
} | class ____:
def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, f"error with {atype!r}: got {a!r}")
def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
a_array = np.array([3], dtype=atype)
for l, btype in enumerate(types):
b_scalar = btype(1)
b_array = np.array([1], dtype=btype)
c_scalar = a_scalar + b_scalar
c_array = a_array + b_array
# It was comparing the type numbers, but the new ufunc
# function-finding mechanism finds the lowest function
# to which both inputs can be cast - which produces 'l'
# when you do 'q' + 'b'. The old function finding mechanism
# skipped ahead based on the first argument, but that
# does not produce properly symmetric results...
assert_equal(c_scalar.dtype, c_array.dtype,
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
def test_type_create(self):
for atype in types:
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
assert_equal(a, b)
def test_leak(self):
# test leak of scalar objects
# a leak would show up in valgrind as still-reachable of ~2.6MB
for i in range(200000):
np.add(1, 1)
def check_ufunc_scalar_equivalence(op, arr1, arr2):
scalar1 = arr1[()]
scalar2 = arr2[()]
assert isinstance(scalar1, np.generic)
assert isinstance(scalar2, np.generic)
if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
comp_ops = {operator.ge, operator.gt, operator.le, operator.lt}
if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)):
pytest.xfail("complex comp ufuncs use sort-order, scalars do not.")
if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]:
# array**scalar special case can have different result dtype
# (Other powers may have issues also, but are not hit here.)
# TODO: It would be nice to resolve this issue.
pytest.skip("array**2 can have incorrect/weird result dtype")
# ignore fpe's since they may just mismatch for integers anyway.
with warnings.catch_warnings(), np.errstate(all="ignore"):
# Comparisons DeprecationWarnings replacing errors (2022-03):
warnings.simplefilter("error", DeprecationWarning)
try:
res = op(arr1, arr2)
except Exception as e:
with pytest.raises(type(e)):
op(scalar1, scalar2)
else:
scalar_res = op(scalar1, scalar2)
assert_array_equal(scalar_res, res, strict=True)
@pytest.mark.slow
@settings(max_examples=10000, deadline=2000)
@given(sampled_from(binary_operators_for_scalars),
hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
"""
This is a thorough test attempting to cover important promotion paths
and ensuring that arrays and scalars stay as aligned as possible.
However, if it creates troubles, it should maybe just be removed.
"""
check_ufunc_scalar_equivalence(op, arr1, arr2)
@pytest.mark.slow
@given(sampled_from(binary_operators_for_scalars),
       hynp.scalar_dtypes(), hynp.scalar_dtypes())
def test_array_scalar_ufunc_dtypes(op, dt1, dt2):
    """Check array/scalar equivalence across dtype pairs with fixed values."""
    # Same as above, but don't worry about sampling weird values so that we
    # do not have to sample as much
    arr1 = np.array(2, dtype=dt1)
    arr2 = np.array(3, dtype=dt2)  # some powers do weird things.
    check_ufunc_scalar_equivalence(op, arr1, arr2)
@pytest.mark.parametrize("fscalar", [np.float16, np.float32])
def test_int_float_promotion_truediv(fscalar):
    """True division promotion rules for mixed int/low-precision floats."""
    # Promotion for mixed int and float32/float16 must not go to float64
    i = np.int8(1)
    f = fscalar(1)
    expected = np.result_type(i, f)
    assert (i / f).dtype == expected
    assert (f / i).dtype == expected
    # But normal int / int true division goes to float64:
    assert (i / i).dtype == np.dtype("float64")
    # For int16, result has to be at least float32 (takes ufunc path):
    assert (np.int16(1) / f).dtype == np.dtype("float32")
| TestTypes |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock-converse/tests/test_llms_bedrock_converse.py | {
"start": 5764,
"end": 41091
class ____:
    """Minimal stand-in for an ``aioboto3.Session`` used by the unit tests."""

    # Fix: __init__ always returns None; the original annotated it as
    # returning the class, which is incorrect for an initializer.
    def __init__(self, *args, **kwargs) -> None:
        # Accept (and ignore) any arguments so the mock can be constructed
        # exactly like the real aioboto3.Session.
        pass

    def client(self, *args, **kwargs):
        """Return the async mock Bedrock runtime client."""
        return AsyncMockClient()
@pytest.fixture()
def mock_boto3_session(monkeypatch):
    # Patch boto3 so BedrockConverse's synchronous client is the test mock.
    def mock_client(*args, **kwargs):
        return MockClient()

    monkeypatch.setattr("boto3.Session.client", mock_client)


@pytest.fixture()
def mock_aioboto3_session(monkeypatch):
    # Patch aioboto3 so async calls go through the mock async session.
    monkeypatch.setattr("aioboto3.Session", MockAsyncSession)


@pytest.fixture()
def bedrock_converse(mock_boto3_session, mock_aioboto3_session):
    # LLM under test, wired to the mocked boto3/aioboto3 sessions above.
    return BedrockConverse(
        model=EXP_MODEL,
        max_tokens=EXP_MAX_TOKENS,
        temperature=EXP_TEMPERATURE,
        guardrail_identifier=EXP_GUARDRAIL_ID,
        guardrail_version=EXP_GUARDRAIL_VERSION,
        trace=EXP_GUARDRAIL_TRACE,
        callback_manager=CallbackManager(),
    )


@pytest.fixture()
def bedrock_converse_with_application_inference_profile(
    mock_boto3_session, mock_aioboto3_session
):
    """
    Create a BedrockConverse client that uses an application inference profile for invoking the LLM.
    See AWS documentation for details about creating and using application inference profiles.
    """
    return BedrockConverse(
        model=EXP_MODEL,
        max_tokens=EXP_MAX_TOKENS,
        temperature=EXP_TEMPERATURE,
        guardrail_identifier=EXP_GUARDRAIL_ID,
        guardrail_version=EXP_GUARDRAIL_VERSION,
        application_inference_profile_arn=EXP_APP_INF_PROFILE_ARN,
        trace=EXP_GUARDRAIL_TRACE,
        callback_manager=CallbackManager(),
    )
def test_init(bedrock_converse):
    """Constructor stores model settings and eagerly creates a client."""
    assert bedrock_converse.model == EXP_MODEL
    assert bedrock_converse.max_tokens == EXP_MAX_TOKENS
    assert bedrock_converse.temperature == EXP_TEMPERATURE
    assert bedrock_converse._client is not None


def test_init_app_inf_profile(bedrock_converse_with_application_inference_profile):
    """The inference-profile ARN is stored and used as the model kwarg."""
    client = bedrock_converse_with_application_inference_profile
    assert client.application_inference_profile_arn == EXP_APP_INF_PROFILE_ARN
    assert client.model == EXP_MODEL
    # Application inference profile ARN should be used for the LLM kwargs when provided.
    assert client._model_kwargs["model"] == EXP_APP_INF_PROFILE_ARN


def test_chat(bedrock_converse):
    """chat() returns an assistant message with the mocked response text."""
    response = bedrock_converse.chat(messages)
    assert response.message.role == MessageRole.ASSISTANT
    assert response.message.content == EXP_RESPONSE


def test_complete(bedrock_converse):
    """complete() returns a CompletionResponse with the mocked text."""
    response = bedrock_converse.complete(prompt)
    assert isinstance(response, CompletionResponse)
    assert response.text == EXP_RESPONSE
    assert response.additional_kwargs["status"] == []
    assert response.additional_kwargs["tool_call_id"] == []
def test_stream_chat(bedrock_converse):
    """Streaming yields one chunk per mocked event plus a final
    metadata-only response carrying token usage."""
    response_stream = bedrock_converse.stream_chat(messages)
    responses = list(response_stream)
    # Check that we have content responses plus final metadata response
    assert len(responses) == len(EXP_STREAM_RESPONSE) + 1  # +1 for metadata response
    # Check content responses
    for i, response in enumerate(responses[:-1]):  # All except last
        assert response.message.role == MessageRole.ASSISTANT
        assert response.delta in EXP_STREAM_RESPONSE
    # Check final metadata response with token usage
    final_response = responses[-1]
    assert final_response.message.role == MessageRole.ASSISTANT
    assert final_response.delta == ""  # No delta for metadata response
    # Verify raw contains complete metadata
    assert "metadata" in final_response.raw
    assert "usage" in final_response.raw["metadata"]
    # Verify token counts in additional_kwargs
    assert "prompt_tokens" in final_response.additional_kwargs
    assert final_response.additional_kwargs["prompt_tokens"] == 15
    assert final_response.additional_kwargs["completion_tokens"] == 26
    assert final_response.additional_kwargs["total_tokens"] == 41
@pytest.mark.asyncio
async def test_achat(bedrock_converse):
    """Async chat mirrors the sync chat behavior."""
    response = await bedrock_converse.achat(messages)
    assert isinstance(response, ChatResponse)
    assert response.message.role == MessageRole.ASSISTANT
    assert response.message.content == EXP_RESPONSE


@pytest.mark.asyncio
async def test_astream_chat(bedrock_converse):
    """Async streaming mirrors sync streaming: content chunks then a
    final metadata-only response with token usage."""
    response_stream = await bedrock_converse.astream_chat(messages)
    responses = []
    async for response in response_stream:
        responses.append(response)
    # Check that we have content responses plus final metadata response
    assert len(responses) == len(EXP_STREAM_RESPONSE) + 1  # +1 for metadata response
    # Check content responses
    for i, response in enumerate(responses[:-1]):  # All except last
        assert response.message.role == MessageRole.ASSISTANT
        assert response.delta in EXP_STREAM_RESPONSE
    # Check final metadata response with token usage
    final_response = responses[-1]
    assert final_response.message.role == MessageRole.ASSISTANT
    assert final_response.delta == ""  # No delta for metadata response
    # Verify raw contains complete metadata
    assert "metadata" in final_response.raw
    assert "usage" in final_response.raw["metadata"]
    # Verify token counts in additional_kwargs
    assert "prompt_tokens" in final_response.additional_kwargs
    assert final_response.additional_kwargs["prompt_tokens"] == 15
    assert final_response.additional_kwargs["completion_tokens"] == 26
    assert final_response.additional_kwargs["total_tokens"] == 41


@pytest.mark.asyncio
async def test_acomplete(bedrock_converse):
    """Async completion mirrors the sync completion behavior."""
    response = await bedrock_converse.acomplete(prompt)
    assert isinstance(response, CompletionResponse)
    assert response.text == EXP_RESPONSE
    assert response.additional_kwargs["status"] == []
    assert response.additional_kwargs["tool_call_id"] == []


@pytest.mark.asyncio
async def test_astream_complete(bedrock_converse):
    """Async streaming completion reassembles to the full expected text."""
    response_stream = await bedrock_converse.astream_complete(prompt)
    responses = []
    async for response in response_stream:
        responses.append(response)
    # Check that we have content responses plus final metadata response
    assert len(responses) == len(EXP_STREAM_RESPONSE) + 1  # +1 for metadata response
    # Check that content responses match expected
    content_responses = responses[:-1]  # All except last (metadata) response
    assert "".join([r.delta for r in content_responses]) == "".join(EXP_STREAM_RESPONSE)
# ---- Live AWS Bedrock integration tests (run only when credentials exist) ----


@needs_aws_creds
def test_bedrock_converse_integration_chat_text_only(bedrock_converse_integration):
    """Test a simple text chat integration with Bedrock Converse."""
    llm = bedrock_converse_integration
    messages = [
        ChatMessage(role=MessageRole.USER, content="Write a short sonnet about clouds.")
    ]
    response = llm.chat(messages)
    assert isinstance(response, ChatResponse)
    assert response.message.role == MessageRole.ASSISTANT
    assert isinstance(response.message.content, str)
    assert len(response.message.content) > 5


@needs_aws_creds
def test_bedrock_converse_integration_chat_multimodal(
    temp_image_bytes, bedrock_converse_integration
):
    """Test multimodal chat (text + image) integration with Bedrock Converse."""
    llm = bedrock_converse_integration
    # temp_image_bytes is presumably a solid-red PNG -- the assertion below
    # relies on the model describing it as "red".
    messages = [
        ChatMessage(
            role=MessageRole.USER,
            blocks=[ImageBlock(image=temp_image_bytes, image_mimetype="image/png")],
        ),
        ChatMessage(
            role=MessageRole.USER,
            blocks=[TextBlock(text="What color do you see in the image above?")],
        ),
    ]
    response = llm.chat(messages)
    assert isinstance(response, ChatResponse)
    assert response.message.role == MessageRole.ASSISTANT
    assert isinstance(response.message.content, str)
    assert "red" in response.message.content.lower()


@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_integration_achat_text_only(
    bedrock_converse_integration,
):
    """Test async text chat integration."""
    llm = bedrock_converse_integration
    messages = [
        ChatMessage(role=MessageRole.USER, content="What is the capital of France?")
    ]
    response = await llm.achat(messages)
    assert isinstance(response, ChatResponse)
    assert response.message.role == MessageRole.ASSISTANT
    assert isinstance(response.message.content, str)
    assert "paris" in response.message.content.lower()


@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_integration_achat_multimodal(
    temp_image_bytes, bedrock_converse_integration
):
    """Test async multimodal chat integration."""
    llm = bedrock_converse_integration
    # Use the red image data from temp_image_bytes fixture
    messages = [
        ChatMessage(
            role=MessageRole.USER,
            blocks=[ImageBlock(image=temp_image_bytes, image_mimetype="image/png")],
        ),
        ChatMessage(
            role=MessageRole.USER,
            blocks=[TextBlock(text="Describe the image provided above briefly.")],
        ),
    ]
    response = await llm.achat(messages)
    assert isinstance(response, ChatResponse)
    assert response.message.role == MessageRole.ASSISTANT
    assert isinstance(response.message.content, str)
    assert len(response.message.content) > 5


@needs_aws_creds
def test_bedrock_converse_integration_stream_chat(bedrock_converse_integration):
    """Test streaming chat integration with Bedrock Converse."""
    llm = bedrock_converse_integration
    messages = [ChatMessage(role=MessageRole.USER, content="Count from 1 to 5 slowly.")]
    response_stream = llm.stream_chat(messages)
    chunks = []
    for response in response_stream:
        chunks.append(response.delta)
    assert len(chunks) > 1
    combined = "".join(chunks)
    assert len(combined) > 5


@needs_aws_creds
def test_bedrock_converse_integration_stream_chat_multimodal(
    temp_image_bytes, bedrock_converse_integration
):
    """Test streaming multimodal chat integration with Bedrock Converse."""
    llm = bedrock_converse_integration
    messages = [
        ChatMessage(
            role=MessageRole.USER,
            blocks=[ImageBlock(image=temp_image_bytes, image_mimetype="image/png")],
        ),
        ChatMessage(
            role=MessageRole.USER,
            blocks=[TextBlock(text="Describe this image in a few words.")],
        ),
    ]
    response_stream = llm.stream_chat(messages)
    chunks = []
    for response in response_stream:
        chunks.append(response.delta)
    assert len(chunks) > 1
    combined = "".join(chunks)
    assert len(combined) > 5


@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_integration_astream_chat(bedrock_converse_integration):
    """Test async streaming chat integration with Bedrock Converse."""
    llm = bedrock_converse_integration
    messages = [
        ChatMessage(role=MessageRole.USER, content="Name three famous scientists.")
    ]
    response_stream = await llm.astream_chat(messages)
    chunks = []
    async for response in response_stream:
        chunks.append(response.delta)
    assert len(chunks) > 1
    combined = "".join(chunks)
    assert len(combined) > 5


@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_integration_astream_chat_multimodal(
    temp_image_bytes, bedrock_converse_integration
):
    """Test async streaming multimodal chat integration with Bedrock Converse."""
    llm = bedrock_converse_integration
    messages = [
        ChatMessage(
            role=MessageRole.USER,
            blocks=[ImageBlock(image=temp_image_bytes, image_mimetype="image/png")],
        ),
        ChatMessage(
            role=MessageRole.USER,
            blocks=[TextBlock(text="What do you see in this image?")],
        ),
    ]
    response_stream = await llm.astream_chat(messages)
    chunks = []
    async for response in response_stream:
        chunks.append(response.delta)
    assert len(chunks) > 1
    combined = "".join(chunks)
    assert len(combined) > 5
def search(query: str) -> str:
    """Return a canned results string for *query* (stub used by tool tests)."""
    return "Results for {}".format(query)
# Shared FunctionTool wrapper around ``search`` used by the tool-choice tests.
search_tool = FunctionTool.from_defaults(
    fn=search, name="search_tool", description="A tool for searching information"
)
def test_prepare_chat_with_tools_tool_required(bedrock_converse):
    """Test that tool_required=True is correctly passed to the API request."""
    result = bedrock_converse._prepare_chat_with_tools(
        tools=[search_tool], tool_required=True
    )
    assert "tools" in result
    assert "toolChoice" in result["tools"]
    # tool_required=True maps to the Converse "any" tool choice.
    assert result["tools"]["toolChoice"] == {"any": {}}


def test_prepare_chat_with_tools_tool_not_required(bedrock_converse):
    """Test that tool_required=False is correctly passed to the API request."""
    result = bedrock_converse._prepare_chat_with_tools(
        tools=[search_tool], tool_required=False
    )
    assert "tools" in result
    assert "toolChoice" in result["tools"]
    # tool_required=False maps to the Converse "auto" tool choice.
    assert result["tools"]["toolChoice"] == {"auto": {}}


def test_prepare_chat_with_tools_custom_tool_choice(bedrock_converse):
    """Test that custom tool_choice overrides tool_required."""
    custom_tool_choice = {"specific": {"name": "search_tool"}}
    result = bedrock_converse._prepare_chat_with_tools(
        tools=[search_tool], tool_choice=custom_tool_choice
    )
    assert "tools" in result
    assert "toolChoice" in result["tools"]
    assert result["tools"]["toolChoice"] == custom_tool_choice
def test_prepare_chat_with_tools_cache_enabled(bedrock_converse):
    """Test that enabling tool_caching still yields a well-formed tool config.

    Fixes: the original docstring was copy-pasted from the tool_choice test
    and an unused ``custom_tool_choice`` local was left behind.

    NOTE(review): this does not yet assert that a cachePoint entry is
    appended to the tool list -- confirm against the Converse API shape.
    """
    result = bedrock_converse._prepare_chat_with_tools(
        tools=[search_tool], tool_caching=True
    )
    assert "tools" in result
    assert "toolChoice" in result["tools"]
# Integration test for reproducing the empty text field error
def get_temperature(location: str) -> float:
    """Return a fixed stub temperature for the given location.

    Args:
        location: The location to get the temperature for.

    Returns:
        The temperature of the location in Celsius (always 18.0 in tests).
    """
    fixed_celsius = 18.0
    return fixed_celsius
@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_agent_workflow_empty_text_error(
    bedrock_converse_integration,
):
    """
    Test that reproduces the empty text field error when BedrockConverse
    calls tools without outputting any text in AgentWorkflow.

    This test reproduces the issue described in:
    https://github.com/run-llama/llama_index/issues/18449
    """
    get_temperature_tool = FunctionTool.from_defaults(
        name="get_temperature",
        description="A tool that returns the temperature of a given location.",
        fn=get_temperature,
    )
    agent = FunctionAgent(
        name="weather_agent",
        tools=[get_temperature_tool],
        llm=bedrock_converse_integration,
        system_prompt="You are a helpful assistant that helps users with their queries about the weather.",
    )
    workflow = AgentWorkflow(agents=[agent])
    try:
        response = await workflow.run(
            user_msg="Sort the temperatures of the following locations: Paris, London, Lisbon, Madrid, and Rome."
        )
        assert response is not None
    except Exception as e:
        error_msg = str(e)
        # Only fail on the specific "blank text field" validation error;
        # any other exception is unrelated and is re-raised as-is.
        if (
            "The text field in the ContentBlock object" in error_msg
            and "is blank" in error_msg
        ):
            pytest.fail(f"Empty text field error occurred: {error_msg}")
        else:
            raise
@needs_aws_creds
def test_bedrock_converse_integration_chat_with_empty_system_prompt(
    bedrock_converse_integration,
):
    """Test chat integration with empty system prompt."""
    llm = bedrock_converse_integration
    messages = [
        ChatMessage(role=MessageRole.SYSTEM, content=""),
        ChatMessage(role=MessageRole.USER, content="What is 2 + 2?"),
    ]
    response = llm.chat(messages)
    assert isinstance(response, ChatResponse)
    assert response.message.role == MessageRole.ASSISTANT
    assert isinstance(response.message.content, str)
    assert len(response.message.content) > 0
    assert "4" in response.message.content


@needs_aws_creds
def test_bedrock_converse_integration_chat_with_empty_assistant_message(
    bedrock_converse_integration,
):
    """Test chat integration with empty assistant message in conversation history."""
    llm = bedrock_converse_integration
    messages = [
        ChatMessage(role=MessageRole.USER, content="Hello"),
        ChatMessage(role=MessageRole.ASSISTANT, content=""),
        ChatMessage(role=MessageRole.USER, content="Can you count to 3?"),
    ]
    response = llm.chat(messages)
    assert isinstance(response, ChatResponse)
    assert response.message.role == MessageRole.ASSISTANT
    assert isinstance(response.message.content, str)
    assert len(response.message.content) > 0
    assert any(num in response.message.content for num in ["1", "2", "3"])


@needs_aws_creds
def test_bedrock_converse_integration_chat_with_empty_user_message(
    bedrock_converse_integration,
):
    """Test chat integration with empty user message."""
    llm = bedrock_converse_integration
    messages = [
        ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
        ChatMessage(role=MessageRole.USER, content=""),
        ChatMessage(role=MessageRole.USER, content="What is 2 + 2?"),
    ]
    response = llm.chat(messages)
    assert isinstance(response, ChatResponse)
    assert response.message.role == MessageRole.ASSISTANT
    assert isinstance(response.message.content, str)
    assert len(response.message.content) > 0


@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_integration_astream_chat_with_empty_assistant_message(
    bedrock_converse_integration,
):
    """Test astream_chat integration with empty assistant message."""
    llm = bedrock_converse_integration
    # Create a conversation with various empty and valid content scenarios
    messages = [
        ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
        ChatMessage(role=MessageRole.USER, content="Hello"),
        ChatMessage(
            role=MessageRole.ASSISTANT,
            blocks=[
                TextBlock(text=""),
                TextBlock(text="Previous response"),
            ],
        ),
        ChatMessage(role=MessageRole.USER, content="What is 2+2?"),
    ]
    response_stream = await llm.astream_chat(messages)
    chunks = []
    async for response in response_stream:
        chunks.append(response.delta)
    assert len(chunks) > 0
    combined = "".join(chunks)
    assert len(combined) > 0
# Define a tool function that returns no value
def log_activity(activity: str) -> None:
    """Write *activity* to the system log; intentionally returns nothing.

    Args:
        activity: The activity description to log
    """
    message = f"[LOG] User activity: {activity}"
    print(message)
# Define a tool function that returns no value and takes no arguments
def wake_up_user() -> None:
    """Send a wake-up notification to the user (no-op stub, returns nothing,
    takes no arguments)."""
    return None
@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_agent_with_void_tool_and_continued_conversation(
    bedrock_converse_integration,
):
    """
    Test that Agent can call a tool that returns no value and continue Q&A conversation.

    This test case verifies:
    1. Agent can properly call tools that return no value (void functions)
    2. After calling void tools, the Agent can still answer user questions
    3. No errors occur due to tools not returning values

    This test is important for validating BedrockConverse's handling of tool calls without return values
    """
    # Create a logging tool that returns no value
    log_activity_tool = FunctionTool.from_defaults(
        name="log_activity",
        description="Log user activity to system log for tracking and analysis",
        fn=log_activity,
    )
    # Create a tool with return value for comparison
    get_temperature_tool = FunctionTool.from_defaults(
        name="get_temperature",
        description="Get the temperature of a specified location",
        fn=get_temperature,
    )
    # Create agent using both tools
    agent = FunctionAgent(
        name="assistant_with_logging",
        tools=[log_activity_tool, get_temperature_tool, wake_up_user],
        llm=bedrock_converse_integration,
        system_prompt=(
            "You are a helpful assistant that logs important user activities. "
            "Before answering weather-related questions, please log the user's query activity."
        ),
    )
    workflow = AgentWorkflow(agents=[agent])
    # Shared context so all five runs build on the same conversation history.
    ctx = Context(workflow)
    # First conversation: Request weather information
    # Agent should call log_activity (void tool) first, then call get_temperature
    response1 = await workflow.run(
        user_msg="What's the weather like in San Francisco today? What's the temperature?",
        ctx=ctx,
    )
    # Verify first conversation has normal response
    assert response1 is not None
    assert hasattr(response1, "response")
    response1_text = str(response1.response)
    assert len(response1_text) > 0
    # Second conversation: Continue asking other questions
    # Ensure agent can still handle subsequent conversations after calling void tool
    response2 = await workflow.run(
        user_msg="Will the weather be better tomorrow? Any suggestions?", ctx=ctx
    )
    # Verify second conversation also has normal response
    assert response2 is not None
    assert hasattr(response2, "response")
    response2_text = str(response2.response)
    assert len(response2_text) > 0
    # Third conversation: General question not involving tools
    response3 = await workflow.run(user_msg="Thank you for your help!", ctx=ctx)
    # Verify third conversation response
    assert response3 is not None
    assert hasattr(response3, "response")
    response3_text = str(response3.response)
    assert len(response3_text) > 0
    # Verify blank tool calls are handled correctly
    response_4 = await workflow.run(user_msg="Wake me up please!", ctx=ctx)
    assert response_4 is not None
    assert hasattr(response_4, "response")
    assert len(response_4.tool_calls) > 0
    # Verify all history is handled properly and LLM can continue conversation
    response_5 = await workflow.run(user_msg="Thank you, I am awake now.", ctx=ctx)
    assert response_5 is not None
    assert hasattr(response_5, "response")
    assert len(str(response_5)) > 0
@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_thinking(bedrock_converse_integration_thinking):
    """ThinkingBlock content is surfaced by chat/achat and both streaming
    variants when extended thinking is enabled."""
    messages = [
        ChatMessage(
            role="user",
            content="Can you help me solve this equation for x? x^2+7x+12 = 0. Please think before answering",
        )
    ]
    res_chat = bedrock_converse_integration_thinking.chat(messages)
    assert (
        len(
            [
                block
                for block in res_chat.message.blocks
                if isinstance(block, ThinkingBlock)
            ]
        )
        > 0
    )
    res_achat = await bedrock_converse_integration_thinking.achat(messages)
    assert (
        len(
            [
                block
                for block in res_achat.message.blocks
                if isinstance(block, ThinkingBlock)
            ]
        )
        > 0
    )
    res_stream_chat = bedrock_converse_integration_thinking.stream_chat(messages)
    last_resp = None
    for r in res_stream_chat:
        last_resp = r
    # Streaming accumulates thinking text onto the final response's blocks.
    assert all(
        len((block.content or "")) > 10
        for block in last_resp.message.blocks
        if isinstance(block, ThinkingBlock)
    )
    assert len(last_resp.message.blocks) > 0
    res_astream_chat = await bedrock_converse_integration_thinking.astream_chat(
        messages
    )
    last_resp = None
    async for r in res_astream_chat:
        last_resp = r
    assert all(
        len((block.content or "")) > 10
        for block in last_resp.message.blocks
        if isinstance(block, ThinkingBlock)
    )
    assert len(last_resp.message.blocks) > 0
@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_integration_system_prompt_cache_points(
    bedrock_converse_integration_no_system_prompt_caching_param,
):
    """
    Test system prompt cache point functionality with BedrockConverse integration.

    This test verifies:
    1. Cache point creation on first call (cache write tokens > 0)
    2. Cache point usage on second call (cache read tokens > 0)
    3. Proper token accounting for cached vs non-cached content

    Uses a system prompt with 1026+ tokens to exceed the 1024 token minimum for caching.
    Each test run uses a unique random identifier to ensure fresh cache creation.
    """
    llm = bedrock_converse_integration_no_system_prompt_caching_param
    # Generate a unique random string for this test run to ensure fresh cache
    # Use fixed length to ensure consistent token counting
    random_id = "".join(random.choices(string.ascii_letters + string.digits, k=8))
    # Create a system prompt with enough tokens for caching
    # Approximate token calculation: "You are a recruiting expert for session ABC12345! " ≈ 11-12 tokens
    base_text = f"You are a recruiting expert for session {random_id}! "
    # Calculate repetitions needed to exceed 1024 tokens (using conservative estimate of 10 tokens per repetition)
    target_tokens = 1100  # Target slightly above minimum to ensure we exceed 1024
    estimated_tokens_per_repetition = 10
    repetitions = target_tokens // estimated_tokens_per_repetition
    repeated_text = base_text * repetitions
    # Additional uncached text to test partial caching
    uncached_instructions = (
        "Please focus on providing helpful responses to job seekers."
    )
    # First call - should establish cache
    cache_test_messages_1 = [
        ChatMessage(
            role=MessageRole.SYSTEM,
            blocks=[
                TextBlock(text=repeated_text),
                CachePoint(cache_control=CacheControl(type="default")),
                TextBlock(text=uncached_instructions),
            ],
        ),
        ChatMessage(
            role=MessageRole.USER, content="Do you have data science jobs in Toronto?"
        ),
    ]
    response_1 = await llm.achat(messages=cache_test_messages_1)
    # Verify cache write tokens are present (first call should write to cache)
    additional_kwargs_1 = getattr(response_1, "additional_kwargs", {})
    assert "cache_creation_input_tokens" in additional_kwargs_1, (
        "First call should show cache creation tokens"
    )
    cache_write_tokens_1 = additional_kwargs_1.get("cache_creation_input_tokens", 0)
    assert cache_write_tokens_1 > 0, (
        f"Expected cache write tokens > 0, got {cache_write_tokens_1}"
    )
    # Second call - should read from cache with different user message
    cache_test_messages_2 = [
        ChatMessage(
            role=MessageRole.SYSTEM,
            blocks=[
                TextBlock(text=repeated_text),  # Same cached content
                CachePoint(cache_control=CacheControl(type="default")),
                TextBlock(text=uncached_instructions),  # Same uncached content
            ],
        ),
        ChatMessage(
            role=MessageRole.USER,
            content="What are the environmental impacts of solar energy?",
        ),
    ]
    response_2 = await llm.achat(messages=cache_test_messages_2)
    # Verify cache read tokens are present (second call should read from cache)
    additional_kwargs_2 = getattr(response_2, "additional_kwargs", {})
    assert "cache_read_input_tokens" in additional_kwargs_2, (
        "Second call should show cache read tokens"
    )
    cache_read_tokens_2 = additional_kwargs_2.get("cache_read_input_tokens", 0)
    assert cache_read_tokens_2 > 0, (
        f"Expected cache read tokens > 0, got {cache_read_tokens_2}"
    )
    # Verify cache efficiency - cache read tokens should be close to cache write tokens
    # (since we're using the same cached content)
    cache_efficiency_ratio = cache_read_tokens_2 / cache_write_tokens_1
    assert 0.95 <= cache_efficiency_ratio <= 1.05, (
        f"Cache efficiency seems off. Write: {cache_write_tokens_1}, "
        f"Read: {cache_read_tokens_2}, Ratio: {cache_efficiency_ratio:.2f}"
    )
@needs_aws_creds
@pytest.mark.asyncio
async def test_bedrock_converse_integration_system_prompt_caching_auto_write(
    bedrock_converse_integration,
):
    """
    Test automatic system prompt cache writing when system_prompt_caching=True.

    This test verifies:
    1. Cache write tokens are properly recorded on first call

    Uses the bedrock_converse_integration fixture which has system_prompt_caching=True.
    Each test run uses a unique random identifier to ensure fresh cache creation.
    """
    llm = bedrock_converse_integration
    # Generate a unique random string for this test run to ensure fresh cache
    # Use fixed length to ensure consistent token counting
    random_id = "".join(random.choices(string.ascii_letters + string.digits, k=8))
    # Create a system prompt with enough tokens for automatic caching
    # The system_prompt_caching=True should automatically cache system prompts >= 1024 tokens
    base_text = (
        f"You are an AI assistant specialized in {random_id} analysis and research. "
    )
    # Calculate repetitions needed to exceed 1024 tokens (using conservative estimate of 12 tokens per repetition)
    target_tokens = 1100  # Target slightly above minimum to ensure we exceed 1024
    estimated_tokens_per_repetition = 12
    repetitions = target_tokens // estimated_tokens_per_repetition
    # Create system prompt that will be automatically cached
    large_system_prompt = base_text * repetitions + (
        "Please provide detailed, helpful, and accurate responses. "
        "Focus on delivering high-quality information with proper context and examples."
    )
    # First call - should trigger automatic cache write for system prompt
    messages = [
        ChatMessage(
            role=MessageRole.SYSTEM,
            content=large_system_prompt,
        ),
        ChatMessage(
            role=MessageRole.USER,
            content="What are the key benefits of renewable energy?",
        ),
    ]
    response = await llm.achat(messages=messages)
    # Verify cache write tokens are present (first call should write to cache automatically)
    additional_kwargs = getattr(response, "additional_kwargs", {})
    assert "cache_creation_input_tokens" in additional_kwargs, (
        "First call should show cache creation tokens when system_prompt_caching=True"
    )
    cache_write_tokens = additional_kwargs.get("cache_creation_input_tokens", 0)
    assert cache_write_tokens > 0, (
        f"Expected cache write tokens > 0 with automatic caching, got {cache_write_tokens}"
    )
    # Verify response is meaningful
    assert len(str(response.message.content)) > 50, "Response should be substantial"
@needs_aws_creds
@pytest.mark.asyncio
async def test_tool_call_input_output(
    bedrock_converse_integration_thinking: BedrockConverse,
) -> None:
    """ToolCallBlock history round-trips, and all four chat-with-tools
    variants (sync/async x plain/streaming) emit a get_weather tool call.

    tool_kwargs may come back as a dict or a JSON-encoded string, so both
    forms are accepted in the assertions below.
    """
    def get_weather(location: str):
        return f"The weather in {location} is rainy with a temperature of 15°C."

    tool = FunctionTool.from_defaults(
        fn=get_weather,
        name="get_weather",
        description="Get the weather of a given location",
    )
    history = [
        ChatMessage(
            role="user",
            content="Hello, can you tell me what is the weather today in London?",
        ),
        ChatMessage(
            role="assistant",
            blocks=[
                ToolCallBlock(
                    tool_name="get_weather",
                    tool_kwargs={"location": "Liverpool"},
                    tool_call_id="1",
                ),
            ],
        ),
        ChatMessage(
            role=MessageRole.TOOL,
            content="The weather in London is 11°C and windy",
            additional_kwargs={"tool_call_id": "1"},
        ),
        ChatMessage(
            role="assistant",
            blocks=[
                TextBlock(
                    text="The weather in London is windy with a temperature of 11°C"
                )
            ],
        ),
    ]
    input_message = ChatMessage(
        role="user",
        content="Ok, and what is the weather in Liverpool?",
    )
    response = bedrock_converse_integration_thinking.chat_with_tools(
        tools=[tool], user_msg=input_message, chat_history=history
    )
    assert (
        len(
            [
                block
                for block in response.message.blocks
                if isinstance(block, ToolCallBlock)
            ]
        )
        > 0
    )
    assert any(
        block.tool_name == "get_weather"
        and (
            block.tool_kwargs == {"location": "Liverpool"}
            or block.tool_kwargs == json.dumps({"location": "Liverpool"})
        )
        for block in response.message.blocks
        if isinstance(block, ToolCallBlock)
    )
    aresponse = await bedrock_converse_integration_thinking.achat_with_tools(
        tools=[tool], user_msg=input_message, chat_history=history
    )
    assert (
        len(
            [
                block
                for block in aresponse.message.blocks
                if isinstance(block, ToolCallBlock)
            ]
        )
        > 0
    )
    assert any(
        block.tool_name == "get_weather"
        and (
            block.tool_kwargs == {"location": "Liverpool"}
            or block.tool_kwargs == json.dumps({"location": "Liverpool"})
        )
        for block in aresponse.message.blocks
        if isinstance(block, ToolCallBlock)
    )
    stream_response = bedrock_converse_integration_thinking.stream_chat_with_tools(
        tools=[tool], user_msg=input_message, chat_history=history
    )
    blocks = []
    for res in stream_response:
        blocks.extend(res.message.blocks)
    assert len([block for block in blocks if isinstance(block, ToolCallBlock)]) > 0
    assert any(
        block.tool_name == "get_weather"
        and (
            block.tool_kwargs == {"location": "Liverpool"}
            or block.tool_kwargs == json.dumps({"location": "Liverpool"})
        )
        for block in blocks
        if isinstance(block, ToolCallBlock)
    )
    astream_response = (
        await bedrock_converse_integration_thinking.astream_chat_with_tools(
            tools=[tool], user_msg=input_message, chat_history=history
        )
    )
    ablocks = []
    async for res in astream_response:
        ablocks.extend(res.message.blocks)
    assert len([block for block in ablocks if isinstance(block, ToolCallBlock)]) > 0
    assert any(
        block.tool_name == "get_weather"
        and (
            block.tool_kwargs == {"location": "Liverpool"}
            or block.tool_kwargs == json.dumps({"location": "Liverpool"})
        )
        for block in ablocks
        if isinstance(block, ToolCallBlock)
    )
| MockAsyncSession |
python | pypa__warehouse | warehouse/organizations/models.py | {
"start": 28486,
"end": 29609
} | class ____(db.Model):
__tablename__ = "organization_invitations"
__table_args__ = (
Index("organization_invitations_user_id_idx", "user_id"),
UniqueConstraint(
"user_id",
"organization_id",
name="_organization_invitations_user_organization_uc",
),
)
__repr__ = make_repr("invite_status", "user", "organization")
invite_status: Mapped[enum.Enum] = mapped_column(
Enum(
OrganizationInvitationStatus, values_callable=lambda x: [e.value for e in x]
),
)
token: Mapped[str]
user_id: Mapped[UUID] = mapped_column(
ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"),
index=True,
)
organization_id: Mapped[UUID] = mapped_column(
ForeignKey("organizations.id", onupdate="CASCADE", ondelete="CASCADE"),
index=True,
)
user: Mapped[User] = relationship(
back_populates="organization_invitations", lazy=False
)
organization: Mapped[Organization] = relationship(
back_populates="invitations", lazy=False
)
| OrganizationInvitation |
python | mlflow__mlflow | mlflow/genai/label_schemas/label_schemas.py | {
"start": 4720,
"end": 5718
} | class ____(InputType):
"""A numeric input for collecting assessments from stakeholders.
.. note::
This functionality is only available in Databricks. Please run
`pip install mlflow[databricks]` to use it.
"""
min_value: float | None = None
"""Minimum allowed numeric value. None means no minimum limit."""
max_value: float | None = None
"""Maximum allowed numeric value. None means no maximum limit."""
def _to_databricks_input(self) -> "_InputNumeric":
"""Convert to the internal Databricks input type."""
from databricks.agents.review_app import label_schemas as _label_schemas
return _label_schemas.InputNumeric(min_value=self.min_value, max_value=self.max_value)
@classmethod
def _from_databricks_input(cls, input_obj: "_InputNumeric") -> "InputNumeric":
"""Create from the internal Databricks input type."""
return cls(min_value=input_obj.min_value, max_value=input_obj.max_value)
| InputNumeric |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/colormap.py | {
"start": 173,
"end": 694
} | class ____(WidgetParameterItem):
"""Registered parameter type which displays a :class:`GradientWidget <pyqtgraph.GradientWidget>`"""
def makeWidget(self):
w = GradientWidget(orientation='bottom')
w.sizeHint = lambda: QtCore.QSize(300, 35)
w.sigChanged = w.sigGradientChangeFinished
w.sigChanging = w.sigGradientChanged
w.value = w.colorMap
w.setValue = w.setColorMap
self.hideWidget = False
self.asSubItem = True
return w
| ColorMapParameterItem |
python | arrow-py__arrow | tests/test_arrow.py | {
"start": 68624,
"end": 85132
} | class ____:
def test_granularity(self):
assert self.now.humanize(granularity="second") == "just now"
later1 = self.now.shift(seconds=1)
assert self.now.humanize(later1, granularity="second") == "just now"
assert later1.humanize(self.now, granularity="second") == "just now"
assert self.now.humanize(later1, granularity="minute") == "0 minutes ago"
assert later1.humanize(self.now, granularity="minute") == "in 0 minutes"
later100 = self.now.shift(seconds=100)
assert self.now.humanize(later100, granularity="second") == "100 seconds ago"
assert later100.humanize(self.now, granularity="second") == "in 100 seconds"
assert self.now.humanize(later100, granularity="minute") == "a minute ago"
assert later100.humanize(self.now, granularity="minute") == "in a minute"
assert self.now.humanize(later100, granularity="hour") == "0 hours ago"
assert later100.humanize(self.now, granularity="hour") == "in 0 hours"
later4000 = self.now.shift(seconds=4000)
assert self.now.humanize(later4000, granularity="minute") == "66 minutes ago"
assert later4000.humanize(self.now, granularity="minute") == "in 66 minutes"
assert self.now.humanize(later4000, granularity="hour") == "an hour ago"
assert later4000.humanize(self.now, granularity="hour") == "in an hour"
assert self.now.humanize(later4000, granularity="day") == "0 days ago"
assert later4000.humanize(self.now, granularity="day") == "in 0 days"
later105 = self.now.shift(seconds=10**5)
assert self.now.humanize(later105, granularity="hour") == "27 hours ago"
assert later105.humanize(self.now, granularity="hour") == "in 27 hours"
assert self.now.humanize(later105, granularity="day") == "a day ago"
assert later105.humanize(self.now, granularity="day") == "in a day"
assert self.now.humanize(later105, granularity="week") == "0 weeks ago"
assert later105.humanize(self.now, granularity="week") == "in 0 weeks"
assert self.now.humanize(later105, granularity="month") == "0 months ago"
assert later105.humanize(self.now, granularity="month") == "in 0 months"
assert self.now.humanize(later105, granularity=["month"]) == "0 months ago"
assert later105.humanize(self.now, granularity=["month"]) == "in 0 months"
later106 = self.now.shift(seconds=3 * 10**6)
assert self.now.humanize(later106, granularity="day") == "34 days ago"
assert later106.humanize(self.now, granularity="day") == "in 34 days"
assert self.now.humanize(later106, granularity="week") == "4 weeks ago"
assert later106.humanize(self.now, granularity="week") == "in 4 weeks"
assert self.now.humanize(later106, granularity="month") == "a month ago"
assert later106.humanize(self.now, granularity="month") == "in a month"
assert self.now.humanize(later106, granularity="year") == "0 years ago"
assert later106.humanize(self.now, granularity="year") == "in 0 years"
later506 = self.now.shift(seconds=50 * 10**6)
assert self.now.humanize(later506, granularity="week") == "82 weeks ago"
assert later506.humanize(self.now, granularity="week") == "in 82 weeks"
assert self.now.humanize(later506, granularity="month") == "18 months ago"
assert later506.humanize(self.now, granularity="month") == "in 18 months"
assert self.now.humanize(later506, granularity="quarter") == "6 quarters ago"
assert later506.humanize(self.now, granularity="quarter") == "in 6 quarters"
assert self.now.humanize(later506, granularity="year") == "a year ago"
assert later506.humanize(self.now, granularity="year") == "in a year"
assert self.now.humanize(later1, granularity="quarter") == "0 quarters ago"
assert later1.humanize(self.now, granularity="quarter") == "in 0 quarters"
later107 = self.now.shift(seconds=10**7)
assert self.now.humanize(later107, granularity="quarter") == "a quarter ago"
assert later107.humanize(self.now, granularity="quarter") == "in a quarter"
later207 = self.now.shift(seconds=2 * 10**7)
assert self.now.humanize(later207, granularity="quarter") == "2 quarters ago"
assert later207.humanize(self.now, granularity="quarter") == "in 2 quarters"
later307 = self.now.shift(seconds=3 * 10**7)
assert self.now.humanize(later307, granularity="quarter") == "3 quarters ago"
assert later307.humanize(self.now, granularity="quarter") == "in 3 quarters"
later377 = self.now.shift(seconds=3.7 * 10**7)
assert self.now.humanize(later377, granularity="quarter") == "4 quarters ago"
assert later377.humanize(self.now, granularity="quarter") == "in 4 quarters"
later407 = self.now.shift(seconds=4 * 10**7)
assert self.now.humanize(later407, granularity="quarter") == "5 quarters ago"
assert later407.humanize(self.now, granularity="quarter") == "in 5 quarters"
later108 = self.now.shift(seconds=10**8)
assert self.now.humanize(later108, granularity="year") == "3 years ago"
assert later108.humanize(self.now, granularity="year") == "in 3 years"
later108onlydistance = self.now.shift(seconds=10**8)
assert (
self.now.humanize(
later108onlydistance, only_distance=True, granularity="year"
)
== "3 years"
)
assert (
later108onlydistance.humanize(
self.now, only_distance=True, granularity="year"
)
== "3 years"
)
with pytest.raises(ValueError):
self.now.humanize(later108, granularity="years")
def test_multiple_granularity(self):
assert self.now.humanize(granularity="second") == "just now"
assert self.now.humanize(granularity=["second"]) == "just now"
assert (
self.now.humanize(granularity=["year", "month", "day", "hour", "second"])
== "in 0 years 0 months 0 days 0 hours and 0 seconds"
)
later4000 = self.now.shift(seconds=4000)
assert (
later4000.humanize(self.now, granularity=["hour", "minute"])
== "in an hour and 6 minutes"
)
assert (
self.now.humanize(later4000, granularity=["hour", "minute"])
== "an hour and 6 minutes ago"
)
assert (
later4000.humanize(
self.now, granularity=["hour", "minute"], only_distance=True
)
== "an hour and 6 minutes"
)
assert (
later4000.humanize(self.now, granularity=["day", "hour", "minute"])
== "in 0 days an hour and 6 minutes"
)
assert (
self.now.humanize(later4000, granularity=["day", "hour", "minute"])
== "0 days an hour and 6 minutes ago"
)
later105 = self.now.shift(seconds=10**5)
assert (
self.now.humanize(later105, granularity=["hour", "day", "minute"])
== "a day 3 hours and 46 minutes ago"
)
with pytest.raises(ValueError):
self.now.humanize(later105, granularity=["error", "second"])
later108onlydistance = self.now.shift(seconds=10**8)
assert (
self.now.humanize(
later108onlydistance, only_distance=True, granularity=["year"]
)
== "3 years"
)
assert (
self.now.humanize(
later108onlydistance, only_distance=True, granularity=["month", "week"]
)
== "37 months and 4 weeks"
)
# this will change when leap years are implemented
assert (
self.now.humanize(
later108onlydistance, only_distance=True, granularity=["year", "second"]
)
== "3 years and 5392000 seconds"
)
one_min_one_sec_ago = self.now.shift(minutes=-1, seconds=-1)
assert (
one_min_one_sec_ago.humanize(self.now, granularity=["minute", "second"])
== "a minute and a second ago"
)
one_min_two_secs_ago = self.now.shift(minutes=-1, seconds=-2)
assert (
one_min_two_secs_ago.humanize(self.now, granularity=["minute", "second"])
== "a minute and 2 seconds ago"
)
def test_seconds(self):
later = self.now.shift(seconds=10)
# regression test for issue #727
assert self.now.humanize(later) == "10 seconds ago"
assert later.humanize(self.now) == "in 10 seconds"
assert self.now.humanize(later, only_distance=True) == "10 seconds"
assert later.humanize(self.now, only_distance=True) == "10 seconds"
def test_minute(self):
later = self.now.shift(minutes=1)
assert self.now.humanize(later) == "a minute ago"
assert later.humanize(self.now) == "in a minute"
assert self.now.humanize(later, only_distance=True) == "a minute"
assert later.humanize(self.now, only_distance=True) == "a minute"
def test_minutes(self):
later = self.now.shift(minutes=2)
assert self.now.humanize(later) == "2 minutes ago"
assert later.humanize(self.now) == "in 2 minutes"
assert self.now.humanize(later, only_distance=True) == "2 minutes"
assert later.humanize(self.now, only_distance=True) == "2 minutes"
def test_hour(self):
later = self.now.shift(hours=1)
assert self.now.humanize(later) == "an hour ago"
assert later.humanize(self.now) == "in an hour"
assert self.now.humanize(later, only_distance=True) == "an hour"
assert later.humanize(self.now, only_distance=True) == "an hour"
def test_hours(self):
later = self.now.shift(hours=2)
assert self.now.humanize(later) == "2 hours ago"
assert later.humanize(self.now) == "in 2 hours"
assert self.now.humanize(later, only_distance=True) == "2 hours"
assert later.humanize(self.now, only_distance=True) == "2 hours"
def test_day(self):
later = self.now.shift(days=1)
assert self.now.humanize(later) == "a day ago"
assert later.humanize(self.now) == "in a day"
# regression test for issue #697
less_than_48_hours = self.now.shift(
days=1, hours=23, seconds=59, microseconds=999999
)
assert self.now.humanize(less_than_48_hours) == "a day ago"
assert less_than_48_hours.humanize(self.now) == "in a day"
less_than_48_hours_date = less_than_48_hours._datetime.date()
with pytest.raises(TypeError):
# humanize other argument does not take raw datetime.date objects
self.now.humanize(less_than_48_hours_date)
assert self.now.humanize(later, only_distance=True) == "a day"
assert later.humanize(self.now, only_distance=True) == "a day"
def test_days(self):
later = self.now.shift(days=2)
assert self.now.humanize(later) == "2 days ago"
assert later.humanize(self.now) == "in 2 days"
assert self.now.humanize(later, only_distance=True) == "2 days"
assert later.humanize(self.now, only_distance=True) == "2 days"
# Regression tests for humanize bug referenced in issue 541
later = self.now.shift(days=3)
assert later.humanize(self.now) == "in 3 days"
later = self.now.shift(days=3, seconds=1)
assert later.humanize(self.now) == "in 3 days"
later = self.now.shift(days=4)
assert later.humanize(self.now) == "in 4 days"
def test_week(self):
later = self.now.shift(weeks=1)
assert self.now.humanize(later) == "a week ago"
assert later.humanize(self.now) == "in a week"
assert self.now.humanize(later, only_distance=True) == "a week"
assert later.humanize(self.now, only_distance=True) == "a week"
def test_weeks(self):
later = self.now.shift(weeks=2)
assert self.now.humanize(later) == "2 weeks ago"
assert later.humanize(self.now) == "in 2 weeks"
assert self.now.humanize(later, only_distance=True) == "2 weeks"
assert later.humanize(self.now, only_distance=True) == "2 weeks"
def test_month(self):
later = self.now.shift(months=1)
assert self.now.humanize(later) == "a month ago"
assert later.humanize(self.now) == "in a month"
assert self.now.humanize(later, only_distance=True) == "a month"
assert later.humanize(self.now, only_distance=True) == "a month"
def test_months(self):
later = self.now.shift(months=2)
earlier = self.now.shift(months=-2)
assert earlier.humanize(self.now) == "2 months ago"
assert later.humanize(self.now) == "in 2 months"
assert self.now.humanize(later, only_distance=True) == "2 months"
assert later.humanize(self.now, only_distance=True) == "2 months"
def test_year(self):
later = self.now.shift(years=1)
assert self.now.humanize(later) == "a year ago"
assert later.humanize(self.now) == "in a year"
assert self.now.humanize(later, only_distance=True) == "a year"
assert later.humanize(self.now, only_distance=True) == "a year"
def test_years(self):
later = self.now.shift(years=2)
assert self.now.humanize(later) == "2 years ago"
assert later.humanize(self.now) == "in 2 years"
assert self.now.humanize(later, only_distance=True) == "2 years"
assert later.humanize(self.now, only_distance=True) == "2 years"
arw = arrow.Arrow(2014, 7, 2)
result = arw.humanize(self.datetime)
assert result == "in a year"
def test_arrow(self):
arw = arrow.Arrow.fromdatetime(self.datetime)
result = arw.humanize(arrow.Arrow.fromdatetime(self.datetime))
assert result == "just now"
def test_datetime_tzinfo(self):
arw = arrow.Arrow.fromdatetime(self.datetime)
result = arw.humanize(self.datetime.replace(tzinfo=tz.tzutc()))
assert result == "just now"
def test_other(self):
arw = arrow.Arrow.fromdatetime(self.datetime)
with pytest.raises(TypeError):
arw.humanize(object())
def test_invalid_locale(self):
arw = arrow.Arrow.fromdatetime(self.datetime)
with pytest.raises(ValueError):
arw.humanize(locale="klingon")
def test_none(self):
arw = arrow.Arrow.utcnow()
result = arw.humanize()
assert result == "just now"
result = arw.humanize(None)
assert result == "just now"
def test_week_limit(self):
# regression test for issue #848
arw = arrow.Arrow.utcnow()
later = arw.shift(weeks=+1)
result = arw.humanize(later)
assert result == "a week ago"
def test_untranslated_granularity(self, mocker):
arw = arrow.Arrow.utcnow()
later = arw.shift(weeks=1)
# simulate an untranslated timeframe key
mocker.patch.dict("arrow.locales.EnglishLocale.timeframes")
del arrow.locales.EnglishLocale.timeframes["week"]
with pytest.raises(ValueError):
arw.humanize(later, granularity="week")
def test_empty_granularity_list(self):
arw = arrow.Arrow(2013, 1, 1, 0, 0, 0)
later = arw.shift(seconds=55000)
with pytest.raises(ValueError):
arw.humanize(later, granularity=[])
# Bulgarian is an example of a language that overrides _format_timeframe
# Applicable to all locales. Note: Contributors need to make sure
# that if they override describe or describe_multi, that delta
# is truncated on call
def test_no_floats(self):
arw = arrow.Arrow(2013, 1, 1, 0, 0, 0)
later = arw.shift(seconds=55000)
humanize_string = arw.humanize(later, locale="bg", granularity="minute")
assert humanize_string == "916 минути назад"
def test_no_floats_multi_gran(self):
arw = arrow.Arrow(2013, 1, 1, 0, 0, 0)
later = arw.shift(seconds=55000)
humanize_string = arw.humanize(
later, locale="bg", granularity=["second", "minute"]
)
assert humanize_string == "916 минути 40 няколко секунди назад"
@pytest.mark.usefixtures("time_2013_01_01")
| TestArrowHumanize |
python | PyCQA__pylint | tests/functional/e/enum_self_defined_member_5138.py | {
"start": 718,
"end": 1126
} | class ____(Enum):
METRE = "metre", "m"
MILE = "mile", "m", True
def __init__(self, text: str, unit: str, is_imperial: bool = False):
self.text: str = text
self.unit: str = unit
if is_imperial:
self.suffix = " (imp)"
else:
self.suffix = ""
print(f"100 {Length.METRE.unit}{Length.METRE.suffix}")
print(Length.MILE.foo) # [no-member]
| Length |
python | astral-sh__uv | scripts/benchmark/src/benchmark/tools.py | {
"start": 2143,
"end": 4928
} | class ____(Suite):
def __init__(self, path: str | None = None) -> None:
self.name = path or "pipx"
self.path = path or "pipx"
def install_cold(self, *, cwd: str) -> Command | None:
home_dir = os.path.join(cwd, "home")
bin_dir = os.path.join(cwd, "bin")
man_dir = os.path.join(cwd, "man")
# pipx uses a shared virtualenv directory in `${PIPX_HOME}/shared`, which
# contains pip. If we remove `${PIPX_HOME}/shared`, we're simulating the _first_
# pipx invocation on a machine, rather than `pipx run` with a cold cache. So,
# instead, we only remove the installed tools, rather than the shared
# dependencies.
venvs_dir = os.path.join(home_dir, "venvs")
return Command(
name=f"{self.name} ({Benchmark.INSTALL_COLD.value})",
prepare=f"rm -rf {venvs_dir} && rm -rf {bin_dir} && rm -rf {man_dir}",
command=[
f"PIPX_HOME={home_dir}",
f"PIPX_BIN_DIR={bin_dir}",
f"PIPX_MAN_DIR={man_dir}",
self.path,
"install",
"--pip-args=--no-cache-dir",
TOOL,
],
)
def install_warm(self, *, cwd: str) -> Command | None:
home_dir = os.path.join(cwd, "home")
bin_dir = os.path.join(cwd, "bin")
man_dir = os.path.join(cwd, "man")
# pipx uses a shared virtualenv directory in `${PIPX_HOME}/shared`, which
# contains pip. If we remove `${PIPX_HOME}/shared`, we're simulating the _first_
# pipx invocation on a machine, rather than `pipx run` with a cold cache. So,
# instead, we only remove the installed tools, rather than the shared
# dependencies.
venvs_dir = os.path.join(home_dir, "venvs")
return Command(
name=f"{self.name} ({Benchmark.INSTALL_WARM.value})",
prepare=f"rm -rf {venvs_dir} && rm -rf {bin_dir} && rm -rf {man_dir}",
command=[
f"PIPX_HOME={home_dir}",
f"PIPX_BIN_DIR={bin_dir}",
f"PIPX_MAN_DIR={man_dir}",
self.path,
"install",
TOOL,
],
)
def run(self, *, cwd: str) -> Command | None:
home_dir = os.path.join(cwd, "home")
bin_dir = os.path.join(cwd, "bin")
man_dir = os.path.join(cwd, "man")
return Command(
name=f"{self.name} ({Benchmark.RUN.value})",
prepare="",
command=[
f"PIPX_HOME={home_dir}",
f"PIPX_BIN_DIR={bin_dir}",
f"PIPX_MAN_DIR={man_dir}",
self.path,
"install",
TOOL,
],
)
| Pipx |
python | readthedocs__readthedocs.org | readthedocs/core/migrations/0016_update_dj_simple_history.py | {
"start": 149,
"end": 1409
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("core", "0015_remove_email_options"),
]
operations = [
migrations.AlterModelOptions(
name="historicaluser",
options={
"get_latest_by": ("history_date", "history_id"),
"ordering": ("-history_date", "-history_id"),
"verbose_name": "historical user",
"verbose_name_plural": "historical users",
},
),
migrations.AlterModelOptions(
name="historicaluserprofile",
options={
"get_latest_by": ("history_date", "history_id"),
"ordering": ("-history_date", "-history_id"),
"verbose_name": "historical user profile",
"verbose_name_plural": "historical user profiles",
},
),
migrations.AlterField(
model_name="historicaluser",
name="history_date",
field=models.DateTimeField(db_index=True),
),
migrations.AlterField(
model_name="historicaluserprofile",
name="history_date",
field=models.DateTimeField(db_index=True),
),
]
| Migration |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 11557,
"end": 12393
} | class ____(PointerModel):
def __init__(self, dmm, fe_type):
super(EphemeralArrayModel, self).__init__(dmm, fe_type)
self._data_type = ir.ArrayType(self._pointee_be_type,
self._fe_type.count)
def get_data_type(self):
return self._data_type
def as_data(self, builder, value):
values = [builder.load(cgutils.gep_inbounds(builder, value, i))
for i in range(self._fe_type.count)]
return cgutils.pack_array(builder, values)
def from_data(self, builder, value):
raise NotImplementedError("use load_from_data_pointer() instead")
def load_from_data_pointer(self, builder, ptr, align=None):
return builder.bitcast(ptr, self.get_value_type())
@register_default(types.ExternalFunctionPointer)
| EphemeralArrayModel |
python | mkdocs__mkdocs | mkdocs/config/config_options.py | {
"start": 32174,
"end": 32716
} | class ____(BaseConfigOption[Union[ExtraScriptValue, str]]):
def __init__(self):
super().__init__()
self.option_type = SubConfig[ExtraScriptValue]()
def run_validation(self, value: object) -> ExtraScriptValue | str:
self.option_type.warnings = self.warnings
if isinstance(value, str):
if value.endswith('.mjs'):
return self.option_type.run_validation({'path': value, 'type': 'module'})
return value
return self.option_type.run_validation(value)
| ExtraScript |
python | getsentry__sentry | src/sentry/integrations/messaging/commands.py | {
"start": 1860,
"end": 3169
} | class ____:
def __init__(
self,
interaction_type: MessagingInteractionType,
command_text: str,
aliases: Iterable[str] = (),
) -> None:
super().__init__()
self.interaction_type = interaction_type
self.command_slug = CommandSlug(command_text)
self.aliases = frozenset(CommandSlug(alias) for alias in aliases)
@property
def name(self) -> str:
return self.interaction_type.value
def get_all_command_slugs(self) -> Iterable[CommandSlug]:
yield self.command_slug
yield from self.aliases
HELP = MessagingIntegrationCommand(
MessagingInteractionType.HELP,
"help",
aliases=("", "support", "docs"),
)
LINK_IDENTITY = MessagingIntegrationCommand(
MessagingInteractionType.LINK_IDENTITY,
"link",
)
UNLINK_IDENTITY = MessagingIntegrationCommand(
MessagingInteractionType.UNLINK_IDENTITY,
"unlink",
)
LINK_TEAM = MessagingIntegrationCommand(
MessagingInteractionType.LINK_TEAM,
"link team",
)
UNLINK_TEAM = MessagingIntegrationCommand(
MessagingInteractionType.UNLINK_TEAM,
"unlink team",
)
R = TypeVar("R") # response
# Command handler type that receives lifecycle object
CommandHandler = Callable[[CommandInput], IntegrationResponse[R]]
| MessagingIntegrationCommand |
python | huggingface__transformers | src/transformers/models/plbart/tokenization_plbart.py | {
"start": 1540,
"end": 16536
} | class ____(SentencePieceBackend):
"""
Construct an PLBART tokenizer.
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
<tokens> <eos>` for target language documents.
Args:
vocab_file (`str`):
Path to the vocabulary file.
src_lang (`str`, *optional*):
A string representing the source language.
tgt_lang (`str`, *optional*):
A string representing the target language.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The start of sequence token.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The cls token, which is a special token used as the first token for all tasks.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token(`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masking tasks. This
is only used in the `"base"` tokenizer type. For `"multi"` tokenizer, masking is never done for the
downstream tasks.
language_codes (`str`, *optional*, defaults to `"base"`):
What language codes to use. Should be one of `"base"` or `"multi"`.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from transformers import PLBartTokenizer
>>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
>>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
>>> expected_translation_english = "Returns the maximum value of a b c."
>>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt")
```"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: list[int] = []
suffix_tokens: list[int] = []
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
language_codes="base",
tokenizer_file=None,
src_lang=None,
tgt_lang=None,
sp_model_kwargs: Optional[dict[str, Any]] = None,
additional_special_tokens=None,
clean_up_tokenization_spaces=True,
**kwargs,
):
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
src_lang = self._convert_lang_code_special_format(src_lang)
tgt_lang = self._convert_lang_code_special_format(tgt_lang)
self.language_codes = language_codes
fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes]
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
self.vocab_file = vocab_file
self.lang_code_to_id = {}
self.id_to_lang_code = {}
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
self.fairseq_offset = 1
_additional_special_tokens = list(fairseq_language_codes)
if additional_special_tokens is not None:
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens]
)
super().__init__(
vocab_file=vocab_file,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
tokenizer_file=tokenizer_file,
src_lang=src_lang,
tgt_lang=tgt_lang,
additional_special_tokens=_additional_special_tokens,
sp_model_kwargs=self.sp_model_kwargs,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
language_codes=language_codes,
special_tokens_pattern="prefix_suffix",
token_type_ids_pattern="all_zeros",
**kwargs,
)
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.sp_model_size = len(self.sp_model)
self.lang_code_to_id = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(fairseq_language_codes)
}
self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
if self.language_codes == "base":
self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
reserved_tokens = {"<s>", "<pad>", "</s>", "<unk>", "<mask>"}
reserved_tokens.update(FAIRSEQ_LANGUAGE_CODES[self.language_codes])
removed = False
for token in reserved_tokens:
idx = self._added_tokens_encoder.pop(token, None)
if idx is not None:
self._added_tokens_decoder.pop(idx, None)
removed = True
if removed:
self._update_trie()
self._update_total_vocab_size()
synced = False
for token, idx in self._added_tokens_encoder.items():
if idx in self._added_tokens_decoder:
continue
self._added_tokens_decoder[idx] = AddedToken(
token, special=True, normalized=False, lstrip=False, rstrip=False
)
synced = True
if synced:
self._update_trie()
self._update_total_vocab_size()
if self.language_codes == "base":
self._src_lang = src_lang
self.cur_lang_code_id = (
self.lang_code_to_id[self._src_lang] if self._src_lang is not None else self._src_lang
)
else:
self._src_lang = src_lang if src_lang is not None else "__en_XX__"
self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def vocab_size(self):
lang_code_count = len(getattr(self, "lang_code_to_id", {}))
fairseq_offset = getattr(self, "fairseq_offset", 1)
base_vocab = len(self.sp_model) if hasattr(self, "sp_model") else 0
if getattr(self, "language_codes", "base") == "base":
return base_vocab + lang_code_count + fairseq_offset + 1 # +1 for mask token
return base_vocab + lang_code_count + fairseq_offset
def get_vocab(self):
"""Override to use fairseq vocabulary structure"""
vocab = self.fairseq_tokens_to_ids.copy()
for i in range(self.sp_model.get_piece_size()):
sp_token = self.sp_model.IdToPiece(i)
# Map SP token to fairseq ID: SP ID 0 maps to unk_token_id, others map to SP_ID + fairseq_offset
vocab_id = self.unk_token_id if i == 0 else (i + self.fairseq_offset)
if sp_token not in vocab:
vocab[sp_token] = vocab_id
# Add any additional tokens
vocab.update({token: idx for token, idx in self._added_tokens_encoder.items() if token not in vocab})
return vocab
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
new_src_lang = self._convert_lang_code_special_format(new_src_lang)
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _build_translation_inputs(
self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = self._convert_lang_code_special_format(src_lang)
self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(self.tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def prepare_seq2seq_batch(
self,
src_texts: list[str],
src_lang: str = "en_XX",
tgt_texts: Optional[list[str]] = None,
tgt_lang: str = "python",
**kwargs,
) -> BatchEncoding:
self.src_lang = self._convert_lang_code_special_format(src_lang)
self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
"""Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
src_lang = self._convert_lang_code_special_format(src_lang)
self.cur_lang_code = self.lang_code_to_id[src_lang] if src_lang is not None else None
self.prefix_tokens = []
if self.cur_lang_code is not None:
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, lang: str) -> None:
"""Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
lang = self._convert_lang_code_special_format(lang)
self.cur_lang_code = self.lang_code_to_id[lang] if lang is not None else None
self.prefix_tokens = []
if self.cur_lang_code is not None:
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.suffix_tokens = [self.eos_token_id]
def _convert_lang_code_special_format(self, lang: str) -> str:
"""Convert Language Codes to format tokenizer uses if required"""
lang = FAIRSEQ_LANGUAGE_CODES_MAP.get(lang, lang)
return lang
def clean_up_tokenization(self, out_string: str) -> str:
"""
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
Args:
out_string (`str`): The text to clean up.
Returns:
`str`: The cleaned-up string.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, **kwargs):
"""Override to use self.clean_up_tokenization_spaces as default for batched input."""
return super().decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=self.clean_up_tokenization_spaces,
**kwargs,
)
__all__ = ["PLBartTokenizer"]
| PLBartTokenizer |
python | keras-team__keras | keras/src/layers/activations/relu.py | {
"start": 158,
"end": 2689
} | class ____(Layer):
"""Rectified Linear Unit activation function layer.
Formula:
``` python
f(x) = max(x,0)
f(x) = max_value if x >= max_value
f(x) = x if threshold <= x < max_value
f(x) = negative_slope * (x - threshold) otherwise
```
Example:
``` python
relu_layer = keras.layers.ReLU(
max_value=10,
negative_slope=0.5,
threshold=0,
)
input = np.array([-10, -5, 0.0, 5, 10])
result = relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
max_value: Float >= 0. Maximum activation value. None means unlimited.
Defaults to `None`.
negative_slope: Float >= 0. Negative slope coefficient.
Defaults to `0.0`.
threshold: Float >= 0. Threshold value for thresholded activation.
Defaults to `0.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(
self, max_value=None, negative_slope=0.0, threshold=0.0, **kwargs
):
super().__init__(**kwargs)
if max_value is not None and max_value < 0.0:
raise ValueError(
"max_value of a ReLU layer cannot be a negative "
f"value. Received: max_value={max_value}"
)
if negative_slope is None or negative_slope < 0.0:
raise ValueError(
"negative_slope of a ReLU layer cannot be a negative "
f"value. Received: negative_slope={negative_slope}"
)
if threshold is None or threshold < 0.0:
raise ValueError(
"threshold of a ReLU layer cannot be a negative "
f"value. Received: threshold={threshold}"
)
self.max_value = max_value
self.negative_slope = negative_slope
self.threshold = threshold
self.supports_masking = True
self._build_at_init()
def call(self, inputs):
return activations.relu(
inputs,
negative_slope=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold,
)
def get_config(self):
config = super().get_config()
config.update(
{
"max_value": self.max_value,
"negative_slope": self.negative_slope,
"threshold": self.threshold,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
| ReLU |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 47790,
"end": 51033
} | class ____(object):
BITS = 64
NIBBLE = 4
LOG_BITS = 6
MOD_MASK = BITS -1
def __init__(self,data=None):
if not data:
BitSet.__init__(self,[long(0)])
return
if isinstance(data,int):
BitSet.__init__(self,[long(data)])
return
if isinstance(data,long):
BitSet.__init__(self,[data])
return
if not isinstance(data,list):
raise TypeError("BitSet requires integer, long, or " +
"list argument")
for x in data:
if not isinstance(x, six.integer_types):
raise TypeError(self,"List argument item is " +
"not a long: %s" % (x))
self.data = data
def __str__(self):
bits = len(self.data) * BitSet.BITS
s = ""
for i in range(0,bits):
if self.at(i):
s += "1"
else:
s += "o"
if not ((i+1) % 10):
s += '|%s|' % (i+1)
return s
def __repr__(self):
return str(self)
def member(self,item):
if not item:
return False
if isinstance(item,int):
return self.at(item)
if not is_string_type(item):
raise TypeError(self,"char or unichar expected: %s" % (item))
## char is a (unicode) string with at most lenght 1, ie.
## a char.
if len(item) != 1:
raise TypeError(self,"char expected: %s" % (item))
### handle ASCII/UNICODE char
num = ord(item)
### check whether position num is in bitset
return self.at(num)
def wordNumber(self,bit):
return bit >> BitSet.LOG_BITS
def bitMask(self,bit):
pos = bit & BitSet.MOD_MASK ## bit mod BITS
return (1 << pos)
def set(self,bit,on=True):
# grow bitset as required (use with care!)
i = self.wordNumber(bit)
mask = self.bitMask(bit)
if i>=len(self.data):
d = i - len(self.data) + 1
for x in range(0,d):
self.data.append(0)
assert len(self.data) == i+1
if on:
self.data[i] |= mask
else:
self.data[i] &= (~mask)
### make add an alias for set
add = set
def off(self,bit,off=True):
self.set(bit,not off)
def at(self,bit):
i = self.wordNumber(bit)
v = self.data[i]
m = self.bitMask(bit)
return v & m
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### some further funcs ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
def illegalarg_ex(func):
raise ValueError(
"%s is only valid if parser is built for debugging" %
(func.func_name))
def runtime_ex(func):
raise RuntimeError(
"%s is only valid if parser is built for debugging" %
(func.func_name))
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenBuffer ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| BitSet |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/operators/search_ads.py | {
"start": 8246,
"end": 9772
} | class ____(_GoogleSearchAdsBaseOperator):
"""
Retrieve details of a custom column for the given customer_id and campaign_id.
.. seealso:
For API documentation check:
https://developers.google.com/search-ads/reporting/api/reference/rest/v0/customers.customColumns/get
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsGetCustomColumnOperator`
:param customer_id: The customer ID for the custom column.
:param custom_column_id: The ID for the custom column.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param api_version: The version of the API that will be requested for example 'v0'.
"""
def __init__(
self,
*,
customer_id: str,
custom_column_id: str,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.customer_id = customer_id
self.custom_column_id = custom_column_id
def execute(self, context: Context):
self.log.info(
"Retrieving the custom column for the customer %s with the id of %s",
self.customer_id,
self.custom_column_id,
)
response = self.hook.get_custom_column(
customer_id=self.customer_id,
custom_column_id=self.custom_column_id,
)
self.log.info("Retrieved custom column: %s", response["id"])
return response
| GoogleSearchAdsGetCustomColumnOperator |
python | nedbat__coveragepy | tests/test_api.py | {
"start": 1121,
"end": 24344
} | class ____(CoverageTest):
"""Api-oriented tests for coverage.py."""
def clean_files(self, files: list[str], pats: list[str]) -> list[str]:
"""Remove names matching `pats` from `files`, a list of file names."""
good = []
for f in files:
for pat in pats:
if fnmatch.fnmatch(f, pat):
break
else:
good.append(f)
return good
def assertFiles(self, files: list[str]) -> None:
"""Assert that the files here are `files`, ignoring the usual junk."""
here = os.listdir(".")
here = self.clean_files(here, ["*.pyc", "__pycache__", "*$py.class"])
assert_count_equal(here, files)
def test_unexecuted_file(self) -> None:
cov = coverage.Coverage()
self.make_file(
"mycode.py",
"""\
a = 1
b = 2
if b == 3:
c = 4
d = 5
""",
)
self.make_file(
"not_run.py",
"""\
fooey = 17
""",
)
# Import the Python file, executing it.
self.start_import_stop(cov, "mycode")
_, statements, missing, _ = cov.analysis("not_run.py")
assert statements == [1]
assert missing == [1]
def test_filenames(self) -> None:
self.make_file(
"mymain.py",
"""\
import mymod
a = 1
""",
)
self.make_file(
"mymod.py",
"""\
fooey = 17
""",
)
# Import the Python file, executing it.
cov = coverage.Coverage()
self.start_import_stop(cov, "mymain")
filename, _, _, _ = cov.analysis("mymain.py")
assert os.path.basename(filename) == "mymain.py"
filename, _, _, _ = cov.analysis("mymod.py")
assert os.path.basename(filename) == "mymod.py"
filename, _, _, _ = cov.analysis(sys.modules["mymain"])
assert os.path.basename(filename) == "mymain.py"
filename, _, _, _ = cov.analysis(sys.modules["mymod"])
assert os.path.basename(filename) == "mymod.py"
# Import the Python file, executing it again, once it's been compiled
# already.
cov = coverage.Coverage()
self.start_import_stop(cov, "mymain")
filename, _, _, _ = cov.analysis("mymain.py")
assert os.path.basename(filename) == "mymain.py"
filename, _, _, _ = cov.analysis("mymod.py")
assert os.path.basename(filename) == "mymod.py"
filename, _, _, _ = cov.analysis(sys.modules["mymain"])
assert os.path.basename(filename) == "mymain.py"
filename, _, _, _ = cov.analysis(sys.modules["mymod"])
assert os.path.basename(filename) == "mymod.py"
@pytest.mark.parametrize("cover_pylib", [False, True])
def test_stdlib(self, cover_pylib: bool) -> None:
self.make_file(
"mymain.py",
"""\
import colorsys
a = 1
hls = colorsys.rgb_to_hls(1.0, 0.5, 0.0)
""",
)
# Measure without the stdlib.
cov1 = coverage.Coverage(cover_pylib=cover_pylib)
self.start_import_stop(cov1, "mymain")
_, statements, missing, _ = cov1.analysis("mymain.py")
assert statements == [1, 2, 3]
assert missing == []
# but none were in colorsys.py
_, statements, missing, _ = cov1.analysis("colorsys.py")
if cover_pylib:
assert statements != missing
else:
assert statements == missing
def test_include_can_measure_stdlib(self) -> None:
self.make_file(
"mymain.py",
"""\
import colorsys, random
a = 1
r, g, b = [random.random() for _ in range(3)]
hls = colorsys.rgb_to_hls(r, g, b)
""",
)
# Measure without the stdlib, but include colorsys.
cov1 = coverage.Coverage(cover_pylib=False, include=["*/colorsys.py"])
self.start_import_stop(cov1, "mymain")
# some statements were marked executed in colorsys.py
_, statements, missing, _ = cov1.analysis("colorsys.py")
assert statements != missing
# but none were in random.py
_, statements, missing, _ = cov1.analysis("random.py")
assert statements == missing
def test_exclude_list(self) -> None:
cov = coverage.Coverage()
cov.clear_exclude()
assert cov.get_exclude_list() == []
cov.exclude("foo")
assert cov.get_exclude_list() == ["foo"]
cov.exclude("bar")
assert cov.get_exclude_list() == ["foo", "bar"]
assert cov._exclude_regex("exclude") == "(?:foo)|(?:bar)"
cov.clear_exclude()
assert cov.get_exclude_list() == []
def test_exclude_partial_list(self) -> None:
cov = coverage.Coverage()
cov.clear_exclude(which="partial")
assert cov.get_exclude_list(which="partial") == []
cov.exclude("foo", which="partial")
assert cov.get_exclude_list(which="partial") == ["foo"]
cov.exclude("bar", which="partial")
assert cov.get_exclude_list(which="partial") == ["foo", "bar"]
assert cov._exclude_regex(which="partial") == "(?:foo)|(?:bar)"
cov.clear_exclude(which="partial")
assert cov.get_exclude_list(which="partial") == []
def test_exclude_and_partial_are_separate_lists(self) -> None:
cov = coverage.Coverage()
cov.clear_exclude(which="partial")
cov.clear_exclude(which="exclude")
cov.exclude("foo", which="partial")
assert cov.get_exclude_list(which="partial") == ["foo"]
assert cov.get_exclude_list(which="exclude") == []
cov.exclude("bar", which="exclude")
assert cov.get_exclude_list(which="partial") == ["foo"]
assert cov.get_exclude_list(which="exclude") == ["bar"]
cov.exclude("p2", which="partial")
cov.exclude("e2", which="exclude")
assert cov.get_exclude_list(which="partial") == ["foo", "p2"]
assert cov.get_exclude_list(which="exclude") == ["bar", "e2"]
cov.clear_exclude(which="partial")
assert cov.get_exclude_list(which="partial") == []
assert cov.get_exclude_list(which="exclude") == ["bar", "e2"]
cov.clear_exclude(which="exclude")
assert cov.get_exclude_list(which="partial") == []
assert cov.get_exclude_list(which="exclude") == []
def test_datafile_default(self) -> None:
# Default data file behavior: it's .coverage
self.make_file(
"datatest1.py",
"""\
fooey = 17
""",
)
self.assertFiles(["datatest1.py"])
cov = coverage.Coverage()
self.start_import_stop(cov, "datatest1")
cov.save()
self.assertFiles(["datatest1.py", ".coverage"])
@pytest.mark.parametrize("file_class", FilePathClasses)
def test_datafile_specified(self, file_class: FilePathType) -> None:
# You can specify the data file name.
self.make_file(
"datatest2.py",
"""\
fooey = 17
""",
)
self.assertFiles(["datatest2.py"])
cov = coverage.Coverage(data_file=file_class("cov.data"))
self.start_import_stop(cov, "datatest2")
cov.save()
self.assertFiles(["datatest2.py", "cov.data"])
@pytest.mark.parametrize("file_class", FilePathClasses)
def test_datafile_and_suffix_specified(self, file_class: FilePathType) -> None:
# You can specify the data file name and suffix.
self.make_file(
"datatest3.py",
"""\
fooey = 17
""",
)
self.assertFiles(["datatest3.py"])
cov = coverage.Coverage(data_file=file_class("cov.data"), data_suffix="14")
self.start_import_stop(cov, "datatest3")
cov.save()
self.assertFiles(["datatest3.py", "cov.data.14"])
def test_datafile_from_rcfile(self) -> None:
# You can specify the data file name in the .coveragerc file
self.make_file(
"datatest4.py",
"""\
fooey = 17
""",
)
self.make_file(
".coveragerc",
"""\
[run]
data_file = mydata.dat
""",
)
self.assertFiles(["datatest4.py", ".coveragerc"])
cov = coverage.Coverage()
self.start_import_stop(cov, "datatest4")
cov.save()
self.assertFiles(["datatest4.py", ".coveragerc", "mydata.dat"])
def test_deep_datafile(self) -> None:
self.make_file("datatest5.py", "fooey = 17")
self.assertFiles(["datatest5.py"])
cov = coverage.Coverage(data_file="deep/sub/cov.data")
self.start_import_stop(cov, "datatest5")
cov.save()
self.assertFiles(["datatest5.py", "deep"])
self.assert_exists("deep/sub/cov.data")
def test_datafile_none(self) -> None:
cov = coverage.Coverage(data_file=None)
def f1() -> None: # pragma: nested
a = 1 # pylint: disable=unused-variable
one_line_number = f1.__code__.co_firstlineno + 1
lines = []
def run_one_function(f: Callable[[], None]) -> None:
cov.erase()
with cov.collect():
f()
fs = cov.get_data().measured_files()
lines.append(cov.get_data().lines(list(fs)[0]))
run_one_function(f1)
run_one_function(f1)
run_one_function(f1)
assert lines == [[one_line_number]] * 3
self.assert_doesnt_exist(".coverage")
assert os.listdir(".") == []
def test_empty_reporting(self) -> None:
# empty summary reports raise exception, just like the xml report
cov = coverage.Coverage()
cov.erase()
with pytest.raises(NoDataError, match="No data to report."):
cov.report()
def test_completely_zero_reporting(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/884
# If nothing was measured, the file-touching didn't happen properly.
self.make_file("foo/bar.py", "print('Never run')")
self.make_file("test.py", "assert True")
with pytest.warns(Warning) as warns:
cov = coverage.Coverage(source=["foo"])
self.start_import_stop(cov, "test")
cov.report()
assert_coverage_warnings(warns, "No data was collected. (no-data-collected)")
# Name Stmts Miss Cover
# --------------------------------
# foo/bar.py 1 1 0%
# --------------------------------
# TOTAL 1 1 0%
last = self.last_line_squeezed(self.stdout())
assert "TOTAL 1 1 0%" == last
def test_cov4_data_file(self) -> None:
cov4_data = (
"!coverage.py: This is a private format, don't read it directly!"
+ '{"lines":{"/somewhere/not/really.py":[1,5,2,3]}}'
)
self.make_file(".coverage", cov4_data)
cov = coverage.Coverage()
with pytest.raises(DataError, match="Looks like a coverage 4.x data file"):
cov.load()
cov.erase()
def make_code1_code2(self) -> None:
"""Create the code1.py and code2.py files."""
self.make_file(
"code1.py",
"""\
code1 = 1
""",
)
self.make_file(
"code2.py",
"""\
code2 = 1
code2 = 2
""",
)
def check_code1_code2(self, cov: Coverage) -> None:
"""Check the analysis is correct for code1.py and code2.py."""
_, statements, missing, _ = cov.analysis("code1.py")
assert statements == [1]
assert missing == []
_, statements, missing, _ = cov.analysis("code2.py")
assert statements == [1, 2]
assert missing == []
def test_start_stop_start_stop(self) -> None:
self.make_code1_code2()
cov = coverage.Coverage()
self.start_import_stop(cov, "code1")
cov.save()
self.start_import_stop(cov, "code2")
self.check_code1_code2(cov)
def test_start_save_stop(self) -> None:
self.make_code1_code2()
cov = coverage.Coverage()
with cov.collect():
import_local_file("code1")
cov.save()
import_local_file("code2")
self.check_code1_code2(cov)
def test_start_save_nostop(self) -> None:
self.make_code1_code2()
cov = coverage.Coverage()
with cov.collect():
import_local_file("code1")
cov.save()
import_local_file("code2")
self.check_code1_code2(cov)
def test_two_getdata_only_warn_once(self) -> None:
self.make_code1_code2()
cov = coverage.Coverage(source=["."], omit=["code1.py"])
with cov.collect():
import_local_file("code1")
# We didn't collect any data, so we should get a warning.
with self.assert_warnings(cov, ["No data was collected"]):
cov.get_data()
# But calling get_data a second time with no intervening activity
# won't make another warning.
with self.assert_warnings(cov, []):
cov.get_data()
def test_two_getdata_warn_twice(self) -> None:
self.make_code1_code2()
cov = coverage.Coverage(source=["."], omit=["code1.py", "code2.py"])
with cov.collect():
import_local_file("code1")
# We didn't collect any data, so we should get a warning.
with self.assert_warnings(cov, ["No data was collected"]):
cov.save()
import_local_file("code2")
# Calling get_data a second time after tracing some more will warn again.
with self.assert_warnings(cov, ["No data was collected"]):
cov.get_data()
def make_good_data_files(self) -> None:
"""Make some good data files."""
self.make_code1_code2()
cov = coverage.Coverage(data_suffix=True)
self.start_import_stop(cov, "code1")
cov.save()
cov = coverage.Coverage(data_suffix=True)
self.start_import_stop(cov, "code2")
cov.save()
self.assert_file_count(".coverage.*", 2)
def test_combining_corrupt_data(self) -> None:
# If you combine a corrupt data file, then you will get a warning,
# and the file will remain.
self.make_good_data_files()
self.make_file(".coverage.foo", """La la la, this isn't coverage data!""")
cov = coverage.Coverage()
warning_regex = r"Couldn't use data file '.*\.coverage\.foo': " + BAD_SQLITE_REGEX
with self.assert_warnings(cov, [warning_regex]):
cov.combine()
# We got the results from code1 and code2 properly.
self.check_code1_code2(cov)
# The bad file still exists, but it's the only parallel data file left.
self.assert_exists(".coverage.foo")
self.assert_file_count(".coverage.*", 1)
def test_combining_twice(self) -> None:
self.make_good_data_files()
cov1 = coverage.Coverage()
cov1.combine()
assert self.stdout() == ""
cov1.save()
self.check_code1_code2(cov1)
self.assert_file_count(".coverage.*", 0)
self.assert_exists(".coverage")
cov2 = coverage.Coverage()
with pytest.raises(NoDataError, match=r"No data to combine"):
cov2.combine(strict=True, keep=False)
cov3 = coverage.Coverage()
cov3.combine()
assert self.stdout() == ""
# Now the data is empty!
_, statements, missing, _ = cov3.analysis("code1.py")
assert statements == [1]
assert missing == [1]
_, statements, missing, _ = cov3.analysis("code2.py")
assert statements == [1, 2]
assert missing == [1, 2]
def test_combining_with_a_used_coverage(self) -> None:
# Can you use a coverage object to run one shard of a parallel suite,
# and then also combine the data?
self.make_code1_code2()
cov = coverage.Coverage(data_suffix=True)
self.start_import_stop(cov, "code1")
cov.save()
cov = coverage.Coverage(data_suffix=True)
self.start_import_stop(cov, "code2")
cov.save()
cov.combine()
assert self.stdout() == ""
self.check_code1_code2(cov)
def test_ordered_combine(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/649
# The order of the [paths] setting used to matter. Now the
# resulting path must exist, so the order doesn't matter.
def make_files() -> None:
self.make_file("plugins/p1.py", "")
self.make_file("girder/g1.py", "")
self.make_data_file(
basename=".coverage.1",
lines={
abs_file("ci/girder/g1.py"): range(10),
abs_file("ci/girder/plugins/p1.py"): range(10),
},
)
def get_combined_filenames() -> set[str]:
cov = coverage.Coverage()
cov.combine()
assert self.stdout() == ""
cov.save()
data = cov.get_data()
filenames = {relative_filename(f).replace("\\", "/") for f in data.measured_files()}
return filenames
# Case 1: get the order right.
make_files()
self.make_file(
".coveragerc",
"""\
[paths]
plugins =
plugins/
ci/girder/plugins/
girder =
girder/
ci/girder/
""",
)
assert get_combined_filenames() == {"girder/g1.py", "plugins/p1.py"}
# Case 2: get the order "wrong".
make_files()
self.make_file(
".coveragerc",
"""\
[paths]
girder =
girder/
ci/girder/
plugins =
plugins/
ci/girder/plugins/
""",
)
assert get_combined_filenames() == {"girder/g1.py", "plugins/p1.py"}
def test_warnings(self) -> None:
self.make_file(
"hello.py",
"""\
import sys, os
print("Hello")
""",
)
with pytest.warns(Warning) as warns:
cov = coverage.Coverage(source=["sys", "xyzzy", "quux"])
self.start_import_stop(cov, "hello")
cov.get_data()
assert "Hello\n" == self.stdout()
assert_coverage_warnings(
warns,
"Module sys has no Python source. (module-not-python)",
"Module xyzzy was never imported. (module-not-imported)",
"Module quux was never imported. (module-not-imported)",
"No data was collected. (no-data-collected)",
)
def test_warnings_suppressed(self) -> None:
self.make_file(
"hello.py",
"""\
import sys, os
print("Hello")
""",
)
self.make_file(
".coveragerc",
"""\
[run]
disable_warnings = no-data-collected, module-not-imported
""",
)
with pytest.warns(Warning) as warns:
cov = coverage.Coverage(source=["sys", "xyzzy", "quux"])
self.start_import_stop(cov, "hello")
cov.get_data()
assert "Hello\n" == self.stdout()
assert_coverage_warnings(warns, "Module sys has no Python source. (module-not-python)")
# No "module-not-imported" in warns
# No "no-data-collected" in warns
def test_warn_once(self) -> None:
with pytest.warns(Warning) as warns:
cov = coverage.Coverage()
cov.load()
cov._warn("Warning, warning 1!", slug="bot", once=True)
cov._warn("Warning, warning 2!", slug="bot", once=True)
assert_coverage_warnings(warns, "Warning, warning 1! (bot)")
# No "Warning, warning 2!" in warns
assert len(get_coverage_warnings(warns)) == 1
def test_warnings_with_urls(self) -> None:
with pytest.warns(Warning) as warns:
cov = coverage.Coverage()
cov.load()
cov._warn("Warning Will Robinson", slug="will-rob")
cov._warn("Warning, warning 2!", slug="second-one")
warnings = get_coverage_warnings(warns)
def url(slug: str) -> str:
return (
f"https://coverage.readthedocs.io/en/{coverage.__version__}"
+ f"/messages.html#warning-{slug}"
)
assert warnings == [
f"Warning Will Robinson (will-rob); see {url('will-rob')}",
f"Warning, warning 2! (second-one); see {url('second-one')}",
]
def test_source_and_include_dont_conflict(self) -> None:
# A bad fix made this case fail: https://github.com/coveragepy/coveragepy/issues/541
self.make_file("a.py", "import b\na = 1")
self.make_file("b.py", "b = 1")
self.make_file(
".coveragerc",
"""\
[run]
source = .
""",
)
# Just like: coverage run a.py
cov = coverage.Coverage()
self.start_import_stop(cov, "a")
cov.save()
# Run the equivalent of: coverage report --include=b.py
cov = coverage.Coverage(include=["b.py"])
cov.load()
# There should be no exception. At one point, report() threw:
# CoverageException: --include and --source are mutually exclusive
cov.report()
expected = textwrap.dedent("""\
Name Stmts Miss Cover
---------------------------
b.py 1 0 100%
---------------------------
TOTAL 1 0 100%
""")
assert expected == self.stdout()
def test_config_crash(self) -> None:
# The internal '[run] _crash' setting can be used to artificially raise
# exceptions from inside Coverage.
cov = coverage.Coverage()
cov.set_option("run:_crash", "test_config_crash")
with pytest.raises(Exception, match="Crashing because called by test_config_crash"):
cov.start()
def test_config_crash_no_crash(self) -> None:
# '[run] _crash' really checks the call stack.
cov = coverage.Coverage()
cov.set_option("run:_crash", "not_my_caller")
cov.start()
cov.stop()
def test_run_debug_sys(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/907
cov = coverage.Coverage()
with cov.collect():
d = dict(cov.sys_info())
assert cast(str, d["data_file"]).endswith(".coverage")
@pytest.mark.skipif(not testenv.DYN_CONTEXTS, reason="No dynamic contexts with this core.")
| ApiTest |
python | wandb__wandb | wandb/vendor/pygments/lexers/asm.py | {
"start": 24101,
"end": 25261
} | class ____(RegexLexer):
"""
For ca65 assembler sources.
.. versionadded:: 1.6
"""
name = 'ca65 assembler'
aliases = ['ca65']
filenames = ['*.s']
flags = re.IGNORECASE
tokens = {
'root': [
(r';.*', Comment.Single),
(r'\s+', Text),
(r'[a-z_.@$][\w.@$]*:', Name.Label),
(r'((ld|st)[axy]|(in|de)[cxy]|asl|lsr|ro[lr]|adc|sbc|cmp|cp[xy]'
r'|cl[cvdi]|se[cdi]|jmp|jsr|bne|beq|bpl|bmi|bvc|bvs|bcc|bcs'
r'|p[lh][ap]|rt[is]|brk|nop|ta[xy]|t[xy]a|txs|tsx|and|ora|eor'
r'|bit)\b', Keyword),
(r'\.\w+', Keyword.Pseudo),
(r'[-+~*/^&|!<>=]', Operator),
(r'"[^"\n]*.', String),
(r"'[^'\n]*.", String.Char),
(r'\$[0-9a-f]+|[0-9a-f]+h\b', Number.Hex),
(r'\d+', Number.Integer),
(r'%[01]+', Number.Bin),
(r'[#,.:()=\[\]]', Punctuation),
(r'[a-z_.@$][\w.@$]*', Name),
]
}
def analyse_text(self, text):
# comments in GAS start with "#"
if re.match(r'^\s*;', text, re.MULTILINE):
return 0.9
| Ca65Lexer |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 67882,
"end": 70375
} | class ____(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, oo)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Scale parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
def pdf(self, x):
a, b = self.alpha, self.beta
return ((b/a)*(x/a)**(b - 1))/(1 + (x/a)**b)**2
def _cdf(self, x):
a, b = self.alpha, self.beta
return 1/(1 + (x/a)**(-b))
def _quantile(self, p):
a, b = self.alpha, self.beta
return a*((p/(1 - p))**(1/b))
def expectation(self, expr, var, **kwargs):
a, b = self.args
return Piecewise((S.NaN, b <= 1), (pi*a/(b*sin(pi/b)), True))
def LogLogistic(name, alpha, beta):
r"""
Create a continuous random variable with a log-logistic distribution.
The distribution is unimodal when ``beta > 1``.
Explanation
===========
The density of the log-logistic distribution is given by
.. math::
f(x) := \frac{(\frac{\beta}{\alpha})(\frac{x}{\alpha})^{\beta - 1}}
{(1 + (\frac{x}{\alpha})^{\beta})^2}
Parameters
==========
alpha : Real number, `\alpha > 0`, scale parameter and median of distribution
beta : Real number, `\beta > 0`, a shape parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogLogistic, density, cdf, quantile
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> p = Symbol("p")
>>> z = Symbol("z", positive=True)
>>> X = LogLogistic("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
beta - 1
/ z \
beta*|-----|
\alpha/
------------------------
2
/ beta \
|/ z \ |
alpha*||-----| + 1|
\\alpha/ /
>>> cdf(X)(z)
1/(1 + (z/alpha)**(-beta))
>>> quantile(X)(p)
alpha*(p/(1 - p))**(1/beta)
References
==========
.. [1] https://en.wikipedia.org/wiki/Log-logistic_distribution
"""
return rv(name, LogLogisticDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
#Logit-Normal distribution------------------------------------------------------
| LogLogisticDistribution |
python | wandb__wandb | wandb/sdk/wandb_login.py | {
"start": 4974,
"end": 5068
} | class ____(enum.Enum):
VALID = 1
NOTTY = 2
OFFLINE = 3
DISABLED = 4
| ApiKeyStatus |
python | neetcode-gh__leetcode | python/0695-max-area-of-island.py | {
"start": 0,
"end": 661
} | class ____:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
ROWS, COLS = len(grid), len(grid[0])
visit = set()
def dfs(r, c):
if (
r < 0
or r == ROWS
or c < 0
or c == COLS
or grid[r][c] == 0
or (r, c) in visit
):
return 0
visit.add((r, c))
return 1 + dfs(r + 1, c) + dfs(r - 1, c) + dfs(r, c + 1) + dfs(r, c - 1)
area = 0
for r in range(ROWS):
for c in range(COLS):
area = max(area, dfs(r, c))
return area
| Solution |
python | pytorch__pytorch | test/functorch/test_parsing.py | {
"start": 7527,
"end": 9021
} | class ____(TestCase):
def test_parse_pattern_number_of_arrows(self) -> None:
axes_lengths: dict[str, int] = {}
too_many_arrows_pattern = "a -> b -> c -> d"
with self.assertRaises(ValueError):
parse_pattern(too_many_arrows_pattern, axes_lengths)
too_few_arrows_pattern = "a"
with self.assertRaises(ValueError):
parse_pattern(too_few_arrows_pattern, axes_lengths)
just_right_arrows = "a -> a"
parse_pattern(just_right_arrows, axes_lengths)
def test_ellipsis_invalid_identifier(self) -> None:
axes_lengths: dict[str, int] = {"a": 1, _ellipsis: 2}
pattern = f"a {_ellipsis} -> {_ellipsis} a"
with self.assertRaises(ValueError):
parse_pattern(pattern, axes_lengths)
def test_ellipsis_matching(self) -> None:
axes_lengths: dict[str, int] = {}
pattern = "a -> a ..."
with self.assertRaises(ValueError):
parse_pattern(pattern, axes_lengths)
# raising an error on this pattern is handled by the rearrange expression validation
pattern = "a ... -> a"
parse_pattern(pattern, axes_lengths)
pattern = "a ... -> ... a"
parse_pattern(pattern, axes_lengths)
def test_left_parenthesized_ellipsis(self) -> None:
axes_lengths: dict[str, int] = {}
pattern = "(...) -> ..."
with self.assertRaises(ValueError):
parse_pattern(pattern, axes_lengths)
| TestParsingUtils |
python | pennersr__django-allauth | allauth/mfa/webauthn/forms.py | {
"start": 3761,
"end": 3875
} | class ____(AuthenticateWebAuthnForm):
reauthenticated = True
passwordless = False
| ReauthenticateWebAuthnForm |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 57177,
"end": 57261
} | class ____(_TestNormBase, TestCase):
dt = np.int64
dec = 12
| _TestNormInt64Base |
python | huggingface__transformers | src/transformers/convert_slow_tokenizer.py | {
"start": 41307,
"end": 41740
} | class ____(SpmConverter):
def post_processor(self):
return processors.TemplateProcessing(
single="[CLS]:0 $A:0 [SEP]:0",
pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
special_tokens=[
("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
],
)
| BigBirdConverter |
python | huggingface__transformers | tests/models/mpnet/test_tokenization_mpnet.py | {
"start": 846,
"end": 2616
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "microsoft/mpnet-base"
tokenizer_class = MPNetTokenizer
integration_expected_tokens = ['[UNK]', 'is', 'a', 'test', '[UNK]', '[UNK]', 'was', 'born', 'in', '92', '##00', '##0', ',', 'and', 'this', 'is', '[UNK]', '.', '生', '[UNK]', '的', '真', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '<s>', 'hi', '<s>', 'there', '[UNK]', 'following', 'string', 'should', 'be', 'properly', 'encoded', ':', '[UNK]', '.', '[UNK]', 'ir', '##d', 'and', '[UNK]', 'ir', '##d', '[UNK]', '[UNK]', 'how', 'are', 'you', 'doing'] # fmt: skip
integration_expected_token_ids = [104, 2007, 1041, 3235, 104, 104, 2005, 2145, 2003, 6231, 8893, 2696, 1014, 2002, 2027, 2007, 104, 1016, 1914, 104, 1920, 1925, 104, 104, 104, 104, 104, 104, 104, 0, 7636, 0, 2049, 104, 2210, 5168, 2327, 2026, 7923, 12363, 1028, 104, 1016, 104, 20872, 2098, 2002, 104, 20872, 2098, 104, 104, 2133, 2028, 2021, 2729] # fmt: skip
expected_tokens_from_ids = ['[UNK]', 'is', 'a', 'test', '[UNK]', '[UNK]', 'was', 'born', 'in', '92', '##00', '##0', ',', 'and', 'this', 'is', '[UNK]', '.', '生', '[UNK]', '的', '真', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '<s>', 'hi', '<s>', 'there', '[UNK]', 'following', 'string', 'should', 'be', 'properly', 'encoded', ':', '[UNK]', '.', '[UNK]', 'ir', '##d', 'and', '[UNK]', 'ir', '##d', '[UNK]', '[UNK]', 'how', 'are', 'you', 'doing'] # fmt: skip
integration_expected_decoded_text = "[UNK] is a test [UNK] [UNK] was born in 92000, and this is [UNK]. 生 [UNK] 的 真 [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] <s> hi <s> there [UNK] following string should be properly encoded : [UNK]. [UNK] ird and [UNK] ird [UNK] [UNK] how are you doing"
| MPNetTokenizerTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/yandex/provider.py | {
"start": 314,
"end": 512
} | class ____(ProviderAccount):
def to_str(self):
email = self.account.extra_data.get("default_email")
if email:
return email
return super().to_str()
| YandexAccount |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 5270,
"end": 6707
} | class ____:
def setup_method(self):
self.rdt = None
self.dec = 14
self.type = None
@pytest.fixture
def dct_lock(self):
return threading.Lock()
def test_definition(self, dct_lock):
for i in FFTWDATA_SIZES:
with dct_lock:
x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
y = dct(x, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg=f"Size {i} failed")
def test_axis(self):
nt = 2
rng = np.random.RandomState(1234)
for i in [7, 8, 9, 16, 32, 64]:
x = rng.randn(nt, i)
y = dct(x, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[j], dct(x[j], type=self.type),
decimal=self.dec)
x = x.T
y = dct(x, axis=0, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
decimal=self.dec)
| _TestDCTBase |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 11336,
"end": 11618
} | class ____(ToolBase):
"""Tool to call the figure manager destroy method."""
description = 'Quit the figure'
default_keymap = property(lambda self: mpl.rcParams['keymap.quit'])
def trigger(self, sender, event, data=None):
Gcf.destroy_fig(self.figure)
| ToolQuit |
python | python-attrs__attrs | tests/test_make.py | {
"start": 40247,
"end": 42186
} | class ____:
"""
Tests for `fields`.
"""
@given(simple_classes())
def test_instance(self, C):
"""
Raises `TypeError` on non-classes.
"""
with pytest.raises(TypeError) as e:
fields(C())
assert "Passed object must be a class." == e.value.args[0]
def test_handler_non_attrs_class(self):
"""
Raises `ValueError` if passed a non-*attrs* instance.
"""
with pytest.raises(NotAnAttrsClassError) as e:
fields(object)
assert (
f"{object!r} is not an attrs-decorated class."
) == e.value.args[0]
def test_handler_non_attrs_generic_class(self):
"""
Raises `ValueError` if passed a non-*attrs* generic class.
"""
T = TypeVar("T")
class B(Generic[T]):
pass
with pytest.raises(NotAnAttrsClassError) as e:
fields(B[str])
assert (
f"{B[str]!r} is not an attrs-decorated class."
) == e.value.args[0]
@given(simple_classes())
def test_fields(self, C):
"""
Returns a list of `Attribute`a.
"""
assert all(isinstance(a, Attribute) for a in fields(C))
@given(simple_classes())
def test_fields_properties(self, C):
"""
Fields returns a tuple with properties.
"""
for attribute in fields(C):
assert getattr(fields(C), attribute.name) is attribute
def test_generics(self):
"""
Fields work with generic classes.
"""
T = TypeVar("T")
@attr.define
class A(Generic[T]):
a: T
assert len(fields(A)) == 1
assert fields(A).a.name == "a"
assert fields(A).a.default is attr.NOTHING
assert len(fields(A[str])) == 1
assert fields(A[str]).a.name == "a"
assert fields(A[str]).a.default is attr.NOTHING
| TestFields |
python | python-openxml__python-docx | tests/oxml/unitdata/text.py | {
"start": 259,
"end": 445
} | class ____(BaseBuilder):
__nspfxs__ = ("w",)
__attrs__ = ()
def __init__(self, tag):
self.__tag__ = tag
super(CT_EmptyBuilder, self).__init__()
| CT_EmptyBuilder |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_cond_format18.py | {
"start": 315,
"end": 3381
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format18.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditionalFormatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write("A1", 1)
worksheet.write("A2", 2)
worksheet.write("A3", 3)
worksheet.write("A4", 4)
worksheet.write("A5", 5)
worksheet.write("A6", 6)
worksheet.write("A7", 7)
worksheet.write("A8", 8)
worksheet.write("A9", 9)
worksheet.write("A12", 75)
worksheet.conditional_format(
"A1", {"type": "icon_set", "icon_style": "3_arrows", "reverse_icons": True}
)
worksheet.conditional_format(
"A2", {"type": "icon_set", "icon_style": "3_flags", "icons_only": True}
)
worksheet.conditional_format(
"A3",
{
"type": "icon_set",
"icon_style": "3_traffic_lights_rimmed",
"icons_only": True,
"reverse_icons": True,
},
)
worksheet.conditional_format(
"A4",
{
"type": "icon_set",
"icon_style": "3_symbols_circled",
"icons": [{"value": 80}, {"value": 20}],
},
)
worksheet.conditional_format(
"A5",
{
"type": "icon_set",
"icon_style": "4_arrows",
"icons": [{"criteria": ">"}, {"criteria": ">"}, {"criteria": ">"}],
},
)
worksheet.conditional_format(
"A6",
{
"type": "icon_set",
"icon_style": "4_red_to_black",
"icons": [
{"criteria": ">=", "type": "number", "value": 90},
{"criteria": "<", "type": "percentile", "value": 50},
{"criteria": "<=", "type": "percent", "value": 25},
],
},
)
worksheet.conditional_format(
"A7",
{
"type": "icon_set",
"icon_style": "4_traffic_lights",
"icons": [{"value": "=$A$12"}],
},
)
worksheet.conditional_format(
"A8",
{
"type": "icon_set",
"icon_style": "5_arrows_gray",
"icons": [{"type": "formula", "value": "=$A$12"}],
},
)
worksheet.conditional_format(
"A9",
{
"type": "icon_set",
"icon_style": "5_quarters",
"icons": [{"value": 70}, {"value": 50}, {"value": 30}, {"value": 10}],
"reverse_icons": True,
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torch/_dynamo/comptime.py | {
"start": 12089,
"end": 15456
} | class ____:
@staticmethod
def __call__(
fn: Callable[[ComptimeContext], Any],
fallback_fn: Callable[[], Any] = lambda: None,
) -> Any:
"""fn gets called at compile time in TorchDynamo, calls fallback_fn otherwise"""
fallback_fn()
# Convenience wrappers that are more compact to use
@staticmethod
def graph_break() -> None:
comptime(lambda ctx: ctx.graph_break())
@staticmethod
def print(e: Any) -> None:
comptime(lambda ctx: ctx.print(ctx.get_local("e")), lambda: print(e))
@staticmethod
def print_graph() -> None:
comptime(lambda ctx: ctx.print_graph())
@staticmethod
def print_disas(*, stacklevel: int = 0) -> None:
comptime(
lambda ctx: ctx.print_disas(
stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
)
)
@staticmethod
def print_value_stack(*, stacklevel: int = 0) -> None:
comptime(
lambda ctx: ctx.print_value_stack(
stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
)
)
# This is a more useful variant of print_value_stack that can be used
# in an expression context; e.g., x + print_value_stack_and_return(y + z),
# you will see x on the stack prior to the addition operation
@staticmethod
def print_value_stack_and_return(e: Any, *, stacklevel: int = 0) -> Any:
comptime(
lambda ctx: ctx.print_value_stack(
stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
)
)
return e
@staticmethod
def print_locals(*, stacklevel: int = 0) -> None:
comptime(
lambda ctx: ctx.print_locals(
stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
)
)
@staticmethod
def print_bt(*, stacklevel: int = 0) -> None:
comptime(
lambda ctx: ctx.print_bt(
stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
)
)
@staticmethod
def print_guards() -> None:
comptime(lambda ctx: ctx.print_guards())
@staticmethod
def assert_static(val: Any) -> None:
comptime(lambda ctx: ctx.assert_static(ctx.get_local("val")))
@staticmethod
def force_static(val: Any) -> None:
comptime(lambda ctx: ctx.get_local("val").force_static())
@staticmethod
def breakpoint() -> None:
"""
Like pdb breakpoint(), but drop into pdb whenever this line
of code is compiled by dynamo. Use it by putting
this in your model code::
from torch._dynamo.comptime import comptime
comptime.breakpoint()
And then, inside pdb, you can access 'ctx' to query things
about the compilation context::
(Pdb) !ctx.print_bt()
(Pdb) !ctx.print_locals()
(Pdb) p ctx.get_local("attention").as_fake()
"""
def inner(inner_ctx: ComptimeContext) -> None:
ctx = inner_ctx.parent() # noqa: F841
builtins.breakpoint()
comptime(inner)
@staticmethod
def sleep(sec: Union[int, float]) -> None:
comptime(lambda ctx: ctx.sleep(ctx.get_local("sec").as_python_constant()))
comptime = _Comptime()
| _Comptime |
python | sqlalchemy__sqlalchemy | examples/versioned_rows/versioned_rows_w_versionid.py | {
"start": 3653,
"end": 4559
} | class ____(Versioned, Base):
__tablename__ = "child"
data = Column(String)
def new_version(self, session):
# expire parent's reference to us
session.expire(self.parent, ["child"])
# create new version
Versioned.new_version(self, session)
# re-add ourselves to the parent. this causes the
# parent foreign key to be updated also
self.parent.child = self
Base.metadata.create_all(engine)
session = Session()
p1 = Parent(child=Child(id=1, data="c1"))
session.add(p1)
session.commit()
p1.child.data = "c2"
session.commit()
assert p1.child_id == 1
assert p1.child.version_id == 2
assert session.query(
Child.id,
Child.version_id,
Child.is_current_version,
Child.calc_is_current_version,
Child.data,
).order_by(Child.id, Child.version_id).all() == (
[(1, 1, False, False, "c1"), (1, 2, True, True, "c2")]
)
| Child |
python | streamlit__streamlit | lib/tests/streamlit/elements/slider_test.py | {
"start": 1328,
"end": 13375
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall slider protos."""
def test_just_label(self):
"""Test that it can be called with no value."""
st.slider("the label")
c = self.get_delta_from_queue().new_element.slider
assert c.label == "the label"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
assert c.default == [0]
assert not c.disabled
def test_just_disabled(self):
"""Test that it can be called with disabled param."""
st.slider("the label", disabled=True)
c = self.get_delta_from_queue().new_element.slider
assert c.disabled
PST = timezone(timedelta(hours=-8), "PST")
AWARE_DT = datetime(2020, 1, 1, tzinfo=PST)
AWARE_DT_END = datetime(2020, 1, 5, tzinfo=PST)
AWARE_TIME = time(12, 00, tzinfo=PST)
AWARE_TIME_END = time(21, 00, tzinfo=PST)
# datetimes are serialized in proto as micros since epoch
AWARE_DT_MICROS = 1577836800000000
AWARE_DT_END_MICROS = 1578182400000000
AWARE_TIME_MICROS = 946728000000000
AWARE_TIME_END_MICROS = 946760400000000
@parameterized.expand(
[
(1, [1], 1), # int
((0, 1), [0, 1], (0, 1)), # int tuple
([0, 1], [0, 1], (0, 1)), # int list
(0.5, [0.5], 0.5), # float
((0.2, 0.5), [0.2, 0.5], (0.2, 0.5)), # float tuple
([0.2, 0.5], [0.2, 0.5], (0.2, 0.5)), # float list
(np.int64(1), [1], 1), # numpy int
(np.int32(1), [1], 1), # numpy int
(np.single(0.5), [0.5], 0.5), # numpy float
(np.double(0.5), [0.5], 0.5), # numpy float
(AWARE_DT, [AWARE_DT_MICROS], AWARE_DT), # datetime
(
(AWARE_DT, AWARE_DT_END), # datetime tuple
[AWARE_DT_MICROS, AWARE_DT_END_MICROS],
(AWARE_DT, AWARE_DT_END),
),
(
[AWARE_DT, AWARE_DT_END], # datetime list
[AWARE_DT_MICROS, AWARE_DT_END_MICROS],
(AWARE_DT, AWARE_DT_END),
),
(AWARE_TIME, [AWARE_TIME_MICROS], AWARE_TIME), # datetime
(
(AWARE_TIME, AWARE_TIME_END), # datetime tuple
[AWARE_TIME_MICROS, AWARE_TIME_END_MICROS],
(AWARE_TIME, AWARE_TIME_END),
),
(
[AWARE_TIME, AWARE_TIME_END], # datetime list
[AWARE_TIME_MICROS, AWARE_TIME_END_MICROS],
(AWARE_TIME, AWARE_TIME_END),
),
]
)
def test_value_types(self, value, proto_value, return_value):
"""Test that it supports different types of values."""
ret = st.slider("the label", value=value)
assert ret == return_value
c = self.get_delta_from_queue().new_element.slider
assert c.label == "the label"
assert c.default == proto_value
@parameterized.expand(
[
"5", # str
5j, # complex
b"5", # bytes
]
)
def test_invalid_types(self, value):
"""Test that it rejects invalid types, specifically things that are *almost* numbers"""
with pytest.raises(StreamlitAPIException):
st.slider("the label", value=value)
@parameterized.expand(
[
(1, 2, 1, 1),
(np.int64(1), 2, 1, 1),
(1, np.int64(2), 1, 1),
(1, 2, np.int64(1), 1),
(np.single(0.5), 1.5, 0.5, 0.5),
]
)
def test_matching_types(self, min_value, max_value, value, return_value):
"""Test that NumPy types are seen as compatible with numerical Python types"""
ret = st.slider(
"the label", min_value=min_value, max_value=max_value, value=value
)
assert ret == return_value
NAIVE_DT = datetime(2020, 2, 1)
NAIVE_DT_END = datetime(2020, 2, 4)
NAIVE_TIME = time(6, 20, 34)
NAIVE_TIME_END = time(20, 6, 43)
DATE_START = date(2020, 4, 5)
DATE_END = date(2020, 6, 6)
@parameterized.expand(
[
(NAIVE_DT, NAIVE_DT), # naive datetime
((NAIVE_DT, NAIVE_DT_END), (NAIVE_DT, NAIVE_DT_END)),
([NAIVE_DT, NAIVE_DT_END], (NAIVE_DT, NAIVE_DT_END)),
(NAIVE_TIME, NAIVE_TIME), # naive time
((NAIVE_TIME, NAIVE_TIME_END), (NAIVE_TIME, NAIVE_TIME_END)),
([NAIVE_TIME, NAIVE_TIME_END], (NAIVE_TIME, NAIVE_TIME_END)),
(DATE_START, DATE_START), # date (always naive)
((DATE_START, DATE_END), (DATE_START, DATE_END)),
([DATE_START, DATE_END], (DATE_START, DATE_END)),
]
)
def test_naive_timelikes(self, value, return_value):
"""Ignore proto values (they change based on testing machine's timezone)"""
ret = st.slider("the label", value=value)
c = self.get_delta_from_queue().new_element.slider
assert ret == return_value
assert c.label == "the label"
def test_range_session_state(self):
"""Test a range set by session state."""
state = st.session_state
state["slider"] = [10, 20]
slider = st.slider(
"select a range",
min_value=0,
max_value=100,
key="slider",
)
assert slider == [10, 20]
def test_value_greater_than_min(self):
ret = st.slider("Slider label", 10, 100, 0)
c = self.get_delta_from_queue().new_element.slider
assert ret == 0
assert c.min == 0
def test_value_smaller_than_max(self):
ret = st.slider("Slider label", 10, 100, 101)
c = self.get_delta_from_queue().new_element.slider
assert ret == 101
assert c.max == 101
def test_max_min(self):
ret = st.slider("Slider label", 101, 100, 101)
c = self.get_delta_from_queue().new_element.slider
assert ret == 101
assert c.min == 100
assert c.max == 101
def test_min_equals_max(self):
with pytest.raises(StreamlitAPIException):
st.slider("oh no", min_value=10, max_value=10)
with pytest.raises(StreamlitAPIException):
date = datetime(2024, 4, 3)
st.slider("datetime", min_value=date, max_value=date)
def test_value_out_of_bounds(self):
# Max int
with pytest.raises(StreamlitAPIException) as exc:
max_value = JSNumber.MAX_SAFE_INTEGER + 1
st.slider("Label", max_value=max_value)
assert f"`max_value` ({max_value}) must be <= (1 << 53) - 1" == str(exc.value)
# Min int
with pytest.raises(StreamlitAPIException) as exc:
min_value = JSNumber.MIN_SAFE_INTEGER - 1
st.slider("Label", min_value=min_value)
assert f"`min_value` ({min_value}) must be >= -((1 << 53) - 1)" == str(
exc.value
)
# Max float
with pytest.raises(StreamlitAPIException) as exc:
max_value = 2e308
st.slider("Label", value=0.5, max_value=max_value)
assert f"`max_value` ({max_value}) must be <= 1.797e+308" == str(exc.value)
# Min float
with pytest.raises(StreamlitAPIException) as exc:
min_value = -2e308
st.slider("Label", value=0.5, min_value=min_value)
assert f"`min_value` ({min_value}) must be >= -1.797e+308" == str(exc.value)
def test_step_zero(self):
with pytest.raises(StreamlitAPIException) as exc:
st.slider("Label", min_value=0, max_value=10, step=0)
assert str(exc.value) == "Slider components cannot be passed a `step` of 0."
def test_outside_form(self):
"""Test that form id is marshalled correctly outside of a form."""
st.slider("foo")
proto = self.get_delta_from_queue().new_element.slider
assert proto.form_id == ""
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form(self):
"""Test that form id is marshalled correctly inside of a form."""
with st.form("form"):
st.slider("foo")
# 2 elements will be created: form block, widget
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
slider_proto = self.get_delta_from_queue(1).new_element.slider
assert slider_proto.form_id == form_proto.form.form_id
def test_inside_column(self):
"""Test that it works correctly inside of a column."""
col1, _col2 = st.columns(2)
with col1:
st.slider("foo")
all_deltas = self.get_all_deltas_from_queue()
# 4 elements will be created: 1 horizontal block, 2 columns, 1 widget
assert len(all_deltas) == 4
slider_proto = self.get_delta_from_queue().new_element.slider
assert slider_proto.label == "foo"
@parameterized.expand(
[
("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE),
("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN),
("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED),
]
)
def test_label_visibility(self, label_visibility_value, proto_value):
"""Test that it can be called with label_visibility param."""
st.slider("the label", label_visibility=label_visibility_value)
c = self.get_delta_from_queue().new_element.slider
assert c.label_visibility.value == proto_value
def test_label_visibility_wrong_value(self):
with pytest.raises(StreamlitAPIException) as e:
st.slider("the label", label_visibility="wrong_value")
assert (
str(e.value)
== "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'."
)
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
st.cache_data(lambda: st.slider("the label"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_should_raise_exception_when_session_state_value_out_of_range(self):
"""Test out of range using st.session_state to set slider values beyond min/max."""
# Test for integer values
with pytest.raises(StreamlitValueAboveMaxError) as e:
st.session_state.slider = 10
st.slider("slider", min_value=1, max_value=5, key="slider")
assert str(e.value) == "The `value` 10 is greater than the `max_value` 5."
with pytest.raises(StreamlitValueBelowMinError) as e:
st.session_state.slider_1 = 10
st.slider("slider_1", min_value=15, max_value=20, key="slider_1")
assert str(e.value) == "The `value` 10 is less than the `min_value` 15."
# Test for dates
with pytest.raises(StreamlitValueAboveMaxError) as e:
st.session_state.slider_2 = date(2025, 1, 1)
st.slider(
"slider_2",
min_value=date(2024, 1, 1),
max_value=date(2024, 12, 31),
key="slider_2",
)
assert (
str(e.value)
== "The `value` 2025-01-01 is greater than the `max_value` 2024-12-31."
)
with pytest.raises(StreamlitValueBelowMinError) as e:
st.session_state.slider_3 = date(2023, 1, 1)
st.slider(
"slider_3",
min_value=date(2024, 1, 1),
max_value=date(2024, 12, 31),
key="slider_3",
)
assert (
str(e.value)
== "The `value` 2023-01-01 is less than the `min_value` 2024-01-01."
)
| SliderTest |
python | py-pdf__pypdf | pypdf/generic/_data_structures.py | {
"start": 39186,
"end": 40675
} | class ____(StreamObject):
def __init__(self) -> None:
self.decoded_self: Optional[DecodedStreamObject] = None
# This overrides the parent method
def get_data(self) -> bytes:
from ..filters import decode_stream_data # noqa: PLC0415
if self.decoded_self is not None:
# Cached version of decoded object
return self.decoded_self.get_data()
# Create decoded object
decoded = DecodedStreamObject()
decoded.set_data(decode_stream_data(self))
for key, value in self.items():
if key not in (SA.LENGTH, SA.FILTER, SA.DECODE_PARMS):
decoded[key] = value
self.decoded_self = decoded
return decoded.get_data()
# This overrides the parent method:
def set_data(self, data: bytes) -> None:
from ..filters import FlateDecode # noqa: PLC0415
if self.get(SA.FILTER, "") in (FT.FLATE_DECODE, [FT.FLATE_DECODE]):
if not isinstance(data, bytes):
raise TypeError("Data must be bytes")
if self.decoded_self is None:
self.get_data() # to create self.decoded_self
assert self.decoded_self is not None, "mypy"
self.decoded_self.set_data(data)
super().set_data(FlateDecode.encode(data))
else:
raise PdfReadError(
"Streams encoded with a filter different from FlateDecode are not supported"
)
| EncodedStreamObject |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/multinomial_test.py | {
"start": 1149,
"end": 14543
} | class ____(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_v1_only("b/120545219")
def testSimpleShapes(self):
with self.cached_session():
p = [.1, .3, .6]
dist = multinomial.Multinomial(total_count=1., probs=p)
self.assertEqual(3, dist.event_shape_tensor().eval())
self.assertAllEqual([], dist.batch_shape_tensor())
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
@test_util.run_v1_only("b/120545219")
def testComplexShapes(self):
with self.cached_session():
p = 0.5 * np.ones([3, 2, 2], dtype=np.float32)
n = [[3., 2], [4, 5], [6, 7]]
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual(2, dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2], dist.batch_shape_tensor())
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
@test_util.run_v1_only("b/120545219")
def testN(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual((2, 1), dist.total_count.get_shape())
self.assertAllClose(n, dist.total_count)
@test_util.run_v1_only("b/120545219")
def testP(self):
p = [[0.1, 0.2, 0.7]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=3., probs=p)
self.assertEqual((1, 3), dist.probs.get_shape())
self.assertEqual((1, 3), dist.logits.get_shape())
self.assertAllClose(p, dist.probs)
@test_util.run_v1_only("b/120545219")
def testLogits(self):
p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32)
logits = np.log(p) - 50.
with self.cached_session():
multinom = multinomial.Multinomial(total_count=3., logits=logits)
self.assertEqual((1, 3), multinom.probs.get_shape())
self.assertEqual((1, 3), multinom.logits.get_shape())
self.assertAllClose(p, multinom.probs)
self.assertAllClose(logits, multinom.logits)
@test_util.run_v1_only("b/120545219")
def testPmfUnderflow(self):
logits = np.array([[-200, 0]], dtype=np.float32)
with self.cached_session():
dist = multinomial.Multinomial(total_count=1., logits=logits)
lp = dist.log_prob([1., 0.]).eval()[0]
self.assertAllClose(-200, lp, atol=0, rtol=1e-6)
@test_util.run_v1_only("b/120545219")
def testPmfandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=n, probs=p, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
with self.assertRaisesOpError("must be non-negative"):
dist.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
dist.prob([3., 3, 0]).eval()
@test_util.run_v1_only("b/120545219")
def testPmfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
# No errors with integer n.
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=True)
multinom.prob([2., 1, 2]).eval()
multinom.prob([3., 0, 2]).eval()
# Counts don't sum to n.
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
multinom.prob([2., 3, 2]).eval()
# Counts are non-integers.
x = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(
"cannot contain fractional components."):
multinom.prob(x).eval(feed_dict={x: [1.0, 2.5, 1.5]})
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=False)
multinom.prob([1., 2., 2.]).eval()
# Non-integer arguments work.
multinom.prob([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = [0.5, 0.5]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(0.5, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = [0.1, 0.9]
counts = [3., 2]
dist = multinomial.Multinomial(total_count=5., probs=p)
pmf = dist.prob(counts)
# 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000.
self.assertAllClose(81. / 10000, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9]]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
self.assertEqual((2), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [0.1, 0.9]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
self.assertEqual((2), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [[1., 0]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf, [0.1, 0.7])
self.assertEqual((2), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf, [0.1, 0.7])
self.assertEqual(pmf.get_shape(), (2))
def testPmfShapeCountsStretchedN(self):
with self.cached_session():
# [2, 2, 2]
p = [[[0.1, 0.9], [0.1, 0.9]], [[0.7, 0.3], [0.7, 0.3]]]
# [2, 2]
n = [[3., 3], [3, 3]]
# [2]
counts = [2., 1]
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
self.evaluate(pmf)
self.assertEqual(pmf.get_shape(), (2, 2))
def testPmfShapeCountsPStretchedN(self):
with self.cached_session():
p = [0.1, 0.9]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
self.evaluate(pmf)
self.assertEqual((4, 3), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testMultinomialMean(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_means = 5 * np.array(p, dtype=np.float32)
self.assertEqual((3,), dist.mean().get_shape())
self.assertAllClose(expected_means, dist.mean())
@test_util.run_v1_only("b/120545219")
def testMultinomialCovariance(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_covariances = [[9. / 20, -1 / 10, -7 / 20],
[-1 / 10, 4 / 5, -7 / 10],
[-7 / 20, -7 / 10, 21 / 20]]
self.assertEqual((3, 3), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance())
@test_util.run_v1_only("b/120545219")
def testMultinomialCovarianceBatch(self):
with self.cached_session():
# Shape [2]
n = [5.] * 2
# Shape [4, 1, 2]
p = [[[0.1, 0.9]], [[0.1, 0.9]]] * 2
dist = multinomial.Multinomial(total_count=n, probs=p)
# Shape [2, 2]
inner_var = [[9. / 20, -9 / 20], [-9 / 20, 9 / 20]]
# Shape [4, 2, 2, 2]
expected_covariances = [[inner_var, inner_var]] * 4
self.assertEqual((4, 2, 2, 2), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance())
def testCovarianceMultidimensional(self):
# Shape [3, 5, 4]
p = np.random.dirichlet([.25, .25, .25, .25], [3, 5]).astype(np.float32)
# Shape [6, 3, 3]
p2 = np.random.dirichlet([.3, .3, .4], [6, 3]).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1]).astype(np.float32)
with self.cached_session():
dist = multinomial.Multinomial(ns, p)
dist2 = multinomial.Multinomial(ns2, p2)
covariance = dist.covariance()
covariance2 = dist2.covariance()
self.assertEqual((3, 5, 4, 4), covariance.get_shape())
self.assertEqual((6, 3, 3, 3), covariance2.get_shape())
@test_util.run_v1_only("b/120545219")
def testCovarianceFromSampling(self):
# We will test mean, cov, var, stddev on a DirichletMultinomial constructed
# via broadcast between alpha, n.
theta = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
theta /= np.sum(theta, 1)[..., array_ops.newaxis]
n = np.array([[10., 9.], [8., 7.], [6., 5.]], dtype=np.float32)
with self.cached_session() as sess:
# batch_shape=[3, 2], event_shape=[3]
dist = multinomial.Multinomial(n, theta)
x = dist.sample(int(1000e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[array_ops.newaxis, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., array_ops.newaxis],
x_centered[..., array_ops.newaxis, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = sess.run([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0.01, rtol=0.01)
self.assertAllClose(sample_cov_, analytic_cov, atol=0.01, rtol=0.01)
self.assertAllClose(sample_var_, analytic_var, atol=0.01, rtol=0.01)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.01, rtol=0.01)
@test_util.run_v1_only("b/120545219")
def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess:
dist = multinomial.Multinomial(
total_count=[7., 6., 5.],
logits=math_ops.log(2. * self._rng.rand(4, 3, 2).astype(np.float32)))
n = int(3e4)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
# Cyclically rotate event dims left.
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.10)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
@test_util.run_v1_only("b/120545219")
def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess:
dist = multinomial.Multinomial(
total_count=5.,
logits=math_ops.log(2. * self._rng.rand(4).astype(np.float32)))
n = int(5e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean # Already transposed to [n, 2].
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.10)
self.assertAllEqual([4, 4], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
def testNotReparameterized(self):
total_count = constant_op.constant(5.0)
p = constant_op.constant([0.2, 0.6])
with backprop.GradientTape() as tape:
tape.watch(total_count)
tape.watch(p)
dist = multinomial.Multinomial(
total_count=total_count,
probs=p)
samples = dist.sample(100)
grad_total_count, grad_p = tape.gradient(samples, [total_count, p])
self.assertIsNone(grad_total_count)
self.assertIsNone(grad_p)
if __name__ == "__main__":
test.main()
| MultinomialTest |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 16475,
"end": 16704
} | class ____(OrthogonalPolyRule):
def eval(self) -> Expr:
n, x = self.n, self.variable
return Piecewise(
(chebyshevt(n + 1, x)/(n + 1), Ne(n, -1)),
(S.Zero, True))
@dataclass
| ChebyshevURule |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mysqlconnector.py | {
"start": 4006,
"end": 4135
} | class ____(
IdentifierPreparerCommon_mysqlconnector, MySQLIdentifierPreparer
):
pass
| MySQLIdentifierPreparer_mysqlconnector |
python | openai__openai-python | src/openai/types/beta/threads/runs/file_search_tool_call.py | {
"start": 1392,
"end": 1633
} | class ____(BaseModel):
ranking_options: Optional[FileSearchRankingOptions] = None
"""The ranking options for the file search."""
results: Optional[List[FileSearchResult]] = None
"""The results of the file search."""
| FileSearch |
python | django__django | tests/serializers/models/data.py | {
"start": 2176,
"end": 2247
} | class ____(models.Model):
data = models.TimeField(null=True)
| TimeData |
python | django__django | django/contrib/postgres/forms/ranges.py | {
"start": 3484,
"end": 3652
} | class ____(BaseRangeField):
default_error_messages = {"invalid": _("Enter two valid dates.")}
base_field = forms.DateField
range_type = DateRange
| DateRangeField |
python | getsentry__responses | responses/tests/test_recorder.py | {
"start": 1985,
"end": 5534
} | class ____:
def setup_method(self):
self.out_file = Path("response_record")
if self.out_file.exists():
self.out_file.unlink() # pragma: no cover
assert not self.out_file.exists()
def test_recorder(self, httpserver):
url202, url400, url404, url500 = self.prepare_server(httpserver)
def another():
requests.get(url500)
requests.put(url202)
@_recorder.record(file_path=self.out_file)
def run():
requests.get(url404)
requests.get(url400)
another()
run()
with open(self.out_file) as file:
data = yaml.safe_load(file)
assert data == get_data(httpserver.host, httpserver.port)
def test_recorder_toml(self, httpserver):
custom_recorder = _recorder.Recorder()
def dump_to_file(file_path, registered):
with open(file_path, "wb") as file:
_dump(registered, file, tomli_w.dump) # type: ignore[arg-type]
custom_recorder.dump_to_file = dump_to_file # type: ignore[assignment]
url202, url400, url404, url500 = self.prepare_server(httpserver)
def another():
requests.get(url500)
requests.put(url202)
@custom_recorder.record(file_path=self.out_file)
def run():
requests.get(url404)
requests.get(url400)
another()
run()
with open(self.out_file, "rb") as file:
data = _toml.load(file)
assert data == get_data(httpserver.host, httpserver.port)
def prepare_server(self, httpserver):
httpserver.expect_request("/500").respond_with_data(
"500 Internal Server Error",
status=500,
content_type="text/plain",
headers={"x": "foo"},
)
httpserver.expect_request("/202").respond_with_data(
"OK",
status=202,
content_type="image/tiff",
)
httpserver.expect_request("/404").respond_with_data(
"404 Not Found",
status=404,
content_type="text/plain",
headers={"x": "foo"},
)
httpserver.expect_request("/status/wrong").respond_with_data(
"Invalid status code",
status=400,
content_type="text/plain",
headers={"x": "foo"},
)
url500 = httpserver.url_for("/500")
url202 = httpserver.url_for("/202")
url404 = httpserver.url_for("/404")
url400 = httpserver.url_for("/status/wrong")
return url202, url400, url404, url500
def test_use_recorder_without_decorator(self, httpserver):
"""I want to be able to record in the REPL."""
url202, url400, url404, url500 = self.prepare_server(httpserver)
_recorder.recorder.start()
def another():
requests.get(url500)
requests.put(url202)
def run():
requests.get(url404)
requests.get(url400)
another()
run()
_recorder.recorder.stop()
_recorder.recorder.dump_to_file(self.out_file)
with open(self.out_file) as file:
data = yaml.safe_load(file)
assert data == get_data(httpserver.host, httpserver.port)
# Now, we test that the recorder is properly reset
assert _recorder.recorder.get_registry().registered
_recorder.recorder.reset()
assert not _recorder.recorder.get_registry().registered
| TestRecord |
python | spack__spack | lib/spack/spack/test/concretization/core.py | {
"start": 125346,
"end": 126524
} | class ____:
"""Tests the container of concrete specs"""
@pytest.mark.parametrize(
"input_specs", [["pkg-a"], ["pkg-a foobar=bar", "pkg-b"], ["pkg-a foobar=baz", "pkg-b"]]
)
def test_adding_specs(self, input_specs, default_mock_concretization):
"""Tests that concrete specs in the container are equivalent, but stored as different
objects in memory.
"""
container = spack.solver.asp.ConcreteSpecsByHash()
input_specs = [spack.concretize.concretize_one(s) for s in input_specs]
for s in input_specs:
container.add(s)
for root in input_specs:
for node in root.traverse(root=True):
assert node == container[node.dag_hash()]
assert node.dag_hash() in container
assert node is not container[node.dag_hash()]
@pytest.fixture()
def edges_test_repository():
repository_path = os.path.join(spack.paths.test_repos_path, "spack_repo", "edges_test")
with spack.repo.use_repositories(repository_path) as mock_repo:
yield mock_repo
@pytest.mark.usefixtures("mutable_config", "edges_test_repository")
| TestConcreteSpecsByHash |
python | walkccc__LeetCode | solutions/2652. Sum Multiples/2652-2.py | {
"start": 0,
"end": 468
} | class ____:
def sumOfMultiples(self, n: int) -> int:
def sumOfMultiples(value: int) -> int:
"""Returns the sum of multiples of value in [1, n]."""
lo = value
hi = (n // value) * value
count = (hi - lo) // value + 1
return (lo + hi) * count // 2
return (sumOfMultiples(3) + sumOfMultiples(5) + sumOfMultiples(7) -
(sumOfMultiples(15) + sumOfMultiples(21) + sumOfMultiples(35)) +
sumOfMultiples(105))
| Solution |
python | numba__numba | numba/tests/doc_examples/test_structref_usage.py | {
"start": 2504,
"end": 4852
} | class ____(unittest.TestCase):
def test_type_definition(self):
np.random.seed(0)
# Redirect print
buf = []
def print(*args):
buf.append(args)
# magictoken.ex_structref_type_definition_test.begin
# Let's test our new StructRef.
# Define one in Python
alice = MyStruct("Alice", vector=np.random.random(3))
# Define one in jit-code
@njit
def make_bob():
bob = MyStruct("unnamed", vector=np.zeros(3))
# Mutate the attributes
bob.name = "Bob"
bob.vector = np.random.random(3)
return bob
bob = make_bob()
# Out: Alice: [0.5488135 0.71518937 0.60276338]
print(f"{alice.name}: {alice.vector}")
# Out: Bob: [0.88325739 0.73527629 0.87746707]
print(f"{bob.name}: {bob.vector}")
# Define a jit function to operate on the structs.
@njit
def distance(a, b):
return np.linalg.norm(a.vector - b.vector)
# Out: 0.4332647200356598
print(distance(alice, bob))
# magictoken.ex_structref_type_definition_test.end
self.assertEqual(len(buf), 3)
def test_overload_method(self):
# magictoken.ex_structref_method.begin
from numba.core.extending import overload_method
from numba.core.errors import TypingError
# Use @overload_method to add a method for
# MyStructType.distance(other)
# where *other* is an instance of MyStructType.
@overload_method(MyStructType, "distance")
def ol_distance(self, other):
# Guard that *other* is an instance of MyStructType
if not isinstance(other, MyStructType):
raise TypingError(
f"*other* must be a {MyStructType}; got {other}"
)
def impl(self, other):
return np.linalg.norm(self.vector - other.vector)
return impl
# Test
@njit
def test():
alice = MyStruct("Alice", vector=np.random.random(3))
bob = MyStruct("Bob", vector=np.random.random(3))
# Use the method
return alice.distance(bob)
# magictoken.ex_structref_method.end
self.assertIsInstance(test(), float)
| TestStructRefUsage |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 61754,
"end": 63241
} | class ____(NonStrictDataModel):
"""
:param key: Entry key
:type key: str
:param mode: System defined input/output indication
:type mode: ArtifactModeEnum
"""
_schema = {
"properties": {
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
},
"required": ["key"],
"type": "object",
}
def __init__(self, key, mode=None, **kwargs):
super(ArtifactId, self).__init__(**kwargs)
self.key = key
self.mode = mode
@schema_property("key")
def key(self):
return self._property_key
@key.setter
def key(self, value):
if value is None:
self._property_key = None
return
self.assert_isinstance(value, "key", six.string_types)
self._property_key = value
@schema_property("mode")
def mode(self):
return self._property_mode
@mode.setter
def mode(self, value):
if value is None:
self._property_mode = None
return
if isinstance(value, six.string_types):
try:
value = ArtifactModeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "mode", enum.Enum)
self._property_mode = value
| ArtifactId |
python | bokeh__bokeh | tests/unit/bokeh/util/test_util__serialization.py | {
"start": 1464,
"end": 10642
} | class ____:
def test_default(self) -> None:
bus._simple_id = 999
assert bus.make_id() == "p1000"
assert bus.make_id() == "p1001"
assert bus.make_id() == "p1002"
def test_simple_ids_yes(self) -> None:
bus._simple_id = 999
with envset(BOKEH_SIMPLE_IDS="yes"):
assert bus.make_id() == "p1000"
assert bus.make_id() == "p1001"
assert bus.make_id() == "p1002"
def test_simple_ids_no(self) -> None:
with envset(BOKEH_SIMPLE_IDS="no"):
assert len(bus.make_id()) == 36
assert isinstance(bus.make_id(), str)
def test_make_globally_unique_id() -> None:
assert len(bus.make_globally_unique_id()) == 36
assert isinstance(bus.make_globally_unique_id(), str)
def test_make_globally_unique_css_safe_id() -> None:
assert len(bus.make_globally_unique_css_safe_id()) == 36
assert isinstance(bus.make_globally_unique_id(), str)
assert all(bus.make_globally_unique_css_safe_id()[0].isalpha() for _ in range(1000))
def test_np_consts() -> None:
assert bus.NP_EPOCH == np.datetime64(0, 'ms')
assert bus.NP_MS_DELTA == np.timedelta64(1, 'ms')
def test_binary_array_types() -> None:
assert len(bus.BINARY_ARRAY_TYPES) == 9
dtypes = [
np.dtype(np.bool_),
np.dtype(np.uint8),
np.dtype(np.int8),
np.dtype(np.uint16),
np.dtype(np.int16),
np.dtype(np.uint32),
np.dtype(np.int32),
#np.dtype(np.uint64),
#np.dtype(np.int64),
np.dtype(np.float32),
np.dtype(np.float64),
]
for dtype in dtypes:
assert dtype in bus.BINARY_ARRAY_TYPES
def test_datetime_types() -> None:
if is_installed("pandas"):
# Four of the types are pandas specific
assert len(bus.DATETIME_TYPES) == 7
else:
assert len(bus.DATETIME_TYPES) == 3
def test_is_timedelta_type_non_pandas_types() -> None:
assert bus.is_timedelta_type(datetime.timedelta(3000))
assert bus.is_timedelta_type(np.timedelta64(3000, 'ms'))
def test_is_timedelta_type_pandas_types() -> None:
pd = pytest.importorskip("pandas")
assert bus.is_timedelta_type(pd.Timedelta("3000ms"))
def test_convert_timedelta_type_non_pandas_types() -> None:
assert bus.convert_timedelta_type(datetime.timedelta(3000)) == 259200000000.0
assert bus.convert_timedelta_type(np.timedelta64(3000, 'ms')) == 3000.
def test_convert_timedelta_type_pandas_types() -> None:
pd = pytest.importorskip("pandas")
assert bus.convert_timedelta_type(pd.Timedelta("3000ms")) == 3000.0
def test_is_datetime_type_non_pandas_types() -> None:
assert bus.is_datetime_type(datetime.datetime(2016, 5, 11))
assert bus.is_datetime_type(datetime.time(3, 54))
assert bus.is_datetime_type(np.datetime64("2011-05-11"))
def test_is_datetime_type_pandas_types() -> None:
pd = pytest.importorskip("pandas")
assert bus.is_datetime_type(pd.Timestamp(3000000))
assert bus.is_datetime_type(pd.Period('1900', 'A-DEC' if pandas_1x else 'Y-DEC'))
assert bus.is_datetime_type(pd.NaT)
def test_convert_datetime_type_non_pandas_types() -> None:
assert bus.convert_datetime_type(datetime.datetime(2018, 1, 3, 15, 37, 59, 922452)) == 1514993879922.452
assert bus.convert_datetime_type(datetime.datetime(2018, 1, 3, 15, 37, 59)) == 1514993879000.0
assert bus.convert_datetime_type(datetime.datetime(2016, 5, 11)) == 1462924800000.0
assert bus.convert_datetime_type(datetime.time(3, 54)) == 14040000.0
assert bus.convert_datetime_type(datetime.date(2016, 5, 11)) == 1462924800000.0
assert bus.convert_datetime_type(np.datetime64("2016-05-11")) == 1462924800000.0
def test_convert_datetime_type_pandas_types() -> None:
pd = pytest.importorskip("pandas")
assert bus.convert_datetime_type(pd.Timestamp(3000000)) == 3.0
assert bus.convert_datetime_type(pd.Period('1900', 'A-DEC' if pandas_1x else 'Y-DEC')) == -2208988800000.0
assert bus.convert_datetime_type(pd.Period('1900', 'A-DEC' if pandas_1x else 'Y-DEC')) == bus.convert_datetime_type(np.datetime64("1900-01-01"))
assert np.isnan(bus.convert_datetime_type(pd.NaT))
def test_convert_datetime_type_array_ignores_non_datetime_array() -> None:
a = np.arange(0,10,100)
assert bus.convert_datetime_array(a) is a
def test_convert_datetime_array() -> None:
array = np.array(['2018-01-03T15:37:59', '2018-01-03T15:37:59.922452', '2016-05-11', 'NaT'], dtype='datetime64')
assert np.array_equal(
bus.convert_datetime_array(array),
np.array([1514993879000.0, 1514993879922.452, 1462924800000.0, np.nan], dtype="float64"),
equal_nan=True,
)
array = np.array([datetime.date(2023, 12, 15), datetime.date(2023, 12, 16)])
assert np.array_equal(
bus.convert_datetime_array(array),
np.array([1702598400000.0, 1702684800000.0], dtype="float64"),
)
def test_convert_datetime_array_NaT() -> None:
array = np.array(["NaT"], dtype="datetime64")
assert np.array_equal(
bus.convert_datetime_array(array),
np.array([np.nan], dtype="float64"),
equal_nan=True,
)
array = np.array(["NaT"], dtype="timedelta64")
assert np.array_equal(
bus.convert_datetime_array(array),
np.array([np.nan], dtype="float64"),
equal_nan=True,
)
def test_convert_datetime_type_with_tz() -> None:
# This ensures datetimes are sent to BokehJS timezone-naive
# see https://github.com/bokeh/bokeh/issues/6480
pytz = pytest.importorskip("pytz")
for tz in pytz.all_timezones:
assert bus.convert_datetime_type(datetime.datetime(2016, 5, 11, tzinfo=datetime.tzinfo(tz))) == 1462924800000.0
testing = [[nan, 3], [-inf, [inf]]]
expected = [['NaN', 3.0], ['-Infinity', ['Infinity']]]
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
def test_transform_array(dt) -> None:
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array(a)
assert isinstance(out, np.ndarray)
def test_transform_series() -> None:
pd = pytest.importorskip("pandas")
# default int seems to be int64, can't be encoded!
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, np.ndarray)
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
out = bus.transform_series(df)
assert isinstance(out, np.ndarray)
df = pd.Series([1.0, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, np.ndarray)
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
out = bus.transform_series(df)
assert isinstance(out, np.ndarray)
# boolean array
arr = pd.array([True, False])
assert isinstance(arr, pd.arrays.BooleanArray)
out = bus.transform_series(arr)
assert isinstance(out, np.ndarray)
# string array
arr = pd.array(['hello', 'world'])
assert isinstance(arr, pd.arrays.StringArray)
out = bus.transform_series(arr)
assert isinstance(out, np.ndarray)
# floating array
arr = pd.array([1.0, 42.0, np.nan])
assert isinstance(arr, pd.core.arrays.floating.FloatingArray)
out = bus.transform_series(arr)
assert isinstance(out, np.ndarray)
# integer array
arr = pd.array([0, 1])
assert isinstance(arr, pd.core.arrays.integer.IntegerArray)
out = bus.transform_series(arr)
assert isinstance(out, np.ndarray)
# sparse array
arr = pd.arrays.SparseArray([1.0, 2.0, np.nan, np.nan, 5.0])
out = bus.transform_series(arr)
assert isinstance(out, np.ndarray)
# datetime array
arr = pd.array([pd.NaT, datetime.datetime.today(), pd.Timestamp.today()])
assert isinstance(arr, pd.arrays.DatetimeArray)
out = bus.transform_series(arr)
assert isinstance(out, np.ndarray)
# timedelta array
arr = pd.array([datetime.timedelta(seconds=1), pd.Timedelta(0, unit='s')])
assert isinstance(arr, pd.arrays.TimedeltaArray)
out = bus.transform_series(arr)
assert isinstance(out, np.ndarray)
# categorical array
arr = pd.array(pd.Series(['dog', 'cat', 'dog']).astype('category'))
assert isinstance(arr, pd.arrays.Categorical)
out = bus.transform_series(arr)
assert isinstance(out, np.ndarray)
def test_array_encoding_disabled_by_dtype() -> None:
assert len(bus.BINARY_ARRAY_TYPES) > 0
dt_ok = bus.BINARY_ARRAY_TYPES
dt_bad = {np.dtype(x) for x in set(np.sctypeDict.values()) - {np.void}} - dt_ok
for dt in dt_ok:
a = np.empty(shape=10, dtype=dt)
assert not bus.array_encoding_disabled(a)
for dt in dt_bad:
a = np.empty(shape=10, dtype=dt)
assert bus.array_encoding_disabled(a)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Test_make_id |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-public-apis/source_public_apis/components.py | {
"start": 208,
"end": 417
} | class ____(RecordExtractor):
def extract_records(self, response: requests.Response, **kwargs) -> List[Mapping[str, Any]]:
return [{"name": cat} for cat in response.json()["categories"]]
| CustomExtractor |
python | PyCQA__isort | isort/identify.py | {
"start": 482,
"end": 8342
} | class ____(NamedTuple):
line_number: int
indented: bool
module: str
attribute: str | None = None
alias: str | None = None
cimport: bool = False
file_path: Path | None = None
def statement(self) -> str:
import_cmd = "cimport" if self.cimport else "import"
if self.attribute:
import_string = f"from {self.module} {import_cmd} {self.attribute}"
else:
import_string = f"{import_cmd} {self.module}"
if self.alias:
import_string += f" as {self.alias}"
return import_string
def __str__(self) -> str:
return (
f"{self.file_path or ''}:{self.line_number} "
f"{'indented ' if self.indented else ''}{self.statement()}"
)
def imports(
input_stream: TextIO,
config: Config = DEFAULT_CONFIG,
file_path: Path | None = None,
top_only: bool = False,
) -> Iterator[Import]:
"""Parses a python file taking out and categorizing imports."""
in_quote = ""
indexed_input = enumerate(input_stream)
for index, raw_line in indexed_input:
(skipping_line, in_quote) = skip_line(
raw_line, in_quote=in_quote, index=index, section_comments=config.section_comments
)
if top_only and not in_quote and raw_line.startswith(STATEMENT_DECLARATIONS):
break
if skipping_line:
continue
stripped_line = raw_line.strip().split("#")[0]
if stripped_line.startswith(("raise", "yield")):
if stripped_line == "yield":
while not stripped_line or stripped_line == "yield":
try:
index, next_line = next(indexed_input)
except StopIteration:
break
stripped_line = next_line.strip().split("#")[0]
while stripped_line.endswith("\\"):
try:
index, next_line = next(indexed_input)
except StopIteration:
break
stripped_line = next_line.strip().split("#")[0]
continue # pragma: no cover
line, *end_of_line_comment = raw_line.split("#", 1)
statements = [line.strip() for line in line.split(";")]
if end_of_line_comment:
statements[-1] = f"{statements[-1]}#{end_of_line_comment[0]}"
for statement in statements:
line, _raw_line = normalize_line(statement)
if line.startswith(("import ", "cimport ")):
type_of_import = "straight"
elif line.startswith("from "):
type_of_import = "from"
else:
continue # pragma: no cover
import_string, _ = parse_comments(line)
normalized_import_string = (
import_string.replace("import(", "import (").replace("\\", " ").replace("\n", " ")
)
cimports: bool = (
" cimport " in normalized_import_string
or normalized_import_string.startswith("cimport")
)
identified_import = partial(
Import,
index + 1, # line numbers use 1 based indexing
raw_line.startswith((" ", "\t")),
cimport=cimports,
file_path=file_path,
)
if "(" in line.split("#", 1)[0]:
while not line.split("#")[0].strip().endswith(")"):
try:
index, next_line = next(indexed_input)
except StopIteration:
break
line, _ = parse_comments(next_line)
import_string += "\n" + line
else:
while line.strip().endswith("\\"):
try:
index, next_line = next(indexed_input)
except StopIteration:
break
line, _ = parse_comments(next_line)
# Still need to check for parentheses after an escaped line
if "(" in line.split("#")[0] and ")" not in line.split("#")[0]:
import_string += "\n" + line
while not line.split("#")[0].strip().endswith(")"):
try:
index, next_line = next(indexed_input)
except StopIteration:
break
line, _ = parse_comments(next_line)
import_string += "\n" + line
else:
if import_string.strip().endswith(
(" import", " cimport")
) or line.strip().startswith(("import ", "cimport ")):
import_string += "\n" + line
else:
import_string = (
import_string.rstrip().rstrip("\\") + " " + line.lstrip()
)
if type_of_import == "from":
import_string = (
import_string.replace("import(", "import (")
.replace("\\", " ")
.replace("\n", " ")
)
parts = import_string.split(" cimport " if cimports else " import ")
from_import = parts[0].split(" ")
import_string = (" cimport " if cimports else " import ").join(
[from_import[0] + " " + "".join(from_import[1:]), *parts[1:]]
)
just_imports = [
item.replace("{|", "{ ").replace("|}", " }")
for item in strip_syntax(import_string).split()
]
direct_imports = just_imports[1:]
top_level_module = ""
if "as" in just_imports and (just_imports.index("as") + 1) < len(just_imports):
while "as" in just_imports:
attribute = None
as_index = just_imports.index("as")
if type_of_import == "from":
attribute = just_imports[as_index - 1]
top_level_module = just_imports[0]
module = top_level_module + "." + attribute
alias = just_imports[as_index + 1]
direct_imports.remove(attribute)
direct_imports.remove(alias)
direct_imports.remove("as")
just_imports[1:] = direct_imports
if attribute == alias and config.remove_redundant_aliases:
yield identified_import(top_level_module, attribute)
else:
yield identified_import(top_level_module, attribute, alias=alias)
else:
module = just_imports[as_index - 1]
alias = just_imports[as_index + 1]
just_imports.remove(alias)
just_imports.remove("as")
just_imports.remove(module)
if module == alias and config.remove_redundant_aliases:
yield identified_import(module)
else:
yield identified_import(module, alias=alias)
if just_imports:
if type_of_import == "from":
module = just_imports.pop(0)
for attribute in just_imports:
yield identified_import(module, attribute)
else:
for module in just_imports:
yield identified_import(module)
| Import |
python | facelessuser__soupsieve | tests/test_level1/test_comments.py | {
"start": 43,
"end": 1279
} | class ____(util.TestCase):
"""Test comments."""
MARKUP = """
<div>
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre>
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
def test_comments(self):
"""Test comments."""
self.assert_selector(
self.MARKUP,
"""
/* Start comment */
div
/* This still works as new lines and whitespace count as descendant combiner.
This comment won't be seen. */
span#\\33
/* End comment */
""",
['3'],
flags=util.HTML
)
def test_comments_in_pseudo_classes(self):
"""Test comments in pseudo-classes."""
self.assert_selector(
self.MARKUP,
"""
span:not(
/* Comments should basically work like they do in real CSS. */
span#\\33 /* Don't select id 3 */
)
""",
['1', '4', '5', '6'],
flags=util.HTML
)
| TestComments |
python | huggingface__transformers | src/transformers/models/longt5/modeling_longt5.py | {
"start": 47898,
"end": 49244
} | class ____(nn.Module):
"""Transient-Global self attention used in encoder"""
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
super().__init__()
self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention(
config, has_relative_attention_bias=has_relative_attention_bias
)
self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
output_attentions=False,
**kwargs: Any, # to accept past_key_values and use_cache kwargs
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.TransientGlobalSelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5
| LongT5LayerTransientGlobalSelfAttention |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/global_statement.py | {
"start": 167,
"end": 1825
} | class ____:
pass
def fix_constant(value):
"""All this is ok, but try not to use `global` ;)"""
global CONSTANT # [global-statement]
print(CONSTANT)
CONSTANT = value
def global_with_import():
"""Should only warn for global-statement when using `Import` node"""
global sys # [global-statement]
import sys
def global_with_import_from():
"""Should only warn for global-statement when using `ImportFrom` node"""
global namedtuple # [global-statement]
from collections import namedtuple
def global_del():
"""Deleting the global name prevents `global-variable-not-assigned`"""
global CONSTANT # [global-statement]
print(CONSTANT)
del CONSTANT
def global_operator_assign():
"""Operator assigns should only throw a global statement error"""
global CONSTANT # [global-statement]
print(CONSTANT)
CONSTANT += 1
def global_function_assign():
"""Function assigns should only throw a global statement error"""
global CONSTANT # [global-statement]
def CONSTANT():
pass
CONSTANT()
def override_func():
"""Overriding a function should only throw a global statement error"""
global FUNC # [global-statement]
def FUNC():
pass
FUNC()
def override_class():
"""Overriding a class should only throw a global statement error"""
global CLASS # [global-statement]
class CLASS:
pass
CLASS()
def multiple_assignment():
"""Should warn on every assignment."""
global CONSTANT # [global-statement]
CONSTANT = 1
CONSTANT = 2
def no_assignment():
"""Shouldn't warn"""
global CONSTANT
| CLASS |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 591907,
"end": 592524
} | class ____(sgqlc.types.Type):
"""An enterprise organization that a user is a member of."""
__schema__ = github_schema
__field_names__ = ("cursor", "node", "role")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("Organization", graphql_name="node")
"""The item at the end of the edge."""
role = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseUserAccountMembershipRole), graphql_name="role")
"""The role of the user in the enterprise membership."""
| EnterpriseOrganizationMembershipEdge |
python | ansible__ansible | test/units/plugins/cache/test_cache.py | {
"start": 5454,
"end": 6177
} | class ____(unittest.TestCase):
def setUp(self):
self.cache = cache_loader.get('memory')
@pytest.mark.usefixtures('collection_loader')
def test_plugin_load_failure(self):
# See https://github.com/ansible/ansible/issues/18751
# Note no fact_connection config set, so this will fail
with pytest.raises(AnsibleError, match="Unable to load the cache plugin.*json.*"):
cache_loader.get('json')
def test_update(self):
self.cache.set('cache_key', {'key2': 'updatedvalue'})
assert self.cache.get('cache_key')['key2'] == 'updatedvalue'
def test_memory_cachemodule_with_loader():
assert isinstance(cache_loader.get('memory'), MemoryCache)
| TestCachePlugin |
python | PyCQA__pyflakes | pyflakes/checker.py | {
"start": 12732,
"end": 12850
} | class ____(Assignment):
"""
Represents binding a name with an assignment expression.
"""
| NamedExprAssignment |
python | facelessuser__soupsieve | tests/test_level1/test_id.py | {
"start": 89,
"end": 841
} | class ____(util.TestCase):
"""Test ID selectors."""
MARKUP = """
<div>
<p>Some text <span id="1"> in a paragraph</span>.
<a id="2" href="http://google.com">Link</a>
</p>
</div>
"""
def test_id(self):
"""Test ID."""
self.assert_selector(
self.MARKUP,
"#\\31",
["1"],
flags=util.HTML
)
def test_tag_and_id(self):
"""Test tag and ID."""
self.assert_selector(
self.MARKUP,
"a#\\32",
["2"],
flags=util.HTML
)
def test_malformed_id(self):
"""Test malformed ID."""
# Malformed id
self.assert_raises('td#.some-class', SelectorSyntaxError)
| TestId |
python | Textualize__textual | src/textual/binding.py | {
"start": 1123,
"end": 1188
} | class ____(Exception):
"""A binding was not found."""
| NoBinding |
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 192965,
"end": 197824
} | class ____(MyBaseModel):
a: str
"""
)
assert module.B.model_json_schema() == {
'$defs': {
'A': {
'properties': {'a': {'title': 'A', 'type': 'string'}},
'required': ['a'],
'title': 'A',
'type': 'object',
}
},
'properties': {'b': {'$ref': '#/$defs/A'}},
'required': ['b'],
'title': 'B',
'type': 'object',
}
def test_enum_complex_value() -> None:
"""https://github.com/pydantic/pydantic/issues/7045"""
class MyEnum(Enum):
foo = (1, 2)
bar = (2, 3)
ta = TypeAdapter(MyEnum)
# insert_assert(ta.json_schema())
assert ta.json_schema() == {'enum': [[1, 2], [2, 3]], 'title': 'MyEnum', 'type': 'array'}
def test_json_schema_serialization_defaults_required():
class Model(BaseModel):
a: str = 'a'
class SerializationDefaultsRequiredModel(Model):
model_config = ConfigDict(json_schema_serialization_defaults_required=True)
model_schema = Model.model_json_schema(mode='serialization')
sdr_model_schema = SerializationDefaultsRequiredModel.model_json_schema(mode='serialization')
assert 'required' not in model_schema
assert sdr_model_schema['required'] == ['a']
def test_json_schema_mode_override():
class Model(BaseModel):
a: Json[int] # requires a string to validate, but will dump an int
class ValidationModel(Model):
model_config = ConfigDict(json_schema_mode_override='validation', title='Model')
class SerializationModel(Model):
model_config = ConfigDict(json_schema_mode_override='serialization', title='Model')
# Ensure the ValidationModel and SerializationModel schemas do not depend on the value of the mode
assert ValidationModel.model_json_schema(mode='validation') == ValidationModel.model_json_schema(
mode='serialization'
)
assert SerializationModel.model_json_schema(mode='validation') == SerializationModel.model_json_schema(
mode='serialization'
)
# Ensure the two submodels models have different JSON schemas
assert ValidationModel.model_json_schema() != SerializationModel.model_json_schema()
# Ensure the submodels' JSON schemas match the expected mode even when the opposite value is specified:
assert ValidationModel.model_json_schema(mode='serialization') == Model.model_json_schema(mode='validation')
assert SerializationModel.model_json_schema(mode='validation') == Model.model_json_schema(mode='serialization')
def test_models_json_schema_generics() -> None:
class G(BaseModel, Generic[T]):
foo: T
class M(BaseModel):
foo: Literal['a', 'b']
GLiteral = G[Literal['a', 'b']]
assert models_json_schema(
[
(GLiteral, 'serialization'),
(GLiteral, 'validation'),
(M, 'validation'),
]
) == (
{
(GLiteral, 'serialization'): {'$ref': '#/$defs/G_Literal__a____b___'},
(GLiteral, 'validation'): {'$ref': '#/$defs/G_Literal__a____b___'},
(M, 'validation'): {'$ref': '#/$defs/M'},
},
{
'$defs': {
'G_Literal__a____b___': {
'properties': {'foo': {'enum': ['a', 'b'], 'title': 'Foo', 'type': 'string'}},
'required': ['foo'],
'title': "G[Literal['a', 'b']]",
'type': 'object',
},
'M': {
'properties': {'foo': {'enum': ['a', 'b'], 'title': 'Foo', 'type': 'string'}},
'required': ['foo'],
'title': 'M',
'type': 'object',
},
}
},
)
def test_recursive_non_generic_model() -> None:
class Foo(BaseModel):
maybe_bar: Union[None, 'Bar']
class Bar(BaseModel):
foo: Foo
# insert_assert(Bar(foo=Foo(maybe_bar=None)).model_dump())
assert Bar.model_validate({'foo': {'maybe_bar': None}}).model_dump() == {'foo': {'maybe_bar': None}}
# insert_assert(Bar.model_json_schema())
assert Bar.model_json_schema() == {
'$defs': {
'Bar': {
'properties': {'foo': {'$ref': '#/$defs/Foo'}},
'required': ['foo'],
'title': 'Bar',
'type': 'object',
},
'Foo': {
'properties': {'maybe_bar': {'anyOf': [{'$ref': '#/$defs/Bar'}, {'type': 'null'}]}},
'required': ['maybe_bar'],
'title': 'Foo',
'type': 'object',
},
},
'$ref': '#/$defs/Bar',
}
def test_module_with_colon_in_name(create_module):
module = create_module(
# language=Python
"""
from pydantic import BaseModel
| A |
python | tensorflow__tensorflow | tensorflow/compiler/tests/segment_reduction_ops_test.py | {
"start": 1053,
"end": 12474
} | class ____(xla_test.XLATestCase):
"""Test cases for segment reduction ops."""
def _findDevice(self, device_name):
devices = device_lib.list_local_devices()
for d in devices:
if d.device_type == device_name:
return True
return False
def _segmentReduction(self, op, data, indices, num_segments):
with self.session() as sess, self.test_scope():
d = array_ops.placeholder(data.dtype, shape=data.shape)
if isinstance(indices, int):
i = array_ops.placeholder(np.int32, shape=[])
else:
i = array_ops.placeholder(indices.dtype, shape=indices.shape)
return sess.run(op(d, i, num_segments), {d: data, i: indices})
def _unsortedSegmentSum(self, data, indices, num_segments):
return self._segmentReduction(math_ops.unsorted_segment_sum, data, indices,
num_segments)
def _segmentSumV2(self, data, indices, num_segments):
return self._segmentReduction(math_ops.segment_sum_v2, data, indices,
num_segments)
def _segmentProdV2(self, data, indices, num_segments):
return self._segmentReduction(math_ops.segment_prod_v2, data, indices,
num_segments)
def _segmentMinV2(self, data, indices, num_segments):
return self._segmentReduction(math_ops.segment_min_v2, data, indices,
num_segments)
def _segmentMaxV2(self, data, indices, num_segments):
return self._segmentReduction(math_ops.segment_max_v2, data, indices,
num_segments)
def _unsortedSegmentProd(self, data, indices, num_segments):
return self._segmentReduction(math_ops.unsorted_segment_prod, data, indices,
num_segments)
def _unsortedSegmentMin(self, data, indices, num_segments):
return self._segmentReduction(math_ops.unsorted_segment_min, data, indices,
num_segments)
def _unsortedSegmentMax(self, data, indices, num_segments):
return self._segmentReduction(math_ops.unsorted_segment_max, data, indices,
num_segments)
def testSegmentSum(self):
for dtype in self.numeric_types:
self.assertAllClose(
np.array([1, 0, 2, 12], dtype=dtype),
self._segmentSumV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 4))
def testSegmentProd(self):
for dtype in self.numeric_types:
self.assertAllClose(
np.array([0, 1, 2, 60], dtype=dtype),
self._segmentProdV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 4))
def testSegmentProdNumSegmentsLess(self):
for dtype in self.numeric_types:
self.assertAllClose(
np.array([0, 1, 2], dtype=dtype),
self._segmentProdV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 3))
def testSegmentProdNumSegmentsMore(self):
for dtype in self.numeric_types:
self.assertAllClose(
np.array([0, 1, 2, 60, 1], dtype=dtype),
self._segmentProdV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 5))
def testSegmentMin(self):
for dtype in self.int_types | self.float_types:
maxval = dtypes.as_dtype(dtype).max
if dtype == np.float64 and self._findDevice("TPU"):
maxval = np.inf
self.assertAllClose(
np.array([0, maxval, 2, 3], dtype=dtype),
self._segmentMinV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 4))
def testSegmentMinNumSegmentsLess(self):
for dtype in self.int_types | self.float_types:
maxval = dtypes.as_dtype(dtype).max
if dtype == np.float64 and self._findDevice("TPU"):
maxval = np.inf
self.assertAllClose(
np.array([0, maxval, 2], dtype=dtype),
self._segmentMinV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 3))
def testSegmentMinNumSegmentsMore(self):
for dtype in self.int_types | self.float_types:
maxval = dtypes.as_dtype(dtype).max
if dtype == np.float64 and self._findDevice("TPU"):
maxval = np.inf
self.assertAllClose(
np.array([0, maxval, 2, 3, maxval], dtype=dtype),
self._segmentMinV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 5))
def testSegmentMax(self):
for dtype in self.int_types | self.float_types:
minval = dtypes.as_dtype(dtype).min
if dtype == np.float64 and self._findDevice("TPU"):
minval = -np.inf
self.assertAllClose(
np.array([1, minval, 2, 5], dtype=dtype),
self._segmentMaxV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 4))
def testSegmentMaxNumSegmentsLess(self):
for dtype in self.int_types | self.float_types:
minval = dtypes.as_dtype(dtype).min
if dtype == np.float64 and self._findDevice("TPU"):
minval = -np.inf
self.assertAllClose(
np.array([1, minval, 2], dtype=dtype),
self._segmentMaxV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 3))
def testSegmentMaxNumSegmentsMore(self):
for dtype in self.int_types | self.float_types:
minval = dtypes.as_dtype(dtype).min
if dtype == np.float64 and self._findDevice("TPU"):
minval = -np.inf
self.assertAllClose(
np.array([1, minval, 2, 5, minval], dtype=dtype),
self._segmentMaxV2(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([0, 0, 2, 3, 3, 3], dtype=np.int32), 5))
def testUnsortedSegmentSum0DIndices1DData(self):
for dtype in self.numeric_types:
self.assertAllClose(
np.array(
[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 1, 2, 3, 4, 5],
[0, 0, 0, 0, 0, 0]],
dtype=dtype),
self._unsortedSegmentSum(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype), 2, 4))
def testUnsortedSegmentSum1DIndices1DData(self):
for dtype in self.numeric_types:
self.assertAllClose(
np.array([1, 3, 2, 9], dtype=dtype),
self._unsortedSegmentSum(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([3, 0, 2, 1, 3, 3], dtype=np.int32), 4))
def testUnsortedSegmentSum1DIndices1DDataNegativeIndices(self):
for dtype in self.numeric_types:
self.assertAllClose(
np.array([6, 3, 0, 6], dtype=dtype),
self._unsortedSegmentSum(
np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
def testUnsortedSegmentSum1DIndices2DDataDisjoint(self):
for dtype in self.numeric_types:
data = np.array(
[[0, 1, 2, 3], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43],
[50, 51, 52, 53]],
dtype=dtype)
indices = np.array([8, 1, 0, 3, 7], dtype=np.int32)
num_segments = 10
y = self._unsortedSegmentSum(data, indices, num_segments)
self.assertAllClose(
np.array(
[[30, 31, 32, 33], [20, 21, 22, 23], [0, 0, 0, 0],
[40, 41, 42, 43], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
[50, 51, 52, 53], [0, 1, 2, 3], [0, 0, 0, 0]],
dtype=dtype), y)
def testUnsortedSegmentSum1DIndices2DDataNonDisjoint(self):
for dtype in self.numeric_types:
data = np.array(
[[0, 1, 2, 3], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43],
[50, 51, 52, 53]],
dtype=dtype)
indices = np.array([0, 1, 2, 0, 1], dtype=np.int32)
num_segments = 4
y = self._unsortedSegmentSum(data, indices, num_segments)
self.assertAllClose(
np.array(
[[40, 42, 44, 46], [70, 72, 74, 76], [30, 31, 32, 33],
[0, 0, 0, 0]],
dtype=dtype), y)
def testUnsortedSegmentSum2DIndices3DData(self):
for dtype in self.numeric_types:
data = np.array(
[[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]], [[
80, 81, 82
], [123, 124, 125]], [[103, 104, 105], [106, 107, 108]]],
dtype=dtype)
indices = np.array([[3, 5], [3, 1], [5, 0], [6, 2]], dtype=np.int32)
num_segments = 8
y = self._unsortedSegmentSum(data, indices, num_segments)
self.assertAllClose(
np.array(
[[123, 124, 125], [110, 111, 112], [106, 107, 108], [
100, 102, 104
], [0, 0, 0.], [90, 92, 94], [103, 104, 105], [0, 0, 0]],
dtype=dtype), y)
def testUnsortedSegmentSum1DIndices3DData(self):
for dtype in self.numeric_types:
data = np.array(
[[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]], [[
120, 121, 122
], [123, 124, 125]], [[103, 104, 105], [106, 107, 108]]],
dtype=dtype)
indices = np.array([3, 0, 2, 5], dtype=np.int32)
num_segments = 6
y = self._unsortedSegmentSum(data, indices, num_segments)
self.assertAllClose(
np.array(
[[[100, 101, 102.], [110, 111, 112]], [[0, 0, 0], [0, 0, 0]],
[[120, 121, 122], [123, 124, 125]], [[0, 1, 2.], [10, 11, 12]],
[[0, 0, 0], [0, 0, 0]], [[103, 104, 105], [106, 107, 108]]],
dtype=dtype), y)
def testUnsortedSegmentSumShapeError(self):
for dtype in self.numeric_types:
data = np.ones((4, 8, 7), dtype=dtype)
indices = np.ones((3, 2), dtype=np.int32)
num_segments = 4
self.assertRaises(
ValueError,
functools.partial(self._segmentReduction,
math_ops.unsorted_segment_sum, data, indices,
num_segments))
def testUnsortedSegmentOps1DIndices1DDataNegativeIndices(self):
"""Tests for min, max, and prod ops.
These share most of their implementation with sum, so we only test basic
functionality.
"""
for dtype in self.numeric_types:
self.assertAllClose(
np.array([8, 3, 1, 0], dtype=dtype),
self._unsortedSegmentProd(
np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
for dtype in self.int_types | self.float_types:
minval = dtypes.as_dtype(dtype).min
maxval = dtypes.as_dtype(dtype).max
self.assertAllClose(
np.array([2, 3, maxval, 0], dtype=dtype),
self._unsortedSegmentMin(
np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
self.assertAllClose(
np.array([4, 3, minval, 6], dtype=dtype),
self._unsortedSegmentMax(
np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
if __name__ == "__main__":
googletest.main()
| SegmentReductionOpsTest |
python | keras-team__keras | guides/understanding_masking_and_padding.py | {
"start": 5218,
"end": 6682
} | class ____(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.embedding = layers.Embedding(
input_dim=5000, output_dim=16, mask_zero=True
)
self.lstm = layers.LSTM(32)
def call(self, inputs):
x = self.embedding(inputs)
# Note that you could also prepare a `mask` tensor manually.
# It only needs to be a boolean tensor
# with the right shape, i.e. (batch_size, timesteps).
mask = self.embedding.compute_mask(inputs)
output = self.lstm(
x, mask=mask
) # The layer will ignore the masked values
return output
layer = MyLayer()
x = np.random.random((32, 10)) * 100
x = x.astype("int32")
layer(x)
"""
## Supporting masking in your custom layers
"""
"""
Sometimes, you may need to write layers that generate a mask (like `Embedding`), or
layers that need to modify the current mask.
For instance, any layer that produces a tensor with a different time dimension than its
input, such as a `Concatenate` layer that concatenates on the time dimension, will
need to modify the current mask so that downstream layers will be able to properly
take masked timesteps into account.
To do this, your layer should implement the `layer.compute_mask()` method, which
produces a new mask given the input and the current mask.
Here is an example of a `TemporalSplit` layer that needs to modify the current mask.
"""
| MyLayer |
python | yaml__pyyaml | tests/legacy_tests/test_multi_constructor.py | {
"start": 553,
"end": 1573
} | class ____(yaml.FullLoader):
pass
def test_multi_constructor(input_filename, code_filename, verbose=False):
with open(input_filename, 'rb') as file:
input = file.read().decode('utf-8')
with open(code_filename, 'rb') as file:
native = _load_code(file.read())
# default multi constructor for ! and !! tags
Multi1.add_multi_constructor('!', myconstructor1)
Multi1.add_multi_constructor('tag:yaml.org,2002:', myconstructor1)
data = yaml.load(input, Loader=Multi1)
if verbose:
print('Multi1:')
print(data)
print(native)
assert(data == native)
# default multi constructor for all tags
Multi2.add_multi_constructor(None, myconstructor2)
data = yaml.load(input, Loader=Multi2)
if verbose:
print('Multi2:')
print(data)
print(native)
assert(data == native)
test_multi_constructor.unittest = ['.multi', '.code']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
| Multi2 |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox35.py | {
"start": 315,
"end": 864
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox35.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"text_rotation": -90})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/side_channel/engine_configuration_channel.py | {
"start": 591,
"end": 4937
} | class ____(SideChannel):
"""
This is the SideChannel for engine configuration exchange. The data in the
engine configuration is as follows :
- int width;
- int height;
- int qualityLevel;
- float timeScale;
- int targetFrameRate;
- int captureFrameRate;
"""
class ConfigurationType(IntEnum):
SCREEN_RESOLUTION = 0
QUALITY_LEVEL = 1
TIME_SCALE = 2
TARGET_FRAME_RATE = 3
CAPTURE_FRAME_RATE = 4
def __init__(self) -> None:
super().__init__(uuid.UUID("e951342c-4f7e-11ea-b238-784f4387d1f7"))
def on_message_received(self, msg: IncomingMessage) -> None:
"""
Is called by the environment to the side channel. Can be called
multiple times per step if multiple messages are meant for that
SideChannel.
Note that Python should never receive an engine configuration from
Unity
"""
raise UnityCommunicationException(
"The EngineConfigurationChannel received a message from Unity, "
+ "this should not have happened."
)
def set_configuration_parameters(
self,
width: Optional[int] = None,
height: Optional[int] = None,
quality_level: Optional[int] = None,
time_scale: Optional[float] = None,
target_frame_rate: Optional[int] = None,
capture_frame_rate: Optional[int] = None,
) -> None:
"""
Sets the engine configuration. Takes as input the configurations of the
engine.
:param width: Defines the width of the display. (Must be set alongside height)
:param height: Defines the height of the display. (Must be set alongside width)
:param quality_level: Defines the quality level of the simulation.
:param time_scale: Defines the multiplier for the deltatime in the
simulation. If set to a higher value, time will pass faster in the
simulation but the physics might break.
:param target_frame_rate: Instructs simulation to try to render at a
specified frame rate.
:param capture_frame_rate: Instructs the simulation to consider time between
updates to always be constant, regardless of the actual frame rate.
"""
if (width is None and height is not None) or (
width is not None and height is None
):
raise UnitySideChannelException(
"You cannot set the width/height of the screen resolution without also setting the height/width"
)
if width is not None and height is not None:
screen_msg = OutgoingMessage()
screen_msg.write_int32(self.ConfigurationType.SCREEN_RESOLUTION)
screen_msg.write_int32(width)
screen_msg.write_int32(height)
super().queue_message_to_send(screen_msg)
if quality_level is not None:
quality_level_msg = OutgoingMessage()
quality_level_msg.write_int32(self.ConfigurationType.QUALITY_LEVEL)
quality_level_msg.write_int32(quality_level)
super().queue_message_to_send(quality_level_msg)
if time_scale is not None:
time_scale_msg = OutgoingMessage()
time_scale_msg.write_int32(self.ConfigurationType.TIME_SCALE)
time_scale_msg.write_float32(time_scale)
super().queue_message_to_send(time_scale_msg)
if target_frame_rate is not None:
target_frame_rate_msg = OutgoingMessage()
target_frame_rate_msg.write_int32(self.ConfigurationType.TARGET_FRAME_RATE)
target_frame_rate_msg.write_int32(target_frame_rate)
super().queue_message_to_send(target_frame_rate_msg)
if capture_frame_rate is not None:
capture_frame_rate_msg = OutgoingMessage()
capture_frame_rate_msg.write_int32(
self.ConfigurationType.CAPTURE_FRAME_RATE
)
capture_frame_rate_msg.write_int32(capture_frame_rate)
super().queue_message_to_send(capture_frame_rate_msg)
def set_configuration(self, config: EngineConfig) -> None:
"""
Sets the engine configuration. Takes as input an EngineConfig.
"""
self.set_configuration_parameters(**config._asdict())
| EngineConfigurationChannel |
python | pypa__pipenv | pipenv/vendor/zipp/__init__.py | {
"start": 5238,
"end": 11801
} | class ____:
"""
A :class:`importlib.resources.abc.Traversable` interface for zip files.
Implements many of the features users enjoy from
:class:`pathlib.Path`.
Consider a zip file with this structure::
.
├── a.txt
└── b
├── c.txt
└── d
└── e.txt
>>> data = io.BytesIO()
>>> zf = zipfile.ZipFile(data, 'w')
>>> zf.writestr('a.txt', 'content of a')
>>> zf.writestr('b/c.txt', 'content of c')
>>> zf.writestr('b/d/e.txt', 'content of e')
>>> zf.filename = 'mem/abcde.zip'
Path accepts the zipfile object itself or a filename
>>> path = Path(zf)
From there, several path operations are available.
Directory iteration (including the zip file itself):
>>> a, b = path.iterdir()
>>> a
Path('mem/abcde.zip', 'a.txt')
>>> b
Path('mem/abcde.zip', 'b/')
name property:
>>> b.name
'b'
join with divide operator:
>>> c = b / 'c.txt'
>>> c
Path('mem/abcde.zip', 'b/c.txt')
>>> c.name
'c.txt'
Read text:
>>> c.read_text(encoding='utf-8')
'content of c'
existence:
>>> c.exists()
True
>>> (b / 'missing.txt').exists()
False
Coercion to string:
>>> import os
>>> str(c).replace(os.sep, posixpath.sep)
'mem/abcde.zip/b/c.txt'
At the root, ``name``, ``filename``, and ``parent``
resolve to the zipfile.
>>> str(path)
'mem/abcde.zip/'
>>> path.name
'abcde.zip'
>>> path.filename == pathlib.Path('mem/abcde.zip')
True
>>> str(path.parent)
'mem'
If the zipfile has no filename, such attributes are not
valid and accessing them will raise an Exception.
>>> zf.filename = None
>>> path.name
Traceback (most recent call last):
...
TypeError: ...
>>> path.filename
Traceback (most recent call last):
...
TypeError: ...
>>> path.parent
Traceback (most recent call last):
...
TypeError: ...
# workaround python/cpython#106763
>>> pass
"""
__repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
def __init__(self, root, at=""):
"""
Construct a Path from a ZipFile or filename.
Note: When the source is an existing ZipFile object,
its type (__class__) will be mutated to a
specialized type. If the caller wishes to retain the
original type, the caller should either create a
separate ZipFile object or pass a filename.
"""
self.root = FastLookup.make(root)
self.at = at
def __eq__(self, other):
"""
>>> Path(zipfile.ZipFile(io.BytesIO(), 'w')) == 'foo'
False
"""
if self.__class__ is not other.__class__:
return NotImplemented
return (self.root, self.at) == (other.root, other.at)
def __hash__(self):
return hash((self.root, self.at))
def open(self, mode='r', *args, pwd=None, **kwargs):
"""
Open this entry as text or binary following the semantics
of ``pathlib.Path.open()`` by passing arguments through
to io.TextIOWrapper().
"""
if self.is_dir():
raise IsADirectoryError(self)
zip_mode = mode[0]
if not self.exists() and zip_mode == 'r':
raise FileNotFoundError(self)
stream = self.root.open(self.at, zip_mode, pwd=pwd)
if 'b' in mode:
if args or kwargs:
raise ValueError("encoding args invalid for binary operation")
return stream
# Text mode:
encoding, args, kwargs = _extract_text_encoding(*args, **kwargs)
return io.TextIOWrapper(stream, encoding, *args, **kwargs)
def _base(self):
return pathlib.PurePosixPath(self.at or self.root.filename)
@property
def name(self):
return self._base().name
@property
def suffix(self):
return self._base().suffix
@property
def suffixes(self):
return self._base().suffixes
@property
def stem(self):
return self._base().stem
@property
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
def read_text(self, *args, **kwargs):
encoding, args, kwargs = _extract_text_encoding(*args, **kwargs)
with self.open('r', encoding, *args, **kwargs) as strm:
return strm.read()
def read_bytes(self):
with self.open('rb') as strm:
return strm.read()
def _is_child(self, path):
return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
def _next(self, at):
return self.__class__(self.root, at)
def is_dir(self):
return not self.at or self.at.endswith("/")
def is_file(self):
return self.exists() and not self.is_dir()
def exists(self):
return self.at in self.root._name_set()
def iterdir(self):
if not self.is_dir():
raise ValueError("Can't listdir a file")
subs = map(self._next, self.root.namelist())
return filter(self._is_child, subs)
def match(self, path_pattern):
return pathlib.PurePosixPath(self.at).match(path_pattern)
def is_symlink(self):
"""
Return whether this path is a symlink.
"""
info = self.root.getinfo(self.at)
mode = info.external_attr >> 16
return stat.S_ISLNK(mode)
def glob(self, pattern):
if not pattern:
raise ValueError(f"Unacceptable pattern: {pattern!r}")
prefix = re.escape(self.at)
tr = Translator(seps='/')
matches = re.compile(prefix + tr.translate(pattern)).fullmatch
return map(self._next, filter(matches, self.root.namelist()))
def rglob(self, pattern):
return self.glob(f'**/{pattern}')
def relative_to(self, other, *extra):
return posixpath.relpath(str(self), str(other.joinpath(*extra)))
def __str__(self):
return posixpath.join(self.root.filename, self.at)
def __repr__(self):
return self.__repr.format(self=self)
def joinpath(self, *other):
next = posixpath.join(self.at, *other)
return self._next(self.root.resolve_dir(next))
__truediv__ = joinpath
@property
def parent(self):
if not self.at:
return self.filename.parent
parent_at = posixpath.dirname(self.at.rstrip('/'))
if parent_at:
parent_at += '/'
return self._next(parent_at)
| Path |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 11420,
"end": 11688
} | class ____(LTTextBox):
def analyze(self, laparams):
LTTextBox.analyze(self, laparams)
self._objs = csort(self._objs, key=lambda obj: -obj.x1)
return
def get_writing_mode(self):
return 'tb-rl'
## LTTextGroup
##
| LTTextBoxVertical |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks.py | {
"start": 16697,
"end": 18980
} | class ____:
"""Interface for listeners that take action before or after checkpoint save.
`CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is
triggered, and provides callbacks at the following points:
- before using the session
- before each call to `Saver.save()`
- after each call to `Saver.save()`
- at the end of session
To use a listener, implement a class and pass the listener to a
`CheckpointSaverHook`, as in this example:
```python
class ExampleCheckpointSaverListener(CheckpointSaverListener):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def before_save(self, session, global_step_value):
print('About to write a checkpoint')
def after_save(self, session, global_step_value):
print('Done writing checkpoint.')
if decided_to_stop_training():
return True
def end(self, session, global_step_value):
print('Done with the session.')
...
listener = ExampleCheckpointSaverListener()
saver_hook = tf.estimator.CheckpointSaverHook(
checkpoint_dir, listeners=[listener])
with
tf.compat.v1.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]):
...
```
A `CheckpointSaverListener` may simply take some action after every
checkpoint save. It is also possible for the listener to use its own schedule
to act less frequently, e.g. based on global_step_value. In this case,
implementors should implement the `end()` method to handle actions related to
the last checkpoint save. But the listener should not act twice if
`after_save()` already handled this last checkpoint save.
A `CheckpointSaverListener` can request training to be stopped, by returning
True in `after_save`. Please note that, in replicated distributed training
setting, only `chief` should use this behavior. Otherwise each worker will do
their own evaluation, which may be wasteful of resources.
"""
def begin(self):
pass
def before_save(self, session, global_step_value):
pass
def after_save(self, session, global_step_value):
pass
def end(self, session, global_step_value):
pass
@tf_export(v1=["train.CheckpointSaverHook"])
| CheckpointSaverListener |
python | pypa__warehouse | tests/common/db/packaging.py | {
"start": 1507,
"end": 1823
} | class ____(WarehouseFactory):
class Meta:
model = Description
id = factory.Faker("uuid4", cast_to=None)
raw = factory.Faker("paragraph")
html = factory.LazyAttribute(lambda o: readme.render(o.raw))
rendered_by = factory.LazyAttribute(lambda o: readme.renderer_version())
| DescriptionFactory |
python | django__django | tests/postgres_tests/models.py | {
"start": 5462,
"end": 5738
} | class ____(PostgreSQLModel):
"""
To test postgres-specific aggregation functions for statistics
"""
int1 = models.IntegerField()
int2 = models.IntegerField()
related_field = models.ForeignKey(AggregateTestModel, models.SET_NULL, null=True)
| StatTestModel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/unitofwork.py | {
"start": 22855,
"end": 24121
} | class ____(_PostSortRec):
__slots__ = ("mapper", "sort_key")
def __init__(self, uow, mapper):
self.mapper = mapper
self.sort_key = ("DeleteAll", mapper._sort_key)
assert mapper is mapper.base_mapper
@util.preload_module("sqlalchemy.orm.persistence")
def execute(self, uow):
util.preloaded.orm_persistence._delete_obj(
self.mapper,
uow.states_for_mapper_hierarchy(self.mapper, True, False),
uow,
)
def per_state_flush_actions(self, uow):
states = list(
uow.states_for_mapper_hierarchy(self.mapper, True, False)
)
base_mapper = self.mapper.base_mapper
save_all = _SaveUpdateAll(uow, base_mapper)
for state in states:
# keep saves before deletes -
# this ensures 'row switch' operations work
action = _DeleteState(uow, state)
uow.dependencies.add((save_all, action))
yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, True)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.mapper)
| _DeleteAll |
python | PyCQA__pylint | tests/functional/u/use/use_implicit_booleaness_not_comparison.py | {
"start": 4845,
"end": 4953
} | class ____:
@classmethod
@property
def parent_function(cls):
return {}
| ParentWithProperty |
python | kamyu104__LeetCode-Solutions | Python/jump-game-vii.py | {
"start": 614,
"end": 1142
} | class ____(object):
def canReach(self, s, minJump, maxJump):
"""
:type s: str
:type minJump: int
:type maxJump: int
:rtype: bool
"""
q = collections.deque([0])
reachable = 0
while q:
i = q.popleft()
for j in xrange(max(i+minJump, reachable+1), min(i+maxJump+1, len(s))):
if s[j] != '0':
continue
q.append(j)
reachable = i+maxJump
return i == len(s)-1
| Solution2 |
python | PyCQA__isort | tests/unit/test_exceptions.py | {
"start": 2726,
"end": 3061
} | class ____(TestISortError):
def setup_class(self):
self.instance: exceptions.LiteralSortTypeMismatch = exceptions.LiteralSortTypeMismatch(
tuple, list
)
def test_variables(self):
assert self.instance.kind is tuple
assert self.instance.expected_kind is list
| TestLiteralSortTypeMismatch |
python | scikit-learn__scikit-learn | sklearn/utils/_set_output.py | {
"start": 3017,
"end": 4528
class ____:
    # Adapter used by scikit-learn's ``set_output`` machinery to materialize
    # transformer results as pandas DataFrames.
    container_lib = "pandas"
    def create_container(self, X_output, X_original, columns, inplace=True):
        """Return ``X_output`` as a DataFrame carrying the requested columns.

        ``X_original`` only donates its index when ``X_output`` has none of its
        own; with ``inplace=True`` an existing DataFrame is renamed in place
        rather than copied.
        """
        pd = check_library_installed("pandas")
        columns = get_columns(columns)
        if not inplace or not isinstance(X_output, pd.DataFrame):
            # In all these cases, we need to create a new DataFrame
            # Unfortunately, we cannot use `getattr(container, "index")`
            # because `list` exposes an `index` attribute.
            if isinstance(X_output, pd.DataFrame):
                index = X_output.index
            elif isinstance(X_original, (pd.DataFrame, pd.Series)):
                index = X_original.index
            else:
                index = None
            # We don't pass columns here because it would intend columns selection
            # instead of renaming.
            X_output = pd.DataFrame(X_output, index=index, copy=not inplace)
        if columns is not None:
            return self.rename_columns(X_output, columns)
        return X_output
    def is_supported_container(self, X):
        # True only for genuine pandas DataFrames.
        pd = check_library_installed("pandas")
        return isinstance(X, pd.DataFrame)
    def rename_columns(self, X, columns):
        # we cannot use `rename` since it takes a dictionary and at this stage we have
        # potentially duplicate column names in `X`
        X.columns = columns
        return X
    def hstack(self, Xs):
        # Column-wise concatenation of a sequence of DataFrames.
        pd = check_library_installed("pandas")
        return pd.concat(Xs, axis=1)
python | scrapy__scrapy | scrapy/exceptions.py | {
"start": 238,
"end": 326
class ____(Exception):
    """Indicates a missing configuration situation.

    NOTE(review): presumably raised by components whose required settings are
    absent so the framework can disable them -- confirm against the callers.
    """
| NotConfigured |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py | {
"start": 27400,
"end": 35327
class ____(AwsBaseOperator[EmrHook]):
    """
    Creates an EMR JobFlow, reading the config from the EMR connection.
    A dictionary of JobFlow overrides can be passed that override the config from the connection.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:EmrCreateJobFlowOperator`
    :param emr_conn_id: :ref:`Amazon Elastic MapReduce Connection <howto/connection:emr>`.
        Use to receive an initial Amazon EMR cluster configuration:
        ``boto3.client('emr').run_job_flow`` request body.
        If this is None or empty or the connection does not exist,
        then an empty initial configuration is used.
    :param job_flow_overrides: boto3 style arguments or reference to an arguments file
        (must be '.json') to override specific ``emr_conn_id`` extra parameters. (templated)
    :param aws_conn_id: The Airflow connection used for AWS credentials.
        If this is ``None`` or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then default boto3 configuration would be used (and must be
        maintained on each worker node).
    :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :param verify: Whether or not to verify SSL certificates. See:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
    :param wait_for_completion: Whether to finish task immediately after creation (False) or wait for jobflow
        completion (True)
        (default: None)
    :param wait_policy: Deprecated. Use `wait_for_completion` instead. Whether to finish the task immediately after creation (None) or:
        - wait for the jobflow completion (WaitPolicy.WAIT_FOR_COMPLETION)
        - wait for the jobflow completion and cluster to terminate (WaitPolicy.WAIT_FOR_STEPS_COMPLETION)
        (default: None)
    :param waiter_max_attempts: Maximum number of tries before failing.
    :param waiter_delay: Number of seconds between polling the state of the notebook.
    :param deferrable: If True, the operator will wait asynchronously for the crawl to complete.
        This implies waiting for completion. This mode requires aiobotocore module to be installed.
        (default: False)
    """
    # Hook class instantiated lazily by AwsBaseOperator as ``self.hook``.
    aws_hook_class = EmrHook
    template_fields: Sequence[str] = aws_template_fields(
        "job_flow_overrides",
        "waiter_delay",
        "waiter_max_attempts",
    )
    template_ext: Sequence[str] = (".json",)
    template_fields_renderers = {"job_flow_overrides": "json"}
    ui_color = "#f9c915"
    operator_extra_links = (
        EmrClusterLink(),
        EmrLogsLink(),
    )
    def __init__(
        self,
        *,
        emr_conn_id: str | None = "emr_default",
        job_flow_overrides: str | dict[str, Any] | None = None,
        wait_for_completion: bool | None = None,
        wait_policy: WaitPolicy | None = None,
        waiter_max_attempts: int | None = None,
        waiter_delay: int | None = None,
        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.emr_conn_id = emr_conn_id
        self.job_flow_overrides = job_flow_overrides or {}
        self.wait_for_completion = wait_for_completion
        self.waiter_max_attempts = waiter_max_attempts or 60
        self.waiter_delay = waiter_delay or 60
        self.deferrable = deferrable
        if wait_policy is not None:
            warnings.warn(
                "`wait_policy` parameter is deprecated and will be removed in a future release; "
                "please use `wait_for_completion` (bool) instead.",
                AirflowProviderDeprecationWarning,
                stacklevel=2,
            )
            if wait_for_completion is not None:
                raise ValueError(
                    "Cannot specify both `wait_for_completion` and deprecated `wait_policy`. "
                    "Please use `wait_for_completion` (bool)."
                )
            # Map the deprecated enum onto the boolean flag: both WAIT_* values
            # mean "block until the job flow completes".
            self.wait_for_completion = wait_policy in (
                WaitPolicy.WAIT_FOR_COMPLETION,
                WaitPolicy.WAIT_FOR_STEPS_COMPLETION,
            )
    @property
    def _hook_parameters(self):
        # Extend the base hook kwargs with the EMR-specific connection id.
        return {**super()._hook_parameters, "emr_conn_id": self.emr_conn_id}
    def execute(self, context: Context) -> str | None:
        """Create the job flow; optionally wait (synchronously or deferred) for it."""
        self.log.info(
            "Creating job flow using aws_conn_id: %s, emr_conn_id: %s", self.aws_conn_id, self.emr_conn_id
        )
        if isinstance(self.job_flow_overrides, str):
            # Templated overrides may arrive as a string; parse the literal
            # into a dict before handing it to boto3.
            job_flow_overrides: dict[str, Any] = ast.literal_eval(self.job_flow_overrides)
            self.job_flow_overrides = job_flow_overrides
        else:
            job_flow_overrides = self.job_flow_overrides
        response = self.hook.create_job_flow(job_flow_overrides)
        if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
            raise AirflowException(f"Job flow creation failed: {response}")
        self._job_flow_id = response["JobFlowId"]
        self.log.info("Job flow with id %s created", self._job_flow_id)
        EmrClusterLink.persist(
            context=context,
            operator=self,
            region_name=self.hook.conn_region_name,
            aws_partition=self.hook.conn_partition,
            job_flow_id=self._job_flow_id,
        )
        if self._job_flow_id:
            EmrLogsLink.persist(
                context=context,
                operator=self,
                region_name=self.hook.conn_region_name,
                aws_partition=self.hook.conn_partition,
                job_flow_id=self._job_flow_id,
                log_uri=get_log_uri(emr_client=self.hook.conn, job_flow_id=self._job_flow_id),
            )
        if self.wait_for_completion:
            # Only consumed on the synchronous branch below; the deferrable
            # branch delegates all waiting to the trigger.
            waiter_name = WAITER_POLICY_NAME_MAPPING[WaitPolicy.WAIT_FOR_COMPLETION]
            if self.deferrable:
                self.defer(
                    trigger=EmrCreateJobFlowTrigger(
                        job_flow_id=self._job_flow_id,
                        aws_conn_id=self.aws_conn_id,
                        waiter_delay=self.waiter_delay,
                        waiter_max_attempts=self.waiter_max_attempts,
                    ),
                    method_name="execute_complete",
                    # timeout is set to ensure that if a trigger dies, the timeout does not restart
                    # 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
                    timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
                )
            else:
                self.hook.get_waiter(waiter_name).wait(
                    ClusterId=self._job_flow_id,
                    WaiterConfig=prune_dict(
                        {
                            "Delay": self.waiter_delay,
                            "MaxAttempts": self.waiter_max_attempts,
                        }
                    ),
                )
        return self._job_flow_id
    def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
        """Resume after deferral; surface trigger failures as task errors."""
        validated_event = validate_execute_complete_event(event)
        if validated_event["status"] != "success":
            raise AirflowException(f"Error creating jobFlow: {validated_event}")
        self.log.info("JobFlow created successfully")
        return validated_event["job_flow_id"]
    def on_kill(self) -> None:
        """Terminate the EMR cluster (job flow) unless TerminationProtected is enabled on the cluster."""
        if self._job_flow_id:
            self.log.info("Terminating job flow %s", self._job_flow_id)
            self.hook.conn.terminate_job_flows(JobFlowIds=[self._job_flow_id])
| EmrCreateJobFlowOperator |
python | arrow-py__arrow | arrow/locales.py | {
"start": 100498,
"end": 101830
class ____(Locale):
    # Nepali humanization tables for Arrow. Pure data: the base Locale class
    # supplies all formatting logic.
    names = ["ne", "ne-np"]
    past = "{0} पहिले"
    future = "{0} पछी"
    timeframes = {
        "now": "अहिले",
        "second": "एक सेकेन्ड",
        "seconds": "{0} सेकण्ड",
        "minute": "मिनेट",
        "minutes": "{0} मिनेट",
        "hour": "एक घण्टा",
        "hours": "{0} घण्टा",
        "day": "एक दिन",
        "days": "{0} दिन",
        "month": "एक महिना",
        "months": "{0} महिना",
        "year": "एक बर्ष",
        "years": "{0} बर्ष",
    }
    meridians = {"am": "पूर्वाह्न", "pm": "अपरान्ह", "AM": "पूर्वाह्न", "PM": "अपरान्ह"}
    # Index 0 is a placeholder: month and day names are looked up 1-based.
    month_names = [
        "",
        "जनवरी",
        "फेब्रुअरी",
        "मार्च",
        "एप्रील",
        "मे",
        "जुन",
        "जुलाई",
        "अगष्ट",
        "सेप्टेम्बर",
        "अक्टोबर",
        "नोवेम्बर",
        "डिसेम्बर",
    ]
    month_abbreviations = [
        "",
        "जन",
        "फेब",
        "मार्च",
        "एप्रील",
        "मे",
        "जुन",
        "जुलाई",
        "अग",
        "सेप",
        "अक्ट",
        "नोव",
        "डिस",
    ]
    day_names = [
        "",
        "सोमवार",
        "मंगलवार",
        "बुधवार",
        "बिहिवार",
        "शुक्रवार",
        "शनिवार",
        "आइतवार",
    ]
    day_abbreviations = ["", "सोम", "मंगल", "बुध", "बिहि", "शुक्र", "शनि", "आइत"]
| NepaliLocale |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB132.py | {
"start": 775,
"end": 956
class ____:
    # Minimal stand-in container for the FURB132 fixture: membership always
    # succeeds and removal is a no-op -- only the method signatures matter.
    def remove(self, item) -> None:
        return
    def __contains__(self, other) -> bool:
        return True
# Check-then-remove on a custom (non-set) container: the exact pattern the
# FURB132 lint inspects.
c = Container()
if "x" in c:
    c.remove("x")
| Container |
python | pytorch__pytorch | test/test_pytree.py | {
"start": 54884,
"end": 59715
class ____(TestCase):
    # Exercises the C++ (extension-backed) pytree implementation; skipped in
    # fbcode where the extension is not available.
    def setUp(self):
        if IS_FBCODE:
            raise unittest.SkipTest("C++ pytree tests are not supported in fbcode")
    def assertEqual(self, x, y, *args, **kwargs):
        # Custom comparison: treespec objects from Dynamo are polyfilled Python
        # classes, so direct type equality would spuriously fail (see below).
        x_typename, y_typename = type(x).__name__, type(y).__name__
        if not ("treespec" in x_typename.lower() or "treespec" in y_typename.lower()):
            super().assertEqual(x, y, *args, **kwargs)
        # The Dynamo polyfill returns a polyfilled Python class for C++ PyTreeSpec instead of the
        # C++ class. So we compare the type names and reprs instead because the types themselves
        # won't be equal.
        super().assertEqual(x_typename, y_typename, *args, **kwargs)
        if not TEST_WITH_TORCHDYNAMO or type(x) is type(y):
            super().assertEqual(x, y, *args, **kwargs)
        else:
            # Compare by reconstructing trees from each spec instead.
            super().assertEqual(
                x.unflatten(range(x.num_leaves)),
                y.unflatten(range(y.num_leaves)),
                *args,
                **kwargs,
            )
    def test_treespec_equality(self):
        self.assertEqual(cxx_pytree.treespec_leaf(), cxx_pytree.treespec_leaf())
    def test_treespec_repr(self):
        # Check that it looks sane
        tree = (0, [0, 0, [0]])
        spec = cxx_pytree.tree_structure(tree)
        self.assertEqual(
            repr(spec), "PyTreeSpec((*, [*, *, [*]]), NoneIsLeaf, namespace='torch')"
        )
    @parametrize(
        "spec",
        [
            cxx_pytree.tree_structure([]),
            cxx_pytree.tree_structure(()),
            cxx_pytree.tree_structure({}),
            cxx_pytree.tree_structure([0]),
            cxx_pytree.tree_structure([0, 1]),
            cxx_pytree.tree_structure((0, 1, 2)),
            cxx_pytree.tree_structure({"a": 0, "b": 1, "c": 2}),
            cxx_pytree.tree_structure(
                OrderedDict([("a", (0, 1)), ("b", 2), ("c", {"a": 3, "b": 4, "c": 5})])
            ),
            cxx_pytree.tree_structure([(0, 1, [2, 3])]),
            cxx_pytree.tree_structure(
                defaultdict(list, {"a": [0, 1], "b": [1, 2], "c": {}})
            ),
        ],
    )
    def test_pytree_serialize(self, spec):
        # Sanity: the spec round-trips through unflatten/tree_structure ...
        self.assertEqual(
            spec,
            cxx_pytree.tree_structure(
                cxx_pytree.tree_unflatten([0] * spec.num_leaves, spec)
            ),
        )
        # ... and through the string serialization API.
        serialized_spec = cxx_pytree.treespec_dumps(spec)
        self.assertIsInstance(serialized_spec, str)
        roundtrip_spec = cxx_pytree.treespec_loads(serialized_spec)
        self.assertEqual(roundtrip_spec, spec)
    def test_pytree_serialize_namedtuple(self):
        python_pytree._register_namedtuple(
            GlobalPoint,
            serialized_type_name="test_pytree.test_pytree_serialize_namedtuple.GlobalPoint",
        )
        spec = cxx_pytree.tree_structure(GlobalPoint(0, 1))
        roundtrip_spec = cxx_pytree.treespec_loads(cxx_pytree.treespec_dumps(spec))
        self.assertEqual(roundtrip_spec.type._fields, spec.type._fields)
        LocalPoint = namedtuple("LocalPoint", ["x", "y"])
        python_pytree._register_namedtuple(
            LocalPoint,
            serialized_type_name="test_pytree.test_pytree_serialize_namedtuple.LocalPoint",
        )
        spec = cxx_pytree.tree_structure(LocalPoint(0, 1))
        roundtrip_spec = cxx_pytree.treespec_loads(cxx_pytree.treespec_dumps(spec))
        self.assertEqual(roundtrip_spec.type._fields, spec.type._fields)
    def test_pytree_custom_type_serialize(self):
        spec = cxx_pytree.tree_structure(GlobalDummyType(0, 1))
        serialized_spec = cxx_pytree.treespec_dumps(spec)
        roundtrip_spec = cxx_pytree.treespec_loads(serialized_spec)
        self.assertEqual(roundtrip_spec, spec)
        class LocalDummyType:
            def __init__(self, x, y):
                self.x = x
                self.y = y
            def __eq__(self, other):
                if not isinstance(other, LocalDummyType):
                    return NotImplemented
                return self.x == other.x and self.y == other.y
            def __hash__(self):
                return hash((self.x, self.y))
        cxx_pytree.register_pytree_node(
            LocalDummyType,
            lambda dummy: ([dummy.x, dummy.y], None),
            lambda xs, _: LocalDummyType(*xs),
            serialized_type_name="LocalDummyType",
        )
        spec = cxx_pytree.tree_structure(LocalDummyType(0, 1))
        serialized_spec = cxx_pytree.treespec_dumps(spec)
        roundtrip_spec = cxx_pytree.treespec_loads(serialized_spec)
        self.assertEqual(roundtrip_spec, spec)
# Materialize the @parametrize-decorated tests on each suite.
instantiate_parametrized_tests(TestGenericPytree)
instantiate_parametrized_tests(TestPythonPytree)
instantiate_parametrized_tests(TestCxxPytree)
if __name__ == "__main__":
    run_tests()
| TestCxxPytree |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB118.py | {
"start": 3233,
"end": 4145
class ____:
    # In an ideal world, perhaps we'd emit a diagnostic here,
    # since this `lambda` is clearly not a method definition,
    # and *could* be safely replaced with an `operator` function.
    # Practically speaking, however, it's hard to see how we'd accurately determine
    # that the `lambda` is *not* a method definition
    # without risking false positives elsewhere or introducing complex heuristics
    # that users would find surprising and confusing
    FOO = sorted([x for x in BAR], key=lambda x: x.baz)
# https://github.com/astral-sh/ruff/issues/19305
# Regression fixtures for the issue above: fixture bodies returning
# `request.param` must not be rewritten by FURB118.
import pytest
@pytest.fixture
def my_fixture_with_param(request):
    return request.param
@pytest.fixture()
def my_fixture_with_param2(request):
    return request.param
# Decorated function (should be ignored)
def custom_decorator(func):
    return func
@custom_decorator
def add(x, y):
    return x + y
python | pypa__warehouse | tests/unit/utils/test_compression.py | {
"start": 341,
"end": 4463
class ____:
    # Unit tests for the response `compressor` callback, driven with `pretend`
    # stubs so no real WSGI machinery is needed.
    @pytest.mark.parametrize(
        "vary", [["Cookie"], ["Authorization"], ["Cookie", "Authorization"]]
    )
    def test_bails_if_vary(self, vary):
        # Responses varying on credentials must never be compressed (BREACH).
        request = pretend.stub()
        response = pretend.stub(vary=vary)
        compressor(request, response)
    def test_bails_if_content_encoding(self):
        # An already-encoded body must not be re-encoded.
        request = pretend.stub()
        response = pretend.stub(headers={"Content-Encoding": "something"}, vary=None)
        compressor(request, response)
    @pytest.mark.parametrize(
        ("vary", "expected"),
        [
            (None, {"Accept-Encoding"}),
            (["Something-Else"], {"Accept-Encoding", "Something-Else"}),
        ],
    )
    def test_sets_vary(self, vary, expected):
        # Accept-Encoding is appended to Vary without clobbering existing values.
        request = pretend.stub(accept_encoding=AcceptEncodingNoHeader())
        response = HTTPOk(body=b"foo")
        response.vary = vary
        compressor(request, response)
        assert set(response.vary) == expected
    def test_compresses_non_streaming(self):
        decompressed_body = b"foofoofoofoofoofoofoofoofoofoofoofoofoofoo"
        compressed_body = b"".join(list(gzip_app_iter([decompressed_body])))
        request = pretend.stub(accept_encoding=AcceptEncodingValidHeader("gzip"))
        response = HTTPOk(body=decompressed_body)
        response.md5_etag()
        original_etag = response.etag
        compressor(request, response)
        assert response.content_encoding == "gzip"
        assert response.content_length == len(compressed_body)
        assert response.body == compressed_body
        # Compression changes the bytes, so the ETag must change too.
        assert response.etag != original_etag
    def test_compresses_streaming(self):
        decompressed_body = b"foofoofoofoofoofoofoofoofoofoofoofoofoofoo"
        compressed_body = b"".join(list(gzip_app_iter([decompressed_body])))
        request = pretend.stub(accept_encoding=AcceptEncodingValidHeader("gzip"))
        response = HTTPOk(app_iter=iter([decompressed_body]))
        compressor(request, response)
        assert response.content_encoding == "gzip"
        # Streaming responses keep an unknown length.
        assert response.content_length is None
        assert response.body == compressed_body
    def test_compresses_streaming_with_etag(self):
        decompressed_body = b"foofoofoofoofoofoofoofoofoofoofoofoofoofoo"
        compressed_body = b"".join(list(gzip_app_iter([decompressed_body])))
        request = pretend.stub(accept_encoding=AcceptEncodingValidHeader("gzip"))
        response = HTTPOk(app_iter=iter([decompressed_body]))
        response.etag = "foo"
        compressor(request, response)
        assert response.content_encoding == "gzip"
        assert response.content_length is None
        assert response.body == compressed_body
        assert response.etag == "rfbezwKUdGjz6VPWDLDTvA"
    def test_buffers_small_streaming(self):
        # A streaming body with a known small length is buffered so the
        # compressed Content-Length can be emitted.
        decompressed_body = b"foofoofoofoofoofoofoofoofoofoofoofoofoofoo"
        compressed_body = b"".join(list(gzip_app_iter([decompressed_body])))
        request = pretend.stub(accept_encoding=AcceptEncodingValidHeader("gzip"))
        response = HTTPOk(
            app_iter=iter([decompressed_body]), content_length=len(decompressed_body)
        )
        compressor(request, response)
        assert response.content_encoding == "gzip"
        assert response.content_length == len(compressed_body)
        assert response.body == compressed_body
    def test_doesnt_compress_too_small(self):
        # Tiny bodies stay uncompressed (overhead would exceed savings).
        request = pretend.stub(accept_encoding=AcceptEncodingValidHeader("gzip"))
        response = HTTPOk(body=b"foo")
        compressor(request, response)
        assert response.content_encoding is None
        assert response.content_length == 3
        assert response.body == b"foo"
def test_compression_tween_factory():
    # The tween must pass the request through untouched and register
    # `compressor` as a response callback.
    callbacks = []
    registry = pretend.stub()
    request = pretend.stub(add_response_callback=callbacks.append)
    response = pretend.stub()
    def handler(inner_request):
        assert inner_request is request
        return response
    tween = compression_tween_factory(handler, registry)
    assert tween(request) is response
    assert callbacks == [compressor]
| TestCompressor |
python | huggingface__transformers | tests/models/arcee/test_modeling_arcee.py | {
"start": 1112,
"end": 1241
class ____(CausalLMModelTester):
    # Bind the concrete model class only when torch is importable, so the
    # test module can still be collected in torch-less environments.
    if is_torch_available():
        base_model_class = ArceeModel
@require_torch
| ArceeModelTester |
python | catalyst-team__catalyst | catalyst/contrib/datasets/market1501.py | {
"start": 288,
"end": 2590
class ____(MetricLearningTrainDataset):
    """
    Market1501 train dataset.
    This dataset should be used for training stage of the reid pipeline.
    .. _Scalable Person Re-identification\: A Benchmark:
        https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zheng_Scalable_Person_Re-Identification_ICCV_2015_paper.pdf # noqa: E501, W505
    """
    def __init__(
        self,
        root: str,
        transform: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
    ):
        """
        Market1501 dataset for train stage of reid task.
        Args:
            root: path to a directory that contains Market-1501-v15.09.15
            transform: transformation that should be applied to images
        """
        self.root = Path(root)
        self._data_dir = self.root / "Market-1501-v15.09.15/bounding_box_train"
        self.transform = transform
        # NOTE: the entire split is read into memory here (see _load_data).
        self.images, self.pids = self._load_data(self._data_dir)
    @staticmethod
    def _load_data(data_dir: Path) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Load data from train directory of the dataset.
        Parse names of images to get person id as labels.
        Args:
            data_dir: path to directory that contains training data
        Returns:
            images for training and their labels
        """
        filenames = list(data_dir.glob("*.jpg"))
        # Eagerly decodes every JPEG into one float tensor.
        data = torch.from_numpy(
            np.array([imread(filename) for filename in filenames])
        ).float()
        # The person id is the first '_'-separated token of the file name.
        targets = torch.from_numpy(
            np.array([int(filename.name.split("_")[0]) for filename in filenames])
        )
        return data, targets
    def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
        """Get item from dataset.
        Args:
            index: index of the element
        Returns:
            dict of image and its pid
        """
        image, pid = self.images[index], self.pids[index]
        if self.transform is not None:
            image = self.transform(image)
        return {"image": image, "pid": pid}
    def __len__(self) -> int:
        """Get len of the dataset"""
        return len(self.pids)
    def get_labels(self) -> List[int]:
        """Get list of labels of dataset"""
        return self.pids.tolist()
python | Netflix__metaflow | metaflow/plugins/env_escape/override_decorators.py | {
"start": 301,
"end": 585
class ____(Override):
    # Override record for attribute access. NOTE(review): the flag name
    # suggests ``is_setattr=True`` marks a setattr-style override and False a
    # getattr-style one -- confirm against the env_escape consumer.
    def __init__(self, is_setattr, obj_mapping, wrapped_function):
        super(AttrOverride, self).__init__(obj_mapping, wrapped_function)
        self._is_setattr = is_setattr
    @property
    def is_setattr(self):
        # Read-only view of the flag captured at construction time.
        return self._is_setattr
| AttrOverride |
python | doocs__leetcode | solution/2400-2499/2443.Sum of Number and Its Reverse/Solution.py | {
"start": 0,
"end": 144
class ____:
    def sumOfNumberAndReverse(self, num: int) -> bool:
        """Return True iff some k in [0, num] satisfies k + reverse(k) == num.

        Brute force: test every candidate; reverse(k) is computed by
        reversing the decimal string representation of k.
        """
        for candidate in range(num + 1):
            if candidate + int(str(candidate)[::-1]) == num:
                return True
        return False
| Solution |
python | openai__openai-python | src/openai/_module_client.py | {
"start": 2520,
"end": 2655
class ____(LazyProxy["Webhooks"]):
    # Lazy module-level accessor: the real ``Webhooks`` resource (and the
    # client that owns it) is only constructed on first use of the proxy.
    @override
    def __load__(self) -> Webhooks:
        return _load_client().webhooks
| WebhooksProxy |
python | huggingface__transformers | tests/models/segformer/test_image_processing_segformer.py | {
"start": 1115,
"end": 3236
class ____:
    # Configuration holder for SegFormer image-processor tests: stores the
    # processor kwargs and builds matching dummy image inputs.
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        # kwargs forwarded to the image processor under test.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
    def expected_output_image_shape(self, images):
        # Channels-first shape after resizing to the configured size.
        return self.num_channels, self.size["height"], self.size["width"]
    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
def prepare_semantic_single_inputs():
    # One (image, segmentation-map) pair from the ADE20k test fixtures.
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    example = ds[0]
    return example["image"], example["map"]
def prepare_semantic_batch_inputs():
    # First two (image, map) pairs as parallel lists, for batched tests.
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
| SegformerImageProcessingTester |
python | wandb__wandb | wandb/vendor/pygments/lexers/graphics.py | {
"start": 7092,
"end": 12431
class ____(RegexLexer):
    """
    For `Asymptote <http://asymptote.sf.net/>`_ source code.
    .. versionadded:: 1.2
    """
    name = 'Asymptote'
    aliases = ['asy', 'asymptote']
    filenames = ['*.asy']
    mimetypes = ['text/x-asymptote']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
    tokens = {
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
        ],
        'statements': [
            # simple string (TeX friendly)
            (r'"(\\\\|\\"|[^"])*"', String),
            # C style string (with character escapes)
            (r"'", String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
            (r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
             r'return|break|continue|struct|typedef|new|access|import|'
             r'unravel|from|include|quote|static|public|private|restricted|'
             r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
            # Since an asy-type-name can be also an asy-function-name,
            # in the following we test if the string " [a-zA-Z]" follows
            # the Keyword.Type.
            # Of course it is not perfect !
            (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
             r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
             r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
             r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
             r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
             r'path3|pen|picture|point|position|projection|real|revolution|'
             r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
             r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
             r'transformation|tree|triangle|trilinear|triple|vector|'
             r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
            # Now the asy-type-name which are not asy-function-name
            # except yours !
            # Perhaps useless
            (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
             r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
             r'picture|position|real|revolution|slice|splitface|ticksgridT|'
             r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
            ('[a-zA-Z_]\w*:(?!:)', Name.Label),
            ('[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[\w*\s])+?(?:\s|\*))'  # return arguments
             r'([a-zA-Z_]\w*)'            # method name
             r'(\s*\([^;]*?\))'           # signature
             r'(' + _ws + r')(\{)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[\w*\s])+?(?:\s|\*))'  # return arguments
             r'([a-zA-Z_]\w*)'            # method name
             r'(\s*\([^;]*?\))'           # signature
             r'(' + _ws + r')(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        'string': [
            (r"'", String, '#pop'),
            (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'\n', String),
            (r"[^\\'\n]+", String),  # all other characters
            (r'\\\n', String),
            (r'\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
    }
    def get_tokens_unprocessed(self, text):
        # Post-process generic Name tokens: promote known Asymptote builtin
        # function/variable names using the generated lookup tables.
        from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in ASYFUNCNAME:
                token = Name.Function
            elif token is Name and value in ASYVARNAME:
                token = Name.Variable
            yield index, token, value
def _shortened(word):
    # Build a regex alternation matching every truncation of *word* down to
    # the prefix preceding the '$' marker, each anchored with a word boundary:
    # e.g. 'gr$aph' -> r'graph\b|grap\b|gra\b|gr\b'. Longest form first so
    # regex alternation prefers the fullest match.
    dpos = word.find('$')
    return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
                    for i in range(len(word), dpos, -1))
def _shortened_many(*words):
    # Alternation of the shortened forms of several '$'-marked words.
    return '|'.join(map(_shortened, words))
| AsymptoteLexer |
python | ansible__ansible | lib/ansible/plugins/shell/powershell.py | {
"start": 6217,
"end": 17259
} | class ____(ShellBase):
# Common shell filenames that this plugin handles
# Powershell is handled differently. It's selected when winrm is the
# connection
COMPATIBLE_SHELLS = frozenset() # type: frozenset[str]
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'powershell'
# We try catch as some connection plugins don't have a console (PSRP).
_CONSOLE_ENCODING = "try { [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding } catch {}"
_SHELL_REDIRECT_ALLNULL = '> $null'
_SHELL_AND = ';'
# Used by various parts of Ansible to do Windows specific changes
_IS_WINDOWS = True
# TODO: add binary module support
def env_prefix(self, **kwargs):
# powershell/winrm env handling is handled in the exec wrapper
return ""
def join_path(self, *args):
# use normpath() to remove doubled slashed and convert forward to backslashes
parts = [ntpath.normpath(arg) for arg in args]
# Because ntpath.join treats any component that begins with a backslash as an absolute path,
# we have to strip slashes from at least the beginning, otherwise join will ignore all previous
# path components except for the drive.
return ntpath.join(parts[0], *[part.strip('\\') for part in parts[1:]])
def get_remote_filename(self, pathname):
# powershell requires that script files end with .ps1
base_name = os.path.basename(pathname.strip())
name, ext = os.path.splitext(base_name.strip())
if ext.lower() not in ['.ps1', '.exe']:
return name + '.ps1'
return base_name.strip()
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
return path.endswith('/') or path.endswith('\\')
def chmod(self, paths, mode):
raise NotImplementedError('chmod is not implemented for Powershell')
def chown(self, paths, user):
raise NotImplementedError('chown is not implemented for Powershell')
def set_user_facl(self, paths, user, mode):
raise NotImplementedError('set_user_facl is not implemented for Powershell')
def remove(self, path, recurse=False):
quoted_path = self._escape(path)
if recurse:
return self._encode_script("""Remove-Item '%s' -Force -Recurse;""" % quoted_path)
else:
return self._encode_script("""Remove-Item '%s' -Force;""" % quoted_path)
def mkdtemp(
self,
basefile: str | None = None,
system: bool = False,
mode: int = 0o700,
tmpdir: str | None = None,
) -> str:
# This is not called in Ansible anymore but it is kept for backwards
# compatibility in case other action plugins outside Ansible calls this.
if not basefile:
basefile = self.__class__._generate_temp_dir_name()
basetmpdir = self._escape(tmpdir if tmpdir else self.get_option('remote_tmp'))
script = f"""
{self._CONSOLE_ENCODING}
$tmp_path = [System.Environment]::ExpandEnvironmentVariables('{basetmpdir}')
$tmp = New-Item -Type Directory -Path $tmp_path -Name '{basefile}'
Write-Output -InputObject $tmp.FullName
"""
return self._encode_script(script.strip())
def _mkdtemp2(
self,
basefile: str | None = None,
system: bool = False,
mode: int = 0o700,
tmpdir: str | None = None,
) -> _ShellCommand:
# Windows does not have an equivalent for the system temp files, so
# the param is ignored
if not basefile:
basefile = self.__class__._generate_temp_dir_name()
basetmpdir = tmpdir if tmpdir else self.get_option('remote_tmp')
script, stdin = _bootstrap_powershell_script("powershell_mkdtemp.ps1", {
'Directory': basetmpdir,
'Name': basefile,
})
return _ShellCommand(
command=self._encode_script(script),
input_data=stdin,
)
def expand_user(
self,
user_home_path: str,
username: str = '',
) -> str:
# This is not called in Ansible anymore but it is kept for backwards
# compatibility in case other actions plugins outside Ansible called this.
if user_home_path == '~':
script = 'Write-Output (Get-Location).Path'
elif user_home_path.startswith('~\\'):
script = "Write-Output ((Get-Location).Path + '%s')" % self._escape(user_home_path[1:])
else:
script = "Write-Output '%s'" % self._escape(user_home_path)
return self._encode_script(f"{self._CONSOLE_ENCODING}; {script}")
def _expand_user2(
self,
user_home_path: str,
username: str = '',
) -> _ShellCommand:
script, stdin = _bootstrap_powershell_script("powershell_expand_user.ps1", {
'Path': user_home_path,
})
return _ShellCommand(
command=self._encode_script(script),
input_data=stdin,
)
def exists(self, path):
path = self._escape(path)
script = """
If (Test-Path '%s')
{
$res = 0;
}
Else
{
$res = 1;
}
Write-Output '$res';
Exit $res;
""" % path
return self._encode_script(script)
def checksum(self, path, *args, **kwargs):
display.deprecated(
msg="The `ShellModule.checksum` method is deprecated.",
version="2.23",
help_text="Use `ActionBase._execute_remote_stat()` instead.",
)
path = self._escape(path)
script = """
If (Test-Path -PathType Leaf '%(path)s')
{
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open('%(path)s', [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container '%(path)s')
{
Write-Output "3";
}
Else
{
Write-Output "1";
}
""" % dict(path=path)
return self._encode_script(script)
def build_module_command(self, env_string, shebang, cmd, arg_path=None):
bootstrap_wrapper = _get_powershell_script("bootstrap_wrapper.ps1")
# pipelining bypass
if cmd == '':
return self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
# non-pipelining
cmd_parts = shlex.split(cmd, posix=False)
cmd_parts = list(map(to_text, cmd_parts))
if shebang and shebang.lower() == '#!powershell':
if arg_path:
# Running a module without the exec_wrapper and with an argument
# file.
script_path = cmd_parts[0]
if not script_path.lower().endswith('.ps1'):
script_path += '.ps1'
cmd_parts.insert(0, '-File')
cmd_parts[1] = f'"{script_path}"'
if arg_path:
cmd_parts.append(f'"{arg_path}"')
wrapper_cmd = " ".join(_common_args + cmd_parts)
return wrapper_cmd
else:
# Running a module with ANSIBLE_KEEP_REMOTE_FILES=true, the script
# arg is actually the input manifest JSON to provide to the bootstrap
# wrapper.
wrapper_cmd = "type " + cmd_parts[0] + " | " + self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
return wrapper_cmd
elif shebang and shebang.startswith('#!'):
cmd_parts.insert(0, shebang[2:])
elif not shebang:
# The module is assumed to be a binary
cmd_parts.append(arg_path)
script = """
Try
{
%s
%s
}
Catch
{
$_obj = @{ failed = $true }
If ($_.Exception.GetType)
{
$_obj.Add('msg', $_.Exception.Message)
}
Else
{
$_obj.Add('msg', $_.ToString())
}
If ($_.InvocationInfo.PositionMessage)
{
$_obj.Add('exception', $_.InvocationInfo.PositionMessage)
}
ElseIf ($_.ScriptStackTrace)
{
$_obj.Add('exception', $_.ScriptStackTrace)
}
Try
{
$_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
}
Catch
{
}
Echo $_obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
""" % (env_string, ' '.join(cmd_parts))
return self._encode_script(script, preserve_rc=False)
def wrap_for_exec(self, cmd):
super().wrap_for_exec(cmd)
return '& %s; exit $LASTEXITCODE' % cmd
def _escape(self, value):
"""Return value escaped for use in PowerShell single quotes."""
# There are 5 chars that need to be escaped in a single quote.
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
return re.compile(u"(['\u2018\u2019\u201a\u201b])").sub(u'\\1\\1', value)
def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
"""Convert a PowerShell script to a single base64-encoded command."""
script = to_text(script)
if script == u'-':
cmd_parts = _common_args + ['-Command', '-']
else:
if strict_mode:
script = u'Set-StrictMode -Version Latest\r\n%s' % script
# try to propagate exit code if present- won't work with begin/process/end-style scripts (ala put_file)
# NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
if preserve_rc:
script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n'\
% script
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = to_text(base64.b64encode(script.encode('utf-16-le')), 'utf-8')
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
if as_list:
return cmd_parts
return ' '.join(cmd_parts)
| ShellModule |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 64872,
"end": 65297
} | class ____(_ConfigBase):
vectorizer: _NamedVectorizerConfig
vector_index_config: Union[
VectorIndexConfigHNSW, VectorIndexConfigFlat, VectorIndexConfigDynamic
]
def to_dict(self) -> Dict:
ret_dict = super().to_dict()
ret_dict["vectorIndexType"] = self.vector_index_config.vector_index_type()
return ret_dict
NamedVectorConfig = _NamedVectorConfig
@dataclass
| _NamedVectorConfig |
python | encode__django-rest-framework | tests/test_atomic_requests.py | {
"start": 1393,
"end": 2092
} | class ____(TestCase):
def setUp(self):
self.view = BasicView.as_view()
connections.databases['default']['ATOMIC_REQUESTS'] = True
def tearDown(self):
connections.databases['default']['ATOMIC_REQUESTS'] = False
def test_no_exception_commit_transaction(self):
request = factory.post('/')
with self.assertNumQueries(1):
response = self.view(request)
assert not transaction.get_rollback()
assert response.status_code == status.HTTP_200_OK
assert BasicModel.objects.count() == 1
@unittest.skipUnless(
connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints."
)
| DBTransactionTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.