language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | spyder-ide__spyder | spyder/widgets/simplecodeeditor.py | {
"start": 1436,
"end": 2049
} | class ____(QWidget):
"""
Adapted from:
https://doc.qt.io/qt-5/qtwidgets-widgets-codeeditor-example.html
"""
def __init__(self, code_editor=None):
super().__init__(code_editor)
self._editor = code_editor
self._left_padding = 6 # Pixels
self._right_padding = 3 # Pixels
# --- Qt overrides
# ------------------------------------------------------------------------
def sizeHint(self):
return QSize(self._editor.linenumberarea_width(), 0)
def paintEvent(self, event):
self._editor.linenumberarea_paint_event(event)
| LineNumberArea |
python | ansible__ansible | test/integration/targets/var_precedence/ansible-var-precedence-check.py | {
"start": 4454,
"end": 18706
} | class ____(object):
def __init__(self, features, dynamic_inventory=False):
clean_test_dir()
self.dynamic_inventory = dynamic_inventory
self.di = None
self.features = features[:]
self.inventory = ''
self.playvars = dict()
self.varsfiles = []
self.playbook = dict(hosts='testhost', gather_facts=False)
self.tasks = []
self.roles = []
self.ansible_command = None
self.stdout = None
def write_playbook(self):
fname = os.path.join(TESTDIR, 'site.yml')
pb_copy = self.playbook.copy()
if self.playvars:
pb_copy['vars'] = self.playvars
if self.varsfiles:
pb_copy['vars_files'] = self.varsfiles
if self.roles:
pb_copy['roles'] = []
for role in self.roles:
role.write_role()
role_def = dict(role=role.name)
role_def.update(role.params)
pb_copy['roles'].append(role_def)
if self.tasks:
pb_copy['tasks'] = self.tasks
with open(fname, 'w') as f:
pb_yaml = yaml.dump([pb_copy], f, default_flow_style=False, indent=2)
def build(self):
if self.dynamic_inventory:
# python based inventory file
self.di = DynamicInventory(self.features)
self.di.write_script()
else:
# ini based inventory file
if 'ini_host' in self.features:
self.inventory += 'testhost findme=ini_host\n'
else:
self.inventory += 'testhost\n'
self.inventory += '\n'
if 'ini_child' in self.features:
self.inventory += '[child]\n'
self.inventory += 'testhost\n'
self.inventory += '\n'
self.inventory += '[child:vars]\n'
self.inventory += 'findme=ini_child\n'
self.inventory += '\n'
if 'ini_parent' in self.features:
if 'ini_child' in self.features:
self.inventory += '[parent:children]\n'
self.inventory += 'child\n'
else:
self.inventory += '[parent]\n'
self.inventory += 'testhost\n'
self.inventory += '\n'
self.inventory += '[parent:vars]\n'
self.inventory += 'findme=ini_parent\n'
self.inventory += '\n'
if 'ini_all' in self.features:
self.inventory += '[all:vars]\n'
self.inventory += 'findme=ini_all\n'
self.inventory += '\n'
# default to a single file called inventory
invfile = os.path.join(TESTDIR, 'inventory', 'hosts')
ipath = os.path.join(TESTDIR, 'inventory')
if not os.path.isdir(ipath):
os.makedirs(ipath)
with open(invfile, 'w') as f:
f.write(self.inventory)
hpath = os.path.join(TESTDIR, 'inventory', 'host_vars')
if not os.path.isdir(hpath):
os.makedirs(hpath)
gpath = os.path.join(TESTDIR, 'inventory', 'group_vars')
if not os.path.isdir(gpath):
os.makedirs(gpath)
if 'ini_host_vars_file' in self.features:
hfile = os.path.join(hpath, 'testhost')
with open(hfile, 'w') as f:
f.write('findme: ini_host_vars_file\n')
if 'ini_group_vars_file_all' in self.features:
hfile = os.path.join(gpath, 'all')
with open(hfile, 'w') as f:
f.write('findme: ini_group_vars_file_all\n')
if 'ini_group_vars_file_child' in self.features:
hfile = os.path.join(gpath, 'child')
with open(hfile, 'w') as f:
f.write('findme: ini_group_vars_file_child\n')
if 'ini_group_vars_file_parent' in self.features:
hfile = os.path.join(gpath, 'parent')
with open(hfile, 'w') as f:
f.write('findme: ini_group_vars_file_parent\n')
if 'pb_host_vars_file' in self.features:
os.makedirs(os.path.join(TESTDIR, 'host_vars'))
fname = os.path.join(TESTDIR, 'host_vars', 'testhost')
with open(fname, 'w') as f:
f.write('findme: pb_host_vars_file\n')
if 'pb_group_vars_file_parent' in self.features:
if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')):
os.makedirs(os.path.join(TESTDIR, 'group_vars'))
fname = os.path.join(TESTDIR, 'group_vars', 'parent')
with open(fname, 'w') as f:
f.write('findme: pb_group_vars_file_parent\n')
if 'pb_group_vars_file_child' in self.features:
if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')):
os.makedirs(os.path.join(TESTDIR, 'group_vars'))
fname = os.path.join(TESTDIR, 'group_vars', 'child')
with open(fname, 'w') as f:
f.write('findme: pb_group_vars_file_child\n')
if 'pb_group_vars_file_all' in self.features:
if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')):
os.makedirs(os.path.join(TESTDIR, 'group_vars'))
fname = os.path.join(TESTDIR, 'group_vars', 'all')
with open(fname, 'w') as f:
f.write('findme: pb_group_vars_file_all\n')
if 'play_var' in self.features:
self.playvars['findme'] = 'play_var'
if 'set_fact' in self.features:
self.tasks.append(dict(set_fact='findme="set_fact"'))
if 'vars_file' in self.features:
self.varsfiles.append('varsfile.yml')
fname = os.path.join(TESTDIR, 'varsfile.yml')
with open(fname, 'w') as f:
f.write('findme: vars_file\n')
if 'include_vars' in self.features:
self.tasks.append(dict(include_vars='included_vars.yml'))
fname = os.path.join(TESTDIR, 'included_vars.yml')
with open(fname, 'w') as f:
f.write('findme: include_vars\n')
if 'role_var' in self.features:
role = Role('role_var')
role.vars = True
role.load = True
self.roles.append(role)
if 'role_parent_default' in self.features:
role = Role('role_default')
role.load = False
role.defaults = True
self.roles.append(role)
role = Role('role_parent_default')
role.dependencies.append('role_default')
role.defaults = True
role.load = True
if 'role_params' in self.features:
role.params = dict(findme='role_params')
self.roles.append(role)
elif 'role_default' in self.features:
role = Role('role_default')
role.defaults = True
role.load = True
if 'role_params' in self.features:
role.params = dict(findme='role_params')
self.roles.append(role)
debug_task = dict(debug='var=findme')
test_task = {'assert': dict(that=['findme == "%s"' % self.features[0]])}
if 'task_vars' in self.features:
test_task['vars'] = dict(findme="task_vars")
if 'registered_vars' in self.features:
test_task['register'] = 'findme'
if 'block_vars' in self.features:
block_wrapper = [
debug_task,
{
'block': [test_task],
'vars': dict(findme="block_vars"),
}
]
else:
block_wrapper = [debug_task, test_task]
if 'include_params' in self.features:
self.tasks.append(dict(name='including tasks', include_tasks='included_tasks.yml', vars=dict(findme='include_params')))
else:
self.tasks.append(dict(include_tasks='included_tasks.yml'))
fname = os.path.join(TESTDIR, 'included_tasks.yml')
with open(fname, 'w') as f:
f.write(yaml.dump(block_wrapper))
self.write_playbook()
def run(self):
"""
if self.dynamic_inventory:
cmd = 'ansible-playbook -c local -i inventory/hosts site.yml'
else:
cmd = 'ansible-playbook -c local -i inventory site.yml'
"""
cmd = 'ansible-playbook -c local -i inventory site.yml'
if 'extra_vars' in self.features:
cmd += ' --extra-vars="findme=extra_vars"'
cmd = cmd + ' -vvvvv'
self.ansible_command = cmd
(rc, so, se) = run_command(cmd, cwd=TESTDIR)
self.stdout = so
if rc != 0:
raise Exception("playbook failed (rc=%s), stdout: '%s' stderr: '%s'" % (rc, so, se))
def show_tree(self):
print('## TREE')
cmd = 'tree %s' % TESTDIR
(rc, so, se) = run_command(cmd)
lines = so.split('\n')
lines = lines[:-3]
print('\n'.join(lines))
def show_content(self):
print('## CONTENT')
cmd = 'find %s -type f | xargs tail -n +1' % TESTDIR
(rc, so, se) = run_command(cmd)
print(so)
def show_stdout(self):
print('## COMMAND')
print(self.ansible_command)
print('## STDOUT')
print(self.stdout)
def main():
features = [
'extra_vars',
'include_params',
# 'role_params', # FIXME: we don't yet validate tasks within a role
'set_fact',
# 'registered_vars', # FIXME: hard to simulate
'include_vars',
# 'role_dep_params',
'task_vars',
'block_vars',
'role_var',
'vars_file',
'play_var',
# 'host_facts', # FIXME: hard to simulate
'pb_host_vars_file',
'ini_host_vars_file',
'ini_host',
'pb_group_vars_file_child',
# 'ini_group_vars_file_child', # FIXME: this contradicts documented precedence pb group vars files should override inventory ones
'pb_group_vars_file_parent',
'ini_group_vars_file_parent',
'pb_group_vars_file_all',
'ini_group_vars_file_all',
'ini_child',
'ini_parent',
'ini_all',
'role_parent_default',
'role_default',
]
parser = OptionParser()
parser.add_option('-f', '--feature', action='append')
parser.add_option('--use_dynamic_inventory', action='store_true')
parser.add_option('--show_tree', action='store_true')
parser.add_option('--show_content', action='store_true')
parser.add_option('--show_stdout', action='store_true')
parser.add_option('--copy_testcases_to_local_dir', action='store_true')
(options, args) = parser.parse_args()
if options.feature:
for f in options.feature:
if f not in features:
print('%s is not a valid feature' % f)
sys.exit(1)
features = list(options.feature)
fdesc = {
'ini_host': 'host var inside the ini',
'script_host': 'host var inside the script _meta',
'ini_child': 'child group var inside the ini',
'script_child': 'child group var inside the script',
'ini_parent': 'parent group var inside the ini',
'script_parent': 'parent group var inside the script',
'ini_all': 'all group var inside the ini',
'script_all': 'all group var inside the script',
'ini_host_vars_file': 'var in inventory/host_vars/host',
'ini_group_vars_file_parent': 'var in inventory/group_vars/parent',
'ini_group_vars_file_child': 'var in inventory/group_vars/child',
'ini_group_vars_file_all': 'var in inventory/group_vars/all',
'pb_group_vars_file_parent': 'var in playbook/group_vars/parent',
'pb_group_vars_file_child': 'var in playbook/group_vars/child',
'pb_group_vars_file_all': 'var in playbook/group_vars/all',
'pb_host_vars_file': 'var in playbook/host_vars/host',
'play_var': 'var set in playbook header',
'role_parent_default': 'var in roles/role_parent/defaults/main.yml',
'role_default': 'var in roles/role/defaults/main.yml',
'role_var': 'var in ???',
'include_vars': 'var in included file',
'set_fact': 'var made by set_fact',
'vars_file': 'var in file added by vars_file',
'block_vars': 'vars defined on the block',
'task_vars': 'vars defined on the task',
'extra_vars': 'var passed via the cli'
}
dinv = options.use_dynamic_inventory
if dinv:
# some features are specific to ini, so swap those
for (idx, x) in enumerate(features):
if x.startswith('ini_') and 'vars_file' not in x:
features[idx] = x.replace('ini_', 'script_')
dinv = options.use_dynamic_inventory
index = 1
while features:
VTM = VarTestMaker(features, dynamic_inventory=dinv)
VTM.build()
if options.show_tree or options.show_content or options.show_stdout:
print('')
if options.show_tree:
VTM.show_tree()
if options.show_content:
VTM.show_content()
try:
print("CHECKING: %s (%s)" % (features[0], fdesc.get(features[0], '')))
res = VTM.run()
if options.show_stdout:
VTM.show_stdout()
features.pop(0)
if options.copy_testcases_to_local_dir:
topdir = 'testcases'
if index == 1 and os.path.isdir(topdir):
shutil.rmtree(topdir)
if not os.path.isdir(topdir):
os.makedirs(topdir)
thisindex = str(index)
if len(thisindex) == 1:
thisindex = '0' + thisindex
thisdir = os.path.join(topdir, '%s.%s' % (thisindex, res))
shutil.copytree(TESTDIR, thisdir)
except Exception as e:
print("ERROR !!!")
print(e)
print('feature: %s failed' % features[0])
sys.exit(1)
finally:
shutil.rmtree(TESTDIR)
index += 1
if __name__ == "__main__":
main()
| VarTestMaker |
python | numba__numba | numba/core/typing/arraydecl.py | {
"start": 25818,
"end": 25974
} | class ____(AttributeTemplate):
key = types.ArrayCTypes
def resolve_data(self, ctinfo):
return types.uintp
@infer_getattr
| ArrayCTypesAttribute |
python | getsentry__sentry | tests/acceptance/test_organization_security_privacy.py | {
"start": 147,
"end": 3621
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("owner@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.login_as(self.user)
self.path = f"/settings/{self.org.slug}/security-and-privacy/"
def load_organization_helper(self) -> None:
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert self.browser.wait_until(
'[data-test-id="organization-settings-security-and-privacy"]'
)
def renders_2fa_setting(self) -> bool:
return self.browser.wait_until("#require2FA")
def test_renders_2fa_setting_for_owner(self) -> None:
self.browser.get(self.path)
self.load_organization_helper()
assert self.renders_2fa_setting()
def test_renders_2fa_setting_for_manager(self) -> None:
manager_user = self.create_user("manager@example.com")
self.create_member(organization=self.org, user=manager_user, role="manager")
self.login_as(manager_user)
self.browser.get(self.path)
self.load_organization_helper()
assert self.renders_2fa_setting()
def test_setting_2fa_without_2fa_enabled(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert not self.browser.element_exists(
'[data-test-id="organization-settings-security-and-privacy"] .error'
)
self.browser.click("#require2FA")
self.browser.wait_until("[role='dialog']")
self.browser.click("[role='dialog'] [data-test-id='confirm-button']")
self.browser.wait_until_not("[role='dialog']")
self.browser.wait_until_test_id("toast-error")
self.load_organization_helper()
def test_renders_advanced_data_scrubbing_without_rule(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert self.browser.wait_until('[data-test-id="advanced-data-scrubbing"]')
self.load_organization_helper()
def test_renders_advanced_data_scrubbing_with_rules(self) -> None:
relayPiiConfig = json.dumps(
{
"rules": {
"0": {
"type": "password",
"redaction": {"method": "replace", "text": "Scrubbed"},
},
"1": {"type": "creditcard", "redaction": {"method": "mask"}},
},
"applications": {"password": ["0"], "$message": ["1"]},
}
)
self.org.update_option("sentry:relay_pii_config", relayPiiConfig)
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert self.browser.wait_until('[data-test-id="advanced-data-scrubbing"]')
assert self.browser.wait_until('[data-test-id="advanced-data-scrubbing-rules"]')
self.load_organization_helper()
def test_renders_advanced_data_scrubbing_add_rule_modal(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert self.browser.wait_until('[data-test-id="advanced-data-scrubbing"]')
self.browser.click_when_visible("[aria-label='Add Rule']")
self.load_organization_helper()
| OrganizationSecurityAndPrivacyTest |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/widgets/serversconfig.py | {
"start": 21828,
"end": 26892
} | class ____(QTableView):
def __init__(self, parent):
QTableView.__init__(self, parent)
self._parent = parent
self.delete_queue = []
self.source_model = LSPServersModel(self)
self.setModel(self.source_model)
self.setItemDelegateForColumn(CMD, ItemDelegate(self))
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.setSortingEnabled(True)
self.setEditTriggers(QAbstractItemView.AllEditTriggers)
self.selectionModel().selectionChanged.connect(self.selection)
self.verticalHeader().hide()
self.load_servers()
def focusOutEvent(self, e):
"""Qt Override."""
# self.source_model.update_active_row()
# self._parent.delete_btn.setEnabled(False)
super().focusOutEvent(e)
def focusInEvent(self, e):
"""Qt Override."""
super().focusInEvent(e)
self.selectRow(self.currentIndex().row())
def selection(self, index):
"""Update selected row."""
self.update()
self.isActiveWindow()
self._parent.delete_btn.setEnabled(True)
def adjust_cells(self):
"""Adjust column size based on contents."""
self.resizeColumnsToContents()
fm = self.horizontalHeader().fontMetrics()
names = [fm.width(s.cmd) for s in self.source_model.servers]
if names:
self.setColumnWidth(CMD, max(names))
self.horizontalHeader().setStretchLastSection(True)
def get_server_by_lang(self, lang):
return self.source_model.server_map.get(lang)
def load_servers(self):
servers = list(iter_servers(self._parent.get_option,
self._parent.set_option,
self._parent.remove_option))
for i, server in enumerate(servers):
server.index = i
server.language = LSP_LANGUAGE_NAME[server.language.lower()]
server_map = {x.language: x for x in servers}
self.source_model.servers = servers
self.source_model.server_map = server_map
self.source_model.reset()
self.adjust_cells()
self.sortByColumn(LANGUAGE, Qt.AscendingOrder)
def save_servers(self):
language_set = set({})
for server in self.source_model.servers:
language_set |= {server.language.lower()}
server.save()
while len(self.delete_queue) > 0:
server = self.delete_queue.pop(0)
language_set |= {server.language.lower()}
server.delete()
return language_set
def delete_server(self, idx):
server = self.source_model.servers.pop(idx)
self.delete_queue.append(server)
self.source_model.server_map.pop(server.language)
self.source_model.reset()
self.adjust_cells()
self.sortByColumn(LANGUAGE, Qt.AscendingOrder)
def delete_server_by_lang(self, language):
idx = next((i for i, x in enumerate(self.source_model.servers)
if x.language == language), None)
if idx is not None:
self.delete_server(idx)
def show_editor(self, new_server=False):
server = LSPServer(get_option=self._parent.get_option,
set_option=self._parent.set_option,
remove_option=self._parent.remove_option)
if not new_server:
idx = self.currentIndex().row()
server = self.source_model.row(idx)
dialog = LSPServerEditor(self, **server.__dict__)
if dialog.exec_():
server = dialog.get_options()
self.source_model.server_map[server.language] = server
self.source_model.servers = list(
self.source_model.server_map.values())
self.source_model.reset()
self.adjust_cells()
self.sortByColumn(LANGUAGE, Qt.AscendingOrder)
self._parent.set_modified(True)
def next_row(self):
"""Move to next row from currently selected row."""
row = self.currentIndex().row()
rows = self.source_model.rowCount()
if row + 1 == rows:
row = -1
self.selectRow(row + 1)
def previous_row(self):
"""Move to previous row from currently selected row."""
row = self.currentIndex().row()
rows = self.source_model.rowCount()
if row == 0:
row = rows
self.selectRow(row - 1)
def keyPressEvent(self, event):
"""Qt Override."""
key = event.key()
if key in [Qt.Key_Enter, Qt.Key_Return]:
self.show_editor()
elif key in [Qt.Key_Backtab]:
self.parent().reset_btn.setFocus()
elif key in [Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right]:
super().keyPressEvent(event)
else:
super().keyPressEvent(event)
def mouseDoubleClickEvent(self, event):
"""Qt Override."""
self.show_editor()
| LSPServerTable |
python | getsentry__sentry | src/sentry/api/endpoints/organization_traces.py | {
"start": 2749,
"end": 3038
} | class ____(TypedDict):
trace: str
numErrors: int
numOccurrences: int
numSpans: int
matchingSpans: int
project: str | None
name: str | None
rootDuration: float | None
duration: int
start: int
end: int
breakdowns: list[TraceInterval]
| TraceResult |
python | mlflow__mlflow | mlflow/types/responses_helpers.py | {
"start": 7830,
"end": 9661
} | class ____(Truncation, ToolChoice):
id: str | None = None
created_at: float | None = None
error: ResponseError | None = None
incomplete_details: IncompleteDetails | None = None
instructions: str | None = None
metadata: dict[str, str] | None = None
model: str | None = None
object: str = "response"
output: list[OutputItem]
parallel_tool_calls: bool | None = None
temperature: float | None = None
tools: list[Tool] | None = None
top_p: float | None = None
max_output_tokens: int | None = None
previous_response_id: str | None = None
reasoning: ReasoningParams | None = None
status: str | None = None
text: Any | None = None
usage: ResponseUsage | None = None
user: str | None = None
@property
def output_text(self) -> str:
"""Convenience property that aggregates all `output_text` items from the `output`
list.
If no `output_text` content blocks exist, then an empty string is returned.
"""
texts: list[str] = []
for output in self.output:
if output.type == "message":
texts.extend(
content.text for content in output.content if content.type == "output_text"
)
return "".join(texts)
@model_validator(mode="after")
def check_status(self) -> "Response":
if self.status and self.status not in {
"completed",
"failed",
"in_progress",
"incomplete",
}:
warnings.warn(
f"Invalid status: {self.status}. Must be 'completed', 'failed', "
"'in_progress', or 'incomplete'."
)
return self
#################################
# ResponsesRequest helper classes
#################################
| Response |
python | ray-project__ray | python/ray/dashboard/modules/aggregator/publisher/async_publisher_client.py | {
"start": 1782,
"end": 4820
} | class ____(PublisherClientInterface):
"""Client for publishing ray event batches to an external HTTP service."""
def __init__(
self,
endpoint: str,
executor: ThreadPoolExecutor,
events_filter_fn: Callable[[object], bool],
timeout: float = PUBLISHER_TIMEOUT_SECONDS,
preserve_proto_field_name: bool = False,
) -> None:
self._endpoint = endpoint
self._executor = executor
self._events_filter_fn = events_filter_fn
self._timeout = aiohttp.ClientTimeout(total=timeout)
self._session = None
self._preserve_proto_field_name = preserve_proto_field_name
async def publish(self, batch: PublishBatch) -> PublishStats:
events_batch: list[events_base_event_pb2.RayEvent] = batch.events
if not events_batch:
# Nothing to publish -> success but nothing published
return PublishStats(True, 0, 0)
filtered = [e for e in events_batch if self._events_filter_fn(e)]
num_filtered_out = len(events_batch) - len(filtered)
if not filtered:
# All filtered out -> success but nothing published
return PublishStats(True, 0, num_filtered_out)
# Convert protobuf objects to python dictionaries for HTTP POST. Run in executor to avoid blocking the event loop.
filtered_json = await get_or_create_event_loop().run_in_executor(
self._executor,
lambda: [
json.loads(
message_to_json(
e,
always_print_fields_with_no_presence=True,
preserving_proto_field_name=self._preserve_proto_field_name,
)
)
for e in filtered
],
)
try:
# Create session on first use (lazy initialization)
if not self._session:
self._session = aiohttp.ClientSession(timeout=self._timeout)
return await self._send_http_request(filtered_json, num_filtered_out)
except Exception as e:
logger.error("Failed to send events to external service. Error: %s", e)
return PublishStats(False, 0, 0)
async def _send_http_request(self, json_data, num_filtered_out) -> PublishStats:
async with self._session.post(
self._endpoint,
json=json_data,
) as resp:
resp.raise_for_status()
return PublishStats(True, len(json_data), num_filtered_out)
async def close(self) -> None:
"""Closes the http session if one was created. Should be called when the publisherClient is no longer required"""
if self._session:
await self._session.close()
self._session = None
def set_session(self, session) -> None:
"""Inject an HTTP client session.
If a session is set explicitly, it will be used and managed by close().
"""
self._session = session
| AsyncHttpPublisherClient |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_snippets.py | {
"start": 29941,
"end": 30864
} | class ____(util.MdCase):
"""Test snippet URL cases no max size."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'url_download': True,
'url_max_size': 0
}
}
@patch('urllib.request.urlopen')
def test_content_length_zero(self, mock_urlopen):
"""Test empty content."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.read.return_value = b'contents'
cm.headers = {'content-length': str(1024 * 1024 * 48)}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'''
<p>contents</p>
''',
True
)
| TestURLSnippetsNoMax |
python | getsentry__sentry | tests/sentry/notifications/api/endpoints/test_user_notification_settings_options.py | {
"start": 2551,
"end": 8306
} | class ____(UserNotificationSettingsOptionsBaseTest):
method = "PUT"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
def test_simple(self) -> None:
response = self.get_success_response(
"me",
user_id=self.user.id,
scope_type="organization",
scope_identifier=self.organization.id,
type="alerts",
status_code=status.HTTP_201_CREATED,
value="always",
)
row = NotificationSettingOption.objects.get(
user_id=self.user.id,
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.ISSUE_ALERTS.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
assert response.data["id"] == str(row.id)
def test_user_scope(self) -> None:
notification_settings = [
NotificationSettingEnum.QUOTA,
NotificationSettingEnum.QUOTA_WARNINGS,
NotificationSettingEnum.QUOTA_THRESHOLDS,
NotificationSettingEnum.QUOTA_ERRORS,
NotificationSettingEnum.QUOTA_TRANSACTIONS,
NotificationSettingEnum.QUOTA_ATTACHMENTS,
NotificationSettingEnum.QUOTA_REPLAYS,
NotificationSettingEnum.QUOTA_MONITOR_SEATS,
NotificationSettingEnum.QUOTA_SPANS,
NotificationSettingEnum.QUOTA_LOG_BYTES,
NotificationSettingEnum.QUOTA_SEER_USERS,
]
# turn on notification settings
for setting in notification_settings:
response = self.get_success_response(
"me",
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type=setting.value,
status_code=status.HTTP_201_CREATED,
value="always",
)
record = NotificationSettingOption.objects.filter(
user_id=self.user.id,
scope_type=NotificationScopeEnum.USER.value,
scope_identifier=self.user.id,
type=setting.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
).get()
assert response.data == {
"id": str(record.id),
"scopeType": "user",
"scopeIdentifier": str(self.user.id),
"type": setting.value,
"value": "always",
"user_id": str(self.user.id),
"team_id": None,
}
# turn off notification settings
for setting in notification_settings:
response = self.get_success_response(
"me",
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type=setting.value,
status_code=status.HTTP_201_CREATED,
value="never",
)
record = NotificationSettingOption.objects.filter(
user_id=self.user.id,
scope_type=NotificationScopeEnum.USER.value,
scope_identifier=self.user.id,
type=setting.value,
value=NotificationSettingsOptionEnum.NEVER.value,
).get()
assert response.data == {
"id": str(record.id),
"scopeType": "user",
"scopeIdentifier": str(self.user.id),
"type": setting.value,
"value": "never",
"user_id": str(self.user.id),
"team_id": None,
}
def test_invalid_scope_type(self) -> None:
response = self.get_error_response(
"me",
user_id=self.user.id,
scope_type="invalid",
scope_identifier=self.organization.id,
type="alerts",
status_code=status.HTTP_400_BAD_REQUEST,
value="always",
)
assert response.data["scopeType"] == ["Invalid scope type"]
def test_invalid_value(self) -> None:
response = self.get_error_response(
"me",
user_id=self.user.id,
scope_type="organization",
scope_identifier=self.organization.id,
type="alerts",
status_code=status.HTTP_400_BAD_REQUEST,
value="hello",
)
assert response.data["value"] == ["Invalid value"]
def test_invalid_value_for_option(self) -> None:
response = self.get_error_response(
"me",
user_id=self.user.id,
scope_type="organization",
scope_identifier=self.organization.id,
type="alerts",
status_code=status.HTTP_400_BAD_REQUEST,
value=NotificationSettingsOptionEnum.SUBSCRIBE_ONLY.value,
)
assert response.data["nonFieldErrors"] == ["Invalid type for value"]
def test_reports(self) -> None:
response = self.get_success_response(
"me",
user_id=self.user.id,
scope_type="organization",
scope_identifier=self.organization.id,
type="reports",
status_code=status.HTTP_201_CREATED,
value="always",
)
row = NotificationSettingOption.objects.get(
user_id=self.user.id,
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.REPORTS.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
assert response.data["id"] == str(row.id)
| UserNotificationSettingsOptionsPutTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/collective_ops_test.py | {
"start": 55868,
"end": 61704
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
_setup_context()
def testGroupInitialization(self):
group_size = 2
group_key = 100
@def_function.function
def f():
with ops.device('CPU:0'):
_collective_ops.initialize_communicator(
group_key=group_key, rank=0, group_size=group_size)
with ops.device('CPU:1'):
_collective_ops.initialize_communicator(
group_key=group_key, rank=1, group_size=group_size)
# TODO(b/193864859): Add validation with reduction op.
self.evaluate(f())
@combinations.generate(device_combination)
def testAllReduceV3(self, device, communication):
group_size = 2
group_key = 101
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_reduce_v3(
group_handle0, [1.0], reduction='Add'))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_reduce_v3(
group_handle1, [2.0], reduction='Add'))
return collectives
for result in run_all_reduce_2devices():
self.assertAllClose(result, [3.], rtol=1e-5, atol=1e-5)
@combinations.generate(device_combination)
def testAllToAllV3(self, device, communication):
group_size = 2
group_key = 104
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_to_all_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle0, [1.0, 3.0]))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle1, [2.0, 4.0]))
return collectives
result = run_all_to_all_2devices()
self.assertAllClose(result[0], [1.0, 2.0], rtol=1e-5, atol=1e-5)
self.assertAllClose(result[1], [3.0, 4.0], rtol=1e-5, atol=1e-5)
@combinations.generate(device_combination)
def testAllToAllV3DifferentUserRank(self, device, communication):
group_size = 2
group_key = 105
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_to_all_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle0, [1.0, 3.0]))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle1, [2.0, 4.0]))
return collectives
result = run_all_to_all_2devices()
self.assertAllClose(result[0], [2.0, 1.0], rtol=1e-5, atol=1e-5)
self.assertAllClose(result[1], [4.0, 3.0], rtol=1e-5, atol=1e-5)
@combinations.generate(device_combination)
def testAllToAllV3DifferentUserRankWithTensorInput(self, device,
communication):
group_size = 2
group_key = 106
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_to_all_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle0,
constant_op.constant([1.0, 2.0])))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle1,
constant_op.constant([3.0, 4.0])))
return collectives
result = run_all_to_all_2devices()
# FIXME(b/214407359): This is correct.
# result[0] is rank 1 and shall have 4, 2.
self.assertAllClose(result[1], [4.0, 2.0], rtol=1e-5, atol=1e-5)
self.assertAllClose(result[0], [3.0, 1.0], rtol=1e-5, atol=1e-5)
def _setup_context(num_devices=4):
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', num_devices)
context.ensure_initialized()
context.set_log_device_placement(True)
if __name__ == '__main__':
os.environ['NCCL_DEBUG'] = 'INFO'
v2_compat.enable_v2_behavior()
test.main()
| CollectiveOpsV3Test |
python | kamyu104__LeetCode-Solutions | Python/power-of-three.py | {
"start": 43,
"end": 356
} | class ____(object):
def __init__(self):
self.__max_log3 = int(math.log(0x7fffffff) / math.log(3))
self.__max_pow3 = 3 ** self.__max_log3
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
return n > 0 and self.__max_pow3 % n == 0
| Solution |
python | scipy__scipy | scipy/optimize/tests/test_isotonic_regression.py | {
"start": 184,
"end": 7113
} | class ____:
@pytest.mark.parametrize(
("y", "w", "msg"),
[
([[0, 1]], None,
"array has incorrect number of dimensions: 2; expected 1"),
([0, 1], [[1, 2]],
"Input arrays y and w must have one dimension of equal length"),
([0, 1], [1],
"Input arrays y and w must have one dimension of equal length"),
(1, [1, 2],
"Input arrays y and w must have one dimension of equal length"),
([1, 2], 1,
"Input arrays y and w must have one dimension of equal length"),
([0, 1], [0, 1],
"Weights w must be strictly positive"),
]
)
def test_raise_error(self, y, w, msg):
with pytest.raises(ValueError, match=msg):
isotonic_regression(y=y, weights=w)
def test_simple_pava(self):
# Test case of Busing 2020
# https://doi.org/10.18637/jss.v102.c01
y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64)
w = np.ones_like(y)
r = np.full(shape=y.shape[0] + 1, fill_value=-1, dtype=np.intp)
pava(y, w, r)
assert_allclose(y, [4, 4, 4, 4, 4, 4, 8])
# Only first 2 elements of w are changed.
assert_allclose(w, [6, 1, 1, 1, 1, 1, 1])
# Only first 3 elements of r are changed.
assert_allclose(r, [0, 6, 7, -1, -1, -1, -1, -1])
@pytest.mark.parametrize("y_dtype", [np.float64, np.float32, np.int64, np.int32])
@pytest.mark.parametrize("w_dtype", [np.float64, np.float32, np.int64, np.int32])
@pytest.mark.parametrize("w", [None, "ones"])
def test_simple_isotonic_regression(self, w, w_dtype, y_dtype):
# Test case of Busing 2020
# https://doi.org/10.18637/jss.v102.c01
y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=y_dtype)
if w is not None:
w = np.ones_like(y, dtype=w_dtype)
res = isotonic_regression(y, weights=w)
assert res.x.dtype == np.float64
assert res.weights.dtype == np.float64
assert_allclose(res.x, [4, 4, 4, 4, 4, 4, 8])
assert_allclose(res.weights, [6, 1])
assert_allclose(res.blocks, [0, 6, 7])
# Assert that y was not overwritten
assert_equal(y, np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64))
@pytest.mark.parametrize("increasing", [True, False])
def test_linspace(self, increasing):
n = 10
y = np.linspace(0, 1, n) if increasing else np.linspace(1, 0, n)
res = isotonic_regression(y, increasing=increasing)
assert_allclose(res.x, y)
assert_allclose(res.blocks, np.arange(n + 1))
def test_weights(self):
w = np.array([1, 2, 5, 0.5, 0.5, 0.5, 1, 3])
y = np.array([3, 2, 1, 10, 9, 8, 20, 10])
res = isotonic_regression(y, weights=w)
assert_allclose(res.x, [12/8, 12/8, 12/8, 9, 9, 9, 50/4, 50/4])
assert_allclose(res.weights, [8, 1.5, 4])
assert_allclose(res.blocks, [0, 3, 6, 8])
# weights are like repeated observations, we repeat the 3rd element 5
# times.
w2 = np.array([1, 2, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 1, 3])
y2 = np.array([3, 2, 1, 1, 1, 1, 1, 10, 9, 8, 20, 10])
res2 = isotonic_regression(y2, weights=w2)
assert_allclose(np.diff(res2.x[0:7]), 0)
assert_allclose(res2.x[4:], res.x)
assert_allclose(res2.weights, res.weights)
assert_allclose(res2.blocks[1:] - 4, res.blocks[1:])
def test_against_R_monotone(self):
y = [0, 6, 8, 3, 5, 2, 1, 7, 9, 4]
res = isotonic_regression(y)
# R code
# library(monotone)
# options(digits=8)
# monotone(c(0, 6, 8, 3, 5, 2, 1, 7, 9, 4))
x_R = [
0, 4.1666667, 4.1666667, 4.1666667, 4.1666667, 4.1666667,
4.1666667, 6.6666667, 6.6666667, 6.6666667,
]
assert_allclose(res.x, x_R)
assert_equal(res.blocks, [0, 1, 7, 10])
n = 100
y = np.linspace(0, 1, num=n, endpoint=False)
y = 5 * y + np.sin(10 * y)
res = isotonic_regression(y)
# R code
# library(monotone)
# n <- 100
# y <- 5 * ((1:n)-1)/n + sin(10 * ((1:n)-1)/n)
# options(digits=8)
# monotone(y)
x_R = [
0.00000000, 0.14983342, 0.29866933, 0.44552021, 0.58941834, 0.72942554,
0.86464247, 0.99421769, 1.11735609, 1.23332691, 1.34147098, 1.44120736,
1.53203909, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
1.57081100, 1.57081100, 1.57081100, 1.62418532, 1.71654534, 1.81773256,
1.92723551, 2.04445967, 2.16873336, 2.29931446, 2.43539782, 2.57612334,
2.72058450, 2.86783750, 3.01691060, 3.16681390, 3.31654920, 3.46511999,
3.61154136, 3.75484992, 3.89411335, 4.02843976, 4.15698660, 4.27896904,
4.39366786, 4.50043662, 4.59870810, 4.68799998, 4.76791967, 4.83816823,
4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
4.86564130, 4.86564130, 4.86564130, 4.86564130,
]
assert_allclose(res.x, x_R)
# Test increasing
assert np.all(np.diff(res.x) >= 0)
# Test balance property: sum(y) == sum(x)
assert_allclose(np.sum(res.x), np.sum(y))
# Reverse order
res_inv = isotonic_regression(-y, increasing=False)
assert_allclose(-res_inv.x, res.x)
assert_equal(res_inv.blocks, res.blocks)
def test_readonly(self):
x = np.arange(3, dtype=float)
w = np.ones(3, dtype=float)
x.flags.writeable = False
w.flags.writeable = False
res = isotonic_regression(x, weights=w)
assert np.all(np.isfinite(res.x))
assert np.all(np.isfinite(res.weights))
assert np.all(np.isfinite(res.blocks))
def test_non_contiguous_arrays(self):
x = np.arange(10, dtype=float)[::3]
w = np.ones(10, dtype=float)[::3]
assert not x.flags.c_contiguous
assert not x.flags.f_contiguous
assert not w.flags.c_contiguous
assert not w.flags.f_contiguous
res = isotonic_regression(x, weights=w)
assert np.all(np.isfinite(res.x))
assert np.all(np.isfinite(res.weights))
assert np.all(np.isfinite(res.blocks))
| TestIsotonicRegression |
python | django__django | tests/admin_scripts/app_with_import/models.py | {
"start": 186,
"end": 291
} | class ____(models.Model):
user = models.OneToOneField(User, models.CASCADE, primary_key=True)
| UserProfile |
python | kamyu104__LeetCode-Solutions | Python/array-partition-i.py | {
"start": 67,
"end": 549
} | class ____(object):
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
LEFT, RIGHT = -10000, 10000
lookup = [0] * (RIGHT-LEFT+1)
for num in nums:
lookup[num-LEFT] += 1
r, result = 0, 0
for i in xrange(LEFT, RIGHT+1):
result += (lookup[i-LEFT] + 1 - r) / 2 * i
r = (lookup[i-LEFT] + r) % 2
return result
# Time: O(nlogn)
# Space: O(1)
| Solution |
python | jamielennox__requests-mock | requests_mock/response.py | {
"start": 1177,
"end": 3075
} | class ____(RequestsCookieJar):
def set(self, name, value, **kwargs):
"""Add a cookie to the Jar.
:param str name: cookie name/key.
:param str value: cookie value.
:param int version: Integer or None. Netscape cookies have version 0.
RFC 2965 and RFC 2109 cookies have a version cookie-attribute of 1.
However, note that cookielib may 'downgrade' RFC 2109 cookies to
Netscape cookies, in which case version is 0.
:param str port: String representing a port or a set of ports
(eg. '80', or '80,8080'),
:param str domain: The domain the cookie should apply to.
:param str path: Cookie path (a string, eg. '/acme/rocket_launchers').
:param bool secure: True if cookie should only be returned over a
secure connection.
:param int expires: Integer expiry date in seconds since epoch or None.
:param bool discard: True if this is a session cookie.
:param str comment: String comment from the server explaining the
function of this cookie.
:param str comment_url: URL linking to a comment from the server
explaining the function of this cookie.
"""
# just here to provide the function documentation
return super(CookieJar, self).set(name, value, **kwargs)
def _check_body_arguments(**kwargs):
# mutual exclusion, only 1 body method may be provided
provided = [x for x in _BODY_ARGS if kwargs.pop(x, None) is not None]
if len(provided) > 1:
raise RuntimeError('You may only supply one body element. You '
'supplied %s' % ', '.join(provided))
extra = [x for x in kwargs if x not in _HTTP_ARGS]
if extra:
raise TypeError('Too many arguments provided. Unexpected '
'arguments %s.' % ', '.join(extra))
| CookieJar |
python | walkccc__LeetCode | solutions/3119. Maximum Number of Potholes That Can Be Fixed/3119.py | {
"start": 0,
"end": 312
} | class ____:
def maxPotholes(self, road: str, budget: int) -> int:
ans = 0
for length in sorted(map(len, road.split('.')), reverse=True):
canRepair = max(0, budget - 1)
if length > canRepair:
return ans + canRepair
ans += length
budget -= length + 1
return ans
| Solution |
python | gevent__gevent | src/gevent/_threading.py | {
"start": 4374,
"end": 8342
} | class ____(object):
"""
Create a queue object.
The queue is always infinite size.
"""
__slots__ = ('_queue', '_mutex', '_not_empty', 'unfinished_tasks')
def __init__(self):
self._queue = deque()
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self._mutex = Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self._not_empty = _Condition(self._mutex)
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
with self._mutex:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError(
'task_done() called too many times; %s remaining tasks' % (
self.unfinished_tasks
)
)
self.unfinished_tasks = unfinished
def qsize(self, len=len):
"""Return the approximate size of the queue (not reliable!)."""
return len(self._queue)
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!)."""
return not self.qsize()
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!)."""
return False
def put(self, item):
"""Put an item into the queue.
"""
with self._mutex:
self._queue.append(item)
self.unfinished_tasks += 1
self._not_empty.notify_one()
def get(self, cookie, timeout=-1):
"""
Remove and return an item from the queue.
If *timeout* is given, and is not -1, then we will
attempt to wait for only that many seconds to get an item.
If those seconds elapse and no item has become available,
raises :class:`EmptyTimeout`.
"""
with self._mutex:
while not self._queue:
# Temporarily release our mutex and wait for someone
# to wake us up. There *should* be an item in the queue
# after that.
notified = self._not_empty.wait(cookie, timeout)
# Ok, we're holding the mutex again, so our state is guaranteed stable.
# It's possible that in the brief window where we didn't hold the lock,
# someone put something in the queue, and if so, we can take it.
if not notified and not self._queue:
raise EmptyTimeout
item = self._queue.popleft()
return item
def allocate_cookie(self):
"""
Create and return the *cookie* to pass to `get()`.
Each thread that will use `get` needs a distinct cookie.
"""
return Lock()
def kill(self):
"""
Call to destroy this object.
Use this when it's not possible to safely drain the queue, e.g.,
after a fork when the locks are in an uncertain state.
"""
self._queue = None
self._mutex = None
self._not_empty = None
self.unfinished_tasks = None
| Queue |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 102336,
"end": 102622
} | class ____(nn.Module):
def __init__(self, num_embeddings, embedding_dim):
super().__init__()
self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode="sum")
def forward(self, indices, offsets):
return self.emb_bag(indices, offsets)
| EmbBagWrapper |
python | davidhalter__jedi | test/completion/fstring.py | {
"start": 0,
"end": 359
} | class ____:
bar = 1
#? 10 int()
f'{Foo.bar}'
#? 10 ['bar']
f'{Foo.bar}'
#? 10 int()
Fr'{Foo.bar'
#? 10 ['bar']
Fr'{Foo.bar'
#? int()
Fr'{Foo.bar
#? ['bar']
Fr'{Foo.bar
#? ['Exception']
F"{Excepti
#? 8 Foo
Fr'a{Foo.bar'
#? str()
Fr'sasdf'
#? 7 str()
Fr'''sasdf''' + ''
#? ['upper']
f'xyz'.uppe
#? 3 []
f'f'
# Github #1248
#? int()
{"foo": 1}[f"foo"]
| Foo |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 20532,
"end": 21002
} | class ____(DagsterError):
"""Indicates an error while attempting to launch a pipeline run."""
def __init__(self, *args, **kwargs):
from dagster._utils.error import SerializableErrorInfo
self.serializable_error_info = check.opt_inst_param(
kwargs.pop("serializable_error_info", None),
"serializable_error_info",
SerializableErrorInfo,
)
super().__init__(*args, **kwargs)
| DagsterLaunchFailedError |
python | huggingface__transformers | src/transformers/models/led/modeling_led.py | {
"start": 109384,
"end": 115850
} | class ____(LEDPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.led = LEDModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
global_attention_mask: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], LEDSeq2SeqQuestionAnsweringModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention for the encoder.
Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if start_positions is not None and end_positions is not None:
use_cache = False
outputs = self.led(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
global_attention_mask=global_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (
start_logits,
end_logits,
) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return LEDSeq2SeqQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
encoder_global_attentions=outputs.encoder_global_attentions,
)
__all__ = [
"LEDForConditionalGeneration",
"LEDForQuestionAnswering",
"LEDForSequenceClassification",
"LEDModel",
"LEDPreTrainedModel",
]
| LEDForQuestionAnswering |
python | pyinstaller__pyinstaller | PyInstaller/utils/win32/versioninfo.py | {
"start": 14181,
"end": 15602
} | class ____:
"""
WORD wLength;
WORD wValueLength;
WORD wType;
WCHAR szKey[];
WORD Padding[];
String Value[];
"""
def __init__(self, name=None, val=None):
self.name = name or ''
self.val = val or ''
def fromRaw(self, data, i, limit):
i, (sublen, vallen, typ, self.name) = parseCommon(data, i)
limit = i + sublen
i = nextDWord(i)
i, self.val = parseUString(data, i, limit)
return i
def toRaw(self):
raw_name = getRaw(self.name)
raw_val = getRaw(self.val)
# TODO: document the size of vallen and sublen.
vallen = len(self.val) + 1 # Number of (wide-)characters, not bytes!
typ = 1
sublen = 6 + len(raw_name) + 2
pad = b''
if sublen % 4:
pad = b'\000\000'
sublen = sublen + len(pad) + (vallen * 2)
return struct.pack('HHH', sublen, vallen, typ) + raw_name + b'\000\000' + pad + raw_val + b'\000\000'
def __eq__(self, other):
return self.toRaw() == other
def __str__(self, indent=''):
return "StringStruct(%r, %r)" % (self.name, self.val)
def __repr__(self):
return 'versioninfo.StringStruct(%r, %r)' % (self.name, self.val)
def parseCodePage(data, i, limit):
i, (sublen, wValueLength, wType, nm) = parseCommon(data, i)
return i, (sublen, wValueLength, wType, nm)
| StringStruct |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mssql/pymssql.py | {
"start": 1371,
"end": 4097
} | class ____(MSDialect):
supports_statement_cache = True
supports_native_decimal = True
supports_native_uuid = True
driver = "pymssql"
preparer = MSIdentifierPreparer_pymssql
colspecs = util.update_copy(
MSDialect.colspecs,
{sqltypes.Numeric: _MSNumeric_pymssql, sqltypes.Float: sqltypes.Float},
)
@classmethod
def import_dbapi(cls):
module = __import__("pymssql")
# pymmsql < 2.1.1 doesn't have a Binary method. we use string
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (2, 1, 1):
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, "decode") else str(x)
if client_ver < (1,):
util.warn(
"The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI."
)
return module
def _get_server_version_info(self, connection):
vers = connection.exec_driver_sql("select @@version").scalar()
m = re.match(r"Microsoft .*? - (\d+)\.(\d+)\.(\d+)\.(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
port = opts.pop("port", None)
if port and "host" in opts:
opts["host"] = "%s:%s" % (opts["host"], port)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
"The server failed to resume the transaction",
):
if msg in str(e):
return True
else:
return False
def get_isolation_level_values(self, dbapi_connection):
return super().get_isolation_level_values(dbapi_connection) + [
"AUTOCOMMIT"
]
def set_isolation_level(self, dbapi_connection, level):
if level == "AUTOCOMMIT":
dbapi_connection.autocommit(True)
else:
dbapi_connection.autocommit(False)
super().set_isolation_level(dbapi_connection, level)
dialect = MSDialect_pymssql
| MSDialect_pymssql |
python | viewflow__viewflow | viewflow/workflow/flow/views/actions.py | {
"start": 1701,
"end": 2539
} | class ____(
mixins.SuccessMessageMixin,
mixins.TaskViewTemplateNames,
generic.FormView,
):
"""
Default unassign view for flow task.
Get confirmation from user, and unassign task
"""
form_class = forms.Form
template_filename = "task_cancel.html"
success_message = _("Task {task} has been canceled.")
def form_valid(self, *args, **kwargs):
"""If the form is valid, save the associated model and cancels the task."""
self.request.activation.cancel()
return super().form_valid(*args, **kwargs)
def get_success_url(self):
"""Continue on task or redirect back to task list."""
activation = self.request.activation
return activation.flow_task.reverse(
"detail", args=[activation.process.pk, activation.task.pk]
)
| CancelTaskView |
python | ZoranPandovski__al-go-rithms | data_structures/b_tree/Python/tree_size.py | {
"start": 122,
"end": 557
} | class ____():
def tree_size(self,root):
if root == None:
return 0
return ( 1 + self.tree_size(root.left) + self.tree_size(root.right))
## Testcases ###
# tsize = TreeSize()
# root = None
# obj = Tree()
#
#
# # Create a sample tree
# for i in range(10):
# rand = random.randrange(1,20,2)
# root = obj.insert(root,rand)
#
# print tsize.tree_size(root)
| TreeSize |
python | kamyu104__LeetCode-Solutions | Python/stream-of-characters.py | {
"start": 2812,
"end": 3267
} | class ____(object):
def __init__(self, words):
"""
:type words: List[str]
"""
self.__trie = AhoTrie(words)
def query(self, letter): # O(m) times
"""
:type letter: str
:rtype: bool
"""
return len(self.__trie.step(letter)) > 0
# Your StreamChecker object will be instantiated and called as such:
# obj = StreamChecker(words)
# param_1 = obj.query(letter)
| StreamChecker |
python | joke2k__faker | tests/providers/test_internet.py | {
"start": 1045,
"end": 16187
} | class ____:
"""Test internet provider methods"""
num_samples = 100
ipv4_pattern: Pattern = re.compile(
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}" r"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
)
ipv4_network_pattern: Pattern = re.compile(
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"
r"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
r"/(?:\d|[12]\d|3[0-2])$",
)
def test_email(self, faker, num_samples):
for _ in range(num_samples):
email = faker.email()
assert "@" in email
def test_safe_default_email(self, faker, num_samples):
expected_domains = ["example.com", "example.org", "example.net"]
for _ in range(num_samples):
email = faker.email()
assert email.split("@")[1] in expected_domains
def test_unsafe_email(self, faker, num_samples):
not_expected_domains = ["example.com", "example.org", "example.net"]
for _ in range(num_samples):
email = faker.email(safe=False)
assert email.split("@")[1] not in not_expected_domains
def test_email_with_domain(self, faker):
domain = "example.com"
email = faker.email(domain=domain)
assert email.split("@")[1] == domain
def test_safe_email(self, faker, num_samples):
expected_domains = ["example.com", "example.org", "example.net"]
for _ in range(num_samples):
email = faker.safe_email()
assert email.split("@")[1] in expected_domains
def test_safe_domain_names(self, faker, num_samples):
expected_domains = ["example.com", "example.org", "example.net"]
for _ in range(num_samples):
safe_domain_name = faker.safe_domain_name()
assert safe_domain_name in expected_domains
@patch(
"faker.providers.internet.Provider.image_placeholder_services",
{"https://dummyimage.com/{width}x{height}"},
)
def test_image_url(self, faker):
my_width = 500
my_height = 1024
url = faker.image_url(my_width, my_height)
assert f"https://dummyimage.com/{my_width}x{my_height}" == url
url = faker.image_url()
assert "https://dummyimage.com/" in url
url = faker.image_url(placeholder_url="https://example.com/{width}/height")
assert url.startswith("https://example.com/")
def test_hostname(self, faker):
hostname_1_level = faker.hostname(levels=1)
hostname_parts = hostname_1_level.split(".")
assert hostname_1_level and isinstance(hostname_1_level, str)
assert len(hostname_parts) == 3
hostname_0_level = faker.hostname(levels=0)
assert hostname_0_level and isinstance(hostname_0_level, str)
def test_ipv4(self, faker, num_samples):
for _ in range(num_samples):
address = faker.ipv4()
assert 7 <= len(address) <= 15
assert self.ipv4_pattern.fullmatch(address)
for _ in range(num_samples):
address = faker.ipv4(network=True)
assert 9 <= len(address) <= 18
assert self.ipv4_network_pattern.fullmatch(address)
for _ in range(num_samples):
address = faker.ipv4(private=True)
assert 7 <= len(address) <= 15
assert self.ipv4_pattern.fullmatch(address)
assert ip_address(address).is_private
for _ in range(num_samples):
address = faker.ipv4(private=False)
assert 7 <= len(address) <= 15
assert self.ipv4_pattern.fullmatch(address)
assert not ip_address(address).is_private
def test_ipv4_caching(self, faker):
from faker.providers.internet import _IPv4Constants
# The extra [None] here is to test code path involving whole IPv4 pool
for address_class in list(_IPv4Constants._network_classes.keys()) + [None]:
if address_class is None:
networks_attr = "_cached_all_networks"
else:
networks_attr = f"_cached_all_class_{address_class}_networks"
weights_attr = f"{networks_attr}_weights"
provider = InternetProvider(faker)
# First, test cache creation
assert not hasattr(provider, networks_attr)
assert not hasattr(provider, weights_attr)
provider.ipv4(address_class=address_class)
assert hasattr(provider, networks_attr)
assert hasattr(provider, weights_attr)
# Then, test cache access on subsequent calls
with patch.object(
InternetProvider, networks_attr, create=True, new_callable=PropertyMock
) as mock_networks_cache:
with patch.object(
InternetProvider,
weights_attr,
create=True,
new_callable=PropertyMock,
) as mock_weights_cache:
# Keep test fast by patching the cache attributes to return something simple
mock_networks_cache.return_value = [ip_network("10.0.0.0/24")]
mock_weights_cache.return_value = [10]
for _ in range(100):
provider.ipv4(address_class=address_class)
# Python's hasattr() internally calls getattr()
# So each call to ipv4() accesses the cache attributes twice
assert mock_networks_cache.call_count == 200
assert mock_weights_cache.call_count == 200
def test_ipv4_network_class(self, faker, num_samples):
for _ in range(num_samples):
klass = faker.ipv4_network_class()
assert klass in "abc"
def test_ipv4_private(self, faker, num_samples):
for _ in range(num_samples):
address = faker.ipv4_private()
assert 7 <= len(address) <= 15
assert self.ipv4_pattern.fullmatch(address)
assert ip_address(address).is_private
for _ in range(num_samples):
address = faker.ipv4_private(network=True)
assert 9 <= len(address) <= 18
assert self.ipv4_network_pattern.fullmatch(address)
assert ip_network(address)[0].is_private
def test_ipv4_private_class(self, faker, num_samples):
from faker.providers.internet import _IPv4Constants
for clas in "abc":
class_network = _IPv4Constants._network_classes[clas]
class_min = class_network.network_address
class_max = class_network.broadcast_address
for _ in range(num_samples):
address = faker.ipv4_private(address_class=clas)
assert 7 <= len(address) <= 15
assert self.ipv4_pattern.fullmatch(address)
assert ip_address(address).is_private
assert class_min <= ip_address(address) <= class_max
def test_ipv4_public_caching(self, faker):
from faker.providers.internet import _IPv4Constants
for address_class in _IPv4Constants._network_classes.keys():
networks_attr = f"_cached_public_class_{address_class}_networks"
weights_attr = f"{networks_attr}_weights"
provider = InternetProvider(faker)
# First, test cache creation
assert not hasattr(provider, networks_attr)
assert not hasattr(provider, weights_attr)
provider.ipv4_public(address_class=address_class)
assert hasattr(provider, networks_attr)
assert hasattr(provider, weights_attr)
# Then, test cache access on subsequent calls
with patch.object(
InternetProvider, networks_attr, create=True, new_callable=PropertyMock
) as mock_networks_cache:
with patch.object(
InternetProvider,
weights_attr,
create=True,
new_callable=PropertyMock,
) as mock_weights_cache:
# Keep test fast by patching the cache attributes to return something simple
mock_networks_cache.return_value = [ip_network("10.0.0.0/24")]
mock_weights_cache.return_value = [10]
for _ in range(100):
provider.ipv4_public(address_class=address_class)
# Python's hasattr() internally calls getattr()
# So each call to ipv4_public() accesses the cache attributes twice
assert mock_networks_cache.call_count == 200
assert mock_weights_cache.call_count == 200
def test_ipv4_public(self, faker, num_samples):
for _ in range(num_samples):
address = faker.ipv4_public()
assert 7 <= len(address) <= 15
assert self.ipv4_pattern.fullmatch(address)
assert not ip_address(address).is_private
for _ in range(num_samples):
address = faker.ipv4_public(network=True)
assert 9 <= len(address) <= 18
assert self.ipv4_network_pattern.fullmatch(address)
# Hack around ipaddress module
# As 192.0.0.0 is net addr of many 192.0.0.0/* nets
# ipaddress considers them as private
if ip_network(address).network_address != ip_address("192.0.0.0"):
assert not ip_network(address)[0].is_private
def test_ipv4_public_class(self, faker, num_samples):
from faker.providers.internet import _IPv4Constants
for clas in "abc":
class_network = _IPv4Constants._network_classes[clas]
class_min = class_network.network_address
class_max = class_network.broadcast_address
for _ in range(num_samples):
address = faker.ipv4_public(address_class=clas)
assert 7 <= len(address) <= 15
assert not ip_address(address).is_private
assert class_min <= ip_address(address) <= class_max
assert self.ipv4_pattern.fullmatch(address)
def test_ipv4_distribution_selection(self):
from faker.generator import Generator, random
from faker.utils.distribution import choices_distribution
provider = InternetProvider(Generator())
subnets = [ip_network("10.0.0.0/8"), ip_network("11.0.0.0/8")]
valid_weights = [1, 1]
list_of_invalid_weights = [
[1, 2, 3], # List size does not match subnet list size
["a", "b"], # List size matches, but elements are invalid
11, # Not a list or valid iterable
]
with patch("faker.providers.internet.choices_distribution", wraps=choices_distribution) as mock_choices_fn:
with patch("faker.generator.random.choice", wraps=random.choice) as mock_random_choice:
# If weights argument is valid, only `choices_distribution` should be called
provider._random_ipv4_address_from_subnets(subnets, valid_weights)
assert mock_choices_fn.call_count == 1
assert mock_random_choice.call_count == 0
# If weights argument is invalid, calls to `choices_distribution` will fail
# and calls to `random.choice` will be made as failover behavior
for invalid_weights in list_of_invalid_weights:
# Reset mock objects for each iteration
mock_random_choice.reset_mock()
mock_choices_fn.reset_mock()
provider._random_ipv4_address_from_subnets(subnets, invalid_weights)
assert mock_choices_fn.call_count == 0
assert mock_random_choice.call_count == 1
def test_ipv6(self, faker, num_samples):
provider = InternetProvider(faker)
for _ in range(num_samples):
address = provider.ipv6()
assert len(address) >= 3 # ::1
assert len(address) <= 39
assert re.compile(r"^([0-9a-f]{0,4}:){2,7}[0-9a-f]{1,4}$").search(address)
for _ in range(num_samples):
address = provider.ipv6(network=True)
assert len(address) >= 4 # ::/8
assert len(address) <= 39 + 4
assert re.compile(r"^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$").search(address)
def test_mac_address(self, faker):
provider = InternetProvider(faker)
unicast_address = provider.mac_address()
assert int(unicast_address[0:2], base=16) % 2 == 0
multicast_address = provider.mac_address(multicast=True)
assert int(multicast_address[0:2], base=16) % 2 == 1
def test_port_number(self, faker, num_samples):
for _ in range(num_samples):
assert 0 <= faker.port_number() <= 65535
assert 0 <= faker.port_number(is_system=True) <= 1023
assert 1024 <= faker.port_number(is_user=True) <= 49151
assert 49152 <= faker.port_number(is_dynamic=True) <= 65535
def test_http_method(self, faker, num_samples):
expected_methods = [
"CONNECT",
"DELETE",
"GET",
"HEAD",
"OPTIONS",
"PATCH",
"POST",
"PUT",
"TRACE",
]
got_methods = set()
for _ in range(num_samples):
got_methods.add(faker.http_method())
assert expected_methods == sorted(got_methods)
def test_http_status_code(self, faker, num_samples):
provider = InternetProvider(faker)
status_code = provider.http_status_code()
assert isinstance(status_code, int)
assert 100 <= status_code <= 599
status_code = provider.http_status_code(include_unassigned=False)
assert isinstance(status_code, int)
assert 100 <= status_code <= 599
assert status_code in InternetProvider.http_assigned_codes
def test_dga(self, faker):
assert faker.dga() != faker.dga()
expected_domain = "cqphixmpdfpptskr.com"
assert faker.dga(day=1, month=1, year=1000, tld="com", length=16) == expected_domain
def test_iana_id(self, faker, num_samples):
for _ in range(num_samples):
assert 1 <= int(faker.iana_id()) <= 8888888
def test_ripe_id(self, faker, num_samples):
pattern: Pattern = re.compile(r"^ORG-[A-Z]{2,4}[1-9]\d{0,4}-RIPE$")
for _ in range(num_samples):
assert pattern.fullmatch(faker.ripe_id())
def test_nic_handles(self, faker, num_samples):
pattern: Pattern = re.compile(r"^[A-Z]{2,4}[1-9]\d{0,4}-[A-Z]*")
for _ in range(num_samples):
nhs = faker.nic_handles()
for nh in nhs:
assert pattern.fullmatch(nh)
nhs = faker.nic_handles(suffix="??", count=num_samples)
assert len(nhs) == num_samples
for nh in nhs:
assert pattern.fullmatch(nh)
with pytest.raises(ValueError):
faker.nic_handles(suffix="")
| TestInternetProvider |
python | huggingface__transformers | src/transformers/models/blt/configuration_blt.py | {
"start": 4888,
"end": 6541
} | class ____(PreTrainedConfig):
"""
Configuration class for the Blt Global Transformer component.
"""
model_type = "blt_global_transformer"
default_theta = 500000.0
def __init__(
self,
hidden_size: Optional[int] = 2048,
num_attention_heads: Optional[int] = 16,
num_key_value_heads: Optional[int] = None,
num_hidden_layers: Optional[int] = 25,
rms_norm_eps: Optional[float] = 1e-5,
dropout: Optional[float] = 0.0,
max_position_embeddings: Optional[int] = 4096,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
hidden_act: Optional[str] = "silu",
intermediate_size: Optional[int] = 5632,
initializer_range: Optional[float] = 0.02,
**kwargs,
):
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads or num_attention_heads
self.head_dim = hidden_size // num_attention_heads
self.intermediate_size = intermediate_size or int(8 * hidden_size / 3)
self.num_hidden_layers = num_hidden_layers
self.rms_norm_eps = rms_norm_eps
self.dropout = dropout
self.max_position_embeddings = max_position_embeddings
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rope_parameters = rope_parameters
# Remove tie_word_embeddings from kwargs to avoid duplicate parameter error
kwargs.pop("tie_word_embeddings", None)
super().__init__(**kwargs, tie_word_embeddings=False)
| BltGlobalTransformerConfig |
python | scikit-learn__scikit-learn | sklearn/linear_model/_least_angle.py | {
"start": 53445,
"end": 64381
} | class ____(Lars):
"""Cross-validated Least Angle Regression model.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
max_iter : int, default=500
Maximum number of iterations to perform.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation.
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
active_ : list of length n_alphas or list of such lists
Indices of active variables at the end of the path.
If this is a list of lists, the outer list length is `n_targets`.
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
lars_path : Compute Least Angle Regression or Lasso
path using LARS algorithm.
lasso_path : Compute Lasso path with coordinate descent.
Lasso : Linear Model trained with L1 prior as
regularizer (aka the Lasso).
LassoCV : Lasso linear model with iterative fitting
along a regularization path.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
LassoLarsIC : Lasso model fit with Lars using BIC
or AIC for model selection.
sklearn.decomposition.sparse_encode : Sparse coding.
Notes
-----
In `fit`, once the best parameter `alpha` is found through
cross-validation, the model is fit again using the entire training set.
Examples
--------
>>> from sklearn.linear_model import LarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)
>>> reg = LarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9996
>>> reg.alpha_
np.float64(0.2961)
>>> reg.predict(X[:1,])
array([154.3996])
"""
_parameter_constraints: dict = {
**Lars._parameter_constraints,
"max_iter": [Interval(Integral, 0, None, closed="left")],
"cv": ["cv_object"],
"max_n_alphas": [Interval(Integral, 1, None, closed="left")],
"n_jobs": [Integral, None],
}
for parameter in ["n_nonzero_coefs", "jitter", "fit_path", "random_state"]:
_parameter_constraints.pop(parameter)
method = "lar"
def __init__(
self,
*,
fit_intercept=True,
verbose=False,
max_iter=500,
precompute="auto",
cv=None,
max_n_alphas=1000,
n_jobs=None,
eps=np.finfo(float).eps,
copy_X=True,
):
self.max_iter = max_iter
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
super().__init__(
fit_intercept=fit_intercept,
verbose=verbose,
precompute=precompute,
n_nonzero_coefs=500,
eps=eps,
copy_X=copy_X,
fit_path=True,
)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.target_tags.multi_output = False
return tags
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, **params):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict, default=None
Parameters to be passed to the CV splitter.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns an instance of self.
"""
_raise_for_params(params, self, "fit")
X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
if _routing_enabled():
routed_params = process_routing(self, "fit", **params)
else:
routed_params = Bunch(splitter=Bunch(split={}))
# As we use cross-validation, the Gram matrix is not precomputed here
Gram = self.precompute
if hasattr(Gram, "__array__"):
warnings.warn(
'Parameter "precompute" cannot be an array in '
'%s. Automatically switch to "auto" instead.' % self.__class__.__name__
)
Gram = "auto"
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train],
y[train],
X[test],
y[test],
Gram=Gram,
copy=False,
method=self.method,
verbose=max(0, self.verbose - 1),
fit_intercept=self.fit_intercept,
max_iter=self.max_iter,
eps=self.eps,
positive=self.positive,
)
for train, test in cv.split(X, y, **routed_params.splitter.split)
)
all_alphas = np.concatenate(next(zip(*cv_paths)))
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, _, _, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.mse_path_ = mse_path
# Now compute the full model using best_alpha
# it will call a lasso internally when self if LassoLarsCV
# as self.method == 'lasso'
self._fit(
X,
y,
max_iter=self.max_iter,
alpha=best_alpha,
Xy=None,
fit_path=True,
)
return self
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self).add(
splitter=check_cv(self.cv),
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
return router
| LarsCV |
python | numba__numba | numba/tests/test_ssa.py | {
"start": 766,
"end": 962
} | class ____(TestCase):
def check_func(self, func, *args):
got = func(*copy.deepcopy(args))
exp = func.py_func(*copy.deepcopy(args))
self.assertEqual(got, exp)
| SSABaseTest |
python | patrick-kidger__equinox | equinox/_module/_module.py | {
"start": 14601,
"end": 23398
} | class ____(Hashable, metaclass=_ModuleMeta):
"""Base class. Create your model by inheriting from this.
This will make your model a
[dataclass](https://docs.python.org/3/library/dataclasses.html) and a
[pytree](https://jax.readthedocs.io/en/latest/pytrees.html).
**Fields**
Specify all its fields at the class level (identical to
[dataclasses](https://docs.python.org/3/library/dataclasses.html)). This defines
its children as a PyTree.
```python
class MyModule(equinox.Module):
weight: jax.Array
bias: jax.Array
submodule: equinox.Module
```
**Initialisation**
A default `__init__` is automatically provided, which just fills in fields with
the arguments passed. For example `MyModule(weight, bias, submodule)`.
Alternatively (quite commonly) you can provide an `__init__` method yourself:
```python
class MyModule(equinox.Module):
weight: jax.Array
bias: jax.Array
submodule: equinox.Module
def __init__(self, in_size, out_size, key):
wkey, bkey, skey = jax.random.split(key, 3)
self.weight = jax.random.normal(wkey, (out_size, in_size))
self.bias = jax.random.normal(bkey, (out_size,))
self.submodule = equinox.nn.Linear(in_size, out_size, key=skey)
```
**Methods**
It is common to create some methods on the class -- for example to define the
forward pass of a model.
```python
class MyModule(equinox.Module):
... # as above
def __call__(self, x):
return self.submodule(x) + self.weight @ x + self.bias
```
!!! tip
You don't have to define `__call__`:
- You can define other methods if you want.
- You can define multiple methods if you want.
- You can define no methods if you want. (And just use `equinox.Module` as a
nice syntax for custom PyTrees.)
No method is special-cased.
**Usage**
After you have defined your model, then you can use it just like any other
PyTree -- that just happens to have some methods attached. In particular you can
pass it around across `jax.jit`, `jax.grad` etc. in exactly the way that you're
used to.
!!! example
If you wanted to, then it would be completely safe to do
```python
class MyModule(equinox.Module):
...
@jax.jit
def __call__(self, x):
...
```
because `self` is just a PyTree. Unlike most other neural network libraries,
you can mix Equinox and native JAX without any difficulties at all.
!!! tip "For fans of strong typing."
Equinox modules are all [ABCs](https://docs.python.org/3/library/abc.html)
by default. This means you can use
[`abc.abstractmethod`](https://docs.python.org/3/library/abc.html#abc.abstractmethod).
You can also create abstract instance attributes or abstract class
attributes, see [`equinox.AbstractVar`][] and
[`equinox.AbstractClassVar`][].
""" # noqa: E501
def __new__(cls, *args: object, **kwargs: object) -> "Module":
del args, kwargs
self = super().__new__(cls)
# We record currently-initialising modules
_currently_initialising.add(self)
return self
def __repr__(self) -> str:
return tree_pformat(self)
def __hash__(self) -> int:
return hash(
tuple((k, getattr(self, k)) for k in _module_info[type(self)].names_tuple)
)
def __eq__(self, other: object, /) -> bool | np.bool_ | Bool[Array, ""]: # pyright: ignore
return tree_equal(self, other)
if not TYPE_CHECKING:
def __setattr__(self, name: str, value: Any) -> None:
if self in _currently_initialising and (
name in _module_info[type(self)].names_set
or name in WRAPPER_FIELD_NAMES
):
_error_method_assignment(self, value)
_warn_jax_transformed_function(type(self), value)
object.__setattr__(self, name, value)
return
# Allow:
# ```
# class SomeModule(eqx.Module, Generic[T]): ...
# x = SomeModule[int]()
# x.__orig_class__ # SomeModule[int]
# ```
# This attribute is set after instantiation here:
# https://github.com/python/cpython/blob/7b3ab5921fa25ed8b97b6296f97c5c78aacf5447/Lib/typing.py#L728
# So without special-casing it's incompatible with frozen dataclasses.
if name == "__orig_class__":
object.__setattr__(self, name, value)
raise dataclasses.FrozenInstanceError(f"cannot assign to field '{name}'")
def __getattribute__(self, name: str, /) -> Any:
out = super().__getattribute__(name)
# Arrange for bound methods to be treated as PyTrees as well. This
# ensures that
# ```
# @jax.jit
# def run(fn):
# ...
# run(SomeModule().some_method)
# ```
# works.
if (
not _is_magic(name)
and isinstance(out, types.MethodType)
and out.__self__ is self
):
out = BoundMethod(object.__getattribute__(out, "__func__"), self)
return out
def _is_magic(k: str, /) -> bool:
return (k.startswith("__") and k.endswith("__")) or (k == "_abc_impl")
def is_abstract_module(cls: type[Module], /) -> bool:
if not issubclass(cls, Module):
raise TypeError(f"{cls} is not a subclass of `Module`.")
return (
(len(cls.__abstractmethods__) > 0)
or (len(cls.__abstractvars__) > 0)
or (len(cls.__abstractclassvars__) > 0)
or (cls in _abstract_module_registry)
)
_P = ParamSpec("_P")
_T = TypeVar("_T")
def module_update_wrapper(
wrapper: Module, wrapped: Callable[_P, _T] | None = None
) -> Callable[_P, _T]:
"""Like `functools.update_wrapper` (or its better-known cousin, `functools.wraps`),
but acts on [`equinox.Module`][]s, and does not modify its input (it returns the
updated module instead).
!!! Example
```python
class Wrapper(eqx.Module):
fn: Callable
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
@property
def __wrapped__(self):
return self.fn
def make_wrapper(fn):
return eqx.module_update_wrapper(Wrapper(fn))
```
For example, [`equinox.filter_jit`][] returns a module representing the JIT'd
computation. `module_update_wrapper` is used on this module to indicate that this
JIT'd computation wraps the original one. (Just like how `functools.wraps` is used.)
Note that as in the above example, the wrapper class must supply a `__wrapped__`
property, which redirects to the wrapped object.
**Arguments:**
- `wrapper`: the instance of the wrapper.
- `wrapped`: optional, the callable that is being wrapped. If omitted then
`wrapper.__wrapped__` will be used.
**Returns:**
A copy of `wrapper`, with the attributes `__module__`, `__name__`, `__qualname__`,
`__doc__`, and `__annotations__` copied over from the wrapped function.
"""
cls = wrapper.__class__
if not isinstance(getattr(cls, "__wrapped__", None), property):
raise ValueError("Wrapper module must supply `__wrapped__` as a property.")
if wrapped is None:
wrapped = wrapper.__wrapped__ # pyright: ignore
# Make a clone, to avoid mutating the original input.
leaves, treedef = jtu.tree_flatten(wrapper)
wrapper = jtu.tree_unflatten(treedef, leaves)
# Like `ft.update_wrapper(wrapper, wrapped, updated=())`.
# We don't update __dict__ as it's common/possible for wrapper and wrapped to
# both be classes implementing __call__, in which case copying __dict__ over
# basically just breaks the wrapper class.
# We don't set __wrapped__, and instead demand that the wrapper class tell us
# how to redirect to the wrapped object. This is avoid duplicating part of the
# PyTree.
_currently_initialising.add(wrapper)
try:
for field_name in WRAPPER_FIELD_NAMES:
try:
value = getattr(wrapped, field_name)
except AttributeError:
pass
else:
setattr(wrapper, field_name, value)
finally:
_currently_initialising.remove(wrapper)
return cast(Callable[_P, _T], wrapper)
from ._prebuilt import BoundMethod # After Module is defined.
| Module |
python | huggingface__transformers | src/transformers/models/bridgetower/modeling_bridgetower.py | {
"start": 65249,
"end": 65848
} | class ____(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.config = config
self.transform = BridgeTowerPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.text_config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.text_config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
mlm_score = self.transform(x)
mlm_score = self.decoder(mlm_score) + self.bias
return mlm_score
| BridgeTowerMLMHead |
python | pytorch__pytorch | torch/fx/experimental/symbolic_shapes.py | {
"start": 10450,
"end": 38013
} | class ____(RuntimeError):
pass
def has_symbolic_sizes_strides(elem: torch.Tensor) -> bool:
return elem._has_symbolic_sizes_strides
Int: TypeAlias = Union[torch.SymInt, int]
def create_contiguous(shape: Sequence[Int]) -> list[Int]:
strides: list[Int] = [1]
for dim in reversed(shape[:-1]):
strides.append(dim * strides[-1]) # type: ignore[operator]
return list(reversed(strides))
def hint_int(a: Union[torch.SymInt, int], fallback: Optional[int] = None) -> int:
"""
Retrieve the hint for an int (based on the underlying real values as observed
at runtime). If no hint is available (e.g., because data dependent shapes),
if fallback is not None, use that instead (otherwise raise an error).
"""
if isinstance(a, torch.SymInt):
return a.node.require_hint(fallback)
assert type(a) is int, a
return a
Scalar: TypeAlias = Union[torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool]
def has_hint(a: Scalar) -> bool:
if isinstance(a, SymTypes):
return a.node.has_hint()
return True
def is_concrete_int(a: IntLikeType) -> bool:
"""
Utility to check if underlying object
in SymInt is concrete value. Also returns
true if integer is passed in.
Args:
a (SymInt or int): Object to test if it int
"""
assert isinstance(a, (SymInt, int))
if isinstance(a, int):
return True
if isinstance(a.node.expr, sympy.core.numbers.Integer):
return True
return False
def is_concrete_float(a: FloatLikeType) -> bool:
r"""Utility to check if underlying object
in SymInt is concrete value. Also returns
true if integer is passed in.
Args:
a (SymInt or float): Object to test if it float
"""
assert isinstance(a, (SymFloat, float))
if isinstance(a, float):
return True
if isinstance(a.node.expr, sympy.core.numbers.Float):
return True
return False
def is_concrete_bool(a: BoolLikeType) -> bool:
"""
Utility to check if underlying object
in SymBool is concrete value. Also returns
true if integer is passed in.
Args:
a (SymBool or bool): Object to test if it bool
"""
assert isinstance(a, (SymBool, bool))
if isinstance(a, bool):
return True
if isinstance(
a.node.expr, (sympy.logic.boolalg.BooleanTrue, sympy.logic.boolalg.BooleanFalse)
):
return True
return False
def has_static_value(a: Union[SymBool, SymFloat, SymInt, bool, float, int]) -> bool:
"""
User-code friendly utility to check if a value is static or dynamic.
Returns true if given a constant, or a symbolic expression with a fixed value.
Args:
a (Union[SymBool, SymFloat, SymInt, bool, float, int]): Object to test
"""
assert isinstance(a, BoolLike + FloatLike + IntLike)
if (
isinstance(a, BoolLike)
and is_concrete_bool(a) # type: ignore[arg-type]
or isinstance(a, FloatLike)
and is_concrete_float(a) # type: ignore[arg-type]
or isinstance(a, IntLike)
and is_concrete_int(a) # type: ignore[arg-type]
):
return True
assert isinstance(a, py_sym_types)
return a.node.shape_env.bound_sympy(a.node.expr).is_singleton() # type: ignore[union-attr]
def guard_size_oblivious(expr: Union[torch.SymBool, bool]) -> bool:
"""
Perform a guard on a symbolic boolean expression in a size oblivious way.
This is typically used when a non-oblivious test would result in a guard
on a data dependent value of which we don't know the value of at compile time.
When a guard is tested this way, we may diverge in behavior from how regular
PyTorch semantics would treat it. For more information, see
https://github.com/pytorch/pytorch/pull/118579
"""
if isinstance(expr, torch.SymBool):
return expr.node.guard_size_oblivious("", 0)
else:
assert isinstance(expr, bool), expr
return expr
def check_consistent(new: _T, old: _T) -> None:
"""
Test that two "meta" values (typically either Tensor or SymInt) have
the same values, e.g., after retracing. If we don't understand the
quantities in question, we'll just skip the consistency check.
"""
# TODO: do boolean equality test too, see
# https://github.com/pytorch/pytorch/issues/124110
scalar_types = (torch.SymInt, torch.SymFloat, int, float)
if isinstance(new, torch.Tensor):
assert isinstance(old, torch.Tensor)
torch._check(
old.dim() == new.dim(), lambda: f"{old.shape} != {new.shape} (old != new)"
)
# Do this manually so that each individual test is irrefutable
# (TODO: should be a helper for this, maybe sym_eq? That
# gives us a compound expression and I'm not sure it
# simplifies right now)
for i, j in zip(old.shape, new.shape):
torch._check(i == j, lambda: f"{old.shape} != {new.shape} (old != new)")
# NB: bool is subclass of int
elif isinstance(new, scalar_types) and not isinstance(new, bool):
assert isinstance(old, scalar_types) and not isinstance(old, bool), (
f"{old} != {new}"
)
torch._check(old == new, lambda: f"{old} != {new} (old != new)")
def resolve_unbacked_bindings(
shape_env: Optional[ShapeEnv],
bindings: Optional[dict[sympy.Symbol, pytree.KeyPath]],
) -> Optional[dict[sympy.Symbol, pytree.KeyPath]]:
"""
When we do fake tensor prop, we oftentimes will allocate new unbacked symints.
We then run proxy tensor mode, which populates node.meta["unbacked_bindings"]
with these new symints. To ensure consistency we use PropagateUnbackedSymInts
to rename unbacked bindings to their old ones. But all of the node metas are
still using the old bindings from before the renaming. This function helps to
post facto apply any renamings discovered in the PropogateUnbackedSymInts pass.
"""
if bindings is None:
return None
assert shape_env is not None
return {shape_env.unbacked_renamings.get(k, k): v for k, v in bindings.items()}
Result: TypeAlias = Union[torch.Tensor, tuple[torch.Tensor, ...]]
def rebind_unbacked(
    shape_env: Optional[ShapeEnv], n: torch.fx.Node, result: Result
) -> None:
    """
    Suppose we are retracing a pre-existing FX graph that previously had
    fake tensor propagation (and therefore unbacked SymInts).  When we retrace,
    we re-propagate fake tensors, which results in new unbacked SymInts.
    When this happens, we need to tell the shape environment about the equivalence
    of the old and new unbacked SymInts.  Pass us the old torch.fx.Node (which
    has the old binding information) and the new result (which we can extract the
    new unbacked SymInts out from).

    Args:
        shape_env: The shape environment the renamings are recorded into; must
            be non-None whenever ``n`` carries unbacked bindings.
        n: The previously-traced FX node whose meta holds the old bindings.
        result: The freshly re-propagated output of ``n`` (tensor or tuple of
            tensors) from which the new unbacked SymInts are extracted.

    Side effect: registers old-name/new-name equivalences on ``shape_env``
    via ``_rename_unbacked_to``; returns nothing.
    """
    # Inputs never need rebinding
    if n.op == "placeholder":
        return

    if bindings := resolve_unbacked_bindings(
        shape_env, n.meta.get("unbacked_bindings")
    ):
        assert shape_env is not None
        for raw_u0, path in bindings.items():
            # Follow the recorded pytree key path into the new result to find
            # the value that now occupies the old binding's position.
            u1 = pytree.key_get(result, path)
            # Sometimes, things were previously unbacked bindings become constants.
            # There are two situations this can happen.
            #
            # First, you might have a runtime assert that causes the
            # constant-ification.  In this case, the /binding/ itself will
            # still be an unbacked symbol (because we will only force it
            # to be a constant later in fake tensor propagation).  In this
            # case, u1 is a SymInt and we still do all our work as normal.
            #
            # But second, it might be that fake tensor propagation DIRECTLY
            # converted the unbacked SymInt into a constant.  This happens
            # more rarely, but we have identified two situations it can
            # validly occur:
            #
            # - If you have a tensor_version operator, these are initially
            #   allocated as unbacked SymInts, but after AOTAutograd they
            #   get forced specialized to specific values.  In this case,
            #   there is no reason to do runtime asserts on them, this is
            #   just a hack to properly keep track of them to start.
            #
            # - If you have an item() call on a constant tensor, the result
            #   of the item() call is constant and we do not need runtime
            #   asserts on this symbol.  In
            #   https://github.com/pytorch/pytorch/issues/140625 we have a
            #   case where in the initial trace of the program we are unable
            #   to determine that torch.tensor is constant, but then
            #   subsequent passes cause torch.tensor to become a constant and
            #   then the unbacked symbol goes poof.
            #
            # In all of these cases, it is no longer necessary to generate
            # deferred runtime asserts, since other subsystems (e.g., the
            # constant-ification pass) ensure that the quantity is now truly
            # static and cannot change at runtime.  So it's OK to discard
            # in these situations.
            #
            # There is one more hazard (re
            # https://github.com/pytorch/pytorch/issues/141248), the problem
            # is that you can end up with "dangling" unbacked symbols that
            # exist in the ShapeEnv but are never bound anywhere.  You might
            # like an invariant that unbacked symbols never get lost.  But
            # we do not have this invariant, so do not try to enforce it.
            if isinstance(u1, (int, float)):
                log.info(
                    "rebind_unbacked: discard %s %s %s -> %s",
                    n.target,
                    raw_u0,
                    path,
                    u1,
                )
                continue

            # We only care about rebinding unbacked things
            if u1.node.hint is not None:
                continue

            # unbacked symbols bindings might be replaced to other backed or
            # unbacked replacements.
            #
            # Example:
            #    u = x.item()
            #    torch._check(u == 5)
            #
            # The safest approach is to retrieve raw_u1 from u1.node._expr
            # and perform the rebinding on the original unbacked symbol,
            # even if it’s no longer directly referenced.
            #
            # In other words, we should always rebind the original symbol
            # before any replacements are applied.
            # u0 -> u0 == s1
            raw_u1 = u1.node._expr

            # TODO Do we still need this logic below?
            # Simplify SymBool binding
            if (
                isinstance(raw_u1, sympy.Piecewise)
                and len(raw_u1.args) == 2
                and (
                    raw_u1_args0 := cast(
                        tuple[sympy.Basic, sympy.Basic], raw_u1.args[0]
                    )
                )
                and raw_u1_args0[0] == 1
                and isinstance(eq := raw_u1_args0[1], sympy.Eq)
                and isinstance(new_raw_u1 := eq.lhs, sympy.Symbol)
                and shape_env.var_to_range[new_raw_u1].issubset(ValueRanges(0, 1))
                and eq.rhs == 1
                and cast(tuple[sympy.Basic, sympy.Basic], raw_u1.args[1]) == (0, True)
            ):
                # This is what the pattern match above is testing
                repacked = _sympy_cast_symbool_to_symint_guardless(
                    sympy.Eq(new_raw_u1, 1)
                )
                assert repacked == raw_u1, f"{repacked} != {raw_u1}"
                # Cancel the to_int(to_bool(x)).  This is sound because x in
                # [0, 1]
                raw_u1 = new_raw_u1

            if not isinstance(raw_u1, sympy.Symbol):
                assert not raw_u1.free_symbols, (
                    f"should have been constant, but got {raw_u1}"
                )
                continue

            # The old and new could be the same if you improperly hit the memo
            # while retracing.  Make sure you updated FakeTensorMode.epoch
            assert raw_u0 != raw_u1, f"{raw_u0} possible memo disaster"

            # Reuse the OLD symbol name
            shape_env._rename_unbacked_to(raw_u1, raw_u0)
# NB: You could try to expand this to cover more cases by simply
# detecting whenever you have an int output, but this is a bit
# dangerous in case someone adds a function that returns an int but is
# mutating. So manually whitelist for now.
def is_accessor_node(node: torch.fx.Node) -> bool:
    """
    Helper function to determine if a node is trying to access
    a symbolic integer such as size, stride, offset or item.  Currently
    primarily only used in a DCE pass to figure out purity.
    """
    # Dynamo only exercised condition: a method call on a faked tensor.
    if node.op == "call_method":
        base = node.args[0]
        if (
            isinstance(base, torch.fx.Node)
            and isinstance(base.meta.get("example_value"), torch.Tensor)
            and node.target in ("size", "stride", "storage_offset", "item")
        ):
            return True

    sym_accessor_fns = (
        torch.ops.aten.sym_size,
        torch.ops.aten.sym_size.default,
        torch.ops.aten.sym_size.int,
        torch.ops.aten.sym_stride,
        torch.ops.aten.sym_stride.default,
        torch.ops.aten.sym_stride.int,
        torch.ops.aten.sym_storage_offset,
        torch.ops.aten.sym_storage_offset.default,
        torch.ops.aten.sym_numel.default,
    )
    return node.op == "call_function" and node.target in sym_accessor_fns
def canonicalize_bool_expr(expr: _T) -> _T:
    """
    Canonicalize a boolean expression by transforming it into a lt / le
    inequality and moving all the non-constant terms to the rhs.
    We canonicalize And / Ors / Not via cnf and then canonicalize their subexpr
    recursively
    nb. sympy.Rel.canonical is not good enough
    https://github.com/sympy/sympy/issues/25924

    Args:
        expr (sympy.Expr): Expression to canonicalize
    """
    boolean_types = (sympy.Rel, sympy.And, sympy.Or, sympy.Not, sympy.Eq, sympy.Ne)
    # Anything that is not a relational/boolean expression passes through.
    if not isinstance(expr, boolean_types):
        return expr

    # Compound expressions are first rewritten into CNF; the implementation
    # then canonicalizes each clause recursively.
    if isinstance(expr, (sympy.And, sympy.Or, sympy.Not)):
        expr = sympy.logic.boolalg.to_cnf(expr)
    return _canonicalize_bool_expr_impl(expr)  # type: ignore[arg-type, return-value]
def _sympy_from_args(
    cls: type[Union[sympy.Add, sympy.Mul]],
    args: list[sympy.Expr],
    sort: bool = True,
    is_commutative: Optional[bool] = None,
) -> sympy.Expr:
    """
    Create a sympy expression from a list of arguments, optimizing for performance.

    This function creates a sympy Add or Mul expression from a list of arguments
    while avoiding expensive operations like flattening.  It handles sorting the
    arguments appropriately based on the expression type.

    Args:
        cls: The sympy class to create (Add or Mul)
        args: List of sympy expressions to combine
        sort: Whether to sort the arguments (default: True)
        is_commutative: Whether the operation is commutative (default: None)

    Returns:
        A sympy expression of type cls combining all arguments

    Raises:
        ValueError: If cls is not sympy.Add or sympy.Mul
    """
    if not args:
        return cls.identity  # type: ignore[union-attr]

    if not sort:
        # Caller promises args are already in canonical order, so we can
        # build directly without the expensive cls(*args) flatten step.
        return cls._from_args(args, is_commutative=is_commutative)  # type: ignore[attr-defined]

    if cls is sympy.Add:
        sort_fn = sympy.core.add._addsort
    elif cls is sympy.Mul:
        sort_fn = sympy.core.mul._mulsort
    else:
        raise ValueError(f"Unknown cls: {cls}")

    # Sorting is only meaningful for commutative operations.
    assert is_commutative is True

    # A leading numeric coefficient must stay in front of the canonical arg
    # list, so only the tail is sorted in that case.
    if args[0].is_Number:
        tail = args[1:]
        sort_fn(tail)
        ordered = [args[0]] + tail
    else:
        ordered = args.copy()
        sort_fn(ordered)
    return cls._from_args(ordered, is_commutative=is_commutative)  # type: ignore[attr-defined]
def _canonicalize_bool_expr_impl(expr: SympyBoolean) -> SympyBoolean:
    """
    Canonicalize a single (already CNF-converted) boolean expression.

    After canonicalization, we are guaranteed to have eliminated Ge/Gt relations
    (rewriting them to Le/Lt, respectively).  All non-constant terms end up on
    the rhs; negative terms are moved to the lhs so both sides are "positive".
    """
    # Compound expressions: canonicalize each clause recursively.
    if isinstance(expr, (sympy.And, sympy.Or)):
        return type(expr)(*map(canonicalize_bool_expr, expr.args))

    opposite = {sympy.Gt: sympy.Lt, sympy.Ge: sympy.Le}
    t: Union[type[Any]]
    if isinstance(expr, tuple(opposite.keys())):
        # a > b  ->  0 < a - b   (flip the relation, move everything right)
        rhs = expr.lhs - expr.rhs  # type: ignore[attr-defined]
        t = opposite[type(expr)]  # type: ignore[index]
    else:
        assert isinstance(expr, (sympy.Lt, sympy.Le, sympy.Eq, sympy.Ne))
        # a < b  ->  0 < b - a
        rhs = expr.rhs - expr.lhs
        t = type(expr)

    def is_neg(t: sympy.Expr) -> bool:
        # A term is "negative" if it is a negative number or a Mul with a
        # negative leading coefficient.
        return (t.is_Number and t.is_negative) or (
            isinstance(t, sympy.Mul) and t.args[0].is_Number and t.args[0].is_negative
        )

    lhs = S.Zero
    rhs = _reduce_to_lowest_terms(rhs)
    if isinstance(rhs, sympy.Add):
        pos = []
        neg = []
        for term in rhs.args:
            if is_neg(term):
                neg.append(-term)
            else:
                pos.append(term)
        # these are already sorted
        rhs = _sympy_from_args(sympy.Add, pos, sort=False, is_commutative=True)
        # the terms were changed, so needs a sorting
        lhs = _sympy_from_args(sympy.Add, neg, sort=True, is_commutative=True)
    elif is_neg(rhs):
        # lhs == 0
        lhs, rhs = -rhs, S.Zero
    # We don't have to evaluate here because lhs, rhs came from a Boolean
    # and it was already simplified
    return t(lhs, rhs, evaluate=False)
def _reduce_to_lowest_terms(expr: sympy.Expr) -> sympy.Expr:
    """
    Eliminates any integer factor from a given expression.
    E.g., 6x + 4y reduces to 3x + 2y.
    Useful when an expression is == or != to 0.
    """

    def _int_coeff(term: sympy.Expr) -> int:
        # Absolute integer coefficient of a term (1 when there is none).
        if term.is_Integer:
            return abs(int(term))
        if term.is_Mul:
            # If one of the args of a Mul is an Integer, it is the
            # first arg.  eg: args(2*x*3*y) == (6, x, y)
            head = term.args[0]
            return abs(int(head)) if head.is_Integer else 1  # type: ignore[call-overload]
        return 1

    def _div_by(term: sympy.Expr, factor: int) -> sympy.Expr:
        if term.is_Integer:
            return term / factor
        if term.is_Mul:
            head = term.args[0]
            if head != factor:
                new_args = [head / sympy.Integer(factor), *term.args[1:]]
            else:
                # Mul._from_args require a canonical list of args
                # so we remove the first arg (head / factor) if it was 1
                new_args = list(term.args[1:])
            return _sympy_from_args(
                sympy.Mul, new_args, is_commutative=term.is_commutative
            )
        raise AssertionError(f"illegal arg to div_by_factor: {term}")

    if expr.is_Add:
        terms = cast(Sequence[sympy.Expr], expr.args)
        common = functools.reduce(math.gcd, map(_int_coeff, terms))
        if common == 1:
            return expr
        # pyrefly: ignore  [bad-argument-type]
        reduced = [_div_by(t, common) for t in terms]
        return _sympy_from_args(
            sympy.Add, reduced, sort=True, is_commutative=expr.is_commutative
        )
    if expr.is_Integer:
        return S.One
    if expr.is_Mul:
        return _div_by(expr, _int_coeff(expr))
    return expr
def is_nested_int(s: IntLikeType) -> TypeGuard[SymInt]:
    """Return True iff *s* is a SymInt whose node reports itself as a nested int."""
    if not isinstance(s, torch.SymInt):
        return False
    return s.node.is_nested_int()
# A single value _iterate_exprs knows how to extract sympy expressions from:
# symbolic scalars, plain Python scalars, raw sympy expressions, or tensors.
IterateExprsAtom: TypeAlias = Union[
    SymInt, SymFloat, SymBool, int, float, bool, sympy.Basic, torch.Tensor
]
# Either one atom or a sequence of atoms (sequences are walked recursively).
IterateExprs: TypeAlias = Union[IterateExprsAtom, Sequence[IterateExprsAtom]]
def _iterate_exprs(val: IterateExprs) -> Iterator[sympy.Basic]:
    """
    Recursively iterate through a value and yield all sympy expressions contained within it.

    This function traverses various data structures (tensors, lists, tuples, etc.) and extracts
    any symbolic expressions they contain.  It's used for operations like finding free symbols
    in complex nested structures.

    Args:
        val: The value to extract sympy expressions from.  Can be a symbolic type (SymInt, SymFloat, SymBool),
             a sympy expression, a primitive type (int, float, bool), a container (tuple, list),
             a sparse tensor, a regular tensor, None, or a torch.Generator.

    Yields:
        sympy.Basic: Each sympy expression found in the value.

    Raises:
        AssertionError: If the value is of an unsupported type.
    """
    # This is almost close enough to implement in terms of _iterate_nodes()
    # except that it needs to handle `list[sympy.Basic]` which _iterate_nodes()
    # can't handle.
    # NB: branch order matters — e.g. SymTypes must be tested before the
    # generic container/tensor cases.
    if isinstance(val, SymTypes):
        # This allow applies to the jagged layout NestedTensor case as
        # nested ints are not symbolic
        if is_symbolic(val):
            yield val.node.expr
    elif isinstance(val, SymNode):
        yield val.expr
    elif isinstance(val, sympy.Basic):
        yield val
    elif isinstance(val, (int, float, bool)):
        # Plain Python scalars carry no symbolic content.
        pass
    elif isinstance(val, (tuple, list)):
        for s in val:
            yield from _iterate_exprs(s)
    elif is_sparse_any(val):
        # Sparse tensors only contribute their sizes (strides are undefined).
        yield from _iterate_exprs(val.size())
    elif isinstance(val, torch.Tensor):
        yield from _iterate_exprs(val.size())
        yield from _iterate_exprs(val.stride())
        yield from _iterate_exprs(val.storage_offset())
    elif val is None:
        pass
    # see Note: [Generator arguments in AOTDispatcher]
    elif isinstance(val, torch.Generator):
        pass
    else:
        raise AssertionError(f"cannot extract sympy expressions from {val} {type(val)}")
def _iterate_nodes(val: Any) -> Iterator[SymNode]:
    """
    Recursively iterate through a value and yield all SymNodes contained
    within it.

    Mirrors _iterate_exprs but yields the SymNode wrappers instead of raw
    sympy expressions; plain scalars and unsupported types yield nothing.
    """
    if isinstance(val, SymNode):
        yield val
    elif isinstance(val, py_sym_types):
        # This allow applies to the jagged layout NestedTensor case as
        # nested ints are not symbolic
        if is_symbolic(val):
            yield val.node
    elif isinstance(val, (tuple, list, torch.Size)):
        for s in val:
            yield from _iterate_nodes(s)
    elif isinstance(val, torch.Tensor):
        yield from _iterate_nodes(val.size())
        # Strides/offset are undefined for sparse layouts, so only dense
        # tensors contribute them.
        if not is_sparse_any(val):
            yield from _iterate_nodes(val.stride())
            yield from _iterate_nodes(val.storage_offset())
def free_symbols(val: IterateExprs) -> OrderedSet[sympy.Symbol]:
    """
    Recursively collect all free symbols from a value.

    This function traverses various data structures (tensors, lists, tuples, etc.) and extracts
    all sympy symbols contained within them.  It's useful for finding all symbolic variables
    that a complex nested structure depends on.

    Args:
        val: The value to extract symbols from.  Can be a symbolic type (SymInt, SymFloat, SymBool),
             a container (tuple, list), a tensor, or None.

    Returns:
        OrderedSet[sympy.Symbol]: An ordered set of all free symbols found in the value.
    """
    if val is None:
        return OrderedSet()
    exprs = _iterate_exprs(val)
    # union() needs at least one receiver, so peel off the first expression
    # by hand; no expression means no symbols.
    first = next(exprs, None)
    if first is None:
        return OrderedSet()
    # TODO: Apparently, returning an OrderedSet here breaks
    # python test/distributed/tensor/test_dtensor_compile.py TestDTensorCompile.test_dtensor_dynamic
    return first.free_symbols.union(*(e.free_symbols for e in exprs))  # type: ignore[return-value]
def has_free_symbols(val: IterateExprs) -> bool:
    """Faster version of bool(free_symbols(val))"""
    # Short-circuits on the first non-constant expression instead of
    # materializing the full symbol set.
    for e in _iterate_exprs(val):
        if not (e.is_number or e.is_Boolean):
            return True
    return False
def has_free_unbacked_symbols(x: IterateExprs) -> bool:
    """Faster version of bool(free_unbacked_symbols(val))"""
    from sympy.core.traversal import iterargs

    unbacked_kinds = (SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT)
    # Short-circuits on the first unbacked symbol encountered anywhere in
    # the expression trees.
    return any(
        arg.is_Symbol and symbol_is_type(arg, unbacked_kinds)
        for expr in _iterate_exprs(x)
        for arg in iterargs(expr)
    )
def free_unbacked_symbols(x: IterateExprs) -> OrderedSet[sympy.Symbol]:
    """Like free_symbols, but filtered to only report unbacked symbols"""
    # NB: keep synced with is_unbacked_symint
    unbacked_kinds = (SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT)
    return OrderedSet(
        sym for sym in free_symbols(x) if symbol_is_type(sym, unbacked_kinds)
    )
def _free_non_source_unbacked_symbols(
    x: IterateExprs, unbacked_inputs: OrderedSet[sympy.Symbol]
) -> OrderedSet[sympy.Symbol]:
    """Unbacked symbols that are not inputs to the graph.  These are symbols
    that originated from data-dependent operations as opposed to
    mark_unbacked calls."""
    return free_unbacked_symbols(x) - unbacked_inputs
# WARNING: Don't use this on Dynamo produced graphs, they don't have meta
# setup!
def is_symbol_binding_fx_node(node: torch.fx.Node) -> Optional[sympy.Symbol]:
    """
    Check if a given FX node is a symbol binding node.

    A symbol binding node is one that has a SymInt value in its meta that contains
    a sympy Symbol expression, and is either a placeholder node or contains unbacked symbols.

    Args:
        node (torch.fx.Node): The FX node to check

    Returns:
        Optional[sympy.Symbol]: The sympy Symbol if the node is a symbol binding node, None otherwise
    """
    val = node.meta.get("val")
    if not isinstance(val, torch.SymInt):
        return None
    expr = val.node.expr
    if not isinstance(expr, sympy.Symbol):
        return None
    # Placeholders bind backed symbols; any other node only binds if the
    # symbol is unbacked.
    if node.op == "placeholder" or free_unbacked_symbols(expr):
        return expr
    return None
def find_symbol_binding_fx_nodes(
    graph: torch.fx.Graph,
) -> dict[sympy.Symbol, torch.fx.Node]:
    """
    Find all nodes in an FX graph that bind sympy Symbols.

    This function scans through all nodes in the given FX graph and identifies
    nodes that bind sympy Symbols (typically placeholder nodes with SymInt values).
    When multiple nodes bind the same symbol, only the first occurrence is kept.

    Args:
        graph: The FX graph to search for symbol binding nodes

    Returns:
        A dictionary mapping from sympy Symbols to their binding FX nodes
    """
    bindings: dict[sympy.Symbol, torch.fx.Node] = {}
    for node in graph.nodes:
        sym = is_symbol_binding_fx_node(node)
        # NB: prefer the first node that binds each symbol.
        if sym is not None and sym not in bindings:
            bindings[sym] = node
    return bindings
@dataclass(frozen=True)
| ConstraintViolationError |
python | ray-project__ray | python/ray/tests/test_actor_advanced.py | {
"start": 15280,
"end": 40924
} | class ____:
def ping(self):
return "pong"
def foobar(self):
actor = NonDetachedActor.remote()
return ray.get([foo.remote(), actor.foo.remote()])
actor = DetachedActor._remote(lifetime="detached", name="{}")
ray.get(actor.ping.remote())
""".format(
address, get_actor_name, create_actor_name
)
run_string_as_driver(driver_script)
assert len(ray.util.list_named_actors()) == 2
assert get_actor_name in ray.util.list_named_actors()
assert create_actor_name in ray.util.list_named_actors()
detached_actor = ray.get_actor(create_actor_name)
assert ray.get(detached_actor.ping.remote()) == "pong"
# Verify that a detached actor is able to create tasks/actors
# even if the driver of the detached actor has exited.
assert ray.get(detached_actor.foobar.remote()) == ["bar", "bar"]
@pytest.mark.parametrize(
    "ray_start_regular",
    [{"include_dashboard": True}],
    indirect=True,
)
def test_detached_actor_cleanup(ray_start_regular):
    """A detached actor's name is freed from the GCS when it is killed.

    Verified three times: twice from this driver, and once for a detached
    actor created and killed by a separate driver process.
    """

    @ray.remote
    class DetachedActor:
        def ping(self):
            return "pong"

    dup_actor_name = "actor"

    def create_and_kill_actor(actor_name):
        # Make sure same name is creatable after killing it.
        detached_actor = DetachedActor.options(
            lifetime="detached", name=actor_name
        ).remote()
        # Wait for detached actor creation.
        assert ray.get(detached_actor.ping.remote()) == "pong"
        del detached_actor
        assert ray.util.list_named_actors() == [dup_actor_name]
        detached_actor = ray.get_actor(dup_actor_name)
        ray.kill(detached_actor)
        # Wait until actor dies.
        actor_status = ray.util.state.get_actor(id=detached_actor._actor_id.hex())
        max_wait_time = 10
        wait_time = 0
        while actor_status.state != "DEAD":
            actor_status = ray.util.state.get_actor(id=detached_actor._actor_id.hex())
            print(f"actor status is {actor_status}")
            time.sleep(1.0)
            wait_time += 1
            if wait_time >= max_wait_time:
                assert None, "It took too much time to kill an actor: {}".format(
                    detached_actor._actor_id
                )

    create_and_kill_actor(dup_actor_name)
    # This shouldn't be broken because actor
    # name should have been cleaned up from GCS.
    create_and_kill_actor(dup_actor_name)

    # Repeat the create/kill cycle from a second driver process to make sure
    # name cleanup is not tied to this driver's lifetime.
    address = ray_start_regular["address"]
    driver_script = """
import ray
import ray._private.gcs_utils as gcs_utils
import time
from ray._private.test_utils import convert_actor_state
import traceback
try:
    def _load_state_api():
        try:
            from ray.util import state as state_api
            return state_api
        except Exception:
            pass
        raise ImportError("No usable Ray State API found")
    ray.init(address="{}", namespace="default_test_namespace")
    @ray.remote
    class DetachedActor:
        def ping(self):
            return "pong"
    # Make sure same name is creatable after killing it.
    detached_actor = DetachedActor.options(lifetime="detached", name="{}").remote()
    assert ray.get(detached_actor.ping.remote()) == "pong"
    ray.kill(detached_actor)
    # Wait until actor dies.
    actor_status = _load_state_api().get_actor(id=detached_actor._actor_id.hex())
    max_wait_time = 10
    wait_time = 0
    while actor_status.state != "DEAD": # noqa
        actor_status = _load_state_api().get_actor(id=detached_actor._actor_id.hex())
        time.sleep(1.0)
        wait_time += 1
        if wait_time >= max_wait_time:
            assert None, (
                "It took too much time to kill an actor")
except Exception:
    traceback.print_exc()
    raise
""".format(
        address, dup_actor_name
    )
    run_string_as_driver(driver_script)
    # Make sure we can create a detached actor created/killed
    # at other scripts.
    create_and_kill_actor(dup_actor_name)
@pytest.mark.parametrize("ray_start_regular", [{"local_mode": True}], indirect=True)
def test_detached_actor_local_mode(ray_start_regular):
    """Detached named actors work (and are cleaned up) in local mode."""
    expected = 3

    @ray.remote
    class LocalModeActor:
        def f(self):
            return expected

    LocalModeActor.options(lifetime="detached", name="test").remote()
    assert ray.util.list_named_actors() == ["test"]

    handle = ray.get_actor("test")
    assert ray.get(handle.f.remote()) == expected

    # Killing the actor must also remove its name registration.
    ray.kill(handle)
    assert not ray.util.list_named_actors()
    with pytest.raises(ValueError):
        ray.get_actor("test")
@pytest.mark.parametrize("ray_start_regular", [{"local_mode": True}], indirect=True)
def test_get_actor_local_mode(ray_start_regular):
    """ray.get_actor resolves a named actor when running in local mode."""

    @ray.remote
    class Greeter:
        def hi(self):
            return "hi"

    original = Greeter.options(name="hi").remote()  # noqa: F841
    looked_up = ray.get_actor("hi")
    assert ray.get(looked_up.hi.remote()) == "hi"
@pytest.mark.parametrize(
    "ray_start_cluster",
    [
        {
            "num_cpus": 3,
            "num_nodes": 1,
            "resources": {"first_node": 5},
            "include_dashboard": True,
        }
    ],
    indirect=True,
)
def test_detached_actor_cleanup_due_to_failure(ray_start_cluster):
    """A detached actor's name is freed after its worker or node dies.

    Covers two failure modes: the actor process exiting abruptly, and the
    node hosting the actor being removed from the cluster.
    """
    cluster = ray_start_cluster
    # Second node exposes a dedicated resource so we can pin an actor to it.
    node = cluster.add_node(resources={"second_node": 1})
    cluster.wait_for_nodes()

    @ray.remote
    class DetachedActor:
        def ping(self):
            return "pong"

        def kill_itself(self):
            # kill itself.
            os._exit(0)

    worker_failure_actor_name = "worker_failure_actor_name"
    node_failure_actor_name = "node_failure_actor_name"

    def wait_until_actor_dead(handle):
        # Poll the state API until the actor is reported DEAD (bounded wait).
        actor_status = ray.util.state.get_actor(id=handle._actor_id.hex())
        max_wait_time = 10
        wait_time = 0
        while actor_status.state != "DEAD":
            actor_status = ray.util.state.get_actor(id=handle._actor_id.hex())
            time.sleep(1.0)
            wait_time += 1
            if wait_time >= max_wait_time:
                assert None, "It took too much time to kill an actor: {}".format(
                    handle._actor_id
                )

    def create_detached_actor_blocking(actor_name, schedule_in_second_node=False):
        resources = {"second_node": 1} if schedule_in_second_node else {"first_node": 1}
        actor_handle = DetachedActor.options(
            lifetime="detached", name=actor_name, resources=resources
        ).remote()
        # Wait for detached actor creation.
        assert ray.get(actor_handle.ping.remote()) == "pong"
        return actor_handle

    # Name should be cleaned when workers fail
    deatched_actor = create_detached_actor_blocking(worker_failure_actor_name)
    deatched_actor.kill_itself.remote()
    wait_until_actor_dead(deatched_actor)
    # Name should be available now.
    deatched_actor = create_detached_actor_blocking(worker_failure_actor_name)
    assert ray.get(deatched_actor.ping.remote()) == "pong"

    # Name should be cleaned when nodes fail.
    deatched_actor = create_detached_actor_blocking(
        node_failure_actor_name, schedule_in_second_node=True
    )
    cluster.remove_node(node)
    wait_until_actor_dead(deatched_actor)
    # Name should be available now.
    deatched_actor = create_detached_actor_blocking(node_failure_actor_name)
    assert ray.get(deatched_actor.ping.remote()) == "pong"
# This test verifies actor creation task failure will not
# hang the caller.
def test_actor_creation_task_crash(ray_start_regular):
    """Actor constructor crashes surface as RayActorError, and an actor with
    restarts left is re-constructed until the constructor succeeds."""
    # Test actor death in constructor.
    @ray.remote(max_restarts=0)
    class Actor:
        def __init__(self):
            print("crash")
            os._exit(0)

        def f(self):
            return "ACTOR OK"

    # Verify an exception is thrown.
    a = Actor.remote()
    with pytest.raises(ray.exceptions.RayActorError) as excinfo:
        ray.get(a.f.remote())
    assert excinfo.value.actor_id == a._actor_id.hex()

    # Test an actor can be restarted successfully
    # after it dies in its constructor.
    @ray.remote(max_restarts=3)
    class RestartableActor:
        def __init__(self):
            # The restart count survives the process crash via the internal
            # KV store, so each reincarnation sees the previous count.
            count = self.get_count()
            count += 1
            # Make it die for the first 2 times.
            if count < 3:
                self.set_count(count)
                print("crash: " + str(count))
                os._exit(0)
            else:
                print("no crash")

        def f(self):
            return "ACTOR OK"

        def get_count(self):
            value = _internal_kv_get("count")
            if value is None:
                count = 0
            else:
                count = int(value)
            return count

        def set_count(self, count):
            _internal_kv_put("count", str(count), True)

    # Verify we can get the object successfully.
    ra = RestartableActor.remote()
    ray.get(ra.f.remote())
@pytest.mark.parametrize(
    "ray_start_regular", [{"num_cpus": 2, "resources": {"a": 1}}], indirect=True
)
def test_pending_actor_removed_by_owner(ray_start_regular):
    """When the owner of pending actors dies, the resources reserved for
    them must be returned so other actors and tasks can be scheduled."""

    @ray.remote(num_cpus=1, resources={"a": 1})
    class Owner:
        def __init__(self):
            self.actors = []

        def create_actors(self):
            # Both children need the single "a" unit the owner already
            # holds, so they stay pending.
            self.actors = [Child.remote() for _ in range(2)]

    @ray.remote(resources={"a": 1})
    class Child:
        def ping(self):
            return True

    @ray.remote(resources={"a": 1})
    def probe_task():
        return True

    owner = Owner.remote()
    # Create pending actors.
    ray.get(owner.create_actors.remote())

    # Owner is dead: pending actors should be killed and the raylet should
    # return the workers/resources correctly.
    del owner

    child = Child.remote()
    assert ray.get(child.ping.remote())
    ray.kill(child)
    assert ray.get(probe_task.remote())
def test_pickling_actor_handle(ray_start_regular_shared):
    """An actor handle survives a pickle round trip and remains callable."""

    @ray.remote
    class Foo:
        def method(self):
            pass

    handle = Foo.remote()
    dumped = ray._private.worker.pickle.dumps(handle)
    restored = ray._private.worker.pickle.loads(dumped)
    # Verify that we can call a method on the unpickled handle.  TODO(rkn):
    # we should also test this from a different driver.
    ray.get(restored.method.remote())
def test_pickled_actor_handle_call_in_method_twice(ray_start_regular_shared):
    """A handle captured in a closure and handed to another actor can be
    used across multiple method invocations."""

    @ray.remote
    class Inner:
        def f(self):
            return 1

    @ray.remote
    class Outer:
        def __init__(self, constructor):
            self.actor = constructor()

        def step(self):
            ray.get(self.actor.f.remote())

    inner = Inner.remote()
    outer = Outer.remote(lambda: inner)

    # Call twice: the second call exercises reuse of the deserialized handle.
    ray.get(outer.step.remote())
    ray.get(outer.step.remote())
def test_kill(ray_start_regular_shared):
    """ray.kill terminates a hung actor and rejects non-handle arguments."""

    @ray.remote
    class Hanger:
        def hang(self):
            while True:
                time.sleep(1)

    hanger = Hanger.remote()
    pending = hanger.hang.remote()

    # The method never returns, so nothing should become ready.
    ready, _ = ray.wait([pending], timeout=0.5)
    assert not ready

    kill_actor_and_wait_for_failure(hanger)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(pending)

    with pytest.raises(ValueError):
        ray.kill("not_an_actor_handle")
def test_get_actor_no_input(ray_start_regular_shared):
    """Empty or whitespace-only names are rejected by ray.get_actor."""
    for bad_name in (None, "", " "):
        with pytest.raises(ValueError):
            ray.get_actor(bad_name)
def test_actor_resource_demand(shutdown_only):
    """Infeasible actor creations show up as resource demands in the GCS
    resource-usage report, and scheduled actors leave no demand behind."""
    ray.shutdown()
    cluster = ray.init(num_cpus=3)
    global_state_accessor = make_global_state_accessor(cluster)

    @ray.remote(num_cpus=2)
    class Actor:
        def foo(self):
            return "ok"

    a = Actor.remote()
    ray.get(a.foo.remote())
    # Give the raylet time to push an updated resource report to the GCS.
    time.sleep(1)

    message = global_state_accessor.get_all_resource_usage()
    resource_usages = gcs_utils.ResourceUsageBatchData.FromString(message)

    # The actor is scheduled so there should be no more demands left.
    assert len(resource_usages.resource_load_by_shape.resource_demands) == 0

    @ray.remote(num_cpus=80)
    class Actor2:
        pass

    actors = []
    actors.append(Actor2.remote())
    time.sleep(1)

    # This actor cannot be scheduled.
    message = global_state_accessor.get_all_resource_usage()
    resource_usages = gcs_utils.ResourceUsageBatchData.FromString(message)
    assert len(resource_usages.resource_load_by_shape.resource_demands) == 1
    assert resource_usages.resource_load_by_shape.resource_demands[0].shape == {
        "CPU": 80.0
    }
    assert (
        resource_usages.resource_load_by_shape.resource_demands[
            0
        ].num_infeasible_requests_queued
        == 1
    )

    actors.append(Actor2.remote())
    time.sleep(1)

    # Two actors cannot be scheduled.
    message = global_state_accessor.get_all_resource_usage()
    resource_usages = gcs_utils.ResourceUsageBatchData.FromString(message)
    assert len(resource_usages.resource_load_by_shape.resource_demands) == 1
    assert (
        resource_usages.resource_load_by_shape.resource_demands[
            0
        ].num_infeasible_requests_queued
        == 2
    )
def test_kill_pending_actor_with_no_restart_true():
    """Killing a pending (unschedulable) actor with no_restart=True clears
    its infeasible resource demand from the GCS."""
    cluster = ray.init()
    global_state_accessor = make_global_state_accessor(cluster)

    @ray.remote(resources={"WORKER": 1.0})
    class PendingActor:
        pass

    # Kill actor with `no_restart=True`.
    actor = PendingActor.remote()
    # TODO(ffbin): The raylet doesn't guarantee the order when dealing with
    # RequestWorkerLease and CancelWorkerLease. If we kill the actor
    # immediately after creating the actor, we may not be able to clean up
    # the request cached by the raylet.
    # See https://github.com/ray-project/ray/issues/13545 for details.
    time.sleep(1)
    ray.kill(actor, no_restart=True)

    def no_pending_demand():
        message = global_state_accessor.get_all_resource_usage()
        usage = gcs_utils.ResourceUsageBatchData.FromString(message)
        return len(usage.resource_load_by_shape.resource_demands) == 0

    # Actor is dead, so the infeasible task queue length is 0.
    wait_for_condition(no_pending_demand, timeout=10)
    ray.shutdown()
def test_actor_timestamps(ray_start_regular):
    """Verify actor StartTime/EndTime bookkeeping.

    For each of: graceful exit (handle dropped), non-graceful exit (actor
    process dies), and an actor that exhausts its restarts, check that
    StartTime stays stable across the actor's lifetime and that EndTime is
    recorded and strictly greater than StartTime.

    The original version triplicated the snapshot/assert logic across three
    nested functions; it is factored into a single helper here.
    """

    @ray.remote
    class Foo:
        def get_id(self):
            return ray.get_runtime_context().get_actor_id()

        def kill_self(self):
            sys.exit(1)

    def _verify_timestamps(actor, finish):
        """Snapshot state, run *finish*, drop the handle, and check times."""
        actor_id = ray.get(actor.get_id.remote())
        state_after_starting = ray._private.state.actors()[actor_id]
        time.sleep(1)
        finish(actor)
        # Drop our reference so a gracefully-exiting actor can actually die.
        del actor
        time.sleep(1)
        state_after_ending = ray._private.state.actors()[actor_id]

        assert state_after_starting["StartTime"] == state_after_ending["StartTime"]
        start_time = state_after_ending["StartTime"]
        end_time = state_after_ending["EndTime"]
        assert end_time > start_time > 0, f"Start: {start_time}, End: {end_time}"

    def _crash(actor):
        actor.kill_self.remote()

    def _crash_twice(actor):
        # The first crash consumes the single allowed restart; the second
        # one kills the actor for good.
        actor.kill_self.remote()
        time.sleep(1)
        actor.kill_self.remote()

    # Graceful exit: dropping the last handle ends the actor.
    _verify_timestamps(Foo.remote(), lambda _: None)
    # Non-graceful exit: the actor process dies.
    _verify_timestamps(Foo.remote(), _crash)
    # Restarted actor: StartTime must survive the restart.
    _verify_timestamps(
        Foo.options(max_restarts=1, max_task_retries=-1).remote(), _crash_twice
    )
def test_kill_pending_actor_with_no_restart_false():
    """Killing a pending actor with no_restart=False restarts it once; a
    second kill removes it (and its infeasible demand) for good."""
    cluster = ray.init()
    global_state_accessor = make_global_state_accessor(cluster)

    @ray.remote(resources={"WORKER": 1.0}, max_restarts=1)
    class PendingActor:
        pass

    # Kill actor with `no_restart=False`.
    actor = PendingActor.remote()
    # TODO(ffbin): The raylet doesn't guarantee the order when dealing with
    # RequestWorkerLease and CancelWorkerLease. If we kill the actor
    # immediately after creating the actor, we may not be able to clean up
    # the request cached by the raylet.
    # See https://github.com/ray-project/ray/issues/13545 for details.
    time.sleep(1)
    ray.kill(actor, no_restart=False)

    def demand_pending():
        message = global_state_accessor.get_all_resource_usage()
        usage = gcs_utils.ResourceUsageBatchData.FromString(message)
        return len(usage.resource_load_by_shape.resource_demands) != 0

    # Actor restarts, so the infeasible task queue length is 1.
    wait_for_condition(demand_pending, timeout=10)

    # Kill actor again and actor is dead,
    # so the infeasible task queue length is 0.
    ray.kill(actor, no_restart=False)

    def demand_cleared():
        message = global_state_accessor.get_all_resource_usage()
        usage = gcs_utils.ResourceUsageBatchData.FromString(message)
        return len(usage.resource_load_by_shape.resource_demands) == 0

    wait_for_condition(demand_cleared, timeout=10)
    ray.shutdown()
def test_actor_namespace_access(ray_start_regular):
    """Named-actor lookup respects namespaces.

    An actor registered under an explicit namespace is only visible when
    that namespace is passed to ``ray.get_actor``; a lookup from the
    caller's own namespace fails with ``ValueError``.
    """

    @ray.remote
    class A:
        def hi(self):
            return "hi"

    A.options(name="actor_in_current_namespace", lifetime="detached").remote()
    A.options(name="actor_name", namespace="namespace", lifetime="detached").remote()

    # Lookups in the matching namespace succeed.
    ray.get_actor("actor_in_current_namespace")
    ray.get_actor("actor_name", namespace="namespace")

    # Without the explicit namespace the second actor is not visible.
    with pytest.raises(ValueError, match=r"Failed to look up actor with name.*"):
        ray.get_actor("actor_name")
def test_get_actor_after_killed(shutdown_only):
    """``ray.get_actor`` after a kill.

    A permanently killed named actor can no longer be looked up, while an
    actor killed with ``no_restart=False`` (and restarts remaining) is
    restarted and stays reachable by name.
    """
    ray.init(num_cpus=2, include_dashboard=True)

    @ray.remote
    class A:
        def ready(self):
            return True

    # Hard kill: the actor is gone and lookup must raise.
    victim = A.options(name="actor", namespace="namespace").remote()
    ray.kill(victim)
    with pytest.raises(ValueError):
        ray.get_actor("actor", namespace="namespace")

    # Soft kill with a restart budget: the actor comes back.
    survivor = A.options(
        name="actor_2",
        namespace="namespace",
        max_restarts=1,
        max_task_retries=-1,
    ).remote()
    ray.kill(survivor, no_restart=False)
    handle = ray.get_actor("actor_2", namespace="namespace")
    assert ray.get(handle.ready.remote())
def test_get_actor_from_concurrent_tasks(shutdown_only):
    """Concurrent get-or-create of one named actor.

    Ten tasks race to create/look up the same detached named actor;
    exactly one creation succeeds and every task ends up holding a handle
    to that single actor.
    """

    @ray.remote
    class Actor:
        def get_actor_id(self) -> str:
            return ray.get_runtime_context().get_actor_id()

    actor_name = "test_actor"

    @ray.remote(num_cpus=0)
    def get_or_create_actor():
        try:
            # The first task will try to get the actor but fail (doesn't exist).
            try:
                handle = ray.get_actor(actor_name)
            except Exception:
                print("Get failed, trying to create")
                # Actor must be detached so it outlives this task and other tasks can
                # get a handle to it.
                handle = Actor.options(name=actor_name, lifetime="detached").remote()
        except Exception:
            # Multiple tasks may have reached the creation block above.
            # Only one will succeed and the others will get an error, in which case
            # they fall here and should be able to get the actor handle.
            print("Someone else created it, trying to get")
            handle = ray.get_actor(actor_name)
        return ray.get(handle.get_actor_id.remote())

    # Run 10 concurrent tasks to get or create the same actor.
    # Only one task should succeed at creating it, and all the others should get it.
    observed_ids = ray.get([get_or_create_actor.remote() for _ in range(10)])
    assert len(set(observed_ids)) == 1
def test_get_or_create_actor_from_multiple_threads(shutdown_only):
    """Make sure we can create actors in multiple threads without
    race conditions.

    Check https://github.com/ray-project/ray/issues/41324
    """

    @ray.remote
    class Counter:
        def __init__(self):
            self._count = 0

        def inc(self):
            self._count += 1

        def get(self) -> int:
            return self._count

    counter = Counter.remote()

    @ray.remote
    class Actor:
        def __init__(self):
            # Bump the shared counter so we can count actual constructions.
            ray.get(counter.inc.remote())

        def get_actor_id(self) -> str:
            return ray.get_runtime_context().get_actor_id()

    def _create_or_get_actor(*args):
        handle = Actor.options(
            name="test_actor",
            get_if_exists=True,
            # Actor must be detached so it outlives this function and other threads
            # can get a handle to it.
            lifetime="detached",
        ).remote()
        return ray.get(handle.get_actor_id.remote())

    # Concurrently submit 100 calls to create or get the actor from 10 threads.
    # Ensure that exactly one call actually creates the actor and the other 99 get it.
    with ThreadPoolExecutor(max_workers=10) as pool:
        distinct_ids = set(pool.map(_create_or_get_actor, range(100)))
    assert len(distinct_ids) == 1
    assert ray.get(counter.get.remote()) == 1
def test_get_actor_in_remote_workers(ray_start_cluster):
    """Make sure we can get and create actors without
    race condition in a remote worker.

    Check https://github.com/ray-project/ray/issues/20092. # noqa
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=0)
    cluster.add_node(num_cpus=1)
    ray.init(address=cluster.address, namespace="xxx")

    @ray.remote(num_cpus=0)
    class RemoteProc:
        def __init__(self):
            pass

        def procTask(self, a, b):
            print("[%s]-> %s" % (a, b))
            return a, b

    @ray.remote
    def submit_named_actors():
        # Two rounds: create/look up/call the named actor, then (after the
        # first round only) kill it.  The second round verifies that an
        # actor with the same name can be created immediately after the
        # kill.
        result = None
        for round_idx in range(2):
            RemoteProc.options(
                name="test", lifetime="detached", max_concurrency=10, namespace="xxx"
            ).remote()
            proc = ray.get_actor("test", namespace="xxx")
            result = ray.get(proc.procTask.remote(1, 2))
            if round_idx == 0:
                # Should be able to create an actor with the same name
                # immediately after killing it.
                ray.kill(proc)
        return result

    assert ray.get(submit_named_actors.remote()) == (1, 2)
def test_resource_leak_when_cancel_actor_in_phase_of_creating(ray_start_cluster):
    """Make sure there is no resource leak when cancel an actor in phase of
    creating.

    Check https://github.com/ray-project/ray/issues/27743. # noqa
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=2)
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    @ray.remote(num_cpus=1)
    class Actor:
        def __init__(self, signal_1, signal_2):
            # Announce that the constructor started, then block until told
            # to proceed — keeps the actor stuck "in creation".
            signal_1.send.remote()
            ray.get(signal_2.wait.remote())

    entered_ctor = SignalActor.remote()
    block_ctor = SignalActor.remote()
    actor = Actor.remote(entered_ctor, block_ctor)

    # The pending actor holds one CPU while it is being created.
    wait_for_condition(lambda: ray.available_resources()["CPU"] != 2)

    # Checking that the constructor of `Actor`` is invoked.
    ready_ids, _ = ray.wait([entered_ctor.wait.remote()], timeout=3.0)
    assert len(ready_ids) == 1

    # Kill the actor which is in the phase of creating.
    ray.kill(actor)

    # Ensure there is no resource leak.
    wait_for_condition(lambda: ray.available_resources()["CPU"] == 2)
def test_actor_gc(monkeypatch, shutdown_only):
MAX_DEAD_ACTOR_CNT = 5
with monkeypatch.context() as m:
m.setenv("RAY_maximum_gcs_destroyed_actor_cached_count", MAX_DEAD_ACTOR_CNT)
ray.init()
@ray.remote
class Actor:
def ready(self):
pass
actors = [Actor.remote() for _ in range(10)]
ray.get([actor.ready.remote() for actor in actors])
alive_actors = 0
for a in list_actors():
if a["state"] == "ALIVE":
alive_actors += 1
assert alive_actors == 10
# Kill actors
del actors
def verify_cached_dead_actor_cleaned():
return len(list_actors()) == MAX_DEAD_ACTOR_CNT # noqa
wait_for_condition(verify_cached_dead_actor_cleaned)
# Test detached actors
actors = [Actor.options(lifetime="detached").remote() for _ in range(10)]
ray.get([actor.ready.remote() for actor in actors])
alive_actors = 0
for a in list_actors():
if a["state"] == "ALIVE":
alive_actors += 1
assert alive_actors == 10
# Kill actors
for actor in actors:
ray.kill(actor)
wait_for_condition(verify_cached_dead_actor_cleaned)
# Test actors created by a driver.
driver = """
import ray
from ray.util.state import list_actors
ray.init("auto")
@ray.remote
| DetachedActor |
python | gevent__gevent | src/gevent/testing/exception.py | {
"start": 1161,
"end": 1265
} | class ____(Exception):
"""An exception whose traceback should be ignored by the hub"""
| ExpectedException |
python | PrefectHQ__prefect | tests/utilities/test_collections.py | {
"start": 23554,
"end": 24465
} | class ____:
@pytest.mark.parametrize(
"dct, keys, value, expected",
[
({}, "a.b.c", 1, {"a": {"b": {"c": 1}}}),
({"a": {"b": {"c": 1}}}, "a.b.c", 2, {"a": {"b": {"c": 2}}}),
({"a": {"b": {"c": 1}}}, "a.b.d", 2, {"a": {"b": {"c": 1, "d": 2}}}),
({"a": {"b": {"c": 1}}}, "a", 2, {"a": 2}),
(
{"a": {"b": {"c": 1}}},
["a", "b", "d"],
2,
{"a": {"b": {"c": 1, "d": 2}}},
),
],
)
def test_set_in_dict(self, dct, keys, value, expected):
set_in_dict(dct, keys, value)
assert dct == expected
def test_set_in_dict_raises_key_error(self):
with pytest.raises(
TypeError, match="Key path exists and contains a non-dict value"
):
set_in_dict({"a": {"b": [2]}}, ["a", "b", "c"], 1)
| TestSetInDict |
python | django-haystack__django-haystack | test_haystack/solr_tests/test_solr_backend.py | {
"start": 58321,
"end": 59420
} | class ____(TestCase):
fixtures = ["base_data.json", "bulk_data.json"]
def setUp(self):
super().setUp()
# Wipe it clean.
clear_solr_index()
# Stow.
self.old_ui = connections["solr"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SolrMockModelSearchIndex()
self.sammi = SolrAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.smmi, self.sammi])
connections["solr"]._index = self.ui
self.sqs = SearchQuerySet("solr")
self.smmi.update("solr")
self.sammi.update("solr")
def tearDown(self):
# Restore.
connections["solr"]._index = self.old_ui
super().tearDown()
def test_pickling(self):
results = self.sqs.all()
for res in results:
# Make sure the cache is full.
pass
in_a_pickle = pickle.dumps(results)
like_a_cuke = pickle.loads(in_a_pickle)
self.assertEqual(len(like_a_cuke), len(results))
self.assertEqual(like_a_cuke[0].id, results[0].id)
| LiveSolrPickleTestCase |
python | pypa__warehouse | tests/common/db/organizations.py | {
"start": 3174,
"end": 3407
} | class ____(WarehouseFactory):
class Meta:
model = OrganizationNameCatalog
normalized_name = factory.Faker("pystr", max_chars=12)
organization_id = factory.Faker("uuid4", cast_to=None)
| OrganizationNameCatalogFactory |
python | networkx__networkx | networkx/algorithms/tests/test_summarization.py | {
"start": 103,
"end": 4884
} | class ____:
def build_original_graph(self):
original_matrix = [
("1", "BC"),
("2", "ABC"),
("3", ["A", "B", "6"]),
("4", "ABC"),
("5", "AB"),
("6", ["5"]),
("A", ["6"]),
]
graph = nx.DiGraph()
for source, targets in original_matrix:
for target in targets:
graph.add_edge(source, target)
return graph
def build_compressed_graph(self):
compressed_matrix = [
("1", "BC"),
("2", ["ABC"]),
("3", ["A", "B", "6"]),
("4", ["ABC"]),
("5", "AB"),
("6", ["5"]),
("A", ["6"]),
("ABC", "ABC"),
]
compressed_graph = nx.DiGraph()
for source, targets in compressed_matrix:
for target in targets:
compressed_graph.add_edge(source, target)
return compressed_graph
def test_empty(self):
"""
Verify that an empty directed graph results in no compressor nodes
"""
G = nx.DiGraph()
compressed_graph, c_nodes = nx.dedensify(G, threshold=2)
assert c_nodes == set()
@staticmethod
def densify(G, compressor_nodes, copy=True):
"""
Reconstructs the original graph from a dedensified, directed graph
Parameters
----------
G: dedensified graph
A networkx graph
compressor_nodes: iterable
Iterable of compressor nodes in the dedensified graph
inplace: bool, optional (default: False)
Indicates if densification should be done inplace
Returns
-------
G: graph
A densified networkx graph
"""
if copy:
G = G.copy()
for compressor_node in compressor_nodes:
all_neighbors = set(nx.all_neighbors(G, compressor_node))
out_neighbors = set(G.neighbors(compressor_node))
for out_neighbor in out_neighbors:
G.remove_edge(compressor_node, out_neighbor)
in_neighbors = all_neighbors - out_neighbors
for in_neighbor in in_neighbors:
G.remove_edge(in_neighbor, compressor_node)
for out_neighbor in out_neighbors:
G.add_edge(in_neighbor, out_neighbor)
G.remove_node(compressor_node)
return G
def setup_method(self):
self.c_nodes = ("ABC",)
def test_dedensify_edges(self):
"""
Verifies that dedensify produced the correct edges to/from compressor
nodes in a directed graph
"""
G = self.build_original_graph()
compressed_G = self.build_compressed_graph()
compressed_graph, c_nodes = nx.dedensify(G, threshold=2)
for s, t in compressed_graph.edges():
o_s = "".join(sorted(s))
o_t = "".join(sorted(t))
compressed_graph_exists = compressed_graph.has_edge(s, t)
verified_compressed_exists = compressed_G.has_edge(o_s, o_t)
assert compressed_graph_exists == verified_compressed_exists
assert len(c_nodes) == len(self.c_nodes)
def test_dedensify_edge_count(self):
"""
Verifies that dedensify produced the correct number of compressor nodes
in a directed graph
"""
G = self.build_original_graph()
original_edge_count = len(G.edges())
c_G, c_nodes = nx.dedensify(G, threshold=2)
compressed_edge_count = len(c_G.edges())
assert compressed_edge_count <= original_edge_count
compressed_G = self.build_compressed_graph()
assert compressed_edge_count == len(compressed_G.edges())
def test_densify_edges(self):
"""
Verifies that densification produces the correct edges from the
original directed graph
"""
compressed_G = self.build_compressed_graph()
original_graph = self.densify(compressed_G, self.c_nodes, copy=True)
G = self.build_original_graph()
for s, t in G.edges():
assert G.has_edge(s, t) == original_graph.has_edge(s, t)
def test_densify_edge_count(self):
"""
Verifies that densification produces the correct number of edges in the
original directed graph
"""
compressed_G = self.build_compressed_graph()
compressed_edge_count = len(compressed_G.edges())
original_graph = self.densify(compressed_G, self.c_nodes)
original_edge_count = len(original_graph.edges())
assert compressed_edge_count <= original_edge_count
G = self.build_original_graph()
assert original_edge_count == len(G.edges())
| TestDirectedDedensification |
python | matplotlib__matplotlib | lib/matplotlib/dates.py | {
"start": 56149,
"end": 57264
} | class ____(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Parameters
----------
bymonthday : int or list of int, default: all days
Ticks will be placed on every day in *bymonthday*. Default is
``bymonthday=range(1, 32)``, i.e., every day of the month.
interval : int, default: 1
The interval between each iteration. For example, if
``interval=2``, mark every second occurrence.
tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
"""
if interval != int(interval) or interval < 1:
raise ValueError("interval must be an integer greater than 0")
if bymonthday is None:
bymonthday = range(1, 32)
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
super().__init__(rule, tz=tz)
| DayLocator |
python | mlflow__mlflow | tests/pyfunc/test_model_export_with_class_and_artifacts.py | {
"start": 3320,
"end": 10804
} | class ____(get_model_class()):
"""
A custom Python model class defined in the test module scope.
"""
@pytest.fixture(scope="module")
def main_scoped_model_class():
"""
A custom Python model class defined in the ``__main__`` scope.
"""
return get_model_class()
@pytest.fixture(scope="module")
def iris_data():
iris = sklearn.datasets.load_iris()
x = iris.data[:, :2]
y = iris.target
return x, y
@pytest.fixture(scope="module")
def sklearn_knn_model(iris_data):
x, y = iris_data
knn_model = sklearn.neighbors.KNeighborsClassifier()
knn_model.fit(x, y)
return knn_model
@pytest.fixture(scope="module")
def sklearn_logreg_model(iris_data):
x, y = iris_data
linear_lr = sklearn.linear_model.LogisticRegression()
linear_lr.fit(x, y)
return linear_lr
@pytest.fixture
def model_path(tmp_path):
return os.path.join(tmp_path, "model")
@pytest.fixture
def pyfunc_custom_env(tmp_path):
conda_env = os.path.join(tmp_path, "conda_env.yml")
_mlflow_conda_env(
conda_env,
additional_pip_deps=["scikit-learn", "pytest", "cloudpickle"],
)
return conda_env
def _conda_env():
# NB: We need mlflow as a dependency in the environment.
return _mlflow_conda_env(
additional_pip_deps=[
f"cloudpickle=={cloudpickle.__version__}",
f"scikit-learn=={sklearn.__version__}",
],
)
def test_model_save_load(sklearn_knn_model, main_scoped_model_class, iris_data, tmp_path):
sklearn_model_path = os.path.join(tmp_path, "sklearn_model")
mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)
def test_predict(sk_model, model_input):
return sk_model.predict(model_input) * 2
pyfunc_model_path = os.path.join(tmp_path, "pyfunc_model")
mlflow.pyfunc.save_model(
path=pyfunc_model_path,
artifacts={"sk_model": sklearn_model_path},
conda_env=_conda_env(),
python_model=main_scoped_model_class(test_predict),
)
loaded_pyfunc_model = mlflow.pyfunc.load_model(model_uri=pyfunc_model_path)
np.testing.assert_array_equal(
loaded_pyfunc_model.predict(iris_data[0]),
test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0]),
)
@pytest.mark.skip(
reason="In MLflow 3.0, `log_model` does not start a run. Consider removing this test."
)
def test_pyfunc_model_log_load_no_active_run(sklearn_knn_model, main_scoped_model_class, iris_data):
sklearn_artifact_path = "sk_model_no_run"
with mlflow.start_run():
mlflow.sklearn.log_model(sklearn_knn_model, name=sklearn_artifact_path)
sklearn_model_uri = f"runs:/{mlflow.active_run().info.run_id}/{sklearn_artifact_path}"
def test_predict(sk_model, model_input):
return sk_model.predict(model_input) * 2
pyfunc_artifact_path = "pyfunc_model"
assert mlflow.active_run() is None
mlflow.pyfunc.log_model(
name=pyfunc_artifact_path,
artifacts={"sk_model": sklearn_model_uri},
python_model=main_scoped_model_class(test_predict),
)
pyfunc_model_uri = f"runs:/{mlflow.active_run().info.run_id}/{pyfunc_artifact_path}"
loaded_pyfunc_model = mlflow.pyfunc.load_model(model_uri=pyfunc_model_uri)
np.testing.assert_array_equal(
loaded_pyfunc_model.predict(iris_data[0]),
test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0]),
)
mlflow.end_run()
def test_model_log_load(sklearn_knn_model, main_scoped_model_class, iris_data):
sklearn_artifact_path = "sk_model"
with mlflow.start_run():
sklearn_model_info = mlflow.sklearn.log_model(sklearn_knn_model, name=sklearn_artifact_path)
def test_predict(sk_model, model_input):
return sk_model.predict(model_input) * 2
pyfunc_artifact_path = "pyfunc_model"
with mlflow.start_run():
pyfunc_model_info = mlflow.pyfunc.log_model(
name=pyfunc_artifact_path,
artifacts={"sk_model": sklearn_model_info.model_uri},
python_model=main_scoped_model_class(test_predict),
)
pyfunc_model_path = _download_artifact_from_uri(pyfunc_model_info.model_uri)
model_config = Model.load(os.path.join(pyfunc_model_path, "MLmodel"))
loaded_pyfunc_model = mlflow.pyfunc.load_model(model_uri=pyfunc_model_info.model_uri)
assert model_config.to_yaml() == loaded_pyfunc_model.metadata.to_yaml()
np.testing.assert_array_equal(
loaded_pyfunc_model.predict(iris_data[0]),
test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0]),
)
def test_python_model_predict_compatible_without_params(sklearn_knn_model, iris_data):
class CustomSklearnModelWithoutParams(mlflow.pyfunc.PythonModel):
def __init__(self, predict_fn):
self.predict_fn = predict_fn
def load_context(self, context):
super().load_context(context)
self.model = mlflow.sklearn.load_model(model_uri=context.artifacts["sk_model"])
def predict(self, context, model_input):
return self.predict_fn(self.model, model_input)
sklearn_artifact_path = "sk_model"
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(sklearn_knn_model, name=sklearn_artifact_path)
sklearn_model_uri = model_info.model_uri
def test_predict(sk_model, model_input):
return sk_model.predict(model_input) * 2
pyfunc_artifact_path = "pyfunc_model"
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name=pyfunc_artifact_path,
artifacts={"sk_model": sklearn_model_uri},
python_model=CustomSklearnModelWithoutParams(test_predict),
)
pyfunc_model_path = _download_artifact_from_uri(model_info.model_uri)
model_config = Model.load(os.path.join(pyfunc_model_path, "MLmodel"))
loaded_pyfunc_model = mlflow.pyfunc.load_model(model_uri=model_info.model_uri)
assert model_config.to_yaml() == loaded_pyfunc_model.metadata.to_yaml()
np.testing.assert_array_equal(
loaded_pyfunc_model.predict(iris_data[0]),
test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0]),
)
def test_signature_and_examples_are_saved_correctly(iris_data, main_scoped_model_class, tmp_path):
sklearn_model_path = str(tmp_path.joinpath("sklearn_model"))
mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)
def test_predict(sk_model, model_input):
return sk_model.predict(model_input) * 2
data = iris_data
signature_ = infer_signature(*data)
example_ = data[0][:3]
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.pyfunc.save_model(
path=path,
artifacts={"sk_model": sklearn_model_path},
python_model=main_scoped_model_class(test_predict),
signature=signature,
input_example=example,
)
mlflow_model = Model.load(path)
assert signature == mlflow_model.signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
np.testing.assert_array_equal(_read_example(mlflow_model, path), example)
| ModuleScopedSklearnModel |
python | scipy__scipy | scipy/integrate/tests/test_integrate.py | {
"start": 12281,
"end": 12956
} | class ____(ODE):
r"""
Free vibration of a simple oscillator::
m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0
Solution::
u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
"""
stop_t = 1 + 0.09
z0 = array([1.0, 0.1], float)
k = 4.0
m = 1.0
def f(self, z, t):
tmp = zeros((2, 2), float)
tmp[0, 1] = 1.0
tmp[1, 0] = -self.k / self.m
return dot(tmp, z)
def verify(self, zs, t):
omega = sqrt(self.k / self.m)
u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
| SimpleOscillator |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_sequence.py | {
"start": 661,
"end": 5182
} | class ____(fixtures.TablesTest):
__requires__ = ("sequences",)
__sparse_driver_backend__ = True
run_create_tables = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"seq_pk",
metadata,
Column(
"id",
Integer,
normalize_sequence(config, Sequence("tab_id_seq")),
primary_key=True,
),
Column("data", String(50)),
)
Table(
"seq_opt_pk",
metadata,
Column(
"id",
Integer,
normalize_sequence(
config,
Sequence("tab_id_seq", data_type=Integer, optional=True),
),
primary_key=True,
),
Column("data", String(50)),
)
Table(
"seq_no_returning",
metadata,
Column(
"id",
Integer,
normalize_sequence(config, Sequence("noret_id_seq")),
primary_key=True,
),
Column("data", String(50)),
implicit_returning=False,
)
if testing.requires.schemas.enabled:
Table(
"seq_no_returning_sch",
metadata,
Column(
"id",
Integer,
normalize_sequence(
config,
Sequence(
"noret_sch_id_seq", schema=config.test_schema
),
),
primary_key=True,
),
Column("data", String(50)),
implicit_returning=False,
schema=config.test_schema,
)
def test_insert_roundtrip(self, connection):
connection.execute(self.tables.seq_pk.insert(), dict(data="some data"))
self._assert_round_trip(self.tables.seq_pk, connection)
def test_insert_lastrowid(self, connection):
r = connection.execute(
self.tables.seq_pk.insert(), dict(data="some data")
)
eq_(
r.inserted_primary_key, (testing.db.dialect.default_sequence_base,)
)
def test_nextval_direct(self, connection):
r = connection.scalar(self.tables.seq_pk.c.id.default)
eq_(r, testing.db.dialect.default_sequence_base)
@requirements.sequences_optional
def test_optional_seq(self, connection):
r = connection.execute(
self.tables.seq_opt_pk.insert(), dict(data="some data")
)
eq_(r.inserted_primary_key, (1,))
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(row, (testing.db.dialect.default_sequence_base, "some data"))
def test_insert_roundtrip_no_implicit_returning(self, connection):
connection.execute(
self.tables.seq_no_returning.insert(), dict(data="some data")
)
self._assert_round_trip(self.tables.seq_no_returning, connection)
@testing.combinations((True,), (False,), argnames="implicit_returning")
@testing.requires.schemas
def test_insert_roundtrip_translate(self, connection, implicit_returning):
seq_no_returning = Table(
"seq_no_returning_sch",
MetaData(),
Column(
"id",
Integer,
normalize_sequence(
config, Sequence("noret_sch_id_seq", schema="alt_schema")
),
primary_key=True,
),
Column("data", String(50)),
implicit_returning=implicit_returning,
schema="alt_schema",
)
connection = connection.execution_options(
schema_translate_map={"alt_schema": config.test_schema}
)
connection.execute(seq_no_returning.insert(), dict(data="some data"))
self._assert_round_trip(seq_no_returning, connection)
@testing.requires.schemas
def test_nextval_direct_schema_translate(self, connection):
seq = normalize_sequence(
config, Sequence("noret_sch_id_seq", schema="alt_schema")
)
connection = connection.execution_options(
schema_translate_map={"alt_schema": config.test_schema}
)
r = connection.scalar(seq)
eq_(r, testing.db.dialect.default_sequence_base)
| SequenceTest |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_user_details.py | {
"start": 49,
"end": 1160
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-user-details"
def setUp(self) -> None:
self.owner_user = self.create_user("foo@localhost", username="foo")
self.user = self.create_user("bar@localhost", username="bar")
self.org = self.create_organization(owner=self.owner_user)
self.member = self.create_member(organization=self.org, user=self.user)
self.login_as(user=self.owner_user)
def test_gets_info_for_user_in_org(self) -> None:
response = self.get_success_response(self.org.slug, self.user.id)
assert response.data["id"] == str(self.user.id)
assert response.data["email"] == self.user.email
def test_cannot_access_info_if_user_not_in_org(self) -> None:
user = self.create_user("meep@localhost", username="meep")
self.get_error_response(self.org.slug, user.id, status_code=404)
def test_bad_user_id(self) -> None:
self.get_error_response(self.org.slug, 123, status_code=404)
self.get_error_response(self.org.slug, "not_valid", status_code=400)
| OrganizationUserDetailsTest |
python | tensorflow__tensorflow | tensorflow/compiler/tests/matrix_band_part_test.py | {
"start": 985,
"end": 4744
} | class ____(xla_test.XLATestCase, parameterized.TestCase):
@parameterized.parameters(
{
'batch_shape': [],
'rows': 1,
'cols': 1
},
{
'batch_shape': [],
'rows': 1,
'cols': 2
},
{
'batch_shape': [],
'rows': 1,
'cols': 7
},
{
'batch_shape': [],
'rows': 2,
'cols': 1
},
{
'batch_shape': [],
'rows': 2,
'cols': 2
},
{
'batch_shape': [],
'rows': 2,
'cols': 7
},
{
'batch_shape': [],
'rows': 7,
'cols': 1
},
{
'batch_shape': [],
'rows': 7,
'cols': 2
},
{
'batch_shape': [],
'rows': 7,
'cols': 7
},
{
'batch_shape': [2,],
'rows': 1,
'cols': 1
},
{
'batch_shape': [2,],
'rows': 1,
'cols': 2
},
{
'batch_shape': [2,],
'rows': 1,
'cols': 7
},
{
'batch_shape': [2,],
'rows': 2,
'cols': 1
},
{
'batch_shape': [2,],
'rows': 2,
'cols': 2
},
{
'batch_shape': [2,],
'rows': 2,
'cols': 7
},
{
'batch_shape': [2,],
'rows': 7,
'cols': 1
},
{
'batch_shape': [2,],
'rows': 7,
'cols': 2
},
{
'batch_shape': [2,],
'rows': 7,
'cols': 7
},
{
'batch_shape': [1, 3, 2],
'rows': 1,
'cols': 1
},
{
'batch_shape': [1, 3, 2],
'rows': 1,
'cols': 2
},
{
'batch_shape': [1, 3, 2],
'rows': 1,
'cols': 7
},
{
'batch_shape': [1, 3, 2],
'rows': 2,
'cols': 1
},
{
'batch_shape': [1, 3, 2],
'rows': 2,
'cols': 2
},
{
'batch_shape': [1, 3, 2],
'rows': 2,
'cols': 7
},
{
'batch_shape': [1, 3, 2],
'rows': 7,
'cols': 1
},
{
'batch_shape': [1, 3, 2],
'rows': 7,
'cols': 2
},
{
'batch_shape': [1, 3, 2],
'rows': 7,
'cols': 7
},
)
def testMatrixBandPart(self, batch_shape, rows, cols):
# TODO(b/125505881): Disabled due to LLVM backend crash.
if self.device == 'XLA_CPU' and cols == 7 and rows == 1 and batch_shape == [
1, 3, 2
]:
pass
for dtype in self.float_types:
with self.session():
mat = np.ones(batch_shape + [rows, cols]).astype(dtype)
batch_mat = np.tile(mat, batch_shape + [1, 1])
for lower in -1, 0, 1, rows - 1:
for upper in -1, 0, 1, cols - 1:
band_np = mat
if lower >= 0:
band_np = np.triu(band_np, -lower)
if upper >= 0:
band_np = np.tril(band_np, upper)
if batch_shape:
band_np = np.tile(band_np, batch_shape + [1, 1])
placeholder = array_ops.placeholder(dtype)
with self.test_scope():
band = array_ops.matrix_band_part(
placeholder, constant_op.constant(lower, dtype=dtypes.int32),
constant_op.constant(upper, dtype=dtypes.int32))
feed_dict = {placeholder: batch_mat}
self.assertAllEqual(band_np, band.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
| MatrixBandPartTest |
python | django__django | tests/queries/models.py | {
"start": 15676,
"end": 15869
} | class ____(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
| Ticket21203Parent |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/pooling.py | {
"start": 43953,
"end": 45167
} | class ____(Layer):
"""Abstract class for different global pooling 3D layers."""
def __init__(self, data_format=None, keepdims=False, **kwargs):
super(GlobalPooling3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
self.keepdims = keepdims
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
if self.keepdims:
return tensor_shape.TensorShape(
[input_shape[0], 1, 1, 1, input_shape[4]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[4]])
else:
if self.keepdims:
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], 1, 1, 1])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format, 'keepdims': self.keepdims}
base_config = super(GlobalPooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| GlobalPooling3D |
python | huggingface__transformers | src/transformers/models/luke/modeling_luke.py | {
"start": 10337,
"end": 11513
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
layer plus the initial entity embedding outputs.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Outputs of question answering models.
"""
)
| LukeTokenClassifierOutput |
python | keras-team__keras | keras/src/utils/numerical_utils_test.py | {
"start": 182,
"end": 5466
} | class ____(testing.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES)),
((3,), (3, NUM_CLASSES)),
((4, 3), (4, 3, NUM_CLASSES)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
((3, 1), (3, NUM_CLASSES)),
((3, 2, 1), (3, 2, NUM_CLASSES)),
]
)
def test_to_categorical(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
# Check shape
self.assertEqual(one_hot.shape, expected_shape)
# Make sure there is only one 1 in a row
self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
# Get original labels back from one hots
self.assertTrue(
np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
)
def test_to_categorical_without_num_classes(self):
label = [0, 2, 5]
one_hot = numerical_utils.to_categorical(label)
self.assertEqual(one_hot.shape, (3, 5 + 1))
def test_to_categorical_with_backend_tensor(self):
label = backend.convert_to_tensor(np.array([0, 2, 1, 3, 4]))
expected = backend.convert_to_tensor(
np.array(
[
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
)
)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
assert backend.is_tensor(one_hot)
self.assertAllClose(one_hot, expected)
@parameterized.parameters([1, 2, 3])
def test_normalize(self, order):
xb = backend.random.uniform((3, 3), seed=1337)
xnp = backend.convert_to_numpy(xb)
# Expected result
l2 = np.atleast_1d(np.linalg.norm(xnp, order, axis=-1))
l2[l2 == 0] = 1
expected = xnp / np.expand_dims(l2, axis=-1)
# Test NumPy
out = numerical_utils.normalize(xnp, axis=-1, order=order)
self.assertIsInstance(out, np.ndarray)
self.assertAllClose(out, expected)
# Test backend
out = numerical_utils.normalize(xb, axis=-1, order=order)
self.assertTrue(backend.is_tensor(out))
self.assertAllClose(backend.convert_to_numpy(out), expected)
def test_build_pos_neg_masks(self):
query_labels = np.array([0, 1, 2, 2, 0])
key_labels = np.array([0, 1, 2, 0, 2])
expected_shape = (len(query_labels), len(key_labels))
positive_mask, negative_mask = numerical_utils.build_pos_neg_masks(
query_labels, key_labels, remove_diagonal=False
)
positive_mask = backend.convert_to_numpy(positive_mask)
negative_mask = backend.convert_to_numpy(negative_mask)
self.assertEqual(positive_mask.shape, expected_shape)
self.assertEqual(negative_mask.shape, expected_shape)
self.assertTrue(
np.all(np.logical_not(np.logical_and(positive_mask, negative_mask)))
)
expected_positive_mask_keep_diag = np.array(
[
[1, 0, 0, 1, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1],
[0, 0, 1, 0, 1],
[1, 0, 0, 1, 0],
],
dtype="bool",
)
self.assertTrue(
np.all(positive_mask == expected_positive_mask_keep_diag)
)
self.assertTrue(
np.all(
negative_mask
== np.logical_not(expected_positive_mask_keep_diag)
)
)
positive_mask, negative_mask = numerical_utils.build_pos_neg_masks(
query_labels, key_labels, remove_diagonal=True
)
positive_mask = backend.convert_to_numpy(positive_mask)
negative_mask = backend.convert_to_numpy(negative_mask)
self.assertEqual(positive_mask.shape, expected_shape)
self.assertEqual(negative_mask.shape, expected_shape)
self.assertTrue(
np.all(np.logical_not(np.logical_and(positive_mask, negative_mask)))
)
expected_positive_mask_with_remove_diag = np.array(
[
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 1, 0, 1],
[1, 0, 0, 1, 0],
],
dtype="bool",
)
self.assertTrue(
np.all(positive_mask == expected_positive_mask_with_remove_diag)
)
query_labels = np.array([1, 2, 3])
key_labels = np.array([1, 2, 3, 1])
positive_mask, negative_mask = numerical_utils.build_pos_neg_masks(
query_labels, key_labels, remove_diagonal=True
)
positive_mask = backend.convert_to_numpy(positive_mask)
negative_mask = backend.convert_to_numpy(negative_mask)
expected_shape_diff_sizes = (len(query_labels), len(key_labels))
self.assertEqual(positive_mask.shape, expected_shape_diff_sizes)
self.assertEqual(negative_mask.shape, expected_shape_diff_sizes)
self.assertTrue(
np.all(np.logical_not(np.logical_and(positive_mask, negative_mask)))
)
| TestNumericalUtils |
python | spyder-ide__spyder | spyder/plugins/completion/api.py | {
"start": 20432,
"end": 20791
} | class ____:
"""LSP completion element categories."""
TEXT = 1
METHOD = 2
FUNCTION = 3
CONSTRUCTOR = 4
FIELD = 5
VARIABLE = 6
CLASS = 7
INTERFACE = 8
MODULE = 9
PROPERTY = 10
UNIT = 11
VALUE = 12
ENUM = 13
KEYWORD = 14
SNIPPET = 15
COLOR = 16
FILE = 17
REFERENCE = 18
| CompletionItemKind |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/util.py | {
"start": 9796,
"end": 14526
} | class ____(dict):
"""Dict keys available as attributes. Shadows."""
def __getattribute__(self, key):
try:
return self[key]
except KeyError:
return dict.__getattribute__(self, key)
def __call__(self, *keys):
return tuple([self[key] for key in keys])
get_all = __call__
def drop_all_tables_from_metadata(metadata, engine_or_connection):
from . import engines
def go(connection):
engines.testing_reaper.prepare_for_drop_tables(connection)
if not connection.dialect.supports_alter:
from . import assertions
with assertions.expect_warnings(
"Can't sort tables", assert_=False
):
metadata.drop_all(connection)
else:
metadata.drop_all(connection)
if not isinstance(engine_or_connection, Connection):
with engine_or_connection.begin() as connection:
go(connection)
else:
go(engine_or_connection)
def drop_all_tables(
engine,
inspector,
schema=None,
consider_schemas=(None,),
include_names=None,
):
if include_names is not None:
include_names = set(include_names)
if schema is not None:
assert consider_schemas == (
None,
), "consider_schemas and schema are mutually exclusive"
consider_schemas = (schema,)
with engine.begin() as conn:
for table_key, fkcs in reversed(
inspector.sort_tables_on_foreign_key_dependency(
consider_schemas=consider_schemas
)
):
if table_key:
if (
include_names is not None
and table_key[1] not in include_names
):
continue
conn.execute(
DropTable(
Table(table_key[1], MetaData(), schema=table_key[0])
)
)
elif fkcs:
if not engine.dialect.supports_alter:
continue
for t_key, fkc in fkcs:
if (
include_names is not None
and t_key[1] not in include_names
):
continue
tb = Table(
t_key[1],
MetaData(),
Column("x", Integer),
Column("y", Integer),
schema=t_key[0],
)
conn.execute(
DropConstraint(
ForeignKeyConstraint([tb.c.x], [tb.c.y], name=fkc)
)
)
def teardown_events(event_cls):
@decorator
def decorate(fn, *arg, **kw):
try:
return fn(*arg, **kw)
finally:
event_cls._clear()
return decorate
def total_size(o):
"""Returns the approximate memory footprint an object and all of its
contents.
source: https://code.activestate.com/recipes/577504/
"""
def dict_handler(d):
return chain.from_iterable(d.items())
all_handlers = {
tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter,
}
seen = set() # track which object id's have already been seen
default_size = getsizeof(0) # estimate sizeof object without __sizeof__
def sizeof(o):
if id(o) in seen: # do not double count the same object
return 0
seen.add(id(o))
s = getsizeof(o, default_size)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
def count_cache_key_tuples(tup):
"""given a cache key tuple, counts how many instances of actual
tuples are found.
used to alert large jumps in cache key complexity.
"""
stack = [tup]
sentinel = object()
num_elements = 0
while stack:
elem = stack.pop(0)
if elem is sentinel:
num_elements += 1
elif isinstance(elem, tuple):
if elem:
stack = list(elem) + [sentinel] + stack
return num_elements
@contextlib.contextmanager
def skip_if_timeout(seconds: float, cleanup: Any = None):
now = time.time()
yield
sec = time.time() - now
if sec > seconds:
try:
cleanup()
finally:
config.skip_test(
f"test took too long ({sec:.4f} seconds > {seconds})"
)
| adict |
python | keras-team__keras | keras/src/layers/preprocessing/feature_space_test.py | {
"start": 308,
"end": 22721
} | class ____(testing.TestCase):
def _get_train_data_dict(
self,
as_dataset=False,
as_tensors=False,
as_labeled_dataset=False,
include_strings=True,
):
data = {
"float_1": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"float_2": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"float_3": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"int_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"int_2": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"int_3": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
if include_strings:
data["string_1"] = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
data["string_2"] = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
if as_dataset:
return tf_data.Dataset.from_tensor_slices(data)
elif as_tensors:
return {
key: ops.convert_to_tensor(value) for key, value in data.items()
}
elif as_labeled_dataset:
labels = [0, 1, 0, 1, 0, 0, 1, 0, 1, 1]
return tf_data.Dataset.from_tensor_slices((data, labels))
return data
def test_basic_usage_no_strings(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("int_1", "int_2"), ("int_2", "int_3")],
output_mode="concat",
)
# Test unbatched adapt
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
# Test batched adapt
fs.adapt(
self._get_train_data_dict(
as_dataset=True, include_strings=False
).batch(4)
)
# Test unbatched call on raw data
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
out_dim = 152
self.assertEqual(out.shape, (out_dim,))
# Test unbatched call on backend tensors
data = self._get_train_data_dict(as_tensors=True, include_strings=False)
data = {key: value[0] for key, value in data.items()}
out = fs(data)
self.assertEqual(out.shape, (out_dim,))
# Test batched call on raw data
out = fs(self._get_train_data_dict(include_strings=False))
self.assertEqual(out.shape, (10, out_dim))
# Test batched call on backend tensors
out = fs(
self._get_train_data_dict(as_tensors=True, include_strings=False)
)
self.assertEqual(out.shape, (10, out_dim))
def test_output_mode_dict_no_strings(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("int_1", "int_2")],
output_mode="dict",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
# Test unbatched call on raw data
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (32,))
self.assertEqual(out["int_1_X_int_2"].shape, (32,))
# Test batched call on raw data
out = fs(self._get_train_data_dict(include_strings=False))
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (10, 32))
# Test batched call on backend tensors
out = fs(
self._get_train_data_dict(as_tensors=True, include_strings=False)
)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (10, 32))
def test_output_mode_dict_of_ints_no_strings(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": cls.integer_categorical(output_mode="int"),
"int_2": cls.integer_hashed(num_bins=32, output_mode="int"),
"int_3": cls.integer_categorical(output_mode="int"),
},
crosses=[
cls.cross(
("int_1", "int_2"), output_mode="int", crossing_dim=32
),
],
output_mode="dict",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_2"].dtype).startswith("int")
)
self.assertEqual(out["int_1_X_int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_1_X_int_2"].dtype).startswith(
"int"
)
)
def test_basic_usage(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="concat",
)
# Test unbatched adapt
fs.adapt(self._get_train_data_dict(as_dataset=True))
# Test batched adapt
fs.adapt(self._get_train_data_dict(as_dataset=True).batch(4))
# Test unbatched call on raw data
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
out_dim = 195
self.assertEqual(out.shape, (out_dim,))
# Test unbatched call on tensors
if backend.backend() == "tensorflow":
data = self._get_train_data_dict(as_tensors=True)
data = {key: value[0] for key, value in data.items()}
out = fs(data)
self.assertEqual(out.shape, (out_dim,))
# Test batched call on raw data
out = fs(self._get_train_data_dict())
self.assertEqual(out.shape, (10, out_dim))
# Test batched call on tensors
if backend.backend() == "tensorflow":
out = fs(self._get_train_data_dict(as_tensors=True))
self.assertEqual(out.shape, (10, out_dim))
def test_output_mode_dict(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="dict",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
# Test unbatched call on raw data
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (11,))
self.assertEqual(out["int_2"].shape, (32,))
self.assertEqual(out["string_2_X_int_2"].shape, (32,))
# Test batched call on raw data
out = fs(self._get_train_data_dict())
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (10, 11))
self.assertEqual(out["int_2"].shape, (10, 32))
self.assertEqual(out["string_2_X_int_2"].shape, (10, 32))
# Test batched call on tensors
if backend.backend() == "tensorflow":
out = fs(self._get_train_data_dict(as_tensors=True))
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (10, 11))
self.assertEqual(out["int_2"].shape, (10, 32))
self.assertEqual(out["string_2_X_int_2"].shape, (10, 32))
def test_output_mode_dict_of_ints(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": cls.string_categorical(output_mode="int"),
"string_2": cls.string_hashed(num_bins=32, output_mode="int"),
"int_1": cls.integer_categorical(output_mode="int"),
"int_2": cls.integer_hashed(num_bins=32, output_mode="int"),
"int_3": cls.integer_categorical(output_mode="int"),
},
crosses=[
cls.cross(
("float_3", "string_1"), output_mode="int", crossing_dim=32
),
cls.cross(
("string_2", "int_2"), output_mode="int", crossing_dim=32
),
],
output_mode="dict",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["string_1"].dtype).startswith("int")
)
self.assertEqual(out["int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_2"].dtype).startswith("int")
)
self.assertEqual(out["string_2_X_int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["string_2_X_int_2"].dtype).startswith(
"int"
)
)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires string dtype."
)
def test_functional_api_sync_processing(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="concat",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
inputs = fs.get_inputs()
features = fs.get_encoded_features()
outputs = layers.Dense(1)(features)
model = models.Model(inputs=inputs, outputs=outputs)
model.compile("adam", "mse")
ds = self._get_train_data_dict(as_labeled_dataset=True)
model.fit(ds.batch(4))
model.evaluate(ds.batch(4))
ds = self._get_train_data_dict(as_dataset=True)
model.predict(ds.batch(4))
@pytest.mark.requires_trainable_backend
def test_tf_data_async_processing(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "int_1"), ("int_1", "int_2")],
output_mode="concat",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
features = fs.get_encoded_features()
outputs = layers.Dense(1)(features)
model = models.Model(inputs=features, outputs=outputs)
model.compile("adam", "mse")
ds = self._get_train_data_dict(
as_labeled_dataset=True, include_strings=False
)
# Try map before batch
ds = ds.map(lambda x, y: (fs(x), y))
model.fit(ds.batch(4))
# Try map after batch
ds = self._get_train_data_dict(
as_labeled_dataset=True, include_strings=False
)
ds = ds.batch(4)
ds = ds.map(lambda x, y: (fs(x), y))
model.evaluate(ds)
ds = self._get_train_data_dict(as_dataset=True, include_strings=False)
ds = ds.map(fs)
model.predict(ds.batch(4))
def test_advanced_usage(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": cls.float(),
"float_2": cls.float_normalized(),
"float_3": cls.float_discretized(num_bins=3),
"string_1": cls.string_categorical(max_tokens=5),
"string_2": cls.string_hashed(num_bins=32),
"int_1": cls.integer_categorical(
max_tokens=5, num_oov_indices=2
),
"int_2": cls.integer_hashed(num_bins=32),
"int_3": cls.integer_categorical(max_tokens=5),
},
crosses=[
cls.cross(("float_3", "string_1"), crossing_dim=32),
cls.cross(("string_2", "int_2"), crossing_dim=32),
],
output_mode="concat",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertEqual(out.shape, (148,))
def test_manual_kpl(self):
data = {
"text": ["1st string", "2nd string", "3rd string"],
}
cls = feature_space.FeatureSpace
# Test with a tf-idf TextVectorization layer
tv = layers.TextVectorization(output_mode="tf_idf")
fs = feature_space.FeatureSpace(
features={
"text": cls.feature(
preprocessor=tv, dtype="string", output_mode="float"
),
},
output_mode="concat",
)
fs.adapt(tf_data.Dataset.from_tensor_slices(data))
out = fs(data)
self.assertEqual(list(out.shape), [3, 5])
def test_no_adapt(self):
data = {
"int_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"text_1": [
"This is",
"not just",
"an example",
"of random words.",
"these are",
"some words",
"in",
"a random",
"example.",
"Bye!",
],
"float_1": [
-1.2,
0.0,
2.4,
1.2,
15.0,
-100.0,
23.1,
3.12,
0.1,
-0.01,
],
}
cls = feature_space.FeatureSpace
# Pre-defined vocabulary. No need to adapt.
tv_vocab = [
"this",
"is",
"just",
"an",
"example",
"with",
"some",
"words",
]
tv_with_vocab = layers.TextVectorization(
vocabulary=tv_vocab, output_mode="int", output_sequence_length=3
)
# Pre-defined mean and variance. No need to adapt.
mean, variance = 12.0, 5.0
normalization = layers.Normalization(mean=mean, variance=variance)
fs = feature_space.FeatureSpace(
{
"int_1": "integer_hashed",
"text_1": cls.feature(
dtype="string",
preprocessor=tv_with_vocab,
output_mode="int",
),
"float_1": cls.feature(
dtype="float32",
preprocessor=normalization,
output_mode="float",
),
},
output_mode="dict",
)
out = fs(data)
float_out = ops.divide(
ops.convert_to_tensor(data["float_1"]) - mean, ops.sqrt(variance)
)
float_out = ops.reshape(float_out, (10, -1))
self.assertEqual(tuple(out["int_1"].shape), (10, 32))
self.assertEqual(tuple(out["text_1"].shape), (10, 3))
self.assertAllClose(out["float_1"], float_out, atol=1e-3)
@pytest.mark.skipif(
backend.backend() in ("numpy", "torch"),
reason=(
"TODO: When using FeatureSpace as a Model in torch and numpy, "
"the error is large."
),
)
def test_saving(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": cls.float(),
"float_2": cls.float_normalized(),
"float_3": cls.float_discretized(num_bins=3),
"int_1": cls.integer_categorical(
max_tokens=5, num_oov_indices=2
),
"int_2": cls.integer_hashed(num_bins=32),
"int_3": cls.integer_categorical(max_tokens=5),
},
crosses=[
cls.cross(("float_3", "int_1"), crossing_dim=32),
cls.cross(("int_1", "int_2"), crossing_dim=32),
],
output_mode="concat",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
ref_out = fs(data)
temp_filepath = os.path.join(self.get_temp_dir(), "fs.keras")
fs.save(temp_filepath)
fs = saving_api.load_model(temp_filepath)
# Save again immediately after loading to test idempotency
temp_filepath = os.path.join(self.get_temp_dir(), "fs2.keras")
fs.save(temp_filepath)
# Test correctness of the first saved FS
out = fs(data)
self.assertAllClose(out, ref_out)
inputs = fs.get_inputs()
outputs = fs.get_encoded_features()
model = models.Model(inputs=inputs, outputs=outputs)
ds = self._get_train_data_dict(as_dataset=True, include_strings=False)
out = model.predict(ds.batch(4))
self.assertAllClose(out[0], ref_out)
# Test correctness of the re-saved FS
fs = saving_api.load_model(temp_filepath)
out = fs(data)
self.assertAllClose(out, ref_out)
def test_errors(self):
# Test no features
with self.assertRaisesRegex(ValueError, "cannot be None or empty"):
feature_space.FeatureSpace(features={})
# Test no crossing dim
with self.assertRaisesRegex(ValueError, "`crossing_dim`"):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
crosses=[("f1", "f2")],
crossing_dim=None,
)
# Test wrong cross feature name
with self.assertRaisesRegex(ValueError, "should be present in "):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
crosses=[("f1", "unknown")],
crossing_dim=32,
)
# Test wrong output mode
with self.assertRaisesRegex(ValueError, "for argument `output_mode`"):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
output_mode="unknown",
)
# Test call before adapt
with self.assertRaisesRegex(ValueError, "You need to call `.adapt"):
fs = feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
}
)
fs({"f1": [0], "f2": [0]})
# Test get_encoded_features before adapt
with self.assertRaisesRegex(ValueError, "You need to call `.adapt"):
fs = feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
}
)
fs.get_encoded_features()
| FeatureSpaceTest |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/assets.py | {
"start": 1473,
"end": 1663
} | class ____(StrictBaseModel):
"""Task outlet reference serializer for assets."""
dag_id: str
task_id: str
created_at: datetime
updated_at: datetime
| TaskOutletAssetReference |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/descriptor1.py | {
"start": 2041,
"end": 2352
} | class ____:
def __get__(self, instance: Any, owner: Any) -> int: ...
@overload
def __set__(self, owner: bytes, value: int | None) -> None: ...
@overload
def __set__(self, owner: "B", value: int | None) -> None: ...
def __set__(self, owner: Any, value: int | None) -> None: ...
| Descriptor5 |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/async/app_async.py | {
"start": 738,
"end": 1134
} | class ____(webapp2.RequestHandler):
def get(self):
acct = Account.get_by_id(users.get_current_user().user_id())
acct.view_counter += 1
future = acct.put_async()
# ...read something else from Datastore...
self.response.out.write("Content of the page")
future.get_result()
app = webapp2.WSGIApplication([("/", MyRequestHandler)])
| MyRequestHandler |
python | catalyst-team__catalyst | catalyst/contrib/layers/se.py | {
"start": 1384,
"end": 2350
} | class ____(nn.Module): # noqa: N801
"""
The sSE (Channel Squeeze and Spatial Excitation) block from the
`Concurrent Spatial and Channel ‘Squeeze & Excitation’
in Fully Convolutional Networks`__ paper.
Adapted from
https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/66178
Shape:
- Input: (batch, channels, height, width)
- Output: (batch, channels, height, width) (same shape as input)
__ https://arxiv.org/abs/1803.02579
"""
def __init__(self, in_channels: int):
"""
Args:
in_channels: The number of channels
in the feature map of the input.
"""
super().__init__()
self.conv = nn.Conv2d(in_channels, 1, kernel_size=1, stride=1)
def forward(self, x: torch.Tensor):
"""Forward call."""
input_x = x
x = self.conv(x)
x = torch.sigmoid(x)
x = torch.mul(input_x, x)
return x
| sSE |
python | pypa__pip | tests/unit/test_finder.py | {
"start": 16377,
"end": 19928
} | class ____:
def make_test_link_evaluator(self, formats: Iterable[str]) -> LinkEvaluator:
target_python = TargetPython()
return LinkEvaluator(
project_name="pytest",
canonical_name=canonicalize_name("pytest"),
formats=frozenset(formats),
target_python=target_python,
allow_yanked=True,
)
@pytest.mark.parametrize(
"url, expected_version",
[
("http:/yo/pytest-1.0.tar.gz", "1.0"),
("http:/yo/pytest-1.0-py2.py3-none-any.whl", "1.0"),
],
)
def test_evaluate_link__match(self, url: str, expected_version: str) -> None:
"""Test that 'pytest' archives match for 'pytest'"""
link = Link(url)
evaluator = self.make_test_link_evaluator(formats=["source", "binary"])
actual = evaluator.evaluate_link(link)
assert actual == (LinkType.candidate, expected_version)
@pytest.mark.parametrize(
"url, link_type, fail_reason",
[
# TODO: Uncomment this test case when #1217 is fixed.
# 'http:/yo/pytest-xdist-1.0.tar.gz',
(
"http:/yo/pytest2-1.0.tar.gz",
LinkType.format_invalid,
"Missing project version for pytest",
),
(
"http:/yo/pytest_xdist-1.0-py2.py3-none-any.whl",
LinkType.different_project,
"wrong project name (not pytest)",
),
],
)
def test_evaluate_link__substring_fails(
self,
url: str,
link_type: LinkType,
fail_reason: str,
) -> None:
"""Test that 'pytest<something> archives won't match for 'pytest'."""
link = Link(url)
evaluator = self.make_test_link_evaluator(formats=["source", "binary"])
actual = evaluator.evaluate_link(link)
assert actual == (link_type, fail_reason)
def test_process_project_url(data: TestData) -> None:
project_name = "simple"
index_url = data.index_url("simple")
project_url = Link(f"{index_url}/{project_name}")
finder = make_test_finder(index_urls=[index_url])
link_evaluator = finder.make_link_evaluator(project_name)
actual = finder.process_project_url(
project_url,
link_evaluator=link_evaluator,
)
assert len(actual) == 1
package_link = actual[0]
assert package_link.name == "simple"
assert str(package_link.version) == "1.0"
def test_find_all_candidates_nothing() -> None:
"""Find nothing without anything"""
finder = make_test_finder()
assert not finder.find_all_candidates("pip")
def test_find_all_candidates_find_links(data: TestData) -> None:
finder = make_test_finder(find_links=[data.find_links])
versions = finder.find_all_candidates("simple")
assert [str(v.version) for v in versions] == ["3.0", "2.0", "1.0"]
def test_find_all_candidates_index(data: TestData) -> None:
finder = make_test_finder(index_urls=[data.index_url("simple")])
versions = finder.find_all_candidates("simple")
assert [str(v.version) for v in versions] == ["1.0"]
def test_find_all_candidates_find_links_and_index(data: TestData) -> None:
finder = make_test_finder(
find_links=[data.find_links],
index_urls=[data.index_url("simple")],
)
versions = finder.find_all_candidates("simple")
# first the find-links versions then the page versions
assert [str(v.version) for v in versions] == ["3.0", "2.0", "1.0", "1.0"]
| TestLinkEvaluator |
python | jazzband__django-polymorphic | example/pexp/models.py | {
"start": 357,
"end": 440
} | class ____(Project):
supervisor = models.CharField(max_length=30)
| ResearchProject |
python | pallets__quart | src/quart/testing/connections.py | {
"start": 861,
"end": 3833
} | class ____:
def __init__(
self, app: Quart, scope: HTTPScope, _preserve_context: bool = False
) -> None:
self.app = app
self.headers: Headers | None = None
self.push_promises: list[tuple[str, Headers]] = []
self.response_data = bytearray()
self.scope = scope
self.status_code: int | None = None
self._preserve_context = _preserve_context
self._send_queue: asyncio.Queue = asyncio.Queue()
self._receive_queue: asyncio.Queue = asyncio.Queue()
self._task: Awaitable[None] = None
async def send(self, data: bytes) -> None:
await self._send_queue.put(
{"type": "http.request", "body": data, "more_body": True}
)
async def send_complete(self) -> None:
await self._send_queue.put(
{"type": "http.request", "body": b"", "more_body": False}
)
async def receive(self) -> bytes:
data = await self._receive_queue.get()
if isinstance(data, Exception):
raise data
else:
return data
async def disconnect(self) -> None:
await self._send_queue.put({"type": "http.disconnect"})
async def __aenter__(self) -> TestHTTPConnection:
self._task = asyncio.ensure_future(
self.app(self.scope, self._asgi_receive, self._asgi_send)
)
return self
async def __aexit__(
self, exc_type: type, exc_value: BaseException, tb: TracebackType
) -> None:
if exc_type is not None:
await self.disconnect()
await self._task
while not self._receive_queue.empty():
data = await self._receive_queue.get()
if isinstance(data, bytes):
self.response_data.extend(data)
elif not isinstance(data, HTTPDisconnectError):
raise data
async def as_response(self) -> Response:
while not self._receive_queue.empty():
data = await self._receive_queue.get()
if isinstance(data, bytes):
self.response_data.extend(data)
return self.app.response_class(
bytes(self.response_data), self.status_code, self.headers
)
async def _asgi_receive(self) -> ASGIReceiveEvent:
return await self._send_queue.get()
async def _asgi_send(self, message: ASGISendEvent) -> None:
if message["type"] == "http.response.start":
self.headers = decode_headers(message["headers"])
self.status_code = message["status"]
elif message["type"] == "http.response.body":
await self._receive_queue.put(message["body"])
elif message["type"] == "http.response.push":
self.push_promises.append(
(message["path"], decode_headers(message["headers"]))
)
elif message["type"] == "http.disconnect":
await self._receive_queue.put(HTTPDisconnectError())
| TestHTTPConnection |
python | weaviate__weaviate-python-client | weaviate/backup/backup.py | {
"start": 2398,
"end": 2812
} | class ____(BaseModel):
"""Return type of the backup list method."""
collections: List[str] = Field(default_factory=list, alias="classes")
status: BackupStatus
backup_id: str = Field(alias="id")
started_at: Optional[datetime] = Field(alias="startedAt", default=None)
completed_at: Optional[datetime] = Field(alias="completedAt", default=None)
size: float = Field(default=0)
| BackupListReturn |
python | tiangolo__fastapi | tests/test_additional_responses_custom_validationerror.py | {
"start": 261,
"end": 318
} | class ____(BaseModel):
status: str
title: str
| Error |
python | apache__airflow | airflow-ctl/src/airflowctl/exceptions.py | {
"start": 1278,
"end": 1437
} | class ____(AirflowCtlNotFoundException):
"""Raise when a credential couldn't be found while performing an operation."""
| AirflowCtlCredentialNotFoundException |
python | ansible__ansible | lib/ansible/plugins/lookup/url.py | {
"start": 6784,
"end": 9162
} | class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
ret = []
for term in terms:
display.vvvv("url lookup connecting to %s" % term)
if self.get_option('follow_redirects') in ('yes', 'no'):
display.deprecated(
msg="Using 'yes' or 'no' for 'follow_redirects' parameter is deprecated.",
version='2.22',
)
try:
response = open_url(
term, validate_certs=self.get_option('validate_certs'),
use_proxy=self.get_option('use_proxy'),
url_username=self.get_option('username'),
url_password=self.get_option('password'),
headers=self.get_option('headers'),
force=self.get_option('force'),
timeout=self.get_option('timeout'),
http_agent=self.get_option('http_agent'),
force_basic_auth=self.get_option('force_basic_auth'),
follow_redirects=self.get_option('follow_redirects'),
use_gssapi=self.get_option('use_gssapi'),
unix_socket=self.get_option('unix_socket'),
ca_path=self.get_option('ca_path'),
unredirected_headers=self.get_option('unredirected_headers'),
ciphers=self.get_option('ciphers'),
use_netrc=self.get_option('use_netrc')
)
except HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, to_native(e)))
except URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, to_native(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, to_native(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, to_native(e)))
if self.get_option('split_lines'):
for line in response.read().splitlines():
ret.append(to_text(line))
else:
ret.append(to_text(response.read()))
return ret
| LookupModule |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/hooks/test_hive.py | {
"start": 25068,
"end": 37663
} | class ____:
def _upload_dataframe(self):
df = pd.DataFrame({"a": [1, 2], "b": [1, 2]})
self.local_path = "/tmp/TestHiveServer2Hook.csv"
df.to_csv(self.local_path, header=False, index=False)
def setup_method(self):
self._upload_dataframe()
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
self.database = "airflow"
self.table = "hive_server_hook"
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
a int,
b int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',';
LOAD DATA LOCAL INPATH '{{ params.csv_path }}'
OVERWRITE INTO TABLE {{ params.table }};
"""
self.columns = [f"{self.table}.a", f"{self.table}.b"]
with mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_metastore_client"
) as get_metastore_mock:
get_metastore_mock.return_value = mock.MagicMock()
self.hook = HiveMetastoreHook()
def test_get_conn(self):
hook = MockHiveServer2Hook()
hook.get_conn()
@mock.patch("pyhive.hive.connect")
def test_get_conn_with_password(self, mock_connect):
conn_id = "conn_with_password"
conn_env = CONN_ENV_PREFIX + conn_id.upper()
with mock.patch.dict(
"os.environ",
{conn_env: "jdbc+hive2://conn_id:conn_pass@localhost:10000/default?auth_mechanism=LDAP"},
):
HiveServer2Hook(hiveserver2_conn_id=conn_id).get_conn()
mock_connect.assert_called_once_with(
host="localhost",
port=10000,
auth="LDAP",
kerberos_service_name=None,
username="conn_id",
password="conn_pass",
database="default",
)
@pytest.mark.parametrize(
("host", "port", "schema", "message"),
[
("localhost", "10000", "default", None),
("localhost:", "10000", "default", "The host used in beeline command"),
(";ocalhost", "10000", "default", "The host used in beeline command"),
(";ocalho/", "10000", "default", "The host used in beeline command"),
("localhost", "as", "default", "The port used in beeline command"),
("localhost", "0;", "default", "The port used in beeline command"),
("localhost", "10/", "default", "The port used in beeline command"),
("localhost", ":", "default", "The port used in beeline command"),
("localhost", "-1", "default", "The port used in beeline command"),
("localhost", "655536", "default", "The port used in beeline command"),
("localhost", "1234", "default;", "The schema used in beeline command"),
],
)
def test_get_conn_with_wrong_connection_parameters(self, host, port, schema, message):
connection = Connection(
conn_id="test",
conn_type="hive",
host=host,
port=port,
schema=schema,
)
hook = HiveCliHook()
if message:
with pytest.raises(Exception, match=message):
hook._validate_beeline_parameters(connection)
else:
hook._validate_beeline_parameters(connection)
def test_get_records(self):
hook = MockHiveServer2Hook()
query = f"SELECT * FROM {self.table}"
envron_name = "AIRFLOW_CTX_LOGICAL_DATE" if AIRFLOW_V_3_0_PLUS else "AIRFLOW_CTX_EXECUTION_DATE"
with mock.patch.dict(
"os.environ",
{
"AIRFLOW_CTX_DAG_ID": "test_dag_id",
"AIRFLOW_CTX_TASK_ID": "HiveHook_3835",
envron_name: "2015-01-01T00:00:00+00:00",
"AIRFLOW_CTX_DAG_RUN_ID": "55",
"AIRFLOW_CTX_DAG_OWNER": "airflow",
"AIRFLOW_CTX_DAG_EMAIL": "test@airflow.com",
},
):
results = hook.get_records(query, schema=self.database)
assert results == [(1, 1), (2, 2)]
date_key = "logical_date" if AIRFLOW_V_3_0_PLUS else "execution_date"
hook.get_conn.assert_called_with(self.database)
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_id=test_dag_id")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.task_id=HiveHook_3835")
hook.mock_cursor.execute.assert_any_call(f"set airflow.ctx.{date_key}=2015-01-01T00:00:00+00:00")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_run_id=55")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_owner=airflow")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_email=test@airflow.com")
@pytest.mark.parametrize("df_type", ["pandas", "polars"])
def test_get_df(self, df_type):
hook = MockHiveServer2Hook()
query = f"SELECT * FROM {self.table}"
envron_name = "AIRFLOW_CTX_LOGICAL_DATE" if AIRFLOW_V_3_0_PLUS else "AIRFLOW_CTX_EXECUTION_DATE"
with mock.patch.dict(
"os.environ",
{
"AIRFLOW_CTX_DAG_ID": "test_dag_id",
"AIRFLOW_CTX_TASK_ID": "HiveHook_3835",
envron_name: "2015-01-01T00:00:00+00:00",
"AIRFLOW_CTX_DAG_RUN_ID": "55",
"AIRFLOW_CTX_DAG_OWNER": "airflow",
"AIRFLOW_CTX_DAG_EMAIL": "test@airflow.com",
},
):
df = hook.get_df(query, schema=self.database, df_type=df_type)
assert len(df) == 2
if df_type == "pandas":
assert df["hive_server_hook.a"].values.tolist() == [1, 2]
assert isinstance(df, pd.DataFrame)
elif df_type == "polars":
assert df["hive_server_hook.a"].to_list() == [1, 2]
assert isinstance(df, pl.DataFrame)
date_key = "logical_date" if AIRFLOW_V_3_0_PLUS else "execution_date"
hook.get_conn.assert_called_with(self.database)
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_id=test_dag_id")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.task_id=HiveHook_3835")
hook.mock_cursor.execute.assert_any_call(f"set airflow.ctx.{date_key}=2015-01-01T00:00:00+00:00")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_run_id=55")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_owner=airflow")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_email=test@airflow.com")
hook = MockHiveServer2Hook(connection_cursor=EmptyMockConnectionCursor())
query = f"SELECT * FROM {self.table}"
df = hook.get_df(query, schema=self.database, df_type=df_type)
assert len(df) == 0
def test_get_results_header(self):
hook = MockHiveServer2Hook()
query = f"SELECT * FROM {self.table}"
results = hook.get_results(query, schema=self.database)
assert [col[0] for col in results["header"]] == self.columns
def test_get_results_data(self):
hook = MockHiveServer2Hook()
query = f"SELECT * FROM {self.table}"
results = hook.get_results(query, schema=self.database)
assert results["data"] == [(1, 1), (2, 2)]
def test_to_csv(self):
hook = MockHiveServer2Hook()
hook._get_results = mock.MagicMock(
return_value=iter(
[
[
("hive_server_hook.a", "INT_TYPE", None, None, None, None, True),
("hive_server_hook.b", "INT_TYPE", None, None, None, None, True),
],
(1, 1),
(2, 2),
]
)
)
query = f"SELECT * FROM {self.table}"
csv_filepath = "query_results.csv"
hook.to_csv(
query,
csv_filepath,
schema=self.database,
delimiter=",",
lineterminator="\n",
output_header=True,
fetch_size=2,
)
df = pd.read_csv(csv_filepath, sep=",")
assert df.columns.tolist() == self.columns
assert df[self.columns[0]].values.tolist() == [1, 2]
assert len(df) == 2
def test_multi_statements(self):
sqls = [
"CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
f"SELECT * FROM {self.table}",
"DROP TABLE test_multi_statements",
]
hook = MockHiveServer2Hook()
envron_name = "AIRFLOW_CTX_LOGICAL_DATE" if AIRFLOW_V_3_0_PLUS else "AIRFLOW_CTX_EXECUTION_DATE"
with mock.patch.dict(
"os.environ",
{
"AIRFLOW_CTX_DAG_ID": "test_dag_id",
"AIRFLOW_CTX_TASK_ID": "HiveHook_3835",
envron_name: "2015-01-01T00:00:00+00:00",
"AIRFLOW_CTX_DAG_RUN_ID": "55",
"AIRFLOW_CTX_DAG_OWNER": "airflow",
"AIRFLOW_CTX_DAG_EMAIL": "test@airflow.com",
},
):
results = hook.get_records(sqls, schema=self.database)
assert results == [(1, 1), (2, 2)]
date_key = "logical_date" if AIRFLOW_V_3_0_PLUS else "execution_date"
hook.get_conn.assert_called_with(self.database)
hook.mock_cursor.execute.assert_any_call("CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)")
hook.mock_cursor.execute.assert_any_call(f"SELECT * FROM {self.table}")
hook.mock_cursor.execute.assert_any_call("DROP TABLE test_multi_statements")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_id=test_dag_id")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.task_id=HiveHook_3835")
hook.mock_cursor.execute.assert_any_call(f"set airflow.ctx.{date_key}=2015-01-01T00:00:00+00:00")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_run_id=55")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_owner=airflow")
hook.mock_cursor.execute.assert_any_call("set airflow.ctx.dag_email=test@airflow.com")
def test_get_results_with_hive_conf(self):
date_key = "logical_date" if AIRFLOW_V_3_0_PLUS else "execution_date"
hql = [
"set key",
"set airflow.ctx.dag_id",
"set airflow.ctx.dag_run_id",
"set airflow.ctx.task_id",
f"set airflow.ctx.{date_key}",
]
dag_id_ctx_var_name = AIRFLOW_VAR_NAME_FORMAT_MAPPING["AIRFLOW_CONTEXT_DAG_ID"]["env_var_format"]
task_id_ctx_var_name = AIRFLOW_VAR_NAME_FORMAT_MAPPING["AIRFLOW_CONTEXT_TASK_ID"]["env_var_format"]
logical_date_ctx_var_name = (
AIRFLOW_VAR_NAME_FORMAT_MAPPING["AIRFLOW_CONTEXT_LOGICAL_DATE"]["env_var_format"]
if AIRFLOW_V_3_0_PLUS
else AIRFLOW_VAR_NAME_FORMAT_MAPPING["AIRFLOW_CONTEXT_EXECUTION_DATE"]["env_var_format"]
)
dag_run_id_ctx_var_name = AIRFLOW_VAR_NAME_FORMAT_MAPPING["AIRFLOW_CONTEXT_DAG_RUN_ID"][
"env_var_format"
]
with mock.patch.dict(
"os.environ",
{
dag_id_ctx_var_name: "test_dag_id",
task_id_ctx_var_name: "test_task_id",
logical_date_ctx_var_name: f"test_{date_key}",
dag_run_id_ctx_var_name: "test_dag_run_id",
},
):
hook = MockHiveServer2Hook()
hook._get_results = mock.MagicMock(
return_value=iter(
[
"header",
("value", "test"),
("test_dag_id", "test"),
("test_task_id", "test"),
(f"test_{date_key}", "test"),
("test_dag_run_id", "test"),
]
)
)
output = "\n".join(
res_tuple[0] for res_tuple in hook.get_results(hql, hive_conf={"key": "value"})["data"]
)
assert "value" in output
assert "test_dag_id" in output
assert "test_task_id" in output
assert f"test_{date_key}" in output
assert "test_dag_run_id" in output
@pytest.mark.db_test
@mock.patch.dict("os.environ", AIRFLOW__CORE__SECURITY="kerberos")
| TestHiveServer2Hook |
python | ApeWorX__ape | src/ape/types/events.py | {
"start": 10195,
"end": 11437
} | class ____(BaseContractLog):
"""
A mock version of the ContractLog class used for testing purposes.
This class is designed to match a subset of event arguments in a ContractLog instance
by only comparing those event arguments that the user explicitly provides.
Inherits from :class:`~ape.types.BaseContractLog`, and overrides the
equality method for custom comparison
of event arguments between a MockContractLog and a ContractLog instance.
"""
def __eq__(self, other: Any) -> bool:
if (
not hasattr(other, "contract_address")
or not hasattr(other, "event_name")
or self.contract_address != other.contract_address
or self.event_name != other.event_name
):
return False
# NOTE: `self.event_arguments` contains a subset of items from `other.event_arguments`,
# but we skip those the user doesn't care to check
for name, value in self.event_arguments.items():
other_input = other.event_arguments.get(name)
if not _equal_event_inputs(value, other_input):
# Only exit on False; Else, keep checking.
return False
return True
| MockContractLog |
python | encode__django-rest-framework | rest_framework/request.py | {
"start": 2445,
"end": 3939
} | class ____:
"""
Placeholder for unset attributes.
Cannot use `None`, as that may be a valid value.
"""
pass
def _hasattr(obj, name):
return not getattr(obj, name) is Empty
def clone_request(request, method):
"""
Internal helper method to clone a request, replacing with a different
HTTP method. Used for checking permissions against other methods.
"""
ret = Request(request=request._request,
parsers=request.parsers,
authenticators=request.authenticators,
negotiator=request.negotiator,
parser_context=request.parser_context)
ret._data = request._data
ret._files = request._files
ret._full_data = request._full_data
ret._content_type = request._content_type
ret._stream = request._stream
ret.method = method
if hasattr(request, '_user'):
ret._user = request._user
if hasattr(request, '_auth'):
ret._auth = request._auth
if hasattr(request, '_authenticator'):
ret._authenticator = request._authenticator
if hasattr(request, 'accepted_renderer'):
ret.accepted_renderer = request.accepted_renderer
if hasattr(request, 'accepted_media_type'):
ret.accepted_media_type = request.accepted_media_type
if hasattr(request, 'version'):
ret.version = request.version
if hasattr(request, 'versioning_scheme'):
ret.versioning_scheme = request.versioning_scheme
return ret
| Empty |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly1.py | {
"start": 1021,
"end": 1177
} | class ____(F1):
# This should generate an error because a "not required" field can't
# override a "required" field.
c: ReadOnly[NotRequired[int]]
| F6 |
python | spyder-ide__spyder | spyder/utils/palette.py | {
"start": 757,
"end": 2662
} | class ____(DarkPalette):
"""Dark palette for Spyder."""
# Colors for information and feedback in dialogs
COLOR_SUCCESS_1 = Green.B40
COLOR_SUCCESS_2 = Green.B70
COLOR_SUCCESS_3 = Green.B90
COLOR_ERROR_1 = Red.B40
COLOR_ERROR_2 = Red.B70
COLOR_ERROR_3 = Red.B110
COLOR_WARN_1 = Orange.B40
COLOR_WARN_2 = Orange.B70
COLOR_WARN_3 = Orange.B90
COLOR_WARN_4 = Orange.B100
# Icon colors
ICON_1 = Gray.B140
ICON_2 = Blue.B80
ICON_3 = Green.B80
ICON_4 = Red.B70
ICON_5 = Orange.B70
ICON_6 = Gray.B30
ICON_7 = GroupDark.B90
# Colors for icons and variable explorer in dark mode
GROUP_1 = GroupDark.B10
GROUP_2 = GroupDark.B20
GROUP_3 = GroupDark.B30
GROUP_4 = GroupDark.B40
GROUP_5 = GroupDark.B50
GROUP_6 = GroupDark.B60
GROUP_7 = GroupDark.B70
GROUP_8 = GroupDark.B80
GROUP_9 = GroupDark.B90
GROUP_10 = GroupDark.B100
GROUP_11 = GroupDark.B110
GROUP_12 = GroupDark.B120
# Colors for highlight in editor
COLOR_HIGHLIGHT_1 = Blue.B10
COLOR_HIGHLIGHT_2 = Blue.B20
COLOR_HIGHLIGHT_3 = Blue.B30
COLOR_HIGHLIGHT_4 = Blue.B50
# Colors for occurrences from find widget
COLOR_OCCURRENCE_1 = Gray.B10
COLOR_OCCURRENCE_2 = Gray.B20
COLOR_OCCURRENCE_3 = Gray.B30
COLOR_OCCURRENCE_4 = Gray.B50
COLOR_OCCURRENCE_5 = Gray.B80
# Colors for Spyder and Python logos
PYTHON_LOGO_UP = Logos.B10
PYTHON_LOGO_DOWN = Logos.B20
SPYDER_LOGO_BACKGROUND = Logos.B30
SPYDER_LOGO_WEB = Logos.B40
SPYDER_LOGO_SNAKE = Logos.B40
# For special tabs
SPECIAL_TABS_SEPARATOR = Gray.B70
SPECIAL_TABS_SELECTED = DarkPalette.COLOR_ACCENT_2
# For the heart used to ask for donations
COLOR_HEART = Blue.B80
# For editor tooltips
TIP_TITLE_COLOR = Green.B80
TIP_CHAR_HIGHLIGHT_COLOR = Orange.B90
| SpyderPaletteDark |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/param.py | {
"start": 5370,
"end": 10400
} | class ____(MutableMapping[str, Any]):
"""
Class to hold all params for dags or tasks.
All the keys are strictly string and values are converted into Param's object
if they are not already. This class is to replace param's dictionary implicitly
and ideally not needed to be used directly.
:param dict_obj: A dict or dict like object to init ParamsDict
:param suppress_exception: Flag to suppress value exceptions while initializing the ParamsDict
"""
__version__: ClassVar[int] = 1
__slots__ = ["__dict", "suppress_exception"]
def __init__(self, dict_obj: Mapping[str, Any] | None = None, suppress_exception: bool = False):
self.__dict = {k: v if isinstance(v, Param) else Param(v) for k, v in (dict_obj or {}).items()}
self.suppress_exception = suppress_exception
def __bool__(self) -> bool:
return bool(self.__dict)
def __eq__(self, other: Any) -> bool:
if isinstance(other, ParamsDict):
return self.dump() == other.dump()
if isinstance(other, dict):
return self.dump() == other
return NotImplemented
def __hash__(self):
return hash(self.dump())
def __copy__(self) -> ParamsDict:
return ParamsDict(self.__dict, self.suppress_exception)
def __deepcopy__(self, memo: dict[int, Any] | None) -> ParamsDict:
return ParamsDict(copy.deepcopy(self.__dict, memo), self.suppress_exception)
def __contains__(self, o: object) -> bool:
return o in self.__dict
def __len__(self) -> int:
return len(self.__dict)
def __delitem__(self, v: str) -> None:
del self.__dict[v]
def __iter__(self):
return iter(self.__dict)
def __repr__(self):
return repr(self.dump())
def __setitem__(self, key: str, value: Any) -> None:
"""
Override for dictionary's ``setitem`` method to ensure all values are of Param's type only.
:param key: A key which needs to be inserted or updated in the dict
:param value: A value which needs to be set against the key. It could be of any
type but will be converted and stored as a Param object eventually.
"""
if isinstance(value, Param):
param = value
elif key in self.__dict:
param = self.__dict[key]
try:
param.resolve(value=value, suppress_exception=self.suppress_exception)
except ParamValidationError as ve:
raise ParamValidationError(f"Invalid input for param {key}: {ve}") from None
else:
# if the key isn't there already and if the value isn't of Param type create a new Param object
param = Param(value)
self.__dict[key] = param
def __getitem__(self, key: str) -> Any:
"""
Override for dictionary's ``getitem`` method to call the resolve method after fetching the key.
:param key: The key to fetch
"""
param = self.__dict[key]
return param.resolve(suppress_exception=self.suppress_exception)
def get_param(self, key: str) -> Param:
"""Get the internal :class:`.Param` object for this key."""
return self.__dict[key]
def items(self):
return ItemsView(self.__dict)
def values(self):
return ValuesView(self.__dict)
def update(self, *args, **kwargs) -> None:
if len(args) == 1 and not kwargs and isinstance(args[0], ParamsDict):
return super().update(args[0].__dict)
super().update(*args, **kwargs)
def dump(self) -> dict[str, Any]:
"""Dump the ParamsDict object as a dictionary, while suppressing exceptions."""
return {k: v.resolve(suppress_exception=True) for k, v in self.items()}
def validate(self) -> dict[str, Any]:
"""Validate & returns all the Params object stored in the dictionary."""
resolved_dict = {}
try:
for k, v in self.items():
resolved_dict[k] = v.resolve(suppress_exception=self.suppress_exception)
except ParamValidationError as ve:
raise ParamValidationError(f"Invalid input for param {k}: {ve}") from None
return resolved_dict
def serialize(self) -> dict[str, Any]:
return self.dump()
@staticmethod
def deserialize(data: dict, version: int) -> ParamsDict:
if version > ParamsDict.__version__:
raise TypeError("serialized version > class version")
return ParamsDict(data)
def _fill_missing_param_source(
self,
source: Literal["dag", "task"] | None = None,
) -> None:
for key in self.__dict:
if self.__dict[key].source is None:
self.__dict[key].source = source
@staticmethod
def filter_params_by_source(params: ParamsDict, source: Literal["dag", "task"]) -> ParamsDict:
return ParamsDict(
{key: param for key, param in params.__dict.items() if param.source == source},
)
| ParamsDict |
python | Textualize__rich | benchmarks/benchmarks.py | {
"start": 2801,
"end": 3921
} | class ____:
def time_table_no_wrapping(self):
self._print_table(width=100)
def time_table_heavy_wrapping(self):
self._print_table(width=30)
def _print_table(self, width):
table = Table(title="Star Wars Movies")
console = Console(
file=StringIO(), color_system="truecolor", legacy_windows=False, width=width
)
table.add_column("Released", justify="right", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row(
"Dec 20, 2019", "[b]Star Wars[/]: The Rise of Skywalker", "$952,110,690"
)
table.add_row(
"May 25, 2018", "Solo: A [red][b]Star Wars[/] Story[/]", "$393,151,347"
)
table.add_row(
"Dec 15, 2017",
"[b red]Star Wars[/] Ep. V111: The Last Jedi",
"$1,332,539,889",
)
table.add_row(
"Dec 16, 2016", "Rogue One: A [blue]Star Wars[/] Story", "$1,332,439,889"
)
console.print(table)
| TableSuite |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 92266,
"end": 92340
} | class ____(Binop):
operation = operator.mod
_operator_repr = "%"
| Mod |
python | doocs__leetcode | solution/2900-2999/2914.Minimum Number of Changes to Make Binary String Beautiful/Solution.py | {
"start": 0,
"end": 123
} | class ____:
def minChanges(self, s: str) -> int:
return sum(s[i] != s[i - 1] for i in range(1, len(s), 2))
| Solution |
python | django__django | tests/admin_filters/tests.py | {
"start": 8043,
"end": 8156
} | class ____(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
| DecadeFilterBookAdminWithoutParameter |
python | pytorch__pytorch | benchmarks/tensorexpr/conv.py | {
"start": 2651,
"end": 2945
} | class ____(ConvImplBench):
def __init__(self, *args):
super().__init__("depthwise_conv", *args)
@staticmethod
def module():
return "depthwise_conv"
benchmark.register_benchmark_class(ConvBench)
benchmark.register_benchmark_class(DepthwiseConvBench)
| DepthwiseConvBench |
python | ray-project__ray | rllib/core/learner/tests/test_learner_group.py | {
"start": 20016,
"end": 23636
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDown(cls) -> None:
ray.shutdown()
def test_async_update(self):
"""Test that async style updates converge to the same result as sync."""
scaling_modes = ["multi-gpu-ddp", "multi-cpu-ddp", "remote-gpu"]
for scaling_mode in scaling_modes:
print(f"Testing scaling mode: {scaling_mode}.")
env = gym.make("CartPole-v1")
config_overrides = REMOTE_CONFIGS[scaling_mode]
config = BaseTestingAlgorithmConfig().update_from_dict(config_overrides)
learner_group = config.build_learner_group(env=env)
timer_sync = _Timer()
timer_async = _Timer()
with timer_sync:
learner_group.update(episodes=FAKE_EPISODES, async_update=False)
with timer_async:
result_async = learner_group.update(
episodes=FAKE_EPISODES, async_update=True
)
# Ideally the first async update will return nothing, and an easy
# way to check that is if the time for an async update call is faster
# than the time for a sync update call.
self.assertLess(timer_async.mean, timer_sync.mean)
self.assertIsInstance(result_async, list)
loss = float("inf")
iter_i = 0
while True:
result_async = learner_group.update(
episodes=FAKE_EPISODES, async_update=True
)
if not result_async:
continue
self.assertIsInstance(result_async, list)
self.assertIsInstance(result_async[0], dict)
# Check one async Learner result.
loss = result_async[0][DEFAULT_MODULE_ID][Learner.TOTAL_LOSS_KEY]
# The loss is initially around 0.69 (ln2). When it gets to around
# 0.57 the return of the policy gets to around 100.
if loss < 0.57:
break
# Compare reported "mean_weight" with actual ones.
_check_multi_worker_weights(learner_group, result_async)
iter_i += 1
learner_group.shutdown()
self.assertLess(loss, 0.57)
def _check_multi_worker_weights(learner_group, results):
# Check that module weights are updated across workers and synchronized.
# for i in range(1, len(results)):
learner_1_results = results[0]
for module_id, mod_result in learner_1_results.items():
if module_id == ALL_MODULES:
continue
results = MetricsLogger.peek_results(results)
reported_mean_weights = np.mean([r[module_id]["mean_weight"] for r in results])
# Compare the reported mean weights (merged across all Learner workers,
# which all should have the same weights after updating) with the actual
# current mean weights.
parameters = learner_group.get_state(
components=(
COMPONENT_LEARNER + "/" + COMPONENT_RL_MODULE + "/" + module_id
),
)[COMPONENT_LEARNER][COMPONENT_RL_MODULE][module_id]
actual_mean_weights = np.mean([w.mean() for w in parameters.values()])
check(reported_mean_weights, actual_mean_weights, rtol=0.02)
if __name__ == "__main__":
import sys
class_ = sys.argv[1] if len(sys.argv) > 1 else None
sys.exit(pytest.main(["-v", __file__ + ("" if class_ is None else "::" + class_)]))
| TestLearnerGroupAsyncUpdate |
python | coleifer__peewee | tests/regressions.py | {
"start": 28399,
"end": 28911
} | class ____(ModelTestCase):
requires = [User]
def test_reselect_model_regression(self):
u1, u2, u3 = [User.create(username='u%s' % i) for i in '123']
query = User.select(User.username).order_by(User.username.desc())
self.assertEqual(list(query.tuples()), [('u3',), ('u2',), ('u1',)])
query = query.select(User)
self.assertEqual(list(query.tuples()), [
(u3.id, 'u3',),
(u2.id, 'u2',),
(u1.id, 'u1',)])
| TestReselectModelRegression |
python | django__django | tests/postgres_tests/test_indexes.py | {
"start": 3801,
"end": 4835
} | class ____(IndexTestMixin, PostgreSQLSimpleTestCase):
index_class = BrinIndex
def test_suffix(self):
self.assertEqual(BrinIndex.suffix, "brin")
def test_deconstruction(self):
index = BrinIndex(
fields=["title"],
name="test_title_brin",
autosummarize=True,
pages_per_range=16,
)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, "django.contrib.postgres.indexes.BrinIndex")
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"fields": ["title"],
"name": "test_title_brin",
"autosummarize": True,
"pages_per_range": 16,
},
)
def test_invalid_pages_per_range(self):
with self.assertRaisesMessage(
ValueError, "pages_per_range must be None or a positive integer"
):
BrinIndex(fields=["title"], name="test_title_brin", pages_per_range=0)
| BrinIndexTests |
python | lepture__authlib | authlib/oauth2/rfc9068/introspection.py | {
"start": 382,
"end": 4385
} | class ____(IntrospectionEndpoint):
r"""JWTIntrospectionEndpoint inherits from :ref:`specs/rfc7662`
:class:`~authlib.oauth2.rfc7662.IntrospectionEndpoint` and implements the machinery
to automatically process the JWT access tokens.
:param issuer: The issuer identifier for which tokens will be introspected.
:param \\*\\*kwargs: Other parameters are inherited from
:class:`~authlib.oauth2.rfc7662.introspection.IntrospectionEndpoint`.
::
class MyJWTAccessTokenIntrospectionEndpoint(JWTIntrospectionEndpoint):
def get_jwks(self): ...
def get_username(self, user_id): ...
# endpoint dedicated to JWT access token introspection
authorization_server.register_endpoint(
MyJWTAccessTokenIntrospectionEndpoint(
issuer="https://authorization-server.example.org",
)
)
# another endpoint dedicated to refresh token introspection
authorization_server.register_endpoint(MyRefreshTokenIntrospectionEndpoint)
"""
#: Endpoint name to be registered
ENDPOINT_NAME = "introspection"
def __init__(self, issuer, server=None, *args, **kwargs):
super().__init__(*args, server=server, **kwargs)
self.issuer = issuer
def create_endpoint_response(self, request):
""""""
# The authorization server first validates the client credentials
client = self.authenticate_endpoint_client(request)
# then verifies whether the token was issued to the client making
# the revocation request
token = self.authenticate_token(request, client)
# the authorization server invalidates the token
body = self.create_introspection_payload(token)
return 200, body, default_json_headers
def authenticate_token(self, request, client):
""""""
self.check_params(request, client)
# do not attempt to decode refresh_tokens
if request.form.get("token_type_hint") not in ("access_token", None):
raise ContinueIteration()
validator = JWTBearerTokenValidator(issuer=self.issuer, resource_server=None)
validator.get_jwks = self.get_jwks
try:
token = validator.authenticate_token(request.form["token"])
# if the token is not a JWT, fall back to the regular flow
except InvalidTokenError as exc:
raise ContinueIteration() from exc
if token and self.check_permission(token, client, request):
return token
def create_introspection_payload(self, token):
if not token:
return {"active": False}
try:
token.validate()
except ExpiredTokenError:
return {"active": False}
except InvalidClaimError as exc:
if exc.claim_name == "iss":
raise ContinueIteration() from exc
raise InvalidTokenError() from exc
payload = {
"active": True,
"token_type": "Bearer",
"client_id": token["client_id"],
"scope": token["scope"],
"sub": token["sub"],
"aud": token["aud"],
"iss": token["iss"],
"exp": token["exp"],
"iat": token["iat"],
}
if username := self.get_username(token["sub"]):
payload["username"] = username
return payload
def get_jwks(self):
"""Return the JWKs that will be used to check the JWT access token signature.
Developers MUST re-implement this method::
def get_jwks(self):
return load_jwks("jwks.json")
"""
raise NotImplementedError()
def get_username(self, user_id: str) -> str:
"""Returns an username from a user ID.
Developers MAY re-implement this method::
def get_username(self, user_id):
return User.get(id=user_id).username
"""
return None
| JWTIntrospectionEndpoint |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec4.py | {
"start": 3405,
"end": 3537
} | class ____:
# This should generate an error because Concatenate is not
# allowed in this context.
x: Concatenate[int, ...]
| B |
python | getsentry__sentry | src/sentry/migrations/0925_backfill_open_periods.py | {
"start": 6988,
"end": 8329
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0924_dashboard_add_unique_constraint_for_user_org_position"),
]
operations = [
migrations.RunPython(
backfill_group_open_periods,
migrations.RunPython.noop,
hints={"tables": ["sentry_groupopenperiod"]},
),
]
| Migration |
python | getsentry__sentry | tests/sentry/api/helpers/test_group_index.py | {
"start": 41987,
"end": 44600
} | class ____(TestCase):
@patch("sentry.signals.issue_deleted.send_robust")
def test_delete_groups_simple(self, send_robust: Mock) -> None:
groups = [self.create_group(), self.create_group()]
group_ids = [group.id for group in groups]
request = self.make_request(user=self.user, method="GET")
request.user = self.user
request.GET = QueryDict(f"id={group_ids[0]}&id={group_ids[1]}")
hashes = ["0" * 32, "1" * 32]
for i, group in enumerate(groups):
GroupHash.objects.create(project=self.project, group=group, hash=hashes[i])
add_group_to_inbox(group, GroupInboxReason.NEW)
with self.tasks():
schedule_tasks_to_delete_groups(request, [self.project], self.organization.id)
assert (
len(GroupHash.objects.filter(project_id=self.project.id, group_id__in=group_ids).all())
== 0
)
assert (
len(GroupInbox.objects.filter(project_id=self.project.id, group_id__in=group_ids).all())
== 0
)
assert send_robust.called
@patch(
"sentry.tasks.delete_seer_grouping_records.delete_seer_grouping_records_by_hash.apply_async"
)
@patch("sentry.signals.issue_deleted.send_robust")
def test_delete_groups_deletes_seer_records_by_hash(
self, send_robust: Mock, mock_delete_seer_grouping_records_by_hash: MagicMock
) -> None:
self.project.update_option("sentry:similarity_backfill_completed", int(time()))
groups = [self.create_group(), self.create_group()]
group_ids = [group.id for group in groups]
request = self.make_request(user=self.user, method="GET")
request.user = self.user
request.GET = QueryDict(f"id={group_ids[0]}&id={group_ids[1]}")
hashes = ["0" * 32, "1" * 32]
for i, group in enumerate(groups):
GroupHash.objects.create(project=self.project, group=group, hash=hashes[i])
add_group_to_inbox(group, GroupInboxReason.NEW)
with self.tasks():
schedule_tasks_to_delete_groups(request, [self.project], self.organization.id)
assert (
len(GroupHash.objects.filter(project_id=self.project.id, group_id__in=group_ids).all())
== 0
)
assert (
len(GroupInbox.objects.filter(project_id=self.project.id, group_id__in=group_ids).all())
== 0
)
assert send_robust.called
mock_delete_seer_grouping_records_by_hash.assert_called_with(
args=[self.project.id, hashes, 0]
)
| DeleteGroupsTest |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 33229,
"end": 33461
} | class ____(PrefectBaseModel):
"""Filter by `ArtifactCollection.flow_run_id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of flow run IDs to include"
)
| ArtifactCollectionFilterFlowRunId |
python | huggingface__transformers | examples/modular-transformers/modeling_multimodal2.py | {
"start": 14825,
"end": 18664
} | class ____(nn.Module):
def __init__(self, config: Multimodal2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
)
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
| Multimodal2VisionEmbeddings |
python | langchain-ai__langchain | libs/partners/anthropic/tests/unit_tests/middleware/test_anthropic_tools.py | {
"start": 10455,
"end": 16390
} | class ____:
"""Test system message handling in wrap_model_call."""
def test_text_editor_no_system_message(self) -> None:
"""Test text editor middleware without system message."""
from langchain.agents.middleware.types import ModelRequest
middleware = StateClaudeTextEditorMiddleware()
request = ModelRequest(
model=MagicMock(),
messages=[],
system_message=None,
tool_choice=None,
tools=[],
response_format=None,
state={"messages": []},
runtime=MagicMock(),
)
captured_request = None
def handler(req: ModelRequest) -> MagicMock:
nonlocal captured_request
captured_request = req
return MagicMock()
middleware.wrap_model_call(request, handler)
# No system message should be added for text editor
assert captured_request is not None
assert captured_request.system_message is None
def test_memory_middleware_adds_system_message(self) -> None:
"""Test memory middleware adds system message when none exists."""
from langchain.agents.middleware.types import ModelRequest
middleware = StateClaudeMemoryMiddleware()
request = ModelRequest(
model=MagicMock(),
messages=[],
system_message=None,
tool_choice=None,
tools=[],
response_format=None,
state={"messages": []},
runtime=MagicMock(),
)
captured_request = None
def handler(req: ModelRequest) -> MagicMock:
nonlocal captured_request
captured_request = req
return MagicMock()
middleware.wrap_model_call(request, handler)
# System message should be added
assert captured_request is not None
assert captured_request.system_message is not None
assert isinstance(captured_request.system_message, SystemMessage)
assert "MEMORY PROTOCOL" in captured_request.system_message.text
def test_memory_middleware_merges_system_message(self) -> None:
"""Test memory middleware merges with existing system message."""
from langchain.agents.middleware.types import ModelRequest
middleware = StateClaudeMemoryMiddleware()
existing_message = SystemMessage("You are a helpful assistant.")
request = ModelRequest(
model=MagicMock(),
messages=[],
system_message=existing_message,
tool_choice=None,
tools=[],
response_format=None,
state={"messages": []},
runtime=MagicMock(),
)
captured_request = None
def handler(req: ModelRequest) -> MagicMock:
nonlocal captured_request
captured_request = req
return MagicMock()
middleware.wrap_model_call(request, handler)
# System message should be merged
assert captured_request is not None
assert captured_request.system_message is not None
assert isinstance(captured_request.system_message, SystemMessage)
assert "You are a helpful assistant." in captured_request.system_message.text
assert "MEMORY PROTOCOL" in captured_request.system_message.text
async def test_async_memory_middleware_merges_system_message(self) -> None:
"""Test async memory middleware merges with existing system message."""
from langchain.agents.middleware.types import ModelRequest
middleware = StateClaudeMemoryMiddleware()
existing_message = SystemMessage("You are a helpful assistant.")
request = ModelRequest(
model=MagicMock(),
messages=[],
system_message=existing_message,
tool_choice=None,
tools=[],
response_format=None,
state={"messages": []},
runtime=MagicMock(),
)
captured_request = None
async def handler(req: ModelRequest) -> MagicMock:
nonlocal captured_request
captured_request = req
return MagicMock()
await middleware.awrap_model_call(request, handler)
# System message should be merged
assert captured_request is not None
assert captured_request.system_message is not None
assert isinstance(captured_request.system_message, SystemMessage)
assert "You are a helpful assistant." in captured_request.system_message.text
assert "MEMORY PROTOCOL" in captured_request.system_message.text
def test_custom_system_prompt_merges_correctly(self) -> None:
"""Test custom system prompt merges with existing system message."""
from langchain.agents.middleware.types import ModelRequest
custom_prompt = "Custom instructions for memory tool."
middleware = StateClaudeMemoryMiddleware(system_prompt=custom_prompt)
existing_message = SystemMessage("Existing instructions.")
request = ModelRequest(
model=MagicMock(),
messages=[],
system_message=existing_message,
tool_choice=None,
tools=[],
response_format=None,
state={"messages": []},
runtime=MagicMock(),
)
captured_request = None
def handler(req: ModelRequest) -> MagicMock:
nonlocal captured_request
captured_request = req
return MagicMock()
middleware.wrap_model_call(request, handler)
# Both prompts should be in the final message
assert captured_request is not None
assert captured_request.system_message is not None
assert "Existing instructions." in captured_request.system_message.text
assert custom_prompt in captured_request.system_message.text
| TestSystemMessageHandling |
python | mlflow__mlflow | mlflow/tensorflow/__init__.py | {
"start": 35405,
"end": 37846
} | class ____:
def __init__(self, keras_model, signature):
self.keras_model = keras_model
self.signature = signature
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.keras_model
def predict(
self,
data,
params: dict[str, Any] | None = None,
):
"""
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
Returns
Model predictions.
"""
if isinstance(data, pandas.DataFrame):
# This line is for backwards compatibility:
# If model signature is not None, when calling
# `keras_pyfunc_model.predict(pandas_dataframe)`, `_enforce_schema` will convert
# dataframe input into dict input, so in the case `_KerasModelWrapper.predict`
# will receive a dict type input.
# If model signature is None, `_enforce_schema` can do nothing, and if the input
# is dataframe, `_KerasModelWrapper.predict` will receive a dataframe input,
# we need to handle this case, to keep backwards compatibility.
return pandas.DataFrame(self.keras_model.predict(data.values), index=data.index)
supported_input_types = (np.ndarray, list, tuple, dict)
if not isinstance(data, supported_input_types):
raise MlflowException(
f"Unsupported input data type: {type(data)}. "
f"Must be one of: {[x.__name__ for x in supported_input_types]}",
INVALID_PARAMETER_VALUE,
)
return self.keras_model.predict(data)
def _assoc_list_to_map(lst):
"""
Convert an association list to a dictionary.
"""
d = {}
for run_id, metric in lst:
d[run_id] = d[run_id] + [metric] if run_id in d else [metric]
return d
@picklable_exception_safe_function
def _get_tensorboard_callback(lst):
import tensorflow as tf
for x in lst:
if isinstance(x, tf.keras.callbacks.TensorBoard):
return x
return None
# A representation of a TensorBoard event logging directory with two attributes:
# :location - string: The filesystem location of the logging directory
# :is_temp - boolean: `True` if the logging directory was created for temporary use by MLflow,
# `False` otherwise
| _KerasModelWrapper |
python | ray-project__ray | python/ray/data/_internal/planner/exchange/push_based_shuffle_task_scheduler.py | {
"start": 15357,
"end": 33537
} | class ____(ExchangeTaskScheduler):
"""
Push-based shuffle merges intermediate map outputs on the reducer nodes
while other map tasks are executing. The merged outputs are merged again
during a final reduce stage. This works as follows:
1. Submit rounds of concurrent map and merge tasks until all map inputs
have been processed. In each round, we execute:
M map tasks
Each produces N outputs. Each output contains P blocks.
N merge tasks
Takes 1 output from each of M map tasks.
Each produces P outputs.
Where M and N are chosen to maximize parallelism across CPUs. Note that
this assumes that all CPUs in the cluster will be dedicated to the
shuffle job.
Map and merge tasks are pipelined so that we always merge the previous
round of map outputs while executing the next round of map tasks.
2. In the final reduce stage:
R reduce tasks
Takes 1 output from one of the merge tasks from every round.
Notes:
N * P = R = total number of output blocks
M / N = merge factor - the ratio of map : merge tasks is to improve
pipelined parallelism. For example, if map takes twice as long to
execute as merge, then we should set this to 2. If pipeline bubbles
appear and the merge tasks are much longer than the map tasks, then
the merge factor should be decreased, and vice versa.
See paper at https://arxiv.org/abs/2203.05072 for more details.
"""
def execute(
self,
refs: List[RefBundle],
output_num_blocks: int,
task_ctx: TaskContext,
map_ray_remote_args: Optional[Dict[str, Any]] = None,
reduce_ray_remote_args: Optional[Dict[str, Any]] = None,
merge_factor: float = 2,
_debug_limit_execution_to_num_blocks: int = None,
) -> Tuple[List[RefBundle], StatsDict]:
logger.debug("Using experimental push-based shuffle.")
# TODO: Preemptively clear the blocks list since we will incrementally delete
# the last remaining references as we submit the dependent map tasks during the
# map-merge stage.
# TODO(swang): For jobs whose reduce work is heavier than the map work,
# we should support fractional merge factors.
# TODO(swang): For large jobs, we should try to choose the merge factor
# automatically, e.g., by running one test round of map and merge tasks
# and comparing their run times.
# TODO(swang): Add option to automatically reduce write amplification
# during map-merge stage, by limiting how many partitions can be
# processed concurrently.
input_blocks_list = []
for ref_bundle in refs:
input_blocks_list.extend(ref_bundle.block_refs)
input_owned = all(b.owns_blocks for b in refs)
if map_ray_remote_args is None:
map_ray_remote_args = {}
if reduce_ray_remote_args is None:
reduce_ray_remote_args = {}
# The placement strategy for reduce tasks is overwritten to colocate
# them with their inputs from the merge stage, so remove any
# pre-specified scheduling strategy here.
reduce_ray_remote_args = reduce_ray_remote_args.copy()
reduce_ray_remote_args.pop("scheduling_strategy", None)
# Compute all constants used for task scheduling.
num_cpus_per_node_map = _get_num_cpus_per_node_map()
stage = self._compute_shuffle_schedule(
num_cpus_per_node_map,
len(input_blocks_list),
merge_factor,
output_num_blocks,
)
caller_memory_usage = (
stage.get_estimated_num_refs() * CALLER_MEMORY_USAGE_PER_OBJECT_REF
)
self.warn_on_driver_memory_usage(
caller_memory_usage,
"Execution is estimated to use at least "
f"{convert_bytes_to_human_readable_str(caller_memory_usage)}"
" of driver memory. Ensure that the driver machine has at least "
"this much memory to ensure job completion.",
)
# TODO(swang): Use INFO level. Currently there is no easy way to set
# the logging level to DEBUG from a driver script, so just print
# verbosely for now.
# See https://github.com/ray-project/ray/issues/42002.
logger.debug(f"Push-based shuffle schedule:\n{stage}")
map_fn = self._map_partition
merge_fn = self._merge
def map_partition(*args, **kwargs):
return map_fn(self._exchange_spec.map, *args, **kwargs)
def merge(*args, **kwargs):
return merge_fn(self._exchange_spec.reduce, *args, **kwargs)
shuffle_map = cached_remote_fn(map_partition)
shuffle_map = shuffle_map.options(
**map_ray_remote_args,
num_returns=1 + stage.merge_schedule.num_merge_tasks_per_round,
)
if _debug_limit_execution_to_num_blocks is not None:
input_blocks_list = input_blocks_list[:_debug_limit_execution_to_num_blocks]
logger.debug(f"Limiting execution to {len(input_blocks_list)} map tasks")
map_stage_iter = _MapStageIterator(
input_blocks_list,
shuffle_map,
[output_num_blocks, stage.merge_schedule, *self._exchange_spec._map_args],
)
sub_progress_bar_dict = task_ctx.sub_progress_bar_dict
bar_name = ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME
assert bar_name in sub_progress_bar_dict, sub_progress_bar_dict
map_bar = sub_progress_bar_dict[bar_name]
map_stage_executor = _PipelinedStageExecutor(
map_stage_iter, stage.num_map_tasks_per_round, progress_bar=map_bar
)
shuffle_merge = cached_remote_fn(merge)
merge_stage_iter = _MergeStageIterator(
map_stage_iter, shuffle_merge, stage, self._exchange_spec._reduce_args
)
merge_stage_executor = _PipelinedStageExecutor(
merge_stage_iter,
stage.merge_schedule.num_merge_tasks_per_round,
max_concurrent_rounds=2,
)
# Execute the map-merge stage. This submits tasks in rounds of M map
# tasks and N merge tasks each. Task execution between map and merge is
# pipelined, so that while executing merge for one round of inputs, we
# also execute the map tasks for the following round.
map_done = False
merge_done = False
map_stage_metadata_schema = []
merge_stage_metadata_schema = []
while not (map_done and merge_done):
try:
map_stage_metadata_schema += next(map_stage_executor)
except StopIteration:
map_done = True
break
try:
merge_stage_metadata_schema += next(merge_stage_executor)
except StopIteration:
merge_done = True
break
self.warn_on_high_local_memory_store_usage()
all_merge_results = merge_stage_iter.pop_merge_results()
if _debug_limit_execution_to_num_blocks is not None:
for merge_idx in range(len(all_merge_results)):
while len(all_merge_results[merge_idx]) < stage.num_rounds:
# Repeat the first merge task's results.
all_merge_results[merge_idx].append(
all_merge_results[merge_idx][0][:]
)
# Execute and wait for the reduce stage.
bar_name = ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME
assert bar_name in sub_progress_bar_dict, sub_progress_bar_dict
reduce_bar = sub_progress_bar_dict[bar_name]
shuffle_reduce = cached_remote_fn(self._exchange_spec.reduce)
reduce_stage_iter = _ReduceStageIterator(
stage,
shuffle_reduce,
all_merge_results,
reduce_ray_remote_args,
self._exchange_spec._reduce_args,
_debug_limit_execution_to_num_blocks,
)
max_reduce_tasks_in_flight = output_num_blocks
ctx = DataContext.get_current()
if ctx.pipeline_push_based_shuffle_reduce_tasks:
# If pipelining is enabled, we should still try to utilize all
# cores.
max_reduce_tasks_in_flight = min(
max_reduce_tasks_in_flight, sum(num_cpus_per_node_map.values())
)
reduce_stage_executor = _PipelinedStageExecutor(
reduce_stage_iter,
max_reduce_tasks_in_flight,
max_concurrent_rounds=2,
progress_bar=reduce_bar,
)
reduce_stage_metadata_schema = []
while True:
try:
reduce_stage_metadata_schema += next(reduce_stage_executor)
except StopIteration:
break
self.warn_on_high_local_memory_store_usage()
new_blocks = reduce_stage_iter.pop_reduce_results()
sorted_blocks = [
(block[0], block[1], reduce_stage_metadata_schema[i])
for i, block in enumerate(new_blocks)
]
sorted_blocks.sort(key=lambda x: x[0])
new_blocks, reduce_stage_metadata_schema = [], []
if sorted_blocks:
res: Tuple[
List[Any], List[ObjectRef[Block]], List[BlockMetadataWithSchema]
] = unzip(sorted_blocks)
_, new_blocks, reduce_stage_metadata_schema = res
del sorted_blocks
if _debug_limit_execution_to_num_blocks is not None:
output_num_blocks = min(
_debug_limit_execution_to_num_blocks, output_num_blocks
)
assert (
len(new_blocks) == output_num_blocks
), f"Expected {output_num_blocks} outputs, produced {len(new_blocks)}"
output = []
for block, meta_with_schema in zip(new_blocks, reduce_stage_metadata_schema):
output.append(
RefBundle(
[
(
block,
meta_with_schema.metadata,
)
],
owns_blocks=input_owned,
schema=meta_with_schema.schema,
)
)
stats = {
"map": to_stats(map_stage_metadata_schema),
"merge": to_stats(merge_stage_metadata_schema),
"reduce": to_stats(reduce_stage_metadata_schema),
}
return (output, stats)
@staticmethod
def _map_partition(
map_fn,
idx: int,
block: Block,
output_num_blocks: int,
schedule: _MergeTaskSchedule,
*map_args: List[Any],
) -> List[Union[Block, "BlockMetadataWithSchema"]]:
mapper_outputs = map_fn(idx, block, output_num_blocks, *map_args)
# A merge task may produce results for multiple downstream reducer
# tasks. Therefore, each map task should give each merge task a
# partition of its outputs, where the length of the partition is equal
# to the number of reducers downstream to the merge task.
partition = []
merge_idx = 0
while merge_idx < schedule.num_merge_tasks_per_round and mapper_outputs:
output = mapper_outputs.pop(0)
partition.append(output)
if len(partition) == schedule.get_num_reducers_per_merge_idx(merge_idx):
yield partition
partition = []
merge_idx += 1
assert not partition
assert len(mapper_outputs) == 1, (
mapper_outputs,
"The last output should be a BlockMetadataWithSchema",
)
assert isinstance(mapper_outputs[0], BlockMetadataWithSchema)
yield mapper_outputs[0]
assert merge_idx == schedule.num_merge_tasks_per_round, (
merge_idx,
schedule.num_merge_tasks_per_round,
)
@staticmethod
def _merge(
reduce_fn,
*all_mapper_outputs: List[List[Block]],
reduce_args: Optional[List[Any]] = None,
) -> List[Union["BlockMetadataWithSchema", Block]]:
"""
Returns list of [BlockMetadata, O1, O2, O3, ...output_num_blocks].
"""
assert (
len({len(mapper_outputs) for mapper_outputs in all_mapper_outputs}) == 1
), "Received different number of map inputs"
stats = BlockExecStats.builder()
if not reduce_args:
reduce_args = []
num_rows = 0
size_bytes = 0
schemas = []
for i, mapper_outputs in enumerate(zip(*all_mapper_outputs)):
block_meta_with_schema: Tuple[Block, "BlockMetadataWithSchema"] = reduce_fn(
*reduce_args, *mapper_outputs, partial_reduce=True
)
block, meta_with_schema = block_meta_with_schema
yield block
block = BlockAccessor.for_block(block)
num_rows += block.num_rows()
size_bytes += block.size_bytes()
del block
schemas.append(meta_with_schema.schema)
schema = _take_first_non_empty_schema(iter(schemas))
meta = BlockMetadata(
num_rows=num_rows,
size_bytes=size_bytes,
input_files=None,
exec_stats=stats.build(),
)
meta_with_schema = BlockMetadataWithSchema(metadata=meta, schema=schema)
yield meta_with_schema
@staticmethod
def _compute_shuffle_schedule(
num_cpus_per_node_map: Dict[str, int],
num_input_blocks: int,
merge_factor: float,
num_output_blocks: int,
) -> _PushBasedShuffleStage:
num_cpus_total = sum(v for v in num_cpus_per_node_map.values())
logger.debug(
f"Found {num_cpus_total} CPUs available CPUs for push-based shuffle."
)
num_tasks_per_map_merge_group = merge_factor + 1
num_total_merge_tasks = math.ceil(num_input_blocks / merge_factor)
num_merge_tasks_per_round = 0
merge_task_placement = []
leftover_cpus = 0
# Compute the total number of merge tasks and their node placement.
# Each merge task should be grouped with `merge_factor` map tasks for
# pipelining. These groups should then be spread across nodes according
# to CPU availability for load-balancing.
num_input_blocks_remaining = num_input_blocks
for node, num_cpus in num_cpus_per_node_map.items():
# First find how many merge tasks we should run on this node.
# We take the min of the number of CPUs on this node and the number
# of input blocks that we haven't scheduled yet, in case there are
# fewer input blocks than CPU slots on this node.
num_cpu_slots = min(num_cpus, num_input_blocks_remaining)
num_merge_tasks_on_cur_node = round(
num_cpu_slots / num_tasks_per_map_merge_group
)
# For small datasets, the number of tasks to run may be less than
# the total CPU slots available.
num_merge_tasks_on_cur_node = min(
num_merge_tasks_on_cur_node, num_total_merge_tasks
)
for i in range(num_merge_tasks_on_cur_node):
merge_task_placement.append(node)
# We schedule `merge_factor` many map tasks for every merge
# task. Subtract from the number of input blocks remaining to
# account for cases where the number of map tasks is smaller
# than the available CPU slots.
num_input_blocks_remaining -= merge_factor
num_cpus -= num_tasks_per_map_merge_group
num_merge_tasks_per_round += num_merge_tasks_on_cur_node
# Handle the case where a single node cannot fit a group of map and
# merge tasks, but we can spread the group across multiple distinct
# nodes.
leftover_cpus += num_cpus
if (
leftover_cpus >= num_tasks_per_map_merge_group
and num_merge_tasks_per_round < num_total_merge_tasks
):
merge_task_placement.append(node)
num_merge_tasks_per_round += 1
leftover_cpus -= num_tasks_per_map_merge_group
num_input_blocks_remaining -= merge_factor
num_input_blocks_remaining = max(0, num_input_blocks_remaining)
if num_merge_tasks_per_round == 0:
# For small datasets, make sure we have at least one merge task.
for node, num_cpus in num_cpus_per_node_map.items():
if num_cpus >= 1:
merge_task_placement.append(node)
num_merge_tasks_per_round = 1
break
assert num_merge_tasks_per_round == len(merge_task_placement)
assert num_merge_tasks_per_round > 0, num_merge_tasks_per_round
# Use the remaining CPUs to execute map tasks.
num_map_tasks_per_round = num_cpus_total - num_merge_tasks_per_round
num_map_tasks_per_round = min(num_map_tasks_per_round, num_input_blocks)
# Make sure there is at least one map task in each round.
num_map_tasks_per_round = max(num_map_tasks_per_round, 1)
num_rounds = math.ceil(num_input_blocks / num_map_tasks_per_round)
return _PushBasedShuffleStage(
num_output_blocks,
num_rounds,
num_map_tasks_per_round,
merge_task_placement,
)
def _get_num_cpus_per_node_map() -> Dict[str, int]:
total_resources_by_node = ray.state.total_resources_per_node()
# Map from per-node resource name to number of CPUs available on that
# node.
num_cpus_per_node_map = {}
for node_id, resources in total_resources_by_node.items():
num_cpus = int(resources.get("CPU", 0))
if num_cpus == 0:
continue
num_cpus_per_node_map[node_id] = num_cpus
return num_cpus_per_node_map
| PushBasedShuffleTaskScheduler |
python | pytorch__pytorch | torch/autograd/forward_ad.py | {
"start": 4252,
"end": 5596
} | class ____(NamedTuple):
r"""Namedtuple returned by :func:`unpack_dual` containing the primal and tangent components of the dual tensor.
See :func:`unpack_dual` for more details.
"""
primal: torch.Tensor
tangent: Optional[torch.Tensor]
def unpack_dual(tensor, *, level=None):
r"""Unpack a "dual tensor" to get both its Tensor value and its forward AD gradient.
The result is a namedtuple ``(primal, tangent)`` where ``primal`` is a view of
:attr:`tensor`'s primal and ``tangent`` is :attr:`tensor`'s tangent as-is.
Neither of these tensors can be dual tensor of level :attr:`level`.
This function is backward differentiable.
Example::
>>> # xdoctest: +SKIP("Undefined variables")
>>> with dual_level():
... inp = make_dual(x, x_t)
... out = f(inp)
... y, jvp = unpack_dual(out)
... jvp = unpack_dual(out).tangent
Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__
for detailed steps on how to use this API.
"""
if level is None:
level = _current_level
if level < 0:
return UnpackedDualTensor(tensor, None)
primal, dual = torch._VF._unpack_dual(tensor, level=level)
return UnpackedDualTensor(primal, dual)
| UnpackedDualTensor |
python | mozilla__bleach | bleach/_vendor/html5lib/filters/alphabeticalattributes.py | {
"start": 451,
"end": 919
} | class ____(base.Filter):
"""Alphabetizes attributes for elements"""
def __iter__(self):
for token in base.Filter.__iter__(self):
if token["type"] in ("StartTag", "EmptyTag"):
attrs = OrderedDict()
for name, value in sorted(token["data"].items(),
key=_attr_key):
attrs[name] = value
token["data"] = attrs
yield token
| Filter |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/asset_graph_view/bfs.py | {
"start": 4363,
"end": 8157
} | class ____:
"""Queue that returns parents before their children."""
@total_ordering
class QueueItem(NamedTuple):
level: int
sort_key: str
asset_graph_subset: AssetGraphSubset
def __eq__(self, other: object) -> bool:
if isinstance(other, ToposortedPriorityQueue.QueueItem):
return self.level == other.level and self.sort_key == other.sort_key
return False
def __lt__(self, other: object) -> bool:
if isinstance(other, ToposortedPriorityQueue.QueueItem):
return self.level < other.level or (
self.level == other.level and self.sort_key < other.sort_key
)
raise TypeError()
def __init__(
self,
asset_graph_view: AssetGraphView,
items: Iterable["EntitySubset"],
include_full_execution_set: bool,
):
self._asset_graph_view = asset_graph_view
self._include_full_execution_set = include_full_execution_set
self._toposort_level_by_asset_key = {
asset_key: i
for i, asset_keys in enumerate(
asset_graph_view.asset_graph.toposorted_asset_keys_by_level
)
for asset_key in asset_keys
}
self._heap = [self._queue_item(entity_subset) for entity_subset in items]
heapify(self._heap)
def enqueue(self, entity_subset: "EntitySubset") -> None:
heappush(self._heap, self._queue_item(entity_subset))
def dequeue(self) -> AssetGraphSubset:
# For multi-assets, will include all required multi-asset keys if
# include_full_execution_set is set to True, or just the passed in
# asset key if it was not. If there are multiple assets in the subset
# the subset will have the same partitions included for each asset.
#
# Returns the union of all subsets that share the same sort key
# (i.e. asset key).
# the minimum item in a heap is always at index 0
min_item = self._heap[0]
result = AssetGraphSubset.create_empty_subset()
# Collect all items with the same minimum sort key (asset key)
while self._heap and self._heap[0].sort_key == min_item.sort_key:
heap_value = heappop(self._heap)
result |= heap_value.asset_graph_subset
return result
def _queue_item(self, entity_subset: "EntitySubset") -> "ToposortedPriorityQueue.QueueItem":
asset_key = entity_subset.key
if self._include_full_execution_set:
execution_set_keys = self._asset_graph_view.asset_graph.get(
asset_key
).execution_set_asset_keys
else:
execution_set_keys = {asset_key}
level = max(
self._toposort_level_by_asset_key[asset_key] for asset_key in execution_set_keys
)
serializable_entity_subset = entity_subset.convert_to_serializable_subset()
serializable_entity_subsets = [
SerializableEntitySubset(key=asset_key, value=serializable_entity_subset.value)
for asset_key in execution_set_keys
]
entity_subsets = [
check.not_none(
self._asset_graph_view.get_subset_from_serializable_subset(
serializable_entity_subset
)
)
for serializable_entity_subset in serializable_entity_subsets
]
asset_graph_subset = AssetGraphSubset.from_entity_subsets(entity_subsets)
return ToposortedPriorityQueue.QueueItem(
level,
asset_key.to_string(),
asset_graph_subset=asset_graph_subset,
)
def __len__(self) -> int:
return len(self._heap)
| ToposortedPriorityQueue |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 96884,
"end": 97165
} | class ____(sgqlc.types.Enum):
"""Properties by which saved reply connections can be ordered.
Enumeration Choices:
* `UPDATED_AT`: Order saved reply by when they were updated.
"""
__schema__ = github_schema
__choices__ = ("UPDATED_AT",)
| SavedReplyOrderField |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/types.py | {
"start": 3376,
"end": 3643
} | class ____(
sqltypes.Float[Union[decimal.Decimal, float]], sqltypes.MatchType
):
def __init__(self, **kw: Any):
# TODO: float arguments?
sqltypes.Float.__init__(self) # type: ignore[arg-type]
sqltypes.MatchType.__init__(self)
| _MatchType |
python | python-attrs__attrs | typing-examples/baseline.py | {
"start": 1556,
"end": 1801
} | class ____:
a: int = attrs.field(repr=True)
b: str = attrs.field(repr=False)
c: str = attrs.field(repr=lambda value: "c is for cookie")
d: bool = attrs.field(repr=str)
@attrs.define(on_setattr=attrs.setters.validate)
| WithCustomRepr |
python | astropy__astropy | astropy/coordinates/representation/base.py | {
"start": 50709,
"end": 64600
} | class ____(BaseRepresentationOrDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All differential representation classes should subclass this base class,
and define an ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
"""
def __init_subclass__(cls, **kwargs):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
super().__init_subclass__(**kwargs) # sets `cls.name`
# Don't do anything for base helper classes.
if cls.__name__ in (
"BaseDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
):
return
if not hasattr(cls, "base_representation"):
raise NotImplementedError(
"Differential representations must have a"
'"base_representation" class attribute.'
)
# If not defined explicitly, create attr_classes.
if not hasattr(cls, "attr_classes"):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = {"d_" + c: u.Quantity for c in base_attr_classes}
repr_name = cls.name
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError(f"Differential class {repr_name} already defined")
DIFFERENTIAL_CLASSES[repr_name] = cls
get_reprdiff_cls_hash.cache_clear()
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
lambda self, comp=f"_{component}": getattr(self, comp),
doc=f"Component '{component}' of the Differential.",
),
)
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError(
f"Differential class {cls} is not compatible with the "
f"base (representation) class {base.__class__}"
)
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, f"d_{name}", None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# This is quite a bit faster than using to_system() or going
# through Quantity()
d_unit_si = d_unit.decompose(u.si.bases)
d_unit_si._scale = 1 # remove the scale from the unit
return str(d_unit_si)
raise RuntimeError(
"Invalid representation-differential units! This likely happened "
"because either the representation or the associated differential "
"have non-standard units. Check that the input positional data have "
"positional units, and the input velocity data have velocity units, "
"or are both dimensionless."
)
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
`~astropy.coordinates.CartesianDifferential`
This object, converted.
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add,
(
getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)
),
)
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
other
The object to convert into this differential.
base : `~astropy.coordinates.BaseRepresentation`
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Will be converted to ``cls.base_representation`` if needed.
Returns
-------
`~astropy.coordinates.BaseDifferential` subclass instance
A new differential object that is this class' type.
"""
base = base.represent_as(cls.base_representation)
base_e, base_sf = cls._get_base_vectors(base)
return cls(
*(other.dot(e / base_sf[component]) for component, e in base_e.items()),
copy=False,
)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The presentation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation)
)
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
from .cartesian import CartesianDifferential
# route transformation through Cartesian
cdiff = self.represent_as(CartesianDifferential, base=base).transform(matrix)
# move back to original representation
return cdiff.represent_as(self.__class__, transformed_base)
def _scale_operation(self, op, *args, scaled_base=False):
"""Scale all components.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
scaled_base : bool, optional
Whether the base was scaled the same way. This affects whether
differential components should be scaled. For instance, a differential
in longitude should not be scaled if its spherical base is scaled
in radius.
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(
*[op(getattr(first, c), getattr(second, c)) for c in self.components]
)
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but Cartesian differentials or radial differentials.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
from .cartesian import CartesianDifferential
# RadialDifferential overrides this function, so there is no handling here
if not isinstance(self, CartesianDifferential) and base is None:
raise ValueError(
"`base` must be provided to calculate the norm of a"
f" {type(self).__name__}"
)
return self.to_cartesian(base).norm()
| BaseDifferential |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_returned.py | {
"start": 586,
"end": 725
} | class ____:
""" __len__ returns a negative integer """
def __len__(self): # [invalid-length-returned]
return -1
| FirstBadLen |
python | doocs__leetcode | solution/1600-1699/1639.Number of Ways to Form a Target String Given a Dictionary/Solution2.py | {
"start": 0,
"end": 660
} | class ____:
def numWays(self, words: List[str], target: str) -> int:
m, n = len(target), len(words[0])
cnt = [[0] * 26 for _ in range(n)]
for w in words:
for j, c in enumerate(w):
cnt[j][ord(c) - ord('a')] += 1
mod = 10**9 + 7
f = [[0] * (n + 1) for _ in range(m + 1)]
f[0] = [1] * (n + 1)
for i in range(1, m + 1):
for j in range(1, n + 1):
f[i][j] = (
f[i][j - 1]
+ f[i - 1][j - 1] * cnt[j - 1][ord(target[i - 1]) - ord('a')]
)
f[i][j] %= mod
return f[m][n]
| Solution |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 53056,
"end": 53301
} | class ____:
xlDown = -4121 # from enum XlDirection
xlToLeft = -4159 # from enum XlDirection
xlToRight = -4161 # from enum XlDirection
xlUp = -4162 # from enum XlDirection
directions = ("down", "left", "right", "up")
| Direction |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/axislines.py | {
"start": 8881,
"end": 9028
} | class ____: # Backcompat.
Fixed = FixedAxisArtistHelperRectilinear
Floating = FloatingAxisArtistHelperRectilinear
| AxisArtistHelperRectlinear |
python | pypa__pipenv | pipenv/vendor/click/core.py | {
"start": 56511,
"end": 68664
} | class ____(Command):
"""A multi command is the basic implementation of a command that
dispatches to subcommands. The most common version is the
:class:`Group`.
:param invoke_without_command: this controls how the multi command itself
is invoked. By default it's only invoked
if a subcommand is provided.
:param no_args_is_help: this controls what happens if no arguments are
provided. This option is enabled by default if
`invoke_without_command` is disabled or disabled
if it's enabled. If enabled this will add
``--help`` as argument if no arguments are
passed.
:param subcommand_metavar: the string that is used in the documentation
to indicate the subcommand place.
:param chain: if this is set to `True` chaining of multiple subcommands
is enabled. This restricts the form of commands in that
they cannot have optional arguments but it allows
multiple commands to be chained together.
:param result_callback: The result callback to attach to this multi
command. This can be set or changed later with the
:meth:`result_callback` decorator.
:param attrs: Other command arguments described in :class:`Command`.
"""
allow_extra_args = True
allow_interspersed_args = False
def __init__(
self,
name: t.Optional[str] = None,
invoke_without_command: bool = False,
no_args_is_help: t.Optional[bool] = None,
subcommand_metavar: t.Optional[str] = None,
chain: bool = False,
result_callback: t.Optional[t.Callable[..., t.Any]] = None,
**attrs: t.Any,
) -> None:
super().__init__(name, **attrs)
if no_args_is_help is None:
no_args_is_help = not invoke_without_command
self.no_args_is_help = no_args_is_help
self.invoke_without_command = invoke_without_command
if subcommand_metavar is None:
if chain:
subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
else:
subcommand_metavar = "COMMAND [ARGS]..."
self.subcommand_metavar = subcommand_metavar
self.chain = chain
# The result callback that is stored. This can be set or
# overridden with the :func:`result_callback` decorator.
self._result_callback = result_callback
if self.chain:
for param in self.params:
if isinstance(param, Argument) and not param.required:
raise RuntimeError(
"Multi commands in chain mode cannot have"
" optional arguments."
)
def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict(ctx)
commands = {}
for name in self.list_commands(ctx):
command = self.get_command(ctx, name)
if command is None:
continue
sub_ctx = ctx._make_sub_context(command)
with sub_ctx.scope(cleanup=False):
commands[name] = command.to_info_dict(sub_ctx)
info_dict.update(commands=commands, chain=self.chain)
return info_dict
def collect_usage_pieces(self, ctx: Context) -> t.List[str]:
rv = super().collect_usage_pieces(ctx)
rv.append(self.subcommand_metavar)
return rv
def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
super().format_options(ctx, formatter)
self.format_commands(ctx, formatter)
def result_callback(self, replace: bool = False) -> t.Callable[[F], F]:
"""Adds a result callback to the command. By default if a
result callback is already registered this will chain them but
this can be disabled with the `replace` parameter. The result
callback is invoked with the return value of the subcommand
(or the list of return values from all subcommands if chaining
is enabled) as well as the parameters as they would be passed
to the main callback.
Example::
@click.group()
@click.option('-i', '--input', default=23)
def cli(input):
return 42
@cli.result_callback()
def process_result(result, input):
return result + input
:param replace: if set to `True` an already existing result
callback will be removed.
.. versionchanged:: 8.0
Renamed from ``resultcallback``.
.. versionadded:: 3.0
"""
def decorator(f: F) -> F:
old_callback = self._result_callback
if old_callback is None or replace:
self._result_callback = f
return f
def function(__value, *args, **kwargs): # type: ignore
inner = old_callback(__value, *args, **kwargs)
return f(inner, *args, **kwargs)
self._result_callback = rv = update_wrapper(t.cast(F, function), f)
return rv
return decorator
def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
commands = []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
if cmd.hidden:
continue
commands.append((subcommand, cmd))
# allow for 3 times the default spacing
if len(commands):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
rows = []
for subcommand, cmd in commands:
help = cmd.get_short_help_str(limit)
rows.append((subcommand, help))
if rows:
with formatter.section(_("Commands")):
formatter.write_dl(rows)
def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]:
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
rest = super().parse_args(ctx, args)
if self.chain:
ctx.protected_args = rest
ctx.args = []
elif rest:
ctx.protected_args, ctx.args = rest[:1], rest[1:]
return ctx.args
def invoke(self, ctx: Context) -> t.Any:
def _process_result(value: t.Any) -> t.Any:
if self._result_callback is not None:
value = ctx.invoke(self._result_callback, value, **ctx.params)
return value
if not ctx.protected_args:
if self.invoke_without_command:
# No subcommand was invoked, so the result callback is
# invoked with the group return value for regular
# groups, or an empty list for chained groups.
with ctx:
rv = super().invoke(ctx)
return _process_result([] if self.chain else rv)
ctx.fail(_("Missing command."))
# Fetch args back out
args = [*ctx.protected_args, *ctx.args]
ctx.args = []
ctx.protected_args = []
# If we're not in chain mode, we only allow the invocation of a
# single command but we also inform the current context about the
# name of the command to invoke.
if not self.chain:
# Make sure the context is entered so we do not clean up
# resources until the result processor has worked.
with ctx:
cmd_name, cmd, args = self.resolve_command(ctx, args)
assert cmd is not None
ctx.invoked_subcommand = cmd_name
super().invoke(ctx)
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
with sub_ctx:
return _process_result(sub_ctx.command.invoke(sub_ctx))
# In chain mode we create the contexts step by step, but after the
# base command has been invoked. Because at that point we do not
# know the subcommands yet, the invoked subcommand attribute is
# set to ``*`` to inform the command that subcommands are executed
# but nothing else.
with ctx:
ctx.invoked_subcommand = "*" if args else None
super().invoke(ctx)
# Otherwise we make every single context and invoke them in a
# chain. In that case the return value to the result processor
# is the list of all invoked subcommand's results.
contexts = []
while args:
cmd_name, cmd, args = self.resolve_command(ctx, args)
assert cmd is not None
sub_ctx = cmd.make_context(
cmd_name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
)
contexts.append(sub_ctx)
args, sub_ctx.args = sub_ctx.args, []
rv = []
for sub_ctx in contexts:
with sub_ctx:
rv.append(sub_ctx.command.invoke(sub_ctx))
return _process_result(rv)
def resolve_command(
self, ctx: Context, args: t.List[str]
) -> t.Tuple[t.Optional[str], t.Optional[Command], t.List[str]]:
cmd_name = make_str(args[0])
original_cmd_name = cmd_name
# Get the command
cmd = self.get_command(ctx, cmd_name)
# If we can't find the command but there is a normalization
# function available, we try with that one.
if cmd is None and ctx.token_normalize_func is not None:
cmd_name = ctx.token_normalize_func(cmd_name)
cmd = self.get_command(ctx, cmd_name)
# If we don't find the command we want to show an error message
# to the user that it was not provided. However, there is
# something else we should do: if the first argument looks like
# an option we want to kick off parsing again for arguments to
# resolve things like --help which now should go to the main
# place.
if cmd is None and not ctx.resilient_parsing:
if split_opt(cmd_name)[0]:
self.parse_args(ctx, ctx.args)
ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name))
return cmd_name if cmd else None, cmd, args[1:]
def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]:
"""Given a context and a command name, this returns a
:class:`Command` object if it exists or returns `None`.
"""
raise NotImplementedError
def list_commands(self, ctx: Context) -> t.List[str]:
"""Returns a list of subcommand names in the order they should
appear.
"""
return []
def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
"""Return a list of completions for the incomplete value. Looks
at the names of options, subcommands, and chained
multi-commands.
:param ctx: Invocation context for this command.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from pipenv.vendor.click.shell_completion import CompletionItem
results = [
CompletionItem(name, help=command.get_short_help_str())
for name, command in _complete_visible_commands(ctx, incomplete)
]
results.extend(super().shell_complete(ctx, incomplete))
return results
| MultiCommand |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_finetuning_callback.py | {
"start": 10870,
"end": 12696
} | class ____(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.module_dict = nn.ModuleDict({"conv": nn.Conv2d(in_channels, out_channels, 3), "act": nn.ReLU()})
# add trivial test parameter to convblock to validate parent (non-leaf) module parameter handling
self.parent_param = nn.Parameter(torch.zeros((1), dtype=torch.float))
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.module_dict["conv"](x)
x = self.module_dict["act"](x)
return self.bn(x)
def test_complex_nested_model():
"""Test flattening, freezing, and thawing of models which contain parent (non-leaf) modules with parameters
directly themselves rather than exclusively their submodules containing parameters."""
model = nn.Sequential(
OrderedDict([
("encoder", nn.Sequential(ConvBlockParam(3, 64), ConvBlock(64, 128))),
("decoder", ConvBlock(128, 10)),
])
)
# There are 10 leaf modules or parent modules w/ parameters in the test model
assert len(BaseFinetuning.flatten_modules(model)) == 10
BaseFinetuning.freeze(model.encoder, train_bn=True)
assert not model.encoder[0].module_dict["conv"].weight.requires_grad # Validate a leaf module parameter is frozen
assert not model.encoder[0].parent_param.requires_grad # Validate the parent module parameter is frozen
assert model.encoder[0].bn.weight.requires_grad
BaseFinetuning.make_trainable(model)
encoder_params = list(BaseFinetuning.filter_params(model.encoder, train_bn=True))
# The 9 parameters of the encoder are:
# conv0.weight, conv0.bias, bn0.weight, bn0.bias, parent_param
# conv1.weight, conv1.bias, bn1.weight, bn1.bias
assert len(encoder_params) == 9
| ConvBlockParam |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.