language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | openai__openai-python | src/openai/types/evals/run_cancel_response.py | {
"start": 2436,
"end": 4913
} | class ____(BaseModel):
type: Literal["responses"]
"""The type of run data source. Always `responses`."""
created_after: Optional[int] = None
"""Only include items created after this timestamp (inclusive).
This is a query parameter used to select responses.
"""
created_before: Optional[int] = None
"""Only include items created before this timestamp (inclusive).
This is a query parameter used to select responses.
"""
instructions_search: Optional[str] = None
"""Optional string to search the 'instructions' field.
This is a query parameter used to select responses.
"""
metadata: Optional[object] = None
"""Metadata filter for the responses.
This is a query parameter used to select responses.
"""
model: Optional[str] = None
"""The name of the model to find responses for.
This is a query parameter used to select responses.
"""
reasoning_effort: Optional[ReasoningEffort] = None
"""
Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
"""
temperature: Optional[float] = None
"""Sampling temperature. This is a query parameter used to select responses."""
tools: Optional[List[str]] = None
"""List of tool names. This is a query parameter used to select responses."""
top_p: Optional[float] = None
"""Nucleus sampling parameter. This is a query parameter used to select responses."""
users: Optional[List[str]] = None
"""List of user identifiers. This is a query parameter used to select responses."""
DataSourceResponsesSource: TypeAlias = Annotated[
Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses],
PropertyInfo(discriminator="type"),
]
| DataSourceResponsesSourceResponses |
python | astropy__astropy | astropy/__init__.py | {
"start": 4094,
"end": 6143
} | class ____(base_constants_version):
"""
The version of astronomical constants to use.
"""
# Maintainers: update when new constants are added
_value = "iau2015"
_versions = dict(
iau2015="iau2015",
iau2012="iau2012",
astropyconst80="iau2015",
astropyconst40="iau2015",
astropyconst20="iau2015",
astropyconst13="iau2012",
)
# Create the test() function
from .tests.runner import TestRunner
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="The TestRunner")
test = TestRunner.make_test_runner_in(__path__[0])
# Set the bibtex entry to the article referenced in CITATION.
def _get_bibtex():
refs = (Path(__file__).parent / "CITATION").read_text().split("@ARTICLE")[1:]
return f"@ARTICLE{refs[0]}" if refs else ""
__citation__ = __bibtex__ = _get_bibtex()
from .logger import _init_log, _teardown_log
log = _init_log()
from types import ModuleType as __module_type__
from .utils.misc import find_api_page, online_docs_root, online_help
# Clean up top-level namespace--delete everything that isn't in __all__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not (
(varname.startswith("__") and varname.endswith("__"))
or varname in __all__
or (
varname[0] != "_"
and isinstance(locals()[varname], __module_type__)
and locals()[varname].__name__.startswith(__name__ + ".")
)
):
# The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
| astronomical_constants |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py | {
"start": 968,
"end": 1456
} | class ____:
"""CENTER_XYWH contains axis indices for the CENTER_XYWH format.
All values in the CENTER_XYWH format should be absolute pixel values.
The CENTER_XYWH format consists of the following required indices:
- X: X coordinate of the center of the bounding box
- Y: Y coordinate of the center of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
| CENTER_XYWH |
python | pytorch__pytorch | test/distributed/tensor/test_op_strategy.py | {
"start": 23886,
"end": 24903
} | class ____(DTensorTestBase):
@with_comms
def test_call_with_different_nontensor_args(self):
mesh = self.build_device_mesh()
global_tensor = torch.tensor(
[
[29.0, 45.0, 3.0, 61.0],
[25.0, 6.0, 21.0, 0.0],
[1.0, 63.0, 49.0, 38.0],
[48.0, 9.0, 55.0, 18.0],
]
)
shard_spec = [Shard(1)]
sharded_dtensor = distribute_tensor(global_tensor, mesh, shard_spec)
with op_strategy_context(torch.ops.aten.sort.default, replicate_op_strategy):
# intentionally do not supply `schema_info=RuntimeSchemaInfo(1)`
torch.sort(sharded_dtensor, dim=0) # sort each column
out1, _ = torch.sort(sharded_dtensor, dim=1) # sort each row
with op_strategy_context(torch.ops.aten.sort.default, replicate_op_strategy):
out2, _ = torch.sort(sharded_dtensor, dim=1)
self.assertEqual(out1.full_tensor(), out2.full_tensor())
| TestStrategyHashing |
python | redis__redis-py | redis/commands/search/hybrid_query.py | {
"start": 8335,
"end": 11991
} | class ____:
def __init__(self) -> None:
"""
Create a new hybrid post processing configuration object.
"""
self._load_statements = []
self._apply_statements = []
self._groupby_statements = []
self._sortby_fields = []
self._filter = None
self._limit = None
def load(self, *fields: str) -> Self:
"""
Add load statement parameters to the query.
"""
if fields:
fields_str = " ".join(fields)
fields_list = fields_str.split(" ")
self._load_statements.extend(("LOAD", len(fields_list), *fields_list))
return self
def group_by(self, fields: List[str], *reducers: Reducer) -> Self:
"""
Specify by which fields to group the aggregation.
Args:
fields: Fields to group by. This can either be a single string or a list
of strings. In both cases, the field should be specified as `@field`.
reducers: One or more reducers. Reducers may be found in the
`aggregation` module.
"""
fields = [fields] if isinstance(fields, str) else fields
ret = ["GROUPBY", str(len(fields)), *fields]
for reducer in reducers:
ret.extend(("REDUCE", reducer.NAME, str(len(reducer.args))))
ret.extend(reducer.args)
if reducer._alias is not None:
ret.extend(("AS", reducer._alias))
self._groupby_statements.extend(ret)
return self
def apply(self, **kwexpr) -> Self:
"""
Specify one or more projection expressions to add to each result.
Args:
kwexpr: One or more key-value pairs for a projection. The key is
the alias for the projection, and the value is the projection
expression itself, for example `apply(square_root="sqrt(@foo)")`.
"""
apply_args = []
for alias, expr in kwexpr.items():
ret = ["APPLY", expr]
if alias is not None:
ret.extend(("AS", alias))
apply_args.extend(ret)
self._apply_statements.extend(apply_args)
return self
def sort_by(self, *sortby: "SortbyField") -> Self:
"""
Add sortby parameters to the query.
"""
self._sortby_fields = [*sortby]
return self
def filter(self, filter: "HybridFilter") -> Self:
"""
Add a numeric or string filter to the query.
Currently, only one of each filter is supported by the engine.
Args:
filter: A NumericFilter or GeoFilter object, used on a corresponding field.
"""
self._filter = filter
return self
def limit(self, offset: int, num: int) -> Self:
"""
Add limit parameters to the query.
"""
self._limit = Limit(offset, num)
return self
def build_args(self) -> List[str]:
args = []
if self._load_statements:
args.extend(self._load_statements)
if self._groupby_statements:
args.extend(self._groupby_statements)
if self._apply_statements:
args.extend(self._apply_statements)
if self._sortby_fields:
sortby_args = []
for f in self._sortby_fields:
sortby_args.extend(f.args)
args.extend(("SORTBY", len(sortby_args), *sortby_args))
if self._filter:
args.extend(self._filter.args)
if self._limit:
args.extend(self._limit.build_args())
return args
@experimental
| HybridPostProcessingConfig |
python | aimacode__aima-python | learning4e.py | {
"start": 30538,
"end": 31176
} | class ____:
"""Return a predictor that takes a weighted vote."""
def __init__(self, predictors, weights):
self.predictors = predictors
self.weights = weights
def predict(self, example):
return weighted_mode((predictor.predict(example) for predictor in self.predictors), self.weights)
def weighted_mode(values, weights):
"""
Return the value with the greatest total weight.
>>> weighted_mode('abbaa', [1, 2, 3, 1, 2])
'b'
"""
totals = defaultdict(int)
for v, w in zip(values, weights):
totals[v] += w
return max(totals, key=totals.__getitem__)
| weighted_majority |
python | google__pytype | pytype/utils.py | {
"start": 4869,
"end": 5844
} | class ____:
"""A dynamically scoped variable.
This is a per-thread dynamic variable, with an initial value of None.
The bind() call establishes a new value that will be in effect for the
duration of the resulting context manager. This is intended to be used
in conjunction with a decorator.
"""
def __init__(self):
self._local = threading.local()
def _values(self):
values = getattr(self._local, "values", None)
if values is None:
values = [None] # Stack of bindings, with an initial default of None.
self._local.values = values
return values
@contextlib.contextmanager
def bind(self, value):
"""Bind the dynamic variable to the supplied value."""
values = self._values()
try:
values.append(value) # Push the new binding.
yield
finally:
values.pop() # Pop the binding.
def get(self):
"""Return the current value of the dynamic variable."""
return self._values()[-1]
| DynamicVar |
python | pypa__warehouse | tests/unit/accounts/test_views.py | {
"start": 2220,
"end": 3948
} | class ____:
def test_too_many_failed_logins(self, pyramid_request):
exc = TooManyFailedLogins(resets_in=datetime.timedelta(seconds=600))
resp = views.failed_logins(exc, pyramid_request)
assert resp.status == "429 Too Many Failed Login Attempts"
assert resp.detail == (
"There have been too many unsuccessful login attempts. "
"You have been locked out for 10 minutes. "
"Please try again later."
)
assert dict(resp.headers).get("Retry-After") == "600"
def test_too_many_emails_added(self, pyramid_request):
exc = TooManyEmailsAdded(resets_in=datetime.timedelta(seconds=600))
resp = views.unverified_emails(exc, pyramid_request)
assert resp.status == "429 Too Many Requests"
assert resp.detail == (
"Too many emails have been added to this account without verifying "
"them. Check your inbox and follow the verification links. (IP: "
f"{pyramid_request.remote_addr})"
)
assert dict(resp.headers).get("Retry-After") == "600"
def test_too_many_password_reset_requests(self, pyramid_request):
exc = TooManyPasswordResetRequests(resets_in=datetime.timedelta(seconds=600))
resp = views.incomplete_password_resets(exc, pyramid_request)
assert resp.status == "429 Too Many Requests"
assert resp.detail == (
"Too many password resets have been requested for this account without "
"completing them. Check your inbox and follow the verification links. (IP: "
f"{pyramid_request.remote_addr})"
)
assert dict(resp.headers).get("Retry-After") == "600"
| TestFailedLoginView |
python | getsentry__sentry | src/sentry/notifications/notifications/missing_members_nudge.py | {
"start": 806,
"end": 3032
} | class ____(BaseNotification):
metrics_key = "missing_members_nudge"
template_path = "sentry/emails/missing-members-nudge"
def get_specific_analytics_event(self, provider: ExternalProviders) -> analytics.Event | None:
return MissingMembersNudgeEvent(
organization_id=self.organization.id,
)
RoleBasedRecipientStrategyClass = MemberWriteRoleRecipientStrategy
notification_setting_type_enum = NotificationSettingEnum.APPROVAL
def __init__(
self,
organization: Organization,
commit_authors: Sequence[dict[str, Any]],
provider: str,
) -> None:
super().__init__(organization)
for author in commit_authors:
author["profile_link"] = PROVIDER_TO_URL[provider] + author["external_id"]
self.commit_authors = commit_authors
self.provider = provider
self.role_based_recipient_strategy = self.RoleBasedRecipientStrategyClass(organization)
@property
def reference(self) -> Model | None:
return None
def get_subject(self, context: Mapping[str, Any] | None = None) -> str:
return "Invite your developers to Sentry"
def get_notification_providers(self) -> Iterable[ExternalProviders]:
# only email
return [ExternalProviders.EMAIL]
def get_members_list_url(
self, provider: ExternalProviders, recipient: Actor | None = None
) -> str:
url = self.organization.absolute_url(
f"/settings/{self.organization.slug}/members/",
query=self.get_sentry_query_params(provider, recipient),
)
url += "&inviteMissingMembers=true"
return url
def get_context(self) -> MutableMapping[str, Any]:
return {
"organization": self.organization,
"top_missing_members": self.commit_authors,
"members_list_url": self.get_members_list_url(provider=ExternalProviders.EMAIL),
"provider": self.provider.capitalize(),
}
def determine_recipients(self) -> list[Actor]:
# owners and managers have org:write
return Actor.many_from_object(self.role_based_recipient_strategy.determine_recipients())
| MissingMembersNudgeNotification |
python | getsentry__sentry | src/sentry/projects/services/project/model.py | {
"start": 1844,
"end": 3243
} | class ____(RpcModel):
id: int = -1
slug: str = ""
name: str = ""
organization_id: int = -1
status: int = Field(default_factory=_project_status_visible)
first_event: datetime | None = None
platform: str | None = None
external_id: str | None = None
def __hash__(self) -> int:
# Mimic the behavior of hashing a Django ORM entity, for compatibility with
# serializers, as this project object is often used for that.
return hash((self.id, self.organization_id, self.slug))
def get_option(
self,
key: str,
default: Any | None = None,
validate: Callable[[object], bool] | None = None,
) -> Any:
from sentry.projects.services.project import project_service
keyed_result, well_known_result = project_service.get_option(project=self, key=key)
if validate is None or validate(keyed_result):
return keyed_result
if default is not None:
return default
return well_known_result
def update_option(self, key: str, value: Any) -> bool:
from sentry.projects.services.project import project_service
return project_service.update_option(self, key, value)
def delete_option(self, key: str) -> None:
from sentry.projects.services.project import project_service
project_service.delete_option(self, key)
| RpcProject |
python | joke2k__faker | tests/providers/test_file.py | {
"start": 53,
"end": 3381
} | class ____(unittest.TestCase):
"""Tests file"""
def setUp(self):
self.fake = Faker()
Faker.seed(0)
def test_file_name(self):
for _ in range(100):
file_name = self.fake.file_name()
assert re.search(r"\w+\.\w+", file_name)
file_name = self.fake.file_name(extension=None)
assert re.search(r"\w+\.\w+", file_name)
file_name = self.fake.file_name(extension="pdf")
assert re.search(r"\w+\.pdf$", file_name)
file_name = self.fake.file_name(category="image")
assert re.search(r"\w+\.(bmp|gif|jpeg|jpg|png|tiff)$", file_name)
file_name = self.fake.file_name(category="image", extension="abcdef")
assert re.search(r"\w+\.abcdef$", file_name)
file_name = self.fake.file_name(extension="")
assert re.search(r"\w+$", file_name)
def test_file_path(self):
for _ in range(100):
file_path = self.fake.file_path()
assert re.search(r"\/\w+\/\w+\.\w+", file_path)
file_path = self.fake.file_path(absolute=False)
assert re.search(r"\w+\/\w+\.\w+", file_path)
file_path = self.fake.file_path(depth=3)
assert re.search(r"\/\w+\/\w+\/\w+\.\w+", file_path)
file_path = self.fake.file_path(extension="pdf")
assert re.search(r"\/\w+\/\w+\.pdf$", file_path)
file_path = self.fake.file_path(extension=["a", "bc", "def", "ghij", "klmno"])
assert re.search(r"\/\w+\/\w+\.(a|bc|def|ghij|klmno)$", file_path)
file_path = self.fake.file_path(extension=None)
assert re.search(r"\/\w+\/\w+\.\w+", file_path)
file_path = self.fake.file_path(extension="")
assert re.search(r"\/\w+\/\w+$", file_path)
file_path = self.fake.file_path(extension=[])
assert re.search(r"\/\w+\/\w+$", file_path)
file_path = self.fake.file_path(category="image")
assert re.search(r"\/\w+\/\w+\.(bmp|gif|jpeg|jpg|png|tiff)", file_path)
file_path = self.fake.file_path(file_system_rule="windows")
assert re.search(r"\\\w+\\\w+\.\w+", file_path)
file_path = self.fake.file_path(file_system_rule="windows", category="image", absolute=True)
assert re.search(r"^[a-zA-Z]:\\\w+\\\w+\.\w+", file_path)
assert re.search(r"\\\w+\\\w+\.(bmp|gif|jpeg|jpg|png|tiff)$", file_path)
def test_unix_device(self):
reg_device = re.compile(r"^/dev/(vd|sd|xvd)[a-z]$")
# Test default
for _ in range(100):
path = self.fake.unix_device()
assert reg_device.match(path)
# Test with prefix
for _ in range(100):
path = self.fake.unix_device("sd")
assert reg_device.match(path)
assert path.startswith("/dev/sd")
def test_unix_partition(self):
reg_part = re.compile(r"^/dev/(vd|sd|xvd)[a-z]\d$")
# Test default
for _ in range(100):
path = self.fake.unix_partition()
assert reg_part.match(path)
# Test with prefix
for _ in range(100):
path = self.fake.unix_partition("sd")
assert reg_part.match(path)
assert path.startswith("/dev/sd")
| TestFile |
python | encode__django-rest-framework | rest_framework/utils/encoders.py | {
"start": 2826,
"end": 3126
} | class ____:
"""
CustomScalar that knows how to encode timedelta that renderer
can understand.
"""
@classmethod
def represent_timedelta(cls, dumper, data):
value = str(data.total_seconds())
return dumper.represent_scalar('tag:yaml.org,2002:str', value)
| CustomScalar |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 89033,
"end": 92095
} | class ____(Request):
"""
Archive tasks
:param ids: IDs of the tasks to archive
:type ids: Sequence[str]
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "archive_many"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "IDs of the tasks to archive",
"items": {"type": "string"},
"type": "array",
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
},
"required": ["ids"],
"response": {
"properties": {
"succeeded": {
"items": {
"properties": {
"archived": {
"description": "Indicates whether the task was archived",
"type": "boolean",
}
}
}
}
}
},
"type": "object",
}
def __init__(
self, ids: List[str], status_reason: Optional[str] = None, status_message: Optional[str] = None, **kwargs: Any
) -> None:
super(ArchiveManyRequest, self).__init__(**kwargs)
self.ids = ids
self.status_reason = status_reason
self.status_message = status_message
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| ArchiveManyRequest |
python | pyinstaller__pyinstaller | bootloader/waflib/Configure.py | {
"start": 447,
"end": 14831
} | class ____(Context.Context):
'''configures the project'''
cmd = 'configure'
error_handlers = []
def __init__(self, **kw):
super(ConfigurationContext, self).__init__(**kw)
self.environ = dict(os.environ)
self.all_envs = {}
self.top_dir = None
self.out_dir = None
self.tools = []
self.hash = 0
self.files = []
self.tool_cache = []
self.setenv('')
def setenv(self, name, env=None):
if name not in self.all_envs or env:
if not env:
env = ConfigSet.ConfigSet()
self.prepare_env(env)
else:
env = env.derive()
self.all_envs[name] = env
self.variant = name
def get_env(self):
return self.all_envs[self.variant]
def set_env(self, val):
self.all_envs[self.variant] = val
env = property(get_env, set_env)
def init_dirs(self):
top = self.top_dir
if not top:
top = Options.options.top
if not top:
top = getattr(Context.g_module, Context.TOP, None)
if not top:
top = self.path.abspath()
top = os.path.abspath(top)
self.srcnode = (os.path.isabs(top) and self.root or self.path).find_dir(top)
assert (self.srcnode)
out = self.out_dir
if not out:
out = Options.options.out
if not out:
out = getattr(Context.g_module, Context.OUT, None)
if not out:
out = Options.lockfile.replace('.lock-waf_%s_' % sys.platform, '').replace('.lock-waf', '')
out = os.path.realpath(out)
self.bldnode = (os.path.isabs(out) and self.root or self.path).make_node(out)
self.bldnode.mkdir()
if not os.path.isdir(self.bldnode.abspath()):
self.fatal('Could not create the build directory %s' % self.bldnode.abspath())
def execute(self):
self.init_dirs()
self.cachedir = self.bldnode.make_node(Build.CACHE_DIR)
self.cachedir.mkdir()
path = os.path.join(self.bldnode.abspath(), WAF_CONFIG_LOG)
self.logger = Logs.make_logger(path, 'cfg')
app = getattr(Context.g_module, 'APPNAME', '')
if app:
ver = getattr(Context.g_module, 'VERSION', '')
if ver:
app = "%s (%s)" % (app, ver)
params = {
'now': time.ctime(),
'pyver': sys.hexversion,
'systype': sys.platform,
'args': " ".join(sys.argv),
'wafver': Context.WAFVERSION,
'abi': Context.ABI,
'app': app
}
self.to_log(conf_template % params)
self.msg('Setting top to', self.srcnode.abspath())
self.msg('Setting out to', self.bldnode.abspath())
if id(self.srcnode) == id(self.bldnode):
Logs.warn('Setting top == out')
elif id(self.path) != id(self.srcnode):
if self.srcnode.is_child_of(self.path):
Logs.warn('Are you certain that you do not want to set top="." ?')
super(ConfigurationContext, self).execute()
self.store()
Context.top_dir = self.srcnode.abspath()
Context.out_dir = self.bldnode.abspath()
env = ConfigSet.ConfigSet()
env.argv = sys.argv
env.options = Options.options.__dict__
env.config_cmd = self.cmd
env.run_dir = Context.run_dir
env.top_dir = Context.top_dir
env.out_dir = Context.out_dir
env.hash = self.hash
env.files = self.files
env.environ = dict(self.environ)
env.launch_dir = Context.launch_dir
if not (
self.env.NO_LOCK_IN_RUN or env.environ.get('NO_LOCK_IN_RUN') or getattr(Options.options, 'no_lock_in_run')
):
env.store(os.path.join(Context.run_dir, Options.lockfile))
if not (
self.env.NO_LOCK_IN_TOP or env.environ.get('NO_LOCK_IN_TOP') or getattr(Options.options, 'no_lock_in_top')
):
env.store(os.path.join(Context.top_dir, Options.lockfile))
if not (
self.env.NO_LOCK_IN_OUT or env.environ.get('NO_LOCK_IN_OUT') or getattr(Options.options, 'no_lock_in_out')
):
env.store(os.path.join(Context.out_dir, Options.lockfile))
def prepare_env(self, env):
if not env.PREFIX:
if Options.options.prefix or Utils.is_win32:
env.PREFIX = Options.options.prefix
else:
env.PREFIX = '/'
if not env.BINDIR:
if Options.options.bindir:
env.BINDIR = Options.options.bindir
else:
env.BINDIR = Utils.subst_vars('${PREFIX}/bin', env)
if not env.LIBDIR:
if Options.options.libdir:
env.LIBDIR = Options.options.libdir
else:
env.LIBDIR = Utils.subst_vars('${PREFIX}/lib%s' % Utils.lib64(), env)
def store(self):
n = self.cachedir.make_node('build.config.py')
n.write('version = 0x%x\ntools = %r\n' % (Context.HEXVERSION, self.tools))
if not self.all_envs:
self.fatal('nothing to store in the configuration context!')
for key in self.all_envs:
tmpenv = self.all_envs[key]
tmpenv.store(os.path.join(self.cachedir.abspath(), key + Build.CACHE_SUFFIX))
def load(self, tool_list, tooldir=None, funs=None, with_sys_path=True, cache=False):
tools = Utils.to_list(tool_list)
if tooldir:
tooldir = Utils.to_list(tooldir)
for tool in tools:
if cache:
mag = (tool, id(self.env), tooldir, funs)
if mag in self.tool_cache:
self.to_log('(tool %s is already loaded, skipping)' % tool)
continue
self.tool_cache.append(mag)
module = None
try:
module = Context.load_tool(tool, tooldir, ctx=self, with_sys_path=with_sys_path)
except ImportError as e:
self.fatal(
'Could not load the Waf tool %r from %r\n%s' % (tool, getattr(e, 'waf_sys_path', sys.path), e)
)
except Exception as e:
self.to_log('imp %r (%r & %r)' % (tool, tooldir, funs))
self.to_log(traceback.format_exc())
raise
if funs is not None:
self.eval_rules(funs)
else:
func = getattr(module, 'configure', None)
if func:
if type(func) is type(Utils.readf):
func(self)
else:
self.eval_rules(func)
self.tools.append({'tool': tool, 'tooldir': tooldir, 'funs': funs})
def post_recurse(self, node):
super(ConfigurationContext, self).post_recurse(node)
self.hash = Utils.h_list((self.hash, node.read('rb')))
self.files.append(node.abspath())
def eval_rules(self, rules):
self.rules = Utils.to_list(rules)
for x in self.rules:
f = getattr(self, x)
if not f:
self.fatal('No such configuration function %r' % x)
f()
def conf(f):
def fun(*k, **kw):
mandatory = kw.pop('mandatory', True)
try:
return f(*k, **kw)
except Errors.ConfigurationError:
if mandatory:
raise
fun.__name__ = f.__name__
setattr(ConfigurationContext, f.__name__, fun)
setattr(Build.BuildContext, f.__name__, fun)
return f
@conf
def add_os_flags(self, var, dest=None, dup=False):
try:
flags = shlex.split(self.environ[var])
except KeyError:
return
if dup or ''.join(flags) not in ''.join(Utils.to_list(self.env[dest or var])):
self.env.append_value(dest or var, flags)
@conf
def cmd_to_list(self, cmd):
if isinstance(cmd, str):
if os.path.isfile(cmd):
return [cmd]
if os.sep == '/':
return shlex.split(cmd)
else:
try:
return shlex.split(cmd, posix=False)
except TypeError:
return shlex.split(cmd)
return cmd
@conf
def check_waf_version(self, mini='1.9.99', maxi='2.1.0', **kw):
self.start_msg('Checking for waf version in %s-%s' % (str(mini), str(maxi)), **kw)
ver = Context.HEXVERSION
if Utils.num2ver(mini) > ver:
self.fatal('waf version should be at least %r (%r found)' % (Utils.num2ver(mini), ver))
if Utils.num2ver(maxi) < ver:
self.fatal('waf version should be at most %r (%r found)' % (Utils.num2ver(maxi), ver))
self.end_msg('ok', **kw)
@conf
def find_file(self, filename, path_list=[]):
for n in Utils.to_list(filename):
for d in Utils.to_list(path_list):
p = os.path.expanduser(os.path.join(d, n))
if os.path.exists(p):
return p
self.fatal('Could not find %r' % filename)
@conf
def find_program(self, filename, **kw):
exts = kw.get('exts', Utils.is_win32 and '.exe,.com,.bat,.cmd' or ',.sh,.pl,.py')
environ = kw.get('environ', getattr(self, 'environ', os.environ))
ret = ''
filename = Utils.to_list(filename)
msg = kw.get('msg', ', '.join(filename))
var = kw.get('var', '')
if not var:
var = re.sub(r'[-.]', '_', filename[0].upper())
path_list = kw.get('path_list', '')
if path_list:
path_list = Utils.to_list(path_list)
else:
path_list = environ.get('PATH', '').split(os.pathsep)
if kw.get('value'):
ret = self.cmd_to_list(kw['value'])
elif environ.get(var):
ret = self.cmd_to_list(environ[var])
elif self.env[var]:
ret = self.cmd_to_list(self.env[var])
else:
if not ret:
ret = self.find_binary(filename, exts.split(','), path_list)
if not ret and Utils.winreg:
ret = Utils.get_registry_app_path(Utils.winreg.HKEY_CURRENT_USER, filename)
if not ret and Utils.winreg:
ret = Utils.get_registry_app_path(Utils.winreg.HKEY_LOCAL_MACHINE, filename)
ret = self.cmd_to_list(ret)
if ret:
if len(ret) == 1:
retmsg = ret[0]
else:
retmsg = ret
else:
retmsg = False
self.msg('Checking for program %r' % msg, retmsg, **kw)
if not kw.get('quiet'):
self.to_log('find program=%r paths=%r var=%r -> %r' % (filename, path_list, var, ret))
if not ret:
self.fatal(kw.get('errmsg', '') or 'Could not find the program %r' % filename)
interpreter = kw.get('interpreter')
if interpreter is None:
if not Utils.check_exe(ret[0], env=environ):
self.fatal('Program %r is not executable' % ret)
self.env[var] = ret
else:
self.env[var] = self.env[interpreter] + ret
return ret
@conf
def find_binary(self, filenames, exts, paths):
for f in filenames:
for ext in exts:
exe_name = f + ext
if os.path.isabs(exe_name):
if os.path.isfile(exe_name):
return exe_name
else:
for path in paths:
x = os.path.expanduser(os.path.join(path, exe_name))
if os.path.isfile(x):
return x
return None
@conf
def run_build(self, *k, **kw):
buf = []
for key in sorted(kw.keys()):
v = kw[key]
if isinstance(v, ConfigSet.ConfigSet):
continue
elif hasattr(v, '__call__'):
buf.append(Utils.h_fun(v))
else:
buf.append(str(v))
h = Utils.h_list(buf)
dir = self.bldnode.abspath() + os.sep + (not Utils.is_win32 and '.' or '') + 'conf_check_' + Utils.to_hex(h)
cachemode = kw.get('confcache', getattr(Options.options, 'confcache', None))
if not cachemode and os.path.exists(dir):
shutil.rmtree(dir)
try:
os.makedirs(dir)
except OSError:
pass
try:
os.stat(dir)
except OSError:
self.fatal('cannot use the configuration test folder %r' % dir)
if cachemode == 1:
try:
proj = ConfigSet.ConfigSet(os.path.join(dir, 'cache_run_build'))
except EnvironmentError:
pass
else:
ret = proj['cache_run_build']
if isinstance(ret, str) and ret.startswith('Test does not build'):
self.fatal(ret)
return ret
bdir = os.path.join(dir, 'testbuild')
if not os.path.exists(bdir):
os.makedirs(bdir)
cls_name = kw.get('run_build_cls') or getattr(self, 'run_build_cls', 'build')
self.test_bld = bld = Context.create_context(cls_name, top_dir=dir, out_dir=bdir)
bld.init_dirs()
bld.progress_bar = 0
bld.targets = '*'
bld.logger = self.logger
bld.all_envs.update(self.all_envs)
bld.env = kw['env']
bld.kw = kw
bld.conf = self
kw['build_fun'](bld)
ret = -1
try:
try:
bld.compile()
except Errors.WafError:
ret = 'Test does not build: %s' % traceback.format_exc()
self.fatal(ret)
else:
ret = getattr(bld, 'retval', 0)
finally:
if cachemode:
proj = ConfigSet.ConfigSet()
proj['cache_run_build'] = ret
proj.store(os.path.join(dir, 'cache_run_build'))
else:
shutil.rmtree(dir)
return ret
@conf
def ret_msg(self, msg, args):
if isinstance(msg, str):
return msg
return msg(args)
@conf
def test(self, *k, **kw):
if not 'env' in kw:
kw['env'] = self.env.derive()
if kw.get('validate'):
kw['validate'](kw)
self.start_msg(kw['msg'], **kw)
ret = None
try:
ret = self.run_build(*k, **kw)
except self.errors.ConfigurationError:
self.end_msg(kw['errmsg'], 'YELLOW', **kw)
if Logs.verbose > 1:
raise
else:
self.fatal('The configuration failed')
else:
kw['success'] = ret
if kw.get('post_check'):
ret = kw['post_check'](kw)
if ret:
self.end_msg(kw['errmsg'], 'YELLOW', **kw)
self.fatal('The configuration failed %r' % ret)
else:
self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw)
return ret
| ConfigurationContext |
python | celery__celery | t/unit/app/test_routes.py | {
"start": 7372,
"end": 7502
} | class ____:
def route_for_task(self, task, args, kwargs):
if task == 'celery.xaza':
return 'bar'
| TestRouter |
python | huggingface__transformers | src/transformers/models/glm4_moe/modeling_glm4_moe.py | {
"start": 2155,
"end": 9037
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Glm4MoeConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Glm4MoeConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
dim = int(head_dim * partial_rotary_factor)
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
# Keep half or full tensor for later concatenation
rotary_dim = cos.shape[-1]
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
# Apply rotary embeddings on the first half or full tensor
q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
# Concatenate back to full shape
q_embed = torch.cat([q_embed, q_pass], dim=-1)
k_embed = torch.cat([k_embed, k_pass], dim=-1)
return q_embed, k_embed
| Glm4MoeRotaryEmbedding |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec13.py | {
"start": 1733,
"end": 2778
} | class ____: ...
CheckFunc = CoroFunc[Concatenate[ClassA, _P], bool]
async def my_check_func(obj: ClassA, a: int, b: str) -> bool:
print(a, b)
return str(a) == b
async def takes_check_func(
check_func: CheckFunc[_P], *args: _P.args, **kwargs: _P.kwargs
):
await check_func(ClassA(), *args, **kwargs)
asyncio.run(takes_check_func(my_check_func, 1, "2"))
# This should generate an error because the signature doesn't match.
asyncio.run(takes_check_func(my_check_func, 1, 2))
TA1: TypeAlias = Callable[_P, Any]
ta1_1: TA1[()] = lambda: 0
# This should generate an error.
ta1_2: TA1[()] = lambda x: x
TA2: TypeAlias = Callable[Concatenate[int, _P], None]
TA3: TypeAlias = TA2[int, int]
TA4: TypeAlias = TA2[_P]
# This should generate an error.
TA5: TypeAlias = TA2[[int, _P]]
# This should generate an error.
TA6: TypeAlias = TA2[[int, ...]]
TA7: TypeAlias = TA2[Concatenate[int, _P]]
TA8: TypeAlias = TA2[Concatenate[int, ...]]
# This should generate two errors.
TA9: TypeAlias = TA2[int, Concatenate[int, _P]]
| ClassA |
python | weaviate__weaviate-python-client | weaviate/rbac/executor.py | {
"start": 1054,
"end": 12858
} | class ____(Generic[ConnectionType]):
def __init__(self, connection: ConnectionType):
self._connection = connection
def list_all(self) -> executor.Result[Dict[str, Role]]:
"""Get all roles.
Returns:
A dictionary with user names as keys and the `Role` objects as values.
"""
path = "/authz/roles"
def resp(res: Response) -> Dict[str, Role]:
return {role["name"]: Role._from_weaviate_role(role) for role in res.json()}
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
error_msg="Could not get roles",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="Get roles"),
)
@deprecated(
"""This method is deprecated and will be removed in Q4 25. Please use `users.get_my_user()` instead."""
)
def get_current_roles(self) -> executor.Result[List[Role]]:
# TODO: Add documentation here and this method to the stubs plus tests
path = "/authz/users/own-roles"
def resp(res: Response) -> List[Role]:
return [Role._from_weaviate_role(role) for role in cast(List[WeaviateRole], res.json())]
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
error_msg="Could not get roles",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="Get own roles"),
)
def exists(self, role_name: str) -> executor.Result[bool]:
"""Check if a role exists.
Args:
role_name: The name of the role to check.
Returns:
True if the role exists, False otherwise.
"""
path = f"/authz/roles/{role_name}"
def resp(res: Response) -> bool:
return res.status_code == 200
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
error_msg=f"Could not get role {role_name}",
status_codes=_ExpectedStatusCodes(ok_in=[200, 404], error="Get role"),
)
def get(self, role_name: str) -> executor.Result[Optional[Role]]:
"""Get the permissions granted to this role.
Args:
role_name: The name of the role to get the permissions for.
Returns:
A `Role` object or `None` if it does not exist.
"""
path = f"/authz/roles/{role_name}"
def resp(res: Response) -> Optional[Role]:
if res.status_code == 404:
return None
return Role._from_weaviate_role(cast(WeaviateRole, res.json()))
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
error_msg=f"Could not get role {role_name}",
status_codes=_ExpectedStatusCodes(ok_in=[200, 404], error="Get role"),
)
def create(self, *, role_name: str, permissions: PermissionsInputType) -> executor.Result[Role]:
"""Create a new role.
Args:
role_name: The name of the role.
permissions: The permissions of the role.
Returns:
The created role.
"""
path = "/authz/roles"
perms = []
for perm in _flatten_permissions(permissions):
perms.extend(perm._to_weaviate())
role: WeaviateRole = {
"name": role_name,
"permissions": perms,
}
def resp(res: Response) -> Role:
return Role._from_weaviate_role(role)
return executor.execute(
response_callback=resp,
method=self._connection.post,
path=path,
weaviate_object=role,
error_msg=f"Could not create role: {json.dumps(role)}",
status_codes=_ExpectedStatusCodes(ok_in=[201], error="Create role"),
)
def get_user_assignments(self, role_name: str) -> executor.Result[List[UserAssignment]]:
"""Get the ids and usertype of users that have been assigned this role.
Args:
role_name: The role to get the users for.
Returns:
A list of Assignments.
"""
path = f"/authz/roles/{role_name}/user-assignments"
def resp(res: Response) -> List[UserAssignment]:
return [
UserAssignment(
user_id=assignment["userId"],
user_type=UserTypes(assignment["userType"]),
)
for assignment in res.json()
]
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
error_msg=f"Could not get users of role {role_name}",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="Get users of role"),
)
def get_group_assignments(self, role_name: str) -> executor.Result[List[GroupAssignment]]:
"""Get the ids and group type of groups that have been assigned this role.
Args:
role_name: The role to get the users for.
Returns:
A list of Assignments.
"""
path = f"/authz/roles/{role_name}/group-assignments"
def resp(res: Response) -> List[GroupAssignment]:
return [
GroupAssignment(
group_id=assignment["groupId"],
group_type=GroupTypes(assignment["groupType"]),
)
for assignment in res.json()
]
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
error_msg=f"Could not get groups of role {role_name}",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="Get groups of role"),
)
@deprecated(
"""This method is deprecated and will be removed in Q4 25. Please use `roles.get_user_assignments` instead."""
)
def get_assigned_user_ids(
self,
role_name: str,
) -> executor.Result[List[str]]:
"""Get the ids of user that have been assigned this role.
Args:
role_name: The role to get the users for.
Returns:
A list of ids.
"""
path = f"/authz/roles/{role_name}/users"
def resp(res: Response) -> List[str]:
return cast(List[str], res.json())
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
error_msg=f"Could not get users of role {role_name}",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="Get users of role"),
)
def delete(
self,
role_name: str,
) -> executor.Result[None]:
"""Delete a role.
Args:
role_name: The name of the role to delete.
"""
path = f"/authz/roles/{role_name}"
def resp(res: Response) -> None:
return None
return executor.execute(
response_callback=resp,
method=self._connection.delete,
path=path,
error_msg=f"Could not delete role {role_name}",
status_codes=_ExpectedStatusCodes(ok_in=[204], error="Delete role"),
)
def add_permissions(
self, *, permissions: PermissionsInputType, role_name: str
) -> executor.Result[None]:
"""Add permissions to a role.
Note: This method is an upsert operation. If the permission already exists, it will be updated. If it does not exist, it will be created.
Args:
permissions: The permissions to add to the role.
role_name: The name of the role to add the permissions to.
"""
path = f"/authz/roles/{role_name}/add-permissions"
if isinstance(permissions, _Permission):
permissions = [permissions]
def resp(res: Response) -> None:
return None
return executor.execute(
response_callback=resp,
method=self._connection.post,
path=path,
weaviate_object={
"permissions": [
weav_perm
for perm in _flatten_permissions(permissions)
for weav_perm in perm._to_weaviate()
]
},
error_msg="Could not add permissions",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="Add permissions"),
)
def remove_permissions(
self, *, permissions: PermissionsInputType, role_name: str
) -> executor.Result[None]:
"""Remove permissions from a role.
Note: This method is a downsert operation. If the permission does not exist, it will be ignored. If these permissions are the only permissions of the role, the role will be deleted.
Args:
permissions: The permissions to remove from the role.
role_name: The name of the role to remove the permissions from.
"""
path = f"/authz/roles/{role_name}/remove-permissions"
if isinstance(permissions, _Permission):
permissions = [permissions]
def resp(res: Response) -> None:
return None
return executor.execute(
response_callback=resp,
method=self._connection.post,
path=path,
weaviate_object={
"permissions": [
weav_perm
for perm in _flatten_permissions(permissions)
for weav_perm in perm._to_weaviate()
]
},
error_msg="Could not remove permissions",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="Remove permissions"),
)
def __has_permission(
self,
*,
permission: WeaviatePermission,
role: str,
) -> executor.Result[bool]:
path = f"/authz/roles/{role}/has-permission"
def resp(res: Response) -> bool:
return res.status_code == 200
return executor.execute(
response_callback=resp,
method=self._connection.post,
path=path,
weaviate_object=permission,
error_msg="Could not check permission",
status_codes=_ExpectedStatusCodes(ok_in=[200, 404], error="Check permission"),
)
def has_permissions(
self,
*,
permissions: Union[
PermissionsInputType, PermissionsOutputType, Sequence[PermissionsOutputType]
],
role: str,
) -> executor.Result[bool]:
"""Check if a role has a specific set of permission.
Args:
permission: The permission to check.
role: The role to check the permission for.
Returns:
True if the role has the permission, False otherwise.
"""
if isinstance(self._connection, ConnectionAsync):
async def execute() -> bool:
return all(
await asyncio.gather(
*[
executor.aresult(self.__has_permission(permission=weav_perm, role=role))
for permission in _flatten_permissions(permissions)
for weav_perm in permission._to_weaviate()
]
)
)
return execute()
return all(
executor.result(self.__has_permission(permission=weav_perm, role=role))
for permission in _flatten_permissions(permissions)
for weav_perm in permission._to_weaviate()
)
| _RolesExecutor |
python | redis__redis-py | redis/commands/cluster.py | {
"start": 25807,
"end": 29094
} | class ____(DataAccessCommands):
"""
A class for Redis Cluster Data Access Commands
The class inherits from Redis's core DataAccessCommand class and do the
required adjustments to work with cluster mode
"""
def stralgo(
self,
algo: Literal["LCS"],
value1: KeyT,
value2: KeyT,
specific_argument: Union[Literal["strings"], Literal["keys"]] = "strings",
len: bool = False,
idx: bool = False,
minmatchlen: Optional[int] = None,
withmatchlen: bool = False,
**kwargs,
) -> ResponseT:
"""
Implements complex algorithms that operate on strings.
Right now the only algorithm implemented is the LCS algorithm
(longest common substring). However new algorithms could be
implemented in the future.
``algo`` Right now must be LCS
``value1`` and ``value2`` Can be two strings or two keys
``specific_argument`` Specifying if the arguments to the algorithm
will be keys or strings. strings is the default.
``len`` Returns just the len of the match.
``idx`` Returns the match positions in each string.
``minmatchlen`` Restrict the list of matches to the ones of a given
minimal length. Can be provided only when ``idx`` set to True.
``withmatchlen`` Returns the matches with the len of the match.
Can be provided only when ``idx`` set to True.
For more information see https://redis.io/commands/stralgo
"""
target_nodes = kwargs.pop("target_nodes", None)
if specific_argument == "strings" and target_nodes is None:
target_nodes = "default-node"
kwargs.update({"target_nodes": target_nodes})
return super().stralgo(
algo,
value1,
value2,
specific_argument,
len,
idx,
minmatchlen,
withmatchlen,
**kwargs,
)
def scan_iter(
self,
match: Optional[PatternT] = None,
count: Optional[int] = None,
_type: Optional[str] = None,
**kwargs,
) -> Iterator:
# Do the first query with cursor=0 for all nodes
cursors, data = self.scan(match=match, count=count, _type=_type, **kwargs)
yield from data
cursors = {name: cursor for name, cursor in cursors.items() if cursor != 0}
if cursors:
# Get nodes by name
nodes = {name: self.get_node(node_name=name) for name in cursors.keys()}
# Iterate over each node till its cursor is 0
kwargs.pop("target_nodes", None)
while cursors:
for name, cursor in cursors.items():
cur, data = self.scan(
cursor=cursor,
match=match,
count=count,
_type=_type,
target_nodes=nodes[name],
**kwargs,
)
yield from data
cursors[name] = cur[name]
cursors = {
name: cursor for name, cursor in cursors.items() if cursor != 0
}
| ClusterDataAccessCommands |
python | encode__starlette | starlette/_utils.py | {
"start": 1377,
"end": 1578
} | class ____(Protocol):
async def close(self) -> None: ... # pragma: no cover
SupportsAsyncCloseType = TypeVar("SupportsAsyncCloseType", bound=SupportsAsyncClose, covariant=False)
| SupportsAsyncClose |
python | huggingface__transformers | src/transformers/models/squeezebert/modeling_squeezebert.py | {
"start": 3177,
"end": 3880
} | class ____(nn.Module):
"""
Wrapper for torch.matmul(). This makes flop-counting easier to implement. Note that if you directly call
torch.matmul() in your code, the flop counter will typically ignore the flops of the matmul.
"""
def __init__(self):
super().__init__()
def forward(self, mat1, mat2):
"""
:param inputs: two torch tensors :return: matmul of these tensors
Here are the typical dimensions found in BERT (the B is optional) mat1.shape: [B, <optional extra dims>, M, K]
mat2.shape: [B, <optional extra dims>, K, N] output shape: [B, <optional extra dims>, M, N]
"""
return torch.matmul(mat1, mat2)
| MatMulWrapper |
python | celery__celery | t/integration/test_serialization.py | {
"start": 170,
"end": 1651
} | class ____:
def test_accept(self, celery_app):
app = celery_app
# Redefine env to use in subprocess
# broker_url and result backend are different for each integration test backend
passenv = {
**os.environ,
"CELERY_BROKER_URL": app.conf.broker_url,
"CELERY_RESULT_BACKEND": app.conf.result_backend,
}
with ThreadPoolExecutor(max_workers=2) as executor:
f1 = executor.submit(get_worker_error_messages, "w1", passenv)
f2 = executor.submit(get_worker_error_messages, "w2", passenv)
time.sleep(3)
log1 = f1.result()
log2 = f2.result()
for log in [log1, log2]:
assert log.find(disabled_error_message) == -1, log
def get_worker_error_messages(name, env):
"""run a worker and return its stderr
:param name: the name of the worker
:param env: the environment to run the worker in
worker must be running in other process because of avoiding conflict."""
worker = subprocess.Popen(
[
"celery",
"--config",
"t.integration.test_serialization_config",
"worker",
"-c",
"2",
"-n",
f"{name}@%%h",
],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env,
)
worker.terminate()
err = worker.stderr.read().decode("utf-8")
return err
| test_config_serialization |
python | python__mypy | mypyc/ir/ops.py | {
"start": 31634,
"end": 32596
} | class ____(RegisterOp):
"""dest = (reg, ...) (for fixed-length tuple)"""
error_kind = ERR_NEVER
def __init__(self, items: list[Value], line: int) -> None:
super().__init__(line)
self.items = items
# Don't keep track of the fact that an int is short after it
# is put into a tuple, since we don't properly implement
# runtime subtyping for tuples.
self.tuple_type = RTuple(
[
arg.type if not is_short_int_rprimitive(arg.type) else int_rprimitive
for arg in items
]
)
self.type = self.tuple_type
def sources(self) -> list[Value]:
return self.items.copy()
def stolen(self) -> list[Value]:
return self.items.copy()
def set_sources(self, new: list[Value]) -> None:
self.items = new[:]
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_tuple_set(self)
@final
| TupleSet |
python | getsentry__sentry | src/sentry/core/endpoints/organization_projects.py | {
"start": 1989,
"end": 8288
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
permission_classes = (OrganizationAndStaffPermission,)
@extend_schema(
operation_id="List an Organization's Projects",
parameters=[GlobalParams.ORG_ID_OR_SLUG, CursorQueryParam],
request=None,
responses={
200: inline_sentry_response_serializer(
"OrganizationProjectResponseDict", list[OrganizationProjectResponse]
),
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=OrganizationExamples.LIST_PROJECTS,
)
def get(self, request: Request, organization: Organization) -> Response:
"""
Return a list of projects bound to a organization.
"""
stats_period = request.GET.get("statsPeriod")
collapse = request.GET.getlist("collapse", [])
if stats_period not in (None, "", "1h", "24h", "7d", "14d", "30d", "90d"):
return Response(
{"error": {"params": {"stats_period": {"message": ERR_INVALID_STATS_PERIOD}}}},
status=400,
)
elif not stats_period:
# disable stats
stats_period = None
datasetName = request.GET.get("dataset", "discover")
dataset = get_dataset(datasetName)
queryset: QuerySet[Project]
if request.auth and not request.user.is_authenticated:
# TODO: remove this, no longer supported probably
if hasattr(request.auth, "project"):
queryset = Project.objects.filter(id=request.auth.project.id)
elif request.auth.organization_id is not None:
org = request.auth.organization_id
team_list = list(Team.objects.filter(organization_id=org))
queryset = Project.objects.filter(teams__in=team_list)
else:
return Response(
{"detail": "Current access does not point to " "organization."}, status=400
)
else:
queryset = Project.objects.filter(organization=organization)
order_by = ["slug"]
if request.user.is_authenticated:
queryset = queryset.extra(
select={
"is_bookmarked": """exists (
select *
from sentry_projectbookmark spb
where spb.project_id = sentry_project.id and spb.user_id = %s
)"""
},
select_params=(request.user.id,),
)
order_by.insert(0, "-is_bookmarked")
query = request.GET.get("query")
if query:
tokens = tokenize_query(query)
for key, value in tokens.items():
if key == "query":
value_s = " ".join(value)
queryset = queryset.filter(
Q(name__icontains=value_s) | Q(slug__icontains=value_s)
)
elif key == "id":
if all(v.isdigit() for v in value):
queryset = queryset.filter(id__in=value)
else:
return Response(
{
"error": {
"params": {
"stats_period": {
"message": "All 'id' values must be integers."
}
}
}
},
status=400,
)
elif key == "slug":
queryset = queryset.filter(slug__in=value)
elif key == "team":
team_list = list(Team.objects.filter(organization=organization, slug__in=value))
queryset = queryset.filter(teams__in=team_list)
elif key == "!team":
team_list = list(Team.objects.filter(organization=organization, slug__in=value))
queryset = queryset.exclude(teams__in=team_list)
elif key == "is_member":
queryset = queryset.filter(teams__organizationmember__user_id=request.user.id)
else:
queryset = queryset.none()
queryset = queryset.filter(status=ObjectStatus.ACTIVE).distinct()
# TODO(davidenwang): remove this after frontend requires only paginated projects
get_all_projects = request.GET.get("all_projects") == "1"
if get_all_projects:
queryset = queryset.order_by("slug").select_related("organization")
return Response(
serialize(
list(queryset),
request.user,
ProjectSummarySerializer(collapse=collapse, dataset=dataset),
)
)
else:
expand = set()
if request.GET.get("transactionStats"):
expand.add("transaction_stats")
if request.GET.get("sessionStats"):
expand.add("session_stats")
expand_context = {"options": request.GET.getlist("options") or []}
if expand_context:
expand.add("options")
def serialize_on_result(result):
environment_id = get_environment_id(request, organization.id)
serializer = ProjectSummarySerializer(
environment_id=environment_id,
stats_period=stats_period,
expand=expand,
expand_context=expand_context,
collapse=collapse,
dataset=dataset,
)
return serialize(result, request.user, serializer)
return self.paginate(
request=request,
queryset=queryset,
order_by=order_by,
on_results=serialize_on_result,
paginator_cls=OffsetPaginator,
)
@region_silo_endpoint
| OrganizationProjectsEndpoint |
python | doocs__leetcode | solution/1700-1799/1791.Find Center of Star Graph/Solution.py | {
"start": 0,
"end": 144
} | class ____:
def findCenter(self, edges: List[List[int]]) -> int:
return edges[0][0] if edges[0][0] in edges[1] else edges[0][1]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/distribute-money-to-maximum-children.py | {
"start": 467,
"end": 936
} | class ____(object):
def distMoney(self, money, children):
"""
:type money: int
:type children: int
:rtype: int
"""
if money < children*1:
return -1
money -= children*1
q, r = divmod(money, 7)
if q > children:
return children-1
if q == children:
return q-int(r != 0)
if q == children-1:
return q-int(r == 3)
return q
| Solution2 |
python | huggingface__transformers | tests/models/pvt/test_modeling_pvt.py | {
"start": 4660,
"end": 8190
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (PvtModel, PvtForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": PvtModel, "image-classification": PvtForImageClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
has_attentions = False
test_torch_exportable = True
def setUp(self):
self.model_tester = PvtModelTester(self)
self.config_tester = PvtConfigTester(self, config_class=PvtConfig)
def test_batching_equivalence(self, atol=1e-4, rtol=1e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Pvt does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Pvt does not have get_input_embeddings method and get_output_embeddings methods")
def test_model_get_set_embeddings(self):
pass
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = sum(self.model_tester.depths) + 1
self.assertEqual(len(hidden_states), expected_num_layers)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]),
[
self.model_tester.batch_size,
(self.model_tester.image_size // 4) ** 2,
self.model_tester.image_size // 4,
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_training(self):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
if model_class.__name__ in MODEL_MAPPING_NAMES.values():
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@slow
def test_model_from_pretrained(self):
model_name = "Zetatech/pvt-tiny-224"
model = PvtModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
| PvtModelTest |
python | pypa__pipenv | pipenv/patched/pip/_vendor/urllib3/util/url.py | {
"start": 3003,
"end": 14311
} | class ____(namedtuple("Url", url_attrs)):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
__slots__ = ()
def __new__(
cls,
scheme=None,
auth=None,
host=None,
port=None,
path=None,
query=None,
fragment=None,
):
if path and not path.startswith("/"):
path = "/" + path
if scheme is not None:
scheme = scheme.lower()
return super(Url, cls).__new__(
cls, scheme, auth, host, port, path, query, fragment
)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or "/"
if self.query is not None:
uri += "?" + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return "%s:%d" % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = u""
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + u"://"
if auth is not None:
url += auth + u"@"
if host is not None:
url += host
if port is not None:
url += u":" + str(port)
if path is not None:
url += path
if query is not None:
url += u"?" + query
if fragment is not None:
url += u"#" + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
.. deprecated:: 1.25
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, "", None
return s[:min_idx], s[min_idx + 1 :], min_delim
def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
"""Percent-encodes a URI component without reapplying
onto an already percent-encoded component.
"""
if component is None:
return component
component = six.ensure_text(component)
# Normalize existing percent-encoded bytes.
# Try to see if the component we're encoding is already percent-encoded
# so we can skip all '%' characters but still encode all others.
component, percent_encodings = PERCENT_RE.subn(
lambda match: match.group(0).upper(), component
)
uri_bytes = component.encode("utf-8", "surrogatepass")
is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
encoded_component = bytearray()
for i in range(0, len(uri_bytes)):
# Will return a single character bytestring on both Python 2 & 3
byte = uri_bytes[i : i + 1]
byte_ord = ord(byte)
if (is_percent_encoded and byte == b"%") or (
byte_ord < 128 and byte.decode() in allowed_chars
):
encoded_component += byte
continue
encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
return encoded_component.decode(encoding)
def _remove_path_dot_segments(path):
# See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
segments = path.split("/") # Turn the path into a list of segments
output = [] # Initialize the variable to use to store output
for segment in segments:
# '.' is the current directory, so ignore it, it is superfluous
if segment == ".":
continue
# Anything other than '..', should be appended to the output
elif segment != "..":
output.append(segment)
# In this case segment == '..', if we can, we should pop the last
# element
elif output:
output.pop()
# If the path starts with '/' and the output is empty or the first string
# is non-empty
if path.startswith("/") and (not output or output[0]):
output.insert(0, "")
# If the path starts with '/.' or '/..' ensure we add one more empty
# string to add a trailing '/'
if path.endswith(("/.", "/..")):
output.append("")
return "/".join(output)
def _normalize_host(host, scheme):
if host:
if isinstance(host, six.binary_type):
host = six.ensure_str(host)
if scheme in NORMALIZABLE_SCHEMES:
is_ipv6 = IPV6_ADDRZ_RE.match(host)
if is_ipv6:
# IPv6 hosts of the form 'a::b%zone' are encoded in a URL as
# such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID
# separator as necessary to return a valid RFC 4007 scoped IP.
match = ZONE_ID_RE.search(host)
if match:
start, end = match.span(1)
zone_id = host[start:end]
if zone_id.startswith("%25") and zone_id != "%25":
zone_id = zone_id[3:]
else:
zone_id = zone_id[1:]
zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
return host[:start].lower() + zone_id + host[end:]
else:
return host.lower()
elif not IPV4_RE.match(host):
return six.ensure_str(
b".".join([_idna_encode(label) for label in host.split(".")])
)
return host
def _idna_encode(name):
if name and any(ord(x) >= 128 for x in name):
try:
from pipenv.patched.pip._vendor import idna
except ImportError:
six.raise_from(
LocationParseError("Unable to parse URL without the 'idna' module"),
None,
)
try:
return idna.encode(name.lower(), strict=True, std3_rules=True)
except idna.IDNAError:
six.raise_from(
LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
)
return name.lower().encode("ascii")
def _encode_target(target):
"""Percent-encodes a request target so that there are no invalid characters"""
path, query = TARGET_RE.match(target).groups()
target = _encode_invalid_chars(path, PATH_CHARS)
query = _encode_invalid_chars(query, QUERY_CHARS)
if query is not None:
target += "?" + query
return target
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
This parser is RFC 3986 and RFC 6874 compliant.
The parser logic and helper functions are based heavily on
work done in the ``rfc3986`` module.
:param str url: URL to parse into a :class:`.Url` namedtuple.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
if not url:
# Empty
return Url()
source_url = url
if not SCHEME_RE.search(url):
url = "//" + url
try:
scheme, authority, path, query, fragment = URI_RE.match(url).groups()
normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
if scheme:
scheme = scheme.lower()
if authority:
auth, _, host_port = authority.rpartition("@")
auth = auth or None
host, port = _HOST_PORT_RE.match(host_port).groups()
if auth and normalize_uri:
auth = _encode_invalid_chars(auth, USERINFO_CHARS)
if port == "":
port = None
else:
auth, host, port = None, None, None
if port is not None:
port = int(port)
if not (0 <= port <= 65535):
raise LocationParseError(url)
host = _normalize_host(host, scheme)
if normalize_uri and path:
path = _remove_path_dot_segments(path)
path = _encode_invalid_chars(path, PATH_CHARS)
if normalize_uri and query:
query = _encode_invalid_chars(query, QUERY_CHARS)
if normalize_uri and fragment:
fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
except (ValueError, AttributeError):
return six.raise_from(LocationParseError(source_url), None)
# For the sake of backwards compatibility we put empty
# string values for path if there are any defined values
# beyond the path in the URL.
# TODO: Remove this when we break backwards compatibility.
if not path:
if query is not None or fragment is not None:
path = ""
else:
path = None
# Ensure that each part of the URL is a `str` for
# backwards compatibility.
if isinstance(url, six.text_type):
ensure_func = six.ensure_text
else:
ensure_func = six.ensure_str
def ensure_type(x):
return x if x is None else ensure_func(x)
return Url(
scheme=ensure_type(scheme),
auth=ensure_type(auth),
host=ensure_type(host),
port=port,
path=ensure_type(path),
query=ensure_type(query),
fragment=ensure_type(fragment),
)
def get_host(url):
"""
Deprecated. Use :func:`parse_url` instead.
"""
p = parse_url(url)
return p.scheme or "http", p.hostname, p.port
| Url |
python | encode__django-rest-framework | rest_framework/utils/serializer_helpers.py | {
"start": 1329,
"end": 1924
} | class ____(list):
"""
Return object from `serializer.data` for the `SerializerList` class.
Includes a backlink to the serializer instance for renderers
to use if they need richer field information.
"""
def __init__(self, *args, **kwargs):
self.serializer = kwargs.pop('serializer')
super().__init__(*args, **kwargs)
def __repr__(self):
return list.__repr__(self)
def __reduce__(self):
# Pickling these objects will drop the .serializer backlink,
# but preserve the raw data.
return (list, (list(self),))
| ReturnList |
python | pandas-dev__pandas | pandas/_typing.py | {
"start": 8907,
"end": 15341
} | class ____(ReadBuffer[AnyStr_co], Protocol):
__module__: str = "pandas.api.typing.aliases"
def __iter__(self) -> Iterator[AnyStr_co]:
# for engine=python
...
def fileno(self) -> int:
# for _MMapWrapper
...
def readline(self) -> AnyStr_co:
# for engine=python
...
@property
def closed(self) -> bool:
# for engine=pyarrow
...
FilePath: TypeAlias = str | PathLike[str]
# for arbitrary kwargs passed during reading/writing files
StorageOptions: TypeAlias = dict[str, Any] | None
# compression keywords and compression
CompressionDict: TypeAlias = dict[str, Any]
CompressionOptions: TypeAlias = (
Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"] | CompressionDict | None
)
ParquetCompressionOptions: TypeAlias = (
Literal["snappy", "gzip", "brotli", "lz4", "zstd"] | None
)
# types in DataFrameFormatter
FormattersType: TypeAlias = (
list[Callable] | tuple[Callable, ...] | Mapping[str | int, Callable]
)
ColspaceType: TypeAlias = Mapping[Hashable, str | int]
FloatFormatType: TypeAlias = Union[str, Callable, "EngFormatter"]
ColspaceArgType: TypeAlias = (
str | int | Sequence[str | int] | Mapping[Hashable, str | int]
)
# Arguments for fillna()
FillnaOptions: TypeAlias = Literal["backfill", "bfill", "ffill", "pad"]
InterpolateOptions: TypeAlias = Literal[
"linear",
"time",
"index",
"values",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"polynomial",
"krogh",
"piecewise_polynomial",
"spline",
"pchip",
"akima",
"cubicspline",
"from_derivatives",
]
# internals
Manager: TypeAlias = Union["BlockManager", "SingleBlockManager"]
# indexing
# PositionalIndexer -> valid 1D positional indexer, e.g. can pass
# to ndarray.__getitem__
# ScalarIndexer is for a single value as the index
# SequenceIndexer is for list like or slices (but not tuples)
# PositionalIndexerTuple is extends the PositionalIndexer for 2D arrays
# These are used in various __getitem__ overloads
# TODO(typing#684): add Ellipsis, see
# https://github.com/python/typing/issues/684#issuecomment-548203158
# https://bugs.python.org/issue41810
# Using List[int] here rather than Sequence[int] to disallow tuples.
ScalarIndexer: TypeAlias = int | np.integer
SequenceIndexer: TypeAlias = slice | list[int] | np.ndarray
PositionalIndexer: TypeAlias = ScalarIndexer | SequenceIndexer
PositionalIndexerTuple: TypeAlias = tuple[PositionalIndexer, PositionalIndexer]
PositionalIndexer2D: TypeAlias = PositionalIndexer | PositionalIndexerTuple
TakeIndexer: TypeAlias = Sequence[int] | Sequence[np.integer] | npt.NDArray[np.integer]
# Shared by functions such as drop and astype
IgnoreRaise: TypeAlias = Literal["ignore", "raise"]
# Windowing rank methods
WindowingRankType: TypeAlias = Literal["average", "min", "max"]
# read_csv engines
CSVEngine: TypeAlias = Literal["c", "python", "pyarrow", "python-fwf"]
# read_json engines
JSONEngine: TypeAlias = Literal["ujson", "pyarrow"]
# read_xml parsers
XMLParsers: TypeAlias = Literal["lxml", "etree"]
# read_html flavors
HTMLFlavors: TypeAlias = Literal["lxml", "html5lib", "bs4"]
# Interval closed type
IntervalLeftRight: TypeAlias = Literal["left", "right"]
IntervalClosedType: TypeAlias = IntervalLeftRight | Literal["both", "neither"]
# datetime and NaTType
DatetimeNaTType: TypeAlias = Union[datetime, "NaTType"]
DateTimeErrorChoices: TypeAlias = Literal["raise", "coerce"]
# sort_index
SortKind: TypeAlias = Literal["quicksort", "mergesort", "heapsort", "stable"]
NaPosition: TypeAlias = Literal["first", "last"]
# Arguments for nsmallest and nlargest
NsmallestNlargestKeep: TypeAlias = Literal["first", "last", "all"]
# quantile interpolation
QuantileInterpolation: TypeAlias = Literal[
"linear", "lower", "higher", "midpoint", "nearest"
]
# plotting
PlottingOrientation: TypeAlias = Literal["horizontal", "vertical"]
# dropna
AnyAll: TypeAlias = Literal["any", "all"]
# merge
MergeHow: TypeAlias = Literal[
"left", "right", "inner", "outer", "cross", "left_anti", "right_anti"
]
MergeValidate: TypeAlias = Literal[
"one_to_one",
"1:1",
"one_to_many",
"1:m",
"many_to_one",
"m:1",
"many_to_many",
"m:m",
]
# join
JoinHow: TypeAlias = Literal["left", "right", "inner", "outer"]
JoinValidate: TypeAlias = Literal[
"one_to_one",
"1:1",
"one_to_many",
"1:m",
"many_to_one",
"m:1",
"many_to_many",
"m:m",
]
# reindex
ReindexMethod: TypeAlias = FillnaOptions | Literal["nearest"]
MatplotlibColor: TypeAlias = str | Sequence[float]
TimeGrouperOrigin: TypeAlias = Union[
"Timestamp", Literal["epoch", "start", "start_day", "end", "end_day"]
]
TimeAmbiguous: TypeAlias = Literal["infer", "NaT", "raise"] | npt.NDArray[np.bool_]
TimeNonexistent: TypeAlias = (
Literal["shift_forward", "shift_backward", "NaT", "raise"] | timedelta
)
DropKeep: TypeAlias = Literal["first", "last", False]
CorrelationMethod: TypeAlias = (
Literal["pearson", "kendall", "spearman"]
| Callable[[np.ndarray, np.ndarray], float]
)
AlignJoin: TypeAlias = Literal["outer", "inner", "left", "right"]
DtypeBackend: TypeAlias = Literal["pyarrow", "numpy_nullable"]
TimeUnit: TypeAlias = Literal["s", "ms", "us", "ns"]
OpenFileErrors: TypeAlias = Literal[
"strict",
"ignore",
"replace",
"surrogateescape",
"xmlcharrefreplace",
"backslashreplace",
"namereplace",
]
# update
UpdateJoin: TypeAlias = Literal["left"]
# applymap
NaAction: TypeAlias = Literal["ignore"]
# from_dict
FromDictOrient: TypeAlias = Literal["columns", "index", "tight"]
# to_stata
ToStataByteorder: TypeAlias = Literal[">", "<", "little", "big"]
# ExcelWriter
ExcelWriterIfSheetExists: TypeAlias = Literal["error", "new", "replace", "overlay"]
ExcelWriterMergeCells: TypeAlias = bool | Literal["columns"]
# Offsets
OffsetCalendar: TypeAlias = Union[np.busdaycalendar, "AbstractHolidayCalendar"]
# read_csv: usecols
UsecolsArgType: TypeAlias = (
SequenceNotStr[Hashable] | range | AnyArrayLike | Callable[[HashableT], bool] | None
)
# maintain the sub-type of any hashable sequence
SequenceT = TypeVar("SequenceT", bound=Sequence[Hashable])
SliceType: TypeAlias = Hashable | None
# Arrow PyCapsule Interface
# from https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html#protocol-typehints
| ReadCsvBuffer |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 57302,
"end": 57588
} | class ____:
"Missing __getitem__ and __iter__"
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn):
raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
| X |
python | coleifer__peewee | examples/hexastore.py | {
"start": 107,
"end": 2981
} | class ____(object):
def __init__(self, database=':memory:', **options):
if isinstance(database, str):
self.db = SqliteDatabase(database, **options)
elif isinstance(database, Database):
self.db = database
else:
raise ValueError('Expected database filename or a Database '
'instance. Got: %s' % repr(database))
self.v = _VariableFactory()
self.G = self.get_model()
def get_model(self):
class Graph(Model):
subj = TextField()
pred = TextField()
obj = TextField()
class Meta:
database = self.db
indexes = (
(('pred', 'obj'), False),
(('obj', 'subj'), False),
)
primary_key = CompositeKey('subj', 'pred', 'obj')
self.db.create_tables([Graph])
return Graph
def store(self, s, p, o):
self.G.create(subj=s, pred=p, obj=o)
def store_many(self, items):
fields = [self.G.subj, self.G.pred, self.G.obj]
self.G.insert_many(items, fields=fields).execute()
def delete(self, s, p, o):
return (self.G.delete()
.where(self.G.subj == s, self.G.pred == p, self.G.obj == o)
.execute())
def query(self, s=None, p=None, o=None):
fields = (self.G.subj, self.G.pred, self.G.obj)
expressions = [(f == v) for f, v in zip(fields, (s, p, o))
if v is not None]
return self.G.select().where(*expressions)
def search(self, *conditions):
accum = []
binds = {}
variables = set()
fields = {'s': 'subj', 'p': 'pred', 'o': 'obj'}
for i, condition in enumerate(conditions):
if isinstance(condition, dict):
condition = (condition['s'], condition['p'], condition['o'])
GA = self.G.alias('g%s' % i)
for part, val in zip('spo', condition):
if isinstance(val, Variable):
binds.setdefault(val, [])
binds[val].append(getattr(GA, fields[part]))
variables.add(val)
else:
accum.append(getattr(GA, fields[part]) == val)
selection = []
sources = set()
for var, fields in binds.items():
selection.append(fields[0].alias(var.name))
pairwise = [(fields[i - 1] == fields[i])
for i in range(1, len(fields))]
if pairwise:
accum.append(reduce(operator.and_, pairwise))
sources.update([field.source for field in fields])
return (self.G
.select(*selection)
.from_(*list(sources))
.where(*accum)
.dicts())
| Hexastore |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/cyaml.py | {
"start": 710,
"end": 1301
} | class ____(CParser, BaseConstructor, BaseResolver): # type: ignore
def __init__(self, stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
CParser.__init__(self, stream)
self._parser = self._composer = self
BaseConstructor.__init__(self, loader=self)
BaseResolver.__init__(self, loadumper=self)
# self.descend_resolver = self._resolver.descend_resolver
# self.ascend_resolver = self._resolver.ascend_resolver
# self.resolve = self._resolver.resolve
| CBaseLoader |
python | celery__celery | t/unit/utils/test_serialization.py | {
"start": 910,
"end": 1283
} | class ____:
def test_json_py3(self):
expected = (1, "<class 'object'>")
actual = ensure_serializable([1, object], encoder=json.dumps)
assert expected == actual
def test_pickle(self):
expected = (1, object)
actual = ensure_serializable(expected, encoder=pickle.dumps)
assert expected == actual
| test_ensure_serializable |
python | doocs__leetcode | solution/2400-2499/2450.Number of Distinct Binary Strings After Applying Operations/Solution.py | {
"start": 0,
"end": 127
} | class ____:
def countDistinctStrings(self, s: str, k: int) -> int:
return pow(2, len(s) - k + 1) % (10**9 + 7)
| Solution |
python | spack__spack | lib/spack/spack/vendor/jsonschema/exceptions.py | {
"start": 3771,
"end": 4014
} | class ____(_Error):
"""
A schema was invalid under its corresponding metaschema.
"""
_word_for_schema_in_error_message = "metaschema"
_word_for_instance_in_error_message = "schema"
@spack.vendor.attr.s(hash=True)
| SchemaError |
python | conda__conda | conda/plugins/reporter_backends/console.py | {
"start": 900,
"end": 1165
} | class ____(ProgressBarBase):
"""
Progress bar class used when no output should be printed
"""
def update_to(self, fraction) -> None:
pass
def refresh(self) -> None:
pass
def close(self) -> None:
pass
| QuietProgressBar |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-operations-to-move-ones-to-the-end.py | {
"start": 413,
"end": 769
} | class ____(object):
def maxOperations(self, s):
"""
:type s: str
:rtype: int
"""
result = curr = 0
for i in xrange(len(s)):
if s[i] != '1':
continue
curr += 1
if i+1 < len(s) and s[i+1] == '0':
result += curr
return result
| Solution2 |
python | Textualize__textual | src/textual/css/transition.py | {
"start": 32,
"end": 417
} | class ____(NamedTuple):
duration: float = 1.0
easing: str = "linear"
delay: float = 0.0
def __str__(self) -> str:
duration, easing, delay = self
if delay:
return f"{duration:.1f}s {easing} {delay:.1f}"
elif easing != "linear":
return f"{duration:.1f}s {easing}"
else:
return f"{duration:.1f}s"
| Transition |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 532514,
"end": 541431
} | class ____:
def __abs__(self):
if self.is_empty or self.is_infinite:
return 0.0
return (self.x1 - self.x0) * (self.y1 - self.y0)
def __add__(self, p):
if hasattr(p, "__float__"):
return Rect(self.x0 + p, self.y0 + p, self.x1 + p, self.y1 + p)
if len(p) != 4:
raise ValueError("Rect: bad seq len")
return Rect(self.x0 + p[0], self.y0 + p[1], self.x1 + p[2], self.y1 + p[3])
def __and__(self, x):
if not hasattr(x, "__len__"):
raise ValueError("bad operand 2")
r1 = Rect(x)
r = Rect(self)
return r.intersect(r1)
def __bool__(self):
return not (max(self) == min(self) == 0)
def __contains__(self, x):
if hasattr(x, "__float__"):
return x in tuple(self)
l = len(x)
if l == 2:
return util_is_point_in_rect(x, self)
if l == 4:
r = INFINITE_RECT()
try:
r = Rect(x)
except Exception:
if g_exceptions_verbose > 1: exception_info()
r = Quad(x).rect
return (self.x0 <= r.x0 <= r.x1 <= self.x1 and
self.y0 <= r.y0 <= r.y1 <= self.y1)
return False
def __eq__(self, rect):
if not hasattr(rect, "__len__"):
return False
return len(rect) == 4 and not (self - rect)
def __getitem__(self, i):
return (self.x0, self.y0, self.x1, self.y1)[i]
def __hash__(self):
return hash(tuple(self))
def __init__(self, *args, p0=None, p1=None, x0=None, y0=None, x1=None, y1=None):
"""
Rect() - all zeros
Rect(x0, y0, x1, y1)
Rect(top-left, x1, y1)
Rect(x0, y0, bottom-right)
Rect(top-left, bottom-right)
Rect(Rect or IRect) - new copy
Rect(sequence) - from 'sequence'
Explicit keyword args p0, p1, x0, y0, x1, y1 override earlier settings
if not None.
"""
x0, y0, x1, y1 = util_make_rect( *args, p0=p0, p1=p1, x0=x0, y0=y0, x1=x1, y1=y1)
self.x0 = float( x0)
self.y0 = float( y0)
self.x1 = float( x1)
self.y1 = float( y1)
def __len__(self):
return 4
def __mul__(self, m):
if hasattr(m, "__float__"):
return Rect(self.x0 * m, self.y0 * m, self.x1 * m, self.y1 * m)
r = Rect(self)
r = r.transform(m)
return r
def __neg__(self):
return Rect(-self.x0, -self.y0, -self.x1, -self.y1)
def __nonzero__(self):
return not (max(self) == min(self) == 0)
def __or__(self, x):
if not hasattr(x, "__len__"):
raise ValueError("bad operand 2")
r = Rect(self)
if len(x) == 2:
return r.include_point(x)
if len(x) == 4:
return r.include_rect(x)
raise ValueError("bad operand 2")
def __pos__(self):
return Rect(self)
def __repr__(self):
return "Rect" + str(tuple(self))
def __setitem__(self, i, v):
v = float(v)
if i == 0: self.x0 = v
elif i == 1: self.y0 = v
elif i == 2: self.x1 = v
elif i == 3: self.y1 = v
else:
raise IndexError("index out of range")
return None
def __sub__(self, p):
if hasattr(p, "__float__"):
return Rect(self.x0 - p, self.y0 - p, self.x1 - p, self.y1 - p)
if len(p) != 4:
raise ValueError("Rect: bad seq len")
return Rect(self.x0 - p[0], self.y0 - p[1], self.x1 - p[2], self.y1 - p[3])
def __truediv__(self, m):
if hasattr(m, "__float__"):
return Rect(self.x0 * 1./m, self.y0 * 1./m, self.x1 * 1./m, self.y1 * 1./m)
im = util_invert_matrix(m)[1]
if not im:
raise ZeroDivisionError(f"Matrix not invertible: {m}")
r = Rect(self)
r = r.transform(im)
return r
@property
def bottom_left(self):
"""Bottom-left corner."""
return Point(self.x0, self.y1)
@property
def bottom_right(self):
"""Bottom-right corner."""
return Point(self.x1, self.y1)
def contains(self, x):
"""Check if containing point-like or rect-like x."""
return self.__contains__(x)
@property
def height(self):
return max(0, self.y1 - self.y0)
def get_area(self, *args) -> float:
"""Calculate area of rectangle.\nparameter is one of 'px' (default), 'in', 'cm', or 'mm'."""
return _rect_area(self.width, self.height, args)
def include_point(self, p):
"""Extend to include point-like p."""
if len(p) != 2:
raise ValueError("Point: bad seq len")
self.x0, self.y0, self.x1, self.y1 = util_include_point_in_rect(self, p)
return self
def include_rect(self, r):
"""Extend to include rect-like r."""
if len(r) != 4:
raise ValueError("Rect: bad seq len")
r = Rect(r)
if r.is_infinite or self.is_infinite:
self.x0, self.y0, self.x1, self.y1 = FZ_MIN_INF_RECT, FZ_MIN_INF_RECT, FZ_MAX_INF_RECT, FZ_MAX_INF_RECT
elif r.is_empty:
return self
elif self.is_empty:
self.x0, self.y0, self.x1, self.y1 = r.x0, r.y0, r.x1, r.y1
else:
self.x0, self.y0, self.x1, self.y1 = util_union_rect(self, r)
return self
def intersect(self, r):
"""Restrict to common rect with rect-like r."""
if not len(r) == 4:
raise ValueError("Rect: bad seq len")
r = Rect(r)
if r.is_infinite:
return self
elif self.is_infinite:
self.x0, self.y0, self.x1, self.y1 = r.x0, r.y0, r.x1, r.y1
elif r.is_empty:
self.x0, self.y0, self.x1, self.y1 = r.x0, r.y0, r.x1, r.y1
elif self.is_empty:
return self
else:
self.x0, self.y0, self.x1, self.y1 = util_intersect_rect(self, r)
return self
def intersects(self, x):
"""Check if intersection with rectangle x is not empty."""
rect2 = Rect(x)
return (1
and not self.is_empty
and not self.is_infinite
and not rect2.is_empty
and not rect2.is_infinite
and self.x0 < rect2.x1
and rect2.x0 < self.x1
and self.y0 < rect2.y1
and rect2.y0 < self.y1
)
@property
def is_empty(self):
"""True if rectangle area is empty."""
return self.x0 >= self.x1 or self.y0 >= self.y1
@property
def is_infinite(self):
"""True if this is the infinite rectangle."""
return self.x0 == self.y0 == FZ_MIN_INF_RECT and self.x1 == self.y1 == FZ_MAX_INF_RECT
@property
def is_valid(self):
"""True if rectangle is valid."""
return self.x0 <= self.x1 and self.y0 <= self.y1
def morph(self, p, m):
"""Morph with matrix-like m and point-like p.
Returns a new quad."""
if self.is_infinite:
return INFINITE_QUAD()
return self.quad.morph(p, m)
def norm(self):
return math.sqrt(sum([c*c for c in self]))
def normalize(self):
"""Replace rectangle with its finite version."""
if self.x1 < self.x0:
self.x0, self.x1 = self.x1, self.x0
if self.y1 < self.y0:
self.y0, self.y1 = self.y1, self.y0
return self
@property
def quad(self):
"""Return Quad version of rectangle."""
return Quad(self.tl, self.tr, self.bl, self.br)
def round(self):
"""Return the IRect."""
return IRect(util_round_rect(self))
@property
def top_left(self):
"""Top-left corner."""
return Point(self.x0, self.y0)
@property
def top_right(self):
"""Top-right corner."""
return Point(self.x1, self.y0)
def torect(self, r):
"""Return matrix that converts to target rect."""
r = Rect(r)
if self.is_infinite or self.is_empty or r.is_infinite or r.is_empty:
raise ValueError("rectangles must be finite and not empty")
return (
Matrix(1, 0, 0, 1, -self.x0, -self.y0)
* Matrix(r.width / self.width, r.height / self.height)
* Matrix(1, 0, 0, 1, r.x0, r.y0)
)
def transform(self, m):
"""Replace with the transformation by matrix-like m."""
if not len(m) == 6:
raise ValueError("Matrix: bad seq len")
self.x0, self.y0, self.x1, self.y1 = util_transform_rect(self, m)
return self
@property
def width(self):
return max(0, self.x1 - self.x0)
__div__ = __truediv__
bl = bottom_left
br = bottom_right
irect = property(round)
tl = top_left
tr = top_right
| Rect |
python | sphinx-doc__sphinx | sphinx/domains/python/__init__.py | {
"start": 9097,
"end": 9381
} | class ____(PyMethod):
"""Description of a staticmethod."""
option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()
def run(self) -> list[Node]:
self.name = 'py:method'
self.options['staticmethod'] = True
return super().run()
| PyStaticMethod |
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 19545,
"end": 21468
} | class ____(nn.Module):
def __init__(self, config, use_flexible_linear=False):
super().__init__()
self.activation_fn = ACT2FN[config.hidden_act]
ffn_dim = config.ffn_dim
hidden_size = config.hidden_size
num_layers = config.num_codebooks if use_flexible_linear else 1
if num_layers == 1:
self.fc1 = nn.Linear(hidden_size, ffn_dim, bias=False)
self.fc2 = nn.Linear(ffn_dim // 2, hidden_size, bias=False)
else:
self.fc1 = MoshiFlexibleLinear(hidden_size, ffn_dim, num_layers)
self.fc2 = MoshiFlexibleLinear(ffn_dim // 2, hidden_size, num_layers)
def forward(self, hidden_states: torch.Tensor, layer_idx: Optional[int] = None) -> torch.Tensor:
hidden_states = self.fc1(hidden_states) if layer_idx is None else self.fc1(hidden_states, layer_idx)
batch_size, sequence_length, _ = hidden_states.shape
hidden_states = hidden_states.view(batch_size, sequence_length, 2, -1)
hidden_states = self.activation_fn(hidden_states[..., 0, :]) * hidden_states[..., 1, :]
hidden_states = self.fc2(hidden_states) if layer_idx is None else self.fc2(hidden_states, layer_idx)
return hidden_states
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
| MoshiGatingMLP |
python | uqfoundation__dill | setup.py | {
"start": 3112,
"end": 4585
} | class ____(Distribution):
"""Distribution which forces a binary package with platform name"""
def has_ext_modules(foo):
return True
# define dependencies
ctypes_version = 'ctypes>=1.0.1'
objgraph_version = 'objgraph>=1.7.2'
gprof2dot_version = 'gprof2dot>=2022.7.29'
pyreadline_version = 'pyreadline>=1.7.1'
# add dependencies
depend = [ctypes_version]
if sys.platform[:3] == 'win':
extras = {'readline': [pyreadline_version], 'graph': [objgraph_version], 'profile': [gprof2dot_version]}
else:
extras = {'readline': [], 'graph': [objgraph_version], 'profile': [gprof2dot_version]}
# update setup kwds
if has_setuptools:
setup_kwds.update(
zip_safe=False,
# distclass=BinaryDistribution,
# install_requires=depend,
extras_require=extras,
)
# call setup
setup(**setup_kwds)
# if dependencies are missing, print a warning
try:
pass
#import ctypes
#import objgraph
#import gprof2dot
#import readline
except ImportError:
print ("\n***********************************************************")
print ("WARNING: One of the following dependencies is unresolved:")
# print (" %s" % ctypes_version)
print (" %s (optional)" % objgraph_version)
print (" %s (optional)" % gprof2dot_version)
if sys.platform[:3] == 'win':
print (" %s (optional)" % pyreadline_version)
print ("***********************************************************\n")
| BinaryDistribution |
python | vyperlang__vyper | vyper/abi_types.py | {
"start": 5932,
"end": 6475
} | class ____(ABIType):
def __init__(self, subtyps):
self.subtyps = subtyps
def is_dynamic(self):
return any([t.is_dynamic() for t in self.subtyps])
def static_size(self):
return sum([t.embedded_static_size() for t in self.subtyps])
def dynamic_size_bound(self):
return sum([t.embedded_dynamic_size_bound() for t in self.subtyps])
def is_complex_type(self):
return True
def selector_name(self):
return "(" + ",".join(t.selector_name() for t in self.subtyps) + ")"
| ABI_Tuple |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_rds.py | {
"start": 4526,
"end": 6735
} | class ____:
@classmethod
def setup_class(cls):
cls.dag = DAG(
dag_id="test_dag",
schedule=None,
default_args={"owner": "airflow", "start_date": DEFAULT_DATE},
)
cls.hook = RdsHook()
@classmethod
def teardown_class(cls):
del cls.dag
del cls.hook
def test_template_fields(self):
sensor = RdsSnapshotExistenceSensor(
task_id="test_template_fields",
db_type="instance",
db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
aws_conn_id=AWS_CONN,
region_name="us-east-1",
)
validate_template_fields(sensor)
@mock_aws
def test_db_instance_snapshot_poke_true(self):
_create_db_instance_snapshot(self.hook)
op = RdsSnapshotExistenceSensor(
task_id="test_instance_snap_true",
db_type="instance",
db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
aws_conn_id=AWS_CONN,
dag=self.dag,
)
assert op.poke(None)
@mock_aws
def test_db_instance_snapshot_poke_false(self):
_create_db_instance(self.hook)
op = RdsSnapshotExistenceSensor(
task_id="test_instance_snap_false",
db_type="instance",
db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
aws_conn_id=AWS_CONN,
dag=self.dag,
)
assert not op.poke(None)
@mock_aws
def test_db_instance_cluster_poke_true(self):
_create_db_cluster_snapshot(self.hook)
op = RdsSnapshotExistenceSensor(
task_id="test_cluster_snap_true",
db_type="cluster",
db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
aws_conn_id=AWS_CONN,
dag=self.dag,
)
assert op.poke(None)
@mock_aws
def test_db_instance_cluster_poke_false(self):
op = RdsSnapshotExistenceSensor(
task_id="test_cluster_snap_false",
db_type="cluster",
db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
aws_conn_id=AWS_CONN,
dag=self.dag,
)
assert not op.poke(None)
| TestRdsSnapshotExistenceSensor |
python | spack__spack | lib/spack/spack/llnl/util/lock.py | {
"start": 28546,
"end": 28626
} | class ____(Exception):
"""Raised for any errors related to locks."""
| LockError |
python | pytorch__pytorch | torch/_dynamo/variables/torch.py | {
"start": 11033,
"end": 18223
} | class ____(BaseTorchVariable):
"""Points to a context manager class in torch.* that dynamo has implementations"""
def __repr__(self) -> str:
return f"TorchCtxManagerClassVariable({self.value})"
@staticmethod
def is_matching_cls(value):
# Unwrap if it's a functools.lru_cache wrapper
value = unwrap_if_wrapper(value)
# We can't do isinstance(value, type) check because some ctx managers
# are implemented as a function decorated by contextlib.contextmanager,
# E.g., torch._functorch.vmap.vmap_increment_nesting.
return (
# Context manager type or function with @contextmanager is callable
callable(value)
and (
hashable(value) # accesses value.__hash__()
and value in supported_ctx_manager_classes
)
)
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
from . import (
DisabledSavedTensorsHooksVariable,
DualLevelContextManager,
FSDPParamGroupUseTrainingStateVariable,
FxTracebackAnnotateVariable,
GradIncrementNestingCtxManagerVariable,
GradInplaceRequiresGradCtxManagerVariable,
GradModeVariable,
InferenceModeVariable,
JvpIncrementNestingCtxManagerVariable,
SDPAKernelVariable,
SetFwdGradEnabledContextManager,
StreamVariable,
VmapIncrementNestingCtxManagerVariable,
)
if self.value is torch.no_grad:
if len(args) == 1 and isinstance(
args[0], variables.functions.BaseUserFunctionVariable
):
ctx = GradModeVariable.create(tx, False)
return ctx.call_function(tx, args, kwargs)
else:
return GradModeVariable.create(tx, False)
elif self.value is torch.enable_grad:
if len(args) == 1 and isinstance(
args[0], variables.functions.BaseUserFunctionVariable
):
ctx = GradModeVariable.create(tx, True)
return ctx.call_function(tx, args, kwargs)
return GradModeVariable.create(tx, True)
elif self.value is torch.set_grad_enabled and len(args) == 1:
return GradModeVariable.create(
tx, args[0].as_python_constant(), initialized=True
)
elif self.value is torch.inference_mode:
assert len(args) <= 1 and len(kwargs) == 0
inf_mode = args[0].as_python_constant() if len(args) == 1 else True
return InferenceModeVariable.create(tx, inf_mode)
elif self.value in (
torch.fx.traceback.annotate,
torch.fx.traceback.annotate.__wrapped__, # type: ignore[attr-defined]
):
assert len(args) <= 1 and len(kwargs) == 0
return FxTracebackAnnotateVariable(
args[0].as_python_constant(), source=self.source
)
elif inspect.isclass(self.value) and issubclass(self.value, torch.Stream):
from torch._dynamo.variables.builder import wrap_fx_proxy_cls
return wrap_fx_proxy_cls(
StreamVariable,
tx,
tx.output.create_proxy(
"call_function",
self.value,
(),
{},
),
)
elif self.value in (
torch.amp.autocast_mode.autocast,
torch.cuda.amp.autocast,
torch.cpu.amp.autocast,
):
# pyrefly: ignore [bad-argument-type]
return AutocastModeVariable.create(self.value, args, kwargs)
elif self.value in (
# NOTE any class added here must align with the semantic
# requirements of `ProfilerContextVariable`.
torch.profiler.profile,
torch.profiler.record_function,
torch.autograd.profiler.profile,
torch.autograd.profiler.record_function,
):
warning_once(log, "Profiler function %s will be ignored", self.value)
return ProfilerContextVariable()
elif (
self.value is torch._C.DisableTorchFunctionSubclass
or self.value is torch._C.DisableTorchFunction
):
assert not (args or kwargs)
return TorchFunctionDisableVariable.create(
tx, only_subclass=self.value is torch._C.DisableTorchFunctionSubclass
)
elif self.value is torch._functorch.vmap.vmap_increment_nesting:
assert len(args) == 2
return VmapIncrementNestingCtxManagerVariable.create(
tx,
args,
)
elif self.value is torch._functorch.eager_transforms.jvp_increment_nesting:
assert len(args) == 0
return JvpIncrementNestingCtxManagerVariable.create(tx)
elif self.value is torch.autograd.forward_ad._set_fwd_grad_enabled:
assert len(args) == 1
return SetFwdGradEnabledContextManager.create(
tx,
[guard_if_dyn(x) for x in args],
)
elif self.value is torch.autograd.forward_ad.dual_level:
assert len(args) == 0
return DualLevelContextManager.create(tx)
elif self.value is torch._functorch.eager_transforms.grad_increment_nesting:
assert len(args) == 0
return GradIncrementNestingCtxManagerVariable.create(tx)
elif (
self.value is torch._functorch.eager_transforms.enable_inplace_requires_grad
):
assert len(args) == 1
return GradInplaceRequiresGradCtxManagerVariable.create(
tx,
[guard_if_dyn(x) for x in args],
)
elif self.value is torch.autograd.graph.disable_saved_tensors_hooks:
assert len(args) == 1
return DisabledSavedTensorsHooksVariable.create(
tx, args[0].as_python_constant()
)
elif (
_fsdp_param_group is not None
and self.value is _fsdp_param_group.FSDPParamGroup.use_training_state
):
assert len(args) == 2
return FSDPParamGroupUseTrainingStateVariable.create(
tx, args[0], args[1].as_python_constant()
)
elif self.value is torch.nn.attention.sdpa_kernel.__wrapped__: # type: ignore[attr-defined]
name_to_arg_map = bind_args_cached(
# pyrefly: ignore[bad-argument-type]
self.value,
tx,
self.source,
args,
kwargs,
)
backends = name_to_arg_map["backends"].as_python_constant()
set_priority = name_to_arg_map["set_priority"].as_python_constant()
return SDPAKernelVariable.create(tx, backends, set_priority)
return super().call_function(tx, args, kwargs)
| TorchCtxManagerClassVariable |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/backfill.py | {
"start": 4534,
"end": 25419
} | class ____(
NamedTuple(
"_PartitionBackfill",
[
("backfill_id", str),
("status", BulkActionStatus),
("from_failure", bool),
("tags", Mapping[str, str]),
("backfill_timestamp", float),
("error", Optional[SerializableErrorInfo]),
("asset_selection", Optional[Sequence[AssetKey]]),
("title", Optional[str]),
("description", Optional[str]),
("run_config", Optional[Mapping[str, Any]]),
# fields that are only used by job backfills
("partition_set_origin", Optional[RemotePartitionSetOrigin]),
("partition_names", Optional[Sequence[str]]),
("last_submitted_partition_name", Optional[str]),
("reexecution_steps", Optional[Sequence[str]]),
# only used by asset backfills
("serialized_asset_backfill_data", Optional[str]),
("asset_backfill_data", Optional[AssetBackfillData]),
("failure_count", int),
("submitting_run_requests", Sequence[RunRequest]),
("reserved_run_ids", Sequence[str]),
("backfill_end_timestamp", Optional[float]),
],
),
):
def __new__(
cls,
backfill_id: str,
status: BulkActionStatus,
from_failure: bool,
tags: Optional[Mapping[str, str]],
backfill_timestamp: float,
error: Optional[SerializableErrorInfo] = None,
asset_selection: Optional[Sequence[AssetKey]] = None,
title: Optional[str] = None,
description: Optional[str] = None,
run_config: Optional[Mapping[str, Any]] = None,
partition_set_origin: Optional[RemotePartitionSetOrigin] = None,
partition_names: Optional[Sequence[str]] = None,
last_submitted_partition_name: Optional[str] = None,
reexecution_steps: Optional[Sequence[str]] = None,
serialized_asset_backfill_data: Optional[str] = None,
asset_backfill_data: Optional[AssetBackfillData] = None,
failure_count: Optional[int] = None,
submitting_run_requests: Optional[Sequence[RunRequest]] = None,
reserved_run_ids: Optional[Sequence[str]] = None,
backfill_end_timestamp: Optional[float] = None,
):
check.invariant(
not (asset_selection and reexecution_steps),
"Can't supply both an asset_selection and reexecution_steps to a PartitionBackfill.",
)
if serialized_asset_backfill_data is not None:
check.invariant(partition_set_origin is None)
check.invariant(partition_names is None)
check.invariant(last_submitted_partition_name is None)
check.invariant(reexecution_steps is None)
return super().__new__(
cls,
backfill_id=check.str_param(backfill_id, "backfill_id"),
status=check.inst_param(status, "status", BulkActionStatus),
from_failure=check.bool_param(from_failure, "from_failure"),
tags=check.opt_mapping_param(tags, "tags", key_type=str, value_type=str),
backfill_timestamp=check.float_param(backfill_timestamp, "backfill_timestamp"),
error=check.opt_inst_param(error, "error", SerializableErrorInfo),
asset_selection=check.opt_nullable_sequence_param(
asset_selection, "asset_selection", of_type=AssetKey
),
title=check_valid_title(title),
description=check.opt_str_param(description, "description"),
run_config=check.opt_mapping_param(run_config, "run_config", key_type=str),
partition_set_origin=check.opt_inst_param(
partition_set_origin, "partition_set_origin", RemotePartitionSetOrigin
),
partition_names=check.opt_nullable_sequence_param(
partition_names, "partition_names", of_type=str
),
last_submitted_partition_name=check.opt_str_param(
last_submitted_partition_name, "last_submitted_partition_name"
),
reexecution_steps=check.opt_nullable_sequence_param(
reexecution_steps, "reexecution_steps", of_type=str
),
serialized_asset_backfill_data=check.opt_str_param(
serialized_asset_backfill_data, "serialized_asset_backfill_data"
),
asset_backfill_data=check.opt_inst_param(
asset_backfill_data, "asset_backfill_data", AssetBackfillData
),
failure_count=check.opt_int_param(failure_count, "failure_count", 0),
submitting_run_requests=check.opt_sequence_param(
submitting_run_requests, "submitting_run_requests", of_type=RunRequest
),
reserved_run_ids=check.opt_sequence_param(
reserved_run_ids, "reserved_run_ids", of_type=str
),
backfill_end_timestamp=check.opt_float_param(
backfill_end_timestamp, "backfill_end_timestamp"
),
)
@property
def selector_id(self):
return self.partition_set_origin.get_selector_id() if self.partition_set_origin else None
@property
def is_asset_backfill(self) -> bool:
return (
self.serialized_asset_backfill_data is not None or self.asset_backfill_data is not None
)
def get_asset_backfill_data(self, asset_graph: BaseAssetGraph) -> AssetBackfillData:
if self.serialized_asset_backfill_data:
asset_backfill_data = AssetBackfillData.from_serialized(
self.serialized_asset_backfill_data, asset_graph, self.backfill_timestamp
)
elif self.asset_backfill_data:
asset_backfill_data = self.asset_backfill_data
else:
check.failed("Expected either serialized_asset_backfill_data or asset_backfill_data")
return asset_backfill_data
@property
def bulk_action_type(self) -> BulkActionType:
if self.is_asset_backfill:
return BulkActionType.MULTI_RUN_ASSET_ACTION
else:
return BulkActionType.PARTITION_BACKFILL
@property
def partition_set_name(self) -> Optional[str]:
if self.partition_set_origin is None:
return None
return self.partition_set_origin.partition_set_name
@property
def job_name(self) -> Optional[str]:
if self.is_asset_backfill:
return None
return (
job_name_for_partition_set_snap_name(self.partition_set_name)
if self.partition_set_name
else None
)
@property
def log_storage_prefix(self) -> Sequence[str]:
return ["backfill", self.backfill_id]
@property
def user(self) -> Optional[str]:
if self.tags:
return self.tags.get(USER_TAG)
return None
def is_valid_serialization(self, workspace: BaseWorkspaceRequestContext) -> bool:
if self.is_asset_backfill:
if self.serialized_asset_backfill_data:
return AssetBackfillData.is_valid_serialization(
self.serialized_asset_backfill_data,
workspace.asset_graph,
)
else:
return True
else:
return True
def get_backfill_status_per_asset_key(
self, workspace: BaseWorkspaceRequestContext
) -> Sequence[Union[PartitionedAssetBackfillStatus, UnpartitionedAssetBackfillStatus]]:
"""Returns a sequence of backfill statuses for each targeted asset key in the asset graph,
in topological order.
"""
if not self.is_valid_serialization(workspace):
return []
if self.is_asset_backfill:
asset_graph = workspace.asset_graph
try:
asset_backfill_data = self.get_asset_backfill_data(asset_graph)
except DagsterDefinitionChangedDeserializationError:
return []
return asset_backfill_data.get_backfill_status_per_asset_key(asset_graph)
else:
return []
def get_target_partitions_subset(
self, workspace: BaseWorkspaceRequestContext, asset_key: AssetKey
) -> Optional[PartitionsSubset]:
if not self.is_valid_serialization(workspace):
return None
if self.is_asset_backfill:
asset_graph = workspace.asset_graph
try:
asset_backfill_data = self.get_asset_backfill_data(asset_graph)
except DagsterDefinitionChangedDeserializationError:
return None
return asset_backfill_data.get_target_partitions_subset(asset_key)
else:
return None
def get_target_root_partitions_subset(
self, workspace: BaseWorkspaceRequestContext
) -> Optional[PartitionsSubset]:
if not self.is_valid_serialization(workspace):
return None
if self.is_asset_backfill:
asset_graph = workspace.asset_graph
try:
asset_backfill_data = self.get_asset_backfill_data(asset_graph)
except DagsterDefinitionChangedDeserializationError:
return None
return asset_backfill_data.get_target_root_partitions_subset(asset_graph)
else:
return None
def get_num_partitions(self, workspace: BaseWorkspaceRequestContext) -> Optional[int]:
if not self.is_valid_serialization(workspace):
return 0
if self.is_asset_backfill:
asset_graph = workspace.asset_graph
try:
asset_backfill_data = self.get_asset_backfill_data(asset_graph)
except DagsterDefinitionChangedDeserializationError:
return 0
return asset_backfill_data.get_num_partitions()
else:
if self.partition_names is None:
check.failed("Non-asset backfills should have a non-null partition_names field")
return len(self.partition_names)
def get_partition_names(
self, workspace: BaseWorkspaceRequestContext
) -> Optional[Sequence[str]]:
if not self.is_valid_serialization(workspace):
return []
if self.is_asset_backfill:
asset_graph = workspace.asset_graph
try:
asset_backfill_data = self.get_asset_backfill_data(asset_graph)
except DagsterDefinitionChangedDeserializationError:
return None
return asset_backfill_data.get_partition_names()
else:
if self.partition_names is None:
check.failed("Non-asset backfills should have a non-null partition_names field")
return self.partition_names
def get_num_cancelable(self) -> int:
"""This method is only valid for job backfills. It eturns the number of partitions that are have
not yet been requested by the backfill.
For asset backfills, returns 0.
"""
if self.is_asset_backfill:
return 0
if self.status != BulkActionStatus.REQUESTED:
return 0
if self.partition_names is None:
check.failed("Expected partition_names to not be None for job backfill")
checkpoint = self.last_submitted_partition_name
total_count = len(self.partition_names)
checkpoint_idx = (
self.partition_names.index(checkpoint) + 1
if checkpoint and checkpoint in self.partition_names
else 0
)
return max(0, total_count - checkpoint_idx)
def with_status(self, status):
check.inst_param(status, "status", BulkActionStatus)
return self._replace(status=status)
def with_partition_checkpoint(self, last_submitted_partition_name):
check.opt_str_param(last_submitted_partition_name, "last_submitted_partition_name")
return self._replace(last_submitted_partition_name=last_submitted_partition_name)
def with_submitting_run_requests(
self, submitting_run_requests: Sequence[RunRequest], reserved_run_ids: Sequence[str]
) -> "PartitionBackfill":
return self._replace(
submitting_run_requests=submitting_run_requests,
reserved_run_ids=reserved_run_ids,
)
def with_failure_count(self, new_failure_count: int):
return self._replace(failure_count=new_failure_count)
def with_error(self, error):
check.opt_inst_param(error, "error", SerializableErrorInfo)
return self._replace(error=error)
def with_end_timestamp(self, end_timestamp: float) -> "PartitionBackfill":
check.float_param(end_timestamp, "end_timestamp")
return self._replace(backfill_end_timestamp=end_timestamp)
def with_asset_backfill_data(
self,
asset_backfill_data: AssetBackfillData,
dynamic_partitions_store: DynamicPartitionsStore,
asset_graph: BaseAssetGraph,
) -> "PartitionBackfill":
is_backcompat = self.serialized_asset_backfill_data is not None
return self._replace(
serialized_asset_backfill_data=(
asset_backfill_data.serialize(
dynamic_partitions_store=dynamic_partitions_store, asset_graph=asset_graph
)
if is_backcompat
else None
),
asset_backfill_data=(asset_backfill_data if not is_backcompat else None),
)
@classmethod
def from_asset_partitions(
cls,
backfill_id: str,
asset_graph: BaseAssetGraph,
partition_names: Optional[Sequence[str]],
asset_selection: Sequence[AssetKey],
backfill_timestamp: float,
tags: Mapping[str, str],
dynamic_partitions_store: DynamicPartitionsStore,
all_partitions: bool,
title: Optional[str],
description: Optional[str],
run_config: Optional[Mapping[str, Any]],
) -> "PartitionBackfill":
"""If all the selected assets that have PartitionsDefinitions have the same partitioning, then
the backfill will target the provided partition_names for all those assets.
Otherwise, the backfill must consist of a partitioned "anchor" asset and a set of other
assets that descend from it. In that case, the backfill will target the partition_names of
the anchor asset, as well as all partitions of other selected assets that are downstream
of those partitions of the anchor asset.
"""
asset_backfill_data = AssetBackfillData.from_asset_partitions(
asset_graph=asset_graph,
partition_names=partition_names,
asset_selection=asset_selection,
dynamic_partitions_store=dynamic_partitions_store,
all_partitions=all_partitions,
backfill_start_timestamp=backfill_timestamp,
)
return cls(
backfill_id=backfill_id,
status=BulkActionStatus.REQUESTED,
from_failure=False,
tags=tags,
backfill_timestamp=backfill_timestamp,
asset_selection=asset_selection,
serialized_asset_backfill_data=None,
asset_backfill_data=asset_backfill_data,
title=title,
description=description,
run_config=run_config,
)
@classmethod
def from_partitions_by_assets(
cls,
backfill_id: str,
asset_graph: BaseAssetGraph,
backfill_timestamp: float,
tags: Mapping[str, str],
dynamic_partitions_store: DynamicPartitionsStore,
partitions_by_assets: Sequence[PartitionsByAssetSelector],
title: Optional[str],
description: Optional[str],
run_config: Optional[Mapping[str, Any]],
):
asset_backfill_data = AssetBackfillData.from_partitions_by_assets(
asset_graph=asset_graph,
dynamic_partitions_store=dynamic_partitions_store,
backfill_start_timestamp=backfill_timestamp,
partitions_by_assets=partitions_by_assets,
)
return cls(
backfill_id=backfill_id,
status=BulkActionStatus.REQUESTED,
from_failure=False,
tags=tags,
backfill_timestamp=backfill_timestamp,
serialized_asset_backfill_data=None,
asset_backfill_data=asset_backfill_data,
asset_selection=[selector.asset_key for selector in partitions_by_assets],
title=title,
description=description,
run_config=run_config,
)
@classmethod
def from_asset_graph_subset(
cls,
backfill_id: str,
backfill_timestamp: float,
tags: Mapping[str, str],
dynamic_partitions_store: DynamicPartitionsStore,
asset_graph_subset: AssetGraphSubset,
title: Optional[str],
description: Optional[str],
run_config: Optional[Mapping[str, Any]],
):
asset_backfill_data = AssetBackfillData.from_asset_graph_subset(
asset_graph_subset=asset_graph_subset,
dynamic_partitions_store=dynamic_partitions_store,
backfill_start_timestamp=backfill_timestamp,
)
return cls(
backfill_id=backfill_id,
status=BulkActionStatus.REQUESTED,
from_failure=False,
tags=tags,
backfill_timestamp=backfill_timestamp,
serialized_asset_backfill_data=None,
asset_backfill_data=asset_backfill_data,
asset_selection=list(asset_graph_subset.asset_keys),
title=title,
description=description,
run_config=run_config,
)
def cancel_backfill_runs_and_cancellation_complete(
instance: "DagsterInstance", backfill_id: str, logger: logging.Logger
) -> bool:
"""Cancels all cancelable runs associated with the backfill_id. Ensures that
all runs for the backfill are in a terminal state before indicating that the backfill can be
marked CANCELED. Yields a boolean indicating the backfill can be considered canceled
(ie all runs are canceled).
"""
if not instance.run_coordinator:
check.failed("The instance must have a run coordinator in order to cancel runs")
canceled_any_runs = False
while True:
# Cancel all cancelable runs for the backfill in batches
# start with the queued runs since those will be faster to cancel
runs_to_cancel_in_iteration = instance.run_storage.get_runs(
filters=RunsFilter(
statuses=[DagsterRunStatus.QUEUED],
tags={
BACKFILL_ID_TAG: backfill_id,
},
),
limit=CANCELABLE_RUNS_BATCH_SIZE,
ascending=True,
)
if not runs_to_cancel_in_iteration:
# once all queued runs are canceled, cancel all other cancelable runs
runs_to_cancel_in_iteration = instance.run_storage.get_runs(
filters=RunsFilter(
statuses=CANCELABLE_RUN_STATUSES,
tags={
BACKFILL_ID_TAG: backfill_id,
},
),
limit=CANCELABLE_RUNS_BATCH_SIZE,
ascending=True,
)
if not runs_to_cancel_in_iteration:
break
canceled_any_runs = True
for run in runs_to_cancel_in_iteration:
run_id = run.run_id
logger.info(f"Terminating submitted run {run_id}")
# in both cases this will synchonrously set its status to CANCELING or CANCELED,
# ensuring that it will not be returned in the next loop
if run.status == DagsterRunStatus.QUEUED:
instance.report_run_canceling(
run,
message="Canceling run from the queue.",
)
instance.report_run_canceled(run)
else:
instance.run_launcher.terminate(run_id)
if canceled_any_runs:
# since we are canceling some runs in this iteration, we know that there is more work to do.
# Either cancelling more runs, or waiting for the canceled runs to get to a terminal state
return False
# If there are no runs to cancel, check if there are any runs still in progress. If there are,
# then we want to wait for them to reach a terminal state before the backfill is marked CANCELED.
run_waiting_to_cancel = instance.get_run_ids(
RunsFilter(
tags={BACKFILL_ID_TAG: backfill_id},
statuses=NOT_FINISHED_STATUSES,
),
limit=1,
)
work_done = len(run_waiting_to_cancel) == 0
return work_done
| PartitionBackfill |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_scalar_methods.py | {
"start": 4394,
"end": 5363
} | class ____(TestCase):
@parametrize("str_value", ["inf", "nan"])
@parametrize("code", np.typecodes["Float"])
def test_special(self, code, str_value):
cls = np.dtype(code).type
value = cls(str_value)
assert not value.is_integer()
@parametrize(
"code", "efd" + "Bbhil"
) # np.typecodes["Float"] + np.typecodes["AllInteger"])
def test_true(self, code: str) -> None:
float_array = np.arange(-5, 5).astype(code)
for value in float_array:
assert value.is_integer()
@parametrize("code", "bhil") # np.typecodes["Float"])
def test_false(self, code: str) -> None:
float_array = np.arange(-5, 5).astype(code)
float_array *= 1.1
for value in float_array:
if value == 0:
continue
assert not value.is_integer()
@skip(reason="XXX: implementation details of the type system differ")
@instantiate_parametrized_tests
| TestIsInteger |
python | walkccc__LeetCode | solutions/576. Out of Boundary Paths/576-2.py | {
"start": 0,
"end": 824
} | class ____:
def findPaths(
self,
m: int,
n: int,
maxMove: int,
startRow: int,
startColumn: int,
) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
MOD = 1_000_000_007
ans = 0
# dp[i][j] := the number of paths to move the ball (i, j) out-of-bounds
dp = [[0] * n for _ in range(m)]
dp[startRow][startColumn] = 1
for _ in range(maxMove):
newDp = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if dp[i][j] > 0:
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
ans = (ans + dp[i][j]) % MOD
else:
newDp[x][y] = (newDp[x][y] + dp[i][j]) % MOD
dp = newDp
return ans
| Solution |
python | pytorch__pytorch | torch/_higher_order_ops/schema.py | {
"start": 459,
"end": 791
} | class ____:
# Could give a name to the operand by default it's empty string.
name: str
example_value: Any
# Provide an default_value
default_value: Any
# Whether this argument gets mutated in the hop subgraph.
# For output, this should always be False
is_mutated: bool
kw_only: bool
| HopArgumentInfo |
python | neetcode-gh__leetcode | python/0021-merge-two-sorted-lists.py | {
"start": 624,
"end": 990
} | class ____:
def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
if not list1:
return list2
if not list2:
return list1
lil, big = (list1, list2) if list1.val < list2.val else (list2, list1)
lil.next = self.mergeTwoLists(lil.next, big)
return lil
| Solution |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 3690,
"end": 3945
} | class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, foo):
return torch.add(foo, foo)
@staticmethod
def backward(ctx, grad_output):
print("graph break!")
return grad_output
| CustomFuncBwdPrintGraphBreak |
python | plotly__plotly.py | plotly/graph_objs/splom/marker/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8539
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "splom.marker.colorbar"
_path_str = "splom.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.splom.marker.c
olorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.splom.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | Pylons__pyramid | tests/test_security.py | {
"start": 17477,
"end": 18097
} | class ____:
def __init__(self, result):
self.result = result
def effective_principals(self, request):
return self.result
def unauthenticated_userid(self, request):
return self.result
def authenticated_userid(self, request):
return self.result
def remember(self, request, userid, **kw):
headers = [(_TEST_HEADER, userid)]
self._header_remembered = headers[0]
return headers
def forget(self, request):
headers = [(_TEST_HEADER, 'logout')]
self._header_forgotten = headers[0]
return headers
| DummyAuthenticationPolicy |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 747722,
"end": 748983
} | class ____(Geometry):
"""
MultiPoint schema wrapper.
MultiPoint geometry object. https://tools.ietf.org/html/rfc7946#section-3.1.3
Parameters
----------
coordinates : Sequence[Sequence[float], :class:`Position`]
type : Literal['MultiPoint']
Specifies the type of GeoJSON object.
bbox : :class:`BBox`, Sequence[float]
Bounding box of the coordinate range of the object's Geometries, Features, or
Feature Collections. The value of the bbox member is an array of length 2*n where n
is the number of dimensions represented in the contained geometries, with all axes
of the most southwesterly point followed by all axes of the more northeasterly
point. The axes order of a bbox follows the axes order of geometries.
https://tools.ietf.org/html/rfc7946#section-5
"""
_schema = {"$ref": "#/definitions/MultiPoint"}
def __init__(
self,
coordinates: Optional[Sequence[SchemaBase | Sequence[float]]] = Undefined,
type: Optional[Literal["MultiPoint"]] = Undefined,
bbox: Optional[SchemaBase | Sequence[float]] = Undefined,
**kwds,
):
super().__init__(coordinates=coordinates, type=type, bbox=bbox, **kwds)
| MultiPoint |
python | jazzband__django-formtools | tests/wizard/namedwizardtests/forms.py | {
"start": 1599,
"end": 1708
} | class ____(ContactWizard):
storage_name = 'formtools.wizard.storage.cookie.CookieStorage'
| CookieContactWizard |
python | django__django | tests/admin_views/admin.py | {
"start": 29027,
"end": 29157
} | class ____(admin.ModelAdmin):
def view_on_site(self, obj):
return "/worker/%s/%s/" % (obj.surname, obj.name)
| WorkerAdmin |
python | protocolbuffers__protobuf | python/google/protobuf/internal/type_checkers.py | {
"start": 5012,
"end": 6355
} | class ____(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
global _BoolWarningCount
if type(proposed_value) == bool and _BoolWarningCount > 0:
_BoolWarningCount -= 1
message = (
'%.1024r has type %s, but expected one of: %s. This warning '
'will turn into error in 7.34.0, please fix it before that.'
% (
proposed_value,
type(proposed_value),
(int,),
)
)
# TODO: Raise errors in 2026 Q1 release
warnings.warn(message)
if not hasattr(proposed_value, '__index__') or (
type(proposed_value).__module__ == 'numpy' and
type(proposed_value).__name__ == 'ndarray'):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int,)))
raise TypeError(message)
if not self._MIN <= int(proposed_value) <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
# We force all values to int to make alternate implementations where the
# distinction is more significant (e.g. the C++ implementation) simpler.
proposed_value = int(proposed_value)
return proposed_value
def DefaultValue(self):
return 0
| IntValueChecker |
python | google__jax | tests/pallas/tpu_pallas_pipeline_test.py | {
"start": 3195,
"end": 6861
} | class ____(parameterized.TestCase):
def setUp(self):
if not jtu.is_device_tpu_at_least(5):
self.skipTest('Only works with TPU v5')
super().setUp()
def test_pipeline_without_inputs(self):
def kernel(o_hbm_ref):
def body(o_ref):
o_ref[...] = jnp.full(o_ref.shape, 42, dtype=o_ref.dtype)
pltpu.emit_pipeline(
body, grid=(4,), out_specs=pl.BlockSpec((8, 128), lambda i: (0, i))
)(o_hbm_ref)
out = pl.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((8, 512), jnp.int32),
out_specs=pl.BlockSpec(memory_space=pltpu.MemorySpace.ANY),
)()
np.testing.assert_allclose(out, jnp.full_like(out, 42))
@parameterized.product(
no_pipelining=[False, True],
)
def test_pipeline_matmul(self, no_pipelining):
k1, k2 = jax.random.split(jax.random.key(0))
x = jax.random.uniform(k1, (512, 512))
y = jax.random.uniform(k2, (512, 512))
def matmul_pipeline(x_ref, y_ref, z_ref):
@pl.when(pl.program_id(2) == 0)
def _():
z_ref[...] = jnp.zeros(z_ref.shape, jnp.float32)
z_ref[...] += x_ref[...] @ y_ref[...]
def matmul_kernel(x_ref, y_ref, z_ref):
pltpu.emit_pipeline(
matmul_pipeline,
grid=(4, 4, 4),
in_specs=[
pl.BlockSpec((128, 128), lambda i, j, k: (i, k)),
pl.BlockSpec((128, 128), lambda i, j, k: (k, j)),
],
out_specs=pl.BlockSpec((128, 128), lambda i, j, k: (i, j)),
no_pipelining=no_pipelining,
)(x_ref, y_ref, z_ref)
z = pl.pallas_call(
matmul_kernel,
out_shape=jax.ShapeDtypeStruct((512, 512), jnp.float32),
in_specs=[
pl.BlockSpec(memory_space=pltpu.ANY),
pl.BlockSpec(memory_space=pltpu.ANY),
],
out_specs=pl.BlockSpec(memory_space=pltpu.ANY),
)
jax.block_until_ready(z(x, y))
jax.block_until_ready(jnp.dot(x, y))
out = jax.block_until_ready(z(x, y))
expected_out = jax.block_until_ready(jnp.dot(x, y))
np.testing.assert_allclose(out, expected_out, atol=5e-5)
@parameterized.named_parameters(
('vmem', pltpu.VMEM),
('hbm', pltpu.ANY),
)
def test_double_pipeline_matmul(self, memory_space):
# TODO(b/358121809): Re-enable this test once the bug is fixed.
self.skipTest('Broken test.')
k1, k2 = jax.random.split(jax.random.key(0))
x = jax.random.uniform(k1, (512, 512))
y = jax.random.uniform(k2, (512, 512))
def matmul_pipeline(x_ref, y_ref, z_ref):
@pl.when(pl.program_id(2) == 0)
def _():
z_ref[...] = jnp.zeros(z_ref.shape, jnp.float32)
z_ref[...] += x_ref[...] @ y_ref[...]
def matmul_kernel(x_ref, y_ref, z_ref):
def emit_pipeline(should_accumulate_out):
pltpu.emit_pipeline(
matmul_pipeline,
grid=(4, 4, 4),
in_specs=[
pl.BlockSpec((128, 128), lambda i, j, k: (i, k)),
pl.BlockSpec((128, 128), lambda i, j, k: (k, j)),
],
out_specs=pl.BlockSpec((128, 128), lambda i, j, k: (i, j)),
should_accumulate_out=should_accumulate_out,
)(x_ref, y_ref, z_ref)
emit_pipeline(False)
emit_pipeline(True)
z = pl.pallas_call(
matmul_kernel,
out_shape=jax.ShapeDtypeStruct((512, 512), jnp.float32),
in_specs=[
pl.BlockSpec(memory_space=memory_space),
pl.BlockSpec(memory_space=memory_space),
],
out_specs=pl.BlockSpec(memory_space=memory_space),
)(x, y)
np.testing.assert_allclose(z, jnp.dot(x, y) + jnp.dot(x, y))
| PallasCallPipelineTest |
python | pandas-dev__pandas | asv_bench/benchmarks/sparse.py | {
"start": 2962,
"end": 3836
} | class ____:
params = ([0.1, 0.01], [0, np.nan])
param_names = ["dense_proportion", "fill_value"]
def setup(self, dense_proportion, fill_value):
N = 10**6
arr1 = make_array(N, dense_proportion, fill_value, np.int64)
self.array1 = SparseArray(arr1, fill_value=fill_value)
arr2 = make_array(N, dense_proportion, fill_value, np.int64)
self.array2 = SparseArray(arr2, fill_value=fill_value)
def time_make_union(self, dense_proportion, fill_value):
self.array1.sp_index.make_union(self.array2.sp_index)
def time_intersect(self, dense_proportion, fill_value):
self.array1.sp_index.intersect(self.array2.sp_index)
def time_add(self, dense_proportion, fill_value):
self.array1 + self.array2
def time_divide(self, dense_proportion, fill_value):
self.array1 / self.array2
| Arithmetic |
python | modin-project__modin | modin/pandas/accessor.py | {
"start": 5920,
"end": 6305
} | class ____(ClassLogger):
def __init__(self, name: str, accessor) -> None:
self._name = name
self._accessor = accessor
def __get__(self, obj, cls): # noqa: GL08
if obj is None:
return self._accessor
accessor_obj = self._accessor(obj)
object.__setattr__(obj, self._name, accessor_obj)
return accessor_obj
| CachedAccessor |
python | readthedocs__readthedocs.org | readthedocs/organizations/views/public.py | {
"start": 1373,
"end": 2264
} | class ____(FilterContextMixin, OrganizationView, DetailView):
"""Display information about an organization."""
template_name = "organizations/organization_detail.html"
admin_only = False
filterset_class = OrganizationProjectListFilterSet
strict = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
org = self.get_object()
projects = Project.objects.for_user(self.request.user).filter(organizations=org).all()
context["filter"] = self.get_filterset(
queryset=projects,
organization=org,
)
projects = self.get_filtered_queryset()
context["projects"] = projects
context["notifications"] = Notification.objects.for_user(
self.request.user,
resource=org,
)
return context
# Member Views
| DetailOrganization |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 99540,
"end": 103062
} | class ____(Response):
"""
Response of models.update_for_task endpoint.
:param id: ID of the model
:type id: str
:param created: Was the model created
:type created: bool
:param updated: Number of models updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "models"
_action = "update_for_task"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"created": {
"description": "Was the model created",
"type": ["boolean", "null"],
},
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"id": {"description": "ID of the model", "type": ["string", "null"]},
"updated": {
"description": "Number of models updated (0 or 1)",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
created: Optional[bool] = None,
updated: Optional[int] = None,
fields: Optional[dict] = None,
**kwargs: Any
) -> None:
super(UpdateForTaskResponse, self).__init__(**kwargs)
self.id = id
self.created = created
self.updated = updated
self.fields = fields
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("created")
def created(self) -> Optional[bool]:
return self._property_created
@created.setter
def created(self, value: Optional[bool]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", (bool,))
self._property_created = value
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
response_mapping = {
GetByIdRequest: GetByIdResponse,
GetByTaskIdRequest: GetByTaskIdResponse,
GetAllRequest: GetAllResponse,
UpdateForTaskRequest: UpdateForTaskResponse,
CreateRequest: CreateResponse,
EditRequest: EditResponse,
UpdateRequest: UpdateResponse,
SetReadyRequest: SetReadyResponse,
DeleteRequest: DeleteResponse,
MakePublicRequest: MakePublicResponse,
MakePrivateRequest: MakePrivateResponse,
}
| UpdateForTaskResponse |
python | encode__django-rest-framework | tests/test_generics.py | {
"start": 1979,
"end": 5483
} | class ____(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = RootView.as_view()
def test_get_root_view(self):
"""
GET requests to ListCreateAPIView should return list of objects.
"""
request = factory.get('/')
with self.assertNumQueries(1):
response = self.view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
def test_head_root_view(self):
"""
HEAD requests to ListCreateAPIView should return 200.
"""
request = factory.head('/')
with self.assertNumQueries(1):
response = self.view(request).render()
assert response.status_code == status.HTTP_200_OK
def test_post_root_view(self):
"""
POST requests to ListCreateAPIView should create a new object.
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
assert response.status_code == status.HTTP_201_CREATED
assert response.data == {'id': 4, 'text': 'foobar'}
created = self.objects.get(id=4)
assert created.text == 'foobar'
def test_put_root_view(self):
"""
PUT requests to ListCreateAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.put('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
assert response.data == {"detail": 'Method "PUT" not allowed.'}
def test_delete_root_view(self):
"""
DELETE requests to ListCreateAPIView should not be allowed
"""
request = factory.delete('/')
with self.assertNumQueries(0):
response = self.view(request).render()
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
assert response.data == {"detail": 'Method "DELETE" not allowed.'}
def test_post_cannot_set_id(self):
"""
POST requests to create a new object should not be able to set the id.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
assert response.status_code == status.HTTP_201_CREATED
assert response.data == {'id': 4, 'text': 'foobar'}
created = self.objects.get(id=4)
assert created.text == 'foobar'
def test_post_error_root_view(self):
"""
POST requests to ListCreateAPIView in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.post('/', data, HTTP_ACCEPT='text/html')
response = self.view(request).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
assert expected_error in response.rendered_content.decode()
EXPECTED_QUERIES_FOR_PUT = 2
| TestRootView |
python | pytorch__pytorch | tools/code_coverage/package/util/setting.py | {
"start": 613,
"end": 681
} | class ____(Enum):
CPP = "cxx_test"
PY = "python_test"
| TestType |
python | allegroai__clearml | clearml/storage/callbacks.py | {
"start": 199,
"end": 4972
} | class ____(object):
report_upload_chunk_size_mb = None
report_download_chunk_size_mb = None
def __init__(
self,
verbose: bool,
total_size: float,
log: logging.Logger,
report_chunk_size_mb: float,
description_prefix: Optional[str] = None,
description_suffix: Optional[str] = None,
max_time_between_reports_sec: float = 10.0,
report_start: Optional[bool] = None,
) -> None:
self.current_status_mb = 0.0
self.last_reported = 0.0
self._tic = time()
self._verbose = verbose
self._report_chunk_size = report_chunk_size_mb
self._log = log
self._log_flag = False
self._total_size = total_size
self._description_prefix = description_prefix
self._description_suffix = description_suffix
self._max_time_between_reports_sec = max_time_between_reports_sec
self._report_start = report_start if report_start is not None else bool(tqdm is not None)
self._tqdm = None
self._tqdm_init = False
def close(
self,
report_completed: bool = False,
report_summary: bool = False,
report_prefix: Optional[str] = None,
report_suffix: Optional[str] = None,
) -> None:
# call this one when we are done
if self._tqdm is not None:
# if we created a self._tqdm object we need to close it
if report_completed:
self._tqdm.update(self._tqdm.total - min(self._tqdm.total, self.last_reported))
self._tqdm.close()
self._tqdm = None
if report_summary:
self._log.info(
"{} {:.2f} MB successfully {}".format(
report_prefix or self._description_prefix,
self._total_size,
report_suffix or self._description_suffix,
).strip()
)
def _get_tqdm(self) -> Optional["tqdm.std.tqdm"]:
if self._tqdm_init:
return self._tqdm
self._tqdm_init = True
# create the tqdm progress bar
if tqdm:
# noinspection PyBroadException
try:
self._tqdm = tqdm(
total=round(float(self._total_size), 2),
# desc="{} {}".format(description_prefix, description_suffix).strip(),
unit="MB",
unit_scale=False,
ncols=80,
bar_format="{bar} {percentage:3.0f}% | {n:.2f}/{total_fmt} MB "
"[{elapsed}<{remaining}, {rate_fmt}{postfix}]: {desc}",
)
except Exception:
# failed initializing TQDM (maybe interface changed?)
self._tqdm = None
return self._tqdm
def __call__(self, chunk_size: float, *_: Any, **__: Any) -> None:
chunk_size /= 1024.0 * 1024.0
self.current_status_mb += chunk_size
last_part = self.current_status_mb - self.last_reported
if (
self._verbose
or (last_part >= self._report_chunk_size)
or (self.last_reported and self.current_status_mb >= self._total_size - 0.01)
or (time() - self._tic > self._max_time_between_reports_sec)
):
time_diff = time() - self._tic
self.speed = (last_part / time_diff) if time_diff != 0 else 0
self._report(self._total_size, self.current_status_mb, self.speed)
self._tic = time()
self.last_reported = self.current_status_mb
def _report(self, total_mb: float, current_mb: float, speed_mbps: float) -> None:
if self._report_start and self.last_reported <= 0:
# first time - print before initializing the tqdm bar
self._log.info(
"{}: {:.2f}MB {}".format(self._description_prefix, total_mb, self._description_suffix).strip(" :")
)
# initialize or reuse the bar
_tqdm = self._get_tqdm()
if _tqdm:
# make sure we do not spill over due to rounding
if round(float(current_mb), 2) >= _tqdm.total:
_tqdm.update(
max(min(_tqdm.total - getattr(_tqdm, "n", self.last_reported), _tqdm.total - self.last_reported), 0)
)
else:
_tqdm.update(current_mb - self.last_reported)
else:
self._log.info(
"{}: {:.2f}MB / {:.2f}MB @ {:.2f}MBs {}".format(
self._description_prefix,
current_mb,
total_mb,
speed_mbps,
self._description_suffix,
).strip(" :")
)
| ProgressReport |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 18461,
"end": 18558
} | class ____(BasePlace):
serves_hot_dogs = models.BooleanField(default=False)
| InheritedRestaurant |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 37859,
"end": 54863
} | class ____(UniformQuantizationObserverBase):
r"""
The module records the running histogram of tensor values along with
min/max values. ``calculate_qparams`` will calculate scale and zero_point.
Args:
bins: Number of bins to use for the histogram
dtype: dtype argument to the `quantize` node needed to implement the
reference model spec
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.
The scale and zero point are computed as follows:
1. Create the histogram of the incoming inputs.
The histogram is computed continuously, and the ranges per bin change
with every new tensor observed.
2. Search the distribution in the histogram for optimal min/max values.
The search for the min/max values ensures the minimization of the
quantization error with respect to the floating point model.
3. Compute the scale and zero point the same way as in the
:class:`~torch.ao.quantization.MinMaxObserver`
"""
histogram: torch.Tensor
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
bins: int = 2048,
dtype: torch.dtype = torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
eps=torch.finfo(torch.float32).eps,
is_dynamic=False,
**kwargs,
) -> None:
if not is_per_tensor(qscheme):
raise NotImplementedError(
"HistogramObserver's qscheme only support torch.per_tensor_symmetric \
and torch.per_tensor_affine."
)
if is_dynamic:
raise NotImplementedError(
"HistogramObserver doesn't support dynamic quantization"
)
# bins: The number of bins used for histogram calculation.
super().__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs,
eps=eps,
is_dynamic=is_dynamic,
**kwargs,
)
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
self.bins = bins
self.register_buffer("histogram", torch.zeros(self.bins, **factory_kwargs))
self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
self.dst_nbins = 2 ** torch.iinfo(self.dtype).bits
self.upsample_rate = (
16 # used to reduce quantization errors when upscaling histogram
)
def _get_norm(
self, delta_begin: torch.Tensor, delta_end: torch.Tensor, density: torch.Tensor
) -> torch.Tensor:
r"""
Compute the norm of the values uniformaly distributed between
delta_begin and delta_end.
Currently only L2 norm is supported.
norm = density * (integral_{begin, end} x^2)
= density * (end^3 - begin^3) / 3
"""
norm = (
delta_end * delta_end * delta_end - delta_begin * delta_begin * delta_begin
) / 3
return density * norm
def _compute_quantization_error(self, next_start_bin: int, next_end_bin: int):
r"""
Compute the quantization error if we use start_bin to end_bin as the
min and max to do the quantization.
"""
bin_width = (self.max_val.item() - self.min_val.item()) / self.bins
dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
if dst_bin_width == 0.0:
return 0.0
src_bin = torch.arange(self.bins, device=self.histogram.device)
# distances from the beginning of first dst_bin to the beginning and
# end of src_bin
src_bin_begin = (src_bin - next_start_bin) * bin_width
src_bin_end = src_bin_begin + bin_width
# which dst_bins the beginning and end of src_bin belong to?
dst_bin_of_begin = torch.clamp(
torch.div(src_bin_begin, dst_bin_width, rounding_mode="floor"),
0,
self.dst_nbins - 1,
)
dst_bin_of_begin_center = (dst_bin_of_begin + 0.5) * dst_bin_width
dst_bin_of_end = torch.clamp(
torch.div(src_bin_end, dst_bin_width, rounding_mode="floor"),
0,
self.dst_nbins - 1,
)
density = self.histogram / bin_width
norm = torch.zeros(self.bins, device=self.histogram.device)
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = dst_bin_width / 2
norm += self._get_norm(
delta_begin,
torch.ones(self.bins, device=self.histogram.device) * delta_end,
density,
)
norm += (dst_bin_of_end - dst_bin_of_begin - 1) * self._get_norm(
torch.tensor(-dst_bin_width / 2), torch.tensor(dst_bin_width / 2), density
)
dst_bin_of_end_center = dst_bin_of_end * dst_bin_width + dst_bin_width / 2
delta_begin = -dst_bin_width / 2
delta_end = src_bin_end - dst_bin_of_end_center
norm += self._get_norm(torch.tensor(delta_begin), delta_end, density)
return norm.sum().item()
def _non_linear_param_search(self) -> tuple[torch.Tensor, torch.Tensor]:
r"""Non-linear parameter search.
An approximation for L2 error minimization for selecting min/max.
By selecting new min/max, we filter out outliers in input distribution.
This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
caffe2/quantization/server/norm_minimization.cc
"""
if self.histogram.size()[0] != self.bins:
raise AssertionError("bins mismatch")
bin_width = (self.max_val - self.min_val) / self.bins
# cumulative sum
total = torch.sum(self.histogram).item()
cSum = torch.cumsum(self.histogram, dim=0)
stepsize = 1e-5 # granularity
alpha = 0.0 # lower bound
beta = 1.0 # upper bound
start_bin = 0
end_bin = self.bins - 1
norm_min = float("inf")
while alpha < beta:
# Find the next step
next_alpha = alpha + stepsize
next_beta = beta - stepsize
# find the left and right bins between the quantile bounds
l = start_bin
r = end_bin
while l < end_bin and cSum[l] < next_alpha * total:
l = l + 1
while r > start_bin and cSum[r] > next_beta * total:
r = r - 1
# decide the next move
next_start_bin = start_bin
next_end_bin = end_bin
if (l - start_bin) > (end_bin - r):
# move the start bin
next_start_bin = l
alpha = next_alpha
else:
# move the end bin
next_end_bin = r
beta = next_beta
if next_start_bin == start_bin and next_end_bin == end_bin:
continue
# calculate the quantization error using next_start_bin and next_end_bin
norm = self._compute_quantization_error(next_start_bin, next_end_bin)
if norm > norm_min:
break
norm_min = norm
start_bin = next_start_bin
end_bin = next_end_bin
new_min = self.min_val + bin_width * start_bin
new_max = self.min_val + bin_width * (end_bin + 1)
return new_min, new_max
def _upscale_histogram(
self,
histogram: torch.Tensor,
orig_min: torch.Tensor,
orig_max: torch.Tensor,
update_min: torch.Tensor,
update_max: torch.Tensor,
):
# this turns the histogram into a more fine-coarsed histogram to reduce
# bin quantization errors
histogram = histogram.repeat_interleave(self.upsample_rate) / self.upsample_rate
bin_size = (orig_max - orig_min) / (self.bins * self.upsample_rate)
mid_points_histogram = (
torch.linspace(
orig_min,
orig_max,
self.bins * self.upsample_rate + 1,
device=orig_min.device,
)[:-1].to(histogram.device)
+ 0.5 * bin_size
)
boundaries_new_histogram = torch.linspace(
update_min, update_max, self.bins + 1, device=update_min.device
).to(histogram.device)
# this maps the mid-points of the histogram to the new histogram's space
bucket_assignments = (
torch.bucketize(mid_points_histogram, boundaries_new_histogram, right=True)
- 1
)
# this then maps the histogram mid-points in the new space, weighted by the original histogram's values
# this is just the old histogram in the new histogram's space
# In case due to numerical issues the values land higher/lower than the maximum/minimum
bucket_assignments[bucket_assignments >= self.bins] = self.bins - 1
bucket_assignments[bucket_assignments < 0] = 0
update_histogram = torch.bincount(
bucket_assignments, weights=histogram, minlength=self.bins
)
return update_histogram
def _combine_histograms(
self,
orig_hist: torch.Tensor,
orig_min: torch.Tensor,
orig_max: torch.Tensor,
update_hist: torch.Tensor,
update_min: torch.Tensor,
update_max: torch.Tensor,
) -> torch.Tensor:
# If the new min and max are the same as the current min and max,
# we can just add the new histogram to the original histogram
if update_min == orig_min and update_max == orig_max:
return orig_hist + update_hist
# If the orig hist only has one value (i.e., the min and max are the same)
# we can just add it into new histogram
if orig_min == orig_max:
bin_value = torch.sum(orig_hist)
transformed_orig_hist = (
torch.histc(orig_min, bins=self.bins, min=update_min, max=update_max) # type: ignore[arg-type]
* bin_value
)
return transformed_orig_hist + update_hist
# We assume the update_hist is already in the target range, we will map the orig_max to it
if update_min > orig_min:
raise AssertionError("update_min must be <= orig_min")
if update_max < orig_max:
raise AssertionError("update_max must be >= orig_max")
# Now we need to turn the old_histogram, into the range of the new histogram
transformed_orig_hist = self._upscale_histogram(
orig_hist,
orig_min,
orig_max,
update_min,
update_max,
)
return update_hist + transformed_orig_hist
def reset_histogram(
self, x: torch.Tensor, min_val: torch.Tensor, max_val: torch.Tensor
) -> None:
self.min_val.resize_(min_val.shape)
self.min_val.copy_(min_val)
self.max_val.resize_(max_val.shape)
self.max_val.copy_(max_val)
if min_val.numel() != 1 or max_val.numel() != 1:
raise AssertionError("histogram min/max values must be scalar.")
new_histogram = torch.histc(x, self.bins, min=min_val, max=max_val) # type: ignore[arg-type]
self.histogram.detach_().resize_(new_histogram.shape)
self.histogram.copy_(new_histogram)
def forward(self, x_orig: torch.Tensor) -> torch.Tensor: # pyre-ignore[14]
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach()
x_min, x_max = torch.aminmax(x)
# want to ignore torch.inf since we don't actually
# want to make our quantization range infinite
# and in practice those values will be clamped
if x_min == -torch.inf or x_max == torch.inf:
warnings.warn(
"torch.inf detected in input tensor, ignoring input", stacklevel=2
)
x = x[x.abs() != torch.inf]
if x.numel() == 0:
return x_orig
x_min, x_max = torch.aminmax(x)
current_min = self.min_val
current_max = self.max_val
is_uninitialized = self.min_val == float("inf") or self.max_val == float("-inf")
if is_uninitialized:
self.reset_histogram(x, x_min, x_max)
else:
update_min, update_max = x_min, x_max
new_min = torch.min(current_min, update_min)
new_max = torch.max(current_max, update_max)
# TODO: For some reason, this is required for it to pass torchscript test
# new_min and new_max should already have requires_grad set to False
new_min, new_max = new_min.detach(), new_max.detach()
update_histogram = torch.histc(
x,
self.bins,
min=new_min, # type: ignore[arg-type]
max=new_max, # type: ignore[arg-type]
).to(self.histogram.device)
if new_min == current_min and new_max == current_max:
combined_histogram = self.histogram + update_histogram
self.histogram.detach_().resize_(combined_histogram.shape)
self.histogram.copy_(combined_histogram)
else:
combined_histogram = self._combine_histograms(
self.histogram,
current_min,
current_max,
update_histogram,
new_min,
new_max,
)
self.histogram.detach_().resize_(combined_histogram.shape)
self.histogram.copy_(combined_histogram)
self.min_val.detach_().resize_(new_min.shape)
self.min_val.copy_(new_min)
self.max_val.detach_().resize_(new_max.shape)
self.max_val.copy_(new_max)
return x_orig
@torch.jit.export
def calculate_qparams(self): # type: ignore[override]
is_uninitialized = self.min_val == float("inf") and self.max_val == float(
"-inf"
)
if is_uninitialized:
warnings.warn(
"must run observer before calling calculate_qparams.\
Returning default scale and zero point ",
stacklevel=2,
)
return torch.tensor([1.0], device=self.min_val.device.type), torch.tensor(
[0], device=self.min_val.device.type
)
if self.bins != len(self.histogram):
raise AssertionError(
"The number of bins in histogram should be equal to the number of bins "
"supplied while making this observer"
)
new_min, new_max = self._non_linear_param_search()
return self._calculate_qparams(new_min, new_max)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + "min_val"] = self.min_val
destination[prefix + "max_val"] = self.max_val
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 3:
# if min_val and max_val are not initialized, update their shape
# to account for the differences between v2 and v3
min_val_name, max_val_name = prefix + "min_val", prefix + "max_val"
if min_val_name in state_dict:
if state_dict[min_val_name].shape == torch.Size([0]):
state_dict[min_val_name] = torch.tensor(float("inf"))
if max_val_name in state_dict:
if state_dict[max_val_name].shape == torch.Size([0]):
state_dict[max_val_name] = torch.tensor(float("-inf"))
local_state = ["min_val", "max_val"]
for name in local_state:
key = prefix + name
if key in state_dict:
val = state_dict[key]
setattr(self, name, val)
elif strict:
missing_keys.append(key)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def extra_repr(self):
return f"min_val={self.min_val}, max_val={self.max_val}"
| HistogramObserver |
python | sympy__sympy | sympy/core/tests/test_expr.py | {
"start": 4458,
"end": 5255
} | class ____(DummyNumber):
number = 1.1
def __float__(self):
return self.number
i5 = I5()
f1_1 = F1_1()
# basic SymPy objects
basic_objs = [
Rational(2),
Float("1.3"),
x,
y,
pow(x, y)*y,
]
# all supported objects
all_objs = basic_objs + [
5,
5.5,
i5,
f1_1
]
def dotest(s):
for xo in all_objs:
for yo in all_objs:
s(xo, yo)
return True
def test_basic():
def j(a, b):
x = a
x = +a
x = -a
x = a + b
x = a - b
x = a*b
x = a/b
x = a**b
del x
assert dotest(j)
def test_ibasic():
def s(a, b):
x = a
x += b
x = a
x -= b
x = a
x *= b
x = a
x /= b
assert dotest(s)
| F1_1 |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 1185,
"end": 1236
} | class ____(Place, ParkingLot4):
pass
| ParkingLot4B |
python | getsentry__sentry | tests/sentry/ratelimits/test_leaky_bucket.py | {
"start": 281,
"end": 6518
class ____(TestCase):
    """Tests for LeakyBucketRateLimiter configured with burst_limit=5, drip_rate=2."""

    def setUp(self) -> None:
        # Fresh limiter per test: bucket holds 5, leaks 2 tokens/second.
        self.limiter = LeakyBucketRateLimiter(burst_limit=5, drip_rate=2)

    @pytest.fixture(autouse=True)
    def inject_fixtures(self, caplog: pytest.LogCaptureFixture) -> None:
        # Bridge pytest's caplog fixture into this unittest-style class.
        self._caplog = caplog

    def test_basic(self) -> None:
        with freeze_time("2077-09-13"):
            # burst limit is 5, so we should be able to get 5 requests in
            for _ in range(5):
                assert not self.limiter.is_limited("foo")
            # after 5 requests, we should be limited
            assert self.limiter.is_limited("foo")
            # subsequent requests should still be limited
            for _ in range(3):
                assert self.limiter.is_limited("foo")

    def test_incr_by(self) -> None:
        # 3 + 3 would exceed the burst of 5; 3 + 2 exactly fills it.
        with freeze_time("2077-09-13"):
            assert not self.limiter.is_limited("foo", incr_by=3)
            assert self.limiter.is_limited("foo", incr_by=3)
            assert not self.limiter.is_limited("foo", incr_by=2)
            assert self.limiter.is_limited("foo")

    def test_invalid_incr_by(self) -> None:
        # incr_by must be a positive integer; both 0 and a str are rejected.
        with pytest.raises(ValueError) as ex:
            self.limiter.is_limited("foo", incr_by=0)
        assert ex.value.args[0] == "incr_by must be an integer greater than 0"
        with pytest.raises(ValueError) as ex:
            self.limiter.is_limited("foo", incr_by="foo")  # type: ignore[arg-type]
        assert ex.value.args[0] == "incr_by must be an integer greater than 0"

    def test_default_key(self) -> None:
        # An explicit key argument takes precedence over the configured default.
        limiter = LeakyBucketRateLimiter(burst_limit=5, drip_rate=2, key="my_default_key")
        assert limiter._redis_key() == "leaky_bucket_limiter:my_default_key"
        assert limiter._redis_key("foo") == "leaky_bucket_limiter:foo"
        with mock.patch.object(limiter, "_redis_key", wraps=limiter._redis_key) as _redis_key_spy:
            limiter.is_limited()
            limiter.is_limited("foo")
            assert _redis_key_spy.call_args_list == [
                mock.call(None),
                mock.call("foo"),
            ]

    def test_key_required(self) -> None:
        # Without a key argument or configured default, is_limited must fail loudly.
        with pytest.raises(ValueError):
            self.limiter.is_limited()
        assert "Either key or default_key must be set" in self._caplog.text

    def test_drip_rate(self) -> None:
        with freeze_time("2077-09-13") as time_traveler:
            # exhaust the burst limit
            for _ in range(5):
                self.limiter.is_limited("foo")
            for i in range(1, 11):
                time_traveler.shift(0.1)
                if i % 5:  # at 10 reqs/sec, every 5th request should be allowed
                    assert self.limiter.is_limited("foo")
                else:
                    assert not self.limiter.is_limited("foo")

    def test_decorator(self) -> None:
        # Default decorator behavior: the wrapped call returns None when limited.
        @self.limiter("foo")
        def foo() -> Never:
            assert False, "This should not be executed when limited"

        with freeze_time("2077-09-13"):
            for _ in range(5):
                with pytest.raises(AssertionError):
                    foo()
            assert foo() is None

        # raise_exception=True: limited calls raise LimitExceeded instead.
        @self.limiter("bar", raise_exception=True)
        def bar() -> Never:
            assert False, "This should not be executed when limited"

        with freeze_time("2077-09-13"):
            for _ in range(5):
                with pytest.raises(AssertionError):
                    bar()
            with pytest.raises(self.limiter.LimitExceeded):
                bar()

        # limited_handler: limited calls return the callback's value instead.
        last_info: list[LeakyBucketLimitInfo] = []

        def callback(info: LeakyBucketLimitInfo, context: dict[str, Any]) -> str:
            last_info.append(info)
            return "rate limited"

        @self.limiter("baz", limited_handler=callback)
        def baz() -> str:
            return "normal value"

        with freeze_time("2077-09-13"):
            for i in range(5):
                assert baz() == "normal value"
                assert len(last_info) == 0
            baz_rv = baz()
            assert baz_rv == "rate limited"
            assert len(last_info) == 1
            info = last_info[0]
            assert info.wait_time > 0
            assert info.current_level == 5

    def test_decorator_default_key(self) -> None:
        # With no key configured, the decorated function's qualname is used.
        limiter = LeakyBucketRateLimiter(burst_limit=5, drip_rate=2)
        with mock.patch.object(limiter, "_redis_key", wraps=limiter._redis_key) as _redis_key_spy:

            @limiter()
            def foo() -> Any:
                pass

            foo()
            assert _redis_key_spy.call_args_list == [
                mock.call("LeakyBucketRateLimiterTest.test_decorator_default_key.<locals>.foo")
            ]

    def test_get_bucket_state(self) -> None:
        # Reading state is passive: the level only rises via is_limited calls.
        with freeze_time("2077-09-13"):
            info = self.limiter.get_bucket_state("foo")
            assert info.current_level == 0.0
            assert info.wait_time == 0.0
            for i in range(1, 6):
                self.limiter.is_limited("foo")
                info = self.limiter.get_bucket_state("foo")
                assert info.current_level == i

    def test_redis_failures(self) -> None:
        caplog = self._caplog
        with mock.patch("sentry.ratelimits.leaky_bucket.leaky_bucket_info") as lua_script:
            lua_script.side_effect = Exception("Boom")
            # fails open
            for _ in range(6):
                caplog.clear()
                assert not self.limiter.is_limited("foo")
                assert "Could not determine leaky bucket limiter state" in caplog.text
        with mock.patch.object(self.limiter, "client") as redis_client:
            redis_client.side_effect = Exception("Boom")
            caplog.clear()
            # get_bucket_state also fails open, reporting an empty bucket.
            info = self.limiter.get_bucket_state("foo")
            assert info.current_level == 0
            assert "Could not get bucket state" in caplog.text

    def test_validate(self) -> None:
        self.limiter.validate()
        with mock.patch.object(self.limiter, "client") as redis_client:
            redis_client.ping.side_effect = Exception("Boom")
            with pytest.raises(InvalidConfiguration):
                self.limiter.validate()
| LeakyBucketRateLimiterTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/repository.py | {
"start": 1197,
"end": 109601
class ____(ReadableDeploymentStorage):
    """
    Interact with files stored on GitHub repositories.
    """

    _block_type_name = "GitHub Repository"
    _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/41971cfecfea5f79ff334164f06ecb34d1038dd4-250x250.png"  # noqa: E501
    _documentation_url = "https://docs.prefect.io/integrations/prefect-github"  # noqa

    repository_url: str = Field(
        default=...,
        title="Repository URL",
        description=(
            "The URL of a GitHub repository to read from, in either HTTPS or SSH "
            "format. If you are using a private repo, it must be in the HTTPS format."
        ),
    )
    reference: Optional[str] = Field(
        default=None,
        description="An optional reference to pin to; can be a branch name or tag.",
    )
    credentials: Optional[GitHubCredentials] = Field(
        default=None,
        description="An optional GitHubCredentials block for using private GitHub repos.",  # noqa: E501
    )

    def _create_repo_url(self) -> str:
        """Format the URL provided to the `git clone` command.

        For private repos: https://<oauth-key>@github.com/<username>/<repo>.git
        All other repos should be the same as `self.repository`.
        """
        url_components = urlparse(self.repository_url)
        # Token auth is only injected for HTTPS URLs; SSH URLs pass through.
        if url_components.scheme == "https" and self.credentials is not None:
            token_value = self.credentials.token.get_secret_value()
            # NOTE(review): the token ends up in the clone URL and hence in the
            # git process's arguments — confirm this exposure is acceptable.
            updated_components = url_components._replace(
                netloc=f"{token_value}@{url_components.netloc}"
            )
            full_url = urlunparse(updated_components)
        else:
            full_url = self.repository_url
        return full_url

    @staticmethod
    def _get_paths(
        dst_dir: Union[str, None], src_dir: str, sub_directory: str
    ) -> Tuple[str, str]:
        """Returns the fully formed paths for GitHubRepository contents in the form
        (content_source, content_destination).
        """
        if dst_dir is None:
            # Default destination is the current working directory.
            content_destination = Path(".").absolute()
        else:
            content_destination = Path(dst_dir)

        content_source = Path(src_dir)

        # Mirror the sub-directory on both sides so relative layout is preserved.
        if sub_directory:
            content_destination = content_destination.joinpath(sub_directory)
            content_source = content_source.joinpath(sub_directory)

        return str(content_source), str(content_destination)

    @sync_compatible
    async def get_directory(
        self, from_path: Optional[str] = None, local_path: Optional[str] = None
    ) -> None:
        """
        Clones a GitHub project specified in `from_path` to the provided `local_path`;
        defaults to cloning the repository reference configured on the Block to the
        present working directory.

        Args:
            from_path: If provided, interpreted as a subdirectory of the underlying
                repository that will be copied to the provided local path.
            local_path: A local path to clone to; defaults to present working directory.

        Raises:
            RuntimeError: If the `git clone` subprocess exits non-zero.
        """
        # CONSTRUCT COMMAND
        cmd = f"git clone {self._create_repo_url()}"
        if self.reference:
            cmd += f" -b {self.reference}"
        # Limit git history
        cmd += " --depth 1"

        # Clone to a temporary directory and move the subdirectory over
        with TemporaryDirectory(suffix="prefect") as tmp_dir:
            tmp_path_str = tmp_dir
            # wrap the directory with quotes, because shlex removes windows-style slashes "//" - fixes issue 13180
            cmd += f' "{tmp_path_str}"'
            cmd = shlex.split(cmd)

            err_stream = io.StringIO()
            out_stream = io.StringIO()
            process = await run_process(cmd, stream_output=(out_stream, err_stream))
            if process.returncode != 0:
                err_stream.seek(0)
                raise RuntimeError(f"Failed to pull from remote:\n {err_stream.read()}")

            content_source, content_destination = self._get_paths(
                dst_dir=local_path, src_dir=tmp_path_str, sub_directory=from_path
            )

            # Merge into any existing destination rather than failing.
            shutil.copytree(
                src=content_source, dst=content_destination, dirs_exist_ok=True
            )
@task
async def query_repository(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    The query root of GitHub's GraphQL interface.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a repository
            referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build the repository(...) selection, prune to the requested fields,
    # then execute the operation against GitHub's GraphQL API.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    )

    op_stack = ("repository",)
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]
@task
async def query_repository_ref(  # noqa
    owner: str,
    name: str,
    qualified_name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Fetch a given ref from the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        qualified_name: The ref to retrieve. Fully qualified matches are
            checked in order (`refs/heads/master`) before falling back
            onto checks for short name matches (`master`).
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).ref(...), prune to requested fields, and execute.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).ref(
        **strip_kwargs(
            qualified_name=qualified_name,
        )
    )

    op_stack = (
        "repository",
        "ref",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["ref"]
@task
async def query_repository_refs(  # noqa
    owner: str,
    name: str,
    ref_prefix: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    query: Optional[str] = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    direction: graphql_schema.OrderDirection = None,
    order_by: graphql_schema.RefOrder = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Fetch a list of refs from the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        ref_prefix: A ref name prefix like `refs/heads/`, `refs/tags/`,
            etc.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        query: Filters refs with query on name.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before the
            specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        direction: DEPRECATED: use orderBy. The ordering direction.
        order_by: Ordering options for refs returned from the connection.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).refs(...); strip_kwargs drops None-valued
    # arguments so only explicitly-set filters reach the API.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).refs(
        **strip_kwargs(
            ref_prefix=ref_prefix,
            query=query,
            after=after,
            before=before,
            first=first,
            last=last,
            direction=direction,
            order_by=order_by,
        )
    )

    op_stack = (
        "repository",
        "refs",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["refs"]
@task
async def query_repository_owner(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    The User owner of the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).owner(), prune to requested fields, and execute.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).owner(**strip_kwargs())

    op_stack = (
        "repository",
        "owner",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["owner"]
@task
async def query_repository_forks(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    privacy: graphql_schema.RepositoryPrivacy = None,
    order_by: graphql_schema.RepositoryOrder = None,
    affiliations: Iterable[graphql_schema.RepositoryAffiliation] = None,
    owner_affiliations: Iterable[graphql_schema.RepositoryAffiliation] = (
        "OWNER",
        "COLLABORATOR",
    ),
    is_locked: Optional[bool] = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of direct forked repositories.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        privacy: If non-null, filters repositories according to privacy.
        order_by: Ordering options for repositories returned from the
            connection.
        affiliations: Array of viewer's affiliation options for
            repositories returned from the connection. For example,
            OWNER will include only repositories that the current viewer
            owns.
        owner_affiliations: Array of owner's affiliation options for
            repositories returned from the connection. For example,
            OWNER will include only repositories that the organization
            or user being viewed owns.
        is_locked: If non-null, filters repositories according to whether
            they have been locked.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before the
            specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).forks(...); strip_kwargs drops None filters.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).forks(
        **strip_kwargs(
            privacy=privacy,
            order_by=order_by,
            affiliations=affiliations,
            owner_affiliations=owner_affiliations,
            is_locked=is_locked,
            after=after,
            before=before,
            first=first,
            last=last,
        )
    )

    op_stack = (
        "repository",
        "forks",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["forks"]
@task
async def query_repository_issue(  # noqa
    owner: str,
    name: str,
    number: int,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Returns a single issue from the current repository by number.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        number: The number for the issue to be returned.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).issue(number), prune to requested fields, execute.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).issue(
        **strip_kwargs(
            number=number,
        )
    )

    op_stack = (
        "repository",
        "issue",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["issue"]
@task
async def query_repository_label(  # noqa
    owner: str,
    name: str,
    label_name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Returns a single label by name.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        label_name: Label name.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # label_name maps onto the GraphQL field's `name` argument (the function's
    # own `name` parameter is already taken by the repository name).
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).label(
        **strip_kwargs(
            name=label_name,
        )
    )

    op_stack = (
        "repository",
        "label",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["label"]
@task
async def query_repository_issues(  # noqa
    owner: str,
    name: str,
    labels: Iterable[str],
    states: Iterable[graphql_schema.IssueState],
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    order_by: graphql_schema.IssueOrder = None,
    filter_by: graphql_schema.IssueFilters = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of issues that have been opened in the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        labels: A list of label names to filter the pull requests by.
        states: A list of states to filter the issues by.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        order_by: Ordering options for issues returned from the
            connection.
        filter_by: Filtering options for issues returned from the
            connection.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before the
            specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).issues(...); strip_kwargs drops None filters.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).issues(
        **strip_kwargs(
            labels=labels,
            states=states,
            order_by=order_by,
            filter_by=filter_by,
            after=after,
            before=before,
            first=first,
            last=last,
        )
    )

    op_stack = (
        "repository",
        "issues",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["issues"]
@task
async def query_repository_labels(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    order_by: Optional[graphql_schema.LabelOrder] = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    query: Optional[str] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of labels associated with the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        order_by: Ordering options for labels returned from the
            connection; defaults to
            `{"field": "CREATED_AT", "direction": "ASC"}`.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before the
            specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        query: If provided, searches labels by name and description.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Fix: the previous signature used a mutable dict as the default value
    # (shared across all calls, ruff/bugbear B006). Substitute the documented
    # default here instead so each call gets a fresh dict.
    if order_by is None:
        order_by = {"field": "CREATED_AT", "direction": "ASC"}

    # Build repository(...).labels(...); strip_kwargs drops None filters.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).labels(
        **strip_kwargs(
            order_by=order_by,
            after=after,
            before=before,
            first=first,
            last=last,
            query=query,
        )
    )

    op_stack = (
        "repository",
        "labels",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["labels"]
@task
async def query_repository_object(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    oid: Optional[datetime] = None,
    expression: Optional[str] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A Git object in the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        oid: The Git object ID.
        expression: A Git revision expression suitable for rev-parse.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # NOTE(review): `oid` is annotated as datetime but documented as a Git
    # object ID — likely a schema-to-Python type-mapping artifact; confirm
    # against the GraphQL scalar before relying on the annotation.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).object(
        **strip_kwargs(
            oid=oid,
            expression=expression,
        )
    )

    op_stack = (
        "repository",
        "object",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["object"]
@task
async def query_repository_project(  # noqa
    owner: str,
    name: str,
    number: int,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Find project by number.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        number: The project number to find.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).project(number), prune to requested fields, execute.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).project(
        **strip_kwargs(
            number=number,
        )
    )

    op_stack = (
        "repository",
        "project",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["project"]
@task
async def query_repository_release(  # noqa
    owner: str,
    name: str,
    tag_name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Lookup a single release given various criteria.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        tag_name: The name of the Tag the Release was created from.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).release(tag_name), prune to requested fields, execute.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).release(
        **strip_kwargs(
            tag_name=tag_name,
        )
    )

    op_stack = (
        "repository",
        "release",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["release"]
@task
async def query_repository_projects(  # noqa
    owner: str,
    name: str,
    states: Iterable[graphql_schema.ProjectState],
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    order_by: graphql_schema.ProjectOrder = None,
    search: Optional[str] = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of projects under the owner.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        states: A list of states to filter the projects by.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        order_by: Ordering options for projects returned from the
            connection.
        search: Query to search projects by, currently only searching
            by name.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before the
            specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).projects(...); strip_kwargs drops None filters.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).projects(
        **strip_kwargs(
            states=states,
            order_by=order_by,
            search=search,
            after=after,
            before=before,
            first=first,
            last=last,
        )
    )

    op_stack = (
        "repository",
        "projects",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["projects"]
@task
async def query_repository_packages(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    names: Optional[Iterable[str]] = None,
    repository_id: Optional[str] = None,
    package_type: graphql_schema.PackageType = None,
    order_by: Optional[graphql_schema.PackageOrder] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of packages under the owner.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before the
            specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        names: Find packages by their names.
        repository_id: Find packages in a repository by ID.
        package_type: Filter registry package by type.
        order_by: Ordering of the returned packages; defaults to
            `{"field": "CREATED_AT", "direction": "DESC"}`.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Fix: the previous signature used a mutable dict as the default value
    # (shared across all calls, ruff/bugbear B006). Substitute the documented
    # default here instead so each call gets a fresh dict.
    if order_by is None:
        order_by = {
            "field": "CREATED_AT",
            "direction": "DESC",
        }

    # Build repository(...).packages(...); strip_kwargs drops None filters.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).packages(
        **strip_kwargs(
            after=after,
            before=before,
            first=first,
            last=last,
            names=names,
            repository_id=repository_id,
            package_type=package_type,
            order_by=order_by,
        )
    )

    op_stack = (
        "repository",
        "packages",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["packages"]
@task
async def query_repository_releases(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    order_by: graphql_schema.ReleaseOrder = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    List of releases which are dependent on this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before the
            specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        order_by: Order for connection.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).releases(...); strip_kwargs drops None filters.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).releases(
        **strip_kwargs(
            after=after,
            before=before,
            first=first,
            last=last,
            order_by=order_by,
        )
    )

    op_stack = (
        "repository",
        "releases",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["releases"]
@task
async def query_repository_watchers(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of users watching the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before the
            specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build repository(...).watchers(...); strip_kwargs drops None filters.
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).watchers(
        **strip_kwargs(
            after=after,
            before=before,
            first=first,
            last=last,
        )
    )

    op_stack = (
        "repository",
        "watchers",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )

    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["watchers"]
@task
async def query_repository_languages(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    order_by: graphql_schema.LanguageOrder = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the language composition breakdown of a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        after: Return elements that come after this cursor.
        before: Return elements that come before this cursor.
        first: Return the first _n_ elements from the list.
        last: Return the last _n_ elements from the list.
        order_by: Order for the connection.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    conn_args = strip_kwargs(
        after=after, before=before, first=first, last=last, order_by=order_by
    )
    selection = operation.repository(**repo_args).languages(**conn_args)
    _subset_return_fields(
        selection, ("repository", "languages"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["languages"]
@task
async def query_repository_milestone(  # noqa
    owner: str,
    name: str,
    number: int,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query a single milestone of a repository by number.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        number: The number for the milestone to be returned.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).milestone(
        **strip_kwargs(number=number)
    )
    _subset_return_fields(
        selection, ("repository", "milestone"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["milestone"]
@task
async def query_repository_project_v2(  # noqa
    owner: str,
    name: str,
    number: int,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the ProjectV2 of a repository by project number.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        number: The Project number.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).project_v2(
        **strip_kwargs(number=number)
    )
    _subset_return_fields(
        selection, ("repository", "projectV2"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["projectV2"]
@task
async def query_repository_stargazers(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    order_by: graphql_schema.StarOrder = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the users who have starred a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        after: Return elements that come after this cursor.
        before: Return elements that come before this cursor.
        first: Return the first _n_ elements from the list.
        last: Return the last _n_ elements from the list.
        order_by: Order for the connection.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    conn_args = strip_kwargs(
        after=after, before=before, first=first, last=last, order_by=order_by
    )
    selection = operation.repository(**repo_args).stargazers(**conn_args)
    _subset_return_fields(
        selection, ("repository", "stargazers"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["stargazers"]
@task
async def query_repository_deploy_keys(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the deploy keys configured on a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        after: Return elements that come after this cursor.
        before: Return elements that come before this cursor.
        first: Return the first _n_ elements from the list.
        last: Return the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    conn_args = strip_kwargs(after=after, before=before, first=first, last=last)
    selection = operation.repository(**repo_args).deploy_keys(**conn_args)
    _subset_return_fields(
        selection, ("repository", "deployKeys"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["deployKeys"]
@task
async def query_repository_discussion(  # noqa
    owner: str,
    name: str,
    number: int,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query a single discussion of a repository by number.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        number: The number for the discussion to be returned.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).discussion(
        **strip_kwargs(number=number)
    )
    _subset_return_fields(
        selection, ("repository", "discussion"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["discussion"]
@task
async def query_repository_milestones(  # noqa
    owner: str,
    name: str,
    states: Iterable[graphql_schema.MilestoneState],
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    order_by: graphql_schema.MilestoneOrder = None,
    query: Optional[str] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the milestones associated with a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        states: Filter by the state of the milestones.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        after: Return elements that come after this cursor.
        before: Return elements that come before this cursor.
        first: Return the first _n_ elements from the list.
        last: Return the last _n_ elements from the list.
        order_by: Ordering options for milestones.
        query: Filters milestones with a query on the title.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    conn_args = strip_kwargs(
        states=states,
        after=after,
        before=before,
        first=first,
        last=last,
        order_by=order_by,
        query=query,
    )
    selection = operation.repository(**repo_args).milestones(**conn_args)
    _subset_return_fields(
        selection, ("repository", "milestones"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["milestones"]
@task
async def query_repository_projects_v2(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    query: Optional[str] = None,
    order_by: graphql_schema.ProjectV2Order = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    List of projects linked to this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before
            the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        query: A project to search for linked to the repo.
        order_by: How to order the returned projects; defaults to
            `{"field": "NUMBER", "direction": "DESC"}`.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Avoid a mutable default argument (a dict default is shared across all
    # calls); substitute the documented default ordering per call instead.
    if order_by is None:
        order_by = {"field": "NUMBER", "direction": "DESC"}
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).projects_v2(
        **strip_kwargs(
            after=after,
            before=before,
            first=first,
            last=last,
            query=query,
            order_by=order_by,
        )
    )
    op_stack = (
        "repository",
        "projectsV2",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )
    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["projectsV2"]
@task
async def query_repository_submodules(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the submodules of a repository, as parsed from the .gitmodules file
    at the default branch's HEAD commit.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        after: Return elements that come after this cursor.
        before: Return elements that come before this cursor.
        first: Return the first _n_ elements from the list.
        last: Return the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    conn_args = strip_kwargs(after=after, before=before, first=first, last=last)
    selection = operation.repository(**repo_args).submodules(**conn_args)
    _subset_return_fields(
        selection, ("repository", "submodules"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["submodules"]
@task
async def query_repository_license_info(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the license associated with a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).license_info(**strip_kwargs())
    _subset_return_fields(
        selection, ("repository", "licenseInfo"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["licenseInfo"]
@task
async def query_repository_deployments(  # noqa
    owner: str,
    name: str,
    environments: Iterable[str],
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    order_by: graphql_schema.DeploymentOrder = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Deployments associated with the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        environments: Environments to list deployments for.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        order_by: Ordering options for deployments returned from the
            connection; defaults to
            `{"field": "CREATED_AT", "direction": "ASC"}`.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before
            the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Avoid a mutable default argument (a dict default is shared across all
    # calls); substitute the documented default ordering per call instead.
    if order_by is None:
        order_by = {"field": "CREATED_AT", "direction": "ASC"}
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).deployments(
        **strip_kwargs(
            environments=environments,
            order_by=order_by,
            after=after,
            before=before,
            first=first,
            last=last,
        )
    )
    op_stack = (
        "repository",
        "deployments",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )
    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["deployments"]
@task
async def query_repository_discussions(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    category_id: Optional[str] = None,
    order_by: graphql_schema.DiscussionOrder = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of discussions that have been opened in the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come after the
            specified cursor.
        before: Returns the elements in the list that come before
            the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        category_id: Only include discussions that belong to the
            category with this ID.
        order_by: Ordering options for discussions returned from the
            connection; defaults to
            `{"field": "UPDATED_AT", "direction": "DESC"}`.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Avoid a mutable default argument (a dict default is shared across all
    # calls); substitute the documented default ordering per call instead.
    if order_by is None:
        order_by = {"field": "UPDATED_AT", "direction": "DESC"}
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).discussions(
        **strip_kwargs(
            after=after,
            before=before,
            first=first,
            last=last,
            category_id=category_id,
            order_by=order_by,
        )
    )
    op_stack = (
        "repository",
        "discussions",
    )
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )
    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["discussions"]
@task
async def query_repository_environment(  # noqa
    owner: str,
    name: str,
    environment_name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query a single active environment of a repository by name.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        environment_name: The name of the environment to be returned.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).environment(
        **strip_kwargs(name=environment_name)
    )
    _subset_return_fields(
        selection, ("repository", "environment"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["environment"]
@task
async def query_repository_project_next(  # noqa
    owner: str,
    name: str,
    number: int,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the Project (beta) of a repository by project number.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        number: The ProjectNext number.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).project_next(
        **strip_kwargs(number=number)
    )
    _subset_return_fields(
        selection, ("repository", "projectNext"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["projectNext"]
@task
async def query_repository_pull_request(  # noqa
    owner: str,
    name: str,
    number: int,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query a single pull request of a repository by number.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        number: The number for the pull request to be returned.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).pull_request(
        **strip_kwargs(number=number)
    )
    _subset_return_fields(
        selection, ("repository", "pullRequest"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["pullRequest"]
@task
async def query_repository_contact_links(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the contact links associated with a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).contact_links(**strip_kwargs())
    _subset_return_fields(
        selection, ("repository", "contactLinks"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["contactLinks"]
@task
async def query_repository_environments(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the environments that are in a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        after: Return elements that come after this cursor.
        before: Return elements that come before this cursor.
        first: Return the first _n_ elements from the list.
        last: Return the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    conn_args = strip_kwargs(after=after, before=before, first=first, last=last)
    selection = operation.repository(**repo_args).environments(**conn_args)
    _subset_return_fields(
        selection, ("repository", "environments"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["environments"]
@task
async def query_repository_funding_links(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the funding links for a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    selection = operation.repository(**repo_args).funding_links(**strip_kwargs())
    _subset_return_fields(
        selection, ("repository", "fundingLinks"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["fundingLinks"]
@task
async def query_repository_pinned_issues(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the pinned issues for a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        after: Return elements that come after this cursor.
        before: Return elements that come before this cursor.
        first: Return the first _n_ elements from the list.
        last: Return the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    conn_args = strip_kwargs(after=after, before=before, first=first, last=last)
    selection = operation.repository(**repo_args).pinned_issues(**conn_args)
    _subset_return_fields(
        selection, ("repository", "pinnedIssues"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["pinnedIssues"]
@task
async def query_repository_projects_next(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    query: Optional[str] = None,
    sort_by: graphql_schema.ProjectNextOrderField = "TITLE",
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Query the projects (beta) linked to a repository.

    Args:
        owner: Login of the user or organization that owns the repository.
        name: Name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames; if disabled, a repository
            referenced by its old name returns an error.
        after: Return elements that come after this cursor.
        before: Return elements that come before this cursor.
        first: Return the first _n_ elements from the list.
        last: Return the last _n_ elements from the list.
        query: A project (beta) to search for linked to the repo.
        sort_by: How to order the returned project (beta) objects.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    operation = Operation(graphql_schema.Query)
    repo_args = strip_kwargs(owner=owner, name=name, follow_renames=follow_renames)
    conn_args = strip_kwargs(
        after=after,
        before=before,
        first=first,
        last=last,
        query=query,
        sort_by=sort_by,
    )
    selection = operation.repository(**repo_args).projects_next(**conn_args)
    _subset_return_fields(
        selection, ("repository", "projectsNext"), return_fields, return_fields_defaults
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["projectsNext"]
@task
async def query_repository_pull_requests(  # noqa
    owner: str,
    name: str,
    states: Iterable[graphql_schema.PullRequestState],
    labels: Iterable[str],
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    head_ref_name: Optional[str] = None,
    base_ref_name: Optional[str] = None,
    # PEP 484: a `None` default requires an explicit Optional annotation.
    order_by: Optional[graphql_schema.IssueOrder] = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of pull requests that have been opened in the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        states: A list of states to filter the pull requests by.
        labels: A list of label names to filter the pull requests
            by.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        head_ref_name: The head ref name to filter the pull
            requests by.
        base_ref_name: The base ref name to filter the pull
            requests by.
        order_by: Ordering options for pull requests returned from
            the connection.
        after: Returns the elements in the list that come after
            the specified cursor.
        before: Returns the elements in the list that come before
            the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { pullRequests(...) { ... } }
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).pull_requests(
        **strip_kwargs(
            states=states,
            labels=labels,
            head_ref_name=head_ref_name,
            base_ref_name=base_ref_name,
            order_by=order_by,
            after=after,
            before=before,
            first=first,
            last=last,
        )
    )
    op_stack = (
        "repository",
        "pullRequests",
    )
    # Limit the selection to the requested (or default) return fields.
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )
    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["pullRequests"]
@task
async def query_repository_code_of_conduct(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Returns the code of conduct for this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { codeOfConduct { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    selection = operation.repository(**repo_kwargs).code_of_conduct(**strip_kwargs())
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "codeOfConduct"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["codeOfConduct"]
@task
async def query_repository_collaborators(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    # PEP 484: a `None` default requires an explicit Optional annotation.
    affiliation: Optional[graphql_schema.CollaboratorAffiliation] = None,
    query: Optional[str] = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of collaborators associated with the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        affiliation: Collaborators affiliation level with a
            repository.
        query: Filters users with query on user name and login.
        after: Returns the elements in the list that come after
            the specified cursor.
        before: Returns the elements in the list that come before
            the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { collaborators(...) { ... } }
    op = Operation(graphql_schema.Query)
    op_selection = op.repository(
        **strip_kwargs(
            owner=owner,
            name=name,
            follow_renames=follow_renames,
        )
    ).collaborators(
        **strip_kwargs(
            affiliation=affiliation,
            query=query,
            after=after,
            before=before,
            first=first,
            last=last,
        )
    )
    op_stack = (
        "repository",
        "collaborators",
    )
    # Limit the selection to the requested (or default) return fields.
    op_selection = _subset_return_fields(
        op_selection, op_stack, return_fields, return_fields_defaults
    )
    result = await _execute_graphql_op(op, github_credentials)
    return result["repository"]["collaborators"]
@task
async def query_repository_latest_release(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Get the latest release for the repository if one exists.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { latestRelease { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    selection = operation.repository(**repo_kwargs).latest_release(**strip_kwargs())
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "latestRelease"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["latestRelease"]
@task
async def query_repository_recent_projects(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Recent projects that this user has modified in the context of the owner.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come after
            the specified cursor.
        before: Returns the elements in the list that come
            before the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { recentProjects(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        after=after,
        before=before,
        first=first,
        last=last,
    )
    selection = operation.repository(**repo_kwargs).recent_projects(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "recentProjects"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["recentProjects"]
@task
async def query_repository_commit_comments(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of commit comments associated with the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come after
            the specified cursor.
        before: Returns the elements in the list that come
            before the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { commitComments(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        after=after,
        before=before,
        first=first,
        last=last,
    )
    selection = operation.repository(**repo_kwargs).commit_comments(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "commitComments"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["commitComments"]
@task
async def query_repository_issue_templates(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Returns a list of issue templates associated to the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { issueTemplates { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    selection = operation.repository(**repo_kwargs).issue_templates(**strip_kwargs())
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "issueTemplates"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["issueTemplates"]
@task
async def query_repository_assignable_users(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    query: Optional[str] = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of users that can be assigned to issues in this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        query: Filters users with query on user name and login.
        after: Returns the elements in the list that come after
            the specified cursor.
        before: Returns the elements in the list that come
            before the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { assignableUsers(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        query=query,
        after=after,
        before=before,
        first=first,
        last=last,
    )
    selection = operation.repository(**repo_kwargs).assignable_users(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "assignableUsers"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["assignableUsers"]
@task
async def query_repository_primary_language(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    The primary language of the repository's code.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { primaryLanguage { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    selection = operation.repository(**repo_kwargs).primary_language(**strip_kwargs())
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "primaryLanguage"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["primaryLanguage"]
@task
async def query_repository_default_branch_ref(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    The Ref associated with the repository's default branch.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { defaultBranchRef { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    selection = operation.repository(**repo_kwargs).default_branch_ref(**strip_kwargs())
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "defaultBranchRef"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["defaultBranchRef"]
@task
async def query_repository_mentionable_users(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    query: Optional[str] = None,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of Users that can be mentioned in the context of the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        query: Filters users with query on user name and login.
        after: Returns the elements in the list that come
            after the specified cursor.
        before: Returns the elements in the list that come
            before the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { mentionableUsers(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        query=query,
        after=after,
        before=before,
        first=first,
        last=last,
    )
    selection = operation.repository(**repo_kwargs).mentionable_users(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "mentionableUsers"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["mentionableUsers"]
@task
async def query_repository_repository_topics(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of applied repository-topic associations for this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come
            after the specified cursor.
        before: Returns the elements in the list that come
            before the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { repositoryTopics(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        after=after,
        before=before,
        first=first,
        last=last,
    )
    selection = operation.repository(**repo_kwargs).repository_topics(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "repositoryTopics"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["repositoryTopics"]
@task
async def query_repository_pinned_discussions(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of discussions that have been pinned in this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come
            after the specified cursor.
        before: Returns the elements in the list that come
            before the specified cursor.
        first: Returns the first _n_ elements from the list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { pinnedDiscussions(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        after=after,
        before=before,
        first=first,
        last=last,
    )
    selection = operation.repository(**repo_kwargs).pinned_discussions(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "pinnedDiscussions"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["pinnedDiscussions"]
@task
async def query_repository_discussion_category(  # noqa
    owner: str,
    name: str,
    slug: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A discussion category by slug.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        slug: The slug of the discussion category to be
            returned.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { discussionCategory(slug) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        slug=slug,
    )
    selection = operation.repository(**repo_kwargs).discussion_category(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "discussionCategory"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["discussionCategory"]
@task
async def query_repository_interaction_ability(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    The interaction ability settings for this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { interactionAbility { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    selection = operation.repository(**repo_kwargs).interaction_ability(**strip_kwargs())
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "interactionAbility"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["interactionAbility"]
@task
async def query_repository_issue_or_pull_request(  # noqa
    owner: str,
    name: str,
    number: int,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Returns a single issue-like object from the current repository by number.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        number: The number for the issue to be returned.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { issueOrPullRequest(number) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        number=number,
    )
    selection = operation.repository(**repo_kwargs).issue_or_pull_request(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "issueOrPullRequest"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["issueOrPullRequest"]
@task
async def query_repository_vulnerability_alerts(  # noqa
    owner: str,
    name: str,
    states: Iterable[graphql_schema.RepositoryVulnerabilityAlertState],
    dependency_scopes: Iterable[
        graphql_schema.RepositoryVulnerabilityAlertDependencyScope
    ],
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of vulnerability alerts that are on this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        states: Filter by the state of the alert.
        dependency_scopes: Filter by the scope of the
            alert's dependency.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come
            after the specified cursor.
        before: Returns the elements in the list that come
            before the specified cursor.
        first: Returns the first _n_ elements from the
            list.
        last: Returns the last _n_ elements from the list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { vulnerabilityAlerts(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        states=states,
        dependency_scopes=dependency_scopes,
        after=after,
        before=before,
        first=first,
        last=last,
    )
    selection = operation.repository(**repo_kwargs).vulnerability_alerts(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "vulnerabilityAlerts"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["vulnerabilityAlerts"]
@task
async def query_repository_discussion_categories(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    filter_by_assignable: bool = False,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of discussion categories that are available in the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that come
            after the specified cursor.
        before: Returns the elements in the list that come
            before the specified cursor.
        first: Returns the first _n_ elements from the
            list.
        last: Returns the last _n_ elements from the list.
        filter_by_assignable: Filter by categories that
            are assignable by the viewer.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { discussionCategories(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        after=after,
        before=before,
        first=first,
        last=last,
        filter_by_assignable=filter_by_assignable,
    )
    selection = operation.repository(**repo_kwargs).discussion_categories(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "discussionCategories"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["discussionCategories"]
@task
async def query_repository_pull_request_templates(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    Returns a list of pull request templates associated to the repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { pullRequestTemplates { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    selection = operation.repository(**repo_kwargs).pull_request_templates(**strip_kwargs())
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "pullRequestTemplates"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["pullRequestTemplates"]
@task
async def query_repository_branch_protection_rules(  # noqa
    owner: str,
    name: str,
    github_credentials: GitHubCredentials,
    follow_renames: bool = True,
    after: Optional[str] = None,
    before: Optional[str] = None,
    first: Optional[int] = None,
    last: Optional[int] = None,
    return_fields: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:  # pragma: no cover
    """
    A list of branch protection rules for this repository.

    Args:
        owner: The login field of a user or organization.
        name: The name of the repository.
        github_credentials: Credentials to use for authentication with GitHub.
        follow_renames: Follow repository renames. If disabled, a
            repository referenced by its old name will return an error.
        after: Returns the elements in the list that
            come after the specified cursor.
        before: Returns the elements in the list that
            come before the specified cursor.
        first: Returns the first _n_ elements from the
            list.
        last: Returns the last _n_ elements from the
            list.
        return_fields: Subset the return fields (as snake_case); defaults to
            fields listed in configs/query/*.json.

    Returns:
        A dict of the returned fields.
    """
    # Build: repository(owner, name) { branchProtectionRules(...) { ... } }
    operation = Operation(graphql_schema.Query)
    repo_kwargs = strip_kwargs(
        owner=owner,
        name=name,
        follow_renames=follow_renames,
    )
    field_kwargs = strip_kwargs(
        after=after,
        before=before,
        first=first,
        last=last,
    )
    selection = operation.repository(**repo_kwargs).branch_protection_rules(**field_kwargs)
    # Limit the selection to the requested (or default) return fields.
    selection = _subset_return_fields(
        selection,
        ("repository", "branchProtectionRules"),
        return_fields,
        return_fields_defaults,
    )
    data = await _execute_graphql_op(operation, github_credentials)
    return data["repository"]["branchProtectionRules"]
| GitHubRepository |
python | tensorflow__tensorflow | tensorflow/python/ops/init_ops_v2.py | {
"start": 3909,
"end": 5633
} | class ____(Initializer):
"""Initializer that generates tensors initialized to 0.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.zeros_initializer())
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([0., 0., 0.], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype=float32)>
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
"""
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValuesError: If the dtype is not numeric or boolean.
"""
self._validate_kwargs(kwargs)
dtype = dtypes.as_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError("Argument `dtype` expected to be numeric or boolean. "
f"Received {dtype}.")
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.zeros(shape, dtype)
@tf_export("ones_initializer", v1=[])
| Zeros |
python | getsentry__sentry | src/sentry/issues/endpoints/group_similar_issues.py | {
"start": 586,
"end": 2156
} | class ____(GroupEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, group: Group) -> Response:
features = similarity.features
limit_s = request.GET.get("limit", None)
if limit_s is not None:
limit: int | None = int(limit_s) + 1 # the target group will always be included
else:
limit = None
group_ids = []
group_scores = []
for group_id, scores in features.compare(group, limit=limit):
if group_id != group.id:
group_ids.append(group_id)
group_scores.append(scores)
serialized_groups = {
int(g["id"]): g
for g in serialize(
list(Group.objects.get_many_from_cache(group_ids)), user=request.user
)
}
results = []
# We need to preserve the ordering of the Redis results, as that
# ordering is directly shown in the UI
for group_id, scores in zip(group_ids, group_scores):
serialized_group = serialized_groups.get(group_id)
if serialized_group is None:
# TODO(tkaemming): This should log when we filter out a group that is
# unable to be retrieved from the database. (This will soon be
# unexpected behavior, but still possible.)
continue
results.append((serialized_group, {_fix_label(k): v for k, v in scores.items()}))
return Response(results)
| GroupSimilarIssuesEndpoint |
python | kamyu104__LeetCode-Solutions | Python/prefix-and-suffix-search.py | {
"start": 245,
"end": 1411
} | class ____(object):
def __init__(self, words):
"""
:type words: List[str]
"""
_trie = lambda: collections.defaultdict(_trie)
self.__trie = _trie()
for weight, word in enumerate(words):
word += '#'
for i in xrange(len(word)):
cur = self.__trie
cur["_weight"] = weight
for j in xrange(i, 2*len(word)-1):
cur = cur[word[j%len(word)]]
cur["_weight"] = weight
def f(self, prefix, suffix):
"""
:type prefix: str
:type suffix: str
:rtype: int
"""
cur = self.__trie
for letter in suffix + '#' + prefix:
if letter not in cur:
return -1
cur = cur[letter]
return cur["_weight"]
# Time: ctor: O(w * l), w is the number of words, l is the word length on average
# search: O(p + s + max(m, n)), p is the length of the prefix, s is the length of the suffix,
# m is the number of the prefix match, n is the number of the suffix match
# Space: O(w * l)
| WordFilter |
python | facebook__pyre-check | pyre_extensions/tests/safe_json_test.py | {
"start": 507,
"end": 583
} | class ____(Movie):
dictionary: Dict[str, Any]
| MovieWithArbitraryDictionary |
python | pydata__xarray | asv_bench/benchmarks/accessors.py | {
"start": 144,
"end": 634
} | class ____:
def setup(self, calendar):
np.random.randn(NTIME)
time = xr.date_range("2000", periods=30 * 365, calendar=calendar)
data = np.ones((NTIME,))
self.da = xr.DataArray(data, dims="time", coords={"time": time})
def time_dayofyear(self, calendar):
_ = self.da.time.dt.dayofyear
def time_year(self, calendar):
_ = self.da.time.dt.year
def time_floor(self, calendar):
_ = self.da.time.dt.floor("D")
| DateTimeAccessor |
python | mahmoud__glom | glom/core.py | {
"start": 65197,
"end": 67102
} | class ____(_ObjStyleKeysMeta('_AbstractKeys', (object,), {})):
__metaclass__ = _ObjStyleKeysMeta
@staticmethod
def get_keys(obj):
ret = obj.__dict__.keys()
return ret
def _get_sequence_item(target, index):
return target[int(index)]
# handlers are 3-arg callables, with args (spec, target, scope)
# spec is the first argument for convenience in the case
# that the handler is a method of the spec type
def _handle_dict(target, spec, scope):
ret = type(spec)() # TODO: works for dict + ordereddict, but sufficient for all?
for field, subspec in spec.items():
val = scope[glom](target, subspec, scope)
if val is SKIP:
continue
if type(field) in (Spec, TType):
field = scope[glom](target, field, scope)
ret[field] = val
return ret
def _handle_list(target, spec, scope):
subspec = spec[0]
iterate = scope[TargetRegistry].get_handler('iterate', target, path=scope[Path])
try:
iterator = iterate(target)
except Exception as e:
raise TypeError('failed to iterate on instance of type %r at %r (got %r)'
% (target.__class__.__name__, Path(*scope[Path]), e))
ret = []
base_path = scope[Path]
for i, t in enumerate(iterator):
scope[Path] = base_path + [i]
val = scope[glom](t, subspec, scope)
if val is SKIP:
continue
if val is STOP:
break
ret.append(val)
return ret
def _handle_tuple(target, spec, scope):
res = target
for subspec in spec:
scope = chain_child(scope)
nxt = scope[glom](res, subspec, scope)
if nxt is SKIP:
continue
if nxt is STOP:
break
res = nxt
if not isinstance(subspec, list):
scope[Path] += [getattr(subspec, '__name__', subspec)]
return res
| _ObjStyleKeys |
python | pytorch__pytorch | test/distributed/checkpoint/test_file_system_checkpoint.py | {
"start": 2633,
"end": 2965
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear_1 = torch.nn.Linear(5, 5)
self.linear_2 = torch.nn.Linear(5, 1)
self.emb = torch.nn.EmbeddingBag(5, 10)
# The ShardedModels are borrowed from test/distributed/_sharded_tensor/test_sharded_tensor.py
| MyTestModule |
python | coleifer__peewee | tests/regressions.py | {
"start": 44284,
"end": 45160
} | class ____(ModelTestCase):
requires = [Bits]
def assertBits(self, bf, expected):
b1_1, b1_2, b2_1, b2_2 = expected
self.assertEqual(bf.b1_1, b1_1)
self.assertEqual(bf.b1_2, b1_2)
self.assertEqual(bf.b2_1, b2_1)
self.assertEqual(bf.b2_2, b2_2)
def test_bit_field_name(self):
bf = Bits.create()
self.assertBits(bf, (True, False, False, False))
bf.b1_1 = False
bf.b1_2 = True
bf.b2_1 = True
bf.save()
self.assertBits(bf, (False, True, True, False))
bf = Bits.get(Bits.id == bf.id)
self.assertBits(bf, (False, True, True, False))
self.assertEqual(bf.b1, 2)
self.assertEqual(bf.b2, 1)
self.assertEqual(Bits.select().where(Bits.b1_2).count(), 1)
self.assertEqual(Bits.select().where(Bits.b2_2).count(), 0)
| TestBitFieldName |
python | ray-project__ray | python/ray/dashboard/modules/reporter/gpu_providers.py | {
"start": 745,
"end": 1077
} | class ____(TypedDict):
"""GPU utilization information for a single GPU device."""
index: int
name: str
uuid: str
utilization_gpu: Optional[Percentage]
memory_used: Megabytes
memory_total: Megabytes
processes_pids: Optional[Dict[int, ProcessGPUInfo]]
# tpu utilization for google tpu
| GpuUtilizationInfo |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 9729,
"end": 9860
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
pass
@dataclasses.dataclass(frozen=True)
| ShowStatusRequestClientCapabilities |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 2543,
"end": 4863
} | class ____(nn.Module):
def __init__(self, input_size, output_size, num_layers):
super().__init__()
# Stack the weights for N layers into a single tensor (num_layers, output_size, input_size)
self.weight = nn.Parameter(torch.randn(num_layers, output_size, input_size))
def forward(self, x, layer_idx=None):
"""
`KyutaiSpeechToTextFlexibleLinear` creates one linear layer per codebook. There's multiple ways to use it.
In the default case, `sequence_length=num_layers`, so each element of the sequence will be matmul to the weights corresponding to its index on the sequence.
For more advanced cases, one can specify which codebook's layer(s) to use with `layer_idx`.
If `layer_idx` indicates a single integer, all of the element of the sequence will be matmul to this single codebook's layer.
But if `layer_idx` is a tensor of shape `(seq_length,)`, it will matmul each i-th element of the input sequence to the corresponding layer `weight[i]`.
Args:
x (`torch.FloatTensor): input to the layer of shape `(batch, num_layers, embed_dim)` or of shape `(batch, seq_length, embed_dim)`
layer_idx (`torch.Tensor`, *optional*):
Can be used to specify which codebook's layers(s) to use.
If it's a tensor of shape `(seq_length,)`, will matmul each element of the sequence to the corresponding weights.
But if `layer_idx` is a tensor of shape `(seq_length,)`, it will matmul each i-th element of the input sequence to the corresponding layer `weight[i]`.
"""
# Use torch.gather to select the corresponding weights for each sample
# (codebooks, output_size, hidden_size)
selected_weights = torch.index_select(self.weight, 0, layer_idx) if layer_idx is not None else self.weight
# (1, codebooks, hidden_size, output_size)
selected_weights = selected_weights.transpose(1, 2)[None, :, :, :]
# (batch_size, codebooks, 1, hidden_size) x (1, codebooks, hidden_size, output_size)
# -> (batch_size, codebooks, 1, output_size)
x = torch.matmul(x[:, :, None, :], selected_weights)
# (batch_size, codebooks, output_size)
return x.squeeze(2)
@auto_docstring
| KyutaiSpeechToTextFlexibleLinear |
python | huggingface__transformers | src/transformers/models/patchtst/modeling_patchtst.py | {
"start": 45879,
"end": 47106
} | class ____(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
if config.scaling == "mean" or config.scaling is True:
self.scaler = PatchTSTMeanScaler(config)
elif config.scaling == "std":
self.scaler = PatchTSTStdScaler(config)
else:
self.scaler = PatchTSTNOPScaler(config)
def forward(
self, data: torch.Tensor, observed_indicator: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Input for scaler calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, um_input_channels)`)
"""
data, loc, scale = self.scaler(data, observed_indicator)
return data, loc, scale
@auto_docstring
| PatchTSTScaler |
python | psf__black | tests/data/miscellaneous/force_pyi.py | {
"start": 270,
"end": 468
} | class ____ (A , C): ...
def spam() -> None: ...
@overload
def spam(arg: str) -> str: ...
var : int = 1
def eggs() -> Union[str, int]: ...
# output
from typing import Union
@bird
def zoo(): ...
| F |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass3.py | {
"start": 446,
"end": 490
} | class ____(metaclass=SubMeta3):
pass
| Base5 |
python | langchain-ai__langchain | libs/core/langchain_core/tools/base.py | {
"start": 44881,
"end": 45134
} | class ____:
"""Annotation for tool arguments that are injected at runtime.
Tool arguments annotated with this class are not included in the tool
schema sent to language models and are instead injected during execution.
"""
| InjectedToolArg |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 287670,
"end": 291907
} | class ____(AssignmentNode):
# An assignment with multiple left hand sides:
#
# a = b = c
#
# lhs_list [ExprNode] Left hand sides
# rhs ExprNode Right hand sides
#
# Used internally:
#
# coerced_values [ExprNode] RHS coerced to all distinct LHS types
# cloned_values [ExprNode] cloned RHS value for each LHS
# assignment_overloads [Bool] If each assignment uses a C++ operator=
child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"]
cloned_values = None
coerced_values = None
assignment_overloads = None
def _check_const_assignment(self, node):
if isinstance(node, CascadedAssignmentNode):
for lhs in node.lhs_list:
self._warn_on_const_assignment(lhs, node.rhs)
def analyse_declarations(self, env):
for lhs in self.lhs_list:
lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp=0):
from .ExprNodes import CloneNode, ProxyNode
# collect distinct types used on the LHS
lhs_types = set()
for i, lhs in enumerate(self.lhs_list):
lhs = self.lhs_list[i] = lhs.analyse_target_types(env)
lhs.gil_assignment_check(env)
lhs_types.add(lhs.type)
rhs = self.rhs.analyse_types(env)
# common special case: only one type needed on the LHS => coerce only once
if len(lhs_types) == 1:
# Avoid coercion for overloaded assignment operators.
if next(iter(lhs_types)).is_cpp_class:
op = env.lookup_operator('=', [lhs, self.rhs])
if not op:
rhs = rhs.coerce_to(lhs_types.pop(), env)
else:
rhs = rhs.coerce_to(lhs_types.pop(), env)
if not rhs.is_name and not rhs.is_literal and (
use_temp or rhs.is_attribute or rhs.type.is_pyobject):
rhs = rhs.coerce_to_temp(env)
else:
rhs = rhs.coerce_to_simple(env)
self.rhs = ProxyNode(rhs) if rhs.result_in_temp() else rhs
# clone RHS and coerce it to all distinct LHS types
self.coerced_values = []
coerced_values = {}
self.assignment_overloads = []
for lhs in self.lhs_list:
overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs])
self.assignment_overloads.append(overloaded)
if lhs.type not in coerced_values and lhs.type != rhs.type:
rhs = CloneNode(self.rhs)
if not overloaded:
rhs = rhs.coerce_to(lhs.type, env)
self.coerced_values.append(rhs)
coerced_values[lhs.type] = rhs
# clone coerced values for all LHS assignments
self.cloned_values = []
for lhs in self.lhs_list:
rhs = coerced_values.get(lhs.type, self.rhs)
self.cloned_values.append(CloneNode(rhs))
return self
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code, overloaded_assignment=False):
# prepare all coercions
for rhs in self.coerced_values:
rhs.generate_evaluation_code(code)
# assign clones to LHS
for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads):
rhs.generate_evaluation_code(code)
lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload)
# dispose of coerced values and original RHS
for rhs_value in self.coerced_values:
rhs_value.generate_disposal_code(code)
rhs_value.free_temps(code)
self.rhs.generate_disposal_code(code)
self.rhs.free_temps(code)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
for rhs in self.coerced_values:
rhs.annotate(code)
for lhs, rhs in zip(self.lhs_list, self.cloned_values):
lhs.annotate(code)
rhs.annotate(code)
self.rhs.annotate(code)
| CascadedAssignmentNode |
python | PyCQA__pylint | tests/functional/u/undefined/undefined_variable.py | {
"start": 4939,
"end": 5096
} | class ____:
myattr = 1
mylambda = lambda: LambdaClass.myattr
# Need different classes to make sure
# consumed variables don't get in the way
| LambdaClass |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 77063,
"end": 84763
} | class ____(ReturnsRows):
"""Sub-base of ReturnsRows for elements that deliver rows
directly, namely SELECT and INSERT/UPDATE/DELETE..RETURNING"""
_label_style: SelectLabelStyle = LABEL_STYLE_NONE
def _generate_columns_plus_names(
self,
anon_for_dupe_key: bool,
cols: Optional[_SelectIterable] = None,
) -> List[_ColumnsPlusNames]:
"""Generate column names as rendered in a SELECT statement by
the compiler, as well as tokens used to populate the .c. collection
on a :class:`.FromClause`.
This is distinct from the _column_naming_convention generator that's
intended for population of the Select.selected_columns collection,
different rules. the collection returned here calls upon the
_column_naming_convention as well.
"""
if cols is None:
cols = self._all_selected_columns
key_naming_convention = SelectState._column_naming_convention(
self._label_style
)
names = {}
result: List[_ColumnsPlusNames] = []
result_append = result.append
table_qualified = self._label_style is LABEL_STYLE_TABLENAME_PLUS_COL
label_style_none = self._label_style is LABEL_STYLE_NONE
# a counter used for "dedupe" labels, which have double underscores
# in them and are never referred by name; they only act
# as positional placeholders. they need only be unique within
# the single columns clause they're rendered within (required by
# some dbs such as mysql). So their anon identity is tracked against
# a fixed counter rather than hash() identity.
dedupe_hash = 1
for c in cols:
repeated = False
if not c._render_label_in_columns_clause:
effective_name = required_label_name = fallback_label_name = (
None
)
elif label_style_none:
if TYPE_CHECKING:
assert is_column_element(c)
effective_name = required_label_name = None
fallback_label_name = c._non_anon_label or c._anon_name_label
else:
if TYPE_CHECKING:
assert is_column_element(c)
if table_qualified:
required_label_name = effective_name = (
fallback_label_name
) = c._tq_label
else:
effective_name = fallback_label_name = c._non_anon_label
required_label_name = None
if effective_name is None:
# it seems like this could be _proxy_key and we would
# not need _expression_label but it isn't
# giving us a clue when to use anon_label instead
expr_label = c._expression_label
if expr_label is None:
repeated = c._anon_name_label in names
names[c._anon_name_label] = c
effective_name = required_label_name = None
if repeated:
# here, "required_label_name" is sent as
# "None" and "fallback_label_name" is sent.
if table_qualified:
fallback_label_name = (
c._dedupe_anon_tq_label_idx(dedupe_hash)
)
dedupe_hash += 1
else:
fallback_label_name = c._dedupe_anon_label_idx(
dedupe_hash
)
dedupe_hash += 1
else:
fallback_label_name = c._anon_name_label
else:
required_label_name = effective_name = (
fallback_label_name
) = expr_label
if effective_name is not None:
if TYPE_CHECKING:
assert is_column_element(c)
if effective_name in names:
# when looking to see if names[name] is the same column as
# c, use hash(), so that an annotated version of the column
# is seen as the same as the non-annotated
if hash(names[effective_name]) != hash(c):
# different column under the same name. apply
# disambiguating label
if table_qualified:
required_label_name = fallback_label_name = (
c._anon_tq_label
)
else:
required_label_name = fallback_label_name = (
c._anon_name_label
)
if anon_for_dupe_key and required_label_name in names:
# here, c._anon_tq_label is definitely unique to
# that column identity (or annotated version), so
# this should always be true.
# this is also an infrequent codepath because
# you need two levels of duplication to be here
assert hash(names[required_label_name]) == hash(c)
# the column under the disambiguating label is
# already present. apply the "dedupe" label to
# subsequent occurrences of the column so that the
# original stays non-ambiguous
if table_qualified:
required_label_name = fallback_label_name = (
c._dedupe_anon_tq_label_idx(dedupe_hash)
)
dedupe_hash += 1
else:
required_label_name = fallback_label_name = (
c._dedupe_anon_label_idx(dedupe_hash)
)
dedupe_hash += 1
repeated = True
else:
names[required_label_name] = c
elif anon_for_dupe_key:
# same column under the same name. apply the "dedupe"
# label so that the original stays non-ambiguous
if table_qualified:
required_label_name = fallback_label_name = (
c._dedupe_anon_tq_label_idx(dedupe_hash)
)
dedupe_hash += 1
else:
required_label_name = fallback_label_name = (
c._dedupe_anon_label_idx(dedupe_hash)
)
dedupe_hash += 1
repeated = True
else:
names[effective_name] = c
result_append(
_ColumnsPlusNames(
required_label_name,
key_naming_convention(c),
fallback_label_name,
c,
repeated,
)
)
return result
| SelectsRows |
python | plotly__plotly.py | plotly/io/_base_renderers.py | {
"start": 10455,
"end": 11430
} | class ____(HtmlRenderer):
"""
Renderer to display interactive figures in the classic Jupyter Notebook.
This renderer is also useful for notebooks that will be converted to
HTML using nbconvert/nbviewer as it will produce standalone HTML files
that include interactive figures.
This renderer automatically performs global notebook initialization when
activated.
mime type: 'text/html'
"""
def __init__(
self,
connected=False,
config=None,
auto_play=False,
post_script=None,
animation_opts=None,
include_plotlyjs=False,
):
super(NotebookRenderer, self).__init__(
connected=connected,
full_html=False,
global_init=True,
config=config,
auto_play=auto_play,
post_script=post_script,
animation_opts=animation_opts,
include_plotlyjs=include_plotlyjs,
)
| NotebookRenderer |
python | huggingface__transformers | src/transformers/models/flaubert/modeling_flaubert.py | {
"start": 65477,
"end": 70993
} | class ____(FlaubertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = FlaubertModel(config)
self.qa_outputs = FlaubertSQuADHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[dict[str, torch.Tensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
is_impossible: Optional[torch.Tensor] = None,
cls_index: Optional[torch.Tensor] = None,
p_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, FlaubertForQuestionAnsweringOutput]:
r"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the classification token to use as input for computing plausibility of the
answer.
p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
masked. 0.0 mean token is not masked.
Example:
```python
>>> from transformers import AutoTokenizer, FlaubertForQuestionAnswering
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
>>> model = FlaubertForQuestionAnswering.from_pretrained("FacebookAI/xlm-mlm-en-2048")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
... 0
... ) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
outputs = self.qa_outputs(
output,
start_positions=start_positions,
end_positions=end_positions,
cls_index=cls_index,
is_impossible=is_impossible,
p_mask=p_mask,
return_dict=return_dict,
)
if not return_dict:
return outputs + transformer_outputs[1:]
return FlaubertForQuestionAnsweringOutput(
loss=outputs.loss,
start_top_log_probs=outputs.start_top_log_probs,
start_top_index=outputs.start_top_index,
end_top_log_probs=outputs.end_top_log_probs,
end_top_index=outputs.end_top_index,
cls_logits=outputs.cls_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring
# Copied from transformers.models.xlm.modeling_xlm.XLMForMultipleChoice with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
| FlaubertForQuestionAnswering |
python | scipy__scipy | scipy/optimize/_numdiff.py | {
"start": 31862,
"end": 35820
} | class ____:
# Permits pickling of a wrapped function
def __init__(self, fun, x0, args, kwargs):
self.fun = fun
self.x0 = x0
self.args = args
self.kwargs = kwargs
def __call__(self, x):
# send user function same fp type as x0. (but only if cs is not being
# used
xp = array_namespace(self.x0)
if xp.isdtype(x.dtype, "real floating"):
x = xp.astype(x, self.x0.dtype)
f = np.atleast_1d(self.fun(x, *self.args, **self.kwargs))
if f.ndim > 1:
raise RuntimeError("`fun` return value has "
"more than 1 dimension.")
return f
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
kwargs=None):
"""Check correctness of a function computing derivatives (Jacobian or
gradient) by comparison with a finite difference approximation.
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
jac : callable
Function which computes Jacobian matrix of `fun`. It must work with
argument x the same way as `fun`. The return value must be array_like
or sparse array with an appropriate shape.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to 1-D array.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same
for `jac`.
Returns
-------
accuracy : float
The maximum among all relative errors for elements with absolute values
higher than 1 and absolute errors for elements with absolute values
less or equal than 1. If `accuracy` is on the order of 1e-6 or lower,
then it is likely that your `jac` implementation is correct.
See Also
--------
approx_derivative : Compute finite difference approximation of derivative.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize._numdiff import check_derivative
>>>
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> def jac(x, c1, c2):
... return np.array([
... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
... ])
...
>>>
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> check_derivative(f, jac, x0, args=(1, 2))
2.4492935982947064e-16
"""
if kwargs is None:
kwargs = {}
J_to_test = jac(x0, *args, **kwargs)
if issparse(J_to_test):
J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
args=args, kwargs=kwargs)
J_to_test = csr_array(J_to_test)
abs_err = J_to_test - J_diff
i, j, abs_err_data = find(abs_err)
J_diff_data = np.asarray(J_diff[i, j]).ravel()
return np.max(np.abs(abs_err_data) /
np.maximum(1, np.abs(J_diff_data)))
else:
J_diff = approx_derivative(fun, x0, bounds=bounds,
args=args, kwargs=kwargs)
abs_err = np.abs(J_to_test - J_diff)
return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
| _Fun_Wrapper |
python | kubernetes-client__python | kubernetes/client/api/internal_apiserver_v1alpha1_api.py | {
"start": 543,
"end": 123682
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_storage_version(self, body, **kwargs): # noqa: E501
"""create_storage_version # noqa: E501
create a StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_storage_version(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1alpha1StorageVersion body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_storage_version_with_http_info(body, **kwargs) # noqa: E501
    def create_storage_version_with_http_info(self, body, **kwargs):  # noqa: E501
        """create_storage_version  # noqa: E501
        create a StorageVersion  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_storage_version_with_http_info(body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param V1alpha1StorageVersion body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the local namespace FIRST so it contains exactly
        # {self, body, kwargs}; recognised kwargs are folded in below.
        local_var_params = locals()

        # Endpoint-specific parameters accepted as keyword arguments.
        all_params = [
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Framework-level options shared by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early so typos surface as an
        # ApiTypeError instead of being silently ignored.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_storage_version" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_storage_version`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # Translate snake_case kwargs to their camelCase query-string keys,
        # including only parameters the caller actually supplied.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, (de)serialization and async handling to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1StorageVersion',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_collection_storage_version(self, **kwargs): # noqa: E501
"""delete_collection_storage_version # noqa: E501
delete collection of StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_storage_version(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_storage_version_with_http_info(**kwargs) # noqa: E501
    def delete_collection_storage_version_with_http_info(self, **kwargs):  # noqa: E501
        """delete_collection_storage_version  # noqa: E501
        delete collection of StorageVersion  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_storage_version_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the local namespace FIRST so it contains exactly
        # {self, kwargs}; recognised kwargs are folded in below.
        local_var_params = locals()

        # Endpoint-specific parameters accepted as keyword arguments.
        all_params = [
            'pretty',
            '_continue',
            'dry_run',
            'field_selector',
            'grace_period_seconds',
            'ignore_store_read_error_with_cluster_breaking_potential',
            'label_selector',
            'limit',
            'orphan_dependents',
            'propagation_policy',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'body'
        ]
        # Framework-level options shared by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early so typos surface as an
        # ApiTypeError instead of being silently ignored.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_storage_version" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Translate snake_case kwargs to their camelCase query-string keys,
        # including only parameters the caller actually supplied.  Note the
        # leading-underscore `_continue` avoids shadowing the Python keyword.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None:  # noqa: E501
            query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, (de)serialization and async handling to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_storage_version(self, name, **kwargs): # noqa: E501
"""delete_storage_version # noqa: E501
delete a StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_storage_version(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_storage_version_with_http_info(name, **kwargs) # noqa: E501
def delete_storage_version_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_storage_version  # noqa: E501

    delete a StorageVersion  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_storage_version_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the StorageVersion (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; only 'All' is valid
    :param int grace_period_seconds: non-negative seconds before deletion; zero deletes immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of a corrupt resource; skips finalizers and precondition checks and may break the cluster
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
    :param _request_timeout: total timeout (number) or a
                             (connection, read) timeout tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    # Endpoint parameters plus the client-level options every generated
    # method accepts.
    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject anything the endpoint does not understand, then fold the
    # remaining keyword arguments into the parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_storage_version" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # `name` is a required path parameter.
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_storage_version`")  # noqa: E501

    path_params = {'name': local_var_params['name']}

    # Map each supplied python-side option onto its wire-level query
    # parameter name, skipping anything the caller left unset.
    query_params = []
    for attr, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        value = local_var_params.get(attr)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    get available resources  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
    :param _request_timeout: total timeout (number) or a
                             (connection, read) timeout tuple
    :return: V1APIResourceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized body (no status code / headers).
    return self.get_api_resources_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    get available resources  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
    :param _request_timeout: total timeout (number) or a
                             (connection, read) timeout tuple
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    # This endpoint has no endpoint-specific parameters; only the
    # client-level options are accepted.
    all_params = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def list_storage_version(self, **kwargs):  # noqa: E501
    """list_storage_version  # noqa: E501

    list or watch objects of kind StorageVersion  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_storage_version(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this flag
    :param str _continue: continue token from a previous paginated list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for one list call; pagination continues via the `continue` token
    :param str resource_version: constrain which resource versions the request may be served from
    :param str resource_version_match: how resource_version is applied to the list call
    :param bool send_initial_events: with watch=true, start the stream with synthetic events for the current state, ending in a bookmark; requires resource_version_match
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
    :param _request_timeout: total timeout (number) or a
                             (connection, read) timeout tuple
    :return: V1alpha1StorageVersionList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized body (no status code / headers).
    return self.list_storage_version_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def list_storage_version_with_http_info(self, **kwargs):  # noqa: E501
    """list_storage_version  # noqa: E501

    list or watch objects of kind StorageVersion  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_storage_version_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this flag
    :param str _continue: continue token from a previous paginated list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for one list call; pagination continues via the `continue` token
    :param str resource_version: constrain which resource versions the request may be served from
    :param str resource_version_match: how resource_version is applied to the list call
    :param bool send_initial_events: with watch=true, start the stream with synthetic events for the current state, ending in a bookmark; requires resource_version_match
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
    :param _request_timeout: total timeout (number) or a
                             (connection, read) timeout tuple
    :return: tuple(V1alpha1StorageVersionList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    # Endpoint parameters plus the client-level options every generated
    # method accepts.
    all_params = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject anything the endpoint does not understand, then fold the
    # remaining keyword arguments into the parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_storage_version" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Map each supplied python-side option onto its wire-level query
    # parameter name, skipping anything the caller left unset.
    query_params = []
    for attr, wire_name in (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ):
        value = local_var_params.get(attr)
        if value is not None:
            query_params.append((wire_name, value))

    # Watch requests may stream, so the streaming media types are also
    # acceptable here.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1alpha1StorageVersionList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def patch_storage_version(self, name, body, **kwargs):  # noqa: E501
    """patch_storage_version  # noqa: E501

    partially update the specified StorageVersion  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_storage_version(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the StorageVersion (required)
    :param object body: (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; only 'All' is valid
    :param str field_manager: name of the actor making these changes; required for apply patches, optional otherwise
    :param str field_validation: how the server handles unknown/duplicate fields: 'Ignore', 'Warn' or 'Strict'
    :param bool force: force apply requests, re-acquiring conflicting fields; must be unset for non-apply patches
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
    :param _request_timeout: total timeout (number) or a
                             (connection, read) timeout tuple
    :return: V1alpha1StorageVersion
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized body (no status code / headers).
    return self.patch_storage_version_with_http_info(
        name, body, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def patch_storage_version_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_storage_version # noqa: E501
partially update the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_storage_version_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_storage_version" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_storage_version`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_storage_version`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersion', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_storage_version_status(self, name, body, **kwargs): # noqa: E501
"""patch_storage_version_status # noqa: E501
partially update status of the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_storage_version_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_storage_version_status_with_http_info(name, body, **kwargs) # noqa: E501
def patch_storage_version_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_storage_version_status # noqa: E501
partially update status of the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_storage_version_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_storage_version_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_storage_version_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_storage_version_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersion', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_storage_version(self, name, **kwargs): # noqa: E501
"""read_storage_version # noqa: E501
read the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_version(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_storage_version_with_http_info(name, **kwargs) # noqa: E501
def read_storage_version_with_http_info(self, name, **kwargs): # noqa: E501
"""read_storage_version # noqa: E501
read the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_version_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_storage_version" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_storage_version`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersion', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_storage_version_status(self, name, **kwargs): # noqa: E501
"""read_storage_version_status # noqa: E501
read status of the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_version_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_storage_version_status_with_http_info(name, **kwargs) # noqa: E501
def read_storage_version_status_with_http_info(self, name, **kwargs): # noqa: E501
"""read_storage_version_status # noqa: E501
read status of the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_version_status_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_storage_version_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_storage_version_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersion', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_storage_version(self, name, body, **kwargs): # noqa: E501
"""replace_storage_version # noqa: E501
replace the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_version(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param V1alpha1StorageVersion body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_storage_version_with_http_info(name, body, **kwargs) # noqa: E501
def replace_storage_version_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_storage_version # noqa: E501
replace the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_version_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param V1alpha1StorageVersion body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_storage_version" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_storage_version`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_storage_version`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersion', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_storage_version_status(self, name, body, **kwargs): # noqa: E501
"""replace_storage_version_status # noqa: E501
replace status of the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_version_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param V1alpha1StorageVersion body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1StorageVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_storage_version_status_with_http_info(name, body, **kwargs) # noqa: E501
def replace_storage_version_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_storage_version_status # noqa: E501
replace status of the specified StorageVersion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_version_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the StorageVersion (required)
:param V1alpha1StorageVersion body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_storage_version_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_storage_version_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_storage_version_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1StorageVersion', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| InternalApiserverV1alpha1Api |
python | kamyu104__LeetCode-Solutions | Python/count-number-of-nice-subarrays.py | {
"start": 29,
"end": 608
} | class ____(object):
def numberOfSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def atMost(nums, k):
result, left, count = 0, 0, 0
for right, x in enumerate(nums):
count += x%2
while count > k:
count -= nums[left]%2
left += 1
result += right-left+1
return result
return atMost(nums, k) - atMost(nums, k-1)
# Time: O(n)
# Space: O(k)
import collections
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 48735,
"end": 49276
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("project_column_id", "content_id", "note", "client_mutation_id")
project_column_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="projectColumnId"
)
content_id = sgqlc.types.Field(ID, graphql_name="contentId")
note = sgqlc.types.Field(String, graphql_name="note")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| AddProjectCardInput |
python | kamyu104__LeetCode-Solutions | Python/construct-binary-tree-from-preorder-and-postorder-traversal.py | {
"start": 154,
"end": 766
} | class ____(object):
def constructFromPrePost(self, pre, post):
"""
:type pre: List[int]
:type post: List[int]
:rtype: TreeNode
"""
stack = [TreeNode(pre[0])]
j = 0
for i in xrange(1, len(pre)):
node = TreeNode(pre[i])
while stack[-1].val == post[j]:
stack.pop()
j += 1
if not stack[-1].left:
stack[-1].left = node
else:
stack[-1].right = node
stack.append(node)
return stack[0]
# Time: O(n)
# Space: O(n)
| Solution |
python | huggingface__transformers | src/transformers/models/jamba/modeling_jamba.py | {
"start": 14842,
"end": 27737
} | class ____(nn.Module):
"""
Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
"""
def __init__(self, config: JambaConfig, layer_idx):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = config.mamba_expand * config.hidden_size
self.time_step_rank = config.mamba_dt_rank
self.use_conv_bias = config.mamba_conv_bias
self.use_bias = config.mamba_proj_bias
self.conv1d = nn.Conv1d(
in_channels=self.intermediate_size,
out_channels=self.intermediate_size,
bias=self.use_conv_bias,
kernel_size=self.conv_kernel_size,
groups=self.intermediate_size,
padding=self.conv_kernel_size - 1,
)
self.activation = config.hidden_act
self.act = ACT2FN[config.hidden_act]
self.use_fast_kernels = config.use_mamba_kernels
# projection of the input hidden states
self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias)
# selective projection used to make dt, B and C input dependent
self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
# time step projection (discretization)
self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)
# S4D real initialization. These are not discretized!
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
A = torch.arange(1, self.ssm_state_size + 1)[None, :]
A = A.expand(self.intermediate_size, -1).contiguous()
self.A_log = nn.Parameter(torch.log(A))
self.D = nn.Parameter(torch.ones(self.intermediate_size))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)
self.dt_layernorm = JambaRMSNorm(self.time_step_rank, eps=config.rms_norm_eps)
self.b_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps)
self.c_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps)
if not is_fast_path_available:
logger.warning_once(
"The fast path is not available because on of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
" is None. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d. If you want to use the naive implementation, set `use_mamba_kernels=False` in the model config"
)
def cuda_kernels_forward(
self,
hidden_states: torch.Tensor,
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
batch_size, seq_len, _ = hidden_states.shape
use_precomputed_states = (
cache_params is not None
and cache_params.has_previous_state
and seq_len == 1
and cache_params.conv_states[self.layer_idx].shape[0]
== cache_params.ssm_states[self.layer_idx].shape[0]
== batch_size
)
# 1. Gated MLP's linear projection
projected_states = self.in_proj(hidden_states).transpose(1, 2)
# We can't use `mamba_inner_fn` even if in training and without cache params because we have the
# inner layernorms which isn't supported by this fused kernel
hidden_states, gate = projected_states.chunk(2, dim=1)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 2. Convolution sequence transformation
conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
if use_precomputed_states:
hidden_states = causal_conv1d_update(
hidden_states.squeeze(-1),
cache_params.conv_states[self.layer_idx],
conv_weights,
self.conv1d.bias,
self.activation,
)
hidden_states = hidden_states.unsqueeze(-1)
else:
if cache_params is not None:
conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_states)
hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 3. State Space Model sequence transformation
# 3.a. input varying initialization of time_step, B and C
ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
time_step, B, C = torch.split(
ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
)
time_step = self.dt_layernorm(time_step)
B = self.b_layernorm(B)
C = self.c_layernorm(C)
# Here we need to apply dt_proj without the bias, as the bias is added in the selective scan kernel.
# This is a hack to apply dt_proj while still using the forward pass of `torch.nn.Linear`, which is needed
# in order to make quantization work. Quantization code replaces `torch.nn.Linear` layers with quantized
# linear layers, and requires to call the forward pass directly.
# Quantized model can't work with the original code:
# ```discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)```
time_proj_bias = self.dt_proj.bias.data
with torch.no_grad():
self.dt_proj.bias.data = torch.zeros_like(self.dt_proj.bias.data)
discrete_time_step = self.dt_proj(time_step).transpose(1, 2)
with torch.no_grad():
self.dt_proj.bias.data = time_proj_bias
A = -torch.exp(self.A_log.float())
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
time_proj_bias = time_proj_bias.float() if time_proj_bias is not None else None
if use_precomputed_states:
scan_outputs = selective_state_update(
cache_params.ssm_states[self.layer_idx],
hidden_states[..., 0],
discrete_time_step[..., 0],
A,
B[:, 0],
C[:, 0],
self.D,
gate[..., 0],
time_proj_bias,
dt_softplus=True,
).unsqueeze(-1)
else:
scan_outputs, ssm_state = selective_scan_fn(
hidden_states,
discrete_time_step,
A,
B.transpose(1, 2),
C.transpose(1, 2),
self.D.float(),
gate,
time_proj_bias,
delta_softplus=True,
return_last_state=True,
)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
# 4. Final linear projection
contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
return contextualized_states
# fmt: off
def slow_forward(self, input_states, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, attention_mask: Optional[torch.LongTensor] = None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
# 1. Gated MLP's linear projection
projected_states = self.in_proj(input_states).transpose(1, 2)
hidden_states, gate = projected_states.chunk(2, dim=1)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
use_cache = isinstance(cache_params, HybridMambaAttentionDynamicCache)
# 2. Convolution sequence transformation
if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size:
if self.training:
# In training mode, we don't want to perform in-place operations on ssm_state so we can compute the backwards pass
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
else:
ssm_state = cache_params.ssm_states[self.layer_idx]
ssm_state = ssm_state.to(hidden_states.device)
if cache_params.has_previous_state and seq_len == 1 and \
cache_params.conv_states[self.layer_idx].shape[0] == batch_size:
conv_state = cache_params.conv_states[self.layer_idx]
conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
conv_state[:, :, -1] = hidden_states[:, :, 0]
cache_params.conv_states[self.layer_idx] = conv_state
hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)
else:
conv_state = nn.functional.pad(
hidden_states,
(self.conv_kernel_size - hidden_states.shape[-1], 0)
)
cache_params.conv_states[self.layer_idx] = conv_state
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
else:
ssm_state = torch.zeros(
(batch_size, self.intermediate_size, self.ssm_state_size),
device=hidden_states.device, dtype=dtype
)
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 3. State Space Model sequence transformation
# 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
time_step, B, C = torch.split(
ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
)
time_step = self.dt_layernorm(time_step)
B = self.b_layernorm(B)
C = self.c_layernorm(C)
discrete_time_step = self.dt_proj(time_step)
discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2)
# 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
A = -torch.exp(self.A_log.float())
discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None])
discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float()
deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
scan_outputs = []
for i in range(seq_len):
ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1))
scan_outputs.append(scan_output[:, :, 0])
scan_output = torch.stack(scan_outputs, dim=-1)
scan_output = scan_output + (hidden_states * self.D[None, :, None])
scan_output = (scan_output * self.act(gate))
if use_cache:
cache_params.ssm_states[self.layer_idx] = ssm_state
# 4. Final linear projection
contextualized_states = self.out_proj(scan_output.transpose(1, 2))
return contextualized_states
# fmt: on
def forward(
self,
hidden_states,
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
if self.use_fast_kernels:
if not is_fast_path_available or "cuda" not in self.x_proj.weight.device.type:
raise ValueError(
"Fast Mamba kernels are not available. Make sure to they are installed and that the mamba module is on a CUDA device"
)
return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask)
return self.slow_forward(hidden_states, cache_params, attention_mask)
| JambaMambaMixer |
python | spack__spack | lib/spack/spack/modules/common.py | {
"start": 37685,
"end": 37858
} | class ____(AttributeError, ModulesError):
"""Raised if the attribute ``hide_cmd_format`` has not been specified
in the derived classes.
"""
| HideCmdFormatNotDefined |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.