language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py | {
"start": 9279,
"end": 20807
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.String)
key = graphene.NonNull(GrapheneAssetKey)
assetMaterializations = graphene.Field(
non_null_list(GrapheneMaterializationEvent),
partitions=graphene.List(graphene.NonNull(graphene.String)),
beforeTimestampMillis=graphene.String(),
afterTimestampMillis=graphene.String(),
limit=graphene.Int(),
)
assetObservations = graphene.Field(
non_null_list(GrapheneObservationEvent),
partitions=graphene.List(graphene.NonNull(graphene.String)),
beforeTimestampMillis=graphene.String(),
afterTimestampMillis=graphene.String(),
limit=graphene.Int(),
)
assetEventHistory = graphene.Field(
graphene.NonNull(GrapheneAssetResultEventHistoryConnection),
partitions=graphene.List(graphene.NonNull(graphene.String)),
beforeTimestampMillis=graphene.String(),
afterTimestampMillis=graphene.String(),
limit=graphene.NonNull(graphene.Int),
eventTypeSelectors=graphene.NonNull(
graphene.List(graphene.NonNull(GrapheneAssetEventHistoryEventTypeSelector))
),
cursor=graphene.String(),
)
definition = graphene.Field("dagster_graphql.schema.asset_graph.GrapheneAssetNode")
latestEventSortKey = graphene.Field(graphene.ID)
assetHealth = graphene.Field(GrapheneAssetHealth)
latestMaterializationTimestamp = graphene.Float()
hasDefinitionOrRecord = graphene.NonNull(graphene.Boolean)
latestFailedToMaterializeTimestamp = graphene.Float()
freshnessStatusChangedTimestamp = graphene.Float()
class Meta:
name = "Asset"
def __init__(self, key):
self._asset_key = key
super().__init__(
key=GrapheneAssetKey(path=key.path),
)
def resolve_id(self, _) -> str:
return self._asset_key.to_string()
def resolve_definition(self, graphene_info: ResolveInfo) -> Optional["GrapheneAssetNode"]:
from dagster_graphql.schema.asset_graph import GrapheneAssetNode
remote_asset_node = (
graphene_info.context.asset_graph.get(self._asset_key)
if graphene_info.context.asset_graph.has(self._asset_key)
else None
)
return GrapheneAssetNode(remote_asset_node) if remote_asset_node else None
async def resolve_assetMaterializations(
self,
graphene_info: ResolveInfo,
partitions: Optional[Sequence[str]] = None,
beforeTimestampMillis: Optional[str] = None,
afterTimestampMillis: Optional[str] = None,
limit: Optional[int] = None,
) -> Sequence[GrapheneMaterializationEvent]:
from dagster_graphql.implementation.fetch_assets import get_asset_materializations
before_timestamp = parse_timestamp(beforeTimestampMillis)
after_timestamp = parse_timestamp(afterTimestampMillis)
if limit == 1 and not partitions and not before_timestamp and not after_timestamp:
record = await AssetRecord.gen(graphene_info.context, self._asset_key)
latest_materialization_event = (
record.asset_entry.last_materialization if record else None
)
if not latest_materialization_event:
return []
return [GrapheneMaterializationEvent(event=latest_materialization_event)]
events = get_asset_materializations(
graphene_info,
self._asset_key,
partitions=partitions,
before_timestamp=before_timestamp,
after_timestamp=after_timestamp,
limit=limit,
)
return [GrapheneMaterializationEvent(event=event) for event in events]
def resolve_assetEventHistory(
self,
graphene_info: ResolveInfo,
eventTypeSelectors: Sequence[GrapheneAssetEventHistoryEventTypeSelector],
limit: Optional[int],
partitions: Optional[Sequence[str]] = None,
beforeTimestampMillis: Optional[str] = None,
afterTimestampMillis: Optional[str] = None,
cursor: Optional[str] = None,
) -> GrapheneAssetResultEventHistoryConnection:
from dagster_graphql.implementation.fetch_assets import (
get_asset_failed_to_materialize_event_records,
get_asset_materialization_event_records,
get_asset_observation_event_records,
)
before_timestamp = parse_timestamp(beforeTimestampMillis)
after_timestamp = parse_timestamp(afterTimestampMillis)
failure_events = []
success_events = []
observation_events = []
if GrapheneAssetEventHistoryEventTypeSelector.FAILED_TO_MATERIALIZE in eventTypeSelectors:
failure_events = [
(record.storage_id, GrapheneFailedToMaterializeEvent(event=record.event_log_entry))
for record in get_asset_failed_to_materialize_event_records(
graphene_info,
self._asset_key,
partitions=partitions,
before_timestamp=before_timestamp,
after_timestamp=after_timestamp,
limit=limit,
cursor=cursor,
)
]
if GrapheneAssetEventHistoryEventTypeSelector.MATERIALIZATION in eventTypeSelectors:
success_events = [
(record.storage_id, GrapheneMaterializationEvent(event=record.event_log_entry))
for record in get_asset_materialization_event_records(
graphene_info,
self._asset_key,
partitions=partitions,
before_timestamp=before_timestamp,
after_timestamp=after_timestamp,
limit=limit,
cursor=cursor,
)
]
if GrapheneAssetEventHistoryEventTypeSelector.OBSERVATION in eventTypeSelectors:
observation_events = [
(record.storage_id, GrapheneObservationEvent(event=record.event_log_entry))
for record in get_asset_observation_event_records(
graphene_info,
self._asset_key,
partitions=partitions,
before_timestamp=before_timestamp,
after_timestamp=after_timestamp,
limit=limit,
cursor=cursor,
)
]
all_events_tuples = failure_events + success_events + observation_events
sorted_limited_event_tuples = sorted(
all_events_tuples, key=lambda event_tuple: event_tuple[0], reverse=True
)[:limit]
new_cursor = (
EventLogCursor.from_storage_id(sorted_limited_event_tuples[-1][0]).to_string()
if sorted_limited_event_tuples
else EventLogCursor.from_storage_id(-1).to_string()
)
return GrapheneAssetResultEventHistoryConnection(
results=[event for _, event in sorted_limited_event_tuples],
cursor=new_cursor,
)
def resolve_assetObservations(
self,
graphene_info: ResolveInfo,
partitions: Optional[Sequence[str]] = None,
beforeTimestampMillis: Optional[str] = None,
afterTimestampMillis: Optional[str] = None,
limit: Optional[int] = None,
) -> Sequence[GrapheneObservationEvent]:
from dagster_graphql.implementation.fetch_assets import get_asset_observations
before_timestamp = parse_timestamp(beforeTimestampMillis)
after_timestamp = parse_timestamp(afterTimestampMillis)
return [
GrapheneObservationEvent(event=event)
for event in get_asset_observations(
graphene_info,
self._asset_key,
partitions=partitions,
before_timestamp=before_timestamp,
after_timestamp=after_timestamp,
limit=limit,
)
]
async def resolve_latestEventSortKey(self, graphene_info):
asset_record = await AssetRecord.gen(graphene_info.context, self._asset_key)
if asset_record:
return asset_record.asset_entry.last_event_storage_id
return None
def resolve_assetHealth(self, graphene_info: ResolveInfo) -> Optional[GrapheneAssetHealth]:
if not graphene_info.context.instance.dagster_asset_health_queries_supported():
return None
return GrapheneAssetHealth(
asset_key=self._asset_key,
dynamic_partitions_loader=graphene_info.context.dynamic_partitions_loader,
)
async def resolve_hasDefinitionOrRecord(self, graphene_info: ResolveInfo) -> bool:
return (
graphene_info.context.asset_graph.has(self._asset_key)
or await AssetRecord.gen(graphene_info.context, self._asset_key) is not None
)
async def resolve_latestMaterializationTimestamp(
self, graphene_info: ResolveInfo
) -> Optional[float]:
min_materialization_state = await MinimalAssetMaterializationHealthState.gen(
graphene_info.context, self._asset_key
)
if min_materialization_state is not None:
return (
min_materialization_state.latest_materialization_timestamp
* 1000 # FE prefers timestamp in milliseconds
if min_materialization_state.latest_materialization_timestamp
else None
)
record = await AssetRecord.gen(graphene_info.context, self._asset_key)
latest_materialization_event = record.asset_entry.last_materialization if record else None
return (
latest_materialization_event.timestamp * 1000 # FE prefers timestamp in milliseconds
if latest_materialization_event
else None
)
async def resolve_latestFailedToMaterializeTimestamp(
self, graphene_info: ResolveInfo
) -> Optional[float]:
materialization_state = await MinimalAssetMaterializationHealthState.gen(
graphene_info.context, self._asset_key
)
if materialization_state is not None:
ts = materialization_state.latest_failed_to_materialize_timestamp
else:
record = await AssetRecord.gen(graphene_info.context, self._asset_key)
latest_failed_to_materialize_event = (
record.asset_entry.last_failed_to_materialize_entry if record else None
)
ts = (
latest_failed_to_materialize_event.timestamp
if latest_failed_to_materialize_event
else None
)
return ts * 1000 if ts else None # FE prefers timestamp in milliseconds
async def resolve_freshnessStatusChangedTimestamp(
self, graphene_info: ResolveInfo
) -> Optional[float]:
freshness_state = await AssetFreshnessHealthState.gen(
graphene_info.context, self._asset_key
)
if freshness_state is not None:
ts = freshness_state.updated_timestamp
else:
freshness_state_record = graphene_info.context.instance.get_freshness_state_records(
[self._asset_key]
).get(self._asset_key)
ts = freshness_state_record.updated_at.timestamp() if freshness_state_record else None
return ts * 1000 if ts else None # FE prefers timestamp in milliseconds
| GrapheneAsset |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/containers.py | {
"start": 34259,
"end": 37087
} | class ____:
"""
Float for use in a :class:`.FloatContainer`.
Except for the `content` parameter, all other options are optional.
:param content: :class:`.Container` instance.
:param width: :class:`.Dimension` or callable which returns a :class:`.Dimension`.
:param height: :class:`.Dimension` or callable which returns a :class:`.Dimension`.
:param left: Distance to the left edge of the :class:`.FloatContainer`.
:param right: Distance to the right edge of the :class:`.FloatContainer`.
:param top: Distance to the top of the :class:`.FloatContainer`.
:param bottom: Distance to the bottom of the :class:`.FloatContainer`.
:param attach_to_window: Attach to the cursor from this window, instead of
the current window.
:param hide_when_covering_content: Hide the float when it covers content underneath.
:param allow_cover_cursor: When `False`, make sure to display the float
below the cursor. Not on top of the indicated position.
:param z_index: Z-index position. For a Float, this needs to be at least
one. It is relative to the z_index of the parent container.
:param transparent: :class:`.Filter` indicating whether this float needs to be
drawn transparently.
"""
def __init__(
self,
content: AnyContainer,
top: int | None = None,
right: int | None = None,
bottom: int | None = None,
left: int | None = None,
width: int | Callable[[], int] | None = None,
height: int | Callable[[], int] | None = None,
xcursor: bool = False,
ycursor: bool = False,
attach_to_window: AnyContainer | None = None,
hide_when_covering_content: bool = False,
allow_cover_cursor: bool = False,
z_index: int = 1,
transparent: bool = False,
) -> None:
assert z_index >= 1
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.width = width
self.height = height
self.xcursor = xcursor
self.ycursor = ycursor
self.attach_to_window = (
to_window(attach_to_window) if attach_to_window else None
)
self.content = to_container(content)
self.hide_when_covering_content = hide_when_covering_content
self.allow_cover_cursor = allow_cover_cursor
self.z_index = z_index
self.transparent = to_filter(transparent)
def get_width(self) -> int | None:
if callable(self.width):
return self.width()
return self.width
def get_height(self) -> int | None:
if callable(self.height):
return self.height()
return self.height
def __repr__(self) -> str:
return f"Float(content={self.content!r})"
| Float |
python | django__django | django/contrib/flatpages/templatetags/flatpages.py | {
"start": 207,
"end": 3552
} | class ____(template.Node):
def __init__(self, context_name, starts_with=None, user=None):
self.context_name = context_name
if starts_with:
self.starts_with = template.Variable(starts_with)
else:
self.starts_with = None
if user:
self.user = template.Variable(user)
else:
self.user = None
def render(self, context):
if "request" in context:
site_pk = get_current_site(context["request"]).pk
else:
site_pk = settings.SITE_ID
flatpages = FlatPage.objects.filter(sites__id=site_pk)
# If a prefix was specified, add a filter
if self.starts_with:
flatpages = flatpages.filter(
url__startswith=self.starts_with.resolve(context)
)
# If the provided user is not authenticated, or no user
# was provided, filter the list to only public flatpages.
if self.user:
user = self.user.resolve(context)
if not user.is_authenticated:
flatpages = flatpages.filter(registration_required=False)
else:
flatpages = flatpages.filter(registration_required=False)
context[self.context_name] = flatpages
return ""
@register.tag
def get_flatpages(parser, token):
"""
Retrieve all flatpage objects available for the current site and
visible to the specific user (or visible to all users if no user is
specified). Populate the template context with them in a variable
whose name is defined by the ``as`` clause.
An optional ``for`` clause controls the user whose permissions are used in
determining which flatpages are visible.
An optional argument, ``starts_with``, limits the returned flatpages to
those beginning with a particular base URL. This argument can be a variable
or a string, as it resolves from the template context.
Syntax::
{% get_flatpages ['url_starts_with'] [for user] as context_name %}
Example usage::
{% get_flatpages as flatpages %}
{% get_flatpages for someuser as flatpages %}
{% get_flatpages '/about/' as about_pages %}
{% get_flatpages prefix as about_pages %}
{% get_flatpages '/about/' for someuser as about_pages %}
"""
bits = token.split_contents()
syntax_message = (
"%(tag_name)s expects a syntax of %(tag_name)s "
"['url_starts_with'] [for user] as context_name" % {"tag_name": bits[0]}
)
# Must have at 3-6 bits in the tag
if 3 <= len(bits) <= 6:
# If there's an even number of bits, there's no prefix
if len(bits) % 2 == 0:
prefix = bits[1]
else:
prefix = None
# The very last bit must be the context name
if bits[-2] != "as":
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
# If there are 5 or 6 bits, there is a user defined
if len(bits) >= 5:
if bits[-4] != "for":
raise template.TemplateSyntaxError(syntax_message)
user = bits[-3]
else:
user = None
return FlatpageNode(context_name, starts_with=prefix, user=user)
else:
raise template.TemplateSyntaxError(syntax_message)
| FlatpageNode |
python | ray-project__ray | python/ray/_private/ray_logging/logging_config.py | {
"start": 397,
"end": 649
} | class ____(ABC):
@abstractmethod
def get_supported_encodings(self) -> Set[str]:
raise NotImplementedError
@abstractmethod
def configure(self, logging_config: "LoggingConfig"):
raise NotImplementedError
| LoggingConfigurator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/missingSuper1.py | {
"start": 295,
"end": 328
} | class ____:
pass
@final
| ParentC |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/assets.py | {
"start": 4074,
"end": 4237
} | class ____(BaseModel):
"""Asset event collection response."""
asset_events: Iterable[AssetEventResponse]
total_entries: int
| AssetEventCollectionResponse |
python | PyCQA__pylint | tests/functional/a/attribute_defined_outside_init_py38.py | {
"start": 55,
"end": 165
} | class ____(unittest.IsolatedAsyncioTestCase):
async def asyncSetUp(self):
self.i = 42
| AsyncioTestCase |
python | ray-project__ray | doc/source/serve/doc_code/grpc_proxy/grpc_guide.py | {
"start": 7108,
"end": 9609
} | class ____:
def __init__(self):
self.preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
def __call__(self, image: Image):
input_tensor = self.preprocess(image)
return input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
image_downloader = ImageDownloader.bind()
data_preprocessor = DataPreprocessor.bind()
g2 = ImageClassifier.options(name="grpc-image-classifier").bind(
image_downloader, data_preprocessor
)
# __end_model_composition_deployment__
# __begin_model_composition_deploy__
app2 = "app2"
serve.run(target=g2, name=app2, route_prefix=f"/{app2}")
# __end_model_composition_deploy__
# __begin_model_composition_client__
import grpc
from user_defined_protos_pb2_grpc import ImageClassificationServiceStub
from user_defined_protos_pb2 import ImageData
channel = grpc.insecure_channel("localhost:9000")
stub = ImageClassificationServiceStub(channel)
request = ImageData(url="https://github.com/pytorch/hub/raw/master/images/dog.jpg")
metadata = (("application", "app2"),) # Make sure application metadata is passed.
response, call = stub.Predict.with_call(request=request, metadata=metadata)
print(f"status code: {call.code()}") # grpc.StatusCode.OK
print(f"Classes: {response.classes}") # ['Samoyed', ...]
print(f"Probabilities: {response.probabilities}") # [0.8846230506896973, ...]
# __end_model_composition_client__
# __begin_error_handle__
import grpc
from user_defined_protos_pb2_grpc import UserDefinedServiceStub
from user_defined_protos_pb2 import UserDefinedMessage
channel = grpc.insecure_channel("localhost:9000")
stub = UserDefinedServiceStub(channel)
request = UserDefinedMessage(name="foo", num=30, origin="bar")
try:
response = stub.__call__(request=request)
except grpc.RpcError as rpc_error:
print(f"status code: {rpc_error.code()}") # StatusCode.NOT_FOUND
print(f"details: {rpc_error.details()}") # Application metadata not set...
# __end_error_handle__
# __begin_grpc_context_define_app__
from user_defined_protos_pb2 import UserDefinedMessage, UserDefinedResponse
from ray import serve
from ray.serve.grpc_util import RayServegRPCContext
import grpc
from typing import Tuple
@serve.deployment
| DataPreprocessor |
python | python-pillow__Pillow | src/PIL/DdsImagePlugin.py | {
"start": 1258,
"end": 1417
} | class ____(IntFlag):
ALPHAPIXELS = 0x1
ALPHA = 0x2
FOURCC = 0x4
PALETTEINDEXED8 = 0x20
RGB = 0x40
LUMINANCE = 0x20000
# dxgiformat.h
| DDPF |
python | aimacode__aima-python | csp.py | {
"start": 28186,
"end": 33598
} | class ____(CSP):
"""
A Sudoku problem.
The box grid is a 3x3 array of boxes, each a 3x3 array of cells.
Each cell holds a digit in 1..9. In each box, all digits are
different; the same for each row and column as a 9x9 grid.
>>> e = Sudoku(easy1)
>>> e.display(e.infer_assignment())
. . 3 | . 2 . | 6 . .
9 . . | 3 . 5 | . . 1
. . 1 | 8 . 6 | 4 . .
------+-------+------
. . 8 | 1 . 2 | 9 . .
7 . . | . . . | . . 8
. . 6 | 7 . 8 | 2 . .
------+-------+------
. . 2 | 6 . 9 | 5 . .
8 . . | 2 . 3 | . . 9
. . 5 | . 1 . | 3 . .
>>> AC3(e) # doctest: +ELLIPSIS
(True, ...)
>>> e.display(e.infer_assignment())
4 8 3 | 9 2 1 | 6 5 7
9 6 7 | 3 4 5 | 8 2 1
2 5 1 | 8 7 6 | 4 9 3
------+-------+------
5 4 8 | 1 3 2 | 9 7 6
7 2 9 | 5 6 4 | 1 3 8
1 3 6 | 7 9 8 | 2 4 5
------+-------+------
3 7 2 | 6 8 9 | 5 1 4
8 1 4 | 2 5 3 | 7 6 9
6 9 5 | 4 1 7 | 3 8 2
>>> h = Sudoku(harder1)
>>> backtracking_search(h, select_unassigned_variable=mrv, inference=forward_checking) is not None
True
"""
R3 = _R3
Cell = _CELL
bgrid = _BGRID
boxes = _BOXES
rows = _ROWS
cols = _COLS
neighbors = _NEIGHBORS
def __init__(self, grid):
"""Build a Sudoku problem from a string representing the grid:
the digits 1-9 denote a filled cell, '.' or '0' an empty one;
other characters are ignored."""
squares = iter(re.findall(r'\d|\.', grid))
domains = {var: [ch] if ch in '123456789' else '123456789'
for var, ch in zip(flatten(self.rows), squares)}
for _ in squares:
raise ValueError("Not a Sudoku grid", grid) # Too many squares
CSP.__init__(self, None, domains, self.neighbors, different_values_constraint)
def display(self, assignment):
def show_box(box): return [' '.join(map(show_cell, row)) for row in box]
def show_cell(cell): return str(assignment.get(cell, '.'))
def abut(lines1, lines2): return list(
map(' | '.join, list(zip(lines1, lines2))))
print('\n------+-------+------\n'.join(
'\n'.join(reduce(
abut, map(show_box, brow))) for brow in self.bgrid))
# ______________________________________________________________________________
# The Zebra Puzzle
def Zebra():
"""Return an instance of the Zebra Puzzle."""
Colors = 'Red Yellow Blue Green Ivory'.split()
Pets = 'Dog Fox Snails Horse Zebra'.split()
Drinks = 'OJ Tea Coffee Milk Water'.split()
Countries = 'Englishman Spaniard Norwegian Ukranian Japanese'.split()
Smokes = 'Kools Chesterfields Winston LuckyStrike Parliaments'.split()
variables = Colors + Pets + Drinks + Countries + Smokes
domains = {}
for var in variables:
domains[var] = list(range(1, 6))
domains['Norwegian'] = [1]
domains['Milk'] = [3]
neighbors = parse_neighbors("""Englishman: Red;
Spaniard: Dog; Kools: Yellow; Chesterfields: Fox;
Norwegian: Blue; Winston: Snails; LuckyStrike: OJ;
Ukranian: Tea; Japanese: Parliaments; Kools: Horse;
Coffee: Green; Green: Ivory""")
for type in [Colors, Pets, Drinks, Countries, Smokes]:
for A in type:
for B in type:
if A != B:
if B not in neighbors[A]:
neighbors[A].append(B)
if A not in neighbors[B]:
neighbors[B].append(A)
def zebra_constraint(A, a, B, b, recurse=0):
same = (a == b)
next_to = abs(a - b) == 1
if A == 'Englishman' and B == 'Red':
return same
if A == 'Spaniard' and B == 'Dog':
return same
if A == 'Chesterfields' and B == 'Fox':
return next_to
if A == 'Norwegian' and B == 'Blue':
return next_to
if A == 'Kools' and B == 'Yellow':
return same
if A == 'Winston' and B == 'Snails':
return same
if A == 'LuckyStrike' and B == 'OJ':
return same
if A == 'Ukranian' and B == 'Tea':
return same
if A == 'Japanese' and B == 'Parliaments':
return same
if A == 'Kools' and B == 'Horse':
return next_to
if A == 'Coffee' and B == 'Green':
return same
if A == 'Green' and B == 'Ivory':
return a - 1 == b
if recurse == 0:
return zebra_constraint(B, b, A, a, 1)
if ((A in Colors and B in Colors) or
(A in Pets and B in Pets) or
(A in Drinks and B in Drinks) or
(A in Countries and B in Countries) or
(A in Smokes and B in Smokes)):
return not same
raise Exception('error')
return CSP(variables, domains, neighbors, zebra_constraint)
def solve_zebra(algorithm=min_conflicts, **args):
z = Zebra()
ans = algorithm(z, **args)
for h in range(1, 6):
print('House', h, end=' ')
for (var, val) in ans.items():
if val == h:
print(var, end=' ')
print()
return ans['Zebra'], ans['Water'], z.nassigns, ans
# ______________________________________________________________________________
# n-ary Constraint Satisfaction Problem
| Sudoku |
python | realpython__materials | python-first-steps/classes.py | {
"start": 0,
"end": 214
} | class ____:
def __init__(self, name, age):
self.name = name
self.age = age
def bark(self):
return "Woof! Woof!"
fido = Dog("Fido", 3)
print(fido.name, fido.age)
print(fido.bark())
| Dog |
python | pytorch__pytorch | test/quantization/core/test_quantized_op.py | {
"start": 1659,
"end": 6877
} | class ____(NamedTuple):
binary_attr : str = "none"
alpha : float = 1.0
unary_attr : str = "none"
scalars : list = []
algorithm : str = ""
# Make sure we won't have overflows from vpmaddubsw instruction used in FBGEMM.
# On the current Intel x86 architecture, we need to utilize vpmaddubsw instruction
# for the 8-bit int multiplication. This instruction vertically multiplies each
# unsigned 8-bit integer from a with the corresponding signed 8-bit integer from
# b, producing intermediate signed 16-bit integers. This function modifies the
# weights to eliminate the overflow on the signed 16-bit integers.
def avoid_vpmaddubsw_overflow_linear(
batch_size, input_channels, output_channels, X, X_min, X_max, W, W_min, W_max
):
if Version(np.__version__) >= Version("2.1"):
raise unittest.SkipTest("numpy 2.1 overflow error")
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 > (1 << 15) - 1:
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
# Reference quantized Linear operator
def qlinear_ref(X_q, X_scale, X_zp, W_q, W_scale, W_zp, b_q, Y_scale, Y_zp, dtype=np.uint8):
X_q = np.reshape(X_q, (-1, X_q.shape[X_q.ndim - 1]))
row_offsets_ref = X_q.sum(axis=1).astype(np.int32).reshape((-1, 1))
col_offsets_ref = W_q.sum(axis=1).astype(np.int32).reshape((1, -1))
assert X_q.ndim == 2
batch_size, input_channels = X_q.shape
Prod_XqWq_ref = (
np.matmul(X_q.astype(np.int32), W_q.astype(np.int32).T)
- W_zp * row_offsets_ref
- X_zp * col_offsets_ref
+ input_channels * X_zp * W_zp
)
if b_q is not None:
Prod_XqWq_ref += b_q
Y_q_ref = _quantize(Prod_XqWq_ref, Y_scale / (X_scale * W_scale), Y_zp, dtype=dtype)
return Y_q_ref
"""Computes the output shape given pooling parameters."""
def pool_output_shape(input_size, kernel_size, padding, stride,
dilation, ceiling_mode=False):
if stride is None:
stride = kernel_size
output_size = (
(input_size + 2 * padding - dilation * (kernel_size - 1) - 1
+ (stride - 1 if ceiling_mode else 0)) // stride + 1)
if (ceiling_mode and
((output_size - 1) * stride >= input_size + padding)):
output_size -= 1
return output_size
"""
Util for creating a random tensor and quantization params when Hypothesis
is undesirable.
"""
def _get_random_tensor_and_q_params(shapes, rand_scale, torch_type):
X = (torch.rand(*shapes, dtype=torch.float) - 0.5) * rand_scale
# Calculate reasonable quantization params
min_val = torch.min(X)
max_val = torch.max(X)
if torch_type == torch.qint32:
X_zero_point = int(torch.randint(-1 * (2 ** 31), 2 ** 31 - 1, (1,)))
num_bins = 2 ** 32
X_scale = float(max_val - min_val) / num_bins
elif torch_type == torch.qint8:
X_zero_point = int(torch.randint(-128, 127, (1,)))
num_bins = 2 ** 8
X_scale = float(max_val - min_val) / num_bins
else: # torch.quint8
X_zero_point = 127
num_bins = 2 ** 8
X_scale = float(max_val - min_val) / num_bins
if X_scale == 0:
X_scale = 1e-10
return X, X_scale, X_zero_point
def _quantize_fp8e4m3(t: torch.Tensor, channelwise: bool, scale: Optional[torch.Tensor] = None):
quant_max = torch.finfo(torch.float8_e4m3fn).max
eps = torch.Tensor([torch.finfo(torch.float32).eps])
if channelwise:
scale = scale or t.reshape(t.shape[0], -1).abs().max(-1)[0] / quant_max
scale = torch.max(scale, eps)
scale_reshape = scale.reshape((-1,) + (1,) * (t.dim() - 1))
qt = t / scale_reshape
else:
scale = scale or t.abs().max().reshape([1]) / quant_max
scale = torch.max(scale, eps) if isinstance(scale, torch.Tensor) else max(scale, eps.item())
qt = t / scale
# Clamp to avoid NaN. Convert in two steps to align with fp32 -> fp16 -> fp8
qt = qt.clamp(-448, 448).half().to(torch.float8_e4m3fn)
return qt, scale
def _dequantize_fp8e4m3(qt: torch.Tensor, scale: torch.Tensor):
dqt = qt.float()
if scale.numel() == 1:
# per tensor
dqt = dqt * scale
else:
# per channel
scale_reshape = scale.reshape((-1,) + (1,) * (qt.dim() - 1))
dqt = dqt * scale_reshape
return dqt
| PointwisePostOp |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gcs/source_gcs/cursor.py | {
"start": 246,
"end": 3166
} | class ____(DefaultFileBasedCursor):
@staticmethod
def get_file_uri(file: GCSUploadableRemoteFile) -> str:
file_uri = file.displayed_uri if file.displayed_uri else file.uri
return file_uri.split("?")[0]
def add_file(self, file: GCSUploadableRemoteFile) -> None:
uri = self.get_file_uri(file)
self._file_to_datetime_history[uri] = file.last_modified.strftime(self.DATE_TIME_FORMAT)
if len(self._file_to_datetime_history) > self.DEFAULT_MAX_HISTORY_SIZE:
# Get the earliest file based on its last modified date and its uri
oldest_file = self._compute_earliest_file_in_history()
if oldest_file:
del self._file_to_datetime_history[oldest_file.uri]
else:
raise Exception(
"The history is full but there is no files in the history. This should never happen and might be indicative of a bug in the CDK."
)
def _should_sync_file(self, file: GCSUploadableRemoteFile, logger: logging.Logger) -> bool:
uri = self.get_file_uri(file)
if uri in self._file_to_datetime_history:
# If the file's uri is in the history, we should sync the file if it has been modified since it was synced
updated_at_from_history = datetime.strptime(self._file_to_datetime_history[uri], self.DATE_TIME_FORMAT)
if file.last_modified < updated_at_from_history:
logger.warning(
f"The file {uri}'s last modified date is older than the last time it was synced. This is unexpected. Skipping the file."
)
else:
return file.last_modified > updated_at_from_history
return file.last_modified > updated_at_from_history
if self._is_history_full():
if self._initial_earliest_file_in_history is None:
return True
if file.last_modified > self._initial_earliest_file_in_history.last_modified:
# If the history is partial and the file's datetime is strictly greater than the earliest file in the history,
# we should sync it
return True
elif file.last_modified == self._initial_earliest_file_in_history.last_modified:
# If the history is partial and the file's datetime is equal to the earliest file in the history,
# we should sync it if its uri is strictly greater than the earliest file in the history
return uri > self._initial_earliest_file_in_history.uri
else:
# Otherwise, only sync the file if it has been modified since the start of the time window
return file.last_modified >= self.get_start_time()
else:
# The file is not in the history and the history is complete. We know we need to sync the file
return True
| Cursor |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/tests/test_generate.py | {
"start": 395,
"end": 1821
} | class ____:
@pytest.fixture()
def processed_schema(self):
sys.path.append(str(TEST_DIR / ".." / "scripts"))
from generate import preprocess_fn # noqa
path = TEST_DIR / "mock_schema.yaml"
with open(path, "r") as f:
mock_schema = yaml.safe_load(f)
processed_schema = preprocess_fn(mock_schema)
return processed_schema
def test_node_type_id(self, processed_schema):
new_cluster = processed_schema["components"]["schemas"]["NewCluster"]
node_type_id = new_cluster["properties"]["node_type_id"]
node_type_id_description = node_type_id["description"]
# description should be updated
assert node_type_id_description.endswith("`instance_pool_id` is specified.")
# node_type_id should be deleted from required
assert new_cluster["required"] == ["spark_version"]
def test_force_required_into_list(self, processed_schema):
new_cluster = processed_schema["components"]["schemas"]["GitSource"]
git_provider = new_cluster["properties"]["git_provider"]
assert git_provider["required"] == [True]
def test_items_to_have_type(self, processed_schema):
new_cluster = processed_schema["components"]["schemas"]["GitSource"]
totally_made_up = new_cluster["properties"]["totally_made_up"]
assert totally_made_up["items"] == {"type": "imagination"}
| TestPreprocessFn |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_imei.py | {
"start": 1842,
"end": 4527
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid IMEI (International Mobile Equipment Identity)."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"35686800-004141-20",
"3568680000414120",
"35686800-004141-20",
"354178036859789",
"35-686800-004141-8",
],
"some_other": [
"35686800-004141-20",
"3568680000414120",
"35686800-004141-20",
"354178036859789",
"35-417803-685978-1",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.to_be_valid_imei"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["python-stdnum"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidImei().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidImei |
python | huggingface__transformers | src/transformers/models/deberta/tokenization_deberta.py | {
"start": 1036,
"end": 8211
} | class ____(TokenizersBackend):
"""
Construct a "fast" DeBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import DebertaTokenizer
>>> tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
>>> tokenizer("Hello world")["input_ids"]
[1, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[1, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
The path to a tokenizer file to use instead of the vocab file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"[CLS]"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[SEP]"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (Deberta tokenizer detect beginning of words by the preceding space).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
def __init__(
self,
vocab_file=None,
vocab=None,
merges=None,
errors="replace",
bos_token="[CLS]",
eos_token="[SEP]",
sep_token="[SEP]",
cls_token="[CLS]",
unk_token="[UNK]",
pad_token="[PAD]",
mask_token="[MASK]",
add_prefix_space=False,
**kwargs,
):
self.vocab_file = vocab_file
self.add_prefix_space = add_prefix_space
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {
str(unk_token): 0,
str(cls_token): 1,
str(sep_token): 2,
str(pad_token): 3,
str(mask_token): 4,
}
if merges is not None and isinstance(merges, list) and len(merges) > 0:
self._merges = [tuple(m) if isinstance(m, list) else m for m in merges]
else:
self._merges = []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
unk_token=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
)
)
self._tokenizer.normalizer = None
self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
self._tokenizer.decoder = decoders.ByteLevel()
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=f"{self.cls_token} $A {self.sep_token}",
pair=f"{self.cls_token} $A {self.sep_token} {self.sep_token} $B {self.sep_token}",
special_tokens=[
(self.cls_token, self.cls_token_id),
(self.sep_token, self.sep_token_id),
],
)
self._post_init()
@property
def mask_token(self) -> str:
"""
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
Deberta tokenizer has a special mask token to be used in the fill-mask pipeline. The mask token will greedily
comprise the space before the *[MASK]*.
"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
def mask_token(self, value):
"""
Overriding the default behavior of the mask token to have it eat the space before it.
"""
# Mask token behave like a normal word, i.e. include the space before it
# So we set lstrip to True
value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
self._mask_token = value
__all__ = ["DebertaTokenizer"]
| DebertaTokenizer |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_cache_implementation.py | {
"start": 1857,
"end": 1950
} | class ____(GenericCache):
def new_entry(self, key, value):
return value
| ValueScored |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 52002,
"end": 56832
} | class ____(_TestNormBase):
# Define the part for 2d arrays separately, so we can subclass this
# and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
array = np.array
def test_matrix_empty(self):
assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
def test_matrix_return_type(self):
a = self.array([[1, 0, 1], [0, 1, 1]])
exact_types = np.typecodes['AllInteger']
# float32, complex64, float64, complex128 types are the only types
# allowed by `linalg`, which performs the matrix operations used
# within `norm`.
inexact_types = 'fdFD'
all_types = exact_types + inexact_types
for each_type in all_types:
at = a.astype(each_type)
an = norm(at, -np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', "divide by zero encountered", RuntimeWarning)
an = norm(at, -1)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
an = norm(at, 1)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 2)
self.check_dtype(at, an)
assert_almost_equal(an, 3.0**(1.0 / 2.0))
an = norm(at, -2)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
an = norm(at, np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 'fro')
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 'nuc')
self.check_dtype(at, an)
# Lower bar needed to support low precision floats.
# They end up being off by 1 in the 7th place.
np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
def test_matrix_2x2(self):
A = self.array([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84 ** 0.5)
assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 10.0)
assert_almost_equal(norm(A, inf), 12.0)
assert_almost_equal(norm(A, -inf), 4.0)
assert_almost_equal(norm(A, 1), 10.0)
assert_almost_equal(norm(A, -1), 6.0)
assert_almost_equal(norm(A, 2), 9.1231056256176615)
assert_almost_equal(norm(A, -2), 0.87689437438234041)
assert_raises(ValueError, norm, A, 'nofro')
assert_raises(ValueError, norm, A, -3)
assert_raises(ValueError, norm, A, 0)
def test_matrix_3x3(self):
# This test has been added because the 2x2 example
# happened to have equal nuclear norm and induced 1-norm.
# The 1/10 scaling factor accommodates the absolute tolerance
# used in assert_almost_equal.
A = (1 / 10) * \
self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
assert_almost_equal(norm(A, inf), 1.1)
assert_almost_equal(norm(A, -inf), 0.6)
assert_almost_equal(norm(A, 1), 1.0)
assert_almost_equal(norm(A, -1), 0.4)
assert_almost_equal(norm(A, 2), 0.88722940323461277)
assert_almost_equal(norm(A, -2), 0.19456584790481812)
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
# or `ord='nuc'` or any other string raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
assert_raises(ValueError, norm, [3, 4], 'test', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(AxisError, norm, B, None, 3)
assert_raises(AxisError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
| _TestNorm2D |
python | realpython__materials | python-namedtuple/typed_namedtuple_time.py | {
"start": 632,
"end": 997
} | class ____(NamedTuple):
x: int
y: int
z: int
namedtuple_time = average_time(PointNamedTuple(x=1, y=2, z=3), time_structure)
typed_namedtuple_time = average_time(
PointTypedNamedTuple(x=1, y=2, z=3), time_structure
)
print(f"namedtuple: {namedtuple_time:.2f} ns")
print(f"typing.NamedTuple: {typed_namedtuple_time:.2f} ns")
| PointTypedNamedTuple |
python | keras-team__keras | benchmarks/layer_benchmark/base_benchmark.py | {
"start": 3044,
"end": 9434
} | class ____:
def __init__(
self,
layer_name,
init_args,
input_shape,
flat_call_inputs=True,
jit_compile=True,
keras_layer=None,
tf_keras_layer=None,
):
self.layer_name = layer_name
_keras_layer_class = getattr(keras.layers, layer_name)
_tf_keras_layer_class = getattr(tf.keras.layers, layer_name)
if keras_layer is None:
# Sometimes you want to initialize the keras layer and tf_keras
# layer in a different way. For example, `Bidirectional` layer,
# which takes in `keras.layers.Layer` and
# `tf.keras.layer.Layer` separately.
self._keras_layer = _keras_layer_class(**init_args)
else:
self._keras_layer = keras_layer
if tf_keras_layer is None:
self._tf_keras_layer = _tf_keras_layer_class(**init_args)
else:
self._tf_keras_layer = tf_keras_layer
self.input_shape = input_shape
self._keras_model = self._build_keras_model(
input_shape, flat_call_inputs
)
self._tf_keras_model = self._build_tf_keras_model(
input_shape, flat_call_inputs
)
self._keras_model.compile(
loss="mse", optimizer="sgd", jit_compile=jit_compile
)
self._tf_keras_model.compile(
loss="mse", optimizer="sgd", jit_compile=jit_compile
)
self.flat_call_inputs = flat_call_inputs
self.jit_compile = jit_compile
self.input_shape = input_shape
def _build_keras_model(self, input_shape, flat_call_inputs=True):
inputs = []
if not isinstance(input_shape[0], (tuple, list)):
input_shape = [input_shape]
for shape in input_shape:
inputs.append(keras.Input(shape=shape))
if flat_call_inputs:
outputs = self._keras_layer(*inputs)
else:
outputs = self._keras_layer(inputs)
return keras.Model(inputs=inputs, outputs=outputs)
def _build_tf_keras_model(self, input_shape, flat_call_inputs=True):
inputs = []
if not isinstance(input_shape[0], (tuple, list)):
input_shape = [input_shape]
for shape in input_shape:
inputs.append(tf.keras.Input(shape=shape))
if flat_call_inputs:
outputs = self._tf_keras_layer(*inputs)
else:
outputs = self._tf_keras_layer(inputs)
return tf.keras.Model(inputs=inputs, outputs=outputs)
def benchmark_predict(self, num_samples, batch_size, data=None):
if data is None:
# Generate default data if not provided.
if isinstance(self.input_shape[0], (tuple, list)):
# The layer has multiple inputs.
data = []
for data_shape in self.input_shape:
data_shape = [num_samples] + list(data_shape)
data.append(np.random.normal(size=data_shape))
else:
data_shape = [num_samples] + list(self.input_shape)
data = np.random.normal(size=data_shape)
num_iterations = num_samples // batch_size - 1
callback = KerasCoreBenchmarkMetricsCallback(stop_batch=num_iterations)
tf_keras_callback = TFKerasBenchmarkMetricsCallback(
stop_batch=num_iterations
)
self._keras_model.predict(
data,
batch_size=batch_size,
callbacks=[callback],
)
self._tf_keras_model.predict(
data,
batch_size=batch_size,
callbacks=[tf_keras_callback],
)
keras_throughput = callback._callback.state["throughput"] * batch_size
tf_keras_throughput = (
tf_keras_callback._callback.state["throughput"] * batch_size
)
print(
f"Keras 3 throughput of forward pass of {self.layer_name}: "
f"{keras_throughput:.2f} samples/sec."
)
print(
f"TF Keras throughput of forward pass of {self.layer_name}: "
f"{tf_keras_throughput:.2f} samples/sec."
)
def benchmark_train(self, num_samples, batch_size, data=None, label=None):
if data is None:
# Generate default data if not provided.
if isinstance(self.input_shape[0], (tuple, list)):
# The layer has multiple inputs.
data = []
for data_shape in self.input_shape:
data_shape = [num_samples] + list(data_shape)
data.append(np.random.normal(size=data_shape))
else:
data_shape = [num_samples] + list(self.input_shape)
data = [np.random.normal(size=data_shape)]
if label is None:
# Generate default label if not provided.
if self.flat_call_inputs:
# Scale by a small factor to avoid zero gradients.
label = (
keras.backend.convert_to_numpy(self._keras_layer(*data))
* 1.001
)
else:
label = (
keras.backend.convert_to_numpy(self._keras_layer(data))
* 1.001
)
num_iterations = num_samples // batch_size - 1
callback = KerasCoreBenchmarkMetricsCallback(stop_batch=num_iterations)
tf_keras_callback = TFKerasBenchmarkMetricsCallback(
stop_batch=num_iterations
)
self._keras_model.fit(
data,
label,
batch_size=batch_size,
callbacks=[callback],
)
self._tf_keras_model.fit(
data,
label,
batch_size=batch_size,
callbacks=[tf_keras_callback],
)
keras_throughput = callback._callback.state["throughput"] * batch_size
tf_keras_throughput = (
tf_keras_callback._callback.state["throughput"] * batch_size
)
print(
f"Keras 3 throughput of forward & backward pass of "
f"{self.layer_name}: {keras_throughput:.2f} samples/sec."
)
print(
f"TF Keras throughput of forward & backward pass of "
f"{self.layer_name}: {tf_keras_throughput:.2f} samples/sec."
)
| LayerBenchmark |
python | scikit-learn__scikit-learn | sklearn/tree/_export.py | {
"start": 1736,
"end": 6214
} | class ____:
def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
@validate_params(
{
"decision_tree": [DecisionTreeClassifier, DecisionTreeRegressor],
"max_depth": [Interval(Integral, 0, None, closed="left"), None],
"feature_names": ["array-like", None],
"class_names": ["array-like", "boolean", None],
"label": [StrOptions({"all", "root", "none"})],
"filled": ["boolean"],
"impurity": ["boolean"],
"node_ids": ["boolean"],
"proportion": ["boolean"],
"rounded": ["boolean"],
"precision": [Interval(Integral, 0, None, closed="left"), None],
"ax": "no_validation", # delegate validation to matplotlib
"fontsize": [Interval(Integral, 0, None, closed="left"), None],
},
prefer_skip_nested_validation=True,
)
def plot_tree(
decision_tree,
*,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
impurity=True,
node_ids=False,
proportion=False,
rounded=False,
precision=3,
ax=None,
fontsize=None,
):
"""Plot a decision tree.
The sample counts that are shown are weighted with any sample_weights that
might be present.
The visualization is fit automatically to the size of the axis.
Use the ``figsize`` or ``dpi`` arguments of ``plt.figure`` to control
the size of the rendering.
Read more in the :ref:`User Guide <tree>`.
.. versionadded:: 0.21
Parameters
----------
decision_tree : decision tree regressor or classifier
The decision tree to be plotted.
max_depth : int, default=None
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : array-like of str, default=None
Names of each of the features.
If None, generic names will be used ("x[0]", "x[1]", ...).
class_names : array-like of str or True, default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, default='all'
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, default=False
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
impurity : bool, default=True
When set to ``True``, show the impurity at each node.
node_ids : bool, default=False
When set to ``True``, show the ID number on each node.
proportion : bool, default=False
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rounded : bool, default=False
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
precision : int, default=3
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
ax : matplotlib axis, default=None
Axes to plot to. If None, use current axis. Any previous content
is cleared.
fontsize : int, default=None
Size of text font. If None, determined automatically to fit figure.
Returns
-------
annotations : list of artists
List containing the artists for the annotation boxes making up the
tree.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.plot_tree(clf)
[...]
"""
check_is_fitted(decision_tree)
exporter = _MPLTreeExporter(
max_depth=max_depth,
feature_names=feature_names,
class_names=class_names,
label=label,
filled=filled,
impurity=impurity,
node_ids=node_ids,
proportion=proportion,
rounded=rounded,
precision=precision,
fontsize=fontsize,
)
return exporter.export(decision_tree, ax=ax)
| Sentinel |
python | davidhalter__parso | conftest.py | {
"start": 1696,
"end": 2675
} | class ____:
"""
Static Analysis cases lie in the static_analysis folder.
The tests also start with `#!`, like the goto_definition tests.
"""
def __init__(self, path):
self.path = path
self.name = os.path.basename(path)
match = re.search(r'python([\d.]+)\.py', self.name)
self.python_version = match and match.group(1)
def colllect_normalizer_tests(base_dir):
for f_name in os.listdir(base_dir):
if f_name.endswith(".py"):
path = os.path.join(base_dir, f_name)
yield NormalizerIssueCase(path)
def pytest_configure(config):
if config.option.logging:
root = logging.getLogger()
root.setLevel(logging.DEBUG)
#ch = logging.StreamHandler(sys.stdout)
#ch.setLevel(logging.DEBUG)
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#ch.setFormatter(formatter)
#root.addHandler(ch)
| NormalizerIssueCase |
python | boto__boto3 | tests/integration/test_s3.py | {
"start": 9351,
"end": 26474
} | class ____(unittest.TestCase):
"""Tests for the high level boto3.s3.transfer module."""
def setUp(self):
self.region = _DEFAULT_REGION
self.bucket_name = _SHARED_BUCKET
clear_out_bucket(self.bucket_name, self.region)
self.session = boto3.session.Session(region_name=self.region)
self.client = self.session.client('s3', self.region)
self.files = FileCreator()
self.progress = 0
def tearDown(self):
self.files.remove_all()
def delete_object(self, key):
self.client.delete_object(Bucket=self.bucket_name, Key=key)
def object_exists(self, key):
waiter = self.client.get_waiter('object_exists')
waiter.wait(Bucket=self.bucket_name, Key=key)
return True
def wait_until_object_exists(
self, key_name, extra_params=None, min_successes=3
):
waiter = self.client.get_waiter('object_exists')
params = {'Bucket': self.bucket_name, 'Key': key_name}
if extra_params is not None:
params.update(extra_params)
for _ in range(min_successes):
waiter.wait(**params)
def create_s3_transfer(self, config=None):
return boto3.s3.transfer.S3Transfer(self.client, config=config)
def assert_has_public_read_acl(self, response):
grants = response['Grants']
public_read = [
g['Grantee'].get('URI', '')
for g in grants
if g['Permission'] == 'READ'
]
self.assertIn('groups/global/AllUsers', public_read[0])
def test_copy(self):
self.client.put_object(
Bucket=self.bucket_name, Key='foo', Body='beach'
)
self.addCleanup(self.delete_object, 'foo')
self.client.copy(
CopySource={'Bucket': self.bucket_name, 'Key': 'foo'},
Bucket=self.bucket_name,
Key='bar',
)
self.addCleanup(self.delete_object, 'bar')
self.object_exists('bar')
def test_upload_fileobj(self):
fileobj = io.BytesIO(b'foo')
self.client.upload_fileobj(
Fileobj=fileobj, Bucket=self.bucket_name, Key='foo'
)
self.addCleanup(self.delete_object, 'foo')
self.object_exists('foo')
def test_upload_fileobj_progress(self):
# This has to be an integration test because the fileobj will never
# actually be read from when using the stubber and therefore the
# progress callbacks will not be invoked.
chunksize = 5 * (1024**2)
config = boto3.s3.transfer.TransferConfig(
multipart_chunksize=chunksize,
multipart_threshold=chunksize,
max_concurrency=1,
)
fileobj = io.BytesIO(b'0' * (chunksize * 3))
def progress_callback(amount):
self.progress += amount
self.client.upload_fileobj(
Fileobj=fileobj,
Bucket=self.bucket_name,
Key='foo',
Config=config,
Callback=progress_callback,
)
self.addCleanup(self.delete_object, 'foo')
self.object_exists('foo')
self.assertEqual(self.progress, chunksize * 3)
def test_download_fileobj(self):
fileobj = io.BytesIO()
self.client.put_object(
Bucket=self.bucket_name, Key='foo', Body=b'beach'
)
self.addCleanup(self.delete_object, 'foo')
self.wait_until_object_exists('foo')
self.client.download_fileobj(
Bucket=self.bucket_name, Key='foo', Fileobj=fileobj
)
self.assertEqual(fileobj.getvalue(), b'beach')
def test_upload_via_path(self):
transfer = self.create_s3_transfer()
filename = self.files.create_file_with_size('path.txt', filesize=1024)
transfer.upload_file(Path(filename), self.bucket_name, 'path.txt')
self.addCleanup(self.delete_object, 'path.txt')
self.assertTrue(self.object_exists('path.txt'))
def test_upload_below_threshold(self):
config = boto3.s3.transfer.TransferConfig(
multipart_threshold=2 * 1024 * 1024
)
transfer = self.create_s3_transfer(config)
filename = self.files.create_file_with_size(
'foo.txt', filesize=1024 * 1024
)
transfer.upload_file(filename, self.bucket_name, 'foo.txt')
self.addCleanup(self.delete_object, 'foo.txt')
self.assertTrue(self.object_exists('foo.txt'))
def test_upload_above_threshold(self):
config = boto3.s3.transfer.TransferConfig(
multipart_threshold=2 * 1024 * 1024
)
transfer = self.create_s3_transfer(config)
filename = self.files.create_file_with_size(
'20mb.txt', filesize=20 * 1024 * 1024
)
transfer.upload_file(filename, self.bucket_name, '20mb.txt')
self.addCleanup(self.delete_object, '20mb.txt')
self.assertTrue(self.object_exists('20mb.txt'))
def test_upload_file_above_threshold_with_acl(self):
config = boto3.s3.transfer.TransferConfig(
multipart_threshold=5 * 1024 * 1024
)
transfer = self.create_s3_transfer(config)
filename = self.files.create_file_with_size(
'6mb.txt', filesize=6 * 1024 * 1024
)
extra_args = {'ACL': 'public-read'}
transfer.upload_file(
filename, self.bucket_name, '6mb.txt', extra_args=extra_args
)
self.addCleanup(self.delete_object, '6mb.txt')
self.assertTrue(self.object_exists('6mb.txt'))
response = self.client.get_object_acl(
Bucket=self.bucket_name, Key='6mb.txt'
)
self.assert_has_public_read_acl(response)
def test_upload_file_above_threshold_with_ssec(self):
key_bytes = os.urandom(32)
extra_args = {
'SSECustomerKey': key_bytes,
'SSECustomerAlgorithm': 'AES256',
}
config = boto3.s3.transfer.TransferConfig(
multipart_threshold=5 * 1024 * 1024
)
transfer = self.create_s3_transfer(config)
filename = self.files.create_file_with_size(
'6mb.txt', filesize=6 * 1024 * 1024
)
transfer.upload_file(
filename, self.bucket_name, '6mb.txt', extra_args=extra_args
)
self.addCleanup(self.delete_object, '6mb.txt')
# A head object will fail if it has a customer key
# associated with it and it's not provided in the HeadObject
# request so we can use this to verify our functionality.
response = self.client.head_object(
Bucket=self.bucket_name, Key='6mb.txt', **extra_args
)
self.assertEqual(response['SSECustomerAlgorithm'], 'AES256')
def test_progress_callback_on_upload(self):
self.amount_seen = 0
lock = threading.Lock()
def progress_callback(amount):
with lock:
self.amount_seen += amount
transfer = self.create_s3_transfer()
filename = self.files.create_file_with_size(
'20mb.txt', filesize=20 * 1024 * 1024
)
transfer.upload_file(
filename, self.bucket_name, '20mb.txt', callback=progress_callback
)
self.addCleanup(self.delete_object, '20mb.txt')
# The callback should have been called enough times such that
# the total amount of bytes we've seen (via the "amount"
# arg to the callback function) should be the size
# of the file we uploaded.
self.assertEqual(self.amount_seen, 20 * 1024 * 1024)
def test_callback_called_once_with_sigv4(self):
# Verify #98, where the callback was being invoked
# twice when using signature version 4.
self.amount_seen = 0
lock = threading.Lock()
def progress_callback(amount):
with lock:
self.amount_seen += amount
client = self.session.client(
's3', self.region, config=Config(signature_version='s3v4')
)
transfer = boto3.s3.transfer.S3Transfer(client)
filename = self.files.create_file_with_size(
'10mb.txt', filesize=10 * 1024 * 1024
)
transfer.upload_file(
filename, self.bucket_name, '10mb.txt', callback=progress_callback
)
self.addCleanup(self.delete_object, '10mb.txt')
self.assertEqual(self.amount_seen, 10 * 1024 * 1024)
def test_can_send_extra_params_on_upload(self):
transfer = self.create_s3_transfer()
filename = self.files.create_file_with_size('foo.txt', filesize=1024)
transfer.upload_file(
filename,
self.bucket_name,
'foo.txt',
extra_args={'ACL': 'public-read'},
)
self.addCleanup(self.delete_object, 'foo.txt')
response = self.client.get_object_acl(
Bucket=self.bucket_name, Key='foo.txt'
)
self.assert_has_public_read_acl(response)
def test_can_configure_threshold(self):
config = boto3.s3.transfer.TransferConfig(
multipart_threshold=6 * 1024 * 1024
)
transfer = self.create_s3_transfer(config)
filename = self.files.create_file_with_size(
'foo.txt', filesize=8 * 1024 * 1024
)
transfer.upload_file(filename, self.bucket_name, 'foo.txt')
self.addCleanup(self.delete_object, 'foo.txt')
self.assertTrue(self.object_exists('foo.txt'))
def test_can_send_extra_params_on_download(self):
# We're picking the customer provided sse feature
# of S3 to test the extra_args functionality of
# S3.
key_bytes = os.urandom(32)
extra_args = {
'SSECustomerKey': key_bytes,
'SSECustomerAlgorithm': 'AES256',
}
self.client.put_object(
Bucket=self.bucket_name,
Key='foo.txt',
Body=b'hello world',
**extra_args,
)
self.addCleanup(self.delete_object, 'foo.txt')
transfer = self.create_s3_transfer()
download_path = os.path.join(self.files.rootdir, 'downloaded.txt')
self.wait_until_object_exists('foo.txt', extra_params=extra_args)
transfer.download_file(
self.bucket_name, 'foo.txt', download_path, extra_args=extra_args
)
with open(download_path, 'rb') as f:
self.assertEqual(f.read(), b'hello world')
def test_progress_callback_on_download(self):
self.amount_seen = 0
lock = threading.Lock()
def progress_callback(amount):
with lock:
self.amount_seen += amount
transfer = self.create_s3_transfer()
filename = self.files.create_file_with_size(
'20mb.txt', filesize=20 * 1024 * 1024
)
with open(filename, 'rb') as f:
self.client.put_object(
Bucket=self.bucket_name, Key='20mb.txt', Body=f
)
self.addCleanup(self.delete_object, '20mb.txt')
download_path = os.path.join(self.files.rootdir, 'downloaded.txt')
transfer.download_file(
self.bucket_name,
'20mb.txt',
download_path,
callback=progress_callback,
)
self.assertEqual(self.amount_seen, 20 * 1024 * 1024)
def test_download_below_threshold(self):
transfer = self.create_s3_transfer()
filename = self.files.create_file_with_size(
'foo.txt', filesize=1024 * 1024
)
with open(filename, 'rb') as f:
self.client.put_object(
Bucket=self.bucket_name, Key='foo.txt', Body=f
)
self.addCleanup(self.delete_object, 'foo.txt')
download_path = os.path.join(self.files.rootdir, 'downloaded.txt')
self.wait_until_object_exists('foo.txt')
transfer.download_file(self.bucket_name, 'foo.txt', download_path)
assert_files_equal(filename, download_path)
def test_download_above_threshold(self):
transfer = self.create_s3_transfer()
filename = self.files.create_file_with_size(
'foo.txt', filesize=20 * 1024 * 1024
)
with open(filename, 'rb') as f:
self.client.put_object(
Bucket=self.bucket_name, Key='foo.txt', Body=f
)
self.addCleanup(self.delete_object, 'foo.txt')
download_path = os.path.join(self.files.rootdir, 'downloaded.txt')
self.wait_until_object_exists('foo.txt')
transfer.download_file(self.bucket_name, 'foo.txt', download_path)
assert_files_equal(filename, download_path)
def test_download_file_with_directory_not_exist(self):
transfer = self.create_s3_transfer()
self.client.put_object(
Bucket=self.bucket_name, Key='foo.txt', Body=b'foo'
)
self.addCleanup(self.delete_object, 'foo.txt')
download_path = os.path.join(
self.files.rootdir, 'a', 'b', 'c', 'downloaded.txt'
)
self.wait_until_object_exists('foo.txt')
with self.assertRaises(IOError):
transfer.download_file(self.bucket_name, 'foo.txt', download_path)
def test_download_large_file_directory_not_exist(self):
transfer = self.create_s3_transfer()
filename = self.files.create_file_with_size(
'foo.txt', filesize=20 * 1024 * 1024
)
with open(filename, 'rb') as f:
self.client.put_object(
Bucket=self.bucket_name, Key='foo.txt', Body=f
)
self.addCleanup(self.delete_object, 'foo.txt')
download_path = os.path.join(
self.files.rootdir, 'a', 'b', 'c', 'downloaded.txt'
)
self.wait_until_object_exists('foo.txt')
with self.assertRaises(IOError):
transfer.download_file(self.bucket_name, 'foo.txt', download_path)
def test_transfer_methods_through_client(self):
# This is really just a sanity check to ensure that the interface
# from the clients work. We're not exhaustively testing through
# this client interface.
filename = self.files.create_file_with_size(
'foo.txt', filesize=1024 * 1024
)
self.client.upload_file(
Filename=filename, Bucket=self.bucket_name, Key='foo.txt'
)
self.addCleanup(self.delete_object, 'foo.txt')
download_path = os.path.join(self.files.rootdir, 'downloaded.txt')
self.wait_until_object_exists('foo.txt')
self.client.download_file(
Bucket=self.bucket_name, Key='foo.txt', Filename=download_path
)
assert_files_equal(filename, download_path)
def test_transfer_methods_do_not_use_threads(self):
# This is just a smoke test to make sure that
# setting use_threads to False has no issues transferring files as
# the non-threaded implementation is ran under the same integration
# and functional tests in s3transfer as the normal threaded
# implementation
#
# The methods used are arbitrary other than one of the methods
# use ``boto3.s3.transfer.S3Transfer`` and the other should be
# using ``s3transfer.manager.TransferManager`` directly
content = b'my content'
filename = self.files.create_file('myfile', content.decode('utf-8'))
key = 'foo'
config = boto3.s3.transfer.TransferConfig(use_threads=False)
self.client.upload_file(
Bucket=self.bucket_name, Key=key, Filename=filename, Config=config
)
self.addCleanup(self.delete_object, key)
self.assertTrue(self.object_exists(key))
fileobj = io.BytesIO()
self.client.download_fileobj(
Bucket=self.bucket_name, Key='foo', Fileobj=fileobj, Config=config
)
self.assertEqual(fileobj.getvalue(), content)
def test_transfer_methods_through_bucket(self):
# This is just a sanity check to ensure that the bucket interface work.
key = 'bucket.txt'
bucket = self.session.resource('s3').Bucket(self.bucket_name)
filename = self.files.create_file_with_size(key, 1024 * 1024)
bucket.upload_file(Filename=filename, Key=key)
self.addCleanup(self.delete_object, key)
download_path = os.path.join(self.files.rootdir, unique_id('foo'))
bucket.download_file(Key=key, Filename=download_path)
assert_files_equal(filename, download_path)
def test_transfer_methods_through_object(self):
# This is just a sanity check to ensure that the object interface work.
key = 'object.txt'
obj = self.session.resource('s3').Object(self.bucket_name, key)
filename = self.files.create_file_with_size(key, 1024 * 1024)
obj.upload_file(Filename=filename)
self.addCleanup(self.delete_object, key)
download_path = os.path.join(self.files.rootdir, unique_id('foo'))
obj.download_file(Filename=download_path)
assert_files_equal(filename, download_path)
| TestS3Transfers |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/test_trainer.py | {
"start": 61523,
"end": 62337
} | class ____(BoringModel):
def validation_step(self, batch, batch_idx):
loss = self.step(batch)
self.log("x", loss)
@RunIf(skip_windows=True)
def test_fit_test_synchronization(tmp_path):
"""Test that the trainer synchronizes processes before returning control back to the caller."""
model = TestDummyModelForCheckpoint()
checkpoint = ModelCheckpoint(dirpath=tmp_path, monitor="x", mode="min", save_top_k=1)
trainer = Trainer(
default_root_dir=tmp_path,
max_epochs=2,
strategy="ddp_spawn",
accelerator="cpu",
devices=2,
callbacks=[checkpoint],
)
trainer.fit(model)
assert os.path.exists(checkpoint.best_model_path), f"Could not find checkpoint at rank {trainer.global_rank}"
trainer.test()
| TestDummyModelForCheckpoint |
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 18396,
"end": 20719
} | class ____(nn.Module):
def __init__(self, config: ClvpConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.self_attn = ClvpSelfAttention(config)
self.mlp = ClvpEncoderMLP(config)
self.input_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
self.post_attention_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.FloatTensor,
rotary_pos_emb: torch.FloatTensor,
attention_mask: torch.LongTensor,
position_ids: torch.LongTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
input to the layer.
rotary_pos_emb (`torch.FloatTensor`):
rotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module.
attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`):
attention mask where padding elements are indicated by very large negative values.
position_ids (`torch.LongTensor`):
Denotes position ids of the input tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.input_rmsnorm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
rotary_pos_emb=rotary_pos_emb,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_rmsnorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states, attn_weights
# Copied from transformers.models.xlm.modeling_xlm.XLMSequenceSummary with XLM->Clvp
| ClvpEncoderLayer |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core_tests/utils_tests/test_naming.py | {
"start": 60,
"end": 2999
} | class ____:
"""Test the snakecase function, particularly around handling of uppercase sequences."""
@pytest.mark.parametrize(
"input_str,expected",
[
# Basic cases
("SimpleComponent", "simple_component"),
("Component", "component"),
("MyComponent", "my_component"),
# Multiple words
("DatabaseConnection", "database_connection"),
# Improved handling of consecutive uppercase letters - these are the new expectations
("ACMEDatabricksJobComponent", "acme_databricks_job_component"),
("HTTPSCertificateValidator", "https_certificate_validator"),
("XMLHTTPRequest", "xmlhttp_request"),
("URLParser", "url_parser"),
("APIKey", "api_key"),
("JSONResponse", "json_response"),
("CSVFile", "csv_file"),
("PDFGenerator", "pdf_generator"),
# Additional test cases for consecutive uppercase handling
("HTTPSConnection", "https_connection"),
("XMLParser", "xml_parser"),
("IOManager", "io_manager"),
("UIComponent", "ui_component"),
("HTTPClient", "http_client"),
# Edge cases
("", ""),
("a", "a"),
("A", "a"),
("AB", "ab"), # Improved: consecutive uppercase at end
("ABC", "abc"), # Improved: consecutive uppercase at end
# With numbers
("Component2", "component2"),
("XMLParser2", "xml_parser2"),
("HTTP2Connection", "http2_connection"),
# With underscores already present - should clean up nicely now
("My_Component", "my_component"),
("API_Key", "api_key"),
# Mixed with special characters - should clean up nicely now
("Component-Name", "component_name"),
("Component.Name", "component_name"),
("Component Name", "component_name"),
],
)
def test_snakecase_conversions(self, input_str: str, expected: str):
"""Test various snakecase conversion scenarios."""
assert snakecase(input_str) == expected
def test_scaffold_component_naming_scenarios(self):
"""Test naming scenarios that commonly occur in component scaffolding."""
# Common framework component names
test_cases = [
("DatabricksJobComponent", "databricks_job_component"),
("SnowflakeIOManager", "snowflake_i_o_manager"),
("BigQueryAssetLoader", "big_query_asset_loader"),
("S3FileSystem", "s3_file_system"),
("GCSStorage", "g_c_s_storage"),
("AWSCredentials", "a_w_s_credentials"),
]
for input_name, _expected in test_cases:
actual = snakecase(input_name)
assert isinstance(actual, str)
assert len(actual) > 0
| TestSnakecase |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/loader.py | {
"start": 702,
"end": 1276
} | class ____(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
def __init__(self, stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
self.comment_handling = None
Reader.__init__(self, stream, loader=self)
Scanner.__init__(self, loader=self)
Parser.__init__(self, loader=self)
Composer.__init__(self, loader=self)
BaseConstructor.__init__(self, loader=self)
VersionedResolver.__init__(self, version, loader=self)
| BaseLoader |
python | tensorflow__tensorflow | tensorflow/python/saved_model/builder_impl.py | {
"start": 2164,
"end": 21379
} | class ____(object):
"""Builds the `SavedModel` protocol buffer and saves variables and assets.
The `SavedModelBuilder` class provides the functionality to build a
`SavedModel` protocol buffer. Specifically, this allows multiple meta
graphs to be saved as part of a single language-neutral `SavedModel`,
while sharing variables and assets.
To build a SavedModel, the first meta graph must be saved with variables.
Subsequent meta graphs will simply be saved with their graph definitions. If
assets need to be saved and written or copied to disk, they can be provided
when the meta graph def is added. If multiple meta graph defs are associated
an asset of the same name, only the first version is retained.
Each meta graph added to the SavedModel must be annotated with tags. The tags
provide a means to identify the specific meta graph to load and restore, along
with the shared set of variables and assets.
Typical usage for the `SavedModelBuilder`:
```python
...
builder = tf.compat.v1.saved_model.Builder(export_dir)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph_and_variables(sess,
["foo-tag"],
signature_def_map=foo_signatures,
assets_list=foo_assets)
...
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph(["bar-tag", "baz-tag"])
...
builder.save()
```
Note: This function will only be available through the v1 compatibility
library as tf.compat.v1.saved_model.builder.SavedModelBuilder or
tf.compat.v1.saved_model.Builder. Tensorflow 2.0 will introduce a new
object-based method of creating SavedModels.
"""
def __init__(self, export_dir):
self._saved_model = saved_model_pb2.SavedModel()
self._saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
self._export_dir = export_dir
if file_io.file_exists(export_dir):
if file_io.list_directory(export_dir):
raise AssertionError(
f"Export directory {export_dir} already exists, and isn't empty. "
"Please choose a different export directory, or delete all the "
"contents of the specified directory.")
else:
file_io.recursive_create_dir(self._export_dir)
# Boolean to track whether variables and assets corresponding to the
# SavedModel have been saved. Specifically, the first meta graph to be added
# MUST use the add_meta_graph_and_variables() API. Subsequent add operations
# on the SavedModel MUST use the add_meta_graph() API which does not save
# weights.
self._has_saved_variables = False
self._saved_asset_files = set()
def _save_and_write_assets(self, meta_graph_def, assets_list=None):
"""Saves asset to the meta graph and writes asset files to disk.
Args:
meta_graph_def: The meta graph def to which the assets will be added.
assets_list: The list where the asset paths are setup.
"""
# Creates a function that adds assets into the meta graph def.
write_fn = functools.partial(_add_asset_to_metagraph, meta_graph_def)
asset_filename_map = _maybe_save_assets(write_fn, assets_list)
# Return if there are no assets to write.
if not asset_filename_map:
tf_logging.info("No assets to write.")
return
# Copy assets from source path to destination path.
copy_assets_to_destination_dir(asset_filename_map, self._export_dir,
self._saved_asset_files)
def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):
"""Tags the meta graph def and adds it to the SavedModel.
Tags the meta graph def with the supplied tags, adds signature defs to it if
provided and appends the meta graph def to the SavedModel proto.
Args:
meta_graph_def: The meta graph def to add to the SavedModel.
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
"""
for tag in tags:
meta_graph_def.meta_info_def.tags.append(tag)
if signature_def_map is not None:
for key in signature_def_map:
meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])
proto_meta_graph_def = self._saved_model.meta_graphs.add()
proto_meta_graph_def.CopyFrom(meta_graph_def)
def _validate_tensor_info(self, tensor_info):
"""Validates the `TensorInfo` proto.
Checks if the `encoding` (`name` or `coo_sparse` or `type_spec`) and
`dtype` fields exist and are non-empty.
Args:
tensor_info: `TensorInfo` protocol buffer to validate.
Raises:
AssertionError: If the `encoding` or `dtype` fields of the supplied
`TensorInfo` proto are not populated.
"""
if tensor_info is None:
raise AssertionError(
"All TensorInfo protos used in the SignatureDefs must have the name "
"and dtype fields set.")
if tensor_info.WhichOneof("encoding") is None:
# TODO(soergel) validate each of the fields of coo_sparse
raise AssertionError(
f"Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used "
"in the SignatureDefs must have one of the 'encoding' fields (e.g., "
"name or coo_sparse) set.")
if tensor_info.WhichOneof("encoding") == "composite_tensor":
for component in tensor_info.composite_tensor.components:
self._validate_tensor_info(component)
elif tensor_info.dtype == types_pb2.DT_INVALID:
raise AssertionError(
f"Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used in"
" the SignatureDefs must have the dtype field set.")
def _validate_signature_def_map(self, signature_def_map):
"""Validates the `SignatureDef` entries in the signature def map.
Validation of entries in the signature def map includes ensuring that the
`name` and `dtype` fields of the TensorInfo protos of the `inputs` and
`outputs` of each `SignatureDef` are populated. Also ensures that reserved
SignatureDef keys for the initialization and train ops are not used.
Args:
signature_def_map: The map of signature defs to be validated.
Raises:
AssertionError: If a TensorInfo is not valid.
KeyError: If a reserved signature key is used in the map.
"""
for signature_def_key in signature_def_map:
signature_def = signature_def_map[signature_def_key]
inputs = signature_def.inputs
outputs = signature_def.outputs
for inputs_key in inputs:
self._validate_tensor_info(inputs[inputs_key])
for outputs_key in outputs:
self._validate_tensor_info(outputs[outputs_key])
if constants.INIT_OP_SIGNATURE_KEY in signature_def_map:
raise KeyError(
f"SignatureDef map key \"{constants.INIT_OP_SIGNATURE_KEY}\" is "
"reserved for initialization. Please use a different key.")
if constants.TRAIN_OP_SIGNATURE_KEY in signature_def_map:
raise KeyError(
f"SignatureDef map key \"{constants.TRAIN_OP_SIGNATURE_KEY}\" is "
f"reserved for the train op. Please use a different key.")
def _maybe_create_saver(self, saver=None):
"""Creates a sharded saver if one does not already exist."""
if not saver:
# Initialize a saver to generate a sharded output for all saveables in the
# current scope.
saver = tf_saver.Saver(
variables._all_saveable_objects(), # pylint: disable=protected-access
sharded=True,
write_version=saver_pb2.SaverDef.V2,
allow_empty=True)
return saver
def add_meta_graph(self,
tags,
signature_def_map=None,
assets_list=None,
clear_devices=False,
init_op=None,
train_op=None,
saver=None):
"""Adds the current meta graph to the SavedModel.
Creates a Saver in the current scope and uses the Saver to export the meta
graph def. Invoking this API requires the `add_meta_graph_and_variables()`
API to have been invoked before.
Args:
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
assets_list: Assets to be saved with SavedModel. Note
that this list should be a subset of the assets saved as part of
the first meta graph in the SavedModel.
clear_devices: Set to true if the device info on the default graph should
be cleared.
init_op: Op or group of ops to execute when the graph is loaded. Note
that when the init_op is specified it is run after the restore op at
load-time.
train_op: Op or group of opts that trains the model when run. This will
not be run automatically when the graph is loaded, instead saved in
a SignatureDef accessible through the exported MetaGraph.
saver: An instance of tf.compat.v1.train.Saver that will be used to export
the metagraph. If None, a sharded Saver that restores all variables will
be used.
Raises:
AssertionError: If the variables for the SavedModel have not been saved
yet, or if the graph already contains one or more legacy init ops.
"""
if not self._has_saved_variables:
raise AssertionError(
"Graph state including variables and assets has not been saved yet. "
"Please invoke `add_meta_graph_and_variables()` first.")
# Validate the signature def map to ensure all included TensorInfos are
# properly populated.
signature_def_map = signature_def_map or {}
self._validate_signature_def_map(signature_def_map)
# Create a SignatureDef pointing to the graph initialization op, which will
# be added to the MetaGraphDef.
_add_op_to_signature_def_map(signature_def_map, init_op,
constants.INIT_OP_SIGNATURE_KEY)
_add_op_to_signature_def_map(signature_def_map, train_op,
constants.TRAIN_OP_SIGNATURE_KEY)
saver = self._maybe_create_saver(saver)
# The graph almost certainly previously contained at least one Saver, and
# possibly several (e.g. one for loading a pretrained embedding, and another
# for the model weights). Removing the preexisting ones was the
# motivation for the clear_extraneous_savers option, but it turns out that
# there are edge cases where that option breaks the graph. Until that is
# resolved, we just leave the option set to False for now.
# TODO(soergel): Reinstate clear_extraneous_savers=True when possible.
meta_graph_def = saver.export_meta_graph(
clear_devices=clear_devices, strip_default_attrs=True)
# Save asset files and write them to disk, if any.
self._save_and_write_assets(meta_graph_def, assets_list)
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
def add_meta_graph_and_variables(self,
sess,
tags,
signature_def_map=None,
assets_list=None,
clear_devices=False,
init_op=None,
train_op=None,
strip_default_attrs=False,
saver=None):
# pylint: disable=line-too-long
"""Adds the current meta graph to the SavedModel and saves variables.
Creates a Saver to save the variables from the provided session. Exports the
corresponding meta graph def. This function assumes that the variables to be
saved have been initialized. For a given `SavedModelBuilder`, this API must
be called exactly once and for the first meta graph to save. For subsequent
meta graph defs to be added, the `add_meta_graph()` API must be used.
Args:
sess: The TensorFlow session from which to save the meta graph and
variables.
tags: The set of tags with which to save the meta graph.
signature_def_map: The map of signature def map to add to the meta graph
def.
assets_list: Assets to be saved with SavedModel.
clear_devices: Set to true if the device info on the default graph should
be cleared.
init_op: Op or group of ops to execute when the graph is loaded. Note
that when the init_op is specified it is run after the restore op at
load-time.
train_op: Op or group of ops that trains the model when run. This will
not be run automatically when the graph is loaded, instead saved in
a SignatureDef accessible through the exported MetaGraph.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
saver: An instance of tf.compat.v1.train.Saver that will be used to export the
metagraph and save variables. If None, a sharded Saver that restores
all variables will be used.
"""
# pylint: enable=line-too-long
if self._has_saved_variables:
raise AssertionError("Graph state including variables and assets has "
"already been saved. Please invoke "
"`add_meta_graph()` instead.")
# Validate the signature def map to ensure all included TensorInfos are
# properly populated.
signature_def_map = signature_def_map or {}
self._validate_signature_def_map(signature_def_map)
# Create a SignatureDef pointing to the graph initialization op, which will
# be added to the MetaGraphDef.
_add_op_to_signature_def_map(signature_def_map, init_op,
constants.INIT_OP_SIGNATURE_KEY)
_add_op_to_signature_def_map(signature_def_map, train_op,
constants.TRAIN_OP_SIGNATURE_KEY)
path_helpers.get_or_create_variables_dir(self._export_dir)
variables_path = path_helpers.get_variables_path(self._export_dir)
saver = self._maybe_create_saver(saver)
# Save the variables. Also, disable writing the checkpoint state proto. The
# file is not used during SavedModel loading. In addition, since a
# SavedModel can be copied or moved, this avoids the checkpoint state to
# become outdated.
saver.save(sess, variables_path, write_meta_graph=False, write_state=False)
# Export the meta graph def.
# The graph almost certainly previously contained at least one Saver, and
# possibly several (e.g. one for loading a pretrained embedding, and another
# for the model weights). Removing the preexisting ones was the
# motivation for the clear_extraneous_savers option, but it turns out that
# there are edge cases where that option breaks the graph. Until that is
# resolved, we just leave the option set to False for now.
# TODO(soergel): Reinstate clear_extraneous_savers=True when possible.
meta_graph_def = saver.export_meta_graph(
clear_devices=clear_devices, strip_default_attrs=strip_default_attrs)
# Save asset files and write them to disk, if any.
self._save_and_write_assets(meta_graph_def, assets_list)
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
# Mark this instance of SavedModel as having saved variables, such that
# subsequent attempts to save variables will fail.
self._has_saved_variables = True
def save(self, as_text=False, experimental_image_format=False,
experimental_image_writer_options=None):
"""Writes a `SavedModel` protocol buffer to disk.
The function writes the SavedModel protocol buffer to the export directory
in a serialized format.
Args:
as_text: Writes the SavedModel protocol buffer in text format to disk.
Protocol buffers in text format are useful for debugging, but parsing
fails when it encounters an unknown field and so is not forward
compatible. This means changes to TensorFlow may prevent deployment of
new text format SavedModels to existing serving binaries. Do not deploy
`as_text` SavedModels to production.
experimental_image_format: Writes the SavedModel protobuf in the
experimental image format. See
https://www.tensorflow.org/api_docs/python/tf/saved_model/SaveOptions for
more details. This allows `SavedModelBuilder` to save models larger than
2 GiB.
experimental_image_writer_options: Optional options for the experimental
image writer. See
https://github.com/google/riegeli/blob/master/doc/record_writer_options.md
for available options.
Raises:
RuntimeError: When trying to use `proto_splitter` but `proto_splitter` is
not imported. This check is here because `proto_splitter` is not
available in OSS at the moment.
Returns:
The path to which the SavedModel protocol buffer was written.
"""
metrics.IncrementWriteApi(_SAVE_BUILDER_LABEL)
if not file_io.file_exists(self._export_dir):
file_io.recursive_create_dir(self._export_dir)
if as_text:
path = file_io.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
file_io.write_string_to_file(path, str(self._saved_model))
else:
if experimental_image_format:
path = file_io.join(
self._export_dir,
constants.SAVED_MODEL_FILENAME_PREFIX,
)
if (
locals().get("proto_splitter", globals().get("proto_splitter"))
is None
):
raise RuntimeError(
"No proto_splitter is provided, cannot use"
" experimental_image_format."
)
# Overwrites path to record whether the saved_model is split, i.e.,
# whether the suffix is `.pb` or `.cpb`.
path = proto_splitter.SavedModelSplitter(self._saved_model).write(
path, experimental_image_writer_options
)
else:
path = file_io.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB),
)
file_io.write_string_to_file(
path, self._saved_model.SerializeToString(deterministic=True)
)
# Placeholder for internal TF1 model fingerprint write
tf_logging.info("SavedModel written to: %s", compat.as_text(path))
metrics.IncrementWrite(write_version="1")
return path
@tf_export(v1=["saved_model.Builder", "saved_model.builder.SavedModelBuilder"]) # pylint: disable=missing-docstring
| _SavedModelBuilder |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/spanner.py | {
"start": 2010,
"end": 19711
} | class ____(GoogleBaseHook, DbApiHook):
"""
Hook for Google Cloud Spanner APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
conn_name_attr = "gcp_conn_id"
default_conn_name = "google_cloud_spanner_default"
conn_type = "gcpspanner"
hook_name = "Google Cloud Spanner"
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._client: Client | None = None
def _get_client(self, project_id: str) -> Client:
"""
Provide a client for interacting with the Cloud Spanner API.
:param project_id: The ID of the Google Cloud project.
:return: Client
"""
if not self._client:
self._client = Client(
project=project_id, credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._client
def _get_conn_params(self) -> SpannerConnectionParams:
"""Extract spanner database connection parameters."""
extras = self.get_connection(self.gcp_conn_id).extra_dejson
project_id = get_field(extras, "project_id") or self.project_id
instance_id = get_field(extras, "instance_id")
database_id = get_field(extras, "database_id")
return SpannerConnectionParams(project_id, instance_id, database_id)
def get_uri(self) -> str:
"""Override DbApiHook get_uri method for get_sqlalchemy_engine()."""
project_id, instance_id, database_id = self._get_conn_params()
if not all([instance_id, database_id]):
raise AirflowException("The instance_id or database_id were not specified")
return f"spanner+spanner:///projects/{project_id}/instances/{instance_id}/databases/{database_id}"
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
project_id, _, _ = self._get_conn_params()
spanner_client = self._get_client(project_id=project_id)
return create_engine(self.get_uri(), connect_args={"client": spanner_client}, **engine_kwargs)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
instance_id: str,
project_id: str,
) -> Instance | None:
"""
Get information about a particular instance.
:param project_id: Optional, The ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param instance_id: The ID of the Cloud Spanner instance.
:return: Spanner instance
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
return None
return instance
def _apply_to_instance(
self,
project_id: str,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
func: Callable[[Instance], Operation],
) -> None:
"""
Invoke a method on a given instance by applying a specified Callable.
:param project_id: The ID of the Google Cloud project that owns the Cloud Spanner database.
:param instance_id: The ID of the instance.
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:param node_count: (Optional) Number of nodes allocated to the instance.
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:param func: Method of the instance to be called.
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id,
configuration_name=configuration_name,
node_count=node_count,
display_name=display_name,
)
try:
operation: Operation = func(instance)
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
project_id: str,
) -> None:
"""
Create a new Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:param display_name: (Optional) The display name for the instance in the Google Cloud Console.
Must be between 4 and 30 characters. If this value is not passed, the name falls back
to the instance ID.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: None
"""
self._apply_to_instance(
project_id, instance_id, configuration_name, node_count, display_name, lambda x: x.create()
)
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
project_id: str,
) -> None:
"""
Update an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:param display_name: (Optional) The display name for the instance in the Google Cloud
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: None
"""
self._apply_to_instance(
project_id, instance_id, configuration_name, node_count, display_name, lambda x: x.update()
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(self, instance_id: str, project_id: str) -> None:
"""
Delete an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id)
try:
instance.delete()
return
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
@GoogleBaseHook.fallback_to_default_project_id
def get_database(
self,
instance_id: str,
database_id: str,
project_id: str,
) -> Database | None:
"""
Retrieve a database in Cloud Spanner; return None if the database does not exist in the instance.
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database in Cloud Spanner.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: Database object or None if database does not exist
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException(f"The instance {instance_id} does not exist in project {project_id} !")
database = instance.database(database_id=database_id)
if not database.exists():
return None
return database
@GoogleBaseHook.fallback_to_default_project_id
def create_database(
self,
instance_id: str,
database_id: str,
ddl_statements: list[str],
project_id: str,
) -> None:
"""
Create a new database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database to create in Cloud Spanner.
:param ddl_statements: The string list containing DDL for the new database.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException(f"The instance {instance_id} does not exist in project {project_id} !")
database = instance.database(database_id=database_id, ddl_statements=ddl_statements)
try:
operation: Operation = database.create()
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
@GoogleBaseHook.fallback_to_default_project_id
def update_database(
self,
instance_id: str,
database_id: str,
ddl_statements: list[str],
project_id: str,
operation_id: str | None = None,
) -> None:
"""
Update DDL of a database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database in Cloud Spanner.
:param ddl_statements: The string list containing DDL for the new database.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException(f"The instance {instance_id} does not exist in project {project_id} !")
database = instance.database(database_id=database_id)
try:
operation = database.update_ddl(ddl_statements=ddl_statements, operation_id=operation_id)
if operation:
result = operation.result()
self.log.info(result)
return
except AlreadyExists as e:
if e.code == 409 and operation_id in e.message:
self.log.info(
"Replayed update_ddl message - the operation id %s was already done before.",
operation_id,
)
return
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
@GoogleBaseHook.fallback_to_default_project_id
def delete_database(self, instance_id: str, database_id, project_id: str) -> bool:
"""
Drop a database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database in Cloud Spanner.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: True if everything succeeded
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException(f"The instance {instance_id} does not exist in project {project_id} !")
database = instance.database(database_id=database_id)
if not database.exists():
self.log.info(
"The database %s is already deleted from instance %s. Exiting.", database_id, instance_id
)
return False
try:
database.drop()
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
return True
@GoogleBaseHook.fallback_to_default_project_id
def execute_dml(
self,
instance_id: str,
database_id: str,
queries: list[str],
project_id: str,
) -> list[int]:
"""
Execute an arbitrary DML query (INSERT, UPDATE, DELETE).
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database in Cloud Spanner.
:param queries: The queries to execute.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: list of numbers of affected rows by DML query
"""
db = (
self._get_client(project_id=project_id)
.instance(instance_id=instance_id)
.database(database_id=database_id)
)
def _tx_runner(tx: Transaction) -> dict[str, int]:
return self._execute_sql_in_transaction(tx, queries)
result = db.run_in_transaction(_tx_runner)
result_rows_count_per_query = []
for i, (sql, rc) in enumerate(result.items(), start=1):
if not sql.startswith("SELECT"):
preview = sql if len(sql) <= 300 else sql[:300] + "…"
self.log.info("[DML %d/%d] affected rows=%d | %s", i, len(result), rc, preview)
result_rows_count_per_query.append(rc)
return result_rows_count_per_query
@staticmethod
def _execute_sql_in_transaction(transaction: Transaction, queries: list[str]) -> dict[str, int]:
counts: OrderedDict[str, int] = OrderedDict()
for sql in queries:
rc = transaction.execute_update(sql)
counts[sql] = rc
return counts
def _get_openlineage_authority_part(self, connection: Connection) -> str | None:
"""Build Spanner-specific authority part for OpenLineage. Returns {project}/{instance}."""
extras = connection.extra_dejson
project_id = extras.get("project_id")
instance_id = extras.get("instance_id")
if not project_id or not instance_id:
return None
return f"{project_id}/{instance_id}"
def get_openlineage_database_dialect(self, connection: Connection) -> str:
"""Return database dialect for OpenLineage."""
return "spanner"
def get_openlineage_database_info(self, connection: Connection) -> DatabaseInfo:
"""Return Spanner specific information for OpenLineage."""
extras = connection.extra_dejson
database_id = extras.get("database_id")
return DatabaseInfo(
scheme=self.get_openlineage_database_dialect(connection),
authority=self._get_openlineage_authority_part(connection),
database=database_id,
information_schema_columns=[
"table_schema",
"table_name",
"column_name",
"ordinal_position",
"spanner_type",
],
)
def get_openlineage_default_schema(self) -> str | None:
"""
Spanner expose 'public' or '' schema depending on dialect(Postgres vs GoogleSQL).
SQLAlchemy dialect for Spanner does not expose default schema, so we return None
to follow the same approach.
"""
return None
| SpannerHook |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 29209,
"end": 30364
} | class ____:
param_names = ["parallel"]
params = [[True, False]]
def setup(self, parallel):
N = 10**3
data = DataFrame(
{0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N},
columns=[0, 1],
)
self.parallel = parallel
self.grouper = data.groupby(0)
def time_series_numba(self, parallel):
def function(values, index):
return values * 5
self.grouper[1].transform(
function, engine="numba", engine_kwargs={"parallel": self.parallel}
)
def time_series_cython(self, parallel):
def function(values):
return values * 5
self.grouper[1].transform(function, engine="cython")
def time_dataframe_numba(self, parallel):
def function(values, index):
return values * 5
self.grouper.transform(
function, engine="numba", engine_kwargs={"parallel": self.parallel}
)
def time_dataframe_cython(self, parallel):
def function(values):
return values * 5
self.grouper.transform(function, engine="cython")
| TransformEngine |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 1078,
"end": 1312
} | class ____(BaseModel):
"""
Asset alias serializer for responses.
"""
id: Annotated[int, Field(title="Id")]
name: Annotated[str, Field(title="Name")]
group: Annotated[str, Field(title="Group")]
| AssetAliasResponse |
python | great-expectations__great_expectations | great_expectations/expectations/row_conditions.py | {
"start": 11176,
"end": 11740
} | class ____(Condition):
"""Represents an AND condition composed of multiple conditions."""
type: Literal["and"] = Field(default="and")
conditions: List[Condition]
@validator("conditions", pre=True, each_item=True)
def _deserialize_condition(cls, v):
"""Deserialize each condition in the list."""
if isinstance(v, dict):
return deserialize_row_condition(v)
return v
@override
def __repr__(self) -> str:
return "(" + " AND ".join(repr(c) for c in self.conditions) + ")"
@public_api
| AndCondition |
python | streamlit__streamlit | lib/streamlit/elements/pyplot.py | {
"start": 1261,
"end": 8438
} | class ____:
@gather_metrics("pyplot")
def pyplot(
self,
fig: Figure | None = None,
clear_figure: bool | None = None,
*,
width: Width = "stretch",
use_container_width: bool | None = None,
**kwargs: Any,
) -> DeltaGenerator:
"""Display a matplotlib.pyplot figure.
.. Important::
You must install ``matplotlib>=3.0.0`` to use this command. You can
install all charting dependencies (except Bokeh) as an extra with
Streamlit:
.. code-block:: shell
pip install streamlit[charts]
Parameters
----------
fig : Matplotlib Figure
The Matplotlib ``Figure`` object to render. See
https://matplotlib.org/stable/gallery/index.html for examples.
.. note::
When this argument isn't specified, this function will render the global
Matplotlib figure object. However, this feature is deprecated and
will be removed in a later version.
clear_figure : bool
If True, the figure will be cleared after being rendered.
If False, the figure will not be cleared after being rendered.
If left unspecified, we pick a default based on the value of ``fig``.
- If ``fig`` is set, defaults to ``False``.
- If ``fig`` is not set, defaults to ``True``. This simulates Jupyter's
approach to matplotlib rendering.
width : "stretch", "content", or int
The width of the chart element. This can be one of the following:
- ``"stretch"`` (default): The width of the element matches the
width of the parent container.
- ``"content"``: The width of the element matches the
width of its content, but doesn't exceed the width of the parent
container.
- An integer specifying the width in pixels: The element has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the element matches the width
of the parent container.
use_container_width : bool
Whether to override the figure's native width with the width of
the parent container. If ``use_container_width`` is ``True``
(default), Streamlit sets the width of the figure to match the
width of the parent container. If ``use_container_width`` is
``False``, Streamlit sets the width of the chart to fit its
contents according to the plotting library, up to the width of the
parent container.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``. For ``use_container_width=False``, use
``width="content"``.
**kwargs : any
Arguments to pass to Matplotlib's savefig function.
Example
-------
>>> import matplotlib.pyplot as plt
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> arr = rng(0).normal(1, 1, size=100)
>>> fig, ax = plt.subplots()
>>> ax.hist(arr, bins=20)
>>>
>>> st.pyplot(fig)
.. output::
https://doc-pyplot.streamlit.app/
height: 630px
Matplotlib supports several types of "backends". If you're getting an
error using Matplotlib with Streamlit, try setting your backend to "TkAgg"::
echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc
For more information, see https://matplotlib.org/faq/usage_faq.html.
"""
if use_container_width is not None:
show_deprecation_warning(
make_deprecated_name_warning(
"use_container_width",
"width",
"2025-12-31",
"For `use_container_width=True`, use `width='stretch'`. "
"For `use_container_width=False`, use `width='content'`.",
include_st_prefix=False,
),
show_in_browser=False,
)
width = "stretch" if use_container_width else "content"
if not fig:
show_deprecation_warning("""
Calling `st.pyplot()` without providing a figure argument has been deprecated
and will be removed in a later version as it requires the use of Matplotlib's
global figure object, which is not thread-safe.
To future-proof this code, you should pass in a figure as shown below:
```python
fig, ax = plt.subplots()
ax.scatter([1, 2, 3], [1, 2, 3])
# other plotting actions...
st.pyplot(fig)
```
If you have a specific use case that requires this functionality, please let us
know via [issue on Github](https://github.com/streamlit/streamlit/issues).
""")
validate_width(width, allow_content=True)
layout_config = LayoutConfig(width=width)
image_list_proto = ImageListProto()
marshall(
self.dg._get_delta_path_str(),
image_list_proto,
layout_config,
fig,
clear_figure,
**kwargs,
)
return self.dg._enqueue("imgs", image_list_proto, layout_config=layout_config)
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
def marshall(
coordinates: str,
image_list_proto: ImageListProto,
layout_config: LayoutConfig,
fig: Figure | None = None,
clear_figure: bool | None = True,
**kwargs: Any,
) -> None:
try:
import matplotlib.pyplot as plt
plt.ioff()
except ImportError:
raise ImportError("pyplot() command requires matplotlib")
# You can call .savefig() on a Figure object or directly on the pyplot
# module, in which case you're doing it to the latest Figure.
if not fig:
if clear_figure is None:
clear_figure = True
fig = cast("Figure", plt)
# Normally, dpi is set to 'figure', and the figure's dpi is set to 100.
# So here we pick double of that to make things look good in a high
# DPI display.
options = {"bbox_inches": "tight", "dpi": 200, "format": "png"}
# If some options are passed in from kwargs then replace the values in
# options with the ones from kwargs
options = {a: kwargs.get(a, b) for a, b in options.items()}
# Merge options back into kwargs.
kwargs.update(options)
image = io.BytesIO()
fig.savefig(image, **kwargs)
marshall_images(
coordinates=coordinates,
image=image,
caption=None,
layout_config=layout_config,
proto_imgs=image_list_proto,
clamp=False,
channels="RGB",
output_format="PNG",
)
# Clear the figure after rendering it. This means that subsequent
# plt calls will be starting fresh.
if clear_figure:
fig.clf()
| PyplotMixin |
python | kamyu104__LeetCode-Solutions | Python/sum-of-digit-differences-of-all-pairs.py | {
"start": 54,
"end": 515
} | class ____(object):
def sumDigitDifferences(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
base, l = 1, 0
while base <= nums[0]:
base *= 10
l += 1
cnts = [[0]*10 for _ in xrange(l)]
for x in nums:
for i in xrange(l):
cnts[i][x%10] += 1
x //= 10
return sum(c*(len(nums)-c) for cnt in cnts for c in cnt)//2
| Solution |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 121694,
"end": 123594
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "postgresql"
@testing.combinations(
(postgresql.BIT(), "BIT(1)"),
(postgresql.BIT(5), "BIT(5)"),
(postgresql.BIT(varying=True), "BIT VARYING"),
(postgresql.BIT(5, varying=True), "BIT VARYING(5)"),
)
def test_bit_compile(self, type_, expected):
self.assert_compile(type_, expected)
@testing.combinations(
(psycopg.dialect(),),
(psycopg2.dialect(),),
(asyncpg.dialect(),),
(pg8000.dialect(),),
argnames="dialect",
id_="n",
)
def test_network_address_cast(self, metadata, dialect):
t = Table(
"addresses",
metadata,
Column("id", Integer, primary_key=True),
Column("addr", postgresql.INET),
Column("addr2", postgresql.MACADDR),
Column("addr3", postgresql.CIDR),
Column("addr4", postgresql.MACADDR8),
)
stmt = select(t.c.id).where(
t.c.addr == "127.0.0.1",
t.c.addr2 == "08:00:2b:01:02:03",
t.c.addr3 == "192.168.100.128/25",
t.c.addr4 == "08:00:2b:01:02:03:04:05",
)
param, param2, param3, param4 = {
"format": ("%s", "%s", "%s", "%s"),
"numeric_dollar": ("$1", "$2", "$3", "$4"),
"pyformat": (
"%(addr_1)s",
"%(addr2_1)s",
"%(addr3_1)s",
"%(addr4_1)s",
),
}[dialect.paramstyle]
expected = (
"SELECT addresses.id FROM addresses "
f"WHERE addresses.addr = {param} "
f"AND addresses.addr2 = {param2} "
f"AND addresses.addr3 = {param3} "
f"AND addresses.addr4 = {param4}"
)
self.assert_compile(stmt, expected, dialect=dialect)
| SpecialTypesCompileTest |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 9254,
"end": 9475
} | class ____(GQLResult):
total_count: int = Field(alias="totalCount")
page_info: PageInfoFragment = Field(alias="pageInfo")
edges: List[VersionedArtifactConnectionFragmentEdges]
| VersionedArtifactConnectionFragment |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 120824,
"end": 121227
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2FieldOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| ProjectV2FieldOrder |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 7383,
"end": 7533
} | class ____(TorchDynamoException):
def __init__(self) -> None:
self.real_stack = torch._guards.TracingContext.extract_stack()
| StepUnsupported |
python | pexpect__pexpect | tests/test_timeout_pattern.py | {
"start": 1101,
"end": 3792
} | class ____(PexpectTestCase.PexpectTestCase):
def test_matches_exp_timeout (self):
'''This tests that we can raise and catch TIMEOUT.
'''
try:
raise pexpect.TIMEOUT("TIMEOUT match test")
except pexpect.TIMEOUT:
pass
#print "Correctly caught TIMEOUT when raising TIMEOUT."
else:
self.fail('TIMEOUT not caught by an except TIMEOUT clause.')
def test_pattern_printout (self):
'''Verify that a TIMEOUT returns the proper patterns it is trying to match against.
Make sure it is returning the pattern from the correct call.'''
try:
p = pexpect.spawn('cat')
p.sendline('Hello')
p.expect('Hello')
p.expect('Goodbye',timeout=5)
except pexpect.TIMEOUT:
assert p.match_index == None
else:
self.fail("Did not generate a TIMEOUT exception.")
def test_exp_timeout_notThrown (self):
'''Verify that a TIMEOUT is not thrown when we match what we expect.'''
try:
p = pexpect.spawn('cat')
p.sendline('Hello')
p.expect('Hello')
except pexpect.TIMEOUT:
self.fail("TIMEOUT caught when it shouldn't be raised because we match the proper pattern.")
def test_stacktraceMunging (self):
'''Verify that the stack trace returned with a TIMEOUT instance does not contain references to pexpect.'''
try:
p = pexpect.spawn('cat')
p.sendline('Hello')
p.expect('Goodbye',timeout=5)
except pexpect.TIMEOUT:
err = sys.exc_info()[1]
if err.get_trace().count("pexpect/__init__.py") != 0:
self.fail("The TIMEOUT get_trace() referenced pexpect.py. "
"It should only reference the caller.\n" + err.get_trace())
def test_correctStackTrace (self):
'''Verify that the stack trace returned with a TIMEOUT instance correctly handles function calls.'''
def nestedFunction (spawnInstance):
spawnInstance.expect("junk", timeout=3)
try:
p = pexpect.spawn('cat')
p.sendline('Hello')
nestedFunction(p)
except pexpect.TIMEOUT:
err = sys.exc_info()[1]
if err.get_trace().count("nestedFunction") == 0:
self.fail("The TIMEOUT get_trace() did not show the call "
"to the nestedFunction function.\n" + str(err) + "\n"
+ err.get_trace())
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(Exp_TimeoutTestCase)
| Exp_TimeoutTestCase |
python | django__django | tests/admin_views/models.py | {
"start": 10788,
"end": 10919
} | class ____(models.Model):
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
| Widget |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_export.py | {
"start": 25436,
"end": 26488
} | class ____(AdminTestMixin, TestCase):
"""
If a custom field is declared with no attribute the field will be present
but with an empty string.
"""
class _BookResource(ModelResource):
author_email = Field(column_name="Author Email")
class Meta:
model = Book
def setUp(self):
super().setUp()
self.book = Book.objects.create(
name="Moonraker", author_email="ian@fleming.com"
)
@patch("import_export.mixins.BaseExportMixin.choose_export_resource_class")
def test_export_with_declared_author_email_field(
self, mock_choose_export_resource_class
):
mock_choose_export_resource_class.return_value = self._BookResource
data = {"format": "0", "resource": "0", "bookresource_author_email": True}
self._prepend_form_prefix(data)
response = self._post_url_response(self.book_export_url, data)
s = 'Author Email\r\n""\r\n'
self.assertEqual(s, response.content.decode())
| DeclaredFieldWithNoAttributeExportTest |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qcat_test.py | {
"start": 715,
"end": 1951
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, L, dim, contig, dtype):
f_input = (torch.rand(M, N, K) - 0.5) * 256
self.qf = nnq.QFunctional()
scale = 1.0
zero_point = 0
self.qf.scale = scale
self.qf.zero_point = zero_point
assert contig in ("none", "one", "all")
q_input = torch.quantize_per_tensor(f_input, scale, zero_point, dtype)
permute_dims = tuple(range(q_input.ndim - 1, -1, -1))
q_input_non_contig = q_input.permute(permute_dims).contiguous()
q_input_non_contig = q_input_non_contig.permute(permute_dims)
if contig == "all":
self.input = (q_input, q_input)
elif contig == "one":
self.input = (q_input, q_input_non_contig)
elif contig == "none":
self.input = (q_input_non_contig, q_input_non_contig)
self.inputs = {"input": self.input, "dim": dim}
self.set_module_name("qcat")
def forward(self, input: list[torch.Tensor], dim: int):
return self.qf.cat(input, dim=dim)
op_bench.generate_pt_test(qcat_configs_short + qcat_configs_long, QCatBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| QCatBenchmark |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_radar03.py | {
"start": 315,
"end": 1366
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_radar03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "radar", "subtype": "filled"})
chart.axis_ids = [56802304, 56845440]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/cache/tests.py | {
"start": 84992,
"end": 86561
} | class ____(SimpleTestCase):
path = "/cache/test/"
factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
return UpdateCacheMiddleware(lambda req: HttpResponse(msg))(request)
def test_head_caches_correctly(self):
test_content = "test content"
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = "test content"
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="settingsprefix",
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
},
LANGUAGES=[
("en", "English"),
("es", "Spanish"),
],
)
| CacheHEADTest |
python | walkccc__LeetCode | solutions/3457. Eat Pizzas!/3457.py | {
"start": 0,
"end": 259
} | class ____:
def maxWeight(self, pizzas: list[int]) -> int:
eat = len(pizzas) // 4
odd = math.ceil(eat / 2)
even = eat - odd
pizzas.sort(reverse=True)
return (sum(pizzas[:odd]) +
sum(pizzas[odd + 1:odd + 1 + even * 2:2]))
| Solution |
python | scikit-learn__scikit-learn | sklearn/decomposition/_nmf.py | {
"start": 42285,
"end": 57946
} | class ____(_BaseNMF):
"""Non-Negative Matrix Factorization (NMF).
Find two non-negative matrices, i.e. matrices with all non-negative elements, (W, H)
whose product approximates the non-negative matrix X. This factorization can be used
for example for dimensionality reduction, source separation or topic extraction.
The objective function is:
.. math::
L(W, H) &= 0.5 * ||X - WH||_{loss}^2
&+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
&+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
&+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
&+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2,
where :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) and
:math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm).
The generic norm :math:`||X - WH||_{loss}` may represent
the Frobenius norm or another supported beta-divergence loss.
The choice between options is controlled by the `beta_loss` parameter.
The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
`H` to keep their impact balanced with respect to one another and to the data fit
term as independent as possible of the size `n_samples` of the training set.
The objective function is minimized with an alternating minimization of W
and H.
Note that the transformed data is named W and the components matrix is named H. In
the NMF literature, the naming convention is usually the opposite since the data
matrix X is transposed.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or {'auto'} or None, default='auto'
Number of components. If `None`, all features are kept.
If `n_components='auto'`, the number of components is automatically inferred
from W or H shapes.
.. versionchanged:: 1.4
Added `'auto'` value.
.. versionchanged:: 1.6
Default value changed from `None` to `'auto'`.
init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
Method used to initialize the procedure.
Valid options:
- `None`: 'nndsvda' if n_components <= min(n_samples, n_features),
otherwise random.
- `'random'`: non-negative random matrices, scaled with:
`sqrt(X.mean() / n_components)`
- `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- `'nndsvda'`: NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- `'nndsvdar'` NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- `'custom'`: Use custom matrices `W` and `H` which must both be provided.
.. versionchanged:: 1.1
When `init=None` and n_components is less than n_samples and n_features
defaults to `nndsvda` instead of `nndsvd`.
solver : {'cd', 'mu'}, default='cd'
Numerical solver to use:
- 'cd' is a Coordinate Descent solver.
- 'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or {'frobenius', 'kullback-leibler', \
'itakura-saito'}, default='frobenius'
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default=1e-4
Tolerance of the stopping condition.
max_iter : int, default=200
Maximum number of iterations before timing out.
random_state : int, RandomState instance or None, default=None
Used for initialisation (when ``init`` == 'nndsvdar' or
'random'), and in Coordinate Descent. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
alpha_W : float, default=0.0
Constant that multiplies the regularization terms of `W`. Set it to zero
(default) to have no regularization on `W`.
.. versionadded:: 1.0
alpha_H : float or "same", default="same"
Constant that multiplies the regularization terms of `H`. Set it to zero to
have no regularization on `H`. If "same" (default), it takes the same value as
`alpha_W`.
.. versionadded:: 1.0
l1_ratio : float, default=0.0
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
verbose : int, default=0
Whether to be verbose.
shuffle : bool, default=False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_components_ : int
The number of components. It is same as the `n_components` parameter
if it was given. Otherwise, it will be same as the number of
features.
reconstruction_err_ : float
Frobenius norm of the matrix difference, or beta-divergence, between
the training data ``X`` and the reconstructed data ``WH`` from
the fitted model.
n_iter_ : int
Actual number of iterations.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
PCA : Principal component analysis.
SparseCoder : Find a sparse representation of data from a fixed,
precomputed dictionary.
SparsePCA : Sparse Principal Components Analysis.
TruncatedSVD : Dimensionality reduction using truncated SVD.
References
----------
.. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
factorizations" <10.1587/transfun.E92.A.708>`
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
of electronics, communications and computer sciences 92.3: 708-721, 2009.
.. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
beta-divergence" <10.1162/NECO_a_00168>`
Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> W = model.fit_transform(X)
>>> H = model.components_
"""
_parameter_constraints: dict = {
**_BaseNMF._parameter_constraints,
"solver": [StrOptions({"mu", "cd"})],
"shuffle": ["boolean"],
}
def __init__(
self,
n_components="auto",
*,
init=None,
solver="cd",
beta_loss="frobenius",
tol=1e-4,
max_iter=200,
random_state=None,
alpha_W=0.0,
alpha_H="same",
l1_ratio=0.0,
verbose=0,
shuffle=False,
):
super().__init__(
n_components=n_components,
init=init,
beta_loss=beta_loss,
tol=tol,
max_iter=max_iter,
random_state=random_state,
alpha_W=alpha_W,
alpha_H=alpha_H,
l1_ratio=l1_ratio,
verbose=verbose,
)
self.solver = solver
self.shuffle = shuffle
def _check_params(self, X):
super()._check_params(X)
# solver
if self.solver != "mu" and self.beta_loss not in (2, "frobenius"):
# 'mu' is the only solver that handles other beta losses than 'frobenius'
raise ValueError(
f"Invalid beta_loss parameter: solver {self.solver!r} does not handle "
f"beta_loss = {self.beta_loss!r}"
)
if self.solver == "mu" and self.init == "nndsvd":
warnings.warn(
(
"The multiplicative update ('mu') solver cannot update "
"zeros present in the initialization, and so leads to "
"poorer results when used jointly with init='nndsvd'. "
"You may try init='nndsvda' or init='nndsvdar' instead."
),
UserWarning,
)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
X = validate_data(
self, X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
)
with config_context(assume_finite=True):
W, H, n_iter = self._fit_transform(X, W=W, H=H)
self.reconstruction_err_ = _beta_divergence(
X, W, H, self._beta_loss, square_root=True
)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
"""Learn a NMF model for the data X and returns the transformed data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed
y : Ignored
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
update_H : bool, default=True
If True, both W and H will be estimated from initial guesses,
this corresponds to a call to the 'fit_transform' method.
If False, only W will be estimated, this corresponds to a call
to the 'transform' method.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
H : ndarray of shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations.
"""
# check parameters
self._check_params(X)
if X.min() == 0 and self._beta_loss <= 0:
raise ValueError(
"When beta_loss <= 0 and X contains zeros, "
"the solver may diverge. Please add small values "
"to X, or use a positive beta_loss."
)
# initialize or check W and H
W, H = self._check_w_h(X, W, H, update_H)
# scale the regularization terms
l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
if self.solver == "cd":
W, H, n_iter = _fit_coordinate_descent(
X,
W,
H,
self.tol,
self.max_iter,
l1_reg_W,
l1_reg_H,
l2_reg_W,
l2_reg_H,
update_H=update_H,
verbose=self.verbose,
shuffle=self.shuffle,
random_state=self.random_state,
)
elif self.solver == "mu":
W, H, n_iter, *_ = _fit_multiplicative_update(
X,
W,
H,
self._beta_loss,
self.max_iter,
self.tol,
l1_reg_W,
l1_reg_H,
l2_reg_W,
l2_reg_H,
update_H,
self.verbose,
)
else:
raise ValueError("Invalid solver parameter '%s'." % self.solver)
if n_iter == self.max_iter and self.tol > 0:
warnings.warn(
"Maximum number of iterations %d reached. Increase "
"it to improve convergence." % self.max_iter,
ConvergenceWarning,
)
return W, H, n_iter
def transform(self, X):
"""Transform the data X according to the fitted NMF model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
accept_sparse=("csr", "csc"),
dtype=[np.float64, np.float32],
reset=False,
ensure_non_negative=True,
)
with config_context(assume_finite=True):
W, *_ = self._fit_transform(X, H=self.components_, update_H=False)
return W
| NMF |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v2_utils_test.py | {
"start": 1194,
"end": 3546
} | class ____(parameterized.TestCase, test.TestCase):
@parameterized.parameters(tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_grad_clip_with_accumulation_off(self, optimizer):
with self.assertRaisesRegex(ValueError, 'accumulation'):
optimizer(use_gradient_accumulation=False, clipvalue=0.)
with self.assertRaisesRegex(ValueError, 'accumulation'):
optimizer(use_gradient_accumulation=False, clipvalue=(None, 1.))
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_grad_clip_with_tuple(self, optimizer):
opt = optimizer(clipvalue=(-1., 1.))
self.assertEqual(-1., opt.clip_gradient_min)
self.assertEqual(1., opt.clip_gradient_max)
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_grad_clip_with_single_value(self, optimizer):
opt = optimizer(clipvalue=1.)
self.assertEqual(-1., opt.clip_gradient_min)
self.assertEqual(1., opt.clip_gradient_max)
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_grad_clip_with_tuple_and_none(self, optimizer):
opt = optimizer(clipvalue=(None, 1))
self.assertIsNone(opt.clip_gradient_min)
self.assertEqual(1., opt.clip_gradient_max)
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam,
tpu_embedding_v2_utils.FTRL)
def test_equal_and_hash_function(self, optimizer):
opt1 = optimizer(0.1)
opt2 = optimizer(0.1)
opt3 = optimizer(0.2)
self.assertEqual(opt1, opt2)
self.assertEqual(hash(opt1), hash(opt2))
self.assertNotEqual(opt1, opt3)
self.assertNotEqual(hash(opt1), hash(opt3))
| TPUEmbeddingOptimizerTest |
python | vyperlang__vyper | vyper/semantics/analysis/base.py | {
"start": 7612,
"end": 9311
} | class ____:
variable: VarInfo
path: tuple[str | object, ...]
# A sentinel indicating a subscript access
SUBSCRIPT_ACCESS: ClassVar[Any] = object()
# custom __reduce__ and _produce implementations to work around
# a pickle bug.
# see https://github.com/python/cpython/issues/124937#issuecomment-2392227290
def __reduce__(self):
dict_obj = {f.name: getattr(self, f.name) for f in fields(self)}
return self.__class__._produce, (dict_obj,)
@classmethod
def _produce(cls, data):
return cls(**data)
@cached_property
def attrs(self):
ret = []
for s in self.path:
if s is self.SUBSCRIPT_ACCESS:
break
ret.append(s)
return tuple(ret)
def contains(self, other):
# VarAccess("v", ("a")) `contains` VarAccess("v", ("a", "b", "c"))
sub_attrs = other.attrs[: len(self.attrs)]
return self.variable == other.variable and sub_attrs == self.attrs
def to_dict(self):
var = self.variable
if var.decl_node is None:
# happens for builtins or `self` accesses
return None
# map SUBSCRIPT_ACCESS to `"$subscript_access"` (which is an identifier
# which can't be constructed by the user)
path = ["$subscript_access" if s is self.SUBSCRIPT_ACCESS else s for s in self.path]
if isinstance(var.decl_node, vy_ast.arg):
varname = var.decl_node.arg
else:
varname = var.decl_node.target.id
decl_node = var.decl_node.get_id_dict()
ret = {"name": varname, "decl_node": decl_node, "access_path": path}
return ret
@dataclass
| VarAccess |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/components.py | {
"start": 37386,
"end": 40857
} | class ____(SchemaLoader):
"""
Custom schema loader for HubSpot custom object streams.
This class generates a JSON schema based on the properties defined in the manifest.
These properties are injected into the parameters by the HttpComponentsResolver used within the DynamicDeclarativeStream.
"""
config: Mapping[str, Any]
parameters: InitVar[Mapping[str, Any]]
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
raw_schema_properties: List[Mapping[str, Any]] = parameters.get("schema_properties", {})
properties = self._get_properties(raw_schema=raw_schema_properties)
self._schema = self._generate_schema(properties)
def _get_properties(self, raw_schema: List[Mapping[str, Any]]) -> Mapping[str, Any]:
return {field["name"]: self._field_to_property_schema(field) for field in raw_schema}
def _field_to_property_schema(self, field: Mapping[str, Any]) -> Mapping[str, Any]:
field_type = field["type"]
if field_type in ["string", "enumeration", "phone_number", "object_coordinates", "json"]:
return {"type": ["null", "string"]}
elif field_type == "datetime" or field_type == "date-time":
return {"type": ["null", "string"], "format": "date-time"}
elif field_type == "date":
return {"type": ["null", "string"], "format": "date"}
elif field_type == "number":
return {"type": ["null", "number"]}
elif field_type == "boolean" or field_type == "bool":
return {"type": ["null", "boolean"]}
else:
logger.warn(f"Field {field['name']} has unrecognized type: {field['type']} casting to string.")
return {"type": ["null", "string"]}
def _generate_schema(self, properties: Mapping[str, Any]) -> Mapping[str, Any]:
unnested_properties = {f"properties_{property_name}": property_value for (property_name, property_value) in properties.items()}
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": ["null", "object"],
"additionalProperties": True,
"properties": {
"id": {"type": ["null", "string"]},
"createdAt": {"type": ["null", "string"], "format": "date-time"},
"updatedAt": {"type": ["null", "string"], "format": "date-time"},
"archived": {"type": ["null", "boolean"]},
"properties": {"type": ["null", "object"], "properties": properties},
**unnested_properties,
},
}
return schema
def get_json_schema(self) -> Mapping[str, Any]:
return self._schema
_TRUTHY_STRINGS = ("y", "yes", "t", "true", "on", "1")
_FALSEY_STRINGS = ("n", "no", "f", "false", "off", "0")
def _strtobool(value: str, /) -> int:
"""Mimic the behavior of distutils.util.strtobool.
From: https://docs.python.org/2/distutils/apiref.html#distutils.util.strtobool
> Convert a string representation of truth to true (1) or false (0).
> True values are y, yes, t, true, on and 1; false values are n, no, f, false, off and 0. Raises
> `ValueError` if val is anything else.
"""
normalized_str = value.lower().strip()
if normalized_str in _TRUTHY_STRINGS:
return 1
if normalized_str in _FALSEY_STRINGS:
return 0
raise ValueError(f"Invalid boolean value: {normalized_str}")
| HubspotCustomObjectsSchemaLoader |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 110086,
"end": 111416
} | class ____(Normalize):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
Parameters
----------
linthresh : float
The range within which the plot is linear (to avoid having the plot
go to infinity around zero).
linscale : float, default: 1
This allows the linear range (-*linthresh* to *linthresh*) to be
stretched relative to the logarithmic range. Its value is the
number of decades to use for each half of the linear range. For
example, when *linscale* == 1.0 (the default), the space used for
the positive and negative halves of the linear range will be equal
to one decade in the logarithmic range.
base : float, default: 10
"""
@property
def linthresh(self):
return self._scale.linthresh
@linthresh.setter
def linthresh(self, value):
self._scale.linthresh = value
@make_norm_from_scale(
scale.AsinhScale,
init=lambda linear_width=1, vmin=None, vmax=None, clip=False: None)
| SymLogNorm |
python | pydantic__pydantic | pydantic-core/tests/benchmarks/test_serialization_micro.py | {
"start": 9905,
"end": 15654
} | class ____:
__slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
@pytest.fixture(scope='module', name='fs_model_serializer')
def fs_model_serializer_fixture():
return SchemaSerializer(
core_schema.model_schema(
FieldsSetModel,
core_schema.model_fields_schema(
{
'a': core_schema.model_field(core_schema.int_schema()),
'b': core_schema.model_field(core_schema.int_schema()),
'c': core_schema.model_field(core_schema.int_schema()),
'd': core_schema.model_field(core_schema.int_schema()),
'e': core_schema.model_field(core_schema.int_schema()),
'f': core_schema.model_field(core_schema.int_schema()),
'g': core_schema.model_field(core_schema.int_schema()),
'h': core_schema.model_field(core_schema.int_schema()),
}
),
)
)
@pytest.mark.benchmark(group='model-exclude-unset')
def test_model_exclude_unset_false(benchmark, fs_model_serializer):
m = FieldsSetModel(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, __pydantic_fields_set__={'a', 'b', 'c', 'd', 'e', 'f'})
assert fs_model_serializer.to_python(m) == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8}
assert fs_model_serializer.to_python(m, exclude_unset=True) == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}
@benchmark
def r():
fs_model_serializer.to_python(m)
@pytest.mark.benchmark(group='model-exclude-unset')
def test_model_exclude_unset_true(benchmark, fs_model_serializer):
m = FieldsSetModel(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, __pydantic_fields_set__={'a', 'b', 'c', 'd', 'e', 'f'})
assert fs_model_serializer.to_python(m) == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8}
assert fs_model_serializer.to_python(m, exclude_unset=True) == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}
@benchmark
def r():
fs_model_serializer.to_python(m, exclude_unset=True)
@pytest.mark.benchmark(group='model-list-json')
def test_model_list_core_json(benchmark):
s = SchemaSerializer(
core_schema.model_schema(
BasicModel,
core_schema.model_fields_schema(
{
'a': core_schema.model_field(
core_schema.list_schema(
core_schema.int_schema(), serialization=core_schema.filter_seq_schema(exclude={1, 2})
)
)
}
),
)
)
m = BasicModel(a=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
assert s.to_json(m) == b'{"a":[0,3,4,5,6,7,8,9]}'
m_big = BasicModel(a=list(range(1000)))
j = s.to_json(m_big)
assert j.startswith(b'{"a":[0,3,4')
assert j.endswith(b'998,999]}')
@benchmark
def r():
s.to_json(m_big)
@pytest.mark.benchmark(group='temporal')
def test_datetime(benchmark):
v = SchemaSerializer(core_schema.datetime_schema())
d = datetime(2022, 12, 2, 12, 13, 14)
assert v.to_python(d, mode='json') == '2022-12-02T12:13:14'
@benchmark
def r():
v.to_python(d, mode='json')
@pytest.mark.benchmark(group='temporal')
def test_datetime_seconds(benchmark):
v = SchemaSerializer(
core_schema.datetime_schema(),
config={
'ser_json_temporal': 'seconds',
},
)
d = datetime(2022, 12, 2, 12, 13, 14)
assert v.to_python(d, mode='json') == 1669983194.0
@benchmark
def r():
v.to_python(d, mode='json')
@pytest.mark.benchmark(group='temporal')
def test_datetime_milliseconds(benchmark):
v = SchemaSerializer(core_schema.datetime_schema(), config={'ser_json_temporal': 'milliseconds'})
d = datetime(2022, 12, 2, 12, 13, 14)
assert v.to_python(d, mode='json') == 1669983194000.0
@benchmark
def r():
v.to_python(d, mode='json')
@pytest.mark.benchmark(group='model-list-json')
def test_uuid(benchmark):
v = SchemaSerializer(core_schema.uuid_schema())
u = UUID('12345678-1234-5678-1234-567812345678')
assert v.to_python(u, mode='json') == '12345678-1234-5678-1234-567812345678'
@benchmark
def r():
v.to_python(u, mode='json')
@pytest.mark.benchmark(group='to-string')
def test_to_string_format(benchmark):
s = SchemaSerializer(core_schema.any_schema(serialization=core_schema.format_ser_schema('d')))
assert s.to_json(123) == b'"123"'
benchmark(s.to_json, 123)
@pytest.mark.benchmark(group='to-string')
def test_to_string_direct(benchmark):
s = SchemaSerializer(core_schema.any_schema(serialization={'type': 'to-string'}))
assert s.to_json(123) == b'"123"'
benchmark(s.to_json, 123)
@pytest.mark.benchmark(group='filter')
def test_filter(benchmark):
v = SchemaSerializer(core_schema.list_schema(core_schema.any_schema()))
assert v.to_python(['a', 'b', 'c', 'd', 'e'], include={-1, -2}) == ['d', 'e']
@benchmark
def t():
v.to_python(['a', 'b', 'c', 'd', 'e'], include={-1, -2})
@pytest.mark.benchmark(group='list-of-lists')
def test_to_json_list_of_lists(benchmark):
data = [[i + j for j in range(10)] for i in range(1000)]
benchmark(to_json, data)
@pytest.mark.benchmark(group='list-of-lists')
def test_ser_list_of_lists(benchmark):
s = SchemaSerializer(core_schema.list_schema(core_schema.list_schema(core_schema.int_schema())))
data = [[i + j for j in range(10)] for i in range(1000)]
benchmark(s.to_json, data)
@dataclass
| FieldsSetModel |
python | wandb__wandb | wandb/automations/_filters/operators.py | {
"start": 5184,
"end": 5367
} | class ____(BaseOp):
val: Scalar = Field(alias="$lte")
@override
def __invert__(self) -> Gt:
"""Implements `~Lte(a) -> Gt(a)`."""
return Gt(val=self.val)
| Lte |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_type_check.py | {
"start": 9674,
"end": 9916
} | class ____(TestCase):
def test_generic(self):
vals = isneginf(np.array((-1.0, 0, 1)) / 0.0)
assert_(vals[0] == 1)
assert_(vals[1] == 0)
assert_(vals[2] == 0)
# @xfail #(reason="not implemented")
| TestIsneginf |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 271905,
"end": 272181
} | class ____(VegaLiteSchema):
"""ConditionalValueDefstringExprRef schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalValueDef<(string|ExprRef)>"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalValueDefstringExprRef |
python | scrapy__scrapy | tests/test_downloadermiddleware_httpauth.py | {
"start": 1808,
"end": 2477
} | class ____:
def setup_method(self):
self.mw = HttpAuthMiddleware()
spider = AnyDomainSpider("foo")
self.mw.spider_opened(spider)
def teardown_method(self):
del self.mw
def test_auth(self):
req = Request("http://example.com/")
assert self.mw.process_request(req) is None
assert req.headers["Authorization"] == basic_auth_header("foo", "bar")
def test_auth_already_set(self):
req = Request("http://example.com/", headers={"Authorization": "Digest 123"})
assert self.mw.process_request(req) is None
assert req.headers["Authorization"] == b"Digest 123"
| TestHttpAuthAnyMiddleware |
python | python-markdown__markdown | markdown/extensions/meta.py | {
"start": 1364,
"end": 2600
} | class ____(Preprocessor):
""" Get Meta-Data. """
def run(self, lines: list[str]) -> list[str]:
""" Parse Meta-Data and store in Markdown.Meta. """
meta: dict[str, Any] = {}
key = None
if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0)
m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break # blank line or end of YAML header - done
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.md.Meta = meta
return lines
def makeExtension(**kwargs): # pragma: no cover
return MetaExtension(**kwargs)
| MetaPreprocessor |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 898,
"end": 3040
} | class ____(WeaviateBaseError):
def __init__(self, message: str, response: Union[httpx.Response, AioRpcError, Call]):
"""Is raised in case the status code returned from Weaviate is not handled in the client implementation and suggests an error.
Custom code can act on the attributes:
- status_code
- json
Args:
message: An error message specific to the context, in which the error occurred.
response: The request response of which the status code was unexpected.
"""
if isinstance(response, httpx.Response):
self._status_code: int = response.status_code
# Set error message
try:
body = response.json()
except (httpx.DecodingError, JSONDecodeError):
body = None
msg = (
message
+ f"! Unexpected status code: {response.status_code}, with response body: {body}."
)
if response.status_code in ERROR_CODE_EXPLANATION:
msg += " " + ERROR_CODE_EXPLANATION[response.status_code]
self.__error = body
elif isinstance(response, AioRpcError):
self._status_code = int(response.code().value[0])
msg = (
message
+ f"! Unexpected status code: {response.code().value[1]}, with response body: {response.details()}."
)
self.__error: str | None = response.details()
elif isinstance(response, Call):
code = cast(StatusCode, response.code())
self._status_code = int(code.value[0])
msg = (
message
+ f"! Unexpected status code: {code.value[1]}, with response body: {response.details()}."
)
self.__error: str | None = response.details()
super().__init__(msg)
@property
def status_code(self) -> int:
return self._status_code
@property
def error(self) -> Optional[str]:
return self.__error
UnexpectedStatusCodeException = UnexpectedStatusCodeError
| UnexpectedStatusCodeError |
python | python__mypy | mypy/semanal_typeargs.py | {
"start": 1246,
"end": 13156
} | class ____(MixedTraverserVisitor):
def __init__(
self,
errors: Errors,
options: Options,
is_typeshed_file: bool,
named_type: Callable[[str, list[Type]], Instance],
) -> None:
super().__init__()
self.errors = errors
self.options = options
self.is_typeshed_file = is_typeshed_file
self.named_type = named_type
self.scope = Scope()
# Should we also analyze function definitions, or only module top-levels?
self.recurse_into_functions = True
# Keep track of the type aliases already visited. This is needed to avoid
# infinite recursion on types like A = Union[int, List[A]].
self.seen_aliases: set[TypeAliasType] = set()
def visit_mypy_file(self, o: MypyFile) -> None:
self.errors.set_file(o.path, o.fullname, scope=self.scope, options=self.options)
with self.scope.module_scope(o.fullname):
super().visit_mypy_file(o)
def visit_func(self, defn: FuncItem) -> None:
if not self.recurse_into_functions:
return
with self.scope.function_scope(defn):
super().visit_func(defn)
def visit_class_def(self, defn: ClassDef) -> None:
with self.scope.class_scope(defn.info):
super().visit_class_def(defn)
def visit_block(self, o: Block) -> None:
if not o.is_unreachable:
super().visit_block(o)
def visit_type_alias_type(self, t: TypeAliasType) -> None:
super().visit_type_alias_type(t)
if t.is_recursive:
if t in self.seen_aliases:
# Avoid infinite recursion on recursive type aliases.
return
self.seen_aliases.add(t)
assert t.alias is not None, f"Unfixed type alias {t.type_ref}"
is_error, is_invalid = self.validate_args(
t.alias.name, tuple(t.args), t.alias.alias_tvars, t
)
if is_invalid:
# If there is an arity error (e.g. non-Parameters used for ParamSpec etc.),
# then it is safer to erase the arguments completely, to avoid crashes later.
# TODO: can we move this logic to typeanal.py?
t.args = erased_vars(t.alias.alias_tvars, TypeOfAny.from_error)
if not is_error:
# If there was already an error for the alias itself, there is no point in checking
# the expansion, most likely it will result in the same kind of error.
if t.args:
# Since we always allow unbounded type variables in alias definitions, we need
# to verify the arguments satisfy the upper bounds of the expansion as well.
get_proper_type(t).accept(self)
if t.is_recursive:
self.seen_aliases.discard(t)
def visit_tuple_type(self, t: TupleType) -> None:
t.items = flatten_nested_tuples(t.items)
# We could also normalize Tuple[*tuple[X, ...]] -> tuple[X, ...] like in
# expand_type() but we can't do this here since it is not a translator visitor,
# and we need to return an Instance instead of TupleType.
super().visit_tuple_type(t)
def visit_callable_type(self, t: CallableType) -> None:
super().visit_callable_type(t)
t.normalize_trivial_unpack()
def visit_instance(self, t: Instance) -> None:
super().visit_instance(t)
# Type argument counts were checked in the main semantic analyzer pass. We assume
# that the counts are correct here.
info = t.type
if isinstance(info, FakeInfo):
return # https://github.com/python/mypy/issues/11079
_, is_invalid = self.validate_args(info.name, t.args, info.defn.type_vars, t)
if is_invalid:
t.args = tuple(erased_vars(info.defn.type_vars, TypeOfAny.from_error))
if t.type.fullname == "builtins.tuple" and len(t.args) == 1:
# Normalize Tuple[*Tuple[X, ...], ...] -> Tuple[X, ...]
arg = t.args[0]
if isinstance(arg, UnpackType):
unpacked = get_proper_type(arg.type)
if isinstance(unpacked, Instance):
assert unpacked.type.fullname == "builtins.tuple"
t.args = unpacked.args
def validate_args(
self, name: str, args: tuple[Type, ...], type_vars: list[TypeVarLikeType], ctx: Context
) -> tuple[bool, bool]:
if any(isinstance(v, TypeVarTupleType) for v in type_vars):
prefix = next(i for (i, v) in enumerate(type_vars) if isinstance(v, TypeVarTupleType))
tvt = type_vars[prefix]
assert isinstance(tvt, TypeVarTupleType)
start, middle, end = split_with_prefix_and_suffix(
tuple(args), prefix, len(type_vars) - prefix - 1
)
args = start + (TupleType(list(middle), tvt.tuple_fallback),) + end
is_error = False
is_invalid = False
for arg, tvar in zip(args, type_vars):
context = ctx if arg.line < 0 else arg
if isinstance(tvar, TypeVarType):
if isinstance(arg, ParamSpecType):
is_invalid = True
self.fail(
INVALID_PARAM_SPEC_LOCATION.format(format_type(arg, self.options)),
context,
code=codes.VALID_TYPE,
)
self.note(
INVALID_PARAM_SPEC_LOCATION_NOTE.format(arg.name),
context,
code=codes.VALID_TYPE,
)
continue
if isinstance(arg, Parameters):
is_invalid = True
self.fail(
f"Cannot use {format_type(arg, self.options)} for regular type variable,"
" only for ParamSpec",
context,
code=codes.VALID_TYPE,
)
continue
if tvar.values:
if isinstance(arg, TypeVarType):
if self.in_type_alias_expr:
# Type aliases are allowed to use unconstrained type variables
# error will be checked at substitution point.
continue
arg_values = arg.values
if not arg_values:
is_error = True
self.fail(
message_registry.INVALID_TYPEVAR_AS_TYPEARG.format(arg.name, name),
context,
code=codes.TYPE_VAR,
)
continue
else:
arg_values = [arg]
if self.check_type_var_values(
name, arg_values, tvar.name, tvar.values, context
):
is_error = True
# Check against upper bound. Since it's object the vast majority of the time,
# add fast path to avoid a potentially slow subtype check.
upper_bound = tvar.upper_bound
object_upper_bound = (
type(upper_bound) is Instance
and upper_bound.type.fullname == "builtins.object"
)
if not object_upper_bound and not is_subtype(arg, upper_bound):
if self.in_type_alias_expr and isinstance(arg, TypeVarType):
# Type aliases are allowed to use unconstrained type variables
# error will be checked at substitution point.
continue
is_error = True
self.fail(
message_registry.INVALID_TYPEVAR_ARG_BOUND.format(
format_type(arg, self.options),
name,
format_type(upper_bound, self.options),
),
context,
code=codes.TYPE_VAR,
)
elif isinstance(tvar, ParamSpecType):
if not isinstance(
get_proper_type(arg), (ParamSpecType, Parameters, AnyType, UnboundType)
):
is_invalid = True
self.fail(
"Can only replace ParamSpec with a parameter types list or"
f" another ParamSpec, got {format_type(arg, self.options)}",
context,
code=codes.VALID_TYPE,
)
if is_invalid:
is_error = True
return is_error, is_invalid
def visit_unpack_type(self, typ: UnpackType) -> None:
super().visit_unpack_type(typ)
proper_type = get_proper_type(typ.type)
if isinstance(proper_type, TupleType):
return
if isinstance(proper_type, TypeVarTupleType):
return
# TODO: this should probably be .has_base("builtins.tuple"), also elsewhere. This is
# tricky however, since this needs map_instance_to_supertype() available in many places.
if isinstance(proper_type, Instance) and proper_type.type.fullname == "builtins.tuple":
return
if not isinstance(proper_type, (UnboundType, AnyType)):
# Avoid extra errors if there were some errors already. Also interpret plain Any
# as tuple[Any, ...] (this is better for the code in type checker).
self.fail(
message_registry.INVALID_UNPACK.format(format_type(proper_type, self.options)),
typ.type,
code=codes.VALID_TYPE,
)
typ.type = self.named_type("builtins.tuple", [AnyType(TypeOfAny.from_error)])
def check_type_var_values(
self, name: str, actuals: list[Type], arg_name: str, valids: list[Type], context: Context
) -> bool:
if self.in_type_alias_expr:
# See testValidTypeAliasValues - we do not enforce typevar compatibility
# at the definition site. We check instantiation validity later.
return False
is_error = False
for actual in get_proper_types(actuals):
# We skip UnboundType here, since they may appear in defn.bases,
# the error will be caught when visiting info.bases, that have bound type
# variables.
if not isinstance(actual, (AnyType, UnboundType)) and not any(
is_same_type(actual, value) for value in valids
):
is_error = True
if len(actuals) > 1 or not isinstance(actual, Instance):
self.fail(
message_registry.INVALID_TYPEVAR_ARG_VALUE.format(name),
context,
code=codes.TYPE_VAR,
)
else:
class_name = f'"{name}"'
actual_type_name = f'"{actual.type.name}"'
self.fail(
message_registry.INCOMPATIBLE_TYPEVAR_VALUE.format(
arg_name, class_name, actual_type_name
),
context,
code=codes.TYPE_VAR,
)
return is_error
def fail(self, msg: str, context: Context, *, code: ErrorCode | None = None) -> None:
self.errors.report(context.line, context.column, msg, code=code)
def note(self, msg: str, context: Context, *, code: ErrorCode | None = None) -> None:
self.errors.report(context.line, context.column, msg, severity="note", code=code)
| TypeArgumentAnalyzer |
python | langchain-ai__langchain | libs/core/langchain_core/output_parsers/openai_functions.py | {
"start": 9949,
"end": 10597
} | class ____(PydanticOutputFunctionsParser):
    """Parse an output as an attribute of a Pydantic object."""
    attr_name: str
    """The name of the attribute to return."""
    @override
    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
        """Parse the function-call output and return a single attribute of it.

        Delegates to the parent class to build the full Pydantic object, then
        returns ``getattr(obj, self.attr_name)`` instead of the whole object.

        Args:
            result: The result of the LLM call.
            partial: Whether to parse partial JSON objects. Unused here: the
                parent class is always asked to parse the full result.

        Returns:
            The value of the ``attr_name`` attribute of the parsed object.
        """
        result = super().parse_result(result)
        return getattr(result, self.attr_name)
| PydanticAttrOutputFunctionsParser |
python | google__jax | jax/_src/custom_derivatives.py | {
"start": 22931,
"end": 35183
} | class ____(Generic[ReturnValue]):
  """Set up a JAX-transformable function for a custom VJP rule definition.
  This class is meant to be used as a function decorator. Instances are
  callables that behave similarly to the underlying function to which the
  decorator was applied, except when a reverse-mode differentiation
  transformation (like :py:func:`jax.grad`) is applied, in which case a custom
  user-supplied VJP rule function is used instead of tracing into and performing
  automatic differentiation of the underlying function's implementation. There
  is a single instance method, :py:func:`~jax.custom_vjp.defvjp`, which may be
  used to define the custom VJP rule.
  This decorator precludes the use of forward-mode automatic differentiation.
  For example::
    @jax.custom_vjp
    def f(x, y):
      return jnp.sin(x) * y
    def f_fwd(x, y):
      return f(x, y), (jnp.cos(x), jnp.sin(x), y)
    def f_bwd(res, g):
      cos_x, sin_x, y = res
      return (cos_x * g * y, sin_x * g)
    f.defvjp(f_fwd, f_bwd)
  For a more detailed introduction, see the tutorial_.
  .. _tutorial: https://docs.jax.dev/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html
  """
  def __init__(self,
               fun: Callable[..., ReturnValue],
               nondiff_argnums: Sequence[int] = (),
               nondiff_argnames: Sequence[str] = ()):
    update_wrapper(self, fun)
    self.fun = fun
    # Merge argnums inferred from ``nondiff_argnames`` with explicitly given
    # ``nondiff_argnums`` into one sorted, de-duplicated tuple.
    nondiff_argnums_: set[int] = set()
    if nondiff_argnames:
      sig = fun_signature(self.fun)
      assert sig is not None
      inferred_nondiff_argnums, _ = infer_argnums_and_argnames(
          sig, None, nondiff_argnames
      )
      nondiff_argnums_.update(inferred_nondiff_argnums)
    if nondiff_argnums:
      nondiff_argnums_.update(nondiff_argnums)
    self.nondiff_argnums = tuple(sorted(nondiff_argnums_))
    # fwd/bwd are populated later by defvjp(); calling before that raises.
    self.fwd: Callable[..., tuple[ReturnValue, Any]] | None = None
    self.bwd: Callable[..., tuple[Any, ...]] | None = None
    self.symbolic_zeros = False
    self.optimize_remat = False
  __getattr__ = custom_api_util.forward_attr
  def defvjp(self,
             fwd: Callable[..., tuple[ReturnValue, Any]],
             bwd: Callable[..., tuple[Any, ...]],
             symbolic_zeros: bool = False,
             optimize_remat: bool = False,
             ) -> None:
    """Define a custom VJP rule for the function represented by this instance.
    Args:
      fwd: a Python callable representing the forward pass of the custom VJP
        rule. When there are no ``nondiff_argnums``, the ``fwd`` function has
        the same input signature as the underlying primal function. It should
        return as output a pair, where the first element represents the primal
        output and the second element represents any "residual" values to store
        from the forward pass for use on the backward pass by the function
        ``bwd``. Input arguments and elements of the output pair may be arrays
        or nested tuples/lists/dicts thereof.
      bwd: a Python callable representing the backward pass of the custom VJP
        rule. When there are no ``nondiff_argnums``, the ``bwd`` function takes
        two arguments, where the first is the "residual" values produced on the
        forward pass by ``fwd``, and the second is the output cotangent with the
        same structure as the primal function output. The output of ``bwd`` must
        be a tuple of length equal to the number of arguments of the primal
        function, and the tuple elements may be arrays or nested
        tuples/lists/dicts thereof so as to match the structure of the primal
        input arguments.
      symbolic_zeros: boolean, determining whether to indicate symbolic zeros
        to the ``fwd`` and ``bwd`` rules. Enabling this option allows custom
        derivative rules to detect when certain inputs, and when certain
        output cotangents, are not involved in differentiation. If ``True``:
        * ``fwd`` must accept, in place of each leaf value ``x`` in
          the pytree comprising an argument to the original function,
          an object (of type
          ``jax.custom_derivatives.CustomVJPPrimal``) with two
          attributes instead: ``value`` and ``perturbed``. The
          ``value`` field is the original primal argument, and
          ``perturbed`` is a boolean. The ``perturbed`` bit indicates
          whether the argument is involved in differentiation (i.e.,
          if it is ``False``, then the corresponding Jacobian "column"
          is zero).
        * ``bwd`` will be passed objects representing static symbolic zeros in
          its cotangent argument in correspondence with unperturbed values;
          otherwise, only standard JAX types (e.g. array-likes) are passed.
        Setting this option to ``True`` allows these rules to detect whether
        certain inputs and outputs are not involved in differentiation, but at
        the cost of special handling. For instance:
        * The signature of ``fwd`` changes, and the objects it is passed cannot
          be output from the rule directly.
        * The ``bwd`` rule is passed objects that are not entirely array-like,
          and that cannot be passed to most ``jax.numpy`` functions.
        * Any custom pytree nodes involved in the primal function's arguments
          must accept, in their unflattening functions, the two-field record
          objects that are given as input leaves to the ``fwd`` rule.
        Default ``False``.
      optimize_remat: boolean, an experimental flag to enable an automatic
        optimization when this function is used under :func:`jax.remat`. This
        will be most useful when the ``fwd`` rule is an opaque call such as a
        Pallas kernel or a custom call. Default ``False``.
    Returns:
      None.
    Examples:
      >>> @jax.custom_vjp
      ... def f(x, y):
      ...   return jnp.sin(x) * y
      ...
      >>> def f_fwd(x, y):
      ...   return f(x, y), (jnp.cos(x), jnp.sin(x), y)
      ...
      >>> def f_bwd(res, g):
      ...   cos_x, sin_x, y = res
      ...   return (cos_x * g * y, sin_x * g)
      ...
      >>> f.defvjp(f_fwd, f_bwd)
      >>> x = jnp.float32(1.0)
      >>> y = jnp.float32(2.0)
      >>> with jnp.printoptions(precision=2):
      ...   print(jax.value_and_grad(f)(x, y))
      (Array(1.68, dtype=float32), Array(1.08, dtype=float32))
    """
    self.fwd = fwd
    self.bwd = bwd
    self.symbolic_zeros = symbolic_zeros
    self.optimize_remat = optimize_remat
    # The two options are mutually exclusive (see flag docs above).
    if self.symbolic_zeros and self.optimize_remat:
      raise NotImplementedError(
          "remat optimization for custom_vjp does not support symbolic zeros")
  @partial(traceback_util.api_boundary,
           repro_api_name="jax.custom_vjp.__call__")
  def __call__(self, *args: Any, **kwargs: Any) -> ReturnValue:  # pytype: disable=invalid-annotation
    debug_fun = debug_info("custom_vjp fun", self.fun, args, kwargs,
                           static_argnums=self.nondiff_argnums)
    if not self.fwd or not self.bwd:
      msg = f"No VJP defined for custom_vjp function {debug_fun.func_name} using defvjp."
      raise AttributeError(msg)
    try:
      args = resolve_kwargs(self.fun, args, kwargs)
    except TypeError as e:
      raise TypeError(
          "The input arguments to the custom_vjp-decorated function "
          f"{debug_fun.func_name} could not be resolved to positional-only "
          f"arguments. Binding failed with the error:\n{e}"
      ) from e
    debug_fwd = debug_info("custom_vjp fwd", self.fwd, args, kwargs,
                           static_argnums=self.nondiff_argnums)
    # TODO(necula): figure out how to construct the debug_bwd args
    debug_bwd = debug_info("custom_vjp bwd", self.bwd, args, {})
    # Optionally wrap the user's fwd rule so remat can avoid re-running it.
    if self.optimize_remat:
      fwd = optimize_remat_of_custom_vjp_fwd(
          self.fun, debug_fun, self.fwd, debug_fwd,
          nondiff_argnums=self.nondiff_argnums,
          symbolic_zeros=self.symbolic_zeros)
    else:
      fwd = self.fwd
    if config.enable_custom_vjp_by_custom_transpose.value:
      if self.nondiff_argnums:
        raise NotImplementedError(
            'nondiff_argnums not implemented for new custom_vjp')
      return custom_vjp_by_custom_transpose(self.fun, self.fwd, self.bwd)(*args)
    else:
      if self.nondiff_argnums:
        # Partial-out the static (non-differentiable) arguments from fun/fwd,
        # and prepend them to bwd's residual arguments.
        for i in self.nondiff_argnums: _check_for_tracers(args[i])
        dyn_argnums = [i for i in range(len(args)) if i not in self.nondiff_argnums]
        f_, dyn_args = argnums_partial(
            lu.wrap_init(self.fun, debug_info=debug_fun), dyn_argnums,
            args, require_static_args_hashable=False)
        static_args = [args[i] for i in self.nondiff_argnums]
        fwd_, _ = argnums_partial(lu.wrap_init(fwd, debug_info=debug_fwd),
                                  dyn_argnums, args,
                                  require_static_args_hashable=False)
        bwd = prepend_static_args(lu.wrap_init(self.bwd, debug_info=debug_bwd),
                                  static_args)
      else:
        f_, dyn_args = lu.wrap_init(self.fun, debug_info=debug_fun), args
        fwd_ = lu.wrap_init(fwd, debug_info=debug_fwd)
        bwd = lu.wrap_init(self.bwd, debug_info=debug_bwd)
      # Flatten all pytree arguments to flat lists of leaves before binding
      # the custom_vjp primitive.
      args_flat, in_tree = tree_flatten(dyn_args)
      in_avals = [core.get_aval(x) for x in args_flat]
      if config.mutable_array_checks.value:
        f_ = _check_primal_refs(f_, self.nondiff_argnums, f_.debug_info)
      flat_fun, out_type = _flatten_fun_nokwargs(f_, in_tree)
      flat_fwd, out_trees = _flatten_fwd(
          fwd_, self.nondiff_argnums, self.symbolic_zeros, debug_fun,
          debug_fwd, in_tree, out_type)
      flat_bwd = _flatten_bwd(bwd, in_tree, in_avals, out_trees)
      out_flat = custom_vjp_call_p.bind(flat_fun, flat_fwd, flat_bwd,
                                        *args_flat, out_trees=out_trees,
                                        symbolic_zeros=self.symbolic_zeros)
      _, (out_tree, _, _) = lu.merge_linear_aux(out_type, out_trees)
      return tree_unflatten(out_tree, out_flat)
@lu.transformation2
def _check_primal_refs(
    f: Callable, nondiff_argnums: Sequence[int], debug: core.DebugInfo, *args):
  # Validate mutable-array (Ref) usage around the primal call: no two
  # differentiable arguments may alias the same Ref, and the primal may not
  # return any Ref at all (after_idx=0 with an empty allowed-args list).
  _check_for_aliased_refs(f, nondiff_argnums, debug, args)
  out = f(*args)
  _check_for_returned_refs(f, out, 'primal', [], 0)
  return out
def _check_for_aliased_refs(
    f: Callable, nondiff_argnums: Sequence[int], debug: core.DebugInfo, args):
  """Raise ValueError if two differentiable argument leaves alias one Ref.

  Args:
    f: the user function (only used in the error message).
    nondiff_argnums: argument positions excluded from the check.
    debug: debug info used to recover human-readable argument names.
    args: the (pytree) positional arguments passed to ``f``.
  """
  # Use the set (not the original sequence) for O(1) membership tests; the
  # original code built this set but then tested against the sequence.
  nondiff_argnums_ = set(nondiff_argnums)
  # Map each flattened leaf index back to the positional argument it came from.
  argnums = [x for i, arg in enumerate(args)
             for x in [i] * tree_structure(arg).num_leaves]
  leaves = tree_leaves(args)
  refs: dict[int, int] = {}
  for i, (argnum, x) in enumerate(zip(argnums, leaves)):
    if argnum in nondiff_argnums_: continue
    # Under symbolic_zeros, leaves are CustomVJPPrimal records; unwrap them.
    x = x.value if isinstance(x, CustomVJPPrimal) else x
    # refs.setdefault records the first leaf index seen for each referent; a
    # second occurrence of the same referent is an aliasing violation.
    if (isinstance((a := core.get_aval(x)), AbstractRef) and
        (dup_idx := refs.setdefault(id(core.get_referent(x)), i)) != i):
      arg_names = debug.safe_arg_names(len(leaves))
      raise ValueError(
          "only one reference to a mutable array may be passed as an argument "
          f"to a function, but custom_vjp function {f} got the same mutable "
          f"array reference of type {a.str_short()} at {arg_names[dup_idx]} and"
          f" {arg_names[i]}.")
def _check_for_returned_refs(f, out, kind, args, after_idx):
  """Raise ValueError if ``out`` contains disallowed mutable array refs.

  A Ref leaf at flattened index < ``after_idx`` is never allowed; a Ref leaf
  at or after ``after_idx`` is allowed only if the identical object was one
  of ``args``.
  """
  args = [x.value if isinstance(x, CustomVJPPrimal) else x for x in args]
  # Identities of Refs that were passed in and may therefore be returned.
  ids = {id(x) for x in args if isinstance(core.get_aval(x), AbstractRef)}
  leaves = tree_leaves_with_path(out)
  for i, (path, leaf) in enumerate(leaves):
    if isinstance((a := core.get_aval(leaf)), AbstractRef):
      loc = f' at output tree path {keystr(path)}' if path else ''
      if i < after_idx:
        raise ValueError(f"custom_vjp {kind} function {f} returned a mutable "
                         f"array reference of type {a.str_short()}{loc}, "
                         "but mutable array references cannot be returned there.")
      if id(leaf) not in ids:
        raise ValueError(f"custom_vjp {kind} function {f} returned a mutable "
                         f"array reference of type {a.str_short()}{loc} "
                         "that was not an argument.")
@dataclasses.dataclass
| custom_vjp |
python | pypa__warehouse | warehouse/accounts/forms.py | {
"start": 21861,
"end": 23043
} | class ____(wtforms.Form):
username_or_email = wtforms.StringField(
validators=[
wtforms.validators.InputRequired(),
PreventNullBytesValidator(),
]
)
def validate_username_or_email(self, field):
"""
Check if the input is structurally correct, i.e. either a string or email.
Further validation happens in the View.
"""
if "@" in field.data:
# Additional checks for the validity of the address
try:
email_validator.validate_email(field.data, check_deliverability=True)
except email_validator.EmailNotValidError as e:
raise wtforms.validators.ValidationError(
message=INVALID_EMAIL_MESSAGE
) from e
else:
# the regexp below must match the CheckConstraint
# for the username field in accounts.models.User
if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]$", field.data):
raise wtforms.validators.ValidationError(
message=_("The username isn't valid. Try again.")
)
| RequestPasswordResetForm |
python | huggingface__transformers | src/transformers/models/sam2/image_processing_sam2_fast.py | {
"start": 14970,
"end": 30227
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 1024, "width": 1024}
mask_size = {"height": 256, "width": 256}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = Sam2FastImageProcessorKwargs
# modular artefacts
do_pad = None
pad_size = None
mask_pad_size = None
def __init__(self, **kwargs: Unpack[Sam2FastImageProcessorKwargs]):
super().__init__(**kwargs)
def _further_process_kwargs(
self,
size: Optional[SizeDict] = None,
mask_size: Optional[SizeDict] = None,
default_to_square: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated
Can be overridden by subclasses to customize the processing of kwargs.
"""
if kwargs is None:
kwargs = {}
if size is not None:
size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
if mask_size is not None:
mask_size = SizeDict(**get_size_dict(mask_size, param_name="mask_size"))
if isinstance(image_mean, list):
image_mean = tuple(image_mean)
if isinstance(image_std, list):
image_std = tuple(image_std)
if data_format is None:
data_format = ChannelDimension.FIRST
kwargs["size"] = size
kwargs["mask_size"] = mask_size
kwargs["image_mean"] = image_mean
kwargs["image_std"] = image_std
kwargs["data_format"] = data_format
# torch resize uses interpolation instead of resample
# Check if resample is an int before checking if it's an instance of PILImageResampling
# because if pillow < 9.1.0, resample is an int and PILImageResampling is a module.
# Checking PILImageResampling will fail with error `TypeError: isinstance() arg 2 must be a type or tuple of types`.
resample = kwargs.pop("resample")
kwargs["interpolation"] = (
pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
)
return kwargs
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
original_sizes = [image.shape[-2:] for image in images]
images_kwargs = kwargs.copy()
pixel_values = self._preprocess(images, **images_kwargs)
data = {
"pixel_values": pixel_values,
"original_sizes": original_sizes,
}
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
"interpolation": pil_torch_interpolation_mapping[PILImageResampling.NEAREST],
"size": segmentation_maps_kwargs.pop("mask_size"),
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
)
data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])
def _preprocess(
self,
images: list["torch.Tensor"],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> "torch.Tensor":
return super()._preprocess(images, return_tensors=return_tensors, **kwargs).pixel_values
def generate_crop_boxes(
self,
image: "torch.Tensor",
target_size,
crop_n_layers: int = 0,
overlap_ratio: float = 512 / 1500,
points_per_crop: Optional[int] = 32,
crop_n_points_downscale_factor: Optional[list[int]] = 1,
device: Optional["torch.device"] = None,
):
"""
Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
Args:
image (`torch.Tensor`):
Input original image
target_size (`int`):
Target size of the resized image
crop_n_layers (`int`, *optional*, defaults to 0):
If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where
each layer has 2**i_layer number of image crops.
overlap_ratio (`float`, *optional*, defaults to 512/1500):
Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
points_per_crop (`int`, *optional*, defaults to 32):
Number of points to sam2ple from each crop.
crop_n_points_downscale_factor (`list[int]`, *optional*, defaults to 1):
The number of points-per-side sam2pled in layer n is scaled down by crop_n_points_downscale_factor**n.
device (`torch.device`, *optional*, defaults to None):
Device to use for the computation. If None, cpu will be used.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
return_tensors (`str`, *optional*, defaults to `pt`):
If `pt`, returns `torch.Tensor`.
"""
image = self._process_image(image)
crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes(
image,
target_size,
crop_n_layers,
overlap_ratio,
points_per_crop,
crop_n_points_downscale_factor,
)
if device is None:
device = torch.device("cpu")
crop_boxes = crop_boxes.to(device)
points_per_crop = points_per_crop.to(device)
# cropped_images stays as torch.Tensor
input_labels = input_labels.to(device)
return crop_boxes, points_per_crop, cropped_images, input_labels
def filter_masks(
self,
masks,
iou_scores,
original_size,
cropped_box_image,
pred_iou_thresh=0.88,
stability_score_thresh=0.95,
mask_threshold=0,
stability_score_offset=1,
):
"""
Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being
that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability
score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to
bounding boxes and pad the predicted masks if necessary.
Args:
masks (`torch.Tensor`):
Input masks.
iou_scores (`torch.Tensor`):
List of IoU scores.
original_size (`tuple[int,int]`):
Size of the original image.
cropped_box_image (`torch.Tensor`):
The cropped image.
pred_iou_thresh (`float`, *optional*, defaults to 0.88):
The threshold for the iou scores.
stability_score_thresh (`float`, *optional*, defaults to 0.95):
The threshold for the stability score.
mask_threshold (`float`, *optional*, defaults to 0):
The threshold for the predicted masks.
stability_score_offset (`float`, *optional*, defaults to 1):
The offset for the stability score used in the `_compute_stability_score` method.
"""
original_height, original_width = original_size
iou_scores = iou_scores.flatten(0, 1)
masks = masks.flatten(0, 1)
if masks.shape[0] != iou_scores.shape[0]:
raise ValueError("masks and iou_scores must have the sam2e batch size.")
if masks.device != iou_scores.device:
iou_scores = iou_scores.to(masks.device)
batch_size = masks.shape[0]
keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)
if pred_iou_thresh > 0.0:
keep_mask = keep_mask & (iou_scores > pred_iou_thresh)
# compute stability score
if stability_score_thresh > 0.0:
stability_scores = _compute_stability_score(masks, mask_threshold, stability_score_offset)
keep_mask = keep_mask & (stability_scores > stability_score_thresh)
scores = iou_scores[keep_mask]
masks = masks[keep_mask]
# binarize masks
masks = masks > mask_threshold
converted_boxes = _batched_mask_to_box(masks)
keep_mask = ~_is_box_near_crop_edge(
converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
)
scores = scores[keep_mask]
masks = masks[keep_mask]
converted_boxes = converted_boxes[keep_mask]
masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
# conversion to rle is necessary to run non-maximum suppression
masks = _mask_to_rle(masks)
return masks, scores, converted_boxes
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[torch.Tensor, List[torch.Tensor], np.ndarray, List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
(`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
# TODO: add connected components kernel for postprocessing
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
if apply_non_overlapping_constraints:
interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
def post_process_for_mask_generation(self, all_masks, all_scores, all_boxes, crops_nms_thresh):
"""
Post processes mask that are generated by calling the Non Maximum Suppression algorithm on the predicted masks.
Args:
all_masks (`torch.Tensor`):
List of all predicted segmentation masks
all_scores (`torch.Tensor`):
List of all predicted iou scores
all_boxes (`torch.Tensor`):
List of all bounding boxes of the predicted masks
crops_nms_thresh (`float`):
Threshold for NMS (Non Maximum Suppression) algorithm.
"""
return _post_process_for_mask_generation(all_masks, all_scores, all_boxes, crops_nms_thresh)
def _apply_non_overlapping_constraints(self, pred_masks: torch.Tensor) -> torch.Tensor:
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
# Explicit public API of this module.
__all__ = ["Sam2ImageProcessorFast"]
| Sam2ImageProcessorFast |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 14555,
"end": 14713
} | class ____(sgqlc.types.Enum):
    """
    Ordering field for language connections in the GitHub GraphQL schema.
    The only supported choice is ``SIZE`` (presumably the total size of code
    written in the language — confirm against the upstream schema docs).
    """
    __schema__ = graphql_schema
    __choices__ = ("SIZE",)
| LanguageOrderField |
python | pydata__xarray | xarray/core/variable.py | {
"start": 2604,
"end": 12116
} | class ____(ValueError):
    """Error class used when we can't safely guess a dimension name.

    Raised e.g. when data is supplied without explicit dimension names and
    the names cannot be inferred unambiguously.
    """
    # inherits from ValueError for backward compatibility
    # TODO: move this to an xarray.exceptions module?
def as_variable(
    obj: T_DuckArray | Any, name=None, auto_convert: bool = True
) -> Variable | IndexVariable:
    """Convert an object into a Variable.
    Parameters
    ----------
    obj : object
        Object to convert into a Variable.
        - If the object is already a Variable, return a shallow copy.
        - Otherwise, if the object has 'dims' and 'data' attributes, convert
          it into a new Variable.
        - If all else fails, attempt to convert the object into a Variable by
          unpacking it into the arguments for creating a new Variable.
    name : str, optional
        If provided:
        - `obj` can be a 1D array, which is assumed to label coordinate values
          along a dimension of this given name.
        - Variables with name matching one of their dimensions are converted
          into `IndexVariable` objects.
    auto_convert : bool, optional
        For internal use only! If True, convert a "dimension" variable into
        an IndexVariable object (deprecated).
    Returns
    -------
    var : Variable
        The newly created variable.
    """
    from xarray.core.dataarray import DataArray
    # TODO: consider extending this method to automatically handle Iris and
    if isinstance(obj, DataArray):
        # extract the primary Variable from DataArrays
        obj = obj.variable
    if isinstance(obj, Variable):
        obj = obj.copy(deep=False)
    elif isinstance(obj, tuple):
        # (dims, data[, attrs[, encoding]]) tuple form
        try:
            dims_, data_, *attrs = obj
        except ValueError as err:
            raise ValueError(
                f"Tuple {obj} is not in the form (dims, data[, attrs])"
            ) from err
        if isinstance(data_, DataArray):
            raise TypeError(
                f"Variable {name!r}: Using a DataArray object to construct a variable is"
                " ambiguous, please extract the data using the .data property."
            )
        try:
            obj = Variable(dims_, data_, *attrs)
        except (TypeError, ValueError) as error:
            # re-raise with the same exception type but a clearer message
            raise error.__class__(
                f"Variable {name!r}: Could not convert tuple of form "
                f"(dims, data[, attrs, encoding]): {obj} to Variable."
            ) from error
    elif utils.is_scalar(obj):
        obj = Variable([], obj)
    elif isinstance(obj, pd.Index | IndexVariable) and obj.name is not None:
        obj = Variable(obj.name, obj)
    elif isinstance(obj, set | dict):
        raise TypeError(f"variable {name!r} has invalid type {type(obj)!r}")
    elif name is not None:
        # bare array-like: interpret as 1D coordinate labels for dimension `name`
        data: T_DuckArray = as_compatible_data(obj)
        if data.ndim != 1:
            raise MissingDimensionsError(
                f"cannot set variable {name!r} with {data.ndim!r}-dimensional data "
                "without explicit dimension names. Pass a tuple of "
                "(dims, data) instead."
            )
        obj = Variable(name, data, fastpath=True)
    else:
        raise TypeError(
            f"Variable {name!r}: unable to convert object into a variable without an "
            f"explicit list of dimensions: {obj!r}"
        )
    if auto_convert and name is not None and name in obj.dims and obj.ndim == 1:
        # automatically convert the Variable into an Index
        emit_user_level_warning(
            f"variable {name!r} with name matching its dimension will not be "
            "automatically converted into an `IndexVariable` object in the future.",
            FutureWarning,
        )
        obj = obj.to_index_variable()
    return obj
def _maybe_wrap_data(data):
    """
    Wrap pandas.Index and supported pandas extension arrays in adapter objects
    to ensure they can be indexed properly.
    NumpyArrayAdapter, PandasIndexingAdapter and LazilyIndexedArray should
    all pass through unmodified.
    """
    if isinstance(data, pd.Index):
        wrapped = PandasIndexingAdapter(data)
    elif isinstance(data, UNSUPPORTED_EXTENSION_ARRAY_TYPES):
        wrapped = data.to_numpy()
    elif isinstance(
        data, pd.api.extensions.ExtensionArray
    ) and is_allowed_extension_array(data):
        wrapped = PandasExtensionArray(data)
    else:
        wrapped = data
    return wrapped
def _possibly_convert_objects(values):
    """Convert object arrays into datetime64 and timedelta64 according
    to the pandas convention. For backwards compat, as of 3.0.0 pandas,
    object dtype inputs are cast to strings by `pandas.Series`
    but we output them as object dtype with the input metadata preserved as well.
    * datetime.datetime
    * datetime.timedelta
    * pd.Timestamp
    * pd.Timedelta
    """
    # round-trip through a pandas Series to get pandas' dtype inference
    as_series = pd.Series(values.ravel(), copy=False)
    result = np.asarray(as_series).reshape(values.shape)
    if not result.flags.writeable:
        # GH8843, pandas copy-on-write mode creates read-only arrays by default
        try:
            result.flags.writeable = True
        except ValueError:
            result = result.copy()
    # For why we need this behavior: https://github.com/pandas-dev/pandas/issues/61938
    # Object datatype inputs that are strings
    # will be converted to strings by `pandas.Series`, and as of 3.0.0, lose
    # `dtype.metadata`. If the roundtrip back to numpy in this function yields an
    # object array again, the dtype.metadata will be preserved.
    if (
        result.dtype.kind == "O"
        and values.dtype.kind == "O"
        and Version(pd.__version__) >= Version("3.0.0dev0")
    ):
        result.dtype = values.dtype
    return result
def as_compatible_data(
    data: T_DuckArray | ArrayLike, fastpath: bool = False
) -> T_DuckArray:
    """Prepare and wrap data to put in a Variable.
    - If data does not have the necessary attributes, convert it to ndarray.
    - If it's a pandas.Timestamp, convert it to datetime64.
    - If data is already a pandas or xarray object (other than an Index), just
      use the values.
    Finally, wrap it up with an adapter if necessary.
    """
    if fastpath and getattr(data, "ndim", None) is not None:
        return cast("T_DuckArray", data)
    from xarray.core.dataarray import DataArray
    # TODO: do this unwrapping in the Variable/NamedArray constructor instead.
    if isinstance(data, Variable):
        return cast("T_DuckArray", data._data)
    # TODO: do this unwrapping in the DataArray constructor instead.
    if isinstance(data, DataArray):
        return cast("T_DuckArray", data._variable._data)
    def convert_non_numpy_type(data):
        return cast("T_DuckArray", _maybe_wrap_data(data))
    if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
        return convert_non_numpy_type(data)
    if isinstance(data, tuple):
        data = utils.to_0d_object_array(data)
    # we don't want nested self-described arrays
    if isinstance(data, pd.Series | pd.DataFrame):
        if (
            isinstance(data, pd.Series)
            and is_allowed_extension_array(data.array)
            # Some datetime types are not allowed as well as backing Variable types
            and not isinstance(data.array, UNSUPPORTED_EXTENSION_ARRAY_TYPES)
        ):
            pandas_data = data.array
        else:
            pandas_data = data.values  # type: ignore[assignment]
        if isinstance(pandas_data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
            return convert_non_numpy_type(pandas_data)
        else:
            data = pandas_data
    if isinstance(data, np.ma.MaskedArray):
        # replace masked entries with the dtype-appropriate fill value
        mask = np.ma.getmaskarray(data)
        if mask.any():
            _dtype, fill_value = dtypes.maybe_promote(data.dtype)
            data = duck_array_ops.where_method(data, ~mask, fill_value)
        else:
            data = np.asarray(data)
    if isinstance(data, np.matrix):
        data = np.asarray(data)
    # immediately return array-like types except `numpy.ndarray` and `numpy` scalars
    # compare types with `is` instead of `isinstance` to allow `numpy.ndarray` subclasses
    is_numpy = type(data) is np.ndarray or isinstance(data, np.generic)
    if not is_numpy and (
        hasattr(data, "__array_function__") or hasattr(data, "__array_namespace__")
    ):
        return cast("T_DuckArray", data)
    # anything left will be converted to `numpy.ndarray`, including `numpy` scalars
    data = np.asarray(data)
    if data.dtype.kind in "OMm":
        data = _possibly_convert_objects(data)
    return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
data = np.asarray(data)
if data.ndim == 0:
kind = data.dtype.kind
if kind in "mM":
unit, _ = np.datetime_data(data.dtype)
if kind == "M":
data = np.datetime64(data, unit)
elif kind == "m":
data = np.timedelta64(data, unit)
return data
| MissingDimensionsError |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/cluster_resolver.py | {
"start": 15984,
"end": 23829
} | class ____(ClusterResolver):
"""Performs a union on underlying ClusterResolvers.
This class performs a union given two or more existing ClusterResolvers. It
merges the underlying ClusterResolvers, and returns one unified ClusterSpec
when cluster_spec is called. The details of the merge function is
documented in the cluster_spec function.
For additional ClusterResolver properties such as task type, task index,
rpc layer, environment, etc..., we will return the value from the first
ClusterResolver in the union.
An example to combine two cluster resolvers:
```Python
cluster_0 = tf.train.ClusterSpec({"worker": ["worker0.example.com:2222",
"worker1.example.com:2222"]})
cluster_resolver_0 = SimpleClusterResolver(cluster, task_type="worker",
task_id=0,
rpc_layer="grpc")
cluster_1 = tf.train.ClusterSpec({"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]})
cluster_resolver_1 = SimpleClusterResolver(cluster, task_type="ps",
task_id=0,
rpc_layer="grpc")
# Its task type would be "worker".
cluster_resolver = UnionClusterResolver(cluster_resolver_0,
cluster_resolver_1)
```
An example to override the number of GPUs in a TFConfigClusterResolver
instance:
```Python
tf_config = TFConfigClusterResolver()
gpu_override = SimpleClusterResolver(tf_config.cluster_spec(),
num_accelerators={"GPU": 1})
cluster_resolver = UnionResolver(gpu_override, tf_config)
```
"""
def __init__(self, *args, **kwargs):
"""Initializes a UnionClusterResolver with other ClusterResolvers.
Args:
*args: `ClusterResolver` objects to be unionized.
**kwargs:
rpc_layer - (Optional) Override value for the RPC layer used by
TensorFlow.
task_type - (Optional) Override value for the current task type.
task_id - (Optional) Override value for the current task index.
Raises:
TypeError: If any argument is not a subclass of `ClusterResolvers`.
ValueError: If there are no arguments passed.
"""
super(UnionClusterResolver, self).__init__()
self._rpc_layer = kwargs.pop('rpc_layer', None)
self._task_type = kwargs.pop('task_type', None)
self._task_id = kwargs.pop('task_id', None)
if kwargs:
raise ValueError('Unexpected kwargs provided {!r}'.format(kwargs))
if not args:
raise ValueError('At least one ClusterResolver is required.')
for cluster_resolver in args:
if not isinstance(cluster_resolver, ClusterResolver):
raise TypeError('All arguments must be a sub-class of '
'`ClusterResolver.`')
self._cluster_resolvers = args
def cluster_spec(self):
"""Returns a union of all the ClusterSpecs from the ClusterResolvers.
Returns:
A ClusterSpec containing host information merged from all the underlying
ClusterResolvers.
Raises:
KeyError: If there are conflicting keys detected when merging two or
more dictionaries, this exception is raised.
Note: If there are multiple ClusterResolvers exposing ClusterSpecs with the
same job name, we will merge the list/dict of workers.
If *all* underlying ClusterSpecs expose the set of workers as lists, we will
concatenate the lists of workers, starting with the list of workers from
the first ClusterResolver passed into the constructor.
If *any* of the ClusterSpecs expose the set of workers as a dict, we will
treat all the sets of workers as dicts (even if they are returned as lists)
and will only merge them into a dict if there is no conflicting keys. If
there is a conflicting key, we will raise a `KeyError`.
"""
merged_cluster = {}
# We figure out whether it is all lists for a particular job, or whether
# there are dicts inside.
for cluster_resolver in self._cluster_resolvers:
cluster_spec = cluster_resolver.cluster_spec()
cluster_dict = cluster_spec.as_dict()
for job_name, tasks in cluster_dict.items():
if job_name in merged_cluster:
# If we see a dict, then we write a dict out regardless.
if isinstance(tasks, dict):
merged_cluster[job_name] = {}
else:
# We take whichever type is present.
if isinstance(tasks, list):
merged_cluster[job_name] = []
else:
merged_cluster[job_name] = {}
# We then do the merge as appropriate in merged_cluster[job].
for cluster_resolver in self._cluster_resolvers:
cluster_spec = cluster_resolver.cluster_spec()
cluster_dict = cluster_spec.as_dict()
for job_name, tasks in cluster_dict.items():
if isinstance(merged_cluster[job_name], list):
# We all have lists, we can just concatenate and be done.
merged_cluster[job_name].extend(tasks)
else:
if isinstance(tasks, list):
# We convert to a dictionary if the type is a list.
task_dict = dict(zip(range(0, len(tasks)), tasks))
else:
# We can simply make a copy (for update) and be done.
task_dict = tasks.copy()
# We detect if there are duplicates, and raise an error if so.
task_keys = set(task_dict)
merged_keys = set(merged_cluster[job_name].keys())
intersected_keys = task_keys.intersection(merged_keys)
if intersected_keys:
raise KeyError('Duplicate keys detected when merging two '
'ClusterSpecs: %s' % repr(intersected_keys))
# We do the merge after all the processing.
merged_cluster[job_name].update(task_dict)
return ClusterSpec(merged_cluster)
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Returns the master address to use when creating a session.
This usually returns the master from the first ClusterResolver passed in,
but you can override this by specifying the task_type and task_id.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (Optional) The type of the TensorFlow task of the master.
task_id: (Optional) The index of the TensorFlow task of the master.
rpc_layer: (Optional) The RPC protocol for the given cluster.
Returns:
The name or URL of the session master.
"""
if task_type is not None and task_id is not None:
master = self.cluster_spec().task_address(task_type, task_id)
return format_master_url(master, rpc_layer or self._rpc_layer)
return self._cluster_resolvers[0].master(rpc_layer=rpc_layer)
@property
def task_type(self):
return self._task_type or self._cluster_resolvers[0].task_type
@property
def task_id(self):
return self._task_id or self._cluster_resolvers[0].task_id
@task_type.setter
def task_type(self, task_type):
self._task_type = task_type
@task_id.setter
def task_id(self, task_id):
self._task_id = task_id
@property
def environment(self):
return self._cluster_resolvers[0].environment
def num_accelerators(self,
task_type=None,
task_id=None,
config_proto=None):
return self._cluster_resolvers[0].num_accelerators(
task_type, task_id, config_proto)
@property
def rpc_layer(self):
return self._rpc_layer or self._cluster_resolvers[0].rpc_layer
@rpc_layer.setter
def rpc_layer(self, rpc_layer):
self._rpc_layer = rpc_layer
| UnionClusterResolver |
python | getsentry__sentry | src/sentry/api/serializers/rest_framework/organizationmemberinvite.py | {
"start": 7043,
"end": 7307
} | class ____(serializers.Serializer):
trigger_regenerate_token = serializers.BooleanField(
required=False,
default=False,
help_text="Whether or not to regenerate the token for this invitation",
)
| OrganizationMemberReinviteRequestValidator |
python | ray-project__ray | python/ray/dashboard/subprocesses/tests/utils.py | {
"start": 5822,
"end": 6495
} | class ____(BaseTestModule):
@routes.get("/test1")
async def test(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
return aiohttp.web.Response(text="Hello from TestModule1")
@routes.get("/redirect_between_modules")
async def redirect_between_modules(
self, req: aiohttp.web.Request
) -> aiohttp.web.Response:
# Redirect to the /test route in TestModule
raise aiohttp.web.HTTPFound(location="/test")
@routes.get("/redirect_within_module")
async def redirect_within_module(
self, req: aiohttp.web.Request
) -> aiohttp.web.Response:
raise aiohttp.web.HTTPFound(location="/test1")
| TestModule1 |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_build.py | {
"start": 45027,
"end": 49082
} | class ____:
"""
Processes build configurations to add additional functionality to support the use of operators.
The following improvements are made:
* It is required to provide the source and only one type can be given,
* It is possible to provide the source as the URL address instead dict.
:param build: The request body of the build.
See: https://cloud.google.com/cloud-build/docs/api/reference/rest/Shared.Types/Build
"""
def __init__(self, build: dict | Build) -> None:
self.build = deepcopy(build)
def _verify_source(self) -> None:
if not exactly_one("storage_source" in self.build["source"], "repo_source" in self.build["source"]):
raise AirflowException(
"The source could not be determined. Please choose one data source from: "
"storage_source and repo_source."
)
def _reformat_source(self) -> None:
self._reformat_repo_source()
self._reformat_storage_source()
def _reformat_repo_source(self) -> None:
if "repo_source" not in self.build["source"]:
return
repo_source = self.build["source"]["repo_source"]
if not isinstance(repo_source, str):
return
self.build["source"]["repo_source"] = self._convert_repo_url_to_dict(repo_source)
def _reformat_storage_source(self) -> None:
if "storage_source" not in self.build["source"]:
return
storage_source = self.build["source"]["storage_source"]
if not isinstance(storage_source, str):
return
self.build["source"]["storage_source"] = self._convert_storage_url_to_dict(storage_source)
def process_body(self) -> Build:
"""
Process the body passed in the constructor.
:return: the body.
"""
if "source" in self.build:
self._verify_source()
self._reformat_source()
return Build(self.build)
@staticmethod
def _convert_repo_url_to_dict(source: str) -> dict[str, Any]:
"""
Convert url to repository in Google Cloud Source to a format supported by the API.
Example valid input:
.. code-block:: none
https://source.cloud.google.com/airflow-project/airflow-repo/+/branch-name:
"""
url_parts = urlsplit(source)
match = REGEX_REPO_PATH.search(url_parts.path)
if url_parts.scheme != "https" or url_parts.hostname != "source.cloud.google.com" or not match:
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"https://source.cloud.google.com/airflow-project/airflow-repo/+/branch-name:"
)
project_id = unquote(match.group("project_id"))
repo_name = unquote(match.group("repo_name"))
branch_name = unquote(match.group("branch_name")) if match.group("branch_name") else "master"
source_dict = {
"project_id": project_id,
"repo_name": repo_name,
"branch_name": branch_name,
}
return source_dict
@staticmethod
def _convert_storage_url_to_dict(storage_url: str) -> dict[str, Any]:
"""
Convert url to object in Google Cloud Storage to a format supported by the API.
Example valid input:
.. code-block:: none
gs://bucket-name/object-name.tar.gz
"""
url_parts = urlsplit(storage_url)
if url_parts.scheme != "gs" or not url_parts.hostname or not url_parts.path or url_parts.path == "/":
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"gs://bucket-name/object-name.tar.gz#24565443"
)
source_dict: dict[str, Any] = {
"bucket": url_parts.hostname,
"object_": url_parts.path[1:],
}
if url_parts.fragment:
source_dict["generation"] = int(url_parts.fragment)
return source_dict
| BuildProcessor |
python | lepture__authlib | authlib/common/errors.py | {
"start": 1615,
"end": 1667
} | class ____(AuthlibBaseError):
pass
| ContinueIteration |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py | {
"start": 1069,
"end": 1395
} | class ____(BaseModel):
model: Optional[str] = None
"""Model name that generated the response.
Defaults to null when using the session default.
"""
tool_choice: Optional[InferenceOptionsToolChoice] = None
"""Preferred tool to invoke. Defaults to null when ChatKit should auto-select."""
| InferenceOptions |
python | scikit-learn__scikit-learn | sklearn/gaussian_process/kernels.py | {
"start": 44186,
"end": 48530
} | class ____(StationaryKernelMixin, GenericKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise of the signal as independently and identically
normally-distributed. The parameter noise_level equals the variance of this
noise.
.. math::
k(x_1, x_2) = noise\\_level \\text{ if } x_i == x_j \\text{ else } 0
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
noise_level : float, default=1.0
Parameter controlling the noise level (variance)
noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'noise_level'.
If set to "fixed", 'noise_level' cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel(noise_level=0.5)
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0, 592.1 ]), array([316.6, 316.6]))
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter("noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(_num_samples(X))
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (
K,
self.noise_level * np.eye(_num_samples(X))[:, :, np.newaxis],
)
else:
return K, np.empty((_num_samples(X), _num_samples(X), 0))
else:
return K
else:
return np.zeros((_num_samples(X), _num_samples(Y)))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(
_num_samples(X), self.noise_level, dtype=np.array(self.noise_level).dtype
)
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(
self.__class__.__name__, self.noise_level
)
| WhiteKernel |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 347890,
"end": 349407
} | class ____(ExprNode):
# A starred expression like "*a"
#
# This is only allowed in sequence assignment or construction such as
#
# a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4]
#
# and will be special cased during type analysis (or generate an error
# if it's found at unexpected places).
#
# target ExprNode
subexprs = ['target']
is_starred = 1
type = py_object_type
is_temp = 1
starred_expr_allowed_here = False
def __init__(self, pos, target):
ExprNode.__init__(self, pos, target=target)
def analyse_declarations(self, env):
if not self.starred_expr_allowed_here:
error(self.pos, "starred expression is not allowed here")
self.target.analyse_declarations(env)
def infer_type(self, env):
return self.target.infer_type(env)
def analyse_types(self, env):
if not self.starred_expr_allowed_here:
error(self.pos, "starred expression is not allowed here")
self.target = self.target.analyse_types(env)
self.type = self.target.type
return self
def analyse_target_declaration(self, env):
self.target.analyse_target_declaration(env)
def analyse_target_types(self, env):
self.target = self.target.analyse_target_types(env)
self.type = self.target.type
return self
def calculate_result_code(self):
return ""
def generate_result_code(self, code):
pass
| StarredUnpackingNode |
python | google__jax | tests/pallas/pallas_test.py | {
"start": 27860,
"end": 27928
} | class ____(PallasCallTest):
INTERPRET = True
| PallasCallInterpretTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/links/test_dataplex.py | {
"start": 6433,
"end": 7474
} | class ____:
@pytest.mark.db_test
def test_get_link(self, create_task_instance_of_operator, session, mock_supervisor_comms):
expected_url = DATAPLEX_LAKE_LINK
link = DataplexLakeLink()
ti = create_task_instance_of_operator(
DataplexCreateLakeOperator,
dag_id="test_link_dag",
task_id="test_link_task",
region=TEST_LOCATION,
lake_id=TEST_LAKE_ID,
project_id=TEST_PROJECT_ID,
body={},
)
session.add(ti)
session.commit()
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key="key",
value={
"lake_id": ti.task.lake_id,
"region": ti.task.region,
"project_id": ti.task.project_id,
},
)
actual_url = link.get_link(operator=ti.task, ti_key=ti.key)
assert actual_url == expected_url
| TestDataplexLakeLink |
python | numba__numba | numba/core/typing/listdecl.py | {
"start": 2808,
"end": 3179
} | class ____(AbstractTemplate):
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if isinstance(a, types.List) and isinstance(b, types.List):
unified = self.context.unify_pairs(a, b)
if unified is not None:
return signature(unified, a, b)
@infer_global(operator.iadd)
| AddList |
python | TheAlgorithms__Python | data_structures/binary_tree/avl_tree.py | {
"start": 6977,
"end": 9702
} | class ____:
"""
An AVL tree doctest
Examples:
>>> t = AVLtree()
>>> t.insert(4)
insert:4
>>> print(str(t).replace(" \\n","\\n"))
4
*************************************
>>> t.insert(2)
insert:2
>>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
4
2 *
*************************************
>>> t.insert(3)
insert:3
right rotation node: 2
left rotation node: 4
>>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
3
2 4
*************************************
>>> t.get_height()
2
>>> t.del_node(3)
delete:3
>>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
4
2 *
*************************************
"""
def __init__(self) -> None:
self.root: MyNode | None = None
def get_height(self) -> int:
return get_height(self.root)
def insert(self, data: Any) -> None:
print("insert:" + str(data))
self.root = insert_node(self.root, data)
def del_node(self, data: Any) -> None:
print("delete:" + str(data))
if self.root is None:
print("Tree is empty!")
return
self.root = del_node(self.root, data)
def __str__(
self,
) -> str: # a level traversale, gives a more intuitive look on the tree
output = ""
q = MyQueue()
q.push(self.root)
layer = self.get_height()
if layer == 0:
return output
cnt = 0
while not q.is_empty():
node = q.pop()
space = " " * int(math.pow(2, layer - 1))
output += space
if node is None:
output += "*"
q.push(None)
q.push(None)
else:
output += str(node.get_data())
q.push(node.get_left())
q.push(node.get_right())
output += space
cnt = cnt + 1
for i in range(100):
if cnt == math.pow(2, i) - 1:
layer = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _test() -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| AVLtree |
python | tensorflow__tensorflow | tensorflow/python/keras/callbacks.py | {
"start": 21810,
"end": 32875
} | class ____:
"""Abstract base class used to build new callbacks.
Callbacks can be passed to keras methods such as `fit`, `evaluate`, and
`predict` in order to hook into the various stages of the model training and
inference lifecycle.
To create a custom callback, subclass `keras.callbacks.Callback` and override
the method associated with the stage of interest. See
https://www.tensorflow.org/guide/keras/custom_callback for more information.
Example:
>>> training_finished = False
>>> class MyCallback(tf.keras.callbacks.Callback):
... def on_train_end(self, logs=None):
... global training_finished
... training_finished = True
>>> model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
>>> model.compile(loss='mean_squared_error')
>>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
... callbacks=[MyCallback()])
>>> assert training_finished == True
If you want to use `Callback` objects in a custom training loop:
1. You should pack all your callbacks into a single `callbacks.CallbackList`
so they can all be called together.
2. You will need to manually call all the `on_*` methods at the appropriate
locations in your loop. Like this:
```
callbacks = tf.keras.callbacks.CallbackList([...])
callbacks.append(...)
callbacks.on_train_begin(...)
for epoch in range(EPOCHS):
callbacks.on_epoch_begin(epoch)
for i, data in dataset.enumerate():
callbacks.on_train_batch_begin(i)
batch_logs = model.train_step(data)
callbacks.on_train_batch_end(i, batch_logs)
epoch_logs = ...
callbacks.on_epoch_end(epoch, epoch_logs)
final_logs=...
callbacks.on_train_end(final_logs)
```
Attributes:
params: Dict. Training parameters (eg. verbosity, batch size, number of
epochs...).
model: Instance of `keras.models.Model`. Reference of the model being
trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch (see method-specific docstrings).
"""
def __init__(self):
self.validation_data = None # pylint: disable=g-missing-from-attributes
self.model = None
# Whether this Callback should only run on the chief worker in a
# Multi-Worker setting.
# TODO(omalleyt): Make this attr public once solution is stable.
self._chief_worker_only = None
self._supports_tf_logs = False
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_batch_begin(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_begin`."""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_batch_end(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_end`."""
@doc_controls.for_subclass_implementers
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`. For training epoch, the values of the
`Model`'s metrics are returned. Example : `{'loss': 0.2, 'accuracy':
0.7}`.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_train_batch_begin(self, batch, logs=None):
"""Called at the beginning of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.train_step`. Typically,
the values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
# For backwards compatibility.
self.on_batch_begin(batch, logs=logs)
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_train_batch_end(self, batch, logs=None):
"""Called at the end of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
# For backwards compatibility.
self.on_batch_end(batch, logs=logs)
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_test_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `evaluate` methods.
Also called at the beginning of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.test_step`. Typically,
the values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_test_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `evaluate` methods.
Also called at the end of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_predict_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `predict` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.predict_step`,
it typically returns a dict with a key 'outputs' containing
the model's outputs.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_predict_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `predict` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
@doc_controls.for_subclass_implementers
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_train_end(self, logs=None):
"""Called at the end of training.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently the output of the last call to `on_epoch_end()`
is passed to this argument for this method but that may change in
the future.
"""
@doc_controls.for_subclass_implementers
def on_test_begin(self, logs=None):
"""Called at the beginning of evaluation or validation.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_test_end(self, logs=None):
"""Called at the end of evaluation or validation.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently the output of the last call to
`on_test_batch_end()` is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_predict_begin(self, logs=None):
"""Called at the beginning of prediction.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_predict_end(self, logs=None):
"""Called at the end of prediction.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def _implements_train_batch_hooks(self):
"""Determines if this Callback should be called for each train batch."""
return (not generic_utils.is_default(self.on_batch_begin) or
not generic_utils.is_default(self.on_batch_end) or
not generic_utils.is_default(self.on_train_batch_begin) or
not generic_utils.is_default(self.on_train_batch_end))
def _implements_test_batch_hooks(self):
"""Determines if this Callback should be called for each test batch."""
return (not generic_utils.is_default(self.on_test_batch_begin) or
not generic_utils.is_default(self.on_test_batch_end))
def _implements_predict_batch_hooks(self):
"""Determines if this Callback should be called for each predict batch."""
return (not generic_utils.is_default(self.on_predict_batch_begin) or
not generic_utils.is_default(self.on_predict_batch_end))
| Callback |
python | plotly__plotly.py | plotly/graph_objs/layout/annotation/_hoverlabel.py | {
"start": 235,
"end": 5099
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.annotation"
_path_str = "layout.annotation.hoverlabel"
_valid_props = {"bgcolor", "bordercolor", "font"}
@property
def bgcolor(self):
"""
Sets the background color of the hover label. By default uses
the annotation's `bgcolor` made opaque, or white if it was
transparent.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover label. By default uses
either dark grey or white, for maximum contrast with
`hoverlabel.bgcolor`.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def font(self):
"""
Sets the hover label text font. By default uses the global
hover font and size, with color from `hoverlabel.bordercolor`.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.annotation.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.annotation.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the background color of the hover label. By
default uses the annotation's `bgcolor` made opaque, or
white if it was transparent.
bordercolor
Sets the border color of the hover label. By default
uses either dark grey or white, for maximum contrast
with `hoverlabel.bgcolor`.
font
Sets the hover label text font. By default uses the
global hover font and size, with color from
`hoverlabel.bordercolor`.
"""
def __init__(self, arg=None, bgcolor=None, bordercolor=None, font=None, **kwargs):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.annotation.Hoverlabel`
bgcolor
Sets the background color of the hover label. By
default uses the annotation's `bgcolor` made opaque, or
white if it was transparent.
bordercolor
Sets the border color of the hover label. By default
uses either dark grey or white, for maximum contrast
with `hoverlabel.bgcolor`.
font
Sets the hover label text font. By default uses the
global hover font and size, with color from
`hoverlabel.bordercolor`.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.annotation.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.annotation.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("font", arg, font)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 86404,
"end": 86782
} | class ____(BaseModel, extra="forbid"):
points: List["PointStruct"] = Field(..., description="")
shard_key: Optional["ShardKeySelector"] = Field(default=None, description="")
update_filter: Optional["Filter"] = Field(
default=None,
description="If specified, only points that match this filter will be updated, others will be inserted",
)
| PointsList |
python | spack__spack | lib/spack/spack/error.py | {
"start": 5103,
"end": 5476
} | class ____(SpackError):
"""Raised when something goes wrong during install or uninstall.
The error can be annotated with a ``pkg`` attribute to allow the
caller to get the package for which the exception was raised.
"""
def __init__(self, message, long_msg=None, pkg=None):
super().__init__(message, long_msg)
self.pkg = pkg
| InstallError |
python | zarr-developers__zarr-python | src/zarr/core/dtype/npy/int.py | {
"start": 15898,
"end": 20982
} | class ____(BaseInt[np.dtypes.Int16DType, np.int16], HasEndianness):
"""
A Zarr data type for arrays containing 16-bit signed integers.
Wraps the [`np.dtypes.Int16DType`][numpy.dtypes.Int16DType] data type. Scalars for this data type are instances of
[`np.int16`][numpy.int16].
Attributes
----------
dtype_cls : np.dtypes.Int16DType
The class of the underlying NumPy dtype.
References
----------
This class implements the 16-bit signed integer data type defined in Zarr V2 and V3.
See the [Zarr V2](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding) and [Zarr V3](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v3/data-types/index.rst) specification documents for details.
"""
dtype_cls = np.dtypes.Int16DType
_zarr_v3_name: ClassVar[Literal["int16"]] = "int16"
_zarr_v2_names: ClassVar[tuple[Literal[">i2"], Literal["<i2"]]] = (">i2", "<i2")
@classmethod
def from_native_dtype(cls, dtype: TBaseDType) -> Self:
"""
Create an instance of this data type from a np.dtype('int16') instance.
Parameters
----------
dtype : np.dtype
The instance of np.dtype('int16') to create from.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input data type is not an instance of np.dtype('int16').
"""
if cls._check_native_dtype(dtype):
return cls(endianness=get_endianness_from_numpy_dtype(dtype))
raise DataTypeValidationError(
f"Invalid data type: {dtype}. Expected an instance of {cls.dtype_cls}"
)
def to_native_dtype(self) -> np.dtypes.Int16DType:
"""
Convert the data type to a np.dtype('int16') instance.
Returns
-------
np.dtype
The np.dtype('int16') instance.
"""
byte_order = endianness_to_numpy_str(self.endianness)
return self.dtype_cls().newbyteorder(byte_order)
@classmethod
def _from_json_v2(cls, data: DTypeJSON) -> Self:
"""
Create an instance of this data type from Zarr V2-flavored JSON.
Parameters
----------
data : DTypeJSON
The JSON data.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class.
"""
if cls._check_json_v2(data):
# Going via NumPy ensures that we get the endianness correct without
# annoying string parsing.
name = data["name"]
return cls.from_native_dtype(np.dtype(name))
msg = f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected one of the strings {cls._zarr_v2_names!r}."
raise DataTypeValidationError(msg)
@classmethod
def _from_json_v3(cls, data: DTypeJSON) -> Self:
"""
Create an instance of this data type from Zarr V3-flavored JSON.
Parameters
----------
data : DTypeJSON
The JSON data.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class.
"""
if cls._check_json_v3(data):
return cls()
msg = f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected the string {cls._zarr_v3_name!r}"
raise DataTypeValidationError(msg)
@overload
def to_json(self, zarr_format: Literal[2]) -> DTypeConfig_V2[Literal[">i2", "<i2"], None]: ...
@overload
def to_json(self, zarr_format: Literal[3]) -> Literal["int16"]: ...
def to_json(
self, zarr_format: ZarrFormat
) -> DTypeConfig_V2[Literal[">i2", "<i2"], None] | Literal["int16"]:
"""
Serialize this ZDType to v2- or v3-flavored JSON
Parameters
----------
zarr_format : ZarrFormat
The Zarr format version (2 or 3).
Returns
-------
DTypeConfig_V2[Literal[">i2", "<i2"], None] or Literal["int16"]
The JSON representation of the Int16 instance.
Raises
------
ValueError
If the zarr_format is not 2 or 3.
"""
if zarr_format == 2:
name = self.to_native_dtype().str
return {"name": name, "object_codec_id": None}
elif zarr_format == 3:
return self._zarr_v3_name
raise ValueError(f"zarr_format must be 2 or 3, got {zarr_format}") # pragma: no cover
@property
def item_size(self) -> int:
"""
The size of a single scalar in bytes.
Returns
-------
int
The size of a single scalar in bytes.
"""
return 2
@dataclass(frozen=True, kw_only=True)
| Int16 |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 98439,
"end": 98931
} | class ____(CType):
#
# PEP-539 "Py_tss_t" type
#
declaration_value = "Py_tss_NEEDS_INIT"
def __repr__(self):
return "<Py_tss_t>"
def declaration_code(self, entity_code,
for_display=0, dll_linkage=None, pyrex=0):
if pyrex or for_display:
base_code = "Py_tss_t"
else:
base_code = public_decl("Py_tss_t", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
| CPyTSSTType |
python | cython__cython | Cython/Compiler/Errors.py | {
"start": 2757,
"end": 2973
} | class ____(Exception):
# Throw this to stop the compilation immediately.
def __init__(self, message):
self.message_only = message
Exception.__init__(self, "Abort error: %s" % message)
| AbortError |
python | run-llama__llama_index | llama-index-integrations/retrievers/llama-index-retrievers-galaxia/llama_index/retrievers/galaxia/base.py | {
"start": 2880,
"end": 4939
} | class ____(BaseRetriever):
"""
Galaxia knowledge retriever.
before using the API create your knowledge base here:
beta.cloud.smabbler.com/
learn more here:
https://smabbler.gitbook.io/smabbler/api-rag/smabblers-api-rag
Args:
api_url : url of galaxia API, e.g. "https://beta.api.smabbler.com"
api_key : API key
knowledge_base_id : ID of the knowledge base (galaxia model)
Example:
.. code-block:: python
from llama_index.retrievers.galaxia import GalaxiaRetriever
from llama_index.core.schema import QueryBundle
retriever = GalaxiaRetriever(
api_url="beta.api.smabbler.com",
api_key="<key>",
knowledge_base_id="<knowledge_base_id>",
)
result = retriever._retrieve(QueryBundle(
"<test question>"
))
print(result)
"""
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int = 20,
wait_time: int = 2,
callback_manager: Optional[CallbackManager] = None,
):
self._client = GalaxiaClient(
api_url, api_key, knowledge_base_id, n_retries, wait_time
)
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query = query_bundle.query_str
response = self._client.retrieve(query)
if response is None:
return []
node_with_score = []
for res in response:
node_with_score.append(
NodeWithScore(
node=TextNode(
text=res["category"],
metadata={
"model": res["model"],
"file": res["group"],
},
),
score=res["rank"],
)
)
return node_with_score
| GalaxiaRetriever |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 6385,
"end": 6530
} | class ____(_PathValueError):
code = 'path.not_exists'
msg_template = 'file or directory at path "{path}" does not exist'
| PathNotExistsError |
python | getsentry__sentry | src/sentry/integrations/base.py | {
"start": 2005,
"end": 2192
} | class ____(NamedTuple):
description: str # A markdown description of the feature
featureGate: IntegrationFeatures # A IntegrationFeature that gates this feature
| FeatureDescription |
python | allegroai__clearml | clearml/backend_api/services/v2_20/auth.py | {
"start": 13416,
"end": 15207
} | class ____(Response):
"""
Response of auth.edit_user endpoint.
:param updated: Number of users updated (0 or 1)
:type updated: float
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "auth"
_action = "edit_user"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of users updated (0 or 1)",
"enum": [0, 1],
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[float] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(EditUserResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[float]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[float]) -> None:
if value is None:
self._property_updated = None
return
self.assert_isinstance(value, "updated", six.integer_types + (float,))
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| EditUserResponse |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 32504,
"end": 33963
} | class ____(TestCase):
"""Tests for ``spy()``"""
def test_basic(self):
original_iterable = iter('abcdefg')
head, new_iterable = mi.spy(original_iterable)
self.assertEqual(head, ['a'])
self.assertEqual(
list(new_iterable), ['a', 'b', 'c', 'd', 'e', 'f', 'g']
)
def test_unpacking(self):
original_iterable = iter('abcdefg')
(first, second, third), new_iterable = mi.spy(original_iterable, 3)
self.assertEqual(first, 'a')
self.assertEqual(second, 'b')
self.assertEqual(third, 'c')
self.assertEqual(
list(new_iterable), ['a', 'b', 'c', 'd', 'e', 'f', 'g']
)
def test_too_many(self):
original_iterable = iter('abc')
head, new_iterable = mi.spy(original_iterable, 4)
self.assertEqual(head, ['a', 'b', 'c'])
self.assertEqual(list(new_iterable), ['a', 'b', 'c'])
def test_zero(self):
original_iterable = iter('abc')
head, new_iterable = mi.spy(original_iterable, 0)
self.assertEqual(head, [])
self.assertEqual(list(new_iterable), ['a', 'b', 'c'])
def test_immutable(self):
original_iterable = iter('abcdefg')
head, new_iterable = mi.spy(original_iterable, 3)
head[0] = 'A'
self.assertEqual(head, ['A', 'b', 'c'])
self.assertEqual(
list(new_iterable), ['a', 'b', 'c', 'd', 'e', 'f', 'g']
)
| SpyTests |
python | py-pdf__pypdf | pypdf/_encryption.py | {
"start": 2058,
"end": 4351
} | class ____:
def __init__(
self,
stm_crypt: CryptBase,
str_crypt: CryptBase,
ef_crypt: CryptBase,
) -> None:
self.stm_crypt = stm_crypt
self.str_crypt = str_crypt
self.ef_crypt = ef_crypt
def encrypt_object(self, obj: PdfObject) -> PdfObject:
if isinstance(obj, ByteStringObject):
data = self.str_crypt.encrypt(obj.original_bytes)
obj = ByteStringObject(data)
elif isinstance(obj, TextStringObject):
data = self.str_crypt.encrypt(obj.get_encoded_bytes())
obj = ByteStringObject(data)
elif isinstance(obj, StreamObject):
obj2 = StreamObject()
obj2.update(obj)
obj2.set_data(self.stm_crypt.encrypt(obj._data))
for key, value in obj.items(): # Dont forget the Stream dict.
obj2[key] = self.encrypt_object(value)
obj = obj2
elif isinstance(obj, DictionaryObject):
obj2 = DictionaryObject() # type: ignore
for key, value in obj.items():
obj2[key] = self.encrypt_object(value)
obj = obj2
elif isinstance(obj, ArrayObject):
obj = ArrayObject(self.encrypt_object(x) for x in obj)
return obj
def decrypt_object(self, obj: PdfObject) -> PdfObject:
if isinstance(obj, (ByteStringObject, TextStringObject)):
data = self.str_crypt.decrypt(obj.original_bytes)
obj = create_string_object(data)
elif isinstance(obj, StreamObject):
obj._data = self.stm_crypt.decrypt(obj._data)
for key, value in obj.items(): # Dont forget the Stream dict.
obj[key] = self.decrypt_object(value)
elif isinstance(obj, DictionaryObject):
for key, value in obj.items():
obj[key] = self.decrypt_object(value)
elif isinstance(obj, ArrayObject):
for i in range(len(obj)):
obj[i] = self.decrypt_object(obj[i])
return obj
_PADDING = (
b"\x28\xbf\x4e\x5e\x4e\x75\x8a\x41\x64\x00\x4e\x56\xff\xfa\x01\x08"
b"\x2e\x2e\x00\xb6\xd0\x68\x3e\x80\x2f\x0c\xa9\xfe\x64\x53\x69\x7a"
)
def _padding(data: bytes) -> bytes:
return (data + _PADDING)[:32]
| CryptFilter |
python | donnemartin__interactive-coding-challenges | online_judges/utopian_tree/test_utopian_tree.py | {
"start": 18,
"end": 478
} | class ____(unittest.TestCase):
def test_utopian_tree(self):
solution = Solution()
self.assertEqual(solution.calc_utopian_tree_height(0), 1)
self.assertEqual(solution.calc_utopian_tree_height(1), 2)
self.assertEqual(solution.calc_utopian_tree_height(4), 7)
print('Success: test_utopian_tree')
def main():
test = TestUtopianTree()
test.test_utopian_tree()
if __name__ == '__main__':
main()
| TestUtopianTree |
python | davidhalter__jedi | jedi/inference/analysis.py | {
"start": 2271,
"end": 7763
} | class ____(Error):
pass
def add(node_context, error_name, node, message=None, typ=Error, payload=None):
exception = CODES[error_name][1]
if _check_for_exception_catch(node_context, node, exception, payload):
return
# TODO this path is probably not right
module_context = node_context.get_root_context()
module_path = module_context.py__file__()
issue_instance = typ(error_name, module_path, node.start_pos, message)
debug.warning(str(issue_instance), format=False)
node_context.inference_state.analysis.append(issue_instance)
return issue_instance
def _check_for_setattr(instance):
"""
Check if there's any setattr method inside an instance. If so, return True.
"""
module = instance.get_root_context()
node = module.tree_node
if node is None:
# If it's a compiled module or doesn't have a tree_node
return False
try:
stmt_names = node.get_used_names()['setattr']
except KeyError:
return False
return any(node.start_pos < n.start_pos < node.end_pos
# Check if it's a function called setattr.
and not (n.parent.type == 'funcdef' and n.parent.name == n)
for n in stmt_names)
def add_attribute_error(name_context, lookup_value, name):
message = ('AttributeError: %s has no attribute %s.' % (lookup_value, name))
# Check for __getattr__/__getattribute__ existance and issue a warning
# instead of an error, if that happens.
typ = Error
if lookup_value.is_instance() and not lookup_value.is_compiled():
# TODO maybe make a warning for __getattr__/__getattribute__
if _check_for_setattr(lookup_value):
typ = Warning
payload = lookup_value, name
add(name_context, 'attribute-error', name, message, typ, payload)
def _check_for_exception_catch(node_context, jedi_name, exception, payload=None):
"""
Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
doesn't count as an error (if equal to `exception`).
Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
it.
Returns True if the exception was catched.
"""
def check_match(cls, exception):
if not cls.is_class():
return False
for python_cls in exception.mro():
if cls.py__name__() == python_cls.__name__ \
and cls.parent_context.is_builtins_module():
return True
return False
def check_try_for_except(obj, exception):
# Only nodes in try
iterator = iter(obj.children)
for branch_type in iterator:
next(iterator) # The colon
suite = next(iterator)
if branch_type == 'try' \
and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos):
return False
for node in obj.get_except_clause_tests():
if node is None:
return True # An exception block that catches everything.
else:
except_classes = node_context.infer_node(node)
for cls in except_classes:
from jedi.inference.value import iterable
if isinstance(cls, iterable.Sequence) and \
cls.array_type == 'tuple':
# multiple exceptions
for lazy_value in cls.py__iter__():
for typ in lazy_value.infer():
if check_match(typ, exception):
return True
else:
if check_match(cls, exception):
return True
def check_hasattr(node, suite):
try:
assert suite.start_pos <= jedi_name.start_pos < suite.end_pos
assert node.type in ('power', 'atom_expr')
base = node.children[0]
assert base.type == 'name' and base.value == 'hasattr'
trailer = node.children[1]
assert trailer.type == 'trailer'
arglist = trailer.children[1]
assert arglist.type == 'arglist'
from jedi.inference.arguments import TreeArguments
args = TreeArguments(node_context.inference_state, node_context, arglist)
unpacked_args = list(args.unpack())
# Arguments should be very simple
assert len(unpacked_args) == 2
# Check name
key, lazy_value = unpacked_args[1]
names = list(lazy_value.infer())
assert len(names) == 1 and is_string(names[0])
assert names[0].get_safe_value() == payload[1].value
# Check objects
key, lazy_value = unpacked_args[0]
objects = lazy_value.infer()
return payload[0] in objects
except AssertionError:
return False
obj = jedi_name
while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
if isinstance(obj, tree.Flow):
# try/except catch check
if obj.type == 'try_stmt' and check_try_for_except(obj, exception):
return True
# hasattr check
if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'):
if check_hasattr(obj.children[1], obj.children[3]):
return True
obj = obj.parent
return False
| Warning |
python | google__pytype | pytype/tests/test_paramspec.py | {
"start": 174,
"end": 7468
} | class ____(test_base.BaseTest):
"""Tests for ParamSpec."""
def test_basic(self):
ty = self.Infer("""
from typing import ParamSpec
P = ParamSpec("P")
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import ParamSpec
P = ParamSpec("P")
""",
)
def test_import(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", """P = ParamSpec("P")""")
ty = self.Infer(
"""
from a import P
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import ParamSpec
P = ParamSpec("P")
""",
)
def test_invalid(self):
ty, errors = self.InferWithErrors("""
from typing import ParamSpec
T = ParamSpec() # invalid-typevar[e1]
T = ParamSpec("T") # ok
T = ParamSpec(42) # invalid-typevar[e2]
T = ParamSpec(str()) # invalid-typevar[e3]
T = ParamSpec("T", str, int if __random__ else float) # invalid-typevar[e4]
T = ParamSpec("T", 0, float) # invalid-typevar[e5]
T = ParamSpec("T", str) # invalid-typevar[e6]
# pytype: disable=not-supported-yet
S = ParamSpec("S", covariant=False) # ok
T = ParamSpec("T", covariant=False) # duplicate ok
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import ParamSpec
S = ParamSpec("S")
T = ParamSpec("T")
""",
)
self.assertErrorRegexes(
errors,
{
"e1": r"wrong arguments",
"e2": r"Expected.*str.*Actual.*int",
"e3": r"constant str",
"e4": r"constraint.*Must be constant",
"e5": r"Expected.*_1:.*type.*Actual.*_1: int",
"e6": r"0 or more than 1",
},
)
def test_print_args(self):
ty = self.Infer("""
from typing import ParamSpec
S = ParamSpec("S", bound=float, covariant=True)
""")
# The "covariant" keyword is ignored for now.
self.assertTypesMatchPytd(
ty,
"""
from typing import ParamSpec
S = ParamSpec("S", bound=float)
""",
)
def test_paramspec_in_def(self):
ty = self.Infer("""
from typing import Callable, ParamSpec
P = ParamSpec("P")
def f(x: Callable[P, int]) -> Callable[P, int]:
return x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, ParamSpec
P = ParamSpec("P")
def f(x: Callable[P, int]) -> Callable[P, int]: ...
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_paramspec_in_type_alias_695(self):
ty = self.Infer("""
from typing import Callable
type Foo[T, **P] = Callable[P, T]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, ParamSpec
P = ParamSpec('P')
T = TypeVar('T')
Foo = Callable[P, T]
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_paramspec_in_function_def_695(self):
ty = self.Infer("""
from typing import Callable
def foo[T, **P](a: Callable[P, T]) -> Callable[P, T]:
return a
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, ParamSpec
P = ParamSpec('P')
T = TypeVar('T')
def foo(a: Callable[P, T]) -> Callable[P, T]: ...
""",
)
def test_concatenate_in_def(self):
ty = self.Infer("""
from typing import Callable, Concatenate, ParamSpec
P = ParamSpec("P")
def f(x: Callable[Concatenate[int, P], int]) -> Callable[P, int]:
return x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, Concatenate, ParamSpec
P = ParamSpec("P")
def f(x: Callable[Concatenate[int, P], int]) -> Callable[P, int]: ...
""",
)
def test_drop_param(self):
self.Check("""
from typing import Callable, Concatenate, ParamSpec
P = ParamSpec("P")
def f(x: Callable[Concatenate[int, P], int], y: int) -> Callable[P, int]:
return lambda k: x(y, k)
def g(x: int, y: str) -> int:
return 42
a = f(g, 1)
assert_type(a, Callable[[str], int])
""")
def test_add_param(self):
self.Check("""
from typing import Callable, Concatenate, ParamSpec
P = ParamSpec("P")
def f(x: Callable[P, int]) -> Callable[Concatenate[int, P], int]:
return lambda p, q: x(q)
def g(x: str) -> int:
return 42
a = f(g)
assert_type(a, Callable[[int, str], int])
""")
def test_change_return_type(self):
self.Check("""
from typing import Callable, Concatenate, ParamSpec
P = ParamSpec("P")
def f(x: Callable[P, int]) -> Callable[P, str]:
return lambda p: str(x(p))
def g(x: int) -> int:
return 42
a = f(g)
assert_type(a, Callable[[int], str])
""")
def test_bound_to_any1(self):
self.Check("""
from typing import Any, Callable, ParamSpec
P = ParamSpec("P")
def f(x: Callable[P, int]) -> Callable[P, int]:
return x
def test1(g: Any):
assert_type(f(g), Callable[..., int])
def test1(g: Callable[..., Any]):
assert_type(f(g), Callable[..., int])
""")
@unittest.expectedFailure
def test_bound_to_any2(self):
self.Check("""
from typing import Any, Callable, Concatenate, ParamSpec
P = ParamSpec("P")
def f(x: Callable[Concatenate[int, P], int]) -> Callable[Concatenate[int, P], int]:
return x
def test(g: Any):
assert_type(f(g), Callable[Concatenate[int, ...], int])
""")
def test_typevar(self):
self.Check("""
from typing import Callable, Concatenate, List, ParamSpec, TypeVar
P = ParamSpec("P")
T = TypeVar('T')
def f(x: Callable[P, T]) -> Callable[P, List[T]]:
def inner(p):
return [x(p)]
return inner
def g(x: int) -> int:
return 42
def h(x: bool) -> str:
return '42'
a = f(g)
assert_type(a, Callable[[int], List[int]])
b = f(h)
assert_type(b, Callable[[bool], List[str]])
""")
def test_args_and_kwargs(self):
self.Check("""
from typing import ParamSpec, Callable, TypeVar
P = ParamSpec("P")
T = TypeVar("T")
def decorator(f: Callable[P, T]) -> Callable[P, T]:
def foo(*args: P.args, **kwargs: P.kwargs) -> T:
return f(*args, **kwargs)
return foo
def g(x: int, y: str) -> bool:
return False
a = decorator(g)
b = a(1, '2')
assert_type(b, bool)
""")
def test_use_as_protocol_parameter(self):
self.Check("""
from typing import ParamSpec, Protocol, TypeVar
P = ParamSpec('P')
T = TypeVar('T')
class CallLogger(Protocol[P, T]):
def args(self, *args: P.args, **kwargs: P.kwargs) -> None:
pass
""")
_DECORATOR_PYI = """
from typing import TypeVar, ParamSpec, Callable, List
T = TypeVar("T")
P = ParamSpec("P")
def decorator(fn: Callable[P, T]) -> Callable[P, List[T]]: ...
"""
@test_utils.skipBeforePy((3, 10), "ParamSpec is new in 3.10")
| ParamSpecTest |
python | pytorch__pytorch | torch/_higher_order_ops/_invoke_quant.py | {
"start": 826,
"end": 1770
} | class ____:
"""
Invoke a quantization function that will be preserved as a single operator. Preservation
as a single operator aids in pattern matching and custom lowerings.
The operation appears as:
torch.ops.higher_order.invoke_quant(subgraph, *args, scheme=scheme)
Args:
codegen_low_precision: Use observed subgraph dtypes for codegen instead of
upcasting to fp32. Can improve performance for prologue fusion but
requires careful testing of numerics.
"""
codegen_low_precision: bool = True
def __call__(
self,
*args,
scheme: Optional[str] = None,
**kwargs,
):
if not torch.compiler.is_compiling():
return args[0](*args[1:], **kwargs)
if scheme is not None:
kwargs["scheme"] = scheme
return invoke_quant_packed(*args, **kwargs, quant_options=self) # type: ignore[call-arg]
| InvokeQuant |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 517073,
"end": 517946
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "labels")
field = sgqlc.types.Field(
sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field"
)
labels = sgqlc.types.Field(
LabelConnection,
graphql_name="labels",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
| ProjectV2ItemFieldLabelValue |
python | pandas-dev__pandas | pandas/tests/indexing/test_scalar.py | {
"start": 2115,
"end": 8504
} | class ____:
# at and iat tests that don't need Base class
def test_float_index_at_iat(self):
ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in ser.items():
assert ser.at[el] == item
for i in range(len(ser)):
assert ser.iat[i] == i + 1
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range("1/1/2000", periods=8)
df = DataFrame(
np.random.default_rng(2).standard_normal((8, 4)),
index=dates,
columns=["A", "B", "C", "D"],
)
s = df["A"]
result = s.at[dates[5]]
xp = s.values[5]
assert result == xp
@pytest.mark.parametrize(
"ser, expected",
[
[
Series(["2014-01-01", "2014-02-02"], dtype="datetime64[ns]"),
Timestamp("2014-02-02"),
],
[
Series(["1 days", "2 days"], dtype="timedelta64[ns]"),
Timedelta("2 days"),
],
],
)
def test_iloc_iat_coercion_datelike(self, indexer_ial, ser, expected):
# GH 7729
# make sure we are boxing the returns
result = indexer_ial(ser)[1]
assert result == expected
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype="int64")
result = s.iloc[2]
assert result == 2
result = s.iat[2]
assert result == 2
msg = "index 10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[10]
msg = "index -10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[-10]
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype="int64")
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
assert result == 2
def test_frame_at_with_duplicate_axes(self):
# GH#33041
arr = np.random.default_rng(2).standard_normal(6).reshape(3, 2)
df = DataFrame(arr, columns=["A", "A"])
result = df.at[0, "A"]
expected = df.iloc[0].copy()
tm.assert_series_equal(result, expected)
result = df.T.at["A", 0]
tm.assert_series_equal(result, expected)
# setter
df.at[1, "A"] = 2
expected = Series([2.0, 2.0], index=["A", "A"], name=1)
tm.assert_series_equal(df.iloc[1], expected)
def test_at_getitem_dt64tz_values(self):
# gh-15822
df = DataFrame(
{
"name": ["John", "Anderson"],
"date": [
Timestamp(2017, 3, 13, 13, 32, 56),
Timestamp(2017, 2, 16, 12, 10, 3),
],
}
)
df["date"] = df["date"].dt.tz_localize("Asia/Shanghai")
expected = Timestamp("2017-03-13 13:32:56+0800", tz="Asia/Shanghai")
result = df.loc[0, "date"]
assert result == expected
result = df.at[0, "date"]
assert result == expected
def test_mixed_index_at_iat_loc_iloc_series(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
for el, item in s.items():
assert s.at[el] == s.loc[el] == item
for i in range(len(s)):
assert s.iat[i] == s.iloc[i] == i + 1
with pytest.raises(KeyError, match="^4$"):
s.at[4]
with pytest.raises(KeyError, match="^4$"):
s.loc[4]
def test_mixed_index_at_iat_loc_iloc_dataframe(self):
# GH 19860
df = DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=["a", "b", "c", 1, 2]
)
for rowIdx, row in df.iterrows():
for el, item in row.items():
assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item
for row in range(2):
for i in range(5):
assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i
with pytest.raises(KeyError, match="^3$"):
df.at[0, 3]
with pytest.raises(KeyError, match="^3$"):
df.loc[0, 3]
def test_iat_setter_incompatible_assignment(self):
# GH 23236
result = DataFrame({"a": [0.0, 1.0], "b": [4, 5]})
result.iat[0, 0] = None
expected = DataFrame({"a": [None, 1], "b": [4, 5]})
tm.assert_frame_equal(result, expected)
def test_iat_dont_wrap_object_datetimelike():
# GH#32809 .iat calls go through DataFrame._get_value, should not
# call maybe_box_datetimelike
dti = date_range("2016-01-01", periods=3)
tdi = dti - dti
ser = Series(dti.to_pydatetime(), dtype=object)
ser2 = Series(tdi.to_pytimedelta(), dtype=object)
df = DataFrame({"A": ser, "B": ser2})
assert (df.dtypes == object).all()
for result in [df.at[0, "A"], df.iat[0, 0], df.loc[0, "A"], df.iloc[0, 0]]:
assert result is ser[0]
assert isinstance(result, datetime)
assert not isinstance(result, Timestamp)
for result in [df.at[1, "B"], df.iat[1, 1], df.loc[1, "B"], df.iloc[1, 1]]:
assert result is ser2[1]
assert isinstance(result, timedelta)
assert not isinstance(result, Timedelta)
def test_at_with_tuple_index_get():
# GH 26989
# DataFrame.at getter works with Index of tuples
df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
assert df.index.nlevels == 1
assert df.at[(1, 2), "a"] == 1
# Series.at getter works with Index of tuples
series = df["a"]
assert series.index.nlevels == 1
assert series.at[(1, 2)] == 1
@pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
def test_at_with_tuple_index_set():
# GH 26989
# DataFrame.at setter works with Index of tuples
df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
assert df.index.nlevels == 1
df.at[(1, 2), "a"] = 2
assert df.at[(1, 2), "a"] == 2
# Series.at setter works with Index of tuples
series = df["a"]
assert series.index.nlevels == 1
series.at[1, 2] = 3
assert series.at[1, 2] == 3
| TestAtAndiAT |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {
"start": 28833,
"end": 35625
} | class ____(Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Examples:
>>> # The inputs are 28x28x28 volumes with a single channel, and the
>>> # batch size is 4
>>> input_shape =(4, 28, 28, 28, 1)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv3D(
... 2, 3, activation='relu', input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 26, 26, 26, 2)
>>> # With extended batch shape [4, 7], e.g. a batch of 4 videos of 3D frames,
>>> # with 7 frames per video.
>>> input_shape = (4, 7, 28, 28, 28, 1)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv3D(
... 2, 3, activation='relu', input_shape=input_shape[2:])(x)
>>> print(y.shape)
(4, 7, 26, 26, 26, 2)
Args:
filters: Integer, the dimensionality of the output space (i.e. the number of
output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the depth,
height and width of the 3D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides of
the convolution along each spatial dimension. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds
to inputs with shape `batch_shape + (spatial_dim1, spatial_dim2,
spatial_dim3, channels)` while `channels_first` corresponds to inputs with
shape `batch_shape + (channels, spatial_dim1, spatial_dim2,
spatial_dim3)`. It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`. If you never set it, then it
will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying the
dilation rate to use for dilated convolution. Can be a single integer to
specify the same value for all spatial dimensions. Currently, specifying
any `dilation_rate` value != 1 is incompatible with specifying any stride
value != 1.
groups: A positive integer specifying the number of groups in which the
input is split along the channel axis. Each group is convolved separately
with `filters / groups` filters. The output is the concatenation of all
the `groups` results along the channel axis. Input channels and `filters`
must both be divisible by `groups`.
activation: Activation function to use. If you don't specify anything, no
activation is applied (see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (see
`keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (see
`keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (see
`keras.regularizers`).
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (see
`keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (see
`keras.constraints`).
Input shape:
5+D tensor with shape: `batch_shape + (channels, conv_dim1, conv_dim2,
conv_dim3)` if data_format='channels_first'
or 5+D tensor with shape: `batch_shape + (conv_dim1, conv_dim2, conv_dim3,
channels)` if data_format='channels_last'.
Output shape:
5+D tensor with shape: `batch_shape + (filters, new_conv_dim1,
new_conv_dim2, new_conv_dim3)` if data_format='channels_first'
or 5+D tensor with shape: `batch_shape + (new_conv_dim1, new_conv_dim2,
new_conv_dim3, filters)` if data_format='channels_last'. `new_conv_dim1`,
`new_conv_dim2` and `new_conv_dim3` values might have changed due to
padding.
Returns:
A tensor of rank 5+ representing
`activation(conv3d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides > 1` and `dilation_rate > 1`.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
groups=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3D, self).__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
| Conv3D |
python | django__django | tests/postgres_tests/models.py | {
"start": 2888,
"end": 3112
} | class ____(models.Model):
id = models.BigAutoField(primary_key=True)
# Scene/Character/Line models are used to test full text search. They're
# populated with content from Monty Python and the Holy Grail.
| BigAutoFieldModel |
python | walkccc__LeetCode | solutions/3062. Winner of the Linked List Game/3062.py | {
"start": 0,
"end": 342
} | class ____:
def gameResult(self, head: ListNode | None) -> str:
even = 0
odd = 0
while head:
if head.val > head.next.val:
even += 1
elif head.val < head.next.val:
odd += 1
head = head.next.next
if even > odd:
return 'Even'
if even < odd:
return 'Odd'
return 'Tie'
| Solution |
python | MongoEngine__mongoengine | tests/fields/test_url_field.py | {
"start": 83,
"end": 1823
} | class ____(MongoDBTestCase):
def test_validation(self):
"""Ensure that URLFields validate urls properly."""
class Link(Document):
url = URLField()
link = Link()
link.url = "google"
with pytest.raises(ValidationError):
link.validate()
link.url = "http://www.google.com:8080"
link.validate()
def test_unicode_url_validation(self):
"""Ensure unicode URLs are validated properly."""
class Link(Document):
url = URLField()
link = Link()
link.url = "http://привет.com"
# TODO fix URL validation - this *IS* a valid URL
# For now we just want to make sure that the error message is correct
with pytest.raises(ValidationError) as exc_info:
link.validate()
assert (
str(exc_info.value)
== "ValidationError (Link:None) (Invalid URL: http://\u043f\u0440\u0438\u0432\u0435\u0442.com: ['url'])"
)
def test_url_scheme_validation(self):
"""Ensure that URLFields validate urls with specific schemes properly."""
class Link(Document):
url = URLField()
class SchemeLink(Document):
url = URLField(schemes=["ws", "irc"])
link = Link()
link.url = "ws://google.com"
with pytest.raises(ValidationError):
link.validate()
scheme_link = SchemeLink()
scheme_link.url = "ws://google.com"
scheme_link.validate()
def test_underscore_allowed_in_domains_names(self):
class Link(Document):
url = URLField()
link = Link()
link.url = "https://san_leandro-ca.geebo.com"
link.validate()
| TestURLField |
python | getsentry__sentry | tests/sentry/integrations/github/test_webhooks.py | {
"start": 26837,
"end": 39435
} | class ____(APITestCase):
def setUp(self) -> None:
self.url = "/extensions/github/webhook/"
self.secret = "b3002c3e321d4b7880360d397db2ccfd"
options.set("github-app.webhook-secret", self.secret)
def _create_integration_and_send_pull_request_opened_event(self):
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=self.organization,
external_id="12345",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
response = self.client.post(
path=self.url,
data=PULL_REQUEST_OPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_HUB_SIGNATURE="sha1=bc7ce12fc1058a35bf99355e6fc0e6da72c35de3",
HTTP_X_HUB_SIGNATURE_256="sha256=ed9e5aed0617ad10312986257e22448b019569200c5fdbd005a2b68a80049317",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
@responses.activate
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@patch("sentry.integrations.github.webhook.PullRequestEventWebhook.__call__")
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_webhook_error_metric(
self, mock_record: MagicMock, mock_event: MagicMock, get_jwt: MagicMock
) -> None:
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=self.organization,
external_id="12345",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
error = Exception("error")
mock_event.side_effect = error
response = self.client.post(
path=self.url,
data=PULL_REQUEST_OPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_HUB_SIGNATURE="sha1=bc7ce12fc1058a35bf99355e6fc0e6da72c35de3",
HTTP_X_HUB_SIGNATURE_256="sha256=ed9e5aed0617ad10312986257e22448b019569200c5fdbd005a2b68a80049317",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 500
assert_failure_metric(mock_record, error)
@patch("sentry.integrations.source_code_management.commit_context.metrics")
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_opened(
self,
mock_record: MagicMock,
mock_metrics: MagicMock,
) -> None:
group = self.create_group(project=self.project, short_id=7)
repo = Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
self._create_integration_and_send_pull_request_opened_event()
prs = PullRequest.objects.filter(
repository_id=repo.id, organization_id=self.project.organization.id
)
assert len(prs) == 1
pr = prs[0]
assert pr.key == "1"
assert (
pr.message
== "This is a pretty simple change that we need to pull into master. Fixes BAR-7"
)
assert pr.title == "Update the README with new information"
assert pr.author is not None
assert pr.author.name == "baxterthehacker"
self.assert_group_link(group, pr)
assert_success_metric(mock_record)
@patch("sentry.integrations.github.webhook.metrics")
def test_creates_missing_repo(self, mock_metrics: MagicMock) -> None:
self._create_integration_and_send_pull_request_opened_event()
repos = Repository.objects.all()
assert len(repos) == 1
assert repos[0].organization_id == self.project.organization.id
assert repos[0].external_id == "35129377"
assert repos[0].provider == "integrations:github"
assert repos[0].name == "baxterthehacker/public-repo"
mock_metrics.incr.assert_any_call("github.webhook.repository_created")
def test_ignores_hidden_repo(self) -> None:
repo = self.create_repo(
project=self.project,
provider="integrations:github",
name="baxterthehacker/public-repo",
)
repo.status = ObjectStatus.HIDDEN
repo.external_id = "35129377"
repo.save()
self._create_integration_and_send_pull_request_opened_event()
repos = Repository.objects.all()
assert len(repos) == 1
assert repos[0] == repo
@patch("sentry.integrations.github.webhook.metrics")
def test_multiple_orgs_creates_missing_repo(self, mock_metrics: MagicMock) -> None:
project = self.project # force creation
org2 = self.create_organization()
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=project.organization,
external_id="12345",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(org2.id, self.user)
response = self.client.post(
path=self.url,
data=PULL_REQUEST_OPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_HUB_SIGNATURE="sha1=bc7ce12fc1058a35bf99355e6fc0e6da72c35de3",
HTTP_X_HUB_SIGNATURE_256="sha256=ed9e5aed0617ad10312986257e22448b019569200c5fdbd005a2b68a80049317",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
repos = Repository.objects.all()
assert len(repos) == 2
assert {repo.organization_id for repo in repos} == {project.organization.id, org2.id}
for repo in repos:
assert repo.external_id == "35129377"
assert repo.provider == "integrations:github"
assert repo.name == "baxterthehacker/public-repo"
mock_metrics.incr.assert_any_call("github.webhook.repository_created")
def test_multiple_orgs_ignores_hidden_repo(self) -> None:
org2 = self.create_organization()
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=self.organization,
external_id="12345",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
integration.add_organization(org2.id, self.user)
repo = self.create_repo(
project=self.project,
provider="integrations:github",
name="baxterthehacker/public-repo",
)
repo.external_id = "35129377"
repo.status = ObjectStatus.HIDDEN
repo.save()
response = self.client.post(
path=self.url,
data=PULL_REQUEST_OPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_HUB_SIGNATURE="sha1=bc7ce12fc1058a35bf99355e6fc0e6da72c35de3",
HTTP_X_HUB_SIGNATURE_256="sha256=ed9e5aed0617ad10312986257e22448b019569200c5fdbd005a2b68a80049317",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
repos = Repository.objects.all()
assert len(repos) == 1
assert repos[0] == repo
def test_edited_pr_description_with_group_link(self) -> None:
group = self.create_group(project=self.project, short_id=7)
url = "/extensions/github/webhook/"
secret = "b3002c3e321d4b7880360d397db2ccfd"
options.set("github-app.webhook-secret", secret)
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=self.organization,
external_id="12345",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
repo = Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
pr = PullRequest.objects.create(
key="1", repository_id=repo.id, organization_id=self.project.organization.id
)
response = self.client.post(
path=url,
data=PULL_REQUEST_EDITED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_HUB_SIGNATURE="sha1=83100642f0cf5d7f6145cf8d04da5d00a09f890f",
HTTP_X_HUB_SIGNATURE_256="sha256=3e45e315ec12367c10ae7aa9de22372868440ece2ea719251a4dc6cc6531ca20",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
pr = PullRequest.objects.get(id=pr.id)
assert pr.key == "1"
assert pr.message == "new edited body. Fixes BAR-7"
assert pr.title == "new edited title"
assert pr.author is not None
assert pr.author.name == "baxterthehacker"
self.assert_group_link(group, pr)
@patch("sentry.integrations.github.webhook.metrics")
def test_closed(self, mock_metrics: MagicMock) -> None:
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=self.organization,
external_id="12345",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
repo = Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
response = self.client.post(
path=self.url,
data=PULL_REQUEST_CLOSED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_HUB_SIGNATURE="sha1=49db856f5658b365b73a2fa73a7cffa543f4d3af",
HTTP_X_HUB_SIGNATURE_256="sha256=c99f2b44a5915b1430d1a1b095a44e3297c70ffd24d06c156a4efc449ec53c47",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
prs = PullRequest.objects.filter(
repository_id=repo.id, organization_id=self.project.organization.id
)
assert len(prs) == 1
pr = prs[0]
assert pr.key == "1"
assert pr.message == "new closed body"
assert pr.title == "new closed title"
assert pr.author is not None
assert pr.author.name == "baxterthehacker"
assert pr.merge_commit_sha == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
assert mock_metrics.incr.call_count == 0
def assert_group_link(self, group, pr):
link = GroupLink.objects.get()
assert link.group_id == group.id
assert link.linked_id == pr.id
assert link.linked_type == GroupLink.LinkedType.pull_request
@with_feature("organizations:integrations-github-project-management")
| PullRequestEventWebhook |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.