language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/1195. Fizz Buzz Multithreaded/1195.py | {
"start": 34,
"end": 1490
} | class ____:
def __init__(self, n: int):
self.n = n
self.fizzSemaphore = Semaphore(0)
self.buzzSemaphore = Semaphore(0)
self.fizzbuzzSemaphore = Semaphore(0)
self.numberSemaphore = Semaphore(1)
# printFizz() outputs "fizz"
def fizz(self, printFizz: 'Callable[[], None]') -> None:
for i in range(1, self.n + 1):
if i % 3 == 0 and i % 15 != 0:
self.fizzSemaphore.acquire()
printFizz()
self.numberSemaphore.release()
# printBuzz() outputs "buzz"
def buzz(self, printBuzz: 'Callable[[], None]') -> None:
for i in range(1, self.n + 1):
if i % 5 == 0 and i % 15 != 0:
self.buzzSemaphore.acquire()
printBuzz()
self.numberSemaphore.release()
# printFizzBuzz() outputs "fizzbuzz"
def fizzbuzz(self, printFizzBuzz: 'Callable[[], None]') -> None:
for i in range(1, self.n + 1):
if i % 15 == 0:
self.fizzbuzzSemaphore.acquire()
printFizzBuzz()
self.numberSemaphore.release()
# printNumber(x) outputs "x", where x is an integer.
def number(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(1, self.n + 1):
self.numberSemaphore.acquire()
if i % 15 == 0:
self.fizzbuzzSemaphore.release()
elif i % 3 == 0:
self.fizzSemaphore.release()
elif i % 5 == 0:
self.buzzSemaphore.release()
else:
printNumber(i)
self.numberSemaphore.release()
| FizzBuzz |
python | kamyu104__LeetCode-Solutions | Python/implement-rand10-using-rand7.py | {
"start": 897,
"end": 1108
} | class ____(object):
def rand10(self):
"""
:rtype: int
"""
while True:
x = (rand7()-1)*7 + (rand7()-1)
if x < 40:
return x%10 + 1
| Solution2 |
python | pytorch__pytorch | torch/fx/experimental/graph_gradual_typechecker.py | {
"start": 21374,
"end": 29101
} | class ____:
def __init__(self, env, traced):
self.env = env
self.traced = traced
def type_check(self):
"""
A gradual type checker for graphs
Effect: every node's field type will be
populated with a type after type-checking is done
"""
graph = self.traced.graph
# type check every node with gradual type rules
# if any node does not type check return false
for n in graph.nodes:
self.type_check_node(n)
return True
def type_check_node(self, n: Node):
"""
Type check a given fx node.
Current operations:
- Reshape
- Transpose
- Add
- Relu
- conv2d
- batchnorm2d
- flatten
- maxpool2d
- adaptiveavgpool2d
- linear
"""
if n.type is None:
n.type = Dyn
if n.op == "placeholder":
return n.type
elif n.op == "get_attr":
t = get_parameter(self.traced, n.target) # type: ignore[arg-type]
if isinstance(t.data, torch.Tensor):
n.type = TensorType(t.data.shape)
return n.type
elif n.op == "call_function":
if n.target is getattr:
assert getattr in _INFERENCE_RULES
return _INFERENCE_RULES[n.target](n, self.traced)
elif n.target in _INFERENCE_RULES:
return _INFERENCE_RULES[n.target](n)
else:
raise RuntimeError(
f"No inference rule registered for target {n.target}!"
)
elif n.op == "call_module":
module_instance = self.traced.get_submodule(n.target)
if type(module_instance) in _INFERENCE_RULES:
return _INFERENCE_RULES[type(module_instance)](n, module_instance)
else:
raise RuntimeError(
f"No inference rule registered for class {type(module_instance)}!"
)
elif n.op == "output":
def get_node_type(a):
return a.type
n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
return n.type
else:
raise NotImplementedError(f"Method {n.op} not yet implemented")
@register_refinement_rule(Conv2d)
def conv_refinement_rule(n: Node):
"""
The equality constraints are between the first dimension of
the input and output
"""
res = []
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
return res
@register_refinement_rule(torch.nn.Linear)
def linear_refinement_rule(n: Node):
"""
The equality constraints are between the first dimension of
the input and output
"""
res = []
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
return res
@register_refinement_rule(BatchNorm2d)
@register_refinement_rule(torch.nn.ReLU)
def all_eq(n: Node):
"""
For operations where the input shape is equal to the output shape
"""
res = []
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
args1 = arg_type.__args__
args2 = n.type.__args__
res = [Equality(args1[i], args2[i]) for i in range(len(args1))]
return res
@register_refinement_rule(torch.nn.AdaptiveAvgPool2d)
@register_refinement_rule(torch.nn.MaxPool2d)
def first_two_eq(n: Node):
"""
For operations where the first two dimensions of the input and output shape
are equal
"""
res = []
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
args1 = arg_type.__args__
args2 = n.type.__args__
res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])]
return res
@register_refinement_rule(torch.add)
@register_refinement_rule(operator.add)
def element_wise_eq(n: Node):
"""
For element-wise operations and handles broadcasting.
Note that after applying broadcasting to the arguments
we are able to determine if certain dimensions have not been broadcast
if they are symbolicallu equal.
in this case, we can establish equality between those dimensions and the
corresponding output dimensions.
Note that it takes two iterations for this result. One iteration to establish
equality between certain dimensions of the operands (requiring the whole solver
including unification) and another iteration to establish equality between the operands
and the resulting type, requiring another round of constraint generation and unificaiton.
"""
res = []
if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
arg_type1 = n.args[0].type
arg_type2 = n.args[1].type
if (
isinstance(arg_type1, TensorType)
and isinstance(arg_type2, TensorType)
and isinstance(n.type, TensorType)
):
args1, args2 = broadcast_types(arg_type1, arg_type2)
# by this point, we know that args1 and args2 are the same size.
a1 = args1.__args__
a2 = args2.__args__
a3 = n.type.__args__
# we would be here in the second iteration where we establish equality
# between operand type dimensions and the resulting type dimensions
r = []
for x, y, z in zip(a1, a2, a3):
if x == y:
r.append(Equality(x, z))
res = r
return res
@register_refinement_rule(torch.flatten)
def flatten_refinement_rule(n: Node):
"""
Generates equality constraints between the dimensions of the input and output
that will not be involved in the flatten operation
"""
assert isinstance(n.args[0], Node)
eq_const = []
start_dim = 1
end_dim = -1
if len(n.args) > 1:
assert isinstance(n.args[1], int)
start_dim = n.args[1]
if len(n.args) > 2:
assert isinstance(n.args[2], int)
end_dim = n.args[2]
if isinstance(n.type, TensorType) and isinstance(n.args[0].type, TensorType):
l = len(n.type.__args__)
arg_type = n.args[0].type
start_dim = l if start_dim == -1 else start_dim
end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1
for t1, t2 in zip(n.type.__args__[0:start_dim], arg_type.__args__[0:start_dim]):
eq_const.append(Equality(t1, t2))
for t1, t2 in zip(n.type.__args__[end_dim:], arg_type.__args__[end_dim:]):
eq_const.append(Equality(t1, t2))
return eq_const
@register_algebraic_expressions_inference_rule(Conv2d)
def conv_rule(n: Node, module_instance):
"""
Represents the output in terms of an algrbraic expression w.r.t
the input when possible
"""
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
w_in = arg_type.__args__[3]
h_in = arg_type.__args__[2]
h_out = calculate_out_dimension(h_in, module_instance, 0)
w_out = calculate_out_dimension(w_in, module_instance, 1)
new_type = TensorType((n.type.__args__[0], n.type.__args__[1], h_out, w_out))
n.type = new_type
return new_type
| GraphTypeChecker |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_cmath.py | {
"start": 23818,
"end": 25764
} | class ____(test_math.IsCloseTests):
isclose = cmath.isclose
def test_reject_complex_tolerances(self):
with self.assertRaises(TypeError):
self.isclose(1j, 1j, rel_tol=1j)
with self.assertRaises(TypeError):
self.isclose(1j, 1j, abs_tol=1j)
with self.assertRaises(TypeError):
self.isclose(1j, 1j, rel_tol=1j, abs_tol=1j)
def test_complex_values(self):
# test complex values that are close to within 12 decimal places
complex_examples = [(1.0+1.0j, 1.000000000001+1.0j),
(1.0+1.0j, 1.0+1.000000000001j),
(-1.0+1.0j, -1.000000000001+1.0j),
(1.0-1.0j, 1.0-0.999999999999j),
]
self.assertAllClose(complex_examples, rel_tol=1e-12)
self.assertAllNotClose(complex_examples, rel_tol=1e-13)
def test_complex_near_zero(self):
# test values near zero that are near to within three decimal places
near_zero_examples = [(0.001j, 0),
(0.001, 0),
(0.001+0.001j, 0),
(-0.001+0.001j, 0),
(0.001-0.001j, 0),
(-0.001-0.001j, 0),
]
self.assertAllClose(near_zero_examples, abs_tol=1.5e-03)
self.assertAllNotClose(near_zero_examples, abs_tol=0.5e-03)
self.assertIsClose(0.001-0.001j, 0.001+0.001j, abs_tol=2e-03)
self.assertIsNotClose(0.001-0.001j, 0.001+0.001j, abs_tol=1e-03)
def test_complex_special(self):
self.assertIsNotClose(INF, INF*1j)
self.assertIsNotClose(INF*1j, INF)
self.assertIsNotClose(INF, -INF)
self.assertIsNotClose(-INF, INF)
self.assertIsNotClose(0, INF)
self.assertIsNotClose(0, INF*1j)
if __name__ == "__main__":
run_tests()
| IsCloseTests |
python | readthedocs__readthedocs.org | readthedocs/invitations/migrations/0001_initial.py | {
"start": 250,
"end": 3367
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
initial = True
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Invitation",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
("object_id", models.PositiveIntegerField()),
(
"to_email",
models.EmailField(max_length=254, null=True, blank=True, verbose_name="E-mail"),
),
("token", models.CharField(max_length=32, unique=True)),
(
"content_type",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.contenttype",
),
),
(
"from_user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="invitations_sent",
to=settings.AUTH_USER_MODEL,
verbose_name="From user",
),
),
(
"to_user",
models.ForeignKey(
null=True,
blank=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="invitations_received",
to=settings.AUTH_USER_MODEL,
verbose_name="To user",
),
),
(
"expiration_date",
models.DateTimeField(verbose_name="Expiration date"),
),
],
),
migrations.AddIndex(
model_name="invitation",
index=models.Index(
fields=["content_type", "object_id"],
name="invitations_content_5a26b9_idx",
),
),
migrations.AlterUniqueTogether(
name="invitation",
unique_together={
("to_email", "content_type", "object_id"),
("to_user", "content_type", "object_id"),
},
),
]
| Migration |
python | bokeh__bokeh | src/bokeh/models/filters.py | {
"start": 6327,
"end": 8536
} | class ____(Filter):
''' Filter data sources with a custom defined JavaScript function.
.. warning::
The explicit purpose of this Bokeh Model is to embed *raw JavaScript
code* for a browser to execute. If any part of the code is derived
from untrusted user inputs, then you must take appropriate care to
sanitize the user input prior to passing to Bokeh.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
args = RestrictedDict(String, AnyRef, disallow=("source",), help="""
A mapping of names to Python objects. In particular those can be bokeh's models.
These objects are made available to the callback's code snippet as the values of
named parameters to the callback.
""")
code = String(default="", help="""
A snippet of JavaScript code to filter data contained in a columnar data source.
The code is made into the body of a function, and all of of the named objects in
``args`` are available as parameters that the code can use. The variable
``source`` will contain the data source that is associated with the ``CDSView`` this
filter is added to.
The code should either return the indices of the subset or an array of booleans
to use to subset data source rows.
Example:
.. code-block::
code = '''
const indices = []
for (let i = 0; i <= source.data['some_column'].length; i++) {
if (source.data['some_column'][i] == 'some_value') {
indices.push(i)
}
}
return indices
'''
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| CustomJSFilter |
python | bokeh__bokeh | src/bokeh/models/coordinates.py | {
"start": 1390,
"end": 3209
} | class ____(Model):
""" A mapping between two coordinate systems. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
x_source = Instance(Range, default=InstanceDefault(DataRange1d), help="""
The source range of the horizontal dimension of the new coordinate space.
""")
y_source = Instance(Range, default=InstanceDefault(DataRange1d), help="""
The source range of the vertical dimension of the new coordinate space.
""")
x_scale = Instance(Scale, default=InstanceDefault(LinearScale), help="""
What kind of scale to use to convert x-coordinates from the source (data)
space into x-coordinates in the target (possibly screen) coordinate space.
""")
y_scale = Instance(Scale, default=InstanceDefault(LinearScale), help="""
What kind of scale to use to convert y-coordinates from the source (data)
space into y-coordinates in the target (possibly screen) coordinate space.
""")
x_target = Instance(Range, help="""
The horizontal range to map x-coordinates in the target coordinate space.
""")
y_target = Instance(Range, help="""
The vertical range to map y-coordinates in the target coordinate space.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| CoordinateMapping |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-analytics-v4/source_google_analytics_v4/source.py | {
"start": 1053,
"end": 3072
} | class ____(HttpStream):
"""
Provides functionality to fetch the valid (dimensions, metrics) for the Analytics Reporting API and their data
types.
"""
primary_key = None
# Link to query the metadata for available metrics and dimensions.
# Those are not provided in the Analytics Reporting API V4.
# Column id completely match for v3 and v4.
url_base = "https://www.googleapis.com/analytics/v3/metadata/ga/columns"
def path(self, **kwargs: Any) -> str:
return ""
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
"""Abstractmethod HTTPStream CDK dependency"""
return None
def parse_response(self, response: requests.Response, **kwargs: Any) -> Tuple[dict, dict]:
"""
Returns a map of (dimensions, metrics) hashes, example:
({"ga:userType": "STRING", "ga:sessionCount": "STRING"}, {"ga:pageviewsPerSession": "FLOAT", "ga:sessions": "INTEGER"})
Each available dimension can be found in dimensions with its data type
as the value. e.g. dimensions['ga:userType'] == STRING
Each available metric can be found in metrics with its data type
as the value. e.g. metrics['ga:sessions'] == INTEGER
"""
metrics = {}
dimensions = {}
results = response.json()
columns = results.get("items", [])
for column in columns:
column_attributes = column.get("attributes", [])
column_name = column.get("id")
column_type = column_attributes.get("type")
column_data_type = column_attributes.get("dataType")
if column_type == "METRIC":
metrics[column_name] = column_data_type
elif column_type == "DIMENSION":
dimensions[column_name] = column_data_type
else:
raise Exception(f"Unsupported column type {column_type}.")
return dimensions, metrics
| GoogleAnalyticsV4TypesList |
python | pytorch__pytorch | torch/_inductor/autoheuristic/autoheuristic.py | {
"start": 1407,
"end": 8225
} | class ____:
"""
AutoHeuristic is a framework that allows one to collect data, learn a heuristic (i.e. a regression tree) and
generate the heuristic to code. This class allows one to collect data. The collected data can then be used to train
a heuristic (see torchgen/autoheuristic/).
"""
collected_feedback: dict[Choice, Feedback]
def __init__(
self,
fallback: Callable[[], Choice],
choices: list[Choice],
feedback: Optional[LocalFeedback],
context: AHContext,
name: str,
augment_context: Optional[list[AHOperation]] = None,
precondition: Optional[Callable[[AHMetadata, AHContext], bool]] = None,
) -> None:
"""
Initializes an instance of the AutoHeuristic class.
Args:
fallback: A callable that returns a Choice when the heuristic is unsure which choice to make, or
AutoHeuristic is in data collection mode.
choices: A list of possible choices the heuristic can make.
feedback: An instance of LocalFeedback that provides feedback for a given choice.
context: Context to store with each choice and feedback.
name: A string that identifies the heuristic.
augment_context: An optional list of AHOperation instances that augment the context.
precondition: A callable that returns a boolean indicating whether AutoHeuristic should run.
"""
self.fallback = fallback
self.choices = choices
self.feedback = feedback
self.context = context
self.name = name
self.collected_feedback = {}
self.augment_context = augment_context
self.metadata = AHMetadata(
get_gpu_shared_memory(),
torch.cuda.get_device_capability(),
self.choices,
self.name,
)
self.precondition = precondition
if not self.satisfies_precondition():
return
if torch._inductor.config.autoheuristic_log_path == "DEFAULT":
self.log_path = self.get_default_log_path()
else:
self.log_path = torch._inductor.config.autoheuristic_log_path
if torch._inductor.config.collect_autoheuristic(self.name):
if self.feedback is not None:
for choice in self.choices:
feedback_val = self.feedback(choice)
self.save_data(choice, feedback_val)
def satisfies_precondition(self) -> bool:
return self.precondition is None or self.precondition(
self.metadata, self.context
)
def get_choice(self) -> Choice:
"""
Returns the chosen option based on the value of autoheuristic_use.
If self.name is one of the comma separated strings in autoheuristic_use,
it queries a learned heuristic to make a decision. Otherwise, it returns the fallback option.
"""
if not self.satisfies_precondition():
return self.fallback()
if torch._inductor.config.use_autoheuristic(self.name):
if self.augment_context is not None:
self.context.apply_operations(self.augment_context)
controller = LearnedHeuristicController(
self.metadata,
self.context,
)
decision = controller.get_decision()
if decision not in self.choices:
# TODO(AlnisM): We might want to allow this in the future
return self.fallback()
if decision is not None:
return decision
return self.fallback()
def get_top_k_choices(
self, top_k: int, always_included: Optional[list[str]] = None
) -> Optional[list[Choice]]:
if not self.satisfies_precondition():
return None
if torch._inductor.config.use_autoheuristic(self.name):
if self.augment_context is not None:
self.context.apply_operations(self.augment_context)
controller = LearnedHeuristicController(
self.metadata,
self.context,
)
choices = controller.get_decisions_ranked(top_k)
if choices is None:
return None
if always_included is not None:
for choice in always_included:
if choice not in choices:
choices.append(choice)
return choices
return None
def get_collected_feedback(self, choice: Choice) -> Any:
return self.collected_feedback.get(choice, None)
@staticmethod
def get_device_identifier() -> str:
# a heuristic might work well for one GPU, but not for another
# we store the collected data per GPU model and learn a heuristic per GPU model
# TODO(AlnisM): just using the device name for now, but the same GPU model can have different names
device_name = torch.cuda.get_device_name().replace(" ", "_")
return device_name
def get_default_log_path(self) -> str:
device_name = self.get_device_identifier()
path = f"{cache_dir()}/autoheuristic/{device_name}/"
os.makedirs(path, exist_ok=True)
path += f"{self.name}.txt"
return path
def serialize_metadata(self) -> str:
metadata_dict = self.metadata.to_dict()
(
num_features,
cat_features,
) = self.context.get_numerical_and_categorical_features()
metadata_dict["numerical_features"] = num_features
metadata_dict["categorical_features"] = cat_features
return json.dumps(metadata_dict)
def save_data(self, choice: Choice, feedback_val: Feedback) -> None:
self.collected_feedback[choice] = feedback_val
log_path = self.log_path
lines = []
log_exists = os.path.exists(log_path)
if log_exists:
# if log already exists, make sure it is consistent
metadata = self.serialize_metadata()
existing_metadata = get_metadata_str_from_log(self.log_path)
if existing_metadata != metadata:
raise InconsistentMetadata(
"Given metadata does not match existing metadata"
)
else:
lines.append(self.serialize_metadata())
feature_header = self.context.get_feature_names_csv()
header = feature_header + "," + CHOICE_COL + "," + FEEDBACK_COL
lines.append(header)
line = ""
feature_values = self.context.get_feature_values_csv()
line += feature_values + "," + choice + "," + str(feedback_val)
lines.append(line)
with open(log_path, "a") as f:
f.write("\n".join(lines) + "\n")
| AutoHeuristic |
python | django__django | tests/db_functions/datetime/test_now.py | {
"start": 416,
"end": 2104
} | class ____(TestCase):
def test_basic(self):
a1 = Article.objects.create(
title="How to Django",
text=lorem_ipsum,
written=timezone.now(),
)
a2 = Article.objects.create(
title="How to Time Travel",
text=lorem_ipsum,
written=timezone.now(),
)
num_updated = Article.objects.filter(id=a1.id, published=None).update(
published=Now()
)
self.assertEqual(num_updated, 1)
num_updated = Article.objects.filter(id=a1.id, published=None).update(
published=Now()
)
self.assertEqual(num_updated, 0)
a1.refresh_from_db()
self.assertIsInstance(a1.published, datetime)
a2.published = Now() + timedelta(days=2)
a2.save()
a2.refresh_from_db()
self.assertIsInstance(a2.published, datetime)
self.assertQuerySetEqual(
Article.objects.filter(published__lte=Now()),
["How to Django"],
lambda a: a.title,
)
self.assertQuerySetEqual(
Article.objects.filter(published__gt=Now()),
["How to Time Travel"],
lambda a: a.title,
)
def test_microseconds(self):
Article.objects.create(
title="How to Django",
text=lorem_ipsum,
written=timezone.now(),
)
now_string = (
Article.objects.annotate(now_string=Cast(Now(), TextField()))
.get()
.now_string
)
precision = connection.features.time_cast_precision
self.assertRegex(now_string, rf"^.*\.\d{{1,{precision}}}")
| NowTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 39296,
"end": 39737
} | class ____(sgqlc.types.Enum):
"""Whether or not a PullRequest can be merged.
Enumeration Choices:
* `CONFLICTING`: The pull request cannot be merged due to merge
conflicts.
* `MERGEABLE`: The pull request can be merged.
* `UNKNOWN`: The mergeability of the pull request is still being
calculated.
"""
__schema__ = github_schema
__choices__ = ("CONFLICTING", "MERGEABLE", "UNKNOWN")
| MergeableState |
python | MongoEngine__mongoengine | tests/test_context_managers.py | {
"start": 1267,
"end": 27935
} | class ____(MongoDBTestCase):
def test_set_write_concern(self):
class User(Document):
name = StringField()
collection = User._get_collection()
original_write_concern = collection.write_concern
with set_write_concern(
collection, {"w": "majority", "j": True, "wtimeout": 1234}
) as updated_collection:
assert updated_collection.write_concern.document == {
"w": "majority",
"j": True,
"wtimeout": 1234,
}
assert original_write_concern.document == collection.write_concern.document
def test_set_read_write_concern(self):
class User(Document):
name = StringField()
collection = User._get_collection()
original_read_concern = collection.read_concern
original_write_concern = collection.write_concern
with set_read_write_concern(
collection,
{"w": "majority", "j": True, "wtimeout": 1234},
{"level": "local"},
) as update_collection:
assert update_collection.read_concern.document == {"level": "local"}
assert update_collection.write_concern.document == {
"w": "majority",
"j": True,
"wtimeout": 1234,
}
assert original_read_concern.document == collection.read_concern.document
assert original_write_concern.document == collection.write_concern.document
def test_switch_db_context_manager(self):
register_connection("testdb-1", "mongoenginetest2")
class Group(Document):
name = StringField()
Group.drop_collection()
Group(name="hello - default").save()
assert 1 == Group.objects.count()
with switch_db(Group, "testdb-1") as Group:
assert 0 == Group.objects.count()
Group(name="hello").save()
assert 1 == Group.objects.count()
Group.drop_collection()
assert 0 == Group.objects.count()
assert 1 == Group.objects.count()
def test_switch_collection_context_manager(self):
register_connection(alias="testdb-1", db="mongoenginetest2")
class Group(Document):
name = StringField()
Group.drop_collection() # drops in default
with switch_collection(Group, "group1") as Group:
Group.drop_collection() # drops in group1
Group(name="hello - group").save()
assert 1 == Group.objects.count()
with switch_collection(Group, "group1") as Group:
assert 0 == Group.objects.count()
Group(name="hello - group1").save()
assert 1 == Group.objects.count()
Group.drop_collection()
assert 0 == Group.objects.count()
assert 1 == Group.objects.count()
def test_no_dereference_context_manager_object_id(self):
"""Ensure that DBRef items in ListFields aren't dereferenced."""
class User(Document):
name = StringField()
class Group(Document):
ref = ReferenceField(User, dbref=False)
generic = GenericReferenceField()
members = ListField(ReferenceField(User, dbref=False))
User.drop_collection()
Group.drop_collection()
for i in range(1, 51):
User(name="user %s" % i).save()
user = User.objects.first()
Group(ref=user, members=User.objects, generic=user).save()
with no_dereference(Group):
assert not Group._fields["members"]._auto_dereference
with no_dereference(Group):
group = Group.objects.first()
for m in group.members:
assert isinstance(m, DBRef)
assert isinstance(group.ref, DBRef)
assert isinstance(group.generic, dict)
group = Group.objects.first()
for m in group.members:
assert isinstance(m, User)
assert isinstance(group.ref, User)
assert isinstance(group.generic, User)
def test_no_dereference_context_manager_thread_safe(self):
"""Ensure no_dereference context manager works in threaded condition"""
class User(Document):
name = StringField()
class Group(Document):
ref = ReferenceField(User, dbref=False)
User.drop_collection()
Group.drop_collection()
user = User(name="user 1").save()
Group(ref=user).save()
def run_in_thread(id):
time.sleep(random.uniform(0.1, 0.5)) # Force desync of threads
if id % 2 == 0:
with no_dereference(Group):
for i in range(20):
time.sleep(random.uniform(0.1, 0.5))
assert Group.ref._auto_dereference is False
group = Group.objects.first()
assert isinstance(group.ref, DBRef)
else:
for i in range(20):
time.sleep(random.uniform(0.1, 0.5))
assert Group.ref._auto_dereference is True
group = Group.objects.first()
assert isinstance(group.ref, User)
threads = [
TestableThread(target=run_in_thread, args=(id,)) for id in range(100)
]
_ = [th.start() for th in threads]
_ = [th.join() for th in threads]
def test_no_dereference_context_manager_nested(self):
class User(Document):
name = StringField()
class Group(Document):
ref = ReferenceField(User, dbref=False)
User.drop_collection()
Group.drop_collection()
for i in range(1, 51):
User(name="user %s" % i).save()
user = User.objects.first()
Group(ref=user).save()
with no_dereference(Group):
group = Group.objects.first()
assert isinstance(group.ref, DBRef)
with no_dereference(Group):
group = Group.objects.first()
assert isinstance(group.ref, DBRef)
# make sure it's still off here
group = Group.objects.first()
assert isinstance(group.ref, DBRef)
group = Group.objects.first()
assert isinstance(group.ref, User)
def test_no_dereference_context_manager_dbref(self):
"""Ensure that DBRef items in ListFields aren't dereferenced"""
class User(Document):
name = StringField()
class Group(Document):
ref = ReferenceField(User, dbref=True)
generic = GenericReferenceField()
members = ListField(ReferenceField(User, dbref=True))
User.drop_collection()
Group.drop_collection()
for i in range(1, 51):
User(name="user %s" % i).save()
user = User.objects.first()
Group(ref=user, members=User.objects, generic=user).save()
with no_dereference(Group):
assert not Group._fields["members"]._auto_dereference
with no_dereference(Group):
qs = Group.objects
assert qs._auto_dereference is False
group = qs.first()
assert not group._fields["members"]._auto_dereference
assert all(not isinstance(m, User) for m in group.members)
assert not isinstance(group.ref, User)
assert not isinstance(group.generic, User)
group = Group.objects.first()
assert all(isinstance(m, User) for m in group.members)
assert isinstance(group.ref, User)
assert isinstance(group.generic, User)
def test_no_sub_classes(self):
class A(Document):
x = IntField()
meta = {"allow_inheritance": True}
class B(A):
z = IntField()
class C(B):
zz = IntField()
A.drop_collection()
A(x=10).save()
A(x=15).save()
B(x=20).save()
B(x=30).save()
C(x=40).save()
assert A.objects.count() == 5
assert B.objects.count() == 3
assert C.objects.count() == 1
with no_sub_classes(A):
assert A.objects.count() == 2
for obj in A.objects:
assert obj.__class__ == A
with no_sub_classes(B):
assert B.objects.count() == 2
for obj in B.objects:
assert obj.__class__ == B
with no_sub_classes(C):
assert C.objects.count() == 1
for obj in C.objects:
assert obj.__class__ == C
# Confirm context manager exit correctly
assert A.objects.count() == 5
assert B.objects.count() == 3
assert C.objects.count() == 1
def test_no_sub_classes_modification_to_document_class_are_temporary(self):
class A(Document):
x = IntField()
meta = {"allow_inheritance": True}
class B(A):
z = IntField()
assert A._subclasses == ("A", "A.B")
with no_sub_classes(A):
assert A._subclasses == ("A",)
assert A._subclasses == ("A", "A.B")
assert B._subclasses == ("A.B",)
with no_sub_classes(B):
assert B._subclasses == ("A.B",)
assert B._subclasses == ("A.B",)
def test_no_subclass_context_manager_does_not_swallow_exception(self):
class User(Document):
name = StringField()
with pytest.raises(TypeError):
with no_sub_classes(User):
raise TypeError()
def test_query_counter_does_not_swallow_exception(self):
with pytest.raises(TypeError):
with query_counter():
raise TypeError()
def test_query_counter_temporarily_modifies_profiling_level(self):
db = get_db()
def _current_profiling_level():
return db.command({"profile": -1})["was"]
def _set_profiling_level(lvl):
db.command({"profile": lvl})
initial_profiling_level = _current_profiling_level()
try:
new_level = 1
_set_profiling_level(new_level)
assert _current_profiling_level() == new_level
with query_counter():
assert _current_profiling_level() == 2
assert _current_profiling_level() == new_level
except Exception:
_set_profiling_level(
initial_profiling_level
) # Ensures it gets reseted no matter the outcome of the test
raise
def test_query_counter(self):
db = get_db()
collection = db.query_counter
collection.drop()
def issue_1_count_query():
count_documents(collection, {})
def issue_1_insert_query():
collection.insert_one({"test": "garbage"})
def issue_1_find_query():
collection.find_one()
counter = 0
with query_counter() as q:
assert q == counter
assert q == counter # Ensures previous count query did not get counted
for _ in range(10):
issue_1_insert_query()
counter += 1
assert q == counter
for _ in range(4):
issue_1_find_query()
counter += 1
assert q == counter
for _ in range(3):
issue_1_count_query()
counter += 1
assert q == counter
assert int(q) == counter # test __int__
assert repr(q) == str(int(q)) # test __repr__
assert q > -1 # test __gt__
assert q >= int(q) # test __gte__
assert q != -1
assert q < 1000
assert q <= int(q)
def test_query_counter_alias(self):
"""query_counter works properly with db aliases?"""
# Register a connection with db_alias testdb-1
register_connection("testdb-1", "mongoenginetest2")
class A(Document):
"""Uses default db_alias"""
name = StringField()
class B(Document):
"""Uses testdb-1 db_alias"""
name = StringField()
meta = {"db_alias": "testdb-1"}
A.drop_collection()
B.drop_collection()
with query_counter() as q:
assert q == 0
A.objects.create(name="A")
assert q == 1
a = A.objects.first()
assert q == 2
a.name = "Test A"
a.save()
assert q == 3
# querying the other db should'nt alter the counter
B.objects().first()
assert q == 3
with query_counter(alias="testdb-1") as q:
assert q == 0
B.objects.create(name="B")
assert q == 1
b = B.objects.first()
assert q == 2
b.name = "Test B"
b.save()
assert b.name == "Test B"
assert q == 3
# querying the other db should'nt alter the counter
A.objects().first()
assert q == 3
def test_query_counter_counts_getmore_queries(self):
db = get_db()
collection = db.query_counter
collection.drop()
many_docs = [{"test": "garbage %s" % i} for i in range(150)]
collection.insert_many(
many_docs
) # first batch of documents contains 101 documents
with query_counter() as q:
assert q == 0
list(collection.find())
assert q == 2 # 1st select + 1 getmore
def test_query_counter_ignores_particular_queries(self):
db = get_db()
collection = db.query_counter
collection.insert_many([{"test": "garbage %s" % i} for i in range(10)])
with query_counter() as q:
assert q == 0
cursor = collection.find()
assert q == 0 # cursor wasn't opened yet
_ = next(cursor) # opens the cursor and fires the find query
assert q == 1
cursor.close() # issues a `killcursors` query that is ignored by the context
assert q == 1
_ = (
db.system.indexes.find_one()
) # queries on db.system.indexes are ignored as well
assert q == 1
@requires_mongodb_gte_40
def test_updating_a_document_within_a_transaction(self):
class A(Document):
name = StringField()
A.drop_collection()
a_doc = A.objects.create(name="a")
with run_in_transaction():
a_doc.update(name="b")
assert A.objects.get(id=a_doc.id).name == "b"
assert A.objects.count() == 1
assert A.objects.count() == 1
assert A.objects.get(id=a_doc.id).name == "b"
@requires_mongodb_gte_40
def test_updating_a_document_within_a_transaction_that_fails(self):
class A(Document):
name = StringField()
A.drop_collection()
a_doc = A.objects.create(name="a")
with pytest.raises(TestRollbackError):
with run_in_transaction():
a_doc.update(name="b")
assert A.objects.get(id=a_doc.id).name == "b"
raise TestRollbackError()
assert A.objects.count() == 1
assert A.objects.get(id=a_doc.id).name == "a"
@requires_mongodb_gte_40
def test_creating_a_document_within_a_transaction(self):
class A(Document):
name = StringField()
A.drop_collection()
# ensure collection is created (needed for transaction with MongoDB <= 4.2)
A.objects.create(name="test")
A.objects.delete()
with run_in_transaction():
a_doc = A.objects.create(name="a")
another_doc = A(name="b").save()
assert A.objects.get(id=a_doc.id).name == "a"
assert A.objects.get(id=another_doc.id).name == "b"
assert A.objects.count() == 2
assert A.objects.count() == 2
assert A.objects.get(id=a_doc.id).name == "a"
assert A.objects.get(id=another_doc.id).name == "b"
@requires_mongodb_gte_40
def test_creating_a_document_within_a_transaction_that_fails(self):
class A(Document):
name = StringField()
A.drop_collection()
# ensure collection is created (needed for transaction with MongoDB <= 4.2)
A.objects.create(name="test")
A.objects.delete()
with pytest.raises(TestRollbackError):
with run_in_transaction():
a_doc = A.objects.create(name="a")
another_doc = A(name="b").save()
assert A.objects.get(id=a_doc.id).name == "a"
assert A.objects.get(id=another_doc.id).name == "b"
assert A.objects.count() == 2
raise TestRollbackError()
assert A.objects.count() == 0
@requires_mongodb_gte_40
def test_transaction_updates_across_databases(self):
connect("mongoenginetest")
connect("test2", "test2")
class A(Document):
name = StringField()
A.objects.all().delete()
a_doc = A.objects.create(name="a")
class B(Document):
meta = {"db_alias": "test2"}
name = StringField()
B.objects.all().delete()
b_doc = B.objects.create(name="b")
with run_in_transaction():
a_doc.update(name="a2")
b_doc.update(name="b2")
assert "a2" == A.objects.get(id=a_doc.id).name
assert "b2" == B.objects.get(id=b_doc.id).name
@requires_mongodb_gte_44
def test_collection_creation_via_upserts_across_databases_in_transaction(self):
connect("mongoenginetest")
connect("test2", "test2")
class A(Document):
name = StringField()
A.drop_collection()
a_doc = A.objects.create(name="a")
class B(Document):
meta = {"db_alias": "test2"}
name = StringField()
B.drop_collection()
b_doc = B.objects.create(name="b")
with run_in_transaction():
a_doc.update(name="a3")
with switch_db(A, "test2"):
a_doc.update(name="a4", upsert=True)
b_doc.update(name="b3")
b_doc.update(name="b4")
assert "a3" == A.objects.get(id=a_doc.id).name
assert "b4" == B.objects.get(id=b_doc.id).name
with switch_db(A, "test2"):
assert "a4" == A.objects.get(id=a_doc.id).name
@requires_mongodb_gte_40
def test_an_exception_raised_in_transactions_across_databases_rolls_back_updates(
self,
):
connect("mongoenginetest")
connect("test2", "test2")
class A(Document):
name = StringField()
A.drop_collection()
with switch_db(A, "test2"):
A.drop_collection()
a_doc = A.objects.create(name="a")
class B(Document):
meta = {"db_alias": "test2"}
name = StringField()
B.drop_collection()
b_doc = B.objects.create(name="b")
try:
with run_in_transaction():
a_doc.update(name="a3")
with switch_db(A, "test2"):
a_doc.update(name="a4", upsert=True)
b_doc.update(name="b3")
b_doc.update(name="b4")
raise Exception
except Exception:
pass
assert "a" == A.objects.get(id=a_doc.id).name
assert "b" == B.objects.get(id=b_doc.id).name
with switch_db(A, "test2"):
assert 0 == A.objects.all().count()
@requires_mongodb_gte_40
def test_exception_in_child_of_a_nested_transaction_rolls_parent_back(self):
class A(Document):
name = StringField()
A.drop_collection()
a_doc = A.objects.create(name="a")
class B(Document):
name = StringField()
B.drop_collection()
b_doc = B.objects.create(name="b")
def run_tx():
try:
with run_in_transaction():
a_doc.update(name="trx-parent")
try:
with run_in_transaction():
b_doc.update(name="trx-child")
raise TestRollbackError()
except TestRollbackError as exc:
# at this stage, the parent transaction is still there
assert A.objects.get(id=a_doc.id).name == "trx-parent"
raise exc
except OperationError as op_failure:
"""
See thread safety test below for more details about TransientTransactionError handling
"""
if "TransientTransactionError" in str(op_failure):
logging.warning("TransientTransactionError - retrying...")
run_tx()
else:
raise op_failure
with pytest.raises(TestRollbackError):
run_tx()
assert A.objects.get(id=a_doc.id).name == "a"
assert B.objects.get(id=b_doc.id).name == "b"
@requires_mongodb_gte_40
def test_exception_in_parent_of_nested_transaction_after_child_completed_only_rolls_parent_back(
self,
):
class A(Document):
name = StringField()
A.drop_collection()
a_doc = A.objects.create(name="a")
class B(Document):
name = StringField()
B.drop_collection()
b_doc = B.objects.create(name="b")
def run_tx():
try:
with run_in_transaction():
a_doc.update(name="trx-parent")
with run_in_transaction():
b_doc.update(name="trx-child")
raise TestRollbackError()
except TestRollbackError:
pass
except OperationError as op_failure:
"""
See thread safety test below for more details about TransientTransactionError handling
"""
if "TransientTransactionError" in str(op_failure):
logging.warning("TransientTransactionError - retrying...")
run_tx()
else:
raise op_failure
run_tx()
assert "a" == A.objects.get(id=a_doc.id).name
assert "trx-child" == B.objects.get(id=b_doc.id).name
@requires_mongodb_gte_40
def test_nested_transactions_create_and_release_sessions_accordingly(self):
with run_in_transaction():
s1 = _get_session()
with run_in_transaction():
s2 = _get_session()
assert s1 is not s2
with run_in_transaction():
pass
assert _get_session() is s2
assert _get_session() is s1
assert _get_session() is None
@requires_mongodb_gte_40
def test_thread_safety_of_transactions(self):
"""
Make sure transactions don't step over each other. Each
session should be isolated to each thread. If this is the
case, then no amount of runtime variability should have
an effect on the output.
This test sets up e.g 10 records, each with an integer field
of value 0 - 9.
We then spin up e.g 10 threads and attempt to update a target
record by multiplying its integer value by 10. Then, if
the target record is even, throw an exception, which
should then roll the transaction back. The odd rows always
succeed.
If the sessions are properly thread safe, we should ALWAYS
net out with the following sum across the integer fields
of the 10 records:
0 + 10 + 2 + 30 + 4 + 50 + 6 + 70 + 8 + 90 = 270
"""
class A(Document):
i = IntField(unique=True)
A.drop_collection()
# Ensure the collection is created
_ = A.objects.first()
thread_count = 20
def thread_fn(idx):
# Open the transaction at some unknown interval
time.sleep(random.uniform(0.1, 0.5))
try:
with run_in_transaction():
a = A.objects.get(i=idx)
a.i = idx * thread_count
# Save at some unknown interval
time.sleep(random.uniform(0.1, 0.5))
a.save()
# Force rollbacks for the even runs...
if idx % 2 == 0:
raise TestRollbackError()
except TestRollbackError:
pass
except pymongo.errors.OperationFailure as op_failure:
"""
If there's a TransientTransactionError, retry - the lock could not be acquired.
Per MongoDB docs: The core transaction API does not incorporate retry logic for
"TransientTransactionError". To handle "TransientTransactionError", applications
should explicitly incorporate retry logic for the error.
See: https://www.mongodb.com/docs/manual/core/transactions-in-applications/#-transienttransactionerror-
"""
error_labels = op_failure.details.get("errorLabels", [])
if "TransientTransactionError" in error_labels:
logging.warning("TransientTransactionError - retrying...")
thread_fn(idx)
else:
raise op_failure
for r in range(5):
"""
Threads & randomization are tricky - run it multiple times
"""
# Clear out the collection for a fresh run
A.objects.all().delete()
# Prepopulate the data for reads
for i in range(thread_count):
A.objects.create(i=i)
# Prime the threads
threads = [
TestableThread(target=thread_fn, args=(i,)) for i in range(thread_count)
]
for t in threads:
t.start()
for t in threads:
t.join()
# Check the sum
expected_sum = sum(
i if i % 2 == 0 else i * thread_count for i in range(thread_count)
)
assert expected_sum == 2090
assert expected_sum == sum(a.i for a in A.objects.all())
if __name__ == "__main__":
unittest.main()
| TestContextManagers |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 26593,
"end": 27200
} | class ____:
name: str
backend: xc.Client
axis_name: core.AxisName
axis_size: int
global_axis_size: int
devices: Sequence[xc.Device] | None
in_axes: Iterable[int | None]
out_axes_thunk: Callable[[], Sequence[int | None]]
avals: Sequence[core.AbstractValue]
@cached_property
def local_devices(self):
if self.devices:
out = [d for d in self.devices
if d.process_index == xb.process_index(self.backend)]
assert len(out) > 0
else:
out = None
return out
@cached_property
def out_axes(self):
return self.out_axes_thunk()
| ParallelCallableInfo |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 121451,
"end": 122128
} | class ____:
xlThemeColorAccent1 = 5 # from enum XlThemeColor
xlThemeColorAccent2 = 6 # from enum XlThemeColor
xlThemeColorAccent3 = 7 # from enum XlThemeColor
xlThemeColorAccent4 = 8 # from enum XlThemeColor
xlThemeColorAccent5 = 9 # from enum XlThemeColor
xlThemeColorAccent6 = 10 # from enum XlThemeColor
xlThemeColorDark1 = 1 # from enum XlThemeColor
xlThemeColorDark2 = 3 # from enum XlThemeColor
xlThemeColorFollowedHyperlink = 12 # from enum XlThemeColor
xlThemeColorHyperlink = 11 # from enum XlThemeColor
xlThemeColorLight1 = 2 # from enum XlThemeColor
xlThemeColorLight2 = 4 # from enum XlThemeColor
| ThemeColor |
python | django-extensions__django-extensions | django_extensions/import_subclasses.py | {
"start": 286,
"end": 2319
} | class ____:
def __init__(self, base_classes_from_settings):
self.base_classes = []
for element in base_classes_from_settings:
if isinstance(element, str):
element = import_string(element)
self.base_classes.append(element)
def _should_be_imported(self, candidate_to_import): # type: (Tuple[str, type]) -> bool
for base_class in self.base_classes:
if issubclass(candidate_to_import[1], base_class):
return True
return False
def collect_subclasses(self): # type: () -> Dict[str, List[Tuple[str, str]]]
"""
Collect all subclasses of user-defined base classes from project.
:return: Dictionary from module name to list of tuples.
First element of tuple is model name and second is alias.
Currently we set alias equal to model name,
but in future functionality of aliasing subclasses can be added.
"""
result = {} # type: Dict[str, List[Tuple[str, str]]]
for loader, module_name, is_pkg in walk_packages(path=[str(settings.BASE_DIR)]):
subclasses_from_module = self._collect_classes_from_module(module_name)
if subclasses_from_module:
result[module_name] = subclasses_from_module
return result
def _collect_classes_from_module(self, module_name): # type: (str) -> List[Tuple[str, str]]
for excluded_module in getattr(
settings, "SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST", []
):
if module_name.startswith(excluded_module):
return []
imported_module = import_module(module_name)
classes_to_import = getmembers(
imported_module,
lambda element: isclass(element)
and element.__module__ == imported_module.__name__,
)
classes_to_import = list(filter(self._should_be_imported, classes_to_import))
return [(name, name) for name, _ in classes_to_import]
| SubclassesFinder |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_input_json_delta.py | {
"start": 195,
"end": 293
} | class ____(BaseModel):
partial_json: str
type: Literal["input_json_delta"]
| BetaInputJSONDelta |
python | pypa__pip | src/pip/_vendor/urllib3/util/ssltransport.py | {
"start": 136,
"end": 6895
} | class ____:
"""
The SSLTransport wraps an existing socket and establishes an SSL connection.
Contrary to Python's implementation of SSLSocket, it allows you to chain
multiple TLS connections together. It's particularly useful if you need to
implement TLS within TLS.
The class supports most of the socket API operations.
"""
@staticmethod
def _validate_ssl_context_for_tls_in_tls(ssl_context):
"""
Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
for TLS in TLS.
The only requirement is that the ssl_context provides the 'wrap_bio'
methods.
"""
if not hasattr(ssl_context, "wrap_bio"):
if six.PY2:
raise ProxySchemeUnsupported(
"TLS in TLS requires SSLContext.wrap_bio() which isn't "
"supported on Python 2"
)
else:
raise ProxySchemeUnsupported(
"TLS in TLS requires SSLContext.wrap_bio() which isn't "
"available on non-native SSLContext"
)
def __init__(
self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
):
"""
Create an SSLTransport around socket using the provided ssl_context.
"""
self.incoming = ssl.MemoryBIO()
self.outgoing = ssl.MemoryBIO()
self.suppress_ragged_eofs = suppress_ragged_eofs
self.socket = socket
self.sslobj = ssl_context.wrap_bio(
self.incoming, self.outgoing, server_hostname=server_hostname
)
# Perform initial handshake.
self._ssl_io_loop(self.sslobj.do_handshake)
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
def fileno(self):
return self.socket.fileno()
def read(self, len=1024, buffer=None):
return self._wrap_ssl_read(len, buffer)
def recv(self, len=1024, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to recv")
return self._wrap_ssl_read(len)
def recv_into(self, buffer, nbytes=None, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to recv_into")
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
return self.read(nbytes, buffer)
def sendall(self, data, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to sendall")
count = 0
with memoryview(data) as view, view.cast("B") as byte_view:
amount = len(byte_view)
while count < amount:
v = self.send(byte_view[count:])
count += v
def send(self, data, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to send")
response = self._ssl_io_loop(self.sslobj.write, data)
return response
def makefile(
self, mode="r", buffering=None, encoding=None, errors=None, newline=None
):
"""
Python's httpclient uses makefile and buffered io when reading HTTP
messages and we need to support it.
This is unfortunately a copy and paste of socket.py makefile with small
changes to point to the socket directly.
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = socket.SocketIO(self, rawmode)
self.socket._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def unwrap(self):
self._ssl_io_loop(self.sslobj.unwrap)
def close(self):
self.socket.close()
def getpeercert(self, binary_form=False):
return self.sslobj.getpeercert(binary_form)
def version(self):
return self.sslobj.version()
def cipher(self):
return self.sslobj.cipher()
def selected_alpn_protocol(self):
return self.sslobj.selected_alpn_protocol()
def selected_npn_protocol(self):
return self.sslobj.selected_npn_protocol()
def shared_ciphers(self):
return self.sslobj.shared_ciphers()
def compression(self):
return self.sslobj.compression()
def settimeout(self, value):
self.socket.settimeout(value)
def gettimeout(self):
return self.socket.gettimeout()
def _decref_socketios(self):
self.socket._decref_socketios()
def _wrap_ssl_read(self, len, buffer=None):
try:
return self._ssl_io_loop(self.sslobj.read, len, buffer)
except ssl.SSLError as e:
if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
return 0 # eof, return 0.
else:
raise
def _ssl_io_loop(self, func, *args):
"""Performs an I/O loop between incoming/outgoing and the socket."""
should_loop = True
ret = None
while should_loop:
errno = None
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
# WANT_READ, and WANT_WRITE are expected, others are not.
raise e
errno = e.errno
buf = self.outgoing.read()
self.socket.sendall(buf)
if errno is None:
should_loop = False
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = self.socket.recv(SSL_BLOCKSIZE)
if buf:
self.incoming.write(buf)
else:
self.incoming.write_eof()
return ret
| SSLTransport |
python | pytorch__pytorch | test/fx/test_subgraph_rewriter.py | {
"start": 976,
"end": 35160
} | class ____(JitTestCase):
def test_subgraph_rewriter_preserves_logic(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x) + torch.relu(x)
def comparison(x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
# Replace `pattern` with the same pattern (shouldn't change
# the underlying logic)
subgraph_rewriter.replace_pattern(traced, pattern, pattern)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_with_oneliner_pattern(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x)
def replacement(x):
return torch.relu(x)
def comparison(x):
val = torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_with_trivial_replacement(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x)
val = torch.add(val, val)
return torch.add(val, val)
def pattern(x):
return torch.add(x, x)
def replacement(x):
return x
def comparison(x):
return torch.neg(x)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(1, 5)
matches = subgraph_rewriter.replace_pattern_with_filters(
traced, pattern, replacement, []
)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
no_replacements = len(matches) == 2 and len(matches[1].replacements) == 0
self.assertEqual(ref_output, test_output)
self.assertTrue(no_replacements)
def test_subgraph_rewriter_single_pattern_match(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x) + torch.relu(x)
def replacement(x):
return torch.relu(x)
def comparison(x):
val = torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_multiple_pattern_match(self):
class M(torch.nn.Module):
def forward(self, x, w1, w2):
m1 = torch.cat([w1, w2]).sum()
m2 = torch.cat([w1, w2]).sum()
return x + torch.max(m1) + torch.max(m2)
def pattern(w1, w2):
return torch.cat([w1, w2]).sum()
def replacement(w1, w2):
return torch.stack([w1, w2])
def comparison(x, w1, w2):
m1 = torch.stack([w1, w2])
m2 = torch.stack([w1, w2])
return x + torch.max(m1) + torch.max(m2)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
w1 = torch.rand(1, 3)
w2 = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x, w1, w2)
test_outs = traced.forward(x, w1, w2)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_graph_argument_order(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.mm(x, y)
def pattern(x, y):
return torch.mm(x, y)
def comparison(x, y):
return torch.mm(x, y)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
y = torch.randn(4, 5)
subgraph_rewriter.replace_pattern(traced, pattern, pattern)
traced.graph.lint()
ref_outs = comparison_fn(x, y)
test_outs = traced.forward(x, y)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_correct_output_replacement(self):
class M(torch.nn.Module):
def forward(self, x, y):
val = torch.neg(y) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.relu(x)
def replacement(x):
return torch.neg(x)
def comparison(x, y):
val = torch.neg(y) + torch.neg(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x, y)
test_outs = traced.forward(x, y)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_traced_as_callable(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
class Pattern(torch.nn.Module):
def forward(self, x):
return torch.neg(x) + torch.relu(x)
class Replacement(torch.nn.Module):
def forward(self, x):
return torch.sigmoid(x)
def comparison(x):
val = torch.sigmoid(x)
return torch.add(val, val)
traced = symbolic_trace(M())
traced_pattern = symbolic_trace(Pattern())
traced_replacement = symbolic_trace(Replacement())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, traced_pattern, traced_replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_pattern_is_entire_graph(self):
class M(torch.nn.Module):
def forward(self, x):
a = torch.neg(x)
return torch.add(a, a)
def pattern(x):
a = torch.neg(x)
return torch.add(a, a)
def replacement(x):
a = torch.sigmoid(x)
return torch.cat([a, a])
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(replacement)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_pattern_output_pattern_node_can_have_users_that_are_not_matched(
self,
):
class M(torch.nn.Module):
def forward(self, x):
y = torch.relu(x)
return torch.neg(y) - y
def pattern(x):
return torch.relu(x)
def replacement(x):
return torch.sigmoid(x)
def comparison(x):
y = torch.sigmoid(x)
return torch.neg(y) - y
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_internal_pattern_nodes_cannot_have_users_that_are_not_matched(
self,
):
class M(torch.nn.Module):
def forward(self, x, w1, w2, b1, b2):
m0 = torch.cat([w1, w2]) # noqa: F841
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
t0 = torch.addmm(b1, m1, m2.t()) # noqa: F841
t1 = torch.sum(w1, 1)
t2 = torch.addmm(b1, m1, m2.t())
return torch.sum(t1), torch.sum(t2)
def pattern(x, w1, w2, b1, b2):
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
return torch.addmm(b1, m1, m2.t())
def replacement(x, w1, w2, b1, b2):
return torch.cat([x, w1, w2])
traced = symbolic_trace(M())
# Result should be [] since no matches can be found
res = subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
self.assertEqual(res, [])
def test_subgraph_rewriter_placeholder_matching(self):
"""
This tests that a placeholder Node can be matched to a Node with
a different number of input Nodes. In the example below, the
original traced Module looks like this:
opcode target args kwargs
------------- ---------------------------------------------------------- ------------------------ --------
placeholder x () {}
call_function <built-in function add> (x, 3) {}
call_method dequantize (add,) {}
call_function <built-in method sigmoid of type object at 0x7f7c1f440fe0> (dequantize,) {}
call_method to (sigmoid, torch.float16) {}
output output (to,) {}
while the pattern we want to match looks like this:
opcode target args kwargs
------------- ---------------------------------------------------------- ------------------------ --------
placeholder x () {}
call_method dequantize (x,) {}
call_function <built-in method sigmoid of type object at 0x7f7c1f440fe0> (dequantize,) {}
call_method to (sigmoid, torch.float16) {}
output output (to,) {}
Here, we want to be able to match the original graph's
`call_function.add` Node with the pattern graph's
`placeholder.x` Node.
Credit to Jerry Zhang (GitHub: jerryzh168) for this test case
"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.dtype = torch.float16
def forward(self, x):
x += 3
x = x.dequantize()
x = torch.sigmoid(x)
dtype = self.dtype
x = x.to(dtype)
return x
def pattern(x):
x = x.dequantize()
x = torch.sigmoid(x)
x = x.to(torch.float16)
return x
def replacement(x):
return x
def comparison(x):
return x + 3
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_replaces_referenced_submodules(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.submod = torch.nn.ReLU()
def forward(self, x):
x = x + 1
return self.submod(self.sigmoid(x))
class Pattern(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.submod = torch.nn.ReLU()
def forward(self, x):
return self.submod(self.sigmoid(x))
class Replacement(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.tanh = torch.nn.Tanh()
self.submod = torch.nn.ReLU()
def forward(self, x):
return self.submod(self.tanh(x))
class Comparison(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.tanh = torch.nn.Tanh()
self.submod = torch.nn.ReLU()
def forward(self, x):
x = x + 1
return self.submod(self.tanh(x))
traced = symbolic_trace(M())
comparison = Comparison()
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, Pattern(), Replacement())
traced.graph.lint()
ref_outs = comparison(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
traced.get_submodule("tanh")
with self.assertRaisesRegex(AttributeError, "has no attribute"):
traced.get_submodule("sigmoid")
submod = traced.get_submodule("submod")
self.assertEqual(type(submod), torch.nn.ReLU)
def test_subgraph_rewriter_annotations_int(self):
class M1(torch.nn.Module):
def forward(self, x):
y: int = x
return torch.add(x, y)
class M2(torch.nn.Module):
def forward(self, x):
y = annotate(x, int)
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M1())
module = M2()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
for n, m in zip(symbolic_traced.graph.nodes, graph.nodes):
if n.op == "placeholder":
assert n.type is int
assert m.type is int
def test_subgraph_rewriter_replace_consecutive_submodules(self):
def f(x):
x = torch.sigmoid(x)
x = torch.sigmoid(x)
return torch.sigmoid(x)
def pattern(x):
return torch.sigmoid(x)
def replacement(x):
return torch.exp(x)
def comparison(x):
x = torch.exp(x)
x = torch.exp(x)
return torch.exp(x)
traced = symbolic_trace(f)
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_with_overlapping_matches(self):
def f(x):
x = torch.sigmoid(x)
x = torch.sigmoid(x)
x = torch.sigmoid(x)
return torch.sigmoid(x)
def pattern(x):
x = torch.sigmoid(x)
x = torch.sigmoid(x)
return x
def replacement(x):
return torch.neg(x)
def comparison(x):
x = torch.neg(x)
return torch.neg(x)
traced = symbolic_trace(f)
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_replace_with_multiple_outputs(self):
def f(x):
y = torch.sigmoid(x)
z = torch.relu(x)
return y + z
def pattern(a):
b = torch.sigmoid(a)
c = torch.relu(a)
return b, c
def replacement(x):
return torch.exp(x), torch.abs(x)
def comparison(x):
y = torch.exp(x)
z = torch.abs(x)
return y + z
traced = symbolic_trace(f)
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_replace_with_duplicated_outputs(self):
def f(x1, x2):
x = x1 - x2
y = torch.sigmoid(x)
z = torch.relu(x)
return y + z
def pattern(a1, a2):
a = a1 - a2
b = torch.sigmoid(a)
c = torch.relu(a)
return b, c, a
def replacement(x1, x2):
y1 = torch.exp(x1)
y2 = torch.abs(x2)
return y2, y2, y1
def comparison(x1, x2):
y2 = torch.abs(x2)
return y2 + y2
traced = symbolic_trace(f)
comparison_fn = symbolic_trace(comparison)
x1 = torch.randn(3, 4)
x2 = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x1, x2)
test_outs = traced.forward(x1, x2)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_with_unused_args(self):
class M(torch.nn.Module):
def forward(self, x, y, z):
return x + y
def pattern(x, y):
return x + y
def replacement(x, y):
return x - y
def comparison(x1, x2, x3):
return x1 - x2
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x1 = torch.randn(3, 4)
x2 = torch.randn(3, 4)
x3 = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
placeholder_nodes = [n for n in traced.graph.nodes if n.op == "placeholder"]
assert len(placeholder_nodes) == 3
ref_outs = comparison_fn(x1, x2, x3)
test_outs = traced.forward(x1, x2, x3)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_with_unused_results(self):
class M(torch.nn.Module):
def forward(self, x, y, cache):
m = torch.mul(x, y)
n = cache.index_copy(0, torch.tensor([0]), m)
p = torch.ops.aten.copy.default(cache, n)
q = torch.ops.aten.copy_.default(cache, p) # noqa: F841
u = torch.relu(cache)
# check the result to ensure cache is updated before relu op
return u
def pattern(self_tensor, src_tensor):
p = torch.ops.aten.copy.default(self_tensor, src_tensor)
q = torch.ops.aten.copy_.default(self_tensor, p)
return q
def replacement(self_tensor, src_tensor):
q = torch.ops.aten.copy_.default(self_tensor, src_tensor)
return q
def comparison(x, y, cache):
m = torch.mul(x, y)
n = cache.index_copy(0, torch.tensor([0]), m)
q = torch.ops.aten.copy_.default(cache, n) # noqa: F841
u = torch.relu(cache)
return u
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
x = torch.randn(1, 8)
y = torch.randn(1, 8)
cache = torch.randn(2, 8)
x_clone = x.clone()
y_clone = y.clone()
cache_clone = cache.clone()
ref_outs = comparison_fn(x, y, cache)
test_outs = traced.forward(x_clone, y_clone, cache_clone)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_call_method(self):
class M(torch.nn.Module):
def forward(self, x):
x = x.dequantize()
x = x.sigmoid()
x = x.to(torch.float16)
return x
def pattern(x):
x = x.dequantize()
x = x.sigmoid()
x = x.to(torch.float16)
return x
def replacement(x):
return x
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(replacement)
x1 = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x1)
test_outs = traced.forward(x1)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_nodes_with_kwargs(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w0 = torch.nn.Parameter(torch.empty([128, 128]))
self.b0 = torch.nn.Parameter(torch.empty([128]))
def forward(self, in0):
lin_res = torch.nn.functional.linear(in0, self.w0, bias=self.b0)
mul_res = in0 * lin_res
sum_res = mul_res + in0
return sum_res
def pattern(a, b, bias):
lin_res = torch.nn.functional.linear(a, b, bias=bias)
mul_res = a * lin_res
return lin_res, mul_res
def replacement(a, b, bias):
lin_res, mul_res = wrapped_gemm_bias_mul(a, b, bias)
return lin_res, mul_res
traced = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern(traced, pattern, replacement)
self.assertEqual(len(matches), 1)
found_repalcement_node = False
for node in traced.graph.nodes:
if node.target is wrapped_gemm_bias_mul:
found_repalcement_node = True
break
self.assertTrue(found_repalcement_node)
def test_subgraph_rewriter_local_revert(self):
# Following model will have 3 anchors as the matching candidate with the given pattern
# Anchor 1 and 3 is a real match, but anchor 2 is not.
# The subgraph rewriter should be able to revert the changes made while matching anchor 2.
# Final match with anchor 3 should be successful.
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w0 = torch.nn.Parameter(torch.empty([128, 128]))
self.b0 = torch.nn.Parameter(torch.empty([128]))
self.w1 = torch.nn.Parameter(torch.empty([128, 128]))
self.b1 = torch.nn.Parameter(torch.empty([128]))
self.w2 = torch.nn.Parameter(torch.empty([128, 128]))
self.b2 = torch.nn.Parameter(torch.empty([128]))
self.w3 = torch.nn.Parameter(torch.empty([128, 128]))
self.b3 = torch.nn.Parameter(torch.empty([128]))
self.w4 = torch.nn.Parameter(torch.empty([128, 128]))
self.b4 = torch.nn.Parameter(torch.empty([128]))
def forward(self, in0, in1):
lin_res_1 = torch.nn.functional.linear(in1, self.w0, bias=self.b0)
lin_res_2 = torch.nn.functional.linear(lin_res_1, self.w1, bias=self.b1)
# potential match at anchor 1
mul_res_1 = in1 * lin_res_2
sum_res_1 = mul_res_1 + in1
lin_res_3 = torch.nn.functional.linear(sum_res_1, self.w2, bias=self.b2)
sigmoid_res_1 = torch.sigmoid(lin_res_3)
# potential match at anchor 2
mul_res_2 = lin_res_3 * sigmoid_res_1
lin_res_4 = torch.nn.functional.linear(in0, self.w3, bias=self.b3)
lin_res_5 = torch.nn.functional.linear(lin_res_4, self.w4, bias=self.b4)
# potential match at anchor 3
mul_res_3 = in0 * lin_res_5
sum_res_2 = mul_res_3 + in0
cat_res = torch.cat(
[mul_res_2, sum_res_2],
dim=1,
)
return cat_res
def gemm_bias_mul_pattern_with_c(a, b, bias, c):
lin_res = torch.nn.functional.linear(a, b, bias=bias)
mul_res = c * lin_res
return lin_res, mul_res
def gemm_bias_mul_replacement_with_c(a, b, bias, c):
lin_res, mul_res = wrapped_gemm_bias_mul_with_c(a, b, bias, c)
return lin_res, mul_res
traced = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern(
traced, gemm_bias_mul_pattern_with_c, gemm_bias_mul_replacement_with_c
)
self.assertEqual(len(matches), 2)
repalcement_node_found = 0
for node in traced.graph.nodes:
if node.target is wrapped_gemm_bias_mul_with_c:
repalcement_node_found += 1
self.assertEqual(repalcement_node_found, 2)
def test_replace_pattern_with_filters(self):
class M(torch.nn.Module):
def forward(self, x, scale, zero_point):
# Match, second input to add is a scalar
x = x.dequantize()
x = torch.add(x, 2)
x = x.relu()
x = torch.quantize_per_tensor(x, scale, zero_point, torch.quint8)
y = x + 1
# NOT a match, second input to add is NOT a scalar
x = x.dequantize()
x = torch.add(x, y)
x = x.relu()
x = torch.quantize_per_tensor(x, scale, zero_point, torch.quint8)
return x
def BinaryOpScalarReLUPattern(x, num, scale, zero_point):
x = x.dequantize()
x = torch.add(x, num)
x = x.relu()
x = torch.quantize_per_tensor(x, scale, zero_point, torch.quint8)
return x
def BinaryOpScalarReLUReplacement(x, num, scale, zero_point):
x = torch.mul(x, num)
return x
def second_input_is_scalar(match, original_graph, pattern_graph):
"""check the node that's matched to the second input of the pattern graph
is a scalar number
"""
input_idx = 0
for node in pattern_graph.nodes:
if node.op == "placeholder":
if input_idx == 1:
num_node = node
input_idx += 1
return isinstance(match.nodes_map[num_node], (int, float))
def check_replacement_nodes(self, traced, matches):
replacement_nodes_in_graph = [
node for node in traced.graph.nodes if node.target == torch.mul
]
replacement_nodes_in_res = [r for m in matches for r in m.replacements]
self.assertEqual(
len(replacement_nodes_in_graph), len(replacement_nodes_in_res)
)
self.assertEqual(replacement_nodes_in_graph, replacement_nodes_in_res)
return len(replacement_nodes_in_graph)
# match without filter, should find 2 match
traced = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern_with_filters(
traced, BinaryOpScalarReLUPattern, BinaryOpScalarReLUReplacement, None
)
self.assertEqual(len(matches), 2)
self.assertEqual(check_replacement_nodes(self, traced, matches), 2)
# match with filter, should find 1 match
traced = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern_with_filters(
traced,
BinaryOpScalarReLUPattern,
BinaryOpScalarReLUReplacement,
[second_input_is_scalar],
)
self.assertEqual(len(matches), 1)
self.assertEqual(check_replacement_nodes(self, traced, matches), 1)
def test_matching_pattern_with_list_type_arg(self):
class M(torch.nn.Module):
def forward(self, x):
return torch.ops.aten._reshape_alias_copy.default(x, [1, 2], [3, 4])
def pattern(x, arg0, arg1):
return torch.ops.aten._reshape_alias_copy.default(x, arg0, arg1)
def replacement(x, arg0, arg1):
return torch.ops.aten._reshape_alias_copy.default(x, arg1, arg0)
traced = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern(traced, pattern, replacement)
self.assertEqual(len(matches), 1)
self.assertExpectedInline(
traced.code.strip(),
"""\
def forward(self, x):
_reshape_alias_copy_default_1 = torch.ops.aten._reshape_alias_copy.default(x, [3, 4], [1, 2]); x = None
return _reshape_alias_copy_default_1""",
) # noqa: B950
def test_replacement_with_attrs(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.tensor([1])
self.b = torch.tensor([2])
def forward(self, x):
return x + self.a - self.b
class Pattern(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.tensor([1])
def forward(self, x):
return x + self.a
class Replacement(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.c = torch.tensor([3])
def forward(self, x):
return x - self.c
traced = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern(traced, Pattern(), Replacement())
self.assertEqual(len(matches), 1)
def test_matching_variable_arguments(self):
class M(torch.nn.Module):
def forward(self, x):
return torch.ops.aten.max_pool2d_with_indices.default(
x, [2, 2], stride=[2, 2]
)
def pattern(x, kernel_size, stride):
# default padding is [0, 0]
return torch.ops.aten.max_pool2d_with_indices.default(
x, kernel_size, stride, padding=[0, 0]
)
traced = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern(traced, pattern, pattern)
self.assertEqual(len(matches), 1)
def test_replaced_nodes(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.add(x, y)
def pattern(x, y):
return torch.add(x, y)
def replacement(x, y):
return torch.sub(torch.mul(x, y), y)
traced = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern_with_filters(
traced, pattern, replacement
)
def check_replacement_nodes(self, traced, matches):
replacement_nodes_in_graph = [
node
for node in traced.graph.nodes
if node.target in {torch.sub, torch.mul}
]
replacement_nodes_in_res = [r for m in matches for r in m.replacements]
self.assertEqual(
len(replacement_nodes_in_graph), len(replacement_nodes_in_res)
)
self.assertEqual(replacement_nodes_in_graph, replacement_nodes_in_res)
return len(replacement_nodes_in_graph)
self.assertEqual(check_replacement_nodes(self, traced, matches), 2)
def test_replace_pattern_with_callback(self) -> None:
class M(torch.nn.Module):
def forward(self, x, y):
return torch.add(x, y)
def pattern(x, y):
return torch.add(x, y)
def replacement(x, y):
return torch.sub(torch.mul(x, y), y)
traced = symbolic_trace(M())
# Return the same replacement graph for all matches, but have it be a unique
# object each time.
matches = subgraph_rewriter.replace_pattern_with_filters(
traced,
pattern,
replacement_callback=lambda *args: symbolic_trace(replacement).graph,
)
def check_replacement_nodes(self, traced, matches):
replacement_nodes_in_graph = [
node
for node in traced.graph.nodes
if node.target in {torch.sub, torch.mul}
]
replacement_nodes_in_res = [r for m in matches for r in m.replacements]
self.assertEqual(
len(replacement_nodes_in_graph), len(replacement_nodes_in_res)
)
self.assertEqual(replacement_nodes_in_graph, replacement_nodes_in_res)
return len(replacement_nodes_in_graph)
self.assertEqual(check_replacement_nodes(self, traced, matches), 2)
| TestSubgraphRewriter |
python | ray-project__ray | rllib/connectors/agent/clip_reward.py | {
"start": 373,
"end": 1723
} | class ____(AgentConnector):
def __init__(self, ctx: ConnectorContext, sign=False, limit=None):
super().__init__(ctx)
assert (
not sign or not limit
), "should not enable both sign and limit reward clipping."
self.sign = sign
self.limit = limit
def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType:
d = ac_data.data
assert (
type(d) is dict
), "Single agent data must be of type Dict[str, TensorStructType]"
if SampleBatch.REWARDS not in d:
# Nothing to clip. May happen for initial obs.
return ac_data
if self.sign:
d[SampleBatch.REWARDS] = np.sign(d[SampleBatch.REWARDS])
elif self.limit:
d[SampleBatch.REWARDS] = np.clip(
d[SampleBatch.REWARDS],
a_min=-self.limit,
a_max=self.limit,
)
return ac_data
def to_state(self):
return ClipRewardAgentConnector.__name__, {
"sign": self.sign,
"limit": self.limit,
}
@staticmethod
def from_state(ctx: ConnectorContext, params: Any):
return ClipRewardAgentConnector(ctx, **params)
register_connector(ClipRewardAgentConnector.__name__, ClipRewardAgentConnector)
| ClipRewardAgentConnector |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/sensors/test_cosmos.py | {
"start": 1040,
"end": 2373
} | class ____:
@mock.patch("airflow.providers.microsoft.azure.sensors.cosmos.AzureCosmosDBHook")
def test_should_call_hook_with_args(self, mock_hook):
mock_instance = mock_hook.return_value
mock_instance.get_document.return_value = True # Indicate document returned
sensor = AzureCosmosDocumentSensor(
task_id="test-task-1",
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
document_id=DOCUMENT_ID,
)
result = sensor.poke(None)
mock_instance.get_document.assert_called_once_with(DOCUMENT_ID, DB_NAME, COLLECTION_NAME)
assert result is True
@mock.patch("airflow.providers.microsoft.azure.sensors.cosmos.AzureCosmosDBHook")
def test_should_return_false_on_no_document(self, mock_hook):
mock_instance = mock_hook.return_value
mock_instance.get_document.return_value = None # Indicate document not returned
sensor = AzureCosmosDocumentSensor(
task_id="test-task-2",
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
document_id=DOCUMENT_ID,
)
result = sensor.poke(None)
mock_instance.get_document.assert_called_once_with(DOCUMENT_ID, DB_NAME, COLLECTION_NAME)
assert result is False
| TestAzureCosmosSensor |
python | ray-project__ray | doc/source/serve/doc_code/monitoring/deployment_logger.py | {
"start": 127,
"end": 539
} | class ____:
def __init__(self):
self.count = 0
def __call__(self, request):
self.count += 1
logger.info(f"count: {self.count}")
return {"count": self.count}
counter = Counter.bind()
serve.run(counter)
for i in range(10):
requests.get("http://127.0.0.1:8000/")
# __end__
response = requests.get("http://127.0.0.1:8000/")
assert response.json() == {"count": 11}
| Counter |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py | {
"start": 405,
"end": 2148
} | class ____(CloudProvider):
"""Hetzner Cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
"""Filter out the cloud tests when the necessary config and resources are not available."""
aci = self._create_ansible_core_ci()
if aci.available:
return
super().filter(targets, exclude)
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self) -> None:
"""Request Hetzner credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
token = response['hetzner']['token']
display.sensitive.add(token)
display.info('Hetzner Cloud Token: %s' % token, verbosity=1)
values = dict(
TOKEN=token,
)
display.sensitive.add(values['TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self) -> AnsibleCoreCI:
"""Return a Hetzner instance of AnsibleCoreCI."""
return AnsibleCoreCI(self.args, CloudResource(platform='hetzner'))
| HcloudCloudProvider |
python | getlogbook__logbook | src/logbook/_fallback.py | {
"start": 1972,
"end": 2974
} | class ____:
"""Baseclass for all objects that provide stack manipulation
operations.
"""
def push_context(self):
"""Pushes the stacked object to the context stack."""
raise NotImplementedError()
def pop_context(self):
"""Pops the stacked object from the context stack."""
raise NotImplementedError()
def push_application(self):
"""Pushes the stacked object to the application stack."""
raise NotImplementedError()
def pop_application(self):
"""Pops the stacked object from the application stack."""
raise NotImplementedError()
def __enter__(self):
self.push_context()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop_context()
def applicationbound(self):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the application.
"""
return ApplicationBound(self)
| StackedObject |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py | {
"start": 6070,
"end": 6247
} | class ____(graphene.ObjectType):
ranges = non_null_list(GrapheneTimePartitionRangeStatus)
class Meta:
name = "TimePartitionStatuses"
| GrapheneTimePartitionStatuses |
python | scipy__scipy | scipy/stats/_discrete_distns.py | {
"start": 938,
"end": 3548
} | class ____(rv_discrete):
r"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is:
.. math::
f(k) = \binom{n}{k} p^k (1-p)^{n-k}
for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1`
`binom` takes :math:`n` and :math:`p` as shape parameters,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
This distribution uses routines from the Boost Math C++ library for
the computation of the ``pmf``, ``cdf``, ``sf``, ``ppf`` and ``isf``
methods. [1]_
%(after_notes)s
References
----------
.. [1] The Boost Developers. "Boost C++ Libraries". https://www.boost.org/.
%(example)s
See Also
--------
hypergeom, nbinom, nhypergeom
"""
def _shape_info(self):
return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("p", False, (0, 1), (True, True))]
def _rvs(self, n, p, size=None, random_state=None):
return random_state.binomial(n, p, size)
def _argcheck(self, n, p):
return (n >= 0) & _isintegral(n) & (p >= 0) & (p <= 1)
def _get_support(self, n, p):
return self.a, n
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
# binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
return scu._binom_pmf(x, n, p)
def _cdf(self, x, n, p):
k = floor(x)
return scu._binom_cdf(k, n, p)
def _sf(self, x, n, p):
k = floor(x)
return scu._binom_sf(k, n, p)
def _isf(self, x, n, p):
return scu._binom_isf(x, n, p)
def _ppf(self, q, n, p):
return scu._binom_ppf(q, n, p)
def _stats(self, n, p, moments='mv'):
mu = n * p
var = mu - n * np.square(p)
g1, g2 = None, None
if 's' in moments:
pq = p - np.square(p)
npq_sqrt = np.sqrt(n * pq)
t1 = np.reciprocal(npq_sqrt)
t2 = (2.0 * p) / npq_sqrt
g1 = t1 - t2
if 'k' in moments:
pq = p - np.square(p)
npq = n * pq
t1 = np.reciprocal(npq)
t2 = 6.0/n
g2 = t1 - t2
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
| binom_gen |
python | walkccc__LeetCode | solutions/2918. Minimum Equal Sum of Two Arrays After Replacing Zeros/2918.py | {
"start": 0,
"end": 338
} | class ____:
def minSum(self, nums1: list[int], nums2: list[int]) -> int:
sum1 = sum(nums1)
sum2 = sum(nums2)
zero1 = nums1.count(0)
zero2 = nums2.count(0)
if zero1 == 0 and sum1 < sum2 + zero2:
return -1
if zero2 == 0 and sum2 < sum1 + zero1:
return -1
return max(sum1 + zero1, sum2 + zero2)
| Solution |
python | PyCQA__pylint | tests/functional/a/assigning/assigning_non_slot.py | {
"start": 705,
"end": 947
} | class ____(Bad):
""" missing not found in slots """
__slots__ = ['component']
def __init__(self):
self.component = 42
self.member = 24
self.missing = 42 # [assigning-non-slot]
super().__init__()
| Bad3 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/airflow/AIR301_context.py | {
"start": 4616,
"end": 4783
} | class ____(BaseOperator):
def execute(self, context):
execution_date = context.get("execution_date")
next_ds = context.get("next_ds")
| CustomOperatorNew |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/utils/test_emailer.py | {
"start": 933,
"end": 2082
} | class ____:
@mock.patch("airflow.providers.amazon.aws.utils.emailer.SesHook")
def test_send_ses_email(self, mock_hook):
send_email(
to="to@test.com",
subject="subject",
html_content="content",
from_email="From Test <from@test.com>",
custom_headers={"X-Test-Header": "test-val"},
)
mock_hook.return_value.send_email.assert_called_once_with(
mail_from="From Test <from@test.com>",
to="to@test.com",
subject="subject",
html_content="content",
bcc=None,
cc=None,
files=None,
mime_charset="utf-8",
mime_subtype="mixed",
custom_headers={"X-Test-Header": "test-val"},
)
@mock.patch("airflow.providers.amazon.aws.utils.emailer.SesHook")
def test_send_ses_email_no_from_mail(self, mock_hook):
with pytest.raises(
RuntimeError, match="The `from_email' configuration has to be set for the SES emailer."
):
send_email(to="to@test.com", subject="subject", html_content="content")
| TestSendEmailSes |
python | python-pillow__Pillow | src/PIL/PngImagePlugin.py | {
"start": 3768,
"end": 4679
} | class ____(IntEnum):
OP_SOURCE = 0
"""
All color components of this frame, including alpha, overwrite the previous output
image contents.
See :ref:`Saving APNG sequences<apng-saving>`.
"""
OP_OVER = 1
"""
This frame should be alpha composited with the previous output image contents.
See :ref:`Saving APNG sequences<apng-saving>`.
"""
def _safe_zlib_decompress(s: bytes) -> bytes:
dobj = zlib.decompressobj()
plaintext = dobj.decompress(s, MAX_TEXT_CHUNK)
if dobj.unconsumed_tail:
msg = "Decompressed data too large for PngImagePlugin.MAX_TEXT_CHUNK"
raise ValueError(msg)
return plaintext
def _crc32(data: bytes, seed: int = 0) -> int:
return zlib.crc32(data, seed) & 0xFFFFFFFF
# --------------------------------------------------------------------
# Support classes. Suitable for PNG and related formats like MNG etc.
| Blend |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py | {
"start": 833,
"end": 2191
} | class ____(CloudEnvironment):
"""VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
# We may be in a container, so we cannot just reach VMWARE_TEST_PLATFORM,
# We do a try/except instead
parser = configparser.ConfigParser()
parser.read(self.config_path) # static
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('DEFAULT', raw=True)))
for key, value in ansible_vars.items():
if key.endswith('_password'):
display.sensitive.add(value)
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
module_defaults={
'group/vmware': {
'hostname': ansible_vars['vcenter_hostname'],
'username': ansible_vars['vcenter_username'],
'password': ansible_vars['vcenter_password'],
'port': ansible_vars.get('vcenter_port', '443'),
'validate_certs': ansible_vars.get('vmware_validate_certs', 'no'),
},
},
)
| VcenterEnvironment |
python | getsentry__sentry | src/sentry/workflow_engine/types.py | {
"start": 11279,
"end": 12139
} | class ____(Generic[T]):
class Group(StrEnum):
DETECTOR_TRIGGER = "detector_trigger"
WORKFLOW_TRIGGER = "workflow_trigger"
ACTION_FILTER = "action_filter"
class Subgroup(StrEnum):
ISSUE_ATTRIBUTES = "issue_attributes"
FREQUENCY = "frequency"
EVENT_ATTRIBUTES = "event_attributes"
group: ClassVar[Group]
subgroup: ClassVar[Subgroup]
comparison_json_schema: ClassVar[dict[str, Any]] = {}
condition_result_schema: ClassVar[dict[str, Any]] = {}
@staticmethod
def evaluate_value(value: T, comparison: Any) -> DataConditionResult:
"""
Evaluate the value of a data condition.
Any error that results in a failure to provide a correct result should
raise a DataConditionEvaluationException.
"""
raise NotImplementedError
| DataConditionHandler |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_redshift_sql.py | {
"start": 1239,
"end": 11172
} | class ____:
def setup_method(self):
self.connection = Connection(
conn_type="redshift",
login=LOGIN_USER,
password=LOGIN_PASSWORD,
host=LOGIN_HOST,
port=LOGIN_PORT,
schema=LOGIN_SCHEMA,
)
self.db_hook = RedshiftSQLHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
def test_get_uri(self):
db_uri = self.db_hook.get_uri()
expected = "postgresql://login:password@host:5439/dev"
assert db_uri == expected
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.redshift_connector.connect")
def test_get_conn(self, mock_connect):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(
user="login", password="password", host="host", port=5439, database="dev"
)
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.redshift_connector.connect")
def test_get_conn_extra(self, mock_connect):
self.connection.extra = json.dumps(
{
"iam": False,
"cluster_identifier": "my-test-cluster",
"profile": "default",
}
)
self.db_hook.get_conn()
mock_connect.assert_called_once_with(
user=LOGIN_USER,
password=LOGIN_PASSWORD,
host=LOGIN_HOST,
port=LOGIN_PORT,
cluster_identifier="my-test-cluster",
profile="default",
database=LOGIN_SCHEMA,
iam=False,
)
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.redshift_connector.connect")
@pytest.mark.parametrize("aws_conn_id", [NOTSET, None, "mock_aws_conn"])
def test_get_conn_iam(self, mock_connect, mock_aws_hook_conn, aws_conn_id):
mock_conn_extra = {"iam": True, "profile": "default", "cluster_identifier": "my-test-cluster"}
if aws_conn_id is not NOTSET:
self.db_hook.aws_conn_id = aws_conn_id
self.connection.extra = json.dumps(mock_conn_extra)
mock_db_user = f"IAM:{self.connection.login}"
mock_db_pass = "aws_token"
# Mock AWS Connection
mock_aws_hook_conn.get_cluster_credentials.return_value = {
"DbPassword": mock_db_pass,
"DbUser": mock_db_user,
}
self.db_hook.get_conn()
# Check boto3 'redshift' client method `get_cluster_credentials` call args
mock_aws_hook_conn.get_cluster_credentials.assert_called_once_with(
DbUser=LOGIN_USER,
DbName=LOGIN_SCHEMA,
ClusterIdentifier="my-test-cluster",
AutoCreate=False,
)
mock_connect.assert_called_once_with(
user=mock_db_user,
password=mock_db_pass,
host=LOGIN_HOST,
port=LOGIN_PORT,
cluster_identifier="my-test-cluster",
profile="default",
database=LOGIN_SCHEMA,
iam=True,
)
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.redshift_connector.connect")
@pytest.mark.parametrize("aws_conn_id", [NOTSET, None, "mock_aws_conn"])
def test_get_conn_iam_serverless_redshift(self, mock_connect, mock_aws_hook_conn, aws_conn_id):
mock_work_group = "my-test-workgroup"
mock_conn_extra = {
"iam": True,
"is_serverless": True,
"profile": "default",
"serverless_work_group": mock_work_group,
}
if aws_conn_id is not NOTSET:
self.db_hook.aws_conn_id = aws_conn_id
self.connection.extra = json.dumps(mock_conn_extra)
mock_db_user = f"IAM:{self.connection.login}"
mock_db_pass = "aws_token"
# Mock AWS Connection
mock_aws_hook_conn.get_credentials.return_value = {
"dbPassword": mock_db_pass,
"dbUser": mock_db_user,
}
self.db_hook.get_conn()
# Check boto3 'redshift' client method `get_cluster_credentials` call args
mock_aws_hook_conn.get_credentials.assert_called_once_with(
dbName=LOGIN_SCHEMA,
workgroupName=mock_work_group,
durationSeconds=3600,
)
mock_connect.assert_called_once_with(
user=mock_db_user,
password=mock_db_pass,
host=LOGIN_HOST,
port=LOGIN_PORT,
serverless_work_group=mock_work_group,
profile="default",
database=LOGIN_SCHEMA,
iam=True,
is_serverless=True,
)
@pytest.mark.parametrize(
("conn_params", "conn_extra", "expected_call_args"),
[
({}, {}, {}),
({"login": "test"}, {}, {"user": "test"}),
({}, {"user": "test"}, {"user": "test"}),
({"login": "original"}, {"user": "overridden"}, {"user": "overridden"}),
({"login": "test1"}, {"password": "test2"}, {"user": "test1", "password": "test2"}),
],
)
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.redshift_connector.connect")
def test_get_conn_overrides_correctly(self, mock_connect, conn_params, conn_extra, expected_call_args):
with mock.patch(
"airflow.providers.amazon.aws.hooks.redshift_sql.RedshiftSQLHook.conn",
Connection(conn_type="redshift", extra=conn_extra, **conn_params),
):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(**expected_call_args)
@pytest.mark.parametrize(
("connection_host", "connection_extra", "expected_cluster_identifier", "expected_exception_msg"),
[
# test without a connection host and without a cluster_identifier in connection extra
(None, {"iam": True}, None, "Please set cluster_identifier or host in redshift connection."),
# test without a connection host but with a cluster_identifier in connection extra
(
None,
{"iam": True, "cluster_identifier": "cluster_identifier_from_extra"},
"cluster_identifier_from_extra",
None,
),
# test with a connection host and without a cluster_identifier in connection extra
("cluster_identifier_from_host.x.y", {"iam": True}, "cluster_identifier_from_host", None),
# test with both connection host and cluster_identifier in connection extra
(
"cluster_identifier_from_host.x.y",
{"iam": True, "cluster_identifier": "cluster_identifier_from_extra"},
"cluster_identifier_from_extra",
None,
),
],
)
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_sql.redshift_connector.connect")
def test_get_iam_token(
self,
mock_connect,
mock_aws_hook_conn,
connection_host,
connection_extra,
expected_cluster_identifier,
expected_exception_msg,
):
self.connection.host = connection_host
self.connection.extra = json.dumps(connection_extra)
mock_db_user = f"IAM:{self.connection.login}"
mock_db_pass = "aws_token"
# Mock AWS Connection
mock_aws_hook_conn.get_cluster_credentials.return_value = {
"DbPassword": mock_db_pass,
"DbUser": mock_db_user,
}
if expected_exception_msg is not None:
with pytest.raises(AirflowException, match=expected_exception_msg):
self.db_hook.get_conn()
else:
self.db_hook.get_conn()
mock_aws_hook_conn.get_cluster_credentials.assert_called_once_with(
DbUser=LOGIN_USER,
DbName=LOGIN_SCHEMA,
ClusterIdentifier=expected_cluster_identifier,
AutoCreate=False,
)
@mock.patch.dict("os.environ", AIRFLOW_CONN_AWS_DEFAULT=f"aws://?region_name={MOCK_REGION_NAME}")
@pytest.mark.parametrize(
("connection_host", "connection_extra", "expected_identity"),
[
# test without a connection host but with a cluster_identifier in connection extra
(
None,
{"iam": True, "cluster_identifier": "cluster_identifier_from_extra"},
f"cluster_identifier_from_extra.{MOCK_REGION_NAME}",
),
# test with a connection host and without a cluster_identifier in connection extra
(
"cluster_identifier_from_host.id.my_region.redshift.amazonaws.com",
{"iam": True},
"cluster_identifier_from_host.my_region",
),
# test with both connection host and cluster_identifier in connection extra
(
"cluster_identifier_from_host.x.y",
{"iam": True, "cluster_identifier": "cluster_identifier_from_extra"},
f"cluster_identifier_from_extra.{MOCK_REGION_NAME}",
),
# test when hostname doesn't match pattern
(
"1.2.3.4",
{},
"1.2.3.4",
),
],
)
def test_get_openlineage_redshift_authority_part(
self,
connection_host,
connection_extra,
expected_identity,
):
self.connection.host = connection_host
self.connection.extra = json.dumps(connection_extra)
assert f"{expected_identity}:{LOGIN_PORT}" == self.db_hook._get_openlineage_redshift_authority_part(
self.connection
)
| TestRedshiftSQLHookConn |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property5.py | {
"start": 416,
"end": 541
} | class ____(Foo[NewInt]):
def fizz(self) -> None:
self.bar.new_thing()
self.bar_method().new_thing()
| FooNewInt |
python | doocs__leetcode | solution/1300-1399/1360.Number of Days Between Two Dates/Solution.py | {
"start": 0,
"end": 980
} | class ____:
def daysBetweenDates(self, date1: str, date2: str) -> int:
def isLeapYear(year: int) -> bool:
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def daysInMonth(year: int, month: int) -> int:
days = [
31,
28 + int(isLeapYear(year)),
31,
30,
31,
30,
31,
31,
30,
31,
30,
31,
]
return days[month - 1]
def calcDays(date: str) -> int:
year, month, day = map(int, date.split("-"))
days = 0
for y in range(1971, year):
days += 365 + int(isLeapYear(y))
for m in range(1, month):
days += daysInMonth(year, m)
days += day
return days
return abs(calcDays(date1) - calcDays(date2))
| Solution |
python | Textualize__textual | src/textual/widgets/_sparkline.py | {
"start": 463,
"end": 3847
} | class ____(Widget):
"""A sparkline widget to display numerical data."""
COMPONENT_CLASSES: ClassVar[set[str]] = {
"sparkline--max-color",
"sparkline--min-color",
}
"""
Use these component classes to define the two colors that the sparkline
interpolates to represent its numerical data.
Note:
These two component classes are used exclusively for the _color_ of the
sparkline widget. Setting any style other than [`color`](/styles/color.md)
will have no effect.
| Class | Description |
| :- | :- |
| `sparkline--max-color` | The color used for the larger values in the data. |
| `sparkline--min-color` | The color used for the smaller values in the data. |
"""
DEFAULT_CSS = """
Sparkline {
height: 1;
}
Sparkline > .sparkline--max-color {
color: $primary;
}
Sparkline > .sparkline--min-color {
color: $primary 30%;
}
"""
data = reactive[Optional[Sequence[float]]](None)
"""The data that populates the sparkline."""
summary_function = reactive[Callable[[Sequence[float]], float]](_max_factory)
"""The function that computes the value that represents each bar."""
def __init__(
self,
data: Sequence[float] | None = None,
*,
min_color: Color | str | None = None,
max_color: Color | str | None = None,
summary_function: Callable[[Sequence[float]], float] | None = None,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
) -> None:
"""Initialize a sparkline widget.
Args:
data: The initial data to populate the sparkline with.
min_color: The color of the minimum value, or `None` to take from CSS.
max_color: the color of the maximum value, or `None` to take from CSS.
summary_function: Summarizes bar values into a single value used to
represent each bar.
name: The name of the widget.
id: The ID of the widget in the DOM.
classes: The CSS classes for the widget.
disabled: Whether the widget is disabled or not.
"""
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
self.min_color = None if min_color is None else Color.parse(min_color)
self.max_color = None if max_color is None else Color.parse(max_color)
self.data = data
if summary_function is not None:
self.summary_function = summary_function
def render(self) -> RenderResult:
"""Renders the sparkline when there is data available."""
data = self.data or []
_, base = self.background_colors
min_color = base + (
self.get_component_styles("sparkline--min-color").color
if self.min_color is None
else self.min_color
)
max_color = base + (
self.get_component_styles("sparkline--max-color").color
if self.max_color is None
else self.max_color
)
return SparklineRenderable(
data,
width=self.size.width,
min_color=min_color.rich_color,
max_color=max_color.rich_color,
summary_function=self.summary_function,
)
| Sparkline |
python | allegroai__clearml | clearml/backend_interface/task/repo/detectors.py | {
"start": 12036,
"end": 13553
} | class ____(Detector):
def __init__(self, type_name: str) -> None:
super(EnvDetector, self).__init__(type_name, "{} environment".format(type_name))
def _is_repo_type(self, script_path: str) -> bool:
return VCS_REPO_TYPE.get().lower() == self.type_name and bool(VCS_REPOSITORY_URL.get())
@staticmethod
def _normalize_root(root: str) -> str:
"""
Convert to absolute and squash 'path/../folder'
"""
# noinspection PyBroadException
try:
return os.path.abspath((Path.cwd() / root).absolute().as_posix())
except Exception:
return Path.cwd()
def _get_info(
self,
_: Any,
include_diff: bool = False,
diff_from_remote: bool = None,
) -> Result:
repository_url = VCS_REPOSITORY_URL.get()
if not repository_url:
raise DetectionError("No VCS environment data")
status = VCS_STATUS.get() or ""
diff = VCS_DIFF.get() or ""
modified = bool(diff or (status and [s for s in status.split("\n") if s.strip().startswith("M ")]))
if modified and not diff:
diff = "# Repository modified, but no git diff could be extracted."
return Result(
url=repository_url,
branch=VCS_BRANCH.get(),
commit=VCS_COMMIT_ID.get(),
root=VCS_ROOT.get(converter=self._normalize_root),
status=status,
diff=diff,
modified=modified,
)
| EnvDetector |
python | huggingface__transformers | src/transformers/models/glm4_moe/modular_glm4_moe.py | {
"start": 12667,
"end": 12761
} | class ____(DeepseekV3PreTrainedModel):
_can_compile_fullgraph = False
| Glm4MoePreTrainedModel |
python | weaviate__weaviate-python-client | weaviate/collections/classes/filters.py | {
"start": 3217,
"end": 3576
} | class ____(_Filters):
def __init__(self, filters: List[_Filters]):
self.filters: List[_Filters] = filters
# replace with the following once 3.11 is the minimum version
# Operator: weaviate_pb2.Filters.OperatorType = weaviate_pb2.Filters.OperatorOr
@property
def operator(self) -> _Operator:
return _Operator.OR
| _FilterOr |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 578511,
"end": 578909
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("DiscussionComment", graphql_name="node")
"""The item at the end of the edge."""
| DiscussionCommentEdge |
python | getsentry__sentry | src/sentry/preprod/analytics.py | {
"start": 1970,
"end": 2195
} | class ____(analytics.Event):
organization_id: int
project_id: int
user_id: int | None = None
artifact_id: str
@analytics.eventclass("preprod_artifact.api.size_analysis_compare.get")
| PreprodArtifactApiDeleteEvent |
python | mahmoud__glom | glom/core.py | {
"start": 47409,
"end": 60981
} | class ____:
"""``T``, short for "target". A singleton object that enables
object-oriented expression of a glom specification.
.. note::
``T`` is a singleton, and does not need to be constructed.
Basically, think of ``T`` as your data's stunt double. Everything
that you do to ``T`` will be recorded and executed during the
:func:`glom` call. Take this example:
>>> spec = T['a']['b']['c']
>>> target = {'a': {'b': {'c': 'd'}}}
>>> glom(target, spec)
'd'
So far, we've relied on the ``'a.b.c'``-style shorthand for
access, or used the :class:`~glom.Path` objects, but if you want
to explicitly do attribute and key lookups, look no further than
``T``.
But T doesn't stop with unambiguous access. You can also call
methods and perform almost any action you would with a normal
object:
>>> spec = ('a', (T['b'].items(), list)) # reviewed below
>>> glom(target, spec)
[('c', 'd')]
A ``T`` object can go anywhere in the spec. As seen in the example
above, we access ``'a'``, use a ``T`` to get ``'b'`` and iterate
over its ``items``, turning them into a ``list``.
You can even use ``T`` with :class:`~glom.Call` to construct objects:
>>> class ExampleClass(object):
... def __init__(self, attr):
... self.attr = attr
...
>>> target = {'attr': 3.14}
>>> glom(target, Call(ExampleClass, kwargs=T)).attr
3.14
On a further note, while ``lambda`` works great in glom specs, and
can be very handy at times, ``T`` and :class:`~glom.Call`
eliminate the need for the vast majority of ``lambda`` usage with
glom.
Unlike ``lambda`` and other functions, ``T`` roundtrips
beautifully and transparently:
>>> T['a'].b['c']('success')
T['a'].b['c']('success')
``T``-related access errors raise a :exc:`~glom.PathAccessError`
during the :func:`~glom.glom` call.
.. note::
While ``T`` is clearly useful, powerful, and here to stay, its
semantics are still being refined. Currently, operations beyond
method calls and attribute/item access are considered
experimental and should not be relied upon.
.. note::
``T`` attributes starting with __ are reserved to avoid
colliding with many built-in Python behaviors, current and
future. The ``T.__()`` method is available for cases where
they are needed. For example, ``T.__('class__')`` is
equivalent to accessing the ``__class__`` attribute.
"""
__slots__ = ('__ops__',)
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError('T instances reserve dunder attributes.'
' To access the "{name}" attribute, use'
' T.__("{d_name}")'.format(name=name, d_name=name[2:]))
return _t_child(self, '.', name)
def __getitem__(self, item):
return _t_child(self, '[', item)
def __call__(self, *args, **kwargs):
if self is S:
if args:
raise TypeError(f'S() takes no positional arguments, got: {args!r}')
if not kwargs:
raise TypeError('S() expected at least one kwarg, got none')
# TODO: typecheck kwarg vals?
return _t_child(self, '(', (args, kwargs))
def __star__(self):
return _t_child(self, 'x', None)
def __starstar__(self):
return _t_child(self, 'X', None)
def __stars__(self):
"""how many times the result will be wrapped in extra lists"""
t_ops = self.__ops__[1::2]
return t_ops.count('x') + t_ops.count('X')
def __add__(self, arg):
return _t_child(self, '+', arg)
def __sub__(self, arg):
return _t_child(self, '-', arg)
def __mul__(self, arg):
return _t_child(self, '*', arg)
def __floordiv__(self, arg):
return _t_child(self, '#', arg)
def __truediv__(self, arg):
return _t_child(self, '/', arg)
__div__ = __truediv__
def __mod__(self, arg):
return _t_child(self, '%', arg)
def __pow__(self, arg):
return _t_child(self, ':', arg)
def __and__(self, arg):
return _t_child(self, '&', arg)
def __or__(self, arg):
return _t_child(self, '|', arg)
def __xor__(self, arg):
return _t_child(self, '^', arg)
def __invert__(self):
return _t_child(self, '~', None)
def __neg__(self):
return _t_child(self, '_', None)
def __(self, name):
return _t_child(self, '.', '__' + name)
def __repr__(self):
t_path = self.__ops__
return _format_t(t_path[1:], t_path[0])
def __getstate__(self):
t_path = self.__ops__
return tuple(({T: 'T', S: 'S', A: 'A'}[t_path[0]],) + t_path[1:])
def __setstate__(self, state):
self.__ops__ = ({'T': T, 'S': S, 'A': A}[state[0]],) + state[1:]
def _t_child(parent, operation, arg):
base = parent.__ops__
if base[0] is A and operation not in ('.', '[', 'P'):
# whitelist rather than blacklist assignment friendly operations
# TODO: error type?
raise BadSpec("operation not allowed on A assignment path")
t = TType()
t.__ops__ = base + (operation, arg)
return t
def _s_first_magic(scope, key, _t):
"""
enable S.a to do S['a'] or S['a'].val as a special
case for accessing user defined string variables
"""
err = None
try:
cur = scope[key]
except KeyError as e:
err = PathAccessError(e, Path(_t), 0) # always only one level depth, hence 0
if err:
raise err
return cur
def _t_eval(target, _t, scope):
t_path = _t.__ops__
i = 1
fetch_till = len(t_path)
root = t_path[0]
if root is T:
cur = target
elif root is S or root is A:
# A is basically the same as S, but last step is assign
if root is A:
fetch_till -= 2
if fetch_till < 1:
raise BadSpec('cannot assign without destination')
cur = scope
if fetch_till > 1 and t_path[1] in ('.', 'P'):
cur = _s_first_magic(cur, t_path[2], _t)
i += 2
elif root is S and fetch_till > 1 and t_path[1] == '(':
# S(var='spec') style assignment
_, kwargs = t_path[2]
scope.update({
k: arg_val(target, v, scope) for k, v in kwargs.items()})
return target
else:
raise ValueError('TType instance with invalid root') # pragma: no cover
pae = None
while i < fetch_till:
op, arg = t_path[i], t_path[i + 1]
arg = arg_val(target, arg, scope)
if op == '.':
try:
cur = getattr(cur, arg)
except AttributeError as e:
pae = PathAccessError(e, Path(_t), i // 2)
elif op == '[':
try:
cur = cur[arg]
except (KeyError, IndexError, TypeError) as e:
pae = PathAccessError(e, Path(_t), i // 2)
elif op == 'P':
# Path type stuff (fuzzy match)
get = scope[TargetRegistry].get_handler('get', cur, path=t_path[2:i+2:2])
try:
cur = get(cur, arg)
except Exception as e:
pae = PathAccessError(e, Path(_t), i // 2)
elif op in 'xX':
nxt = []
get_handler = scope[TargetRegistry].get_handler
if op == 'x': # increases arity of cur each time through
# TODO: so many try/except -- could scope[TargetRegistry] stuff be cached on type?
_extend_children(nxt, cur, get_handler)
elif op == 'X':
sofar = set()
_extend_children(nxt, cur, get_handler)
for item in nxt:
if id(item) not in sofar:
sofar.add(id(item))
_extend_children(nxt, item, get_handler)
nxt.insert(0, cur)
# handle the rest of the t_path in recursive calls
cur = []
todo = TType()
todo.__ops__ = (root,) + t_path[i+2:]
for child in nxt:
try:
cur.append(_t_eval(child, todo, scope))
except PathAccessError:
pass
break # we handled the rest in recursive call, break loop
elif op == '(':
args, kwargs = arg
scope[Path] += t_path[2:i+2:2]
cur = scope[glom](
target, Call(cur, args, kwargs), scope)
# call with target rather than cur,
# because it is probably more intuitive
# if args to the call "reset" their path
# e.g. "T.a" should mean the same thing
# in both of these specs: T.a and T.b(T.a)
else: # arithmetic operators
try:
if op == '+':
cur = cur + arg
elif op == '-':
cur = cur - arg
elif op == '*':
cur = cur * arg
#elif op == '#':
# cur = cur // arg # TODO: python 2 friendly approach?
elif op == '/':
cur = cur / arg
elif op == '%':
cur = cur % arg
elif op == ':':
cur = cur ** arg
elif op == '&':
cur = cur & arg
elif op == '|':
cur = cur | arg
elif op == '^':
cur = cur ^ arg
elif op == '~':
cur = ~cur
elif op == '_':
cur = -cur
except (TypeError, ZeroDivisionError) as e:
pae = PathAccessError(e, Path(_t), i // 2)
if pae:
raise pae
i += 2
if root is A:
op, arg = t_path[-2:]
if cur is scope:
op = '[' # all assignment on scope is setitem
_assign_op(dest=cur, op=op, arg=arg, val=target, path=_t, scope=scope)
return target # A should not change the target
return cur
def _assign_op(dest, op, arg, val, path, scope):
"""helper method for doing the assignment on a T operation"""
if op == '[':
dest[arg] = val
elif op == '.':
setattr(dest, arg, val)
elif op == 'P':
_assign = scope[TargetRegistry].get_handler('assign', dest)
try:
_assign(dest, arg, val)
except Exception as e:
raise PathAssignError(e, path, arg)
else: # pragma: no cover
raise ValueError('unsupported T operation for assignment')
def _extend_children(children, item, get_handler):
try: # dict or obj-like
keys = get_handler('keys', item)
get = get_handler('get', item)
except UnregisteredTarget:
try:
iterate = get_handler('iterate', item)
except UnregisteredTarget:
pass
else:
try: # list-like
children.extend(iterate(item))
except Exception:
pass
else:
try:
for key in keys(item):
try:
children.append(get(item, key))
except Exception:
pass
except Exception:
pass
T = TType() # target aka Mr. T aka "this"
S = TType() # like T, but means grab stuff from Scope, not Target
A = TType() # like S, but shorthand to assign target to scope
T.__ops__ = (T,)
S.__ops__ = (S,)
A.__ops__ = (A,)
_T_STAR = T.__star__() # helper constant for Path.from_text
_T_STARSTAR = T.__starstar__() # helper constant for Path.from_text
UP = make_sentinel('UP')
ROOT = make_sentinel('ROOT')
def _format_slice(x):
if type(x) is not slice:
return bbrepr(x)
fmt = lambda v: "" if v is None else bbrepr(v)
if x.step is None:
return fmt(x.start) + ":" + fmt(x.stop)
return fmt(x.start) + ":" + fmt(x.stop) + ":" + fmt(x.step)
def _format_t(path, root=T):
prepr = [{T: 'T', S: 'S', A: 'A'}[root]]
i = 0
while i < len(path):
op, arg = path[i], path[i + 1]
if op == '.':
prepr.append('.' + arg)
elif op == '[':
if type(arg) is tuple:
index = ", ".join([_format_slice(x) for x in arg])
else:
index = _format_slice(arg)
prepr.append(f"[{index}]")
elif op == '(':
args, kwargs = arg
prepr.append(format_invocation(args=args, kwargs=kwargs, repr=bbrepr))
elif op == 'P':
return _format_path(path)
elif op == 'x':
prepr.append(".__star__()")
elif op == 'X':
prepr.append(".__starstar__()")
elif op in ('_', '~'): # unary arithmetic operators
if any([o in path[:i] for o in '+-/%:&|^~_']):
prepr = ['('] + prepr + [')']
prepr = ['-' if op == '_' else op] + prepr
else: # binary arithmetic operators
formatted_arg = bbrepr(arg)
if type(arg) is TType:
arg_path = arg.__ops__
if any([o in arg_path for o in '+-/%:&|^~_']):
formatted_arg = '(' + formatted_arg + ')'
prepr.append(' ' + ('**' if op == ':' else op) + ' ')
prepr.append(formatted_arg)
i += 2
return "".join(prepr)
| TType |
python | pallets__jinja | tests/test_api.py | {
"start": 15601,
"end": 16861
} | class ____:
def test_custom_code_generator(self):
class CustomCodeGenerator(CodeGenerator):
def visit_Const(self, node, frame=None):
# This method is pure nonsense, but works fine for testing...
if node.value == "foo":
self.write(repr("bar"))
else:
super().visit_Const(node, frame)
class CustomEnvironment(Environment):
code_generator_class = CustomCodeGenerator
env = CustomEnvironment()
tmpl = env.from_string('{% set foo = "foo" %}{{ foo }}')
assert tmpl.render() == "bar"
def test_custom_context(self):
class CustomContext(Context):
def resolve_or_missing(self, key):
return "resolve-" + key
class CustomEnvironment(Environment):
context_class = CustomContext
env = CustomEnvironment()
tmpl = env.from_string("{{ foo }}")
assert tmpl.render() == "resolve-foo"
def test_overlay_enable_async(env):
assert not env.is_async
assert not env.overlay().is_async
env_async = env.overlay(enable_async=True)
assert env_async.is_async
assert not env_async.overlay(enable_async=False).is_async
| TestLowLevel |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-elevenlabs/llama_index/tools/elevenlabs/base.py | {
"start": 143,
"end": 3724
} | class ____(BaseToolSpec):
"""ElevenLabs tool spec for text-to-speech synthesis."""
spec_functions = ["get_voices", "text_to_speech"]
def __init__(
self, api_key: str, base_url: Optional[str] = "https://api.elevenlabs.io"
) -> None:
"""
Initialize with parameters.
Args:
api_key (str): Your ElevenLabs API key
base_url (Optional[str]): The base url of elevenlabs
"""
self.api_key = api_key
self.base_url = base_url
def get_voices(self) -> List[dict]:
"""
Get list of available voices from ElevenLabs.
Returns:
List[dict]: List of available voices with their details
"""
from elevenlabs import ElevenLabs
# Create the client
client = ElevenLabs(base_url=self.base_url, api_key=self.api_key)
# Get the voices
response = client.voices.get_all()
# Return the dumped voice models as dict
return [voice.model_dump() for voice in response.voices]
def text_to_speech(
self,
text: str,
output_path: str,
voice_id: Optional[str] = None,
voice_stability: Optional[float] = None,
voice_similarity_boost: Optional[float] = None,
voice_style: Optional[float] = None,
voice_use_speaker_boost: Optional[bool] = None,
model_id: Optional[str] = "eleven_monolingual_v1",
) -> str:
"""
Convert text to speech using ElevenLabs API.
Args:
text (str): The text to convert to speech
output_path (str): Where to save the output file
output_path (str): Path to save the audio file. If None, generates one
voice_id (Optional[str]): Override the default voice ID
voice_stability (Optional[float]): The stability setting of the voice
voice_similarity_boost (Optional[float]): The similarity boost setting of the voice
voice_style: (Optional[float]): The style setting of the voice
voice_use_speaker_boost (Optional[bool]): Whether to use speaker boost or not
model_id (Optional[str]): Override the default model ID
Returns:
str: Path to the generated audio file
"""
from elevenlabs import ElevenLabs, VoiceSettings
from elevenlabs.client import DEFAULT_VOICE
# Create client
client = ElevenLabs(base_url=self.base_url, api_key=self.api_key)
# Default the settings if not supplied
if voice_stability is None:
voice_stability = DEFAULT_VOICE.settings.stability
if voice_similarity_boost is None:
voice_similarity_boost = DEFAULT_VOICE.settings.similarity_boost
if voice_style is None:
voice_style = DEFAULT_VOICE.settings.style
if voice_use_speaker_boost is None:
voice_use_speaker_boost = DEFAULT_VOICE.settings.use_speaker_boost
# Create the VoiceSettings
voice_settings = VoiceSettings(
stability=voice_stability,
similarity_boost=voice_similarity_boost,
style=voice_style,
use_speaker_boost=voice_use_speaker_boost,
)
# Generate audio
audio = client.generate(
text=text, voice=voice_id, voice_settings=voice_settings, model=model_id
)
# Save the audio
with open(output_path, "wb") as fp:
fp.write(b"".join(audio))
# Return the save location
return output_path
| ElevenLabsToolSpec |
python | doocs__leetcode | solution/2500-2599/2553.Separate the Digits in an Array/Solution.py | {
"start": 0,
"end": 265
} | class ____:
def separateDigits(self, nums: List[int]) -> List[int]:
ans = []
for x in nums:
t = []
while x:
t.append(x % 10)
x //= 10
ans.extend(t[::-1])
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/swinv2/modeling_swinv2.py | {
"start": 1730,
"end": 2769
} | class ____(ModelOutput):
r"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Swinv2 model's outputs that also contains a pooling of the last hidden states.
"""
)
# Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->Swinv2
| Swinv2EncoderOutput |
python | pytorch__pytorch | torch/utils/backend_registration.py | {
"start": 19593,
"end": 19970
} | class ____:
def is_initialized(self) -> bool:
return True
def is_available(self) -> bool:
return True
def current_device(self) -> int:
return 0
def _is_in_bad_fork(self) -> bool:
return False
def manual_seed_all(self, seed: int) -> None:
pass
def device_count(self) -> int:
return 1
| _DummyBackendModule |
python | huggingface__transformers | src/transformers/models/convnextv2/modeling_convnextv2.py | {
"start": 10678,
"end": 11306
} | class ____(PreTrainedModel):
config: ConvNextV2Config
base_model_prefix = "convnextv2"
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = ["ConvNextV2Layer"]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, ConvNextV2GRN):
init.zeros_(module.weight)
init.zeros_(module.bias)
@auto_docstring
# Copied from transformers.models.convnext.modeling_convnext.ConvNextModel with CONVNEXT->CONVNEXTV2, ConvNext->ConvNextV2
| ConvNextV2PreTrainedModel |
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 37020,
"end": 37523
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.linear1 = nn.Linear(config.class_embed_dim, config.task_encoder_hidden_dim)
self.activation = ACT2FN[config.decoder_activation]
self.dropout = nn.Dropout(config.decoder_dropout)
self.linear2 = nn.Linear(config.task_encoder_hidden_dim, config.class_embed_dim)
def forward(self, x):
return self.linear2(self.dropout(self.activation(self.linear1(x))))
| OmDetTurboMLPWithDropout |
python | plotly__plotly.py | plotly/graph_objs/contour/legendgrouptitle/_font.py | {
"start": 233,
"end": 9927
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour.legendgrouptitle"
_path_str = "contour.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.contour.legend
grouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | wandb__wandb | wandb/sdk/launch/environment/aws_environment.py | {
"start": 663,
"end": 12522
} | class ____(AbstractEnvironment):
"""AWS environment."""
def __init__(
self,
region: str,
access_key: str,
secret_key: str,
session_token: str,
) -> None:
"""Initialize the AWS environment.
Arguments:
region (str): The AWS region.
Raises:
LaunchError: If the AWS environment is not configured correctly.
"""
super().__init__()
_logger.info(f"Initializing AWS environment in region {region}.")
self._region = region
self._access_key = access_key
self._secret_key = secret_key
self._session_token = session_token
self._account = None
self._partition = None
@classmethod
def from_default(cls, region: Optional[str] = None) -> "AwsEnvironment":
"""Create an AWS environment from the default AWS environment.
Arguments:
region (str, optional): The AWS region.
verify (bool, optional): Whether to verify the AWS environment. Defaults to True.
Returns:
AwsEnvironment: The AWS environment.
"""
_logger.info("Creating AWS environment from default credentials.")
try:
session = boto3.Session()
if hasattr(session, "region"):
region = region or session.region
region = region or os.environ.get("AWS_REGION")
credentials = session.get_credentials()
if not credentials:
raise LaunchError(
"Could not create AWS environment from default environment. Please verify that your AWS credentials are configured correctly."
)
access_key = credentials.access_key
secret_key = credentials.secret_key
session_token = credentials.token
except botocore.client.ClientError as e:
raise LaunchError(
f"Could not create AWS environment from default environment. Please verify that your AWS credentials are configured correctly. {e}"
)
if not region:
raise LaunchError(
"Could not create AWS environment from default environment. Region not specified."
)
return cls(
region=region,
access_key=access_key,
secret_key=secret_key,
session_token=session_token,
)
@classmethod
def from_config(
cls,
config: Dict[str, str],
) -> "AwsEnvironment":
"""Create an AWS environment from the default AWS environment.
Arguments:
config (dict): Configuration dictionary.
verify (bool, optional): Whether to verify the AWS environment. Defaults to True.
Returns:
AwsEnvironment: The AWS environment.
"""
region = str(config.get("region", ""))
if not region:
raise LaunchError(
"Could not create AWS environment from config. Region not specified."
)
return cls.from_default(
region=region,
)
@property
def region(self) -> str:
"""The AWS region."""
return self._region
@region.setter
def region(self, region: str) -> None:
self._region = region
async def get_partition(self) -> str:
"""Set the partition for the AWS environment."""
try:
session = await self.get_session()
client = await event_loop_thread_exec(session.client)("sts")
get_caller_identity = event_loop_thread_exec(client.get_caller_identity)
identity = await get_caller_identity()
arn = identity.get("Arn")
if not arn:
raise LaunchError(
"Could not set partition for AWS environment. ARN not found."
)
matched_partition = ARN_PARTITION_RE.match(arn)
if not matched_partition:
raise LaunchError(
f"Could not set partition for AWS environment. ARN {arn} is not valid."
)
partition = matched_partition.group(1)
return partition
except botocore.exceptions.ClientError as e:
raise LaunchError(
f"Could not set partition for AWS environment. {e}"
) from e
async def verify(self) -> None:
"""Verify that the AWS environment is configured correctly.
Raises:
LaunchError: If the AWS environment is not configured correctly.
"""
_logger.debug("Verifying AWS environment.")
try:
session = await self.get_session()
client = await event_loop_thread_exec(session.client)("sts")
get_caller_identity = event_loop_thread_exec(client.get_caller_identity)
self._account = (await get_caller_identity()).get("Account")
# TODO: log identity details from the response
except botocore.exceptions.ClientError as e:
raise LaunchError(
f"Could not verify AWS environment. Please verify that your AWS credentials are configured correctly. {e}"
) from e
async def get_session(self) -> "boto3.Session": # type: ignore
"""Get an AWS session.
Returns:
boto3.Session: The AWS session.
Raises:
LaunchError: If the AWS session could not be created.
"""
_logger.debug(f"Creating AWS session in region {self._region}")
try:
session = event_loop_thread_exec(boto3.Session)
return await session(
region_name=self._region,
aws_access_key_id=self._access_key,
aws_secret_access_key=self._secret_key,
aws_session_token=self._session_token,
)
except botocore.exceptions.ClientError as e:
raise LaunchError(f"Could not create AWS session. {e}")
async def upload_file(self, source: str, destination: str) -> None:
"""Upload a file to s3 from local storage.
The destination is a valid s3 URI, e.g. s3://bucket/key and will
be used as a prefix for the uploaded file. Only the filename of the source
is kept in the upload key. So if the source is "foo/bar" and the
destination is "s3://bucket/key", the file "foo/bar" will be uploaded
to "s3://bucket/key/bar".
Arguments:
source (str): The path to the file or directory.
destination (str): The uri of the storage destination. This should
be a valid s3 URI, e.g. s3://bucket/key.
Raises:
LaunchError: If the copy fails, the source path does not exist, or the
destination is not a valid s3 URI, or the upload fails.
"""
_logger.debug(f"Uploading {source} to {destination}")
_err_prefix = f"Error attempting to copy {source} to {destination}."
if not os.path.isfile(source):
raise LaunchError(f"{_err_prefix}: Source {source} does not exist.")
match = S3_URI_RE.match(destination)
if not match:
raise LaunchError(
f"{_err_prefix}: Destination {destination} is not a valid s3 URI."
)
bucket = match.group(1)
key = match.group(2).lstrip("/")
if not key:
key = ""
session = await self.get_session()
try:
client = await event_loop_thread_exec(session.client)("s3")
client.upload_file(source, bucket, key)
except botocore.exceptions.ClientError as e:
raise LaunchError(
f"{_err_prefix}: botocore error attempting to copy {source} to {destination}. {e}"
)
async def upload_dir(self, source: str, destination: str) -> None:
"""Upload a directory to s3 from local storage.
The upload will place the contents of the source directory in the destination
with the same directory structure. So if the source is "foo/bar" and the
destination is "s3://bucket/key", the contents of "foo/bar" will be uploaded
to "s3://bucket/key/bar".
Arguments:
source (str): The path to the file or directory.
destination (str): The URI of the storage.
recursive (bool, optional): If True, copy the directory recursively. Defaults to False.
Raises:
LaunchError: If the copy fails, the source path does not exist, or the
destination is not a valid s3 URI.
"""
_logger.debug(f"Uploading {source} to {destination}")
_err_prefix = f"Error attempting to copy {source} to {destination}."
if not os.path.isdir(source):
raise LaunchError(f"{_err_prefix}: Source {source} does not exist.")
match = S3_URI_RE.match(destination)
if not match:
raise LaunchError(
f"{_err_prefix}: Destination {destination} is not a valid s3 URI."
)
bucket = match.group(1)
key = match.group(2).lstrip("/")
if not key:
key = ""
session = await self.get_session()
try:
client = await event_loop_thread_exec(session.client)("s3")
for path, _, files in os.walk(source):
for file in files:
abs_path = os.path.join(path, file)
key_path = (
abs_path.replace(source, "").replace("\\", "/").lstrip("/")
)
client.upload_file(
abs_path,
bucket,
key_path,
)
except botocore.exceptions.ClientError as e:
raise LaunchError(
f"{_err_prefix}: botocore error attempting to copy {source} to {destination}. {e}"
) from e
except Exception as e:
raise LaunchError(
f"{_err_prefix}: Unexpected error attempting to copy {source} to {destination}. {e}"
) from e
async def verify_storage_uri(self, uri: str) -> None:
"""Verify that s3 storage is configured correctly.
This will check that the bucket exists and that the credentials are
configured correctly.
Arguments:
uri (str): The URI of the storage.
Raises:
LaunchError: If the storage is not configured correctly or the URI is
not a valid s3 URI.
Returns:
None
"""
_logger.debug(f"Verifying storage {uri}")
match = S3_URI_RE.match(uri)
if not match:
raise LaunchError(
f"Failed to validate storage uri: {uri} is not a valid s3 URI."
)
bucket = match.group(1)
try:
session = await self.get_session()
client = await event_loop_thread_exec(session.client)("s3")
client.head_bucket(Bucket=bucket)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "404":
raise LaunchError(
f"Could not verify AWS storage uri {uri}. Bucket {bucket} does not exist."
)
if e.response["Error"]["Code"] == "403":
raise LaunchError(
f"Could not verify AWS storage uri {uri}. "
"Bucket {bucket} is not accessible. Please check that this "
"client is authenticated with permission to access the bucket."
)
raise LaunchError(
f"Failed to verify AWS storage uri {uri}. Response: {e.response} Please verify that your AWS credentials are configured correctly."
)
| AwsEnvironment |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 197055,
"end": 200724
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[4, 3]"):
l_x_ = L_x_
tensor: "i64[1]" = torch.tensor((12,))
cumsum: "i64[1]" = tensor.cumsum(dim = 0); tensor = None
getitem: "i64[0]" = cumsum[slice(None, -1, None)]; cumsum = None
neg: "i64[0]" = getitem.neg(); getitem = None
unbind = neg.unbind(); neg = unbind = None
chunk: "f32[12, 12]" = l_x_.new_zeros(12, 12)
diagonal: "f32[12]" = chunk.diagonal(0)
fill_: "f32[12]" = diagonal.fill_(1); diagonal = fill_ = None
child: "f32[12, 4, 3]" = chunk.view(12, 4, 3); chunk = None
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(12, 'error'); _vmap_increment_nesting = None
child_1: "f32[4, 3]" = torch._functorch.predispatch._add_batch_dim(child, 0, 1); child = None
_jvp_increment_nesting = torch._C._functorch._jvp_increment_nesting(); _jvp_increment_nesting = None
_set_fwd_grad_enabled = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled = None
_enter_dual_level = torch._C._enter_dual_level(); _enter_dual_level = None
_maybe_load_decompositions = torch.autograd.forward_ad._maybe_load_decompositions(); _maybe_load_decompositions = None
_make_dual: "f32[4, 3]" = torch._make_dual(l_x_, child_1, level = 0); child_1 = None
_wrap_for_grad: "f32[4, 3]" = torch._C._functorch._wrap_for_grad(l_x_, 2); l_x_ = _wrap_for_grad = None
result_duals: "f32[4, 3]" = torch.sin(_make_dual); _make_dual = None
_unpack_dual = torch._unpack_dual(result_duals, level = 0); result_duals = None
primal: "f32[4, 3]" = _unpack_dual[0]
dual: "f32[4, 3]" = _unpack_dual[1]; _unpack_dual = None
primals_out_unflatten: "f32[4, 3]" = torch._C._functorch._unwrap_for_grad(primal, 2); primal = primals_out_unflatten = None
tangents_out_unflatten: "f32[4, 3]" = torch._C._functorch._unwrap_for_grad(dual, 2); dual = None
_exit_dual_level = torch._C._exit_dual_level(0); _exit_dual_level = None
_set_fwd_grad_enabled_1 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_1 = None
_jvp_decrement_nesting = torch._C._functorch._jvp_decrement_nesting(); _jvp_decrement_nesting = None
results: "f32[12, 4, 3]" = torch._functorch.predispatch._remove_batch_dim(tangents_out_unflatten, 1, 12, 0); tangents_out_unflatten = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
movedim: "f32[4, 3, 12]" = results.movedim(0, -1); results = None
split = movedim.split((12,), dim = -1); movedim = None
jac_out_in: "f32[4, 3, 12]" = split[0]; split = None
unflatten: "f32[4, 3, 4, 3]" = jac_out_in.unflatten(-1, (4, 3)); jac_out_in = None
return (unflatten,)
""",
)
def test_jacfwd_two_tensors_argnums(self):
counters.clear()
def fn(x, y):
return y.sin()
def wrapper_fn(x, y):
return torch.func.jacfwd(fn, argnums=1)(x, y)
x = torch.randn(4, 3)
y = torch.randn(3, 4)
wrapped_gm = self._compile_check(wrapper_fn, (x, y))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg.py | {
"start": 7625,
"end": 7831
} | class ____(JSONB):
def bind_processor(self, dialect):
return self._make_bind_processor(None, dialect._psycopg_Jsonb)
def result_processor(self, dialect, coltype):
return None
| _PGJSONB |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 613481,
"end": 613808
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("SecurityAdvisory", graphql_name="node")
| SecurityAdvisoryEdge |
python | anthropics__anthropic-sdk-python | src/anthropic/pagination.py | {
"start": 384,
"end": 1329
} | class ____(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
data: List[_T]
has_more: Optional[bool] = None
first_id: Optional[str] = None
last_id: Optional[str] = None
@override
def _get_page_items(self) -> List[_T]:
data = self.data
if not data:
return []
return data
@override
def has_next_page(self) -> bool:
has_more = self.has_more
if has_more is not None and has_more is False:
return False
return super().has_next_page()
@override
def next_page_info(self) -> Optional[PageInfo]:
if self._options.params.get("before_id"):
first_id = self.first_id
if not first_id:
return None
return PageInfo(params={"before_id": first_id})
last_id = self.last_id
if not last_id:
return None
return PageInfo(params={"after_id": last_id})
| SyncPage |
python | tiangolo__fastapi | fastapi/security/oauth2.py | {
"start": 10792,
"end": 14465
} | class ____(SecurityBase):
"""
This is the base class for OAuth2 authentication, an instance of it would be used
as a dependency. All other OAuth2 classes inherit from it and customize it for
each OAuth2 flow.
You normally would not create a new class inheriting from it but use one of the
existing subclasses, and maybe compose them if you want to support multiple flows.
Read more about it in the
[FastAPI docs for Security](https://fastapi.tiangolo.com/tutorial/security/).
"""
def __init__(
self,
*,
flows: Annotated[
Union[OAuthFlowsModel, Dict[str, Dict[str, Any]]],
Doc(
"""
The dictionary of OAuth2 flows.
"""
),
] = OAuthFlowsModel(),
scheme_name: Annotated[
Optional[str],
Doc(
"""
Security scheme name.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
description: Annotated[
Optional[str],
Doc(
"""
Security scheme description.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
auto_error: Annotated[
bool,
Doc(
"""
By default, if no HTTP Authorization header is provided, required for
OAuth2 authentication, it will automatically cancel the request and
send the client an error.
If `auto_error` is set to `False`, when the HTTP Authorization header
is not available, instead of erroring out, the dependency result will
be `None`.
This is useful when you want to have optional authentication.
It is also useful when you want to have authentication that can be
provided in one of multiple optional ways (for example, with OAuth2
or in a cookie).
"""
),
] = True,
):
self.model = OAuth2Model(
flows=cast(OAuthFlowsModel, flows), description=description
)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
def make_not_authenticated_error(self) -> HTTPException:
"""
The OAuth 2 specification doesn't define the challenge that should be used,
because a `Bearer` token is not really the only option to authenticate.
But declaring any other authentication challenge would be application-specific
as it's not defined in the specification.
For practical reasons, this method uses the `Bearer` challenge by default, as
it's probably the most common one.
If you are implementing an OAuth2 authentication scheme other than the provided
ones in FastAPI (based on bearer tokens), you might want to override this.
Ref: https://datatracker.ietf.org/doc/html/rfc6749
"""
return HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
async def __call__(self, request: Request) -> Optional[str]:
authorization = request.headers.get("Authorization")
if not authorization:
if self.auto_error:
raise self.make_not_authenticated_error()
else:
return None
return authorization
| OAuth2 |
python | RaRe-Technologies__gensim | gensim/models/poincare.py | {
"start": 56661,
"end": 60750
} | class ____:
"""Evaluate reconstruction on given network for given embedding."""
def __init__(self, file_path, embedding):
"""Initialize evaluation instance with tsv file containing relation pairs and embedding to be evaluated.
Parameters
----------
file_path : str
Path to tsv file containing relation pairs.
embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
Embedding to be evaluated.
"""
items = set()
relations = defaultdict(set)
with utils.open(file_path, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
assert len(row) == 2, 'Hypernym pair has more than two items'
item_1_index = embedding.get_index(row[0])
item_2_index = embedding.get_index(row[1])
relations[item_1_index].add(item_2_index)
items.update([item_1_index, item_2_index])
self.items = items
self.relations = relations
self.embedding = embedding
@staticmethod
def get_positive_relation_ranks_and_avg_prec(all_distances, positive_relations):
"""Compute ranks and Average Precision of positive relations.
Parameters
----------
all_distances : numpy.array of float
Array of all distances (floats) for a specific item.
positive_relations : list
List of indices of positive relations for the item.
Returns
-------
(list of int, float)
The list contains ranks of positive relations in the same order as `positive_relations`.
The float is the Average Precision of the ranking, e.g. ([1, 2, 3, 20], 0.610).
"""
positive_relation_distances = all_distances[positive_relations]
negative_relation_distances = np.ma.array(all_distances, mask=False)
negative_relation_distances.mask[positive_relations] = True
# Compute how many negative relation distances are less than each positive relation distance, plus 1 for rank
ranks = (negative_relation_distances < positive_relation_distances[:, np.newaxis]).sum(axis=1) + 1
map_ranks = np.sort(ranks) + np.arange(len(ranks))
avg_precision = ((np.arange(1, len(map_ranks) + 1) / np.sort(map_ranks)).mean())
return list(ranks), avg_precision
def evaluate(self, max_n=None):
"""Evaluate all defined metrics for the reconstruction task.
Parameters
----------
max_n : int, optional
Maximum number of positive relations to evaluate, all if `max_n` is None.
Returns
-------
dict of (str, float)
(metric_name, metric_value) pairs, e.g. {'mean_rank': 50.3, 'MAP': 0.31}.
"""
mean_rank, map_ = self.evaluate_mean_rank_and_map(max_n)
return {'mean_rank': mean_rank, 'MAP': map_}
def evaluate_mean_rank_and_map(self, max_n=None):
"""Evaluate mean rank and MAP for reconstruction.
Parameters
----------
max_n : int, optional
Maximum number of positive relations to evaluate, all if `max_n` is None.
Returns
-------
(float, float)
(mean_rank, MAP), e.g (50.3, 0.31).
"""
ranks = []
avg_precision_scores = []
for i, item in enumerate(self.items, start=1):
if item not in self.relations:
continue
item_relations = list(self.relations[item])
item_term = self.embedding.index_to_key[item]
item_distances = self.embedding.distances(item_term)
positive_relation_ranks, avg_precision = \
self.get_positive_relation_ranks_and_avg_prec(item_distances, item_relations)
ranks += positive_relation_ranks
avg_precision_scores.append(avg_precision)
if max_n is not None and i > max_n:
break
return np.mean(ranks), np.mean(avg_precision_scores)
| ReconstructionEvaluation |
python | walkccc__LeetCode | solutions/1636. Sort Array by Increasing Frequency/1636.py | {
"start": 60,
"end": 222
} | class ____:
num: int
freq: int
def __lt__(self, other):
if self.freq == other.freq:
return self.num > other.num
return self.freq < other.freq
| T |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 938791,
"end": 939559
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for RepositoryMigration."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("RepositoryMigrationEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("RepositoryMigration"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| RepositoryMigrationConnection |
python | kamyu104__LeetCode-Solutions | Python/smallest-index-with-digit-sum-equal-to-index.py | {
"start": 41,
"end": 397
} | class ____(object):
def smallestIndex(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def total(x):
result = 0
while x:
result += x%10
x //= 10
return result
return next((i for i, x in enumerate(nums) if total(x) == i), -1)
| Solution |
python | getsentry__sentry | tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py | {
"start": 11729,
"end": 13174
} | class ____(BaseWorkflowTest):
def setUp(self) -> None:
super().setUp()
self.handler = DiscordIssueAlertHandler()
self.detector = self.create_detector(project=self.project)
self.action = self.create_action(
type=Action.Type.DISCORD,
integration_id="1234567890",
config={"target_identifier": "channel456", "target_type": ActionTarget.SPECIFIC},
data={"tags": "environment,user,my_tag"},
)
def test_build_rule_action_blob(self) -> None:
"""Test that build_rule_action_blob creates correct Discord action data"""
blob = self.handler.build_rule_action_blob(self.action, self.organization.id)
assert blob == {
"id": "sentry.integrations.discord.notify_action.DiscordNotifyServiceAction",
"server": "1234567890",
"channel_id": "channel456",
"tags": "environment,user,my_tag",
}
def test_build_rule_action_blob_no_tags(self) -> None:
"""Test that build_rule_action_blob handles missing tags"""
self.action.data = {}
blob = self.handler.build_rule_action_blob(self.action, self.organization.id)
assert blob == {
"id": "sentry.integrations.discord.notify_action.DiscordNotifyServiceAction",
"server": "1234567890",
"channel_id": "channel456",
"tags": "",
}
| TestDiscordIssueAlertHandler |
python | airbytehq__airbyte | airbyte-ci/connectors/ci_credentials/ci_credentials/models.py | {
"start": 1715,
"end": 1980
} | class ____(Secret):
enabled_version: str
@classmethod
def from_secret(cls, secret: Secret, enabled_version: str) -> RemoteSecret:
return RemoteSecret(secret.connector_name, secret.configuration_file_name, secret.value, enabled_version)
| RemoteSecret |
python | conda__conda | conda/testing/solver_helpers.py | {
"start": 1382,
"end": 6080
} | class ____:
"""Helper environment object."""
REPO_DATA_KEYS = (
"build",
"build_number",
"depends",
"license",
"md5",
"name",
"sha256",
"size",
"subdir",
"timestamp",
"version",
"track_features",
"features",
)
def __init__(self, path, solver_class, subdirs=context.subdirs):
self._path = pathlib.Path(path)
self._prefix_path = self._path / "prefix"
self._channels_path = self._path / "channels"
self._solver_class = solver_class
self.subdirs = subdirs
self.installed_packages = []
# if repo_packages is a list, the packages will be put in a `test` channel
# if it is a dictionary, it the keys are the channel name and the value
# the channel packages
self.repo_packages: list[str] | dict[str, list[str]] = []
def solver(self, add, remove):
"""Writes ``repo_packages`` to the disk and creates a solver instance."""
channels = []
self._write_installed_packages()
for channel_name, packages in self._channel_packages.items():
self._write_repo_packages(channel_name, packages)
channel = Channel(str(self._channels_path / channel_name))
channels.append(channel)
return self._solver_class(
prefix=self._prefix_path,
subdirs=self.subdirs,
channels=channels,
specs_to_add=add,
specs_to_remove=remove,
)
def solver_transaction(self, add=(), remove=(), as_specs=False):
packages = self.solver(add=add, remove=remove).solve_final_state()
if as_specs:
return packages
return package_string_set(packages)
def install(self, *specs, as_specs=False):
return self.solver_transaction(add=specs, as_specs=as_specs)
def remove(self, *specs, as_specs=False):
return self.solver_transaction(remove=specs, as_specs=as_specs)
@property
def _channel_packages(self):
"""Helper that unfolds the ``repo_packages`` into a dictionary."""
if isinstance(self.repo_packages, dict):
return self.repo_packages
return {"test": self.repo_packages}
def _package_data(self, record):
"""Turn record into data, to be written in the JSON environment/repo files."""
data = {
key: value
for key, value in vars(record).items()
if key in self.REPO_DATA_KEYS
}
if "subdir" not in data:
data["subdir"] = context.subdir
return data
def _write_installed_packages(self):
if not self.installed_packages:
return
conda_meta = self._prefix_path / "conda-meta"
conda_meta.mkdir(exist_ok=True, parents=True)
# write record files
for record in self.installed_packages:
record_path = (
conda_meta / f"{record.name}-{record.version}-{record.build}.json"
)
record_data = self._package_data(record)
record_data["channel"] = record.channel.name
record_path.write_text(json.dumps(record_data))
# write history file
history_path = conda_meta / "history"
history_path.write_text(
"\n".join(
(
"==> 2000-01-01 00:00:00 <==",
*map(package_string, self.installed_packages),
)
)
)
def _write_repo_packages(self, channel_name, packages):
"""Write packages to the channel path."""
# build package data
package_data = collections.defaultdict(dict)
for record in packages:
package_data[record.subdir][record.fn] = self._package_data(record)
# write repodata
assert set(self.subdirs).issuperset(set(package_data.keys()))
for subdir in self.subdirs:
subdir_path = self._channels_path / channel_name / subdir
subdir_path.mkdir(parents=True, exist_ok=True)
subdir_path.joinpath("repodata.json").write_text(
json.dumps(
{
"info": {
"subdir": subdir,
},
"packages": package_data.get(subdir, {}),
}
)
)
def empty_prefix():
return TemporaryDirectory(prefix="conda-test-repo-")
@pytest.fixture()
def temp_simple_env(solver_class=Solver) -> SimpleEnvironment:
with empty_prefix() as prefix:
yield SimpleEnvironment(prefix, solver_class)
| SimpleEnvironment |
python | falconry__falcon | tests/test_error_handlers.py | {
"start": 8821,
"end": 9716
} | class ____:
@pytest.fixture()
def body_client(self, asgi, util):
app = util.create_app(asgi=asgi)
app.add_route('/error', NoBodyResource())
def no_reps(req, resp, exception):
pass
app.set_error_serializer(no_reps)
return testing.TestClient(app)
def test_data_is_set(self, body_client):
res = body_client.simulate_get('/error')
assert res.status == falcon.HTTP_IM_A_TEAPOT
assert res.status_code == 418
assert res.content == b''
def test_media_is_set(self, body_client):
res = body_client.simulate_post('/error')
assert res.status == falcon.HTTP_740
assert res.content == b''
def test_body_is_set(self, body_client):
res = body_client.simulate_put('/error')
assert res.status == falcon.HTTP_701
assert res.content == b''
| TestNoBodyWithStatus |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/base.py | {
"start": 189107,
"end": 193309
} | class ____(RunnableEachBase[Input, Output]):
"""RunnableEach class.
`Runnable` that calls another `Runnable` for each element of the input sequence.
It allows you to call multiple inputs with the bounded `Runnable`.
`RunnableEach` makes it easy to run multiple inputs for the `Runnable`.
In the below example, we associate and run three inputs
with a `Runnable`:
```python
from langchain_core.runnables.base import RunnableEach
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
prompt = ChatPromptTemplate.from_template("Tell me a short joke about
{topic}")
model = ChatOpenAI()
output_parser = StrOutputParser()
runnable = prompt | model | output_parser
runnable_each = RunnableEach(bound=runnable)
output = runnable_each.invoke([{'topic':'Computer Science'},
{'topic':'Art'},
{'topic':'Biology'}])
print(output) # noqa: T201
```
"""
@override
def get_name(self, suffix: str | None = None, *, name: str | None = None) -> str:
name = name or self.name or f"RunnableEach<{self.bound.get_name()}>"
return super().get_name(suffix, name=name)
@override
def bind(self, **kwargs: Any) -> RunnableEach[Input, Output]:
return RunnableEach(bound=self.bound.bind(**kwargs))
@override
def with_config(
self, config: RunnableConfig | None = None, **kwargs: Any
) -> RunnableEach[Input, Output]:
return RunnableEach(bound=self.bound.with_config(config, **kwargs))
@override
def with_listeners(
self,
*,
on_start: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
on_end: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
on_error: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
) -> RunnableEach[Input, Output]:
"""Bind lifecycle listeners to a `Runnable`, returning a new `Runnable`.
The `Run` object contains information about the run, including its `id`,
`type`, `input`, `output`, `error`, `start_time`, `end_time`, and
any tags or metadata added to the run.
Args:
on_start: Called before the `Runnable` starts running, with the `Run`
object.
on_end: Called after the `Runnable` finishes running, with the `Run`
object.
on_error: Called if the `Runnable` throws an error, with the `Run`
object.
Returns:
A new `Runnable` with the listeners bound.
"""
return RunnableEach(
bound=self.bound.with_listeners(
on_start=on_start, on_end=on_end, on_error=on_error
)
)
def with_alisteners(
self,
*,
on_start: AsyncListener | None = None,
on_end: AsyncListener | None = None,
on_error: AsyncListener | None = None,
) -> RunnableEach[Input, Output]:
"""Bind async lifecycle listeners to a `Runnable`.
Returns a new `Runnable`.
The `Run` object contains information about the run, including its `id`,
`type`, `input`, `output`, `error`, `start_time`, `end_time`, and
any tags or metadata added to the run.
Args:
on_start: Called asynchronously before the `Runnable` starts running,
with the `Run` object.
on_end: Called asynchronously after the `Runnable` finishes running,
with the `Run` object.
on_error: Called asynchronously if the `Runnable` throws an error,
with the `Run` object.
Returns:
A new `Runnable` with the listeners bound.
"""
return RunnableEach(
bound=self.bound.with_alisteners(
on_start=on_start, on_end=on_end, on_error=on_error
)
)
| RunnableEach |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 2289,
"end": 18183
} | class ____(VariableTracker):
_nonvar_fields = {
*VariableTracker._nonvar_fields,
}
def __init__(self, typevar, objvar=None, **kwargs) -> None:
super().__init__(**kwargs)
# typevar is the first argument to super(). In the case where no argument
# is provided to super(), it is the __class__ object where
# the super() function is being called
self.typevar = typevar
# objvar here must be an instance or subtype of typevar.
# In the case where super() is called without arguments, it is the first argument
# to the current function where super() is called from (self for regular method,
# cls for a classmethod)
self.objvar = objvar
def reconstruct(self, codegen: "PyCodegen"):
codegen.add_push_null(lambda: codegen(variables.BuiltinVariable(super)))
codegen(self.typevar)
if self.objvar is not None:
codegen(self.objvar)
codegen.extend_output(create_call_function(2, False))
else:
codegen.extend_output(create_call_function(1, False))
def _resolved_getattr_and_source(self, tx: "InstructionTranslator", name):
if not self.objvar:
unimplemented(
gb_type="1-arg super not implemented",
context="",
explanation=f"Dynamo failed to trace attribute `{name}` accessed "
f"via `super()` (for type `{self.typevar}` and object `{self.objvar}`) "
"because one-argument of super() is not supported.",
hints=[
"Use two-argument super(type, object_or_type).",
],
)
search_type = self.typevar.as_python_constant()
# The rest of this function does two things:
# - Walk the mro to find where the attribute comes from to be
# able to provide accurate source
# - Call the getattr to get the object
# Find the class object, where the function lives.
# When objvar is "self", use type(self), when objvar is "cls", use it as-is
type_to_use = self.objvar.python_type()
type_to_use_source = (
TypeSource(self.objvar.source) if self.objvar.source else None
)
if issubclass(type_to_use, type):
type_to_use = self.objvar.value
type_to_use_source = self.objvar.source
source = None
search_mro = type_to_use.__mro__
try:
start_index = search_mro.index(search_type) + 1
except ValueError:
# Corner case where the typevar is not in the mro of the objvar
# https://github.com/python/cpython/blob/3.11/Objects/typeobject.c#L8843-L8844
return getattr(super(search_type, type_to_use), name), None
# Implemented based on https://github.com/python/cpython/blob/3.11/Objects/typeobject.c#L8812
# super has its getattro implementation. The key point is that instead of calling getattr, it checks the
# attribute in the class __dict__
for index in range(start_index, len(search_mro)):
# Dont call getattr, just check the __dict__ of the class
if resolved_getattr := search_mro[index].__dict__.get(name, NO_SUCH_SUBOBJ):
if resolved_getattr is not NO_SUCH_SUBOBJ:
# Equivalent of something like type(L['self']).__mro__[1].attr_name
if type_to_use_source:
source = AttrSource(
GetItemSource(TypeMROSource(type_to_use_source), index),
name,
)
return resolved_getattr, source
unimplemented(
gb_type="Unable to resolve super getattr",
context="",
explanation=f"Dynamo failed to trace attribute `{name}` accessed "
f"via `super()` (for type `{self.typevar}` and object `{self.objvar}`) "
"because the resolved attribute type is not supported.",
hints=[
"Ensure the attribute exists in the parent class.",
"Check the arguments passed to `super()`.",
],
)
def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker":
# Check if getattr is a constant. If not, delay the actual work by
# wrapping the result in GetAttrVariable. Mostly super is called with a
# method, so most of the work is delayed to call_function.
#
# We could have just implemented a const_getattr. However, super is
# special when it comes to finding sources. Compared to other VTs, super
# requires the attr name to walk the mro and find the actual source (and
# not just AttrSource).
value, source = self._resolved_getattr_and_source(self, name)
if not variables.ConstantVariable.is_literal(value):
return GetAttrVariable(self, name)
if source:
install_guard(source.make_guard(GuardBuilder.CONSTANT_MATCH))
return variables.ConstantVariable.create(value, source=source)
def call_method(
self,
tx: "InstructionTranslator",
name,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
inner_fn, source = self._resolved_getattr_and_source(self, name)
# This essentially simulates CPython's `super_getattro`:
# https://github.com/python/cpython/blob/a1c52d1265c65bcf0d9edf87e143843ad54f9b8f/Objects/typeobject.c#L11138-L11168
# where `inner_fn` is the VT for `res = _super_lookup_descr(...)`.
#
# However, `res`'s type needs to be checked for `tp_descr_get`, and
# applied if it has one. We currently don't have polyfills for all the
# relevant `tp_descr_get`, so we explicitly handle the cases we care
# about here (e.g., note the staticmethod, classmethod cases).
if inner_fn is object.__init__:
return LambdaVariable(identity)
elif inner_fn is torch.nn.Module.__init__:
objvar = self.objvar
from ..side_effects import AttributeMutationNew
if (
isinstance(objvar, variables.UserDefinedObjectVariable)
and isinstance(objvar.mutation_type, AttributeMutationNew)
and not (args or kwargs)
):
with do_not_convert_to_tracable_parameter():
fn_vt = VariableTracker.build(
tx, unpatched_nn_module_init, source=source
)
return fn_vt.call_function(tx, [self.objvar] + args, kwargs)
else:
unimplemented(
gb_type="Unsupported super().__init__() call",
context=f"call_method {self} {name} {args} {kwargs}",
explanation="Dynamo encountered a super().__init__() call "
f"on {objvar} that resolved to a `torch.nn.Module.__init__()` "
"call that we cannot trace.",
hints=[*graph_break_hints.DIFFICULT],
)
elif (
self.objvar.source
and hasattr(inner_fn, "__name__")
and inner_fn.__name__ == "__new__"
and variables.UserDefinedClassVariable.is_supported_new_method(inner_fn)
):
user_cls = inner_fn.__self__
if hasattr(user_cls, "__module__") and user_cls.__module__ == "builtins":
user_cls_vt = variables.BuiltinVariable(user_cls)
else:
user_cls_source = source.member
user_cls_vt = variables.UserDefinedClassVariable(
user_cls, source=user_cls_source
)
return user_cls_vt.call_method(tx, "__new__", args, kwargs)
elif isinstance(inner_fn, staticmethod) and isinstance(
inner_fn.__func__, types.FunctionType
):
fn_vt = VariableTracker.build(tx, inner_fn.__func__, source=source)
return fn_vt.call_function(tx, args, kwargs)
elif isinstance(inner_fn, classmethod) and isinstance(
inner_fn.__func__, types.FunctionType
):
if isinstance(self.objvar, variables.UserDefinedClassVariable):
# super().classmethod is called from a classmethod itself. So,
# super was converted to super(__class__, cls) in bytecode and
# therefore we have to propagate the cls.
cls_variable = self.objvar
else:
# current function is an instance method, therefore super was
# converted to super(__class__, self). We have to find
# type(self) to bind the cls to the parent classmethod.
# Note that it can't be the self.typevar because __class__ is
# the class where the method is defined, which could be
# different from type(self) with polymorphism.
cls_source = None
if self.objvar.source:
cls_source = TypeSource(self.objvar.source)
cls_variable = VariableTracker.build(
tx, self.objvar.value_type, cls_source
)
fn_vt = VariableTracker.build(
tx, inner_fn.__func__, source=AttrSource(source, "__func__")
)
return fn_vt.call_function(tx, [cls_variable, *args], kwargs)
elif isinstance(inner_fn, types.FunctionType):
fn_vt = VariableTracker.build(tx, inner_fn, source=source)
return fn_vt.call_function(tx, [self.objvar] + args, kwargs)
elif isinstance(inner_fn, types.MethodType):
return variables.UserMethodVariable(
inner_fn.__func__, self.objvar, source=source
).call_function(tx, args, kwargs)
elif is_standard_setattr(inner_fn) and isinstance(
self.objvar, UserDefinedObjectVariable
):
return self.objvar.method_setattr_standard(tx, *args, **kwargs)
elif inner_fn is object.__delattr__:
attr = args[0]
try:
attr = attr.as_python_constant()
except NotImplementedError as exc:
unimplemented(
gb_type="Non-constant attribute given to `super().__delattr__()`",
context=f"call_method {self} {name}",
explanation="Dynamo requires the attribute name passed to "
"`super().__delattr__(...)` to be a constant (string).",
hints=[
"Ensure the attribute name is a string literal or a constant variable."
],
from_exc=exc,
)
if not tx.output.side_effects.is_attribute_mutation(self.objvar):
unimplemented(
gb_type="Attempted super().__delattr__() on an object without mutation tracking",
context=f"call_method {self} {name}",
explanation="Dynamo needs to track mutations on an object "
"before `super().__delattr__` can be used on it. But the "
f"object ({self.objvar}) doesn't have attribute mutation "
"tracking enabled.",
hints=[
"Ensure the object is tracked by Dynamo's side effect system.",
*graph_break_hints.DYNAMO_BUG,
],
)
tx.output.side_effects.store_attr(
self.objvar, attr, variables.DeletedVariable()
)
return variables.ConstantVariable(None)
elif (
isinstance(self.objvar, variables.UserDefinedDictVariable)
and inner_fn in self.objvar._dict_methods
):
return self.objvar._dict_vt.call_method(tx, name, args, kwargs)
elif (
isinstance(self.objvar, variables.UserDefinedSetVariable)
and inner_fn in self.objvar._set_methods
):
return self.objvar._set_vt.call_method(tx, name, args, kwargs)
elif (
isinstance(self.objvar, variables.UserDefinedTupleVariable)
and inner_fn in tuple_methods
):
return self.objvar._tuple_vt.call_method(tx, name, args, kwargs)
elif (
isinstance(self.objvar, variables.UserDefinedListVariable)
and inner_fn in list_methods
):
return self.objvar._list_vt.call_method(tx, name, args, kwargs)
elif inner_fn is object.__getattribute__:
# object.__getattribute__ has no side-effects. We can directly call
# __getattribute__ to access the attribute.
attr_name = args[0].value
if tx.output.side_effects.has_pending_mutation_of_attr(
self.objvar, attr_name
):
result = tx.output.side_effects.load_attr(
self.objvar, attr_name, deleted_ok=True
)
if isinstance(result, variables.DeletedVariable):
raise_observed_exception(AttributeError, tx)
return result
try:
# NB - use object.__getattribute__ to prevent running any user code
attr_value = object.__getattribute__(self.objvar.value, attr_name)
except AttributeError:
raise_observed_exception(AttributeError, tx)
attr_source = None
if self.objvar.source is not None:
# setup a object.__getattribute__(self.objvar, name) source
attr_source = GenericAttrSource(self.objvar.source, attr_name)
return VariableTracker.build(tx, attr_value, attr_source)
elif inner_fn is torch._C._disabled_torch_function_impl:
# See `THPModule_disable_torch_function` for the C impl.
# The signature of _disabled_torch_function_impl is similar to
# `__torch_function__`, just without the first `cls` argument:
# * (func, types, args, kwargs)
func = args[0]
tf_kwargs = {}
tf_args = args[2].items
for hash_key_vt, value_vt in args[3].items.items():
key_str = hash_key_vt.vt.as_python_constant()
tf_kwargs[key_str] = value_vt
tx_old = tx.symbolic_torch_function_state.torch_function_subclass_enabled
tx.symbolic_torch_function_state.torch_function_subclass_enabled = False
try:
return func.call_function(tx, tf_args, tf_kwargs)
finally:
tx.symbolic_torch_function_state.torch_function_subclass_enabled = (
tx_old
)
elif (
isinstance(inner_fn, types.MethodDescriptorType)
and inner_fn in trace_rules.get_tensor_method()
):
# FunctionType but implementation is in C, we support some of these,
# e.g., tensor ops like `torch.Tensor.to`.
fn_var = VariableTracker.build(tx, inner_fn, source)
return fn_var.call_function(tx, [self.objvar] + args, kwargs)
unimplemented(
gb_type="Attempted to call a super() attribute that is "
"not a function or method",
context=f"call_method {self} {name}",
explanation="Dynamo does not know how to trace the call "
f"`super().{name}()` because `super().{name}` is not a "
"function or method attribute.",
hints=[
"Ensure the attribute accessed via `super()` is a standard method or function.",
],
)
| SuperVariable |
python | crytic__slither | slither/tools/upgradeability/checks/initialization.py | {
"start": 11298,
"end": 12631
} | class ____(AbstractCheck):
ARGUMENT = "initialize-target"
IMPACT = CheckClassification.INFORMATIONAL
HELP = "Initialize function that must be called"
WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#initialize-function"
WIKI_TITLE = "Initialize function"
# region wiki_description
WIKI_DESCRIPTION = """
Show the function that must be called at deployment.
This finding does not have an immediate security impact and is informative.
"""
# endregion wiki_description
# region wiki_recommendation
WIKI_RECOMMENDATION = """
Ensure that the function is called at deployment.
"""
# endregion wiki_recommendation
REQUIRE_CONTRACT = True
def _check(self):
# TODO: handle MultipleInitTarget
try:
most_derived_init = _get_most_derived_init(self.contract)
except MultipleInitTarget:
# Should be already reported by MissingCalls
# logger.error(red(f'Too many init targets in {self.contract}'))
return []
if most_derived_init is None:
return []
info = [
self.contract,
" needs to be initialized by ",
most_derived_init,
".\n",
]
json = self.generate_result(info)
return [json]
| InitializeTarget |
python | pandas-dev__pandas | pandas/tests/indexes/numeric/test_indexing.py | {
"start": 274,
"end": 3203
} | class ____:
def test_get_loc(self):
index = Index([0, 1, 2])
assert index.get_loc(1) == 1
def test_get_loc_raises_bad_label(self):
index = Index([0, 1, 2])
with pytest.raises(InvalidIndexError, match=r"\[1, 2\]"):
index.get_loc([1, 2])
def test_get_loc_float64(self):
idx = Index([0.0, 1.0, 2.0], dtype=np.float64)
with pytest.raises(KeyError, match="^'foo'$"):
idx.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5)
with pytest.raises(KeyError, match="^True$"):
idx.get_loc(True)
with pytest.raises(KeyError, match="^False$"):
idx.get_loc(False)
def test_get_loc_na(self):
idx = Index([np.nan, 1, 2], dtype=np.float64)
assert idx.get_loc(1) == 1
assert idx.get_loc(np.nan) == 0
idx = Index([np.nan, 1, np.nan], dtype=np.float64)
assert idx.get_loc(1) == 1
# representable by slice [0:2:2]
msg = "'Cannot get left slice bound for non-unique label: nan'"
with pytest.raises(KeyError, match=msg):
idx.slice_locs(np.nan)
# not representable by slice
idx = Index([np.nan, 1, np.nan, np.nan], dtype=np.float64)
assert idx.get_loc(1) == 1
msg = "'Cannot get left slice bound for non-unique label: nan"
with pytest.raises(KeyError, match=msg):
idx.slice_locs(np.nan)
def test_get_loc_missing_nan(self):
# GH#8569
idx = Index([1, 2], dtype=np.float64)
assert idx.get_loc(1) == 0
with pytest.raises(KeyError, match=r"^3$"):
idx.get_loc(3)
with pytest.raises(KeyError, match="^nan$"):
idx.get_loc(np.nan)
with pytest.raises(InvalidIndexError, match=r"\[nan\]"):
# listlike/non-hashable raises TypeError
idx.get_loc([np.nan])
@pytest.mark.parametrize("vals", [[1], [1.0], [Timestamp("2019-12-31")], ["test"]])
def test_get_loc_float_index_nan_with_method(self, vals):
# GH#39382
idx = Index(vals)
with pytest.raises(KeyError, match="nan"):
idx.get_loc(np.nan)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_get_loc_numericindex_none_raises(self, dtype):
# case that goes through searchsorted and key is non-comparable to values
arr = np.arange(10**7, dtype=dtype)
idx = Index(arr)
with pytest.raises(KeyError, match="None"):
idx.get_loc(None)
def test_get_loc_overflows(self):
# unique but non-monotonic goes through IndexEngine.mapping.get_item
idx = Index([0, 2, 1])
val = np.iinfo(np.int64).max + 1
with pytest.raises(KeyError, match=str(val)):
idx.get_loc(val)
with pytest.raises(KeyError, match=str(val)):
idx._engine.get_loc(val)
| TestGetLoc |
python | astropy__astropy | astropy/io/ascii/rst.py | {
"start": 622,
"end": 730
} | class ____(FixedWidthData):
end_line = -1
splitter_class = FixedWidthTwoLineDataSplitter
| SimpleRSTData |
python | doocs__leetcode | solution/3000-3099/3025.Find the Number of Ways to Place People I/Solution.py | {
"start": 0,
"end": 370
} | class ____:
def numberOfPairs(self, points: List[List[int]]) -> int:
points.sort(key=lambda x: (x[0], -x[1]))
ans = 0
for i, (_, y1) in enumerate(points):
max_y = -inf
for _, y2 in points[i + 1 :]:
if max_y < y2 <= y1:
max_y = y2
ans += 1
return ans
| Solution |
python | pytorch__pytorch | test/ao/sparsity/test_data_sparsifier.py | {
"start": 474,
"end": 814
} | class ____(BaseDataSparsifier):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def update_mask(self, name, data, **kwargs):
mask = self.get_mask(name)
mask[0] = 0
linear_state = self.state[name]
linear_state["step_count"] = linear_state.get("step_count", 0) + 1
| ImplementedSparsifier |
python | pytorch__pytorch | test/inductor/test_group_batch_fusion.py | {
"start": 6724,
"end": 7517
} | class ____(torch.nn.Module):
def __init__(self, device, has_bias=True):
super().__init__()
self.device = device
self.weights = torch.nn.ParameterList(
[torch.nn.Parameter(torch.randn(50, 100)).to(self.device) for _ in range(5)]
)
self.biases = (
([torch.nn.Parameter(torch.randn(50)).to(self.device) for _ in range(5)])
if has_bias
else [None for _ in range(5)]
)
def forward(self, x):
l1_out = torch.split(x.to(self.device), 100, dim=1)
l1_linear = [
torch.nn.functional.linear(l1_out[i], self.weights[i], self.biases[i])
for i in range(len(l1_out))
]
l1_out = torch.cat(l1_linear, dim=1)
return torch.sin(l1_out)
| MyModule5 |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 32605,
"end": 48501
} | class ____:
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x:
def __array__(self, dtype=None, copy=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = f'String conversion for {type}'
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert arr.dtype == 'V8' # current default
# Same length scalars (those that go to the same void) work:
arr = np.array([b"1234", b"1234"], dtype="V")
assert arr.dtype == "V4"
# Promoting different lengths will fail (pre 1.20 this worked)
# by going via S5 and casting to V5.
with pytest.raises(TypeError):
np.array([b"1234", b"12345"], dtype="V")
with pytest.raises(TypeError):
np.array([b"12345", b"1234"], dtype="V")
# Check the same for the casting path:
arr = np.array([b"1234", b"1234"], dtype="O").astype("V")
assert arr.dtype == "V4"
with pytest.raises(TypeError):
np.array([b"1234", b"12345"], dtype="O").astype("V")
@pytest.mark.parametrize("idx",
[pytest.param(Ellipsis, id="arr"), pytest.param((), id="scalar")])
def test_structured_void_promotion(self, idx):
arr = np.array(
[np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i')[idx]],
dtype="V")
assert_array_equal(arr, np.array([(1, 1), (2, 2)], dtype="i,i"))
# The following fails to promote the two dtypes, resulting in an error
with pytest.raises(TypeError):
np.array(
[np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i,i')[idx]],
dtype="V")
def test_too_big_error(self):
# 45341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
@pytest.mark.skipif(not IS_64BIT,
reason="malloc may not fail on 32 bit systems")
@pytest.mark.thread_unsafe(reason="large slow test in parallel")
def test_malloc_fails(self):
# This test is guaranteed to fail due to a too large allocation
with assert_raises(np._core._exceptions._ArrayMemoryError):
np.empty(np.iinfo(np.intp).max, dtype=np.uint8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del d
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3, 3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3, 3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3, 3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3, 3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3, 3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3, 3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3, 3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3, 3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogeneous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1], [1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j], [1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1], [1, 1j, 1.], [1, 1, 1]]).dtype, complex)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail:
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError
class Map:
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
a = np.array(C()) # segfault?
assert_equal(len(a), 0)
def test_false_len_iterable(self):
# Special case where a bad __getitem__ makes us fall back on __iter__:
class C:
def __getitem__(self, x):
raise Exception
def __iter__(self):
return iter(())
def __len__(self):
return 2
a = np.empty(2)
with assert_raises(ValueError):
a[:] = C() # Segfault!
np.array(C()) == list(C())
def test_failed_len_sequence(self):
# gh-7393
class A:
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1, 2, 3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes // itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes // itemsize + 1,), dtype=dtype)
def _ragged_creation(self, seq):
# without dtype=object, the ragged object raises
with pytest.raises(ValueError, match=".*detected shape was"):
a = np.array(seq)
return np.array(seq, dtype=object)
def test_ragged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = self._ragged_creation([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_ragged_shape_object(self):
# The ragged dimension of a list is turned into an object array
a = self._ragged_creation([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([[1], [2], [3, 3]])
assert a.shape == (3,)
assert a.dtype == object
def test_array_of_ragged_array(self):
outer = np.array([None, None])
outer[0] = outer[1] = np.array([1, 2, 3])
assert np.array(outer).shape == (2,)
assert np.array([outer]).shape == (1, 2)
outer_ragged = np.array([None, None])
outer_ragged[0] = np.array([1, 2, 3])
outer_ragged[1] = np.array([1, 2, 3, 4])
# should both of these emit deprecation warnings?
assert np.array(outer_ragged).shape == (2,)
assert np.array([outer_ragged]).shape == (1, 2,)
def test_deep_nonragged_object(self):
# None of these should raise, even though they are missing dtype=object
a = np.array([[[Decimal(1)]]])
a = np.array([1, Decimal(1)])
a = np.array([[1], [Decimal(1)]])
@pytest.mark.parametrize("dtype", [object, "O,O", "O,(3,)O", "(2,3)O"])
@pytest.mark.parametrize("function", [
np.ndarray, np.empty,
lambda shape, dtype: np.empty_like(np.empty(shape, dtype=dtype))])
def test_object_initialized_to_None(self, function, dtype):
# NumPy has support for object fields to be NULL (meaning None)
# but generally, we should always fill with the proper None, and
# downstream may rely on that. (For fully initialized arrays!)
arr = function(3, dtype=dtype)
# We expect a fill value of None, which is not NULL:
expected = np.array(None).tobytes()
expected = expected * (arr.nbytes // len(expected))
assert arr.tobytes() == expected
@pytest.mark.parametrize("func", [
np.array, np.asarray, np.asanyarray, np.ascontiguousarray,
np.asfortranarray])
def test_creation_from_dtypemeta(self, func):
dtype = np.dtype('i')
arr1 = func([1, 2, 3], dtype=dtype)
arr2 = func([1, 2, 3], dtype=type(dtype))
assert_array_equal(arr1, arr2)
assert arr2.dtype == dtype
def test_ndmax_less_than_actual_dims_dtype_object(self):
data = [[1, 2, 3], [4, 5, 6]]
arr = np.array(data, ndmax=1, dtype=object)
assert arr.ndim == 1
assert arr.shape == (2,)
assert arr.dtype == object
data = [[1, 2, 3], [4, 5]]
arr = np.array(data, ndmax=1, dtype=object)
assert arr.ndim == 1
assert arr.shape == (2,)
assert arr.dtype == object
data = [[[1], [2]], [[3], [4]]]
arr = np.array(data, ndmax=2, dtype=object)
assert arr.ndim == 2
assert arr.shape == (2, 2)
assert arr.dtype == object
def test_ndmax_equal_to_actual_dims(self):
data = [[1, 2], [3, 4]]
arr = np.array(data, ndmax=2)
assert arr.ndim == 2
assert_array_equal(arr, np.array(data))
def test_ndmax_greater_than_actual_dims(self):
data = [[1, 2], [3, 4]]
arr = np.array(data, ndmax=3)
assert arr.ndim == 2
assert_array_equal(arr, np.array(data))
def test_ndmax_less_than_actual_dims(self):
data = [[[1], [2]], [[3], [4]]]
with pytest.raises(ValueError,
match="setting an array element with a sequence. "
"The requested array would exceed the maximum number of dimension of 2."):
np.array(data, ndmax=2)
def test_ndmax_is_zero(self):
data = [1, 2, 3]
arr = np.array(data, ndmax=0, dtype=object)
assert arr.ndim == 0
assert arr.shape == ()
assert arr.dtype == object
data = [[1, 2, 3], [4, 5, 6]]
arr = np.array(data, ndmax=0, dtype=object)
assert arr.ndim == 0
assert arr.shape == ()
assert arr.dtype == object
data = [[1, 2, 3], [4, 5]]
arr = np.array(data, ndmax=0, dtype=object)
assert arr.ndim == 0
assert arr.shape == ()
assert arr.dtype == object
def test_ndmax_less_than_ndmin(self):
data = [[[1], [2]], [[3], [4]]]
with pytest.raises(ValueError, match="ndmin must be <= ndmax"):
np.array(data, ndmax=1, ndmin=2)
def test_ndmax_is_negative(self):
data = [1, 2, 3]
with pytest.raises(ValueError, match="ndmax must be in the range"):
np.array(data, ndmax=-1)
def test_ndmax_greather_than_NPY_MAXDIMS(self):
data = [1, 2, 3]
# current NPY_MAXDIMS is 64
with pytest.raises(ValueError, match="ndmax must be in the range"):
np.array(data, ndmax=65)
def test_ndmax_less_than_ndim(self):
# np.array input bypasses recursive inference, allowing ndim > ndmax validation
data = np.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match="object too deep for desired array"):
np.array(data, ndmax=1, dtype=object)
| TestCreation |
python | kennethreitz__tablib | src/tablib/formats/__init__.py | {
"start": 1633,
"end": 1962
} | class ____(FormatDescriptorBase):
def __get__(self, obj, cls, **kwargs):
self.ensure_format_loaded()
return self._format.export_book(obj, **kwargs)
def __set__(self, obj, val):
self.ensure_format_loaded()
return self._format.import_book(obj, normalize_input(val))
| ImportExportBookDescriptor |
python | pypa__pip | src/pip/_internal/distributions/sdist.py | {
"start": 544,
"end": 6627
} | class ____(AbstractDistribution):
"""Represents a source distribution.
The preparation step for these needs metadata for the packages to be
generated.
"""
@property
def build_tracker_id(self) -> str | None:
"""Identify this requirement uniquely by its link."""
assert self.req.link
return self.req.link.url_without_fragment
def get_metadata_distribution(self) -> BaseDistribution:
return self.req.get_dist()
def prepare_distribution_metadata(
self,
build_env_installer: BuildEnvironmentInstaller,
build_isolation: bool,
check_build_deps: bool,
) -> None:
# Load pyproject.toml
self.req.load_pyproject_toml()
# Set up the build isolation, if this requirement should be isolated
if build_isolation:
# Setup an isolated environment and install the build backend static
# requirements in it.
self._prepare_build_backend(build_env_installer)
# Check that the build backend supports PEP 660. This cannot be done
# earlier because we need to setup the build backend to verify it
# supports build_editable, nor can it be done later, because we want
# to avoid installing build requirements needlessly.
self.req.editable_sanity_check()
# Install the dynamic build requirements.
self._install_build_reqs(build_env_installer)
else:
# When not using build isolation, we still need to check that
# the build backend supports PEP 660.
self.req.editable_sanity_check()
# Check if the current environment provides build dependencies
if check_build_deps:
pyproject_requires = self.req.pyproject_requires
assert pyproject_requires is not None
conflicting, missing = self.req.build_env.check_requirements(
pyproject_requires
)
if conflicting:
self._raise_conflicts("the backend dependencies", conflicting)
if missing:
self._raise_missing_reqs(missing)
self.req.prepare_metadata()
def _prepare_build_backend(
self, build_env_installer: BuildEnvironmentInstaller
) -> None:
# Isolate in a BuildEnvironment and install the build-time
# requirements.
pyproject_requires = self.req.pyproject_requires
assert pyproject_requires is not None
self.req.build_env = BuildEnvironment(build_env_installer)
self.req.build_env.install_requirements(
pyproject_requires, "overlay", kind="build dependencies", for_req=self.req
)
conflicting, missing = self.req.build_env.check_requirements(
self.req.requirements_to_check
)
if conflicting:
self._raise_conflicts("PEP 517/518 supported requirements", conflicting)
if missing:
logger.warning(
"Missing build requirements in pyproject.toml for %s.",
self.req,
)
logger.warning(
"The project does not specify a build backend, and "
"pip cannot fall back to setuptools without %s.",
" and ".join(map(repr, sorted(missing))),
)
def _get_build_requires_wheel(self) -> Iterable[str]:
with self.req.build_env:
runner = runner_with_spinner_message("Getting requirements to build wheel")
backend = self.req.pep517_backend
assert backend is not None
with backend.subprocess_runner(runner):
return backend.get_requires_for_build_wheel()
def _get_build_requires_editable(self) -> Iterable[str]:
with self.req.build_env:
runner = runner_with_spinner_message(
"Getting requirements to build editable"
)
backend = self.req.pep517_backend
assert backend is not None
with backend.subprocess_runner(runner):
return backend.get_requires_for_build_editable()
def _install_build_reqs(
self, build_env_installer: BuildEnvironmentInstaller
) -> None:
# Install any extra build dependencies that the backend requests.
# This must be done in a second pass, as the pyproject.toml
# dependencies must be installed before we can call the backend.
if (
self.req.editable
and self.req.permit_editable_wheels
and self.req.supports_pyproject_editable
):
build_reqs = self._get_build_requires_editable()
else:
build_reqs = self._get_build_requires_wheel()
conflicting, missing = self.req.build_env.check_requirements(build_reqs)
if conflicting:
self._raise_conflicts("the backend dependencies", conflicting)
self.req.build_env.install_requirements(
missing, "normal", kind="backend dependencies", for_req=self.req
)
def _raise_conflicts(
self, conflicting_with: str, conflicting_reqs: set[tuple[str, str]]
) -> None:
format_string = (
"Some build dependencies for {requirement} "
"conflict with {conflicting_with}: {description}."
)
error_message = format_string.format(
requirement=self.req,
conflicting_with=conflicting_with,
description=", ".join(
f"{installed} is incompatible with {wanted}"
for installed, wanted in sorted(conflicting_reqs)
),
)
raise InstallationError(error_message)
def _raise_missing_reqs(self, missing: set[str]) -> None:
format_string = (
"Some build dependencies for {requirement} are missing: {missing}."
)
error_message = format_string.format(
requirement=self.req, missing=", ".join(map(repr, sorted(missing)))
)
raise InstallationError(error_message)
| SourceDistribution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 189543,
"end": 191973
} | class ____(Request):
"""
Remove a task from its queue.
Fails if task status is not queued.
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "dequeue"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self, task: str, status_reason: Optional[str] = None, status_message: Optional[str] = None, **kwargs: Any
) -> None:
super(DequeueRequest, self).__init__(**kwargs)
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| DequeueRequest |
python | huggingface__transformers | tests/models/deepseek_vl/test_modeling_deepseek_vl.py | {
"start": 4270,
"end": 8954
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (DeepseekVLModel, DeepseekVLForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": DeepseekVLModel,
"image-text-to-text": DeepseekVLForConditionalGeneration,
"any-to-any": DeepseekVLForConditionalGeneration,
}
if is_torch_available()
else {}
)
_is_composite = True
def setUp(self):
self.model_tester = DeepseekVLModelTester(self)
self.config_tester = ConfigTester(self, config_class=DeepseekVLConfig, has_text_modality=False)
# overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)
# overwrite inputs_embeds tests because we need to delete "pixel values" for VLMs.
def test_inputs_embeds_matches_input_ids(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
inputs_embeds = model.get_input_embeddings()(input_ids)
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
torch.testing.assert_close(out_embeds, out_ids)
# Copied from tests.models.janus.test_modeling_janus.JanusVisionText2TextModelTest.test_sdpa_can_dispatch_composite_models
def test_sdpa_can_dispatch_composite_models(self):
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# Load the model with SDPA
model_sdpa = model_class.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
# Load model with eager attention
model_eager = model_class.from_pretrained(
tmpdirname,
attn_implementation="eager",
)
model_eager = model_eager.eval().to(torch_device)
# SigLip has one shared cls attr for all models, so we assign both submodels heer
vision_attn = language_attn = "sdpa" if model._supports_sdpa else "eager"
if hasattr(model_sdpa, "vision_model") and hasattr(model_sdpa, "language_model"):
self.assertTrue(model_sdpa.vision_model.config._attn_implementation == vision_attn)
self.assertTrue(model_sdpa.language_model.config._attn_implementation == language_attn)
self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model_eager.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if any(re.finditer(r"Attention(?!Pool)", class_name)):
self.assertTrue(submodule.config._attn_implementation == "eager")
for name, submodule in model_sdpa.named_modules():
class_name = submodule.__class__.__name__
if any(re.finditer(r"Attention(?!Pool)", class_name)):
self.assertTrue(submodule.config._attn_implementation == "sdpa")
@require_torch
@require_torch_accelerator
@slow
| DeepseekVLModelTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ranges.py | {
"start": 21504,
"end": 22238
} | class ____(List[Range[_T]]):
"""Represents a multirange sequence.
This list subclass is an utility to allow automatic type inference of
the proper multi-range SQL type depending on the single range values.
This is useful when operating on literal multi-ranges::
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import MultiRange, Range
value = literal(MultiRange([Range(2, 4)]))
select(tbl).where(tbl.c.value.op("@")(MultiRange([Range(-3, 7)])))
.. versionadded:: 2.0.26
.. seealso::
- :ref:`postgresql_multirange_list_use`.
"""
@property
def __sa_type_engine__(self) -> AbstractMultiRange[_T]:
return AbstractMultiRange()
| MultiRange |
python | pytorch__pytorch | torch/jit/frontend.py | {
"start": 3670,
"end": 4333
} | class ____(NotSupportedError):
def __init__(self, ctx, offending_node, reason="") -> None:
# If we don't have a specific token, we default to length of 1
node_type = type(offending_node)
range_len = len(node_start_tokens.get(node_type, " "))
source_range = ctx.make_range(
offending_node.lineno,
offending_node.col_offset,
offending_node.col_offset + range_len,
)
feature_name = pretty_node_names.get(node_type, node_type.__name__)
msg = f"{feature_name} {reason + ' ' if reason else ''}aren't supported"
super().__init__(source_range, msg)
| UnsupportedNodeError |
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 34321,
"end": 38707
} | class ____(PathPatch):
"""
A path patch describing a stepwise constant function.
By default, the path is not closed and starts and stops at
baseline value.
"""
_edge_default = False
@_docstring.interpd
def __init__(self, values, edges, *,
orientation='vertical', baseline=0, **kwargs):
"""
Parameters
----------
values : array-like
The step heights.
edges : array-like
The edge positions, with ``len(edges) == len(vals) + 1``,
between which the curve takes on vals values.
orientation : {'vertical', 'horizontal'}, default: 'vertical'
The direction of the steps. Vertical means that *values* are
along the y-axis, and edges are along the x-axis.
baseline : float, array-like or None, default: 0
The bottom value of the bounding edges or when
``fill=True``, position of lower edge. If *fill* is
True or an array is passed to *baseline*, a closed
path is drawn.
**kwargs
`Patch` properties:
%(Patch:kwdoc)s
"""
self.orientation = orientation
self._edges = np.asarray(edges)
self._values = np.asarray(values)
self._baseline = np.asarray(baseline) if baseline is not None else None
self._update_path()
super().__init__(self._path, **kwargs)
def _update_path(self):
if np.isnan(np.sum(self._edges)):
raise ValueError('Nan values in "edges" are disallowed')
if self._edges.size - 1 != self._values.size:
raise ValueError('Size mismatch between "values" and "edges". '
"Expected `len(values) + 1 == len(edges)`, but "
f"`len(values) = {self._values.size}` and "
f"`len(edges) = {self._edges.size}`.")
# Initializing with empty arrays allows supporting empty stairs.
verts, codes = [np.empty((0, 2))], [np.empty(0, dtype=Path.code_type)]
_nan_mask = np.isnan(self._values)
if self._baseline is not None:
_nan_mask |= np.isnan(self._baseline)
for idx0, idx1 in cbook.contiguous_regions(~_nan_mask):
x = np.repeat(self._edges[idx0:idx1+1], 2)
y = np.repeat(self._values[idx0:idx1], 2)
if self._baseline is None:
y = np.concatenate([y[:1], y, y[-1:]])
elif self._baseline.ndim == 0: # single baseline value
y = np.concatenate([[self._baseline], y, [self._baseline]])
elif self._baseline.ndim == 1: # baseline array
base = np.repeat(self._baseline[idx0:idx1], 2)[::-1]
x = np.concatenate([x, x[::-1]])
y = np.concatenate([base[-1:], y, base[:1],
base[:1], base, base[-1:]])
else: # no baseline
raise ValueError('Invalid `baseline` specified')
if self.orientation == 'vertical':
xy = np.column_stack([x, y])
else:
xy = np.column_stack([y, x])
verts.append(xy)
codes.append([Path.MOVETO] + [Path.LINETO]*(len(xy)-1))
self._path = Path(np.concatenate(verts), np.concatenate(codes))
def get_data(self):
"""Get `.StepPatch` values, edges and baseline as namedtuple."""
StairData = namedtuple('StairData', 'values edges baseline')
return StairData(self._values, self._edges, self._baseline)
def set_data(self, values=None, edges=None, baseline=None):
"""
Set `.StepPatch` values, edges and baseline.
Parameters
----------
values : 1D array-like or None
Will not update values, if passing None
edges : 1D array-like, optional
baseline : float, 1D array-like or None
"""
if values is None and edges is None and baseline is None:
raise ValueError("Must set *values*, *edges* or *baseline*.")
if values is not None:
self._values = np.asarray(values)
if edges is not None:
self._edges = np.asarray(edges)
if baseline is not None:
self._baseline = np.asarray(baseline)
self._update_path()
self.stale = True
| StepPatch |
python | astropy__astropy | astropy/units/core.py | {
"start": 78288,
"end": 78503
} | class ____(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
| PrefixUnit |
python | mlflow__mlflow | mlflow/gateway/providers/base.py | {
"start": 5046,
"end": 6363
} | class ____(ABC):
@classmethod
@abstractmethod
def model_to_embeddings(cls, resp, config): ...
@classmethod
@abstractmethod
def model_to_completions(cls, resp, config): ...
@classmethod
def model_to_completions_streaming(cls, resp, config):
raise NotImplementedError
@classmethod
@abstractmethod
def completions_to_model(cls, payload, config): ...
@classmethod
def completions_streaming_to_model(cls, payload, config):
raise NotImplementedError
@classmethod
def model_to_chat(cls, resp, config):
raise NotImplementedError
@classmethod
def model_to_chat_streaming(cls, resp, config):
raise NotImplementedError
@classmethod
def chat_to_model(cls, payload, config):
raise NotImplementedError
@classmethod
def chat_streaming_to_model(cls, payload, config):
raise NotImplementedError
@classmethod
@abstractmethod
def embeddings_to_model(cls, payload, config): ...
@classmethod
def check_keys_against_mapping(cls, mapping, payload):
for k1, k2 in mapping.items():
if k2 in payload:
raise AIGatewayException(
status_code=400, detail=f"Invalid parameter {k2}. Use {k1} instead."
)
| ProviderAdapter |
python | has2k1__plotnine | plotnine/animation.py | {
"start": 456,
"end": 7521
} | class ____(ArtistAnimation):
"""
Animation using ggplot objects
Parameters
----------
plots :
ggplot objects that make up the the frames of the animation
interval : int
Delay between frames in milliseconds. Defaults to 200.
repeat_delay : int
If the animation in repeated, adds a delay in milliseconds
before repeating the animation. Defaults to `None`.
repeat : bool
Controls whether the animation should repeat when the sequence
of frames is completed. Defaults to `True`.
blit : bool
Controls whether blitting is used to optimize drawing. Defaults
to `False`.
Notes
-----
1. The plots should have the same `facet` and
the facet should not have fixed x and y scales.
2. The scales of all the plots should have the same limits. It is
a good idea to create a scale (with limits) for each aesthetic
and add them to all the plots.
"""
def __init__(
self,
plots: Iterable[ggplot],
interval: int = 200,
repeat_delay: int | None = None,
repeat: bool = True,
blit: bool = False,
):
figure, artists = self._draw_plots(plots)
ArtistAnimation.__init__(
self,
figure,
artists,
interval=interval,
repeat_delay=repeat_delay,
repeat=repeat,
blit=blit,
)
def _draw_plots(
self, plots: Iterable[ggplot]
) -> tuple[Figure, list[list[Artist]]]:
"""
Plot and return the figure and artists
Parameters
----------
plots : iterable
ggplot objects that make up the the frames of the animation
Returns
-------
figure
Matplotlib figure
artists
List of [](`Matplotlib.artist.Artist`)
"""
import matplotlib.pyplot as plt
# For keeping track of artists for each frame
artist_offsets: dict[str, list[int]] = {
"collections": [],
"patches": [],
"lines": [],
"texts": [],
"artists": [],
}
scale_limits = {}
def initialise_artist_offsets(n: int):
"""
Initialise artists_offsets arrays to zero
Parameters
----------
n : int
Number of axes to initialise artists for.
The artists for each axes are tracked separately.
"""
for artist_type in artist_offsets:
artist_offsets[artist_type] = [0] * n
def get_frame_artists(axs: list[Axes]) -> list[Artist]:
"""
Artists shown in a given frame
Parameters
----------
axs : list[Axes]
Matplotlib axes that have had artists added
to them.
"""
# The axes accumulate artists for all frames
# For each frame we pickup the newly added artists
# We use offsets to mark the end of the previous frame
# e.g ax.collections[start:]
frame_artists = []
for i, ax in enumerate(axs):
for name in artist_offsets:
start = artist_offsets[name][i]
new_artists = getattr(ax, name)[start:]
frame_artists.extend(new_artists)
artist_offsets[name][i] += len(new_artists)
return frame_artists
def set_scale_limits(scales: list[scale]):
"""
Set limits of all the scales in the animation
Should be called before `check_scale_limits`.
Parameters
----------
scales : list[scales]
List of scales the have been used in building a
ggplot object.
"""
for sc in scales:
ae = sc.aesthetics[0]
scale_limits[ae] = sc.final_limits
def check_scale_limits(scales: list[scale], frame_no: int):
"""
Check limits of the scales of a plot in the animation
Raises a PlotnineError if any of the scales has limits
that do not match those of the first plot/frame.
Should be called after `set_scale_limits`.
Parameters
----------
scales : list[scales]
List of scales the have been used in building a
ggplot object.
frame_no : int
Frame number
"""
if len(scale_limits) != len(scales):
raise PlotnineError(
"All plots must have the same number of scales "
"as the first plot of the animation."
)
for sc in scales:
ae = sc.aesthetics[0]
if ae not in scale_limits:
raise PlotnineError(
f"The plot for frame {frame_no} does not "
f"have a scale for the {ae} aesthetic."
)
if sc.final_limits != scale_limits[ae]:
raise PlotnineError(
f"The {ae} scale of plot for frame {frame_no} has "
"different limits from those of the first frame."
)
figure: Figure | None = None
axs: list[Axes] = []
artists = []
scales = None # Will hold the scales of the first frame
# The first ggplot creates the figure, axes and the initial
# frame of the animation. The rest of the ggplots draw
# onto the figure and axes created by the first ggplot and
# they create the subsequent frames.
for frame_no, p in enumerate(plots):
if figure is None:
figure = p.draw()
axs = figure.get_axes()
initialise_artist_offsets(len(axs))
scales = p._build_objs.scales
set_scale_limits(scales)
else:
plot = self._draw_animation_plot(p, figure, axs)
check_scale_limits(plot.scales, frame_no)
artists.append(get_frame_artists(axs))
if figure is None:
figure = plt.figure()
assert figure is not None
# Prevent Jupyter from plotting any static figure
plt.close(figure)
return figure, artists
def _draw_animation_plot(
self, plot: ggplot, figure: Figure, axs: list[Axes]
) -> ggplot:
"""
Draw a plot/frame of the animation
This methods draws plots from the 2nd onwards
"""
from ._utils.context import plot_context
plot = deepcopy(plot)
plot.figure = figure
plot.axs = axs
with plot_context(plot):
plot._build()
plot.axs = plot.facet.setup(plot)
plot._draw_layers()
return plot
| PlotnineAnimation |
python | facebook__pyre-check | tools/incremental_test/tests/batch_tests.py | {
"start": 577,
"end": 2484
} | class ____:
"""
Mock an environment such that:
- `pyre check` returns error on hash1
- `pyre check` is clean on hash2
- `pyre check` crashes on hash3
- `pyre incremental` always returns cleanly for all commits
"""
_current_commit: str
_error_output: CommandOutput
_clean_output: ClassVar[CommandOutput] = CommandOutput(
return_code=0, stdout="", stderr=""
)
def __init__(self, mock_error: PyreError) -> None:
self._current_commit = ""
self._error_output = CommandOutput(
return_code=1, stdout=json.dumps([asdict(mock_error)]), stderr=""
)
def get_check_result(self) -> CommandOutput:
if self._current_commit == "hash1":
return self._error_output
elif self._current_commit == "hash3":
raise RuntimeError("Intentionally crash the check")
else:
return self._clean_output
def get_incremental_result(self) -> CommandOutput:
return self._clean_output
def __call__(self, input: CommandInput) -> CommandOutput:
command = input.command
if command.startswith("hg update"):
new_commit = command.split()[-1]
self._current_commit = new_commit
return self._clean_output
elif "total_shared_memory_size_over_time" in command:
return CommandOutput(return_code=0, stdout='[["time", 42]]', stderr="")
elif "cold_start_phases" in command:
return CommandOutput(return_code=0, stdout="{}", stderr="")
elif " profile" in command:
return CommandOutput(return_code=0, stdout="[{}, {}, {}]", stderr="")
elif command.endswith("check"):
return self.get_check_result()
elif command.endswith("incremental"):
return self.get_incremental_result()
else:
return self._clean_output
| BasicExecute |
python | pyca__cryptography | tests/hazmat/asn1/test_serialization.py | {
"start": 3217,
"end": 3562
} | class ____:
def test_ok_printable_string(self) -> None:
assert_roundtrips(
[
(asn1.PrintableString(""), b"\x13\x00"),
(asn1.PrintableString("hello"), b"\x13\x05hello"),
(asn1.PrintableString("Test User 1"), b"\x13\x0bTest User 1"),
]
)
| TestPrintableString |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1529175,
"end": 1529871
} | class ____(sgqlc.types.Type, Node, Comment, Deletable, Reactable, UniformResourceLocatable, Updatable, UpdatableComment):
"""A comment on a team discussion."""
__schema__ = github_schema
__field_names__ = ("body_version", "discussion", "number")
body_version = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="bodyVersion")
"""The current version of the body content."""
discussion = sgqlc.types.Field(sgqlc.types.non_null(TeamDiscussion), graphql_name="discussion")
"""The discussion this comment is about."""
number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number")
"""Identifies the comment number."""
| TeamDiscussionComment |
python | facebookresearch__faiss | tests/test_index_accuracy.py | {
"start": 14765,
"end": 18657
} | class ____(unittest.TestCase):
# run on Dec 14, 2018
ref_results = {
(1, True): 800,
(1, True, 20): 794,
(1, False): 769,
(0, True): 831,
(0, True, 20): 828,
(0, False): 829,
}
def test_IVFPQ_IP(self):
self.subtest(faiss.METRIC_INNER_PRODUCT)
def test_IVFPQ_L2(self):
self.subtest(faiss.METRIC_L2)
def subtest(self, mt):
d = 32
xt, xb, xq = get_dataset_2(d, 2000, 1000, 200)
nlist = 64
gt_index = faiss.IndexFlat(d, mt)
gt_index.add(xb)
gt_D, gt_I = gt_index.search(xq, 10)
quantizer = faiss.IndexFlat(d, mt)
for by_residual in True, False:
index = faiss.IndexIVFPQ(quantizer, d, nlist, 4, 8)
index.metric_type = mt
index.by_residual = by_residual
if by_residual:
# perform cheap polysemous training
index.do_polysemous_training = True
pt = faiss.PolysemousTraining()
pt.n_iter = 50000
pt.n_redo = 1
index.polysemous_training = pt
index.train(xt)
index.add(xb)
index.nprobe = 4
D, I = index.search(xq, 10)
ninter = faiss.eval_intersection(I, gt_I)
assert abs(ninter - self.ref_results[mt, by_residual]) <= 3
index.use_precomputed_table = 0
D2, I2 = index.search(xq, 10)
assert np.all(I == I2)
if by_residual:
index.use_precomputed_table = 1
index.polysemous_ht = 20
D, I = index.search(xq, 10)
ninter = faiss.eval_intersection(I, gt_I)
# polysemous behaves bizarrely on ARM
assert (
ninter >= self.ref_results[mt, by_residual,
index.polysemous_ht] - 4
)
# also test range search
if mt == faiss.METRIC_INNER_PRODUCT:
radius = float(D[:, -1].max())
else:
radius = float(D[:, -1].min())
lims, D3, I3 = index.range_search(xq, radius)
ntot = ndiff = 0
for i in range(len(xq)):
l0, l1 = lims[i], lims[i + 1]
Inew = set(I3[l0:l1])
if mt == faiss.METRIC_INNER_PRODUCT:
mask = D2[i] > radius
else:
mask = D2[i] < radius
Iref = set(I2[i, mask])
ndiff += len(Inew ^ Iref)
ntot += len(Iref)
assert ndiff < ntot * 0.02
def test_IVFPQ_non8bit(self):
d = 16
xt, xb, xq = get_dataset_2(d, 10000, 2000, 200)
nlist = 64
gt_index = faiss.IndexFlat(d)
gt_index.add(xb)
gt_D, gt_I = gt_index.search(xq, 10)
quantizer = faiss.IndexFlat(d)
ninter = {}
for v in "2x8", "8x2":
if v == "8x2":
index = faiss.IndexIVFPQ(quantizer, d, nlist, 2, 8)
else:
index = faiss.IndexIVFPQ(quantizer, d, nlist, 8, 2)
index.train(xt)
index.add(xb)
index.nprobe = 16
D, I = index.search(xq, 10)
ninter[v] = faiss.eval_intersection(I, gt_I)
# this should be the case but we don't observe
# that... Probavly too few test points
# assert ninter['2x8'] > ninter['8x2']
# ref numbers updated on 2025-01-19 after fixing nprobe typo
# (was 'npobe')
# Old values were 458/465 based on broken test with default nprobe
# New values reflect proper nprobe=16 configuration with much better
# search quality
assert abs(ninter["2x8"] - 929) < 4
assert abs(ninter["8x2"] - 960) < 4
| TestPQFlavors |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_any.py | {
"start": 11389,
"end": 13297
} | class ____:
__pydantic_serializer__ = 42
__slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
def __init__(self, **kwargs):
fields = {}
for key, value in kwargs.items():
setattr(self, key, value)
fields[key] = core_schema.model_field(core_schema.any_schema())
self.__pydantic_serializer__ = SchemaSerializer(
core_schema.model_schema(FieldsSetModel, core_schema.model_fields_schema(fields))
)
def test_exclude_unset(any_serializer):
# copied from test of the same name in test_model.py
m = FieldsSetModel(foo=1, bar=2, spam=3, __pydantic_fields_set__={'bar', 'spam'})
assert any_serializer.to_python(m) == {'foo': 1, 'bar': 2, 'spam': 3}
assert any_serializer.to_python(m, exclude_unset=True) == {'bar': 2, 'spam': 3}
assert any_serializer.to_python(m, exclude=None, exclude_unset=True) == {'bar': 2, 'spam': 3}
assert any_serializer.to_python(m, exclude={'bar'}, exclude_unset=True) == {'spam': 3}
assert any_serializer.to_python(m, exclude={'bar': ...}, exclude_unset=True) == {'spam': 3}
assert any_serializer.to_python(m, exclude={'bar': {}}, exclude_unset=True) == {'bar': 2, 'spam': 3}
assert any_serializer.to_json(m, exclude=None, exclude_unset=True) == b'{"bar":2,"spam":3}'
assert any_serializer.to_json(m, exclude={'bar'}, exclude_unset=True) == b'{"spam":3}'
assert any_serializer.to_json(m, exclude={'bar': ...}, exclude_unset=True) == b'{"spam":3}'
assert any_serializer.to_json(m, exclude={'bar': {}}, exclude_unset=True) == b'{"bar":2,"spam":3}'
m2 = FieldsSetModel(foo=1, bar=2, spam=3, __pydantic_fields_set__={'bar', 'spam', 'missing'})
assert any_serializer.to_python(m2) == {'foo': 1, 'bar': 2, 'spam': 3}
assert any_serializer.to_python(m2, exclude_unset=True) == {'bar': 2, 'spam': 3}
| FieldsSetModel |
python | streamlit__streamlit | lib/tests/streamlit/util_test.py | {
"start": 713,
"end": 1492
} | class ____(unittest.TestCase):
"""Test Streamlit utility functions."""
def test_memoization(self):
"""Test that util.memoize works."""
def non_memoized_func():
return random.randint(0, 1000000)
yes_memoized_func = util.memoize(non_memoized_func)
assert non_memoized_func() != non_memoized_func()
assert yes_memoized_func() == yes_memoized_func()
def test_functools_wraps(self):
"""Test wrap for functools.wraps"""
import streamlit as st
@st.cache_data
def f():
return True
assert hasattr(f, "__wrapped__")
def test_calc_md5_can_handle_bytes_and_strings(self):
assert util.calc_md5("eventually bytes") == util.calc_md5(b"eventually bytes")
| UtilTest |
python | numba__numba | numba/core/target_extension.py | {
"start": 4051,
"end": 4606
} | class ____(Target):
"""Mark the target as a ufunc
"""
target_registry['generic'] = Generic
target_registry['CPU'] = CPU
target_registry['cpu'] = CPU
target_registry['GPU'] = GPU
target_registry['gpu'] = GPU
target_registry['CUDA'] = CUDA
target_registry['cuda'] = CUDA
target_registry['npyufunc'] = NPyUfunc
dispatcher_registry = DelayedRegistry(key_type=Target)
# Register the cpu target token with its dispatcher and jit
cpu_target = target_registry['cpu']
dispatcher_registry[cpu_target] = CPUDispatcher
jit_registry[cpu_target] = jit
| NPyUfunc |
python | charliermarsh__ruff | crates/ruff_python_parser/resources/valid/statement/class.py | {
"start": 376,
"end": 425
} | class ____[T = str](): ...
# TypeVar with bound
| Test |
python | pandas-dev__pandas | pandas/io/formats/info.py | {
"start": 16381,
"end": 19052
} | class ____(_InfoPrinterAbstract):
"""
Class for printing dataframe info.
Parameters
----------
info : DataFrameInfo
Instance of DataFrameInfo.
max_cols : int, optional
When to switch from the verbose to the truncated output.
verbose : bool, optional
Whether to print the full summary.
show_counts : bool, optional
Whether to show the non-null counts.
"""
def __init__(
self,
info: DataFrameInfo,
max_cols: int | None = None,
verbose: bool | None = None,
show_counts: bool | None = None,
) -> None:
self.info = info
self.data = info.data
self.verbose = verbose
self.max_cols = self._initialize_max_cols(max_cols)
self.show_counts = self._initialize_show_counts(show_counts)
@property
def max_rows(self) -> int:
"""Maximum info rows to be displayed."""
return get_option("display.max_info_rows")
@property
def exceeds_info_cols(self) -> bool:
"""Check if number of columns to be summarized does not exceed maximum."""
return bool(self.col_count > self.max_cols)
@property
def exceeds_info_rows(self) -> bool:
"""Check if number of rows to be summarized does not exceed maximum."""
return bool(len(self.data) > self.max_rows)
@property
def col_count(self) -> int:
"""Number of columns to be summarized."""
return self.info.col_count
def _initialize_max_cols(self, max_cols: int | None) -> int:
if max_cols is None:
return get_option("display.max_info_columns")
return max_cols
def _initialize_show_counts(self, show_counts: bool | None) -> bool:
if show_counts is None:
return bool(not self.exceeds_info_cols and not self.exceeds_info_rows)
else:
return show_counts
def _create_table_builder(self) -> _DataFrameTableBuilder:
"""
Create instance of table builder based on verbosity and display settings.
"""
if self.verbose:
return _DataFrameTableBuilderVerbose(
info=self.info,
with_counts=self.show_counts,
)
elif self.verbose is False: # specifically set to False, not necessarily None
return _DataFrameTableBuilderNonVerbose(info=self.info)
elif self.exceeds_info_cols:
return _DataFrameTableBuilderNonVerbose(info=self.info)
else:
return _DataFrameTableBuilderVerbose(
info=self.info,
with_counts=self.show_counts,
)
| _DataFrameInfoPrinter |
python | huggingface__transformers | src/transformers/models/edgetam_video/modeling_edgetam_video.py | {
"start": 28023,
"end": 28491
} | class ____(nn.Module):
def __init__(self, config: EdgeTamVideoConfig):
super().__init__()
self.layers = nn.ModuleList(
[EdgeTamVideoMemoryFuserCXBlock(config) for _ in range(config.memory_fuser_num_layers)]
)
def forward(self, hidden_states):
# normally hidden_states: (N, C, H, W)
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states
| EdgeTamVideoMemoryFuser |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_rich_string03.py | {
"start": 315,
"end": 1020
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("rich_string03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
worksheet.write("A1", "Foo", bold)
worksheet.write("A2", "Bar", italic)
worksheet.write_rich_string("A3", bold, "abc", "defg")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 9129,
"end": 12837
} | class ____(PForTestCase):
def test_reduce(self):
def reduce_fn(p, q):
return math_ops.reduce_mean(p + q, axis=0)
x = random_ops.random_uniform([4, 3, 2])
y = random_ops.random_uniform([4, 3, 2])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
reduced = pfor_config.reduce(reduce_fn, x_i, y_i)
return reduced + x_i
output = pfor_control_flow_ops.pfor(loop_fn, 4)
ans = reduce_fn(x, y) + x
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_concat(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
vectorized_value = pfor_config.reduce_concat(x_i)
mean_value = math_ops.reduce_mean(vectorized_value, axis=0)
return x_i - mean_value
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_mean(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_sum(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_sum(x_i)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_sum(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_class(self):
x = random_ops.random_uniform([8, 3])
class LoopFn:
def __init__(self):
pass
def __call__(self, i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
output = pfor_control_flow_ops.pfor(LoopFn(), 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_functools_partial(self):
x = random_ops.random_uniform([8, 3])
def fn(i, pfor_config, dummy=None):
del dummy
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
loop_fn = functools.partial(fn, dummy=1)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_parallel_iterations(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return pfor_config.reduce_sum(x_i)
with self.assertRaisesRegex(ValueError,
"`parallel_iterations` currently unsupported"):
pfor_control_flow_ops.pfor(loop_fn, 8, parallel_iterations=2)
def test_var_loop_len(self):
if context.executing_eagerly():
self.skipTest("Variable length not possible under eager execution.")
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
return pfor_config.reduce_sum(array_ops.gather(x, i))
num_iters = array_ops.placeholder(dtypes.int32)
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
with self.cached_session() as sess:
sess.run(pfor, feed_dict={num_iters: 8})
@test_util.run_all_in_graph_and_eager_modes
| ReductionTest |
python | huggingface__transformers | examples/pytorch/speech-recognition/run_speech_recognition_ctc.py | {
"start": 5657,
"end": 10925
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
dataset_name: str = field(
metadata={"help": "Path or name of the dataset (cf `load_dataset` method of the Datasets library)."}
)
dataset_config_name: str = field(
default=None,
metadata={
"help": "The configuration name of the dataset to use (cf `load_dataset` method of the Datasets library)."
},
)
train_split_name: str = field(
default="train+validation",
metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to "
"'train+validation'"
)
},
)
eval_split_name: str = field(
default="test",
metadata={
"help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
},
)
audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
text_column_name: str = field(
default="text",
metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
)
overwrite_cache: bool = field(
default=False,
metadata={"help": "Overwrite the cached preprocessed datasets or not."},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
},
)
chars_to_ignore: Optional[list[str]] = list_field(
default=None,
metadata={"help": "A list of characters to remove from the transcripts."},
)
eval_metrics: list[str] = list_field(
default=["wer"],
metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
)
max_duration_in_seconds: float = field(
default=20.0,
metadata={
"help": (
"Filter audio files that are longer than `max_duration_in_seconds` seconds to"
" 'max_duration_in_seconds`"
)
},
)
min_duration_in_seconds: float = field(
default=0.0,
metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"},
)
preprocessing_only: bool = field(
default=False,
metadata={
"help": (
"Whether to only do data preprocessing and skip training. This is especially useful when data"
" preprocessing errors out in distributed training due to timeout. In this case, one should run the"
" preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets"
" can consequently be loaded in distributed training"
)
},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
unk_token: str = field(
default="[UNK]",
metadata={"help": "The unk token for the tokenizer"},
)
pad_token: str = field(
default="[PAD]",
metadata={"help": "The padding token for the tokenizer"},
)
word_delimiter_token: str = field(
default="|",
metadata={"help": "The word delimiter token for the tokenizer"},
)
phoneme_language: Optional[str] = field(
default=None,
metadata={
"help": (
"The target language that should be used be"
" passed to the tokenizer for tokenization. Note that"
" this is only relevant if the model classifies the"
" input audio to a sequence of phoneme sequences."
)
},
)
@dataclass
| DataTrainingArguments |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/overlapping_fields_can_be_merged.py | {
"start": 530,
"end": 24145
} | class ____(ValidationRule):
__slots__ = ('_compared_fragments', '_cached_fields_and_fragment_names', )
def __init__(self, context):
super(OverlappingFieldsCanBeMerged, self).__init__(context)
# A memoization for when two fragments are compared "between" each other for
# conflicts. Two fragments may be compared many times, so memoizing this can
# dramatically improve the performance of this validator.
self._compared_fragments = PairSet()
# A cache for the "field map" and list of fragment names found in any given
# selection set. Selection sets may be asked for this information multiple
# times, so this improves the performance of this validator.
self._cached_fields_and_fragment_names = {}
def leave_SelectionSet(self, node, key, parent, path, ancestors):
# Note: we validate on the reverse traversal so deeper conflicts will be
# caught first, for correct calculation of mutual exclusivity and for
# clearer error messages.
# field_map = _collect_field_asts_and_defs(
# self.context,
# self.context.get_parent_type(),
# node
# )
# conflicts = _find_conflicts(self.context, False, field_map, self.compared_set)
conflicts = _find_conflicts_within_selection_set(self.context, self._cached_fields_and_fragment_names,
self._compared_fragments, self.context.get_parent_type(),
node)
for (reason_name, reason), fields1, fields2 in conflicts:
self.context.report_error(GraphQLError(
self.fields_conflict_message(reason_name, reason),
list(fields1) + list(fields2)
))
@staticmethod
def same_type(type1, type2):
return is_equal_type(type1, type2)
# return type1.is_same_type(type2)
@classmethod
def fields_conflict_message(cls, reason_name, reason):
return (
'Fields "{}" conflict because {}. '
'Use different aliases on the fields to fetch both if this was '
'intentional.'
).format(reason_name, cls.reason_message(reason))
@classmethod
def reason_message(cls, reason):
if isinstance(reason, list):
return ' and '.join('subfields "{}" conflict because {}'.format(reason_name, cls.reason_message(sub_reason))
for reason_name, sub_reason in reason)
return reason
# Algorithm:
#
# Conflicts occur when two fields exist in a query which will produce the same
# response name, but represent differing values, thus creating a conflict.
# The algorithm below finds all conflicts via making a series of comparisons
# between fields. In order to compare as few fields as possible, this makes
# a series of comparisons "within" sets of fields and "between" sets of fields.
#
# Given any selection set, a collection produces both a set of fields by
# also including all inline fragments, as well as a list of fragments
# referenced by fragment spreads.
#
# A) Each selection set represented in the document first compares "within" its
# collected set of fields, finding any conflicts between every pair of
# overlapping fields.
# Note: This is the only time that a the fields "within" a set are compared
# to each other. After this only fields "between" sets are compared.
#
# B) Also, if any fragment is referenced in a selection set, then a
# comparison is made "between" the original set of fields and the
# referenced fragment.
#
# C) Also, if multiple fragments are referenced, then comparisons
# are made "between" each referenced fragment.
#
# D) When comparing "between" a set of fields and a referenced fragment, first
# a comparison is made between each field in the original set of fields and
# each field in the the referenced set of fields.
#
# E) Also, if any fragment is referenced in the referenced selection set,
# then a comparison is made "between" the original set of fields and the
# referenced fragment (recursively referring to step D).
#
# F) When comparing "between" two fragments, first a comparison is made between
# each field in the first referenced set of fields and each field in the the
# second referenced set of fields.
#
# G) Also, any fragments referenced by the first must be compared to the
# second, and any fragments referenced by the second must be compared to the
# first (recursively referring to step F).
#
# H) When comparing two fields, if both have selection sets, then a comparison
# is made "between" both selection sets, first comparing the set of fields in
# the first selection set with the set of fields in the second.
#
# I) Also, if any fragment is referenced in either selection set, then a
# comparison is made "between" the other set of fields and the
# referenced fragment.
#
# J) Also, if two fragments are referenced in both selection sets, then a
# comparison is made "between" the two fragments.
def _find_conflicts_within_selection_set(context, cached_fields_and_fragment_names, compared_fragments, parent_type,
selection_set):
"""Find all conflicts found "within" a selection set, including those found via spreading in fragments.
Called when visiting each SelectionSet in the GraphQL Document.
"""
conflicts = []
field_map, fragment_names = _get_fields_and_fragments_names(context, cached_fields_and_fragment_names, parent_type,
selection_set)
# (A) Find all conflicts "within" the fields of this selection set.
# Note: this is the *only place* `collect_conflicts_within` is called.
_collect_conflicts_within(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
field_map
)
# (B) Then collect conflicts between these fields and those represented by
# each spread fragment name found.
for i, fragment_name in enumerate(fragment_names):
_collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
False,
field_map,
fragment_name,
)
# (C) Then compare this fragment with all other fragments found in this
# selection set to collect conflicts within fragments spread together.
# This compares each item in the list of fragment names to every other item
# in that same list (except for itself).
for other_fragment_name in fragment_names[i+1:]:
_collect_conflicts_between_fragments(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
False,
fragment_name,
other_fragment_name,
)
return conflicts
def _collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive, field_map,
fragment_name):
fragment = context.get_fragment(fragment_name)
if not fragment:
return None
field_map2, fragment_names2 = _get_referenced_fields_and_fragment_names(context, cached_fields_and_fragment_names,
fragment)
# (D) First collect any conflicts between the provided collection of fields
# and the collection of fields represented by the given fragment.
_collect_conflicts_between(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, field_map, field_map2)
# (E) Then collect any conflicts between the provided collection of fields
# and any fragment names found in the given fragment.
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive, field_map,
fragment_name2)
# Collect all conflicts found between two fragments, including via spreading in
# any nested fragments
def _collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, fragment_name1, fragment_name2):
fragment1 = context.get_fragment(fragment_name1)
fragment2 = context.get_fragment(fragment_name2)
if not fragment1 or not fragment2:
return None
# No need to compare a fragment to itself.
if fragment1 == fragment2:
return None
# Memoize so two fragments are not compared for conflicts more than once.
if compared_fragments.has(fragment_name1, fragment_name2, are_mutually_exclusive):
return None
compared_fragments.add(fragment_name1, fragment_name2, are_mutually_exclusive)
field_map1, fragment_names1 = _get_referenced_fields_and_fragment_names(context, cached_fields_and_fragment_names,
fragment1)
field_map2, fragment_names2 = _get_referenced_fields_and_fragment_names(context, cached_fields_and_fragment_names,
fragment2)
# (F) First, collect all conflicts between these two collections of fields
# (not including any nested fragments)
_collect_conflicts_between(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, field_map1, field_map2)
# (G) Then collect conflicts between the first fragment and any nested
# fragments spread in the second fragment.
for _fragment_name2 in fragment_names2:
_collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, fragment_name1, _fragment_name2)
# (G) Then collect conflicts between the second fragment and any nested
# fragments spread in the first fragment.
for _fragment_name1 in fragment_names1:
_collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, _fragment_name1, fragment_name2)
def _find_conflicts_between_sub_selection_sets(context, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, parent_type1, selection_set1,
parent_type2, selection_set2):
"""Find all conflicts found between two selection sets.
Includes those found via spreading in fragments. Called when determining if conflicts exist
between the sub-fields of two overlapping fields.
"""
conflicts = []
field_map1, fragment_names1 = _get_fields_and_fragments_names(context, cached_fields_and_fragment_names,
parent_type1, selection_set1)
field_map2, fragment_names2 = _get_fields_and_fragments_names(context, cached_fields_and_fragment_names,
parent_type2, selection_set2)
# (H) First, collect all conflicts between these two collections of field.
_collect_conflicts_between(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, field_map1, field_map2)
# (I) Then collect conflicts between the first collection of fields and
# those referenced by each fragment name associated with the second.
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive, field_map1,
fragment_name2)
# (I) Then collect conflicts between the second collection of fields and
# those referenced by each fragment name associated with the first.
for fragment_name1 in fragment_names1:
_collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive, field_map2,
fragment_name1)
# (J) Also collect conflicts between any fragment names by the first and
# fragment names by the second. This compares each item in the first set of
# names to each item in the second set of names.
for fragment_name1 in fragment_names1:
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive,
fragment_name1, fragment_name2)
return conflicts
def _collect_conflicts_within(context, conflicts, cached_fields_and_fragment_names, compared_fragments, field_map):
"""Collect all Conflicts "within" one collection of fields."""
# field map is a keyed collection, where each key represents a response
# name and the value at that key is a list of all fields which provide that
# response name. For every response name, if there are multiple fields, they
# must be compared to find a potential conflict.
for response_name, fields in list(field_map.items()):
# This compares every field in the list to every other field in this list
# (except to itself). If the list only has one item, nothing needs to
# be compared.
for i, field in enumerate(fields):
for other_field in fields[i+1:]:
# within one collection is never mutually exclusive
conflict = _find_conflict(context, cached_fields_and_fragment_names, compared_fragments, False,
response_name, field, other_field)
if conflict:
conflicts.append(conflict)
def _collect_conflicts_between(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
parent_fields_are_mutually_exclusive, field_map1, field_map2):
"""Collect all Conflicts between two collections of fields.
This is similar to, but different from the `collect_conflicts_within` function above. This check assumes that
`collect_conflicts_within` has already been called on each provided collection of fields.
This is true because this validator traverses each individual selection set.
"""
# A field map is a keyed collection, where each key represents a response
# name and the value at that key is a list of all fields which provide that
# response name. For any response name which appears in both provided field
# maps, each field from the first field map must be compared to every field
# in the second field map to find potential conflicts.
for response_name, fields1 in list(field_map1.items()):
fields2 = field_map2.get(response_name)
if fields2:
for field1 in fields1:
for field2 in fields2:
conflict = _find_conflict(context, cached_fields_and_fragment_names, compared_fragments,
parent_fields_are_mutually_exclusive, response_name, field1, field2)
if conflict:
conflicts.append(conflict)
def _find_conflict(context, cached_fields_and_fragment_names, compared_fragments, parent_fields_are_mutually_exclusive,
response_name, field1, field2):
"""Determines if there is a conflict between two particular fields."""
parent_type1, ast1, def1 = field1
parent_type2, ast2, def2 = field2
# If it is known that two fields could not possibly apply at the same
# time, due to the parent types, then it is safe to permit them to diverge
# in aliased field or arguments used as they will not present any ambiguity
# by differing.
# It is known that two parent types could never overlap if they are
# different Object types. Interface or Union types might overlap - if not
# in the current state of the schema, then perhaps in some future version,
# thus may not safely diverge.
are_mutually_exclusive = (
parent_fields_are_mutually_exclusive or (
parent_type1 != parent_type2 and
isinstance(parent_type1, GraphQLObjectType) and
isinstance(parent_type2, GraphQLObjectType)
)
)
# The return type for each field.
type1 = def1 and def1.type
type2 = def2 and def2.type
if not are_mutually_exclusive:
# Two aliases must refer to the same field.
name1 = ast1.name.value
name2 = ast2.name.value
if name1 != name2:
return (
(response_name, '{} and {} are different fields'.format(name1, name2)),
[ast1],
[ast2]
)
# Two field calls must have the same arguments.
if not _same_arguments(ast1.arguments, ast2.arguments):
return (
(response_name, 'they have differing arguments'),
[ast1],
[ast2]
)
if type1 and type2 and do_types_conflict(type1, type2):
return (
(response_name, 'they return conflicting types {} and {}'.format(type1, type2)),
[ast1],
[ast2]
)
# Collect and compare sub-fields. Use the same "visited fragment names" list
# for both collections so fields in a fragment reference are never
# compared to themselves.
selection_set1 = ast1.selection_set
selection_set2 = ast2.selection_set
if selection_set1 and selection_set2:
conflicts = _find_conflicts_between_sub_selection_sets(context, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive,
get_named_type(type1), selection_set1,
get_named_type(type2), selection_set2)
return _subfield_conflicts(conflicts, response_name, ast1, ast2)
def _get_fields_and_fragments_names(context, cached_fields_and_fragment_names, parent_type, selection_set):
cached = cached_fields_and_fragment_names.get(selection_set)
if not cached:
ast_and_defs = OrderedDict()
fragment_names = OrderedDict()
_collect_fields_and_fragment_names(context, parent_type, selection_set, ast_and_defs, fragment_names)
cached = [ast_and_defs, list(fragment_names.keys())]
cached_fields_and_fragment_names[selection_set] = cached
return cached
def _get_referenced_fields_and_fragment_names(context, cached_fields_and_fragment_names, fragment):
"""Given a reference to a fragment, return the represented collection of fields as well as a list of
nested fragment names referenced via fragment spreads."""
# Short-circuit building a type from the AST if possible.
cached = cached_fields_and_fragment_names.get(fragment.selection_set)
if cached:
return cached
fragment_type = type_from_ast(context.get_schema(), fragment.type_condition)
return _get_fields_and_fragments_names(context, cached_fields_and_fragment_names,
fragment_type, fragment.selection_set)
def _collect_fields_and_fragment_names(context, parent_type, selection_set, ast_and_defs, fragment_names):
for selection in selection_set.selections:
if isinstance(selection, ast.Field):
field_name = selection.name.value
if isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):
field_def = parent_type.fields.get(field_name)
else:
field_def = None
response_name = selection.alias.value if selection.alias else field_name
if not ast_and_defs.get(response_name):
ast_and_defs[response_name] = []
ast_and_defs[response_name].append([parent_type, selection, field_def])
elif isinstance(selection, ast.FragmentSpread):
fragment_names[selection.name.value] = True
elif isinstance(selection, ast.InlineFragment):
type_condition = selection.type_condition
if type_condition:
inline_fragment_type = type_from_ast(context.get_schema(), selection.type_condition)
else:
inline_fragment_type = parent_type
_collect_fields_and_fragment_names(context, inline_fragment_type, selection.selection_set, ast_and_defs,
fragment_names)
def _subfield_conflicts(conflicts, response_name, ast1, ast2):
"""Given a series of Conflicts which occurred between two sub-fields, generate a single Conflict."""
if conflicts:
return (
(response_name, [conflict[0] for conflict in conflicts]),
tuple(itertools.chain([ast1], *[conflict[1] for conflict in conflicts])),
tuple(itertools.chain([ast2], *[conflict[2] for conflict in conflicts]))
)
def do_types_conflict(type1, type2):
if isinstance(type1, GraphQLList):
if isinstance(type2, GraphQLList):
return do_types_conflict(type1.of_type, type2.of_type)
return True
if isinstance(type2, GraphQLList):
if isinstance(type1, GraphQLList):
return do_types_conflict(type1.of_type, type2.of_type)
return True
if isinstance(type1, GraphQLNonNull):
if isinstance(type2, GraphQLNonNull):
return do_types_conflict(type1.of_type, type2.of_type)
return True
if isinstance(type2, GraphQLNonNull):
if isinstance(type1, GraphQLNonNull):
return do_types_conflict(type1.of_type, type2.of_type)
return True
if is_leaf_type(type1) or is_leaf_type(type2):
return type1 != type2
return False
def _same_value(value1, value2):
return (not value1 and not value2) or print_ast(value1) == print_ast(value2)
def _same_arguments(arguments1, arguments2):
    """Return True when two argument lists are equivalent, ignoring order.

    Equivalent means the same argument names appear on both sides and every
    name maps to the same printed AST value (see _same_value).
    """
    # Fast path: both lists empty or None.
    if not (arguments1 or arguments2):
        return True
    # Different lengths can never match; bail out before any comparison.
    if len(arguments1) != len(arguments2):
        return False
    # Index the second list by argument name for direct lookup.
    by_name = {arg.name.value: arg for arg in arguments2}
    for arg1 in arguments1:
        arg2 = by_name.get(arg1.name.value)
        if not arg2 or not _same_value(arg1.value, arg2.value):
            return False
    return True
| OverlappingFieldsCanBeMerged |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_wikipedia_articles.py | {
"start": 2394,
"end": 8827
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid Wikipedia article titles/slugs.
It simply plugs the column value into the Wikipedia URL and checks whether the HTTP status \
code is 200. This Expectation can be used as a template for other (or a more generic) "does this website \
exist" type checks, e.g. for things like user handles, dictionary entries, etc.
"""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"a": ["Super_Bowl", "Tom_Brady", "Kansas_City_Chiefs"],
"b": ["peytonman", "theweekeeend", "Super_Bowl"],
},
"tests": [
{
"title": "positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a"},
"out": {
"success": True,
},
},
{
"title": "negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "b"},
"out": {
"success": False,
"unexpected_index_list": [0, 1],
"unexpected_list": ["peytonman", "theweekeeend"],
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": ["experimental"], # Tags for this Expectation in the gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@annaliuu",
"@wangzhongyi0510",
],
"requirements": ["requests"],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_wikipedia_articles"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/expectations/expectations.html#expectation-concepts-domain-and-success-keys
# for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see
# https://docs.greatexpectations.io/en/latest/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.html
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
ExpectColumnValuesToBeValidWikipediaArticles().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidWikipediaArticles |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/dagster_types.py | {
"start": 4329,
"end": 4819
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneRegularDagsterType,
GraphenePipelineNotFoundError,
GrapheneDagsterTypeNotFoundError,
GraphenePythonError,
)
name = "DagsterTypeOrError"
types = [
GrapheneDagsterType,
GrapheneDagsterTypeOrError,
GrapheneListDagsterType,
GrapheneNullableDagsterType,
GrapheneRegularDagsterType,
GrapheneWrappingDagsterType,
]
| GrapheneDagsterTypeOrError |
python | mwaskom__seaborn | seaborn/_core/scales.py | {
"start": 12915,
"end": 13028
} | class ____(Scale):
# Categorical (convert to strings), sortable, can skip ticklabels
...
@dataclass
| Ordinal |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.