sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
saleor/saleor:saleor/graphql/product/tests/queries/variants_where/test_over_attributes_datetime.py | import datetime
import pytest
from ......attribute import AttributeInputType, AttributeType
from ......attribute.models import Attribute
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import PRODUCT_VARIANTS_WHERE_QUERY
@pytest.mark.parametrize(
("date_time_input", "expected_count"),
[
({"slug": "dt", "value": {"name": {"eq": "datetime-name-1"}}}, 1),
({"slug": "dt", "value": {"slug": {"eq": "datetime-slug-1"}}}, 1),
(
{
"slug": "dt",
"value": {"name": {"oneOf": ["datetime-name-1", "datetime-name-2"]}},
},
2,
),
(
{
"slug": "dt",
"value": {"slug": {"oneOf": ["datetime-slug-1", "datetime-slug-2"]}},
},
2,
),
({"slug": "dt", "value": {"dateTime": {"gte": "2021-01-01T00:00:00Z"}}}, 2),
(
{
"slug": "dt",
"value": {
"dateTime": {
"gte": "2021-01-01T00:00:00Z",
"lte": "2021-01-02T00:00:00Z",
}
},
},
1,
),
({"value": {"name": {"eq": "datetime-name-1"}}}, 1),
({"value": {"slug": {"eq": "datetime-slug-1"}}}, 1),
({"value": {"name": {"oneOf": ["datetime-name-1", "datetime-name-2"]}}}, 2),
({"value": {"slug": {"oneOf": ["datetime-slug-1", "datetime-slug-2"]}}}, 2),
({"value": {"dateTime": {"gte": "2021-01-01T00:00:00Z"}}}, 3),
(
{
"value": {
"dateTime": {
"gte": "2021-01-01T00:00:00Z",
"lte": "2021-01-02T00:00:00Z",
}
}
},
2,
),
],
)
def test_product_variants_query_with_attribute_value_date_time(
date_time_input,
expected_count,
staff_api_client,
product_variant_list,
date_time_attribute,
channel_USD,
):
# given
product = product_variant_list[0].product
product_type = product.product_type
date_time_attribute.slug = "dt"
date_time_attribute.type = "PRODUCT_TYPE"
date_time_attribute.save()
second_date_attribute = Attribute.objects.create(
slug="second_dt",
name="Second dt",
type=AttributeType.PRODUCT_TYPE,
input_type=AttributeInputType.DATE_TIME,
)
product_type.variant_attributes.set([date_time_attribute, second_date_attribute])
attr_value_1 = date_time_attribute.values.first()
attr_value_1.date_time = datetime.datetime(2021, 1, 3, tzinfo=datetime.UTC)
attr_value_1.name = "datetime-name-1"
attr_value_1.slug = "datetime-slug-1"
attr_value_1.save()
associate_attribute_values_to_instance(
product_variant_list[0], {date_time_attribute.pk: [attr_value_1]}
)
second_attr_value = date_time_attribute.values.last()
second_attr_value.date_time = datetime.datetime(2021, 1, 1, tzinfo=datetime.UTC)
second_attr_value.name = "datetime-name-2"
second_attr_value.slug = "datetime-slug-2"
second_attr_value.save()
associate_attribute_values_to_instance(
product_variant_list[1], {date_time_attribute.pk: [second_attr_value]}
)
value_for_second_attr = second_date_attribute.values.create(
date_time=datetime.datetime(2021, 1, 1, tzinfo=datetime.UTC),
name="second-datetime-name",
slug="second-datetime-slug",
)
associate_attribute_values_to_instance(
product_variant_list[3], {second_date_attribute.pk: [value_for_second_attr]}
)
variables = {
"where": {"attributes": [date_time_input]},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response)
variants_nodes = content["data"]["productVariants"]["edges"]
assert len(variants_nodes) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/variants_where/test_over_attributes_datetime.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/variants_where/test_over_attributes_numeric.py | import pytest
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import PRODUCT_VARIANTS_WHERE_QUERY
@pytest.mark.parametrize(
("numeric_input", "expected_count"),
[
({"slug": "num-slug", "value": {"numeric": {"eq": 1.2}}}, 1),
({"slug": "num-slug", "value": {"numeric": {"oneOf": [1.2, 2]}}}, 2),
(
{"slug": "num-slug", "value": {"numeric": {"range": {"gte": 1, "lte": 2}}}},
2,
),
({"slug": "num-slug", "value": {"name": {"eq": "1.2"}}}, 1),
({"slug": "num-slug", "value": {"slug": {"eq": "1.2"}}}, 1),
({"slug": "num-slug", "value": {"name": {"oneOf": ["1.2", "2"]}}}, 2),
({"slug": "num-slug", "value": {"slug": {"oneOf": ["1.2", "2"]}}}, 2),
({"value": {"numeric": {"eq": 1.2}}}, 1),
({"value": {"numeric": {"oneOf": [1.2, 2]}}}, 2),
({"value": {"numeric": {"range": {"gte": 1, "lte": 2}}}}, 2),
({"value": {"numeric": {"range": {"gte": 1}}}}, 3),
({"value": {"name": {"eq": "1.2"}}}, 1),
({"value": {"slug": {"eq": "1.2"}}}, 1),
({"value": {"name": {"oneOf": ["1.2", "2"]}}}, 2),
({"value": {"slug": {"oneOf": ["1.2", "2"]}}}, 2),
],
)
def test_product_variants_query_with_attribute_value_numeric(
numeric_input,
expected_count,
staff_api_client,
product_type,
product_variant_list,
numeric_attribute_without_unit,
numeric_attribute,
channel_USD,
):
# given
numeric_attribute_without_unit.slug = "num-slug"
numeric_attribute_without_unit.save()
product_type.variant_attributes.set(
[numeric_attribute_without_unit, numeric_attribute]
)
attr_value_1 = numeric_attribute_without_unit.values.first()
attr_value_1.name = "1.2"
attr_value_1.slug = "1.2"
attr_value_1.numeric = 1.2
attr_value_1.save()
attr_value_2 = numeric_attribute_without_unit.values.last()
attr_value_2.name = "2"
attr_value_2.slug = "2"
attr_value_2.numeric = 2
attr_value_2.save()
second_attr_value = numeric_attribute.values.first()
associate_attribute_values_to_instance(
product_variant_list[0],
{
numeric_attribute_without_unit.pk: [attr_value_1],
},
)
associate_attribute_values_to_instance(
product_variant_list[1], {numeric_attribute_without_unit.pk: [attr_value_2]}
)
associate_attribute_values_to_instance(
product_variant_list[3], {numeric_attribute.pk: [second_attr_value]}
)
variables = {"where": {"attributes": [numeric_input]}, "channel": channel_USD.slug}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response)
product_variants_nodes = content["data"]["productVariants"]["edges"]
assert len(product_variants_nodes) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/variants_where/test_over_attributes_numeric.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/variants_where/test_over_multiple_arguments.py | import pytest
from ......attribute import AttributeEntityType, AttributeInputType, AttributeType
from ......attribute.models.base import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from ......page.models import Page
from ......product.models import Product
from .....core.utils import to_global_id_or_none
from .....tests.utils import get_graphql_content
from .shared import PRODUCT_VARIANTS_WHERE_QUERY
@pytest.mark.parametrize(
"attribute_filter",
[
# Non-existing attribute slug
[{"slug": "non-existing-attribute"}],
# Existing attribute with non-existing value name
[{"slug": "tag", "value": {"name": {"eq": "Non-existing Name"}}}],
[{"value": {"name": {"eq": "Non-existing Name"}}}],
# Existing numeric attribute with out-of-range value
[{"slug": "count", "value": {"numeric": {"eq": 999}}}],
[{"value": {"numeric": {"eq": 999}}}],
# Existing boolean attribute with no matching boolean value
[{"slug": "boolean", "value": {"boolean": False}}],
[{"value": {"boolean": False}}],
# Multiple attributes where one doesn't exist
[
{"slug": "weight_attribute", "value": {"slug": {"eq": "cotton"}}},
{"slug": "non-existing-attr", "value": {"slug": {"eq": "some-value"}}},
],
[
{"value": {"slug": {"eq": "large"}}},
{"slug": "non-existing-attr", "value": {"slug": {"eq": "some-value"}}},
],
],
)
def test_product_variants_query_with_non_matching_records(
attribute_filter,
staff_api_client,
product_variant_list,
weight_attribute,
tag_page_attribute,
boolean_attribute,
numeric_attribute_without_unit,
date_attribute,
date_time_attribute,
channel_USD,
):
# given
tag_attribute = tag_page_attribute
tag_attribute.type = "PRODUCT_TYPE"
tag_attribute.save()
weight_attribute.slug = "weight_attribute"
weight_attribute.save()
product_type = product_variant_list[0].product.product_type
product_type.variant_attributes.set(
[
weight_attribute,
tag_attribute,
boolean_attribute,
numeric_attribute_without_unit,
date_attribute,
date_time_attribute,
]
)
weight_value = weight_attribute.values.get(slug="cotton")
tag_value = tag_attribute.values.get(name="About")
boolean_value = boolean_attribute.values.filter(boolean=True).first()
numeric_value = numeric_attribute_without_unit.values.first()
date_time_value = date_time_attribute.values.first()
date_value = date_attribute.values.first()
date_attribute.slug = "date"
date_attribute.save()
date_time_attribute.slug = "date_time"
date_time_attribute.save()
associate_attribute_values_to_instance(
product_variant_list[0],
{
weight_attribute.pk: [weight_value],
tag_attribute.pk: [tag_value],
boolean_attribute.pk: [boolean_value],
numeric_attribute_without_unit.pk: [numeric_value],
date_attribute.pk: [date_value],
date_time_attribute.pk: [date_time_value],
},
)
variables = {
"where": {"attributes": attribute_filter},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response)
product_variants_nodes = content["data"]["productVariants"]["edges"]
assert len(product_variants_nodes) == 0
@pytest.mark.parametrize(
("attribute_where_input", "expected_count_result"),
[
(
[
{"slug": "material", "value": {"slug": {"eq": "cotton"}}},
{"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
{"slug": "color", "value": {"slug": {"oneOf": ["red"]}}},
{"slug": "boolean", "value": {"boolean": True}},
],
1,
),
(
[
{"slug": "material", "value": {"slug": {"eq": "cotton"}}},
{"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
],
1,
),
(
[
{"slug": "material", "value": {"slug": {"eq": "cotton"}}},
{"slug": "boolean", "value": {"boolean": False}},
],
0,
),
(
[
{"slug": "tag", "value": {"name": {"eq": "About"}}},
{"slug": "material", "value": {"slug": {"eq": "cotton"}}},
],
1,
),
(
[
{"slug": "material", "value": {"slug": {"eq": "poliester"}}},
{"slug": "tag", "value": {"name": {"eq": "Help"}}},
{"slug": "boolean", "value": {"boolean": False}},
],
0,
),
(
[
{
"slug": "color",
"value": {"slug": {"oneOf": ["red", "blue"]}},
},
{"slug": "material", "value": {"slug": {"eq": "cotton"}}},
],
1,
),
(
[
{"slug": "material", "value": {"slug": {"eq": "cotton"}}},
{"slug": "color", "value": {"name": {"eq": "Red"}}},
],
1,
),
(
[
{"slug": "material", "value": {"slug": {"eq": "cotton"}}},
{"slug": "tag", "value": {"name": {"eq": "About"}}},
{"slug": "color", "value": {"slug": {"eq": "red"}}},
],
1,
),
(
[
{
"slug": "material",
"value": {"slug": {"oneOf": ["cotton", "poliester"]}},
},
{"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
],
2,
),
(
[
{
"slug": "material",
"value": {"slug": {"oneOf": ["cotton", "poliester"]}},
},
{"slug": "boolean", "value": {"boolean": True}},
],
1,
),
([{"value": {"slug": {"oneOf": ["red", "blue"]}}}], 2),
(
[
{"value": {"slug": {"oneOf": ["cotton", "poliester"]}}},
{"value": {"boolean": True}},
],
1,
),
],
)
def test_product_variants_query_with_multiple_attribute_filters(
attribute_where_input,
expected_count_result,
staff_api_client,
product_variant_list,
weight_attribute,
tag_page_attribute,
color_attribute,
boolean_attribute,
channel_USD,
):
# given
material_attribute = weight_attribute
material_attribute.slug = "material"
material_attribute.save()
tag_attribute = tag_page_attribute
tag_attribute.slug = "tag"
tag_attribute.type = "PRODUCT_TYPE"
tag_attribute.save()
product_type = product_variant_list[0].product.product_type
product_type.variant_attributes.set(
[material_attribute, tag_attribute, color_attribute, boolean_attribute]
)
material_value = material_attribute.values.get(slug="cotton")
tag_value = tag_attribute.values.get(name="About")
color_value = color_attribute.values.get(slug="red")
second_color_value = color_attribute.values.get(slug="blue")
boolean_value = boolean_attribute.values.filter(boolean=True).first()
associate_attribute_values_to_instance(
product_variant_list[0],
{
material_attribute.pk: [material_value],
tag_attribute.pk: [tag_value],
color_attribute.pk: [color_value],
boolean_attribute.pk: [boolean_value],
},
)
tag_value_2 = tag_attribute.values.get(name="Help")
second_material_value = material_attribute.values.get(slug="poliester")
associate_attribute_values_to_instance(
product_variant_list[1],
{
material_attribute.pk: [second_material_value],
tag_attribute.pk: [tag_value_2],
color_attribute.pk: [second_color_value],
},
)
variables = {
"where": {"attributes": attribute_where_input},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response)
product_variants_nodes = content["data"]["productVariants"]["edges"]
assert len(product_variants_nodes) == expected_count_result
@pytest.mark.parametrize(
("filter_type", "expected_count"), [("containsAny", 3), ("containsAll", 1)]
)
def test_product_variants_query_with_multiple_attribute_referenced_ids(
filter_type,
expected_count,
staff_api_client,
product_variant_list,
page_type,
channel_USD,
product_type_product_reference_attribute,
product_type_page_reference_attribute,
variant,
):
# given
assert len(product_variant_list) > expected_count
product_type = product_variant_list[0].product.product_type
variant_reference_attribute = Attribute.objects.create(
slug="second-variant-reference",
name="variant reference",
type=AttributeType.PRODUCT_TYPE,
input_type=AttributeInputType.REFERENCE,
entity_type=AttributeEntityType.PRODUCT_VARIANT,
)
product_type.variant_attributes.set(
[
product_type_product_reference_attribute,
product_type_page_reference_attribute,
variant_reference_attribute,
]
)
referenced_page = Page.objects.create(
title="Referenced Page 1",
slug="referenced-page-1",
page_type=page_type,
is_published=True,
)
referenced_product = Product.objects.create(
name="Reference Product 1",
slug="ref-1",
product_type=product_type,
)
referenced_variant = variant
attr_ref_product, attr_ref_page, attr_ref_variant = (
AttributeValue.objects.bulk_create(
[
AttributeValue(
attribute=product_type_product_reference_attribute,
name=f"Product {referenced_product.pk}",
slug=f"product-{referenced_product.pk}",
reference_product=referenced_product,
),
AttributeValue(
attribute=product_type_page_reference_attribute,
name=f"Page {referenced_page.pk}",
slug=f"page-{referenced_page.pk}",
reference_page=referenced_page,
),
AttributeValue(
attribute=variant_reference_attribute,
name=f"Variant {referenced_variant.pk}",
slug=f"variant-{referenced_variant.pk}",
reference_variant=referenced_variant,
),
]
)
)
product_variant_with_all_references = product_variant_list[0]
associate_attribute_values_to_instance(
product_variant_with_all_references,
{
product_type_product_reference_attribute.pk: [attr_ref_product],
product_type_page_reference_attribute.pk: [attr_ref_page],
variant_reference_attribute.pk: [attr_ref_variant],
},
)
product_variant_with_two_references = product_variant_list[1]
associate_attribute_values_to_instance(
product_variant_with_two_references,
{
product_type_product_reference_attribute.pk: [attr_ref_product],
product_type_page_reference_attribute.pk: [attr_ref_page],
},
)
product_variant_with_single_reference = product_variant_list[3]
associate_attribute_values_to_instance(
product_variant_with_single_reference,
{
product_type_product_reference_attribute.pk: [attr_ref_product],
},
)
variables = {
"where": {
"attributes": [
{
"value": {
"reference": {
"referencedIds": {
filter_type: [
to_global_id_or_none(referenced_product),
to_global_id_or_none(referenced_page),
to_global_id_or_none(referenced_variant),
]
}
}
},
}
]
},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response)
product_variants_nodes = content["data"]["productVariants"]["edges"]
assert len(product_variants_nodes) == expected_count
returned_ids = [node["node"]["id"] for node in product_variants_nodes]
# Returned in both cases
assert to_global_id_or_none(product_variant_with_all_references) in returned_ids
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/variants_where/test_over_multiple_arguments.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/variants_where/test_over_validation.py | import pytest
from .....tests.utils import get_graphql_content
from .shared import PRODUCT_VARIANTS_WHERE_QUERY
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"numeric": None},
{"name": None},
{"slug": None},
{"boolean": False},
{"reference": {"referencedIds": {"containsAll": ["global-id-1"]}}},
],
)
def test_product_variants_query_failed_filter_validation_for_numeric_with_slug_input(
attribute_value_filter,
staff_api_client,
numeric_attribute_without_unit,
product_variant_list,
channel_USD,
):
# given
attr_slug_input = "numeric"
numeric_attribute_without_unit.slug = attr_slug_input
numeric_attribute_without_unit.save()
product_type = product_variant_list[0].product.product_type
product_type.variant_attributes.add(numeric_attribute_without_unit)
variables = {
"where": {
"attributes": [{"slug": attr_slug_input, "value": attribute_value_filter}]
},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response, ignore_errors=True)
assert "errors" in content
assert content["data"]["productVariants"] is None
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"boolean": None},
{"name": None},
{"slug": None},
{"numeric": {"eq": 1.2}},
{"reference": {"referencedIds": {"containsAll": ["global-id-1"]}}},
],
)
def test_product_variants_query_failed_filter_validation_for_boolean_with_slug_input(
attribute_value_filter,
staff_api_client,
boolean_attribute,
product_variant_list,
channel_USD,
):
# given
attr_slug_input = "boolean"
boolean_attribute.slug = attr_slug_input
boolean_attribute.save()
product_type = product_variant_list[0].product.product_type
product_type.variant_attributes.add(boolean_attribute)
variables = {
"where": {
"attributes": [{"slug": attr_slug_input, "value": attribute_value_filter}]
},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response, ignore_errors=True)
assert "errors" in content
assert content["data"]["productVariants"] is None
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"dateTime": None},
{"name": None},
{"slug": None},
{"numeric": {"eq": 1.2}},
{"reference": {"referencedIds": {"containsAll": ["global-id-1"]}}},
],
)
def test_product_variants_query_failed_filter_validation_for_date_attribute_with_slug_input(
attribute_value_filter,
staff_api_client,
date_attribute,
product_variant_list,
channel_USD,
):
# given
attr_slug_input = "date"
date_attribute.slug = attr_slug_input
date_attribute.save()
product_type = product_variant_list[0].product.product_type
product_type.variant_attributes.add(date_attribute)
variables = {
"where": {
"attributes": [{"slug": attr_slug_input, "value": attribute_value_filter}]
},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response, ignore_errors=True)
assert "errors" in content
assert content["data"]["productVariants"] is None
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"dateTime": None},
{"name": None},
{"slug": None},
{"numeric": {"eq": 1.2}},
{"date": None},
{"reference": {"referencedIds": {"containsAll": ["global-id-1"]}}},
],
)
def test_product_variants_query_failed_filter_validation_for_datetime_attribute_with_slug_input(
attribute_value_filter,
staff_api_client,
date_time_attribute,
product_variant_list,
channel_USD,
):
# given
attr_slug_input = "date_time"
date_time_attribute.slug = attr_slug_input
date_time_attribute.save()
product_type = product_variant_list[0].product.product_type
product_type.variant_attributes.add(date_time_attribute)
variables = {
"where": {
"attributes": [{"slug": attr_slug_input, "value": attribute_value_filter}]
},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response, ignore_errors=True)
assert "errors" in content
assert content["data"]["productVariants"] is None
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"slug": None, "value": None},
{"slug": None, "value": {"name": {"eq": "name"}}},
],
)
def test_product_variants_query_failed_filter_validation_null_in_input(
attribute_value_filter,
staff_api_client,
channel_USD,
):
# given
variables = {
"where": {"attributes": [attribute_value_filter]},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response, ignore_errors=True)
assert "errors" in content
assert content["data"]["productVariants"] is None
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"slug": None},
{"name": None},
{
"slug": {"eq": "true_slug"},
"name": {"eq": "name"},
},
{
"slug": {"oneOf": ["true_slug"]},
"name": {"oneOf": ["name"]},
},
],
)
def test_product_variants_query_failed_filter_validation_for_basic_value_fields_with_attr_slug(
attribute_value_filter,
staff_api_client,
channel_USD,
):
# given
attr_slug_input = "product-size"
variables = {
"where": {
"attributes": [{"slug": attr_slug_input, "value": attribute_value_filter}]
},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response, ignore_errors=True)
assert "errors" in content
assert content["data"]["productVariants"] is None
def test_product_variants_query_failed_filter_validation_for_duplicated_attr_slug(
staff_api_client,
channel_USD,
):
# given
attr_slug_input = "product-size"
variables = {
"where": {
"attributes": [
{"slug": attr_slug_input},
{"slug": attr_slug_input},
]
},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response, ignore_errors=True)
assert "errors" in content
assert content["data"]["productVariants"] is None
@pytest.mark.parametrize(
"attribute_value_filter",
[
{},
{"reference": {}},
{"reference": None},
{"reference": {"referencedIds": {"containsAll": []}}},
{"reference": {"pageSlugs": {"containsAll": []}}},
{"reference": {"productSlugs": {"containsAll": []}}},
{"reference": {"productVariantSkus": {"containsAll": []}}},
{"reference": {"pageSlugs": {"containsAny": []}}},
{"reference": {"productSlugs": {"containsAny": []}}},
{"reference": {"productVariantSkus": {"containsAny": []}}},
{"reference": {"referencedIds": {"containsAny": []}}},
{"reference": {"pageSlugs": {"containsAny": [], "containsAll": []}}},
{"reference": {"productSlugs": {"containsAny": [], "containsAll": []}}},
{"reference": {"productVariantSkus": {"containsAny": [], "containsAll": []}}},
{"reference": {"referencedIds": {"containsAny": [], "containsAll": []}}},
{"reference": {"referencedIds": {"containsAll": None}}},
{"reference": {"pageSlugs": {"containsAll": None}}},
{"reference": {"productSlugs": {"containsAll": None}}},
{"reference": {"productVariantSkus": {"containsAll": None}}},
{"reference": {"pageSlugs": {"containsAny": None}}},
{"reference": {"productSlugs": {"containsAny": None}}},
{"reference": {"productVariantSkus": {"containsAny": None}}},
{"reference": {"referencedIds": {"containsAny": None}}},
{"reference": {"referencedIds": {"containsAny": ["non-existing-id"]}}},
{"reference": {"referencedIds": {"containsAll": ["non-existing-id"]}}},
# ID of not valid object
{"reference": {"referencedIds": {"containsAny": ["T3JkZXI6MQ=="]}}},
{"reference": {"referencedIds": {"containsAll": ["T3JkZXI6MQ=="]}}},
],
)
def test_product_variants_query_failed_filter_validation_for_reference_attribute_with_slug_input(
attribute_value_filter,
staff_api_client,
channel_USD,
):
# given
attr_slug_input = "reference-product"
variables = {
"where": {
"attributes": [{"slug": attr_slug_input, "value": attribute_value_filter}]
},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
PRODUCT_VARIANTS_WHERE_QUERY,
variables,
)
# then
content = get_graphql_content(response, ignore_errors=True)
assert "errors" in content
assert content["data"]["productVariants"] is None
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/variants_where/test_over_validation.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/core/types/tests/test_money.py | from decimal import Decimal
from prices import Money
from ..money import Money as MoneyObject
def test_money_object_usd():
money = Money(Decimal("12.950000"), "USD")
resolve_info = None
assert MoneyObject.resolve_amount(money, resolve_info) == Decimal("12.95")
assert MoneyObject.resolve_fractional_amount(money, resolve_info) == 1295
assert MoneyObject.resolve_fraction_digits(money, resolve_info) == 2
def test_money_object_jpy():
money = Money(Decimal(1234), "JPY")
resolve_info = None
assert MoneyObject.resolve_amount(money, resolve_info) == Decimal(1234)
assert MoneyObject.resolve_fractional_amount(money, resolve_info) == 1234
assert MoneyObject.resolve_fraction_digits(money, resolve_info) == 0
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/core/types/tests/test_money.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/shared.py | PRODUCTS_WHERE_QUERY = """
query($where: ProductWhereInput!, $channel: String) {
products(first: 10, where: $where, channel: $channel) {
edges {
node {
id
name
slug
}
}
}
}
"""
PRODUCTS_FILTER_QUERY = """
query($where: ProductFilterInput!, $channel: String) {
products(first: 10, filter: $where, channel: $channel) {
edges {
node {
id
name
slug
}
}
}
}
"""
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/shared.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes.py | import graphene
import pytest
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
def test_products_query_with_attribute_slug(
query, staff_api_client, product_list, size_attribute, channel_USD
):
# given
product_list[0].product_type.product_attributes.add(size_attribute)
product_attr_value = size_attribute.values.first()
associate_attribute_values_to_instance(
product_list[0], {size_attribute.pk: [product_attr_value]}
)
variables = {
"where": {"attributes": [{"slug": size_attribute.slug}]},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
query,
variables,
)
# then
content = get_graphql_content(response)
products_nodes = content["data"]["products"]["edges"]
assert len(products_nodes) == 1
assert products_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
"Product", product_list[0].pk
)
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
("attribute_input", "expected_count"),
[
({"value": {"slug": {"eq": "test-slug-1"}}}, 1),
({"value": {"slug": {"oneOf": ["test-slug-1", "test-slug-2"]}}}, 2),
({"slug": "size_attribute", "value": {"slug": {"eq": "test-slug-1"}}}, 1),
(
{
"slug": "size_attribute",
"value": {"slug": {"oneOf": ["test-slug-1", "test-slug-2"]}},
},
2,
),
],
)
def test_products_query_with_attribute_value_slug(
query,
attribute_input,
expected_count,
staff_api_client,
product_list,
size_attribute,
channel_USD,
):
# given
size_attribute.slug = "size_attribute"
size_attribute.save()
product_list[0].product_type.product_attributes.add(size_attribute)
attr_value_1 = size_attribute.values.first()
attr_value_1.slug = "test-slug-1"
attr_value_1.save()
attr_value_2 = size_attribute.values.last()
attr_value_2.slug = "test-slug-2"
attr_value_2.save()
associate_attribute_values_to_instance(
product_list[0], {size_attribute.pk: [attr_value_1]}
)
associate_attribute_values_to_instance(
product_list[1], {size_attribute.pk: [attr_value_2]}
)
variables = {
"where": {"attributes": [attribute_input]},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
query,
variables,
)
# then
content = get_graphql_content(response)
products_nodes = content["data"]["products"]["edges"]
assert len(products_nodes) == expected_count
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
("attribute_input", "expected_count"),
[
({"value": {"name": {"eq": "test-name-1"}}}, 1),
({"value": {"name": {"oneOf": ["test-name-1", "test-name-2"]}}}, 2),
({"slug": "size_attribute", "value": {"name": {"eq": "test-name-1"}}}, 1),
(
{
"slug": "size_attribute",
"value": {"name": {"oneOf": ["test-name-1", "test-name-2"]}},
},
2,
),
],
)
def test_products_query_with_attribute_value_name(
query,
attribute_input,
expected_count,
staff_api_client,
product_list,
size_attribute,
channel_USD,
):
# given
size_attribute.slug = "size_attribute"
size_attribute.save()
product_list[0].product_type.product_attributes.add(size_attribute)
attr_value_1 = size_attribute.values.first()
attr_value_1.name = "test-name-1"
attr_value_1.save()
attr_value_2 = size_attribute.values.last()
attr_value_2.name = "test-name-2"
attr_value_2.save()
associate_attribute_values_to_instance(
product_list[0], {size_attribute.pk: [attr_value_1]}
)
associate_attribute_values_to_instance(
product_list[1], {size_attribute.pk: [attr_value_2]}
)
variables = {
"where": {"attributes": [attribute_input]},
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(
query,
variables,
)
# then
content = get_graphql_content(response)
products_nodes = content["data"]["products"]["edges"]
assert len(products_nodes) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes_boolean.py | import graphene
import pytest
from ......attribute import AttributeInputType, AttributeType
from ......attribute.models import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    "boolean_input",
    [
        {"value": {"boolean": True}},
        {"value": {"name": {"eq": "True-name"}}},
        {"value": {"slug": {"eq": "true_slug"}}},
        {"value": {"name": {"oneOf": ["True-name", "True-name-2"]}}},
        {"value": {"slug": {"oneOf": ["true_slug"]}}},
        {"slug": "b_s", "value": {"boolean": True}},
        {"slug": "b_s", "value": {"name": {"eq": "True-name"}}},
        {"slug": "b_s", "value": {"slug": {"eq": "true_slug"}}},
        {"slug": "b_s", "value": {"name": {"oneOf": ["True-name", "True-name-2"]}}},
        {"slug": "b_s", "value": {"slug": {"oneOf": ["true_slug"]}}},
    ],
)
def test_products_query_with_attribute_value_boolean(
    query,
    boolean_input,
    staff_api_client,
    product_type,
    product_list,
    boolean_attribute,
    channel_USD,
):
    """Filter products by a boolean attribute value.

    Only product_list[0] carries the matching `True` value; product_list[1]
    carries a `False` value of a *different* boolean attribute, so every
    parametrized filter must return exactly the first product.
    """
    # given
    boolean_attribute.slug = "b_s"
    boolean_attribute.type = "PRODUCT_TYPE"
    boolean_attribute.save()
    second_attribute = Attribute.objects.create(
        slug="s_boolean",
        name="Boolean",
        type=AttributeType.PRODUCT_TYPE,
        input_type=AttributeInputType.BOOLEAN,
    )
    product_type.product_attributes.set([boolean_attribute, second_attribute])
    true_value = boolean_attribute.values.filter(boolean=True).first()
    true_value.name = "True-name"
    true_value.slug = "true_slug"
    true_value.save()
    associate_attribute_values_to_instance(
        product_list[0], {boolean_attribute.pk: [true_value]}
    )
    # Negative control: a False value on the second attribute must never match.
    # Label fixed to ": No" — it was ": Yes" before, contradicting boolean=False
    # and the "_false" slug.
    value_for_second_attr = AttributeValue.objects.create(
        attribute=second_attribute,
        name=f"{second_attribute.name}: No",
        slug=f"{second_attribute.id}_false",
        boolean=False,
    )
    associate_attribute_values_to_instance(
        product_list[1], {second_attribute.pk: [value_for_second_attr]}
    )
    variables = {"where": {"attributes": [boolean_input]}, "channel": channel_USD.slug}
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == 1
    assert products_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
        "Product", product_list[0].pk
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes_boolean.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes_date.py | import datetime
import pytest
from ......attribute import AttributeInputType, AttributeType
from ......attribute.models import Attribute
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    ("date_input", "expected_count"),
    [
        ({"slug": "date", "value": {"date": {"gte": "2021-01-01"}}}, 1),
        ({"slug": "date", "value": {"name": {"eq": "date-name-1"}}}, 1),
        ({"slug": "date", "value": {"slug": {"eq": "date-slug-1"}}}, 1),
        (
            {
                "slug": "date",
                "value": {"name": {"oneOf": ["date-name-1", "date-name-2"]}},
            },
            1,
        ),
        (
            {
                "slug": "date",
                "value": {"slug": {"oneOf": ["date-slug-1", "date-slug-2"]}},
            },
            1,
        ),
        (
            {
                "slug": "date",
                "value": {"date": {"gte": "2021-01-02", "lte": "2021-01-03"}},
            },
            1,
        ),
        ({"value": {"date": {"gte": "2021-01-01"}}}, 2),
        ({"value": {"name": {"eq": "date-name-1"}}}, 1),
        ({"value": {"slug": {"eq": "date-slug-1"}}}, 1),
        ({"value": {"name": {"oneOf": ["date-name-1", "date-name-2"]}}}, 2),
        ({"value": {"slug": {"oneOf": ["date-slug-1", "date-slug-2"]}}}, 2),
        ({"value": {"date": {"gte": "2021-01-01", "lte": "2021-01-02"}}}, 1),
    ],
)
def test_products_query_with_attribute_value_date(
    query,
    date_input,
    expected_count,
    staff_api_client,
    product_list,
    product_type,
    date_attribute,
    channel_USD,
):
    """Filter products by date attribute values.

    Cases with ``"slug": "date"`` match only within `date_attribute`
    (one value, 2021-01-03 on product_list[0]); slug-less cases also reach
    `second_date_attribute` (2021-01-02 on product_list[1]).
    """
    # given
    date_attribute.type = "PRODUCT_TYPE"
    date_attribute.slug = "date"
    date_attribute.save()
    second_date_attribute = Attribute.objects.create(
        slug="second_date",
        name="Second date",
        type=AttributeType.PRODUCT_TYPE,
        input_type=AttributeInputType.DATE,
    )
    product_type.product_attributes.set([date_attribute, second_date_attribute])
    # Date values are stored on the `date_time` field of AttributeValue.
    attr_value_1 = date_attribute.values.first()
    attr_value_1.date_time = datetime.datetime(2021, 1, 3, tzinfo=datetime.UTC)
    attr_value_1.name = "date-name-1"
    attr_value_1.slug = "date-slug-1"
    attr_value_1.save()
    associate_attribute_values_to_instance(
        product_list[0], {date_attribute.pk: [attr_value_1]}
    )
    second_attr_value = second_date_attribute.values.create(
        date_time=datetime.datetime(2021, 1, 2, tzinfo=datetime.UTC),
        name="date-name-2",
        slug="date-slug-2",
    )
    associate_attribute_values_to_instance(
        product_list[1], {second_date_attribute.pk: [second_attr_value]}
    )
    variables = {"where": {"attributes": [date_input]}, "channel": channel_USD.slug}
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes_date.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes_datetime.py | import datetime
import pytest
from ......attribute import AttributeInputType, AttributeType
from ......attribute.models import Attribute
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    ("date_time_input", "expected_count"),
    [
        ({"slug": "dt", "value": {"name": {"eq": "datetime-name-1"}}}, 1),
        ({"slug": "dt", "value": {"slug": {"eq": "datetime-slug-1"}}}, 1),
        (
            {
                "slug": "dt",
                "value": {"name": {"oneOf": ["datetime-name-1", "datetime-name-2"]}},
            },
            2,
        ),
        (
            {
                "slug": "dt",
                "value": {"slug": {"oneOf": ["datetime-slug-1", "datetime-slug-2"]}},
            },
            2,
        ),
        ({"slug": "dt", "value": {"dateTime": {"gte": "2021-01-01T00:00:00Z"}}}, 2),
        (
            {
                "slug": "dt",
                "value": {
                    "dateTime": {
                        "gte": "2021-01-01T00:00:00Z",
                        "lte": "2021-01-02T00:00:00Z",
                    }
                },
            },
            1,
        ),
        ({"value": {"name": {"eq": "datetime-name-1"}}}, 1),
        ({"value": {"slug": {"eq": "datetime-slug-1"}}}, 1),
        ({"value": {"name": {"oneOf": ["datetime-name-1", "datetime-name-2"]}}}, 2),
        ({"value": {"slug": {"oneOf": ["datetime-slug-1", "datetime-slug-2"]}}}, 2),
        ({"value": {"dateTime": {"gte": "2021-01-01T00:00:00Z"}}}, 3),
        (
            {
                "value": {
                    "dateTime": {
                        "gte": "2021-01-01T00:00:00Z",
                        "lte": "2021-01-02T00:00:00Z",
                    }
                }
            },
            2,
        ),
    ],
)
def test_products_query_with_attribute_value_date_time(
    query,
    date_time_input,
    expected_count,
    staff_api_client,
    product_type,
    product_list,
    date_time_attribute,
    channel_USD,
):
    """Filter products by date-time attribute values.

    `date_time_attribute` ("dt") holds 2021-01-03 (product 0) and 2021-01-01
    (product 1); a second DATE_TIME attribute holds 2021-01-01 (product 2).
    Slug-scoped filters see only the first two; slug-less filters see all three.
    """
    # given
    date_time_attribute.slug = "dt"
    date_time_attribute.type = "PRODUCT_TYPE"
    date_time_attribute.save()
    second_date_attribute = Attribute.objects.create(
        slug="second_dt",
        name="Second dt",
        type=AttributeType.PRODUCT_TYPE,
        input_type=AttributeInputType.DATE_TIME,
    )
    product_type.product_attributes.set([date_time_attribute, second_date_attribute])
    attr_value_1 = date_time_attribute.values.first()
    attr_value_1.date_time = datetime.datetime(2021, 1, 3, tzinfo=datetime.UTC)
    attr_value_1.name = "datetime-name-1"
    attr_value_1.slug = "datetime-slug-1"
    attr_value_1.save()
    associate_attribute_values_to_instance(
        product_list[0], {date_time_attribute.pk: [attr_value_1]}
    )
    # Fetched only after attr_value_1 was saved, so .last() reflects that state.
    second_attr_value = date_time_attribute.values.last()
    second_attr_value.date_time = datetime.datetime(2021, 1, 1, tzinfo=datetime.UTC)
    second_attr_value.name = "datetime-name-2"
    second_attr_value.slug = "datetime-slug-2"
    second_attr_value.save()
    associate_attribute_values_to_instance(
        product_list[1], {date_time_attribute.pk: [second_attr_value]}
    )
    value_for_second_attr = second_date_attribute.values.create(
        date_time=datetime.datetime(2021, 1, 1, tzinfo=datetime.UTC),
        name="second-datetime-name",
        slug="second-datetime-slug",
    )
    associate_attribute_values_to_instance(
        product_list[2], {second_date_attribute.pk: [value_for_second_attr]}
    )
    variables = {
        "where": {"attributes": [date_time_input]},
        "channel": channel_USD.slug,
    }
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes_datetime.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes_numeric.py | import pytest
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    ("numeric_input", "expected_count"),
    [
        ({"slug": "num-slug", "value": {"numeric": {"eq": 1.2}}}, 1),
        ({"slug": "num-slug", "value": {"numeric": {"oneOf": [1.2, 2]}}}, 2),
        (
            {"slug": "num-slug", "value": {"numeric": {"range": {"gte": 1, "lte": 2}}}},
            2,
        ),
        ({"slug": "num-slug", "value": {"name": {"eq": "1.2"}}}, 1),
        ({"slug": "num-slug", "value": {"slug": {"eq": "1.2"}}}, 1),
        ({"slug": "num-slug", "value": {"name": {"oneOf": ["1.2", "2"]}}}, 2),
        ({"slug": "num-slug", "value": {"slug": {"oneOf": ["1.2", "2"]}}}, 2),
        ({"value": {"numeric": {"eq": 1.2}}}, 1),
        ({"value": {"numeric": {"oneOf": [1.2, 2]}}}, 2),
        ({"value": {"numeric": {"range": {"gte": 1, "lte": 2}}}}, 2),
        ({"value": {"numeric": {"range": {"gte": 1}}}}, 3),
        ({"value": {"name": {"eq": "1.2"}}}, 1),
        ({"value": {"slug": {"eq": "1.2"}}}, 1),
        ({"value": {"name": {"oneOf": ["1.2", "2"]}}}, 2),
        ({"value": {"slug": {"oneOf": ["1.2", "2"]}}}, 2),
    ],
)
def test_products_query_with_attribute_value_numeric(
    query,
    numeric_input,
    expected_count,
    staff_api_client,
    product_type,
    product_list,
    numeric_attribute_without_unit,
    numeric_attribute,
    channel_USD,
):
    """Filter products by numeric attribute values (eq / oneOf / range)."""
    # given
    numeric_attribute_without_unit.slug = "num-slug"
    numeric_attribute_without_unit.type = "PRODUCT_TYPE"
    numeric_attribute_without_unit.save()
    product_type.product_attributes.set(
        [numeric_attribute_without_unit, numeric_attribute]
    )

    # Rewrite the first and last values of the unit-less attribute one at a
    # time; the .last() lookup deliberately runs after the first save.
    rewritten_values = []
    for fetch_value, label, number in (
        (numeric_attribute_without_unit.values.first, "1.2", 1.2),
        (numeric_attribute_without_unit.values.last, "2", 2),
    ):
        value = fetch_value()
        value.name = label
        value.slug = label
        value.numeric = number
        value.save()
        rewritten_values.append(value)

    # A value of the second (unit-bearing) attribute for the third product.
    unit_attr_value = numeric_attribute.values.first()

    associate_attribute_values_to_instance(
        product_list[0],
        {
            numeric_attribute_without_unit.pk: [rewritten_values[0]],
        },
    )
    associate_attribute_values_to_instance(
        product_list[1], {numeric_attribute_without_unit.pk: [rewritten_values[1]]}
    )
    associate_attribute_values_to_instance(
        product_list[2], {numeric_attribute.pk: [unit_attr_value]}
    )

    variables = {"where": {"attributes": [numeric_input]}, "channel": channel_USD.slug}

    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )

    # then
    content = get_graphql_content(response)
    matching_edges = content["data"]["products"]["edges"]
    assert len(matching_edges) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_attributes_numeric.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_multiple_arguments.py | import pytest
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    "attribute_filter",
    [
        # Non-existing attribute slug
        [{"slug": "non-existing-attribute"}],
        # Existing attribute with non-existing value name
        [{"slug": "tag", "value": {"name": {"eq": "Non-existing Name"}}}],
        [{"value": {"name": {"eq": "Non-existing Name"}}}],
        # Existing numeric attribute with out-of-range value
        [{"slug": "count", "value": {"numeric": {"eq": 999}}}],
        [{"value": {"numeric": {"eq": 999}}}],
        # Existing boolean attribute with no matching boolean value
        [{"slug": "boolean", "value": {"boolean": False}}],
        [{"value": {"boolean": False}}],
        # Multiple attributes where one doesn't exist
        [
            {"slug": "size", "value": {"slug": {"eq": "large"}}},
            {"slug": "non-existing-attr", "value": {"slug": {"eq": "some-value"}}},
        ],
        [
            {"value": {"slug": {"eq": "large"}}},
            {"slug": "non-existing-attr", "value": {"slug": {"eq": "some-value"}}},
        ],
    ],
)
def test_products_query_with_non_matching_records(
    query,
    attribute_filter,
    staff_api_client,
    product_list,
    size_attribute,
    tag_page_attribute,
    boolean_attribute,
    numeric_attribute_without_unit,
    date_attribute,
    date_time_attribute,
    channel_USD,
):
    """Filters referencing non-existing attributes/values must return nothing.

    Sets up one product carrying a value for each attribute type, then checks
    that every non-matching filter combination yields an empty result set.
    """
    # given
    tag_attribute = tag_page_attribute
    tag_attribute.type = "PRODUCT_TYPE"
    tag_attribute.save()
    product_type = product_list[0].product_type
    product_type.product_attributes.set(
        [
            size_attribute,
            tag_attribute,
            boolean_attribute,
            numeric_attribute_without_unit,
            date_attribute,
            date_time_attribute,
        ]
    )
    size_value = size_attribute.values.get(slug="small")
    tag_value = tag_attribute.values.get(name="About")
    boolean_value = boolean_attribute.values.filter(boolean=True).first()
    numeric_value = numeric_attribute_without_unit.values.first()
    date_time_value = date_time_attribute.values.first()
    date_value = date_attribute.values.first()
    # Normalize slugs so the parametrized filters address known attributes.
    date_attribute.slug = "date"
    date_attribute.save()
    date_time_attribute.slug = "date_time"
    date_time_attribute.save()
    associate_attribute_values_to_instance(
        product_list[0],
        {
            size_attribute.pk: [size_value],
            tag_attribute.pk: [tag_value],
            boolean_attribute.pk: [boolean_value],
            numeric_attribute_without_unit.pk: [numeric_value],
            date_attribute.pk: [date_value],
            date_time_attribute.pk: [date_time_value],
        },
    )
    variables = {
        "where": {"attributes": attribute_filter},
        "channel": channel_USD.slug,
    }
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == 0
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    ("attribute_where_input", "expected_count_result"),
    [
        (
            [
                {"slug": "size", "value": {"slug": {"eq": "big"}}},
                {"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
                {"slug": "color", "value": {"slug": {"oneOf": ["red"]}}},
                {"slug": "boolean", "value": {"boolean": True}},
            ],
            1,
        ),
        (
            [
                {"slug": "size", "value": {"slug": {"eq": "big"}}},
                {"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
            ],
            1,
        ),
        (
            [
                {"slug": "size", "value": {"slug": {"eq": "big"}}},
                {"slug": "boolean", "value": {"boolean": False}},
            ],
            0,
        ),
        (
            [
                {"slug": "tag", "value": {"name": {"eq": "About"}}},
                {"slug": "size", "value": {"slug": {"eq": "big"}}},
            ],
            1,
        ),
        (
            [
                {"slug": "size", "value": {"slug": {"eq": "small"}}},
                {"slug": "tag", "value": {"name": {"eq": "Help"}}},
                {"slug": "boolean", "value": {"boolean": False}},
            ],
            0,
        ),
        (
            [
                {
                    "slug": "color",
                    "value": {"slug": {"oneOf": ["red", "blue"]}},
                },
                {"slug": "size", "value": {"slug": {"eq": "big"}}},
            ],
            1,
        ),
        (
            [
                {"slug": "size", "value": {"slug": {"eq": "big"}}},
                {"slug": "color", "value": {"name": {"eq": "Red"}}},
            ],
            1,
        ),
        (
            [
                {"slug": "size", "value": {"slug": {"eq": "big"}}},
                {"slug": "tag", "value": {"name": {"eq": "About"}}},
                {"slug": "color", "value": {"slug": {"eq": "red"}}},
            ],
            1,
        ),
        (
            [
                {"slug": "size", "value": {"slug": {"oneOf": ["big", "small"]}}},
                {"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
            ],
            2,
        ),
        (
            [
                {"slug": "size", "value": {"slug": {"oneOf": ["big", "small"]}}},
                {"slug": "boolean", "value": {"boolean": True}},
            ],
            1,
        ),
        ([{"value": {"slug": {"oneOf": ["red", "blue"]}}}], 3),
        (
            [
                {"value": {"slug": {"oneOf": ["big", "small"]}}},
                {"value": {"boolean": True}},
            ],
            1,
        ),
    ],
)
def test_products_query_with_multiple_attribute_filters(
    query,
    attribute_where_input,
    expected_count_result,
    staff_api_client,
    product_list,
    size_attribute,
    tag_page_attribute,
    color_attribute,
    boolean_attribute,
    channel_USD,
):
    """Combine several attribute filters; entries are ANDed together.

    Product 0: size=big, tag=About, color=red, boolean=True.
    Product 1: size=small, tag=Help, color=blue (no boolean value).
    """
    # given
    tag_attribute = tag_page_attribute
    tag_attribute.slug = "tag"
    tag_attribute.type = "PRODUCT_TYPE"
    tag_attribute.save()
    product_type = product_list[0].product_type
    product_type.product_attributes.set(
        [size_attribute, tag_attribute, color_attribute, boolean_attribute]
    )
    size_value = size_attribute.values.get(slug="big")
    tag_value = tag_attribute.values.get(name="About")
    color_value = color_attribute.values.get(slug="red")
    second_color_value = color_attribute.values.get(slug="blue")
    boolean_value = boolean_attribute.values.filter(boolean=True).first()
    associate_attribute_values_to_instance(
        product_list[0],
        {
            size_attribute.pk: [size_value],
            tag_attribute.pk: [tag_value],
            color_attribute.pk: [color_value],
            boolean_attribute.pk: [boolean_value],
        },
    )
    tag_value_2 = tag_attribute.values.get(name="Help")
    size_value_small = size_attribute.values.get(slug="small")
    associate_attribute_values_to_instance(
        product_list[1],
        {
            size_attribute.pk: [size_value_small],
            tag_attribute.pk: [tag_value_2],
            color_attribute.pk: [second_color_value],
        },
    )
    variables = {
        "where": {"attributes": attribute_where_input},
        "channel": channel_USD.slug,
    }
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == expected_count_result
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_multiple_arguments.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_references_pages.py | import graphene
import pytest
from ......attribute import AttributeEntityType, AttributeType
from ......attribute.models import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from ......page.models import Page
from .....core.utils import to_global_id_or_none
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    "scenario",
    [
        {
            "attr_fixture": "product_type_page_reference_attribute",
            "filter": "containsAny",
            "expected": 2,
            "product_assignments": {
                "product_with_both_pages": ["ref-1", "ref-2"],
                "product_with_second_page_only": ["ref-2"],
            },
            "search": ["ref-1", "ref-2"],
        },
        {
            "attr_fixture": "product_type_page_reference_attribute",
            "filter": "containsAll",
            "expected": 1,
            "product_assignments": {
                "product_with_both_pages": ["ref-1", "ref-2"],
                "product_with_second_page_only": ["ref-2"],
            },
            "search": ["ref-1", "ref-2"],
        },
        {
            "attr_fixture": "product_type_page_single_reference_attribute",
            "filter": "containsAny",
            "expected": 2,
            "product_assignments": {
                "product_with_first_page": ["ref-1"],
                "product_with_second_page": ["ref-2"],
            },
            "search": ["ref-1", "ref-2"],
        },
        {
            "attr_fixture": "product_type_page_single_reference_attribute",
            "filter": "containsAll",
            "expected": 2,
            "product_assignments": {
                "product_with_first_page": ["ref-1"],
                "product_second_overridden": ["ref-1"],
            },
            "search": ["ref-1"],
        },
    ],
)
def test_products_query_with_attr_slug_and_attribute_value_reference_to_pages(
    query,
    scenario,
    request,
    staff_api_client,
    product_list,
    product_type,
    page_type,
    channel_USD,
):
    """Filter products by page references scoped to a specific attribute slug.

    Each scenario picks a reference-attribute fixture (multi- or
    single-reference), assigns page references to products, and checks the
    containsAny/containsAll result count.
    """
    # given
    # Resolve the attribute fixture named by the scenario at runtime.
    reference_attribute = request.getfixturevalue(scenario["attr_fixture"])
    product_type.product_attributes.add(reference_attribute)
    ref_page_1, ref_page_2 = Page.objects.bulk_create(
        [
            Page(
                title="Reference Page 1",
                slug="ref-1",
                page_type=page_type,
                is_published=True,
            ),
            Page(
                title="Reference Page 2",
                slug="ref-2",
                page_type=page_type,
                is_published=True,
            ),
        ]
    )
    attribute_values = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=reference_attribute,
                name=f"Page {ref_page_1.pk}",
                slug=f"page-{ref_page_1.pk}",
                reference_page=ref_page_1,
            ),
            AttributeValue(
                attribute=reference_attribute,
                name=f"Page {ref_page_2.pk}",
                slug=f"page-{ref_page_2.pk}",
                reference_page=ref_page_2,
            ),
        ]
    )
    slug_to_value = {
        ref_page_1.slug: attribute_values[0],
        ref_page_2.slug: attribute_values[1],
    }
    # Pair each product with its scenario-declared list of page slugs.
    for product, slugs in zip(
        product_list, scenario["product_assignments"].values(), strict=False
    ):
        associate_attribute_values_to_instance(
            product,
            {reference_attribute.pk: [slug_to_value[slug] for slug in slugs]},
        )
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": reference_attribute.slug,
                    "value": {
                        "reference": {
                            "pageSlugs": {scenario["filter"]: scenario["search"]}
                        }
                    },
                }
            ]
        },
        "channel": channel_USD.slug,
    }
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == scenario["expected"]
    # NOTE(review): when multiple products match, this relies on the default
    # ordering putting product_list[0] first — confirm that assumption holds.
    assert products_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
        "Product", product_list[0].pk
    )
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    ("reference_attribute_fixture", "filter_type", "expected_count"),
    [
        # REFERENCE type - searches across all reference attributes
        ("product_type_page_reference_attribute", "containsAny", 2),
        ("product_type_page_reference_attribute", "containsAll", 1),
        # SINGLE_REFERENCE - product has page1 in attr1 and page2 in attr2
        ("product_type_page_single_reference_attribute", "containsAny", 2),
        ("product_type_page_single_reference_attribute", "containsAll", 1),
    ],
)
def test_products_query_with_attribute_value_reference_to_pages(
    query,
    reference_attribute_fixture,
    filter_type,
    expected_count,
    request,
    staff_api_client,
    product_list,
    product_type,
    page_type,
    channel_USD,
):
    """Filter products by page references without naming an attribute slug.

    The filter matches page references across attributes: product 0
    references page 1 (first attribute) and page 2 (second attribute);
    product 1 references only page 2.
    """
    # given
    reference_attribute = request.getfixturevalue(reference_attribute_fixture)
    second_page_reference_attribute = Attribute.objects.create(
        slug="second-page-reference",
        name="Page reference",
        type=AttributeType.PRODUCT_TYPE,
        input_type=reference_attribute.input_type,
        entity_type=AttributeEntityType.PAGE,
    )
    product_type.product_attributes.add(
        reference_attribute,
        second_page_reference_attribute,
    )
    ref_page_1, ref_page_2 = Page.objects.bulk_create(
        [
            Page(
                title="Reference Page 1",
                slug="ref-1",
                page_type=page_type,
                is_published=True,
            ),
            Page(
                title="Reference Page 2",
                slug="ref-2",
                page_type=page_type,
                is_published=True,
            ),
        ]
    )
    attribute_value_1, attribute_value_2 = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=reference_attribute,
                name=f"Page {ref_page_1.pk}",
                slug=f"page-{ref_page_1.pk}",
                reference_page=ref_page_1,
            ),
            AttributeValue(
                attribute=second_page_reference_attribute,
                name=f"Page {ref_page_2.pk}",
                slug=f"page-{ref_page_2.pk}",
                reference_page=ref_page_2,
            ),
        ]
    )
    product_with_both_references = product_list[0]
    # Always assign one value per attribute for this test (no slug test)
    associate_attribute_values_to_instance(
        product_with_both_references,
        {
            reference_attribute.pk: [attribute_value_1],
            second_page_reference_attribute.pk: [attribute_value_2],
        },
    )
    product_with_single_reference = product_list[1]
    associate_attribute_values_to_instance(
        product_with_single_reference,
        {second_page_reference_attribute.pk: [attribute_value_2]},
    )
    variables = {
        "where": {
            "attributes": [
                {
                    "value": {
                        "reference": {
                            "pageSlugs": {
                                filter_type: [ref_page_1.slug, ref_page_2.slug]
                            }
                        }
                    },
                }
            ]
        },
        "channel": channel_USD.slug,
    }
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == expected_count
    # NOTE(review): when two products match, this relies on the default
    # ordering putting product_list[0] first — confirm that assumption holds.
    assert products_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
        "Product", product_list[0].pk
    )
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    "scenario",
    [
        {
            "attr_fixture": "product_type_page_reference_attribute",
            "filter": "containsAny",
            "expected": 3,
            "product_assignments": {
                "product_with_all_pages": ["ref-1", "ref-2", "ref-3"],
                "product_with_same_all_pages": ["ref-1", "ref-2", "ref-3"],
                "product_with_first_page_only": ["ref-1"],
            },
            "search": ["ref-1", "ref-2", "ref-3"],
        },
        {
            "attr_fixture": "product_type_page_reference_attribute",
            "filter": "containsAll",
            "expected": 2,
            "product_assignments": {
                "product_with_all_pages": ["ref-1", "ref-2", "ref-3"],
                "product_with_same_all_pages": ["ref-1", "ref-2", "ref-3"],
                "product_with_first_page_only": ["ref-1"],
            },
            "search": ["ref-1", "ref-2", "ref-3"],
        },
        {
            "attr_fixture": "product_type_page_single_reference_attribute",
            "filter": "containsAny",
            "expected": 3,
            "product_assignments": {
                "product_with_first_page": ["ref-1"],
                "product_with_second_page": ["ref-2"],
                "product_with_third_page": ["ref-3"],
            },
            "search": ["ref-1", "ref-2", "ref-3"],
        },
        {
            "attr_fixture": "product_type_page_single_reference_attribute",
            "filter": "containsAll",
            "expected": 1,
            "product_assignments": {
                "product_with_first_page": ["ref-1"],
                "product_with_second_page": ["ref-2"],
                "product_with_third_page": ["ref-3"],
            },
            "search": ["ref-1"],
        },
    ],
)
def test_products_query_with_attr_slug_and_attribute_value_referenced_page_ids(
    query,
    scenario,
    request,
    staff_api_client,
    product_list,
    product_type,
    page_type,
    channel_USD,
):
    """Filter products by referenced page global IDs for a given attr slug.

    Same shape as the page-slug test above, but the filter input is
    ``referencedIds`` built from global IDs instead of page slugs.
    """
    # given
    # Resolve the attribute fixture named by the scenario at runtime.
    reference_attribute = request.getfixturevalue(scenario["attr_fixture"])
    product_type.product_attributes.add(
        reference_attribute,
    )
    ref_page_1, ref_page_2, ref_page_3 = Page.objects.bulk_create(
        [
            Page(
                title="Reference Page 1",
                slug="ref-1",
                page_type=page_type,
                is_published=True,
            ),
            Page(
                title="Reference Page 2",
                slug="ref-2",
                page_type=page_type,
                is_published=True,
            ),
            Page(
                title="Reference Page 3",
                slug="ref-3",
                page_type=page_type,
                is_published=True,
            ),
        ]
    )
    attr_values = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=reference_attribute,
                name=f"Page {ref_page_1.pk}",
                slug=f"page-{ref_page_1.pk}",
                reference_page=ref_page_1,
            ),
            AttributeValue(
                attribute=reference_attribute,
                name=f"Page {ref_page_2.pk}",
                slug=f"page-{ref_page_2.pk}",
                reference_page=ref_page_2,
            ),
            AttributeValue(
                attribute=reference_attribute,
                name=f"Page {ref_page_3.pk}",
                slug=f"page-{ref_page_3.pk}",
                reference_page=ref_page_3,
            ),
        ]
    )
    slug_to_value = {
        ref_page_1.slug: attr_values[0],
        ref_page_2.slug: attr_values[1],
        ref_page_3.slug: attr_values[2],
    }
    # Pair each product with its scenario-declared list of page slugs.
    for product, slugs in zip(
        product_list, scenario["product_assignments"].values(), strict=False
    ):
        associate_attribute_values_to_instance(
            product,
            {reference_attribute.pk: [slug_to_value[slug] for slug in slugs]},
        )
    # Translate the searched slugs into graphene global IDs.
    ref_lookup = {page.slug: page for page in [ref_page_1, ref_page_2, ref_page_3]}
    search_ids = [to_global_id_or_none(ref_lookup[slug]) for slug in scenario["search"]]
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": reference_attribute.slug,
                    "value": {
                        "reference": {"referencedIds": {scenario["filter"]: search_ids}}
                    },
                },
            ]
        },
        "channel": channel_USD.slug,
    }
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == scenario["expected"]
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_references_pages.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_references_products.py | import graphene
import pytest
from ......attribute import AttributeEntityType, AttributeType
from ......attribute.models import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from ......product.models import Product
from .....core.utils import to_global_id_or_none
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
    (
        "reference_attribute_fixture",
        "filter_type",
        "expected_count",
        "product1_values",
        "product2_values",
        "search_values",
    ),
    [
        # REFERENCE type - can assign multiple values to one product
        (
            "product_type_product_reference_attribute",
            "containsAny",
            2,
            [0, 1],
            [1],
            [0, 1],
        ),
        (
            "product_type_product_reference_attribute",
            "containsAll",
            1,
            [0, 1],
            [1],
            [0, 1],
        ),
        # SINGLE_REFERENCE type - can only assign one value
        (
            "product_type_product_single_reference_attribute",
            "containsAny",
            2,
            [0],
            [1],
            [0, 1],
        ),
        # For SINGLE_REFERENCE containsAll, search for just one value that both products have
        (
            "product_type_product_single_reference_attribute",
            "containsAll",
            2,
            [0],
            [0],
            [0],
        ),
    ],
)
def test_products_query_with_attr_slug_and_attribute_value_reference_to_products(
    query,
    reference_attribute_fixture,
    filter_type,
    expected_count,
    product1_values,
    product2_values,
    search_values,
    request,
    staff_api_client,
    product_list,
    product_type,
    channel_USD,
):
    """Filter products by product references scoped to an attribute slug.

    The ``*_values`` parameters are index lists into the two created
    reference values: which values go to product 0, which to product 1,
    and which slugs are searched for.
    """
    # given
    # Resolve the attribute fixture named by the parametrization at runtime.
    reference_attribute = request.getfixturevalue(reference_attribute_fixture)
    product_type.product_attributes.add(reference_attribute)
    ref_product_1, ref_product_2 = Product.objects.bulk_create(
        [
            Product(
                name="Reference Product 1",
                slug="ref-1",
                product_type=product_type,
            ),
            Product(
                name="Reference Product 2",
                slug="ref-2",
                product_type=product_type,
            ),
        ]
    )
    attribute_values = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=reference_attribute,
                name=f"Product {ref_product_1.pk}",
                slug=f"product-{ref_product_1.pk}",
                reference_product=ref_product_1,
            ),
            AttributeValue(
                attribute=reference_attribute,
                name=f"Product {ref_product_2.pk}",
                slug=f"product-{ref_product_2.pk}",
                reference_product=ref_product_2,
            ),
        ]
    )
    product_with_both_references = product_list[0]
    # Assign values based on product1_values indices
    associate_attribute_values_to_instance(
        product_with_both_references,
        {reference_attribute.pk: [attribute_values[i] for i in product1_values]},
    )
    product_with_single_reference = product_list[1]
    associate_attribute_values_to_instance(
        product_with_single_reference,
        {reference_attribute.pk: [attribute_values[i] for i in product2_values]},
    )
    # Build search slugs based on search_values indices
    ref_products = [ref_product_1, ref_product_2]
    search_slugs = [ref_products[i].slug for i in search_values]
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": reference_attribute.slug,
                    "value": {
                        "reference": {"productSlugs": {filter_type: search_slugs}}
                    },
                }
            ]
        },
        "channel": channel_USD.slug,
    }
    # when
    response = staff_api_client.post_graphql(
        query,
        variables,
    )
    # then
    content = get_graphql_content(response)
    products_nodes = content["data"]["products"]["edges"]
    assert len(products_nodes) == expected_count
    # Verify first product when we expect results
    if expected_count > 0:
        assert products_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
            "Product", product_list[0].pk
        )
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
("reference_attribute_fixture", "filter_type", "expected_count"),
[
# REFERENCE type - searches across all reference attributes
("product_type_product_reference_attribute", "containsAny", 2),
("product_type_product_reference_attribute", "containsAll", 1),
# SINGLE_REFERENCE - product has ref1 in attr1 and ref2 in attr2
("product_type_product_single_reference_attribute", "containsAny", 2),
("product_type_product_single_reference_attribute", "containsAll", 1),
],
)
def test_products_query_with_attribute_value_reference_to_products(
    query,
    reference_attribute_fixture,
    filter_type,
    expected_count,
    request,
    staff_api_client,
    product_list,
    product_type,
    channel_USD,
):
    """Filter products by referenced-product slugs without naming the attribute."""
    # given
    first_attr = request.getfixturevalue(reference_attribute_fixture)
    # A second reference attribute of the same input type; the slug-less filter
    # below must match values across both attributes.
    second_attr = Attribute.objects.create(
        slug="second-product-reference",
        name="Product reference",
        type=AttributeType.PRODUCT_TYPE,
        input_type=first_attr.input_type,  # Use same type as main attribute
        entity_type=AttributeEntityType.PRODUCT,
    )
    product_type.product_attributes.add(first_attr, second_attr)
    referenced = Product.objects.bulk_create(
        [
            Product(
                name=f"Reference Product {idx}",
                slug=f"ref-{idx}",
                product_type=product_type,
            )
            for idx in (1, 2)
        ]
    )
    value_for_first, value_for_second = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=attr,
                name=f"Product {ref.pk}",
                slug=f"product-{ref.pk}",
                reference_product=ref,
            )
            for attr, ref in zip([first_attr, second_attr], referenced, strict=False)
        ]
    )
    # product_list[0] references both products, product_list[1] only the second.
    associate_attribute_values_to_instance(
        product_list[0],
        {first_attr.pk: [value_for_first], second_attr.pk: [value_for_second]},
    )
    associate_attribute_values_to_instance(
        product_list[1],
        {second_attr.pk: [value_for_second]},
    )
    slug_filter = {filter_type: [ref.slug for ref in referenced]}
    variables = {
        "where": {
            "attributes": [
                {"value": {"reference": {"productSlugs": slug_filter}}},
            ]
        },
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["products"]["edges"]
    assert len(edges) == expected_count
    # Verify the first product whenever results are expected.
    if expected_count > 0:
        expected_id = graphene.Node.to_global_id("Product", product_list[0].pk)
        assert edges[0]["node"]["id"] == expected_id
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
(
"reference_attribute_fixture",
"filter_type",
"expected_count",
"product_value_assignments",
"search_indices",
),
[
# REFERENCE type - products can have multiple values
(
"product_type_product_reference_attribute",
"containsAny",
3,
[[0, 1, 2], [0, 1, 2], [0]],
[0, 1, 2],
), # Search for all 3 refs
(
"product_type_product_reference_attribute",
"containsAll",
2,
[[0, 1, 2], [0, 1, 2], [0]],
[0, 1, 2],
), # Search for all 3 refs
# SINGLE_REFERENCE - each product has one value
(
"product_type_product_single_reference_attribute",
"containsAny",
3,
[[0], [1], [2]],
[0, 1, 2],
), # Search for all 3 refs
# For containsAll with SINGLE_REFERENCE, search for single value
(
"product_type_product_single_reference_attribute",
"containsAll",
1,
[[0], [1], [2]],
[0],
), # Search for just ref[0], only product1 has it
],
)
def test_products_query_with_attr_slug_and_attribute_value_referenced_product_ids(
    query,
    reference_attribute_fixture,
    filter_type,
    expected_count,
    product_value_assignments,
    search_indices,
    request,
    staff_api_client,
    product_list,
    product_type,
    channel_USD,
):
    """Filter products by referenced product global IDs for one attribute slug."""
    # given
    ref_attr = request.getfixturevalue(reference_attribute_fixture)
    product_type.product_attributes.add(ref_attr)
    # Products that serve as reference targets.
    referenced_products = Product.objects.bulk_create(
        [
            Product(
                name=f"Reference Product {idx}",
                slug=f"ref-{idx}",
                product_type=product_type,
            )
            for idx in (1, 2, 3)
        ]
    )
    values = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=ref_attr,
                name=f"Product {target.pk}",
                slug=f"product-{target.pk}",
                reference_product=target,
            )
            for target in referenced_products
        ]
    )
    # Attach the configured subset of values to each product under test.
    for product, indices in zip(
        product_list, product_value_assignments, strict=False
    ):
        associate_attribute_values_to_instance(
            product,
            {ref_attr.pk: [values[idx] for idx in indices]},
        )
    # Global IDs to search for, selected by index.
    search_ids = [
        to_global_id_or_none(referenced_products[idx]) for idx in search_indices
    ]
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": ref_attr.slug,
                    "value": {
                        "reference": {"referencedIds": {filter_type: search_ids}}
                    },
                },
            ]
        },
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["products"]["edges"]
    assert len(edges) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_references_products.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 369,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_references_variants.py | import graphene
import pytest
from ......attribute import AttributeEntityType, AttributeType
from ......attribute.models import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from .....core.utils import to_global_id_or_none
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
("reference_attribute_fixture", "filter_type", "expected_count"),
[
# REFERENCE type - searches across all reference attributes
("product_type_variant_reference_attribute", "containsAny", 2),
("product_type_variant_reference_attribute", "containsAll", 1),
# SINGLE_REFERENCE - product has variant1 in attr1 and variant2 in attr2
("product_type_variant_single_reference_attribute", "containsAny", 2),
("product_type_variant_single_reference_attribute", "containsAll", 1),
],
)
def test_products_query_with_attribute_value_reference_to_product_variants(
    query,
    reference_attribute_fixture,
    filter_type,
    expected_count,
    request,
    staff_api_client,
    product_list,
    product_type,
    product_variant_list,
    channel_USD,
):
    """Filter products by referenced variant SKUs without naming the attribute."""
    # given
    first_attr = request.getfixturevalue(reference_attribute_fixture)
    product_type.product_attributes.add(first_attr)
    # Second reference attribute of the same input type; the slug-less filter
    # below has to match values across both attributes.
    second_attr = Attribute.objects.create(
        slug="second-product-reference",
        name="Product reference",
        type=AttributeType.PRODUCT_TYPE,
        input_type=first_attr.input_type,
        entity_type=AttributeEntityType.PRODUCT_VARIANT,
    )
    skus = ["test-variant-1", "test-variant-2"]
    variants = product_variant_list[:2]
    for variant, sku in zip(variants, skus, strict=False):
        variant.sku = sku
        variant.save()
    first_value, second_value = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=attr,
                name=f"Variant {variant.pk}",
                slug=f"variant-{variant.pk}",
                reference_variant=variant,
            )
            for attr, variant in zip([first_attr, second_attr], variants, strict=False)
        ]
    )
    # product_list[0] references both variants, product_list[1] only the second.
    associate_attribute_values_to_instance(
        product_list[0],
        {first_attr.pk: [first_value], second_attr.pk: [second_value]},
    )
    associate_attribute_values_to_instance(
        product_list[1],
        {second_attr.pk: [second_value]},
    )
    variables = {
        "where": {
            "attributes": [
                {"value": {"reference": {"productVariantSkus": {filter_type: skus}}}},
            ]
        },
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["products"]["edges"]
    assert len(edges) == expected_count
    # Verify the first product whenever results are expected.
    if expected_count > 0:
        expected_id = graphene.Node.to_global_id("Product", product_list[0].pk)
        assert edges[0]["node"]["id"] == expected_id
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"scenario",
[
{
"attr_fixture": "product_type_variant_reference_attribute",
"filter": "containsAny",
"expected": 2,
"product_assignments": {
"product_with_both_variants": ["test-variant-1", "test-variant-2"],
"product_with_second_variant_only": ["test-variant-2"],
},
"search": ["test-variant-1", "test-variant-2"],
},
{
"attr_fixture": "product_type_variant_reference_attribute",
"filter": "containsAll",
"expected": 1,
"product_assignments": {
"product_with_both_variants": ["test-variant-1", "test-variant-2"],
"product_with_second_variant_only": ["test-variant-2"],
},
"search": ["test-variant-1", "test-variant-2"],
},
{
"attr_fixture": "product_type_variant_single_reference_attribute",
"filter": "containsAny",
"expected": 2,
"product_assignments": {
"product_with_first_variant": ["test-variant-1"],
"product_with_second_variant": ["test-variant-2"],
},
"search": ["test-variant-1", "test-variant-2"],
},
{
"attr_fixture": "product_type_variant_single_reference_attribute",
"filter": "containsAll",
"expected": 2,
"product_assignments": {
"product_with_first_variant": ["test-variant-1"],
"product_second_overridden": ["test-variant-1"],
},
"search": ["test-variant-1"],
},
],
)
def test_products_query_with_attr_slug_and_attribute_value_reference_to_product_variants(
    query,
    scenario,
    request,
    staff_api_client,
    product_list,
    product_type,
    product_variant_list,
    channel_USD,
):
    """Filter products by referenced variant SKUs scoped to one attribute slug."""
    # given
    ref_attr = request.getfixturevalue(scenario["attr_fixture"])
    product_type.product_attributes.add(ref_attr)
    skus = ["test-variant-1", "test-variant-2"]
    variants = product_variant_list[:2]
    for variant, sku in zip(variants, skus, strict=False):
        variant.sku = sku
        variant.save()
    values = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=ref_attr,
                name=f"Variant {variant.pk}",
                slug=f"variant-{variant.pk}",
                reference_variant=variant,
            )
            for variant in variants
        ]
    )
    value_by_sku = dict(zip(skus, values, strict=False))
    # Attach the scenario's SKU lists, one product per assignment entry.
    for product, assigned_skus in zip(
        product_list, scenario["product_assignments"].values(), strict=False
    ):
        associate_attribute_values_to_instance(
            product,
            {ref_attr.pk: [value_by_sku[sku] for sku in assigned_skus]},
        )
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": ref_attr.slug,
                    "value": {
                        "reference": {
                            "productVariantSkus": {
                                scenario["filter"]: scenario["search"]
                            }
                        }
                    },
                }
            ]
        },
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["products"]["edges"]
    assert len(edges) == scenario["expected"]
    # Verify the first product whenever results are expected.
    if scenario["expected"] > 0:
        expected_id = graphene.Node.to_global_id("Product", product_list[0].pk)
        assert edges[0]["node"]["id"] == expected_id
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"scenario",
[
{
"attr_fixture": "product_type_variant_reference_attribute",
"filter": "containsAny",
"expected": 3,
"product_assignments": {
"product_with_all_variants": [
"test-variant-1",
"test-variant-2",
"test-variant-3",
],
"product_with_same_all_variants": [
"test-variant-1",
"test-variant-2",
"test-variant-3",
],
"product_with_first_variant_only": ["test-variant-1"],
},
"search": ["test-variant-1", "test-variant-2", "test-variant-3"],
},
{
"attr_fixture": "product_type_variant_reference_attribute",
"filter": "containsAll",
"expected": 2,
"product_assignments": {
"product_with_all_variants": [
"test-variant-1",
"test-variant-2",
"test-variant-3",
],
"product_with_same_all_variants": [
"test-variant-1",
"test-variant-2",
"test-variant-3",
],
"product_with_first_variant_only": ["test-variant-1"],
},
"search": ["test-variant-1", "test-variant-2", "test-variant-3"],
},
{
"attr_fixture": "product_type_variant_single_reference_attribute",
"filter": "containsAny",
"expected": 3,
"product_assignments": {
"product_with_first_variant": ["test-variant-1"],
"product_with_second_variant": ["test-variant-2"],
"product_with_third_variant": ["test-variant-3"],
},
"search": ["test-variant-1", "test-variant-2", "test-variant-3"],
},
{
"attr_fixture": "product_type_variant_single_reference_attribute",
"filter": "containsAll",
"expected": 1,
"product_assignments": {
"product_with_first_variant": ["test-variant-1"],
"product_with_second_variant": ["test-variant-2"],
"product_with_third_variant": ["test-variant-3"],
},
"search": ["test-variant-1"],
},
],
)
def test_products_query_with_attr_slug_attribute_value_referenced_variant_ids(
    query,
    scenario,
    request,
    staff_api_client,
    product_list,
    product_type,
    product_variant_list,
    channel_USD,
):
    """Filter products by referenced variant global IDs for one attribute slug."""
    # given
    ref_attr = request.getfixturevalue(scenario["attr_fixture"])
    product_type.product_attributes.add(ref_attr)
    variants = product_variant_list[:3]
    for idx, variant in enumerate(variants, start=1):
        variant.sku = f"test-variant-{idx}"
        variant.save()
    values = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=ref_attr,
                name=f"Variant {variant.pk}",
                slug=f"variant-{variant.pk}",
                reference_variant=variant,
            )
            for variant in variants
        ]
    )
    # Attach the scenario's SKU lists, one product per assignment entry.
    value_by_sku = {
        variant.sku: value for variant, value in zip(variants, values, strict=False)
    }
    for product, assigned_skus in zip(
        product_list, scenario["product_assignments"].values(), strict=False
    ):
        associate_attribute_values_to_instance(
            product,
            {ref_attr.pk: [value_by_sku[sku] for sku in assigned_skus]},
        )
    # Translate searched SKUs into variant global IDs.
    variant_by_sku = {variant.sku: variant for variant in variants}
    search_ids = [
        to_global_id_or_none(variant_by_sku[sku]) for sku in scenario["search"]
    ]
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": ref_attr.slug,
                    "value": {
                        "reference": {"referencedIds": {scenario["filter"]: search_ids}}
                    },
                }
            ]
        },
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["products"]["edges"]
    assert len(edges) == scenario["expected"]
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_references_variants.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/products_filtrations/test_over_validation.py | import pytest
from .....tests.utils import get_graphql_content
from .shared import PRODUCTS_FILTER_QUERY, PRODUCTS_WHERE_QUERY
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_value_filter",
[{"numeric": None}, {"name": None}, {"slug": None}, {"boolean": False}],
)
def test_products_query_failed_filter_validation_for_numeric_with_slug_input(
    query,
    attribute_value_filter,
    staff_api_client,
    numeric_attribute_without_unit,
    product_type,
    channel_USD,
):
    """Reject value filters that do not fit a numeric attribute."""
    # given
    slug = "numeric"
    numeric_attribute_without_unit.slug = slug
    numeric_attribute_without_unit.save()
    product_type.product_attributes.add(numeric_attribute_without_unit)
    variables = {
        "where": {"attributes": [{"slug": slug, "value": attribute_value_filter}]},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_value_filter",
[{"boolean": None}, {"name": None}, {"slug": None}, {"numeric": {"eq": 1.2}}],
)
def test_products_query_failed_filter_validation_for_boolean_with_slug_input(
    query,
    attribute_value_filter,
    staff_api_client,
    boolean_attribute,
    product_type,
    channel_USD,
):
    """Reject value filters that do not fit a boolean attribute."""
    # given
    slug = "boolean"
    boolean_attribute.slug = slug
    boolean_attribute.save()
    product_type.product_attributes.add(boolean_attribute)
    variables = {
        "where": {"attributes": [{"slug": slug, "value": attribute_value_filter}]},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"dateTime": None},
{"name": None},
{"slug": None},
{"numeric": {"eq": 1.2}},
{"reference": {"referencedIds": {"containsAll": ["global-id-1"]}}},
],
)
def test_products_query_failed_filter_validation_for_date_attribute_with_slug_input(
    query,
    attribute_value_filter,
    staff_api_client,
    date_attribute,
    product_type,
    channel_USD,
):
    """Reject value filters that do not fit a date attribute."""
    # given
    slug = "date"
    date_attribute.slug = slug
    date_attribute.save()
    product_type.product_attributes.add(date_attribute)
    variables = {
        "where": {"attributes": [{"slug": slug, "value": attribute_value_filter}]},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"dateTime": None},
{"name": None},
{"slug": None},
{"numeric": {"eq": 1.2}},
{"date": None},
{"reference": {"referencedIds": {"containsAll": ["global-id-1"]}}},
],
)
def test_products_query_failed_filter_validation_for_datetime_attribute_with_slug_input(
    query,
    attribute_value_filter,
    staff_api_client,
    date_time_attribute,
    product_type,
    channel_USD,
):
    """Reject value filters that do not fit a date-time attribute."""
    # given
    slug = "date_time"
    date_time_attribute.slug = slug
    date_time_attribute.save()
    product_type.product_attributes.add(date_time_attribute)
    variables = {
        "where": {"attributes": [{"slug": slug, "value": attribute_value_filter}]},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"slug": None, "value": None},
{"slug": None, "value": {"name": {"eq": "name"}}},
],
)
def test_products_query_failed_filter_validation_null_in_input(
    query,
    attribute_value_filter,
    staff_api_client,
    channel_USD,
):
    """Reject attribute filter entries that contain explicit nulls."""
    # given
    variables = {
        "where": {"attributes": [attribute_value_filter]},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_value_filter",
[
{"slug": None},
{"name": None},
{
"slug": {"eq": "true_slug"},
"name": {"eq": "name"},
},
{
"slug": {"oneOf": ["true_slug"]},
"name": {"oneOf": ["name"]},
},
],
)
def test_products_query_failed_filter_validation_for_basic_value_fields_with_attr_slug(
    query,
    attribute_value_filter,
    staff_api_client,
    channel_USD,
):
    """Reject null or conflicting name/slug value filters under an attribute slug."""
    # given
    attr_slug = "product-size"
    variables = {
        "where": {
            "attributes": [{"slug": attr_slug, "value": attribute_value_filter}]
        },
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
def test_products_query_failed_filter_validation_for_duplicated_attr_slug(
    query,
    staff_api_client,
    channel_USD,
):
    """Reject filter input that names the same attribute slug twice."""
    # given
    duplicated_slug = "product-size"
    variables = {
        "where": {"attributes": [{"slug": duplicated_slug}] * 2},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_value_filter",
[
{},
{"reference": {}},
{"reference": None},
{"reference": {"referencedIds": {"containsAll": []}}},
{"reference": {"pageSlugs": {"containsAll": []}}},
{"reference": {"productSlugs": {"containsAll": []}}},
{"reference": {"productVariantSkus": {"containsAll": []}}},
{"reference": {"pageSlugs": {"containsAny": []}}},
{"reference": {"productSlugs": {"containsAny": []}}},
{"reference": {"productVariantSkus": {"containsAny": []}}},
{"reference": {"referencedIds": {"containsAny": []}}},
{"reference": {"pageSlugs": {"containsAny": [], "containsAll": []}}},
{"reference": {"productSlugs": {"containsAny": [], "containsAll": []}}},
{"reference": {"productVariantSkus": {"containsAny": [], "containsAll": []}}},
{"reference": {"referencedIds": {"containsAny": [], "containsAll": []}}},
{"reference": {"referencedIds": {"containsAll": None}}},
{"reference": {"pageSlugs": {"containsAll": None}}},
{"reference": {"productSlugs": {"containsAll": None}}},
{"reference": {"productVariantSkus": {"containsAll": None}}},
{"reference": {"pageSlugs": {"containsAny": None}}},
{"reference": {"productSlugs": {"containsAny": None}}},
{"reference": {"productVariantSkus": {"containsAny": None}}},
{"reference": {"referencedIds": {"containsAny": None}}},
{"reference": {"referencedIds": {"containsAny": ["non-existing-id"]}}},
{"reference": {"referencedIds": {"containsAll": ["non-existing-id"]}}},
# ID of not valid object
{"reference": {"referencedIds": {"containsAny": ["T3JkZXI6MQ=="]}}},
{"reference": {"referencedIds": {"containsAll": ["T3JkZXI6MQ=="]}}},
],
)
def test_products_query_failed_filter_validation_for_reference_attribute_with_slug_input(
    query,
    attribute_value_filter,
    staff_api_client,
    product_type,
    product_type_product_reference_attribute,
    channel_USD,
):
    """Reject empty, null, or unresolvable reference value filters."""
    # given
    attr_slug = "reference-product"
    variables = {
        "where": {
            "attributes": [{"slug": attr_slug, "value": attribute_value_filter}]
        },
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_filter_input",
[
{"values": ["test-slug"]},
{"valuesRange": {"gte": 1}},
{"dateTime": {"gte": "2023-01-01T00:00:00Z"}},
{"date": {"gte": "2023-01-01"}},
{"boolean": True},
],
)
def test_products_query_failed_filter_validation_when_missing_attr_slug_for_deprecated_input(
    query,
    attribute_filter_input,
    staff_api_client,
    channel_USD,
):
    """Deprecated attribute filter fields require an attribute slug."""
    # given
    variables = {
        "where": {"attributes": [attribute_filter_input]},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_filter_input",
[
{"values": ["test-slug"]},
{"valuesRange": {"gte": 1}},
{"dateTime": {"gte": "2023-01-01T00:00:00Z"}},
{"date": {"gte": "2023-01-01"}},
{"boolean": True},
],
)
def test_products_query_failed_filter_validation_when_multiple_inputs_with_deprecated_and_new(
    query,
    attribute_filter_input,
    staff_api_client,
    channel_USD,
):
    """Mixing new-style and deprecated entries in one filter list must fail."""
    # given
    new_style_entry = {"slug": "attr-slug", "value": {"name": {"eq": "val-name"}}}
    deprecated_entry = {"slug": "attr-slug2", **attribute_filter_input}
    variables = {
        "where": {"attributes": [new_style_entry, deprecated_entry]},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
@pytest.mark.parametrize("query", [PRODUCTS_WHERE_QUERY, PRODUCTS_FILTER_QUERY])
@pytest.mark.parametrize(
"attribute_filter_input",
[
{"values": ["test-slug"]},
{"valuesRange": {"gte": 1}},
{"dateTime": {"gte": "2023-01-01T00:00:00Z"}},
{"date": {"gte": "2023-01-01"}},
{"boolean": True},
],
)
def test_products_query_failed_filter_validation_when_providing_deprecated_and_new_input(
    query,
    attribute_filter_input,
    staff_api_client,
    channel_USD,
):
    """Combining a deprecated field with the new `value` field on one entry must fail."""
    # given
    attribute_filter_input["value"] = {"name": {"eq": "val-name"}}
    mixed_entry = {"slug": "attr-slug", **attribute_filter_input}
    variables = {
        "where": {"attributes": [mixed_entry]},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    payload = get_graphql_content(response, ignore_errors=True)
    assert "errors" in payload
    assert payload["data"]["products"] is None
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/products_filtrations/test_over_validation.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 409,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/filters/category.py | import django_filters
import graphene
from django.db.models import Q
from ....product.models import Category
from ...core.doc_category import DOC_CATEGORY_PRODUCTS
from ...core.filters import (
FilterInputObjectType,
GlobalIDMultipleChoiceFilter,
GlobalIDMultipleChoiceWhereFilter,
ListObjectTypeFilter,
MetadataFilterBase,
MetadataWhereFilterBase,
ObjectTypeFilter,
)
from ...core.filters.where_input import (
WhereInputObjectType,
)
from ...core.types import (
DateTimeRangeInput,
)
from ...utils.filters import (
filter_by_ids,
filter_slug_list,
)
from .shared import filter_updated_at_range
class CategoryFilter(MetadataFilterBase):
    """Filter set backing the deprecated `filter` argument on category queries."""

    search = django_filters.CharFilter(method="category_filter_search")
    ids = GlobalIDMultipleChoiceFilter(field_name="id")
    slugs = ListObjectTypeFilter(input_class=graphene.String, method=filter_slug_list)
    updated_at = ObjectTypeFilter(
        input_class=DateTimeRangeInput,
        method=filter_updated_at_range,
        help_text="Filter by when was the most recent update.",
    )
    class Meta:
        model = Category
        fields = ["search"]
    @classmethod
    def category_filter_search(cls, queryset, _name, value):
        """Match `value` against category name, slug and plaintext description."""
        # An empty search term leaves the queryset untouched rather than
        # matching nothing.
        if not value:
            return queryset
        # NOTE(review): `ilike` appears to be a case-insensitive pattern lookup
        # registered elsewhere in the project — not a built-in Django lookup.
        name_slug_desc_qs = (
            Q(name__ilike=value)
            | Q(slug__ilike=value)
            | Q(description_plaintext__ilike=value)
        )
        return queryset.filter(name_slug_desc_qs)
class CategoryWhere(MetadataWhereFilterBase):
    """Filter set backing the `where` argument on category queries."""

    ids = GlobalIDMultipleChoiceWhereFilter(method=filter_by_ids("Category"))
    # Category where filter is also used in promotion logic. In case of extending it
    # the category update mutation should also be modified to recalculate the prices
    # for new added field.
    class Meta:
        model = Category
        fields = []
class CategoryFilterInput(FilterInputObjectType):
    """GraphQL input type exposing `CategoryFilter`."""

    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = CategoryFilter
class CategoryWhereInput(WhereInputObjectType):
    """GraphQL input type exposing `CategoryWhere`."""

    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = CategoryWhere
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/filters/category.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/product/filters/collection.py | import django_filters
import graphene
from django.db.models import Q
from ....product.models import (
Collection,
)
from ...channel.filters import get_channel_slug_from_filter_data
from ...core.doc_category import DOC_CATEGORY_PRODUCTS
from ...core.filters import (
ChannelFilterInputObjectType,
EnumFilter,
GlobalIDMultipleChoiceFilter,
GlobalIDMultipleChoiceWhereFilter,
ListObjectTypeFilter,
MetadataFilterBase,
MetadataWhereFilterBase,
)
from ...core.filters.where_input import (
WhereInputObjectType,
)
from ...utils.filters import (
filter_by_ids,
filter_slug_list,
)
from ..enums import (
CollectionPublished,
)
def _filter_collections_is_published(qs, _, value, channel_slug):
return qs.filter(
channel_listings__is_published=value,
channel_listings__channel__slug=channel_slug,
)
class CollectionFilter(MetadataFilterBase):
    """Filter set backing the deprecated `filter` argument on collection queries."""

    published = EnumFilter(
        input_class=CollectionPublished, method="filter_is_published"
    )
    search = django_filters.CharFilter(method="collection_filter_search")
    ids = GlobalIDMultipleChoiceFilter(field_name="id")
    slugs = ListObjectTypeFilter(input_class=graphene.String, method=filter_slug_list)
    class Meta:
        model = Collection
        fields = ["published", "search"]
    def collection_filter_search(self, queryset, _name, value):
        """Match `value` against collection name and slug."""
        # An empty search term leaves the queryset untouched rather than
        # matching nothing.
        if not value:
            return queryset
        # NOTE(review): `ilike` appears to be a case-insensitive pattern lookup
        # registered elsewhere in the project — not a built-in Django lookup.
        name_slug_qs = Q(name__ilike=value) | Q(slug__ilike=value)
        return queryset.filter(name_slug_qs)
    def filter_is_published(self, queryset, name, value):
        """Filter by publication state within the channel taken from filter data."""
        channel_slug = get_channel_slug_from_filter_data(self.data)
        if value == CollectionPublished.PUBLISHED:
            return _filter_collections_is_published(queryset, name, True, channel_slug)
        if value == CollectionPublished.HIDDEN:
            return _filter_collections_is_published(queryset, name, False, channel_slug)
        # Any other enum value is treated as "no publication filter".
        return queryset
class CollectionWhere(MetadataWhereFilterBase):
    """Filter set backing the `where` argument on collection queries."""

    ids = GlobalIDMultipleChoiceWhereFilter(method=filter_by_ids("Collection"))
    # Collection where filter is also used in promotion logic. In case of extending it
    # the collection update mutation should also be modified to recalculate the prices
    # for new added field.
    class Meta:
        model = Collection
        fields = []
class CollectionFilterInput(ChannelFilterInputObjectType):
    """GraphQL input type exposing ``CollectionFilter`` (channel-aware)."""

    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = CollectionFilter
class CollectionWhereInput(WhereInputObjectType):
    """GraphQL ``where`` input type exposing ``CollectionWhere``."""

    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = CollectionWhere
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/filters/collection.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/product/filters/product.py | import django_filters
import graphene
from django.db.models import Exists, OuterRef
from ....channel.models import Channel
from ....product.models import (
CollectionProduct,
Product,
ProductChannelListing,
ProductVariant,
ProductVariantChannelListing,
)
from ...channel.filters import get_channel_slug_from_filter_data
from ...core.doc_category import DOC_CATEGORY_PRODUCTS
from ...core.filters import (
BooleanWhereFilter,
ChannelFilterInputObjectType,
EnumFilter,
EnumWhereFilter,
GlobalIDMultipleChoiceFilter,
GlobalIDMultipleChoiceWhereFilter,
ListObjectTypeFilter,
ListObjectTypeWhereFilter,
MetadataFilterBase,
MetadataWhereFilterBase,
ObjectTypeFilter,
ObjectTypeWhereFilter,
OperationObjectTypeWhereFilter,
)
from ...core.filters.where_input import (
DateTimeFilterInput,
DecimalFilterInput,
GlobalIDFilterInput,
StringFilterInput,
WhereInputObjectType,
)
from ...core.scalars import DateTime
from ...core.types import (
BaseInputObjectType,
DateTimeRangeInput,
IntRangeInput,
NonNullList,
PriceRangeInput,
)
from ...utils.filters import (
filter_by_id,
filter_by_ids,
filter_slug_list,
filter_where_by_id_field,
filter_where_by_numeric_field,
filter_where_by_value_field,
)
from ..enums import StockAvailability
from .product_attributes import filter_products_by_attributes, validate_attribute_input
from .product_helpers import (
filter_categories,
filter_collections,
filter_gift_card,
filter_has_category,
filter_has_preordered_variants,
filter_minimal_price,
filter_product_types,
filter_products_channel_field_from_date,
filter_products_is_available,
filter_products_is_published,
filter_products_visible_in_listing,
filter_search,
filter_stock_availability,
filter_stocks,
filter_variant_price,
where_filter_by_categories,
where_filter_gift_card,
where_filter_has_category,
where_filter_has_preordered_variants,
where_filter_products_channel_field_from_date,
where_filter_products_is_available,
where_filter_stock_availability,
where_filter_stocks,
where_filter_updated_at_range,
)
from .shared import filter_updated_at_range
T_PRODUCT_FILTER_QUERIES = dict[int, list[int]]
class ProductStockFilterInput(BaseInputObjectType):
    """Input for filtering products by stock: warehouse IDs and a quantity range."""

    warehouse_ids = NonNullList(graphene.ID, required=False)
    quantity = graphene.Field(IntRangeInput, required=False)

    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
class ProductFilter(MetadataFilterBase):
    """Legacy ``filter`` input for products.

    Channel-dependent filters (publication, availability, prices, stock
    status) resolve the channel slug from the submitted filter data via
    ``get_channel_slug_from_filter_data``.
    """

    is_published = django_filters.BooleanFilter(method="filter_is_published")
    published_from = ObjectTypeFilter(
        input_class=DateTime,
        method="filter_published_from",
        help_text="Filter by the publication date.",
    )
    is_available = django_filters.BooleanFilter(
        method="filter_is_available",
        help_text="Filter by availability for purchase.",
    )
    available_from = ObjectTypeFilter(
        input_class=DateTime,
        method="filter_available_from",
        help_text="Filter by the date of availability for purchase.",
    )
    is_visible_in_listing = django_filters.BooleanFilter(
        method="filter_listed",
        help_text="Filter by visibility in product listings.",
    )
    collections = GlobalIDMultipleChoiceFilter(method=filter_collections)
    categories = GlobalIDMultipleChoiceFilter(method=filter_categories)
    has_category = django_filters.BooleanFilter(method=filter_has_category)
    price = ObjectTypeFilter(input_class=PriceRangeInput, method="filter_variant_price")
    minimal_price = ObjectTypeFilter(
        input_class=PriceRangeInput,
        method="filter_minimal_price",
        field_name="minimal_price_amount",
        help_text="Filter by the lowest variant price after discounts.",
    )
    attributes = ListObjectTypeFilter(
        input_class="saleor.graphql.attribute.types.AttributeInput",
        method="filter_attributes",
    )
    stock_availability = EnumFilter(
        input_class=StockAvailability,
        method="filter_stock_availability",
        help_text="Filter by variants having specific stock status.",
    )
    updated_at = ObjectTypeFilter(
        input_class=DateTimeRangeInput,
        method=filter_updated_at_range,
        help_text="Filter by when was the most recent update.",
    )
    product_types = GlobalIDMultipleChoiceFilter(method=filter_product_types)
    stocks = ObjectTypeFilter(input_class=ProductStockFilterInput, method=filter_stocks)
    search = django_filters.CharFilter(method=filter_search)
    gift_card = django_filters.BooleanFilter(
        method=filter_gift_card,
        help_text="Filter on whether product is a gift card or not.",
    )
    ids = GlobalIDMultipleChoiceFilter(method=filter_by_id("Product"))
    has_preordered_variants = django_filters.BooleanFilter(
        method=filter_has_preordered_variants
    )
    slugs = ListObjectTypeFilter(input_class=graphene.String, method=filter_slug_list)

    class Meta:
        model = Product
        fields = [
            "is_published",
            "collections",
            "categories",
            "has_category",
            "attributes",
            "stock_availability",
            "stocks",
            "search",
        ]

    def filter_attributes(self, queryset, name, value):
        # Empty attribute input is a no-op.
        if not value:
            return queryset
        return filter_products_by_attributes(queryset, value)

    def filter_variant_price(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_variant_price(queryset, name, value, channel_slug)

    def filter_minimal_price(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_minimal_price(queryset, name, value, channel_slug)

    def filter_is_published(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_products_is_published(
            queryset,
            name,
            value,
            channel_slug,
        )

    def filter_published_from(self, queryset, name, value):
        # Compares against the channel listing's "published_at" field.
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_products_channel_field_from_date(
            queryset,
            name,
            value,
            channel_slug,
            "published_at",
        )

    def filter_is_available(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_products_is_available(
            queryset,
            name,
            value,
            channel_slug,
        )

    def filter_available_from(self, queryset, name, value):
        # Compares against the channel listing's "available_for_purchase_at" field.
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_products_channel_field_from_date(
            queryset,
            name,
            value,
            channel_slug,
            "available_for_purchase_at",
        )

    def filter_listed(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_products_visible_in_listing(
            queryset,
            name,
            value,
            channel_slug,
        )

    def filter_stock_availability(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_stock_availability(queryset, name, value, channel_slug)

    def is_valid(self):
        # Validate raw attribute input before the filterset's own validation.
        if attributes := self.data.get("attributes"):
            validate_attribute_input(attributes, self.queryset.db)
        return super().is_valid()
class ProductWhere(MetadataWhereFilterBase):
    """``where`` input for products.

    Channel-dependent filters (availability, publication, prices, stock
    status) resolve the channel slug from the submitted filter data via
    ``get_channel_slug_from_filter_data``.
    """

    ids = GlobalIDMultipleChoiceWhereFilter(method=filter_by_ids("Product"))
    name = OperationObjectTypeWhereFilter(
        input_class=StringFilterInput,
        method="filter_product_name",
        help_text="Filter by product name.",
    )
    slug = OperationObjectTypeWhereFilter(
        input_class=StringFilterInput,
        method="filter_product_slug",
        help_text="Filter by product slug.",
    )
    product_type = OperationObjectTypeWhereFilter(
        input_class=GlobalIDFilterInput,
        method="filter_product_type",
        help_text="Filter by product type.",
    )
    category = OperationObjectTypeWhereFilter(
        input_class=GlobalIDFilterInput,
        method="filter_category",
        help_text="Filter by product category.",
    )
    collection = OperationObjectTypeWhereFilter(
        input_class=GlobalIDFilterInput,
        method="filter_collection",
        help_text="Filter by collection.",
    )
    is_available = BooleanWhereFilter(
        method="filter_is_available", help_text="Filter by availability for purchase."
    )
    is_published = BooleanWhereFilter(
        method="filter_is_published", help_text="Filter by public visibility."
    )
    is_visible_in_listing = BooleanWhereFilter(
        method="filter_is_listed", help_text="Filter by visibility on the channel."
    )
    published_from = ObjectTypeWhereFilter(
        input_class=DateTime,
        method="filter_published_from",
        help_text="Filter by the publication date.",
    )
    available_from = ObjectTypeWhereFilter(
        input_class=DateTime,
        method="filter_available_from",
        help_text="Filter by the date of availability for purchase.",
    )
    has_category = BooleanWhereFilter(
        method=where_filter_has_category,
        help_text="Filter by product with category assigned.",
    )
    price = OperationObjectTypeWhereFilter(
        input_class=DecimalFilterInput,
        method="filter_variant_price",
        help_text="Filter by product variant price.",
    )
    minimal_price = OperationObjectTypeWhereFilter(
        input_class=DecimalFilterInput,
        method="filter_minimal_price",
        field_name="minimal_price_amount",
        help_text="Filter by the lowest variant price after discounts.",
    )
    attributes = ListObjectTypeWhereFilter(
        input_class="saleor.graphql.attribute.types.AttributeInput",
        method="filter_attributes",
        help_text="Filter by attributes associated with the product.",
    )
    stock_availability = EnumWhereFilter(
        input_class=StockAvailability,
        method="filter_stock_availability",
        help_text="Filter by variants having specific stock status.",
    )
    stocks = ObjectTypeWhereFilter(
        input_class=ProductStockFilterInput,
        method=where_filter_stocks,
        help_text="Filter by stock of the product variant.",
    )
    gift_card = BooleanWhereFilter(
        method=where_filter_gift_card,
        help_text="Filter on whether product is a gift card or not.",
    )
    has_preordered_variants = BooleanWhereFilter(
        method=where_filter_has_preordered_variants,
        help_text="Filter by product with preordered variants.",
    )
    updated_at = ObjectTypeWhereFilter(
        input_class=DateTimeFilterInput,
        method=where_filter_updated_at_range,
        help_text="Filter by when was the most recent update.",
    )

    class Meta:
        model = Product
        fields = []

    @staticmethod
    def filter_product_name(qs, _, value):
        return filter_where_by_value_field(qs, "name", value)

    @staticmethod
    def filter_product_slug(qs, _, value):
        return filter_where_by_value_field(qs, "slug", value)

    @staticmethod
    def filter_product_type(qs, _, value):
        return filter_where_by_id_field(qs, "product_type", value, "ProductType")

    @staticmethod
    def filter_category(qs, _, value):
        return where_filter_by_categories(qs, value)

    @staticmethod
    def filter_collection(qs, _, value):
        # Products that appear in any collection matched by the ID filter.
        collection_products_qs = CollectionProduct.objects.using(qs.db).filter()
        collection_products_qs = filter_where_by_id_field(
            collection_products_qs, "collection_id", value, "Collection"
        )
        collection_products = collection_products_qs.values("product_id")
        return qs.filter(Exists(collection_products.filter(product_id=OuterRef("pk"))))

    def filter_is_available(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return where_filter_products_is_available(
            queryset,
            name,
            value,
            channel_slug,
        )

    def filter_is_published(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_products_is_published(
            queryset,
            name,
            value,
            channel_slug,
        )

    def filter_is_listed(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return filter_products_visible_in_listing(
            queryset,
            name,
            value,
            channel_slug,
        )

    def filter_published_from(self, queryset, name, value):
        # Compares against the channel listing's "published_at" field.
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return where_filter_products_channel_field_from_date(
            queryset,
            name,
            value,
            channel_slug,
            "published_at",
        )

    def filter_available_from(self, queryset, name, value):
        # Compares against the channel listing's "available_for_purchase_at" field.
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return where_filter_products_channel_field_from_date(
            queryset,
            name,
            value,
            channel_slug,
            "available_for_purchase_at",
        )

    def filter_variant_price(self, qs, _, value):
        # Products with at least one variant whose listing price on the
        # selected channel satisfies the numeric condition.
        channel_slug = get_channel_slug_from_filter_data(self.data)
        channel_id = Channel.objects.using(qs.db).filter(slug=channel_slug).values("pk")
        variant_listing = ProductVariantChannelListing.objects.using(qs.db).filter(
            Exists(channel_id.filter(pk=OuterRef("channel_id")))
        )
        variant_listing = filter_where_by_numeric_field(
            variant_listing, "price_amount", value
        )
        variant_listing = variant_listing.values("variant_id")
        variants = (
            ProductVariant.objects.using(qs.db)
            .filter(Exists(variant_listing.filter(variant_id=OuterRef("pk"))))
            .values("product_id")
        )
        return qs.filter(Exists(variants.filter(product_id=OuterRef("pk"))))

    def filter_minimal_price(self, qs, _, value):
        # Filters on the product channel listing's discounted price; an
        # unknown channel slug yields the queryset unchanged.
        channel_slug = get_channel_slug_from_filter_data(self.data)
        channel = Channel.objects.using(qs.db).filter(slug=channel_slug).first()
        if not channel:
            return qs
        product_listing = ProductChannelListing.objects.using(qs.db).filter(
            channel_id=channel.id
        )
        product_listing = filter_where_by_numeric_field(
            product_listing, "discounted_price_amount", value
        )
        product_listing = product_listing.values("product_id")
        return qs.filter(Exists(product_listing.filter(product_id=OuterRef("pk"))))

    @staticmethod
    def filter_attributes(queryset, name, value):
        return filter_products_by_attributes(queryset, value)

    def filter_stock_availability(self, queryset, name, value):
        channel_slug = get_channel_slug_from_filter_data(self.data)
        return where_filter_stock_availability(queryset, name, value, channel_slug)

    def is_valid(self):
        # Validate raw attribute input before the filterset's own validation.
        if attributes := self.data.get("attributes"):
            validate_attribute_input(attributes, self.queryset.db)
        return super().is_valid()
class ProductFilterInput(ChannelFilterInputObjectType):
    """GraphQL input type exposing ``ProductFilter`` (channel-aware)."""

    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = ProductFilter
class ProductWhereInput(WhereInputObjectType):
    """GraphQL ``where`` input type exposing ``ProductWhere``."""

    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = ProductWhere
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/filters/product.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 414,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/product/filters/product_attributes.py | import datetime
import math
from collections import defaultdict
from typing import Literal, TypedDict
from django.db.models import Exists, OuterRef, Q, QuerySet
from graphql import GraphQLError
from ....attribute import AttributeInputType
from ....attribute.models import (
AssignedProductAttributeValue,
AssignedVariantAttribute,
AssignedVariantAttributeValue,
Attribute,
AttributeValue,
)
from ....product.models import Product, ProductVariant
from ...attribute.shared_filters import (
CONTAINS_TYPING,
clean_up_referenced_global_ids,
get_attribute_values_by_boolean_value,
get_attribute_values_by_date_time_value,
get_attribute_values_by_date_value,
get_attribute_values_by_numeric_value,
get_attribute_values_by_referenced_category_ids,
get_attribute_values_by_referenced_category_slugs,
get_attribute_values_by_referenced_collection_ids,
get_attribute_values_by_referenced_collection_slugs,
get_attribute_values_by_referenced_page_ids,
get_attribute_values_by_referenced_page_slugs,
get_attribute_values_by_referenced_product_ids,
get_attribute_values_by_referenced_product_slugs,
get_attribute_values_by_referenced_variant_ids,
get_attribute_values_by_referenced_variant_skus,
get_attribute_values_by_slug_or_name_value,
validate_attribute_value_input,
)
from ...utils.filters import Number
T_PRODUCT_FILTER_QUERIES = dict[int, list[int]]
def _clean_product_attributes_filter_input(
    filter_values, queries, database_connection_name
):
    """Resolve (attribute slug, value slugs) pairs into pk-based *queries*.

    Looks up the referenced attributes and their values in the database,
    then delegates to ``_update_queries`` to accumulate value pks per
    attribute pk.
    """
    requested_attr_slugs = [attr_slug for attr_slug, _ in filter_values]
    requested_value_slugs = [
        slug for _, field_value in filter_values for slug in field_value
    ]
    attributes = Attribute.objects.using(database_connection_name).filter(
        slug__in=requested_attr_slugs
    )
    slug_to_pk: dict[str, int] = {}
    pk_to_slug: dict[int, str] = {}
    for slug, pk in attributes.values_list("slug", "id"):
        slug_to_pk[slug] = pk
        pk_to_slug[pk] = slug
    values_map = _populate_slug_value_map(
        database_connection_name, requested_value_slugs, attributes, pk_to_slug
    )
    _update_queries(queries, filter_values, slug_to_pk, values_map)
def _populate_slug_value_map(
    database_connection_name, slugs, attribute_qs, attributes_pk_slug_map
):
    """Build ``{attribute slug: {value slug: [value pks]}}`` for *slugs*.

    Only values that belong to an attribute in *attribute_qs* are included.
    """
    slug_value_map: dict[str, dict[str, list[int]]] = defaultdict(
        lambda: defaultdict(list)
    )
    matching_rows = (
        AttributeValue.objects.using(database_connection_name)
        .filter(Exists(attribute_qs.filter(pk=OuterRef("attribute_id"))))
        .filter(slug__in=slugs)
        .values_list("attribute_id", "pk", "slug")
    )
    for attribute_pk, value_pk, value_slug in matching_rows:
        attribute_slug = attributes_pk_slug_map[attribute_pk]
        slug_value_map[attribute_slug][value_slug].append(value_pk)
    return slug_value_map
def _update_queries(queries, filter_values, attributes_slug_pk_map, value_maps):
for attr_name, vals in filter_values:
if attr_name not in attributes_slug_pk_map:
raise ValueError(f"Unknown attribute name: {attr_name}")
attr_pk = attributes_slug_pk_map[attr_name]
attr_val_pk = []
for val in vals:
if val in value_maps[attr_name]:
attr_val_pk.extend(value_maps[attr_name][val])
queries[attr_pk] += attr_val_pk
def _clean_product_attributes_range_filter_input(
    filter_value, queries, database_connection_name
):
    """Resolve numeric-range filters into pk-based *queries*.

    For each ``(attribute slug, {"gte": x, "lte": y})`` pair, collect the
    pks of attribute values whose numeric value lies in ``[gte, lte]``
    (defaults: ``gte=0``, ``lte=inf``) and store them under the attribute's
    pk in *queries*.

    Raises:
        ValueError: when a slug does not match any numeric attribute.
    """
    attributes = Attribute.objects.using(database_connection_name).filter(
        input_type=AttributeInputType.NUMERIC
    )
    values = (
        AttributeValue.objects.using(database_connection_name)
        .filter(
            Exists(attributes.filter(pk=OuterRef("attribute_id"))),
            numeric__isnull=False,
        )
        .select_related("attribute")
    )
    attributes_map: dict[str, int] = {}
    values_map: defaultdict[str, defaultdict[float, list[int]]] = defaultdict(
        lambda: defaultdict(list)
    )
    for value_data in values.values_list(
        "attribute_id", "attribute__slug", "pk", "numeric"
    ):
        attr_pk, attr_slug, pk, numeric_value = value_data
        attributes_map[attr_slug] = attr_pk
        # Skip only missing values. The previous truthiness check
        # (`if not numeric_value`) also dropped a valid numeric value of 0,
        # making 0-valued attributes unfilterable by range.
        if numeric_value is None:
            continue
        values_map[attr_slug][numeric_value].append(pk)
    for attr_name, val_range in filter_value:
        if attr_name not in attributes_map:
            raise ValueError(f"Unknown numeric attribute name: {attr_name}")
        gte, lte = val_range.get("gte", 0), val_range.get("lte", math.inf)
        attr_pk = attributes_map[attr_name]
        attr_values = values_map[attr_name]
        matching_values = [
            value for value in attr_values.keys() if gte <= value <= lte
        ]
        # Resets any pks accumulated for this attribute by earlier filters —
        # preserved from the original implementation.
        queries[attr_pk] = []
        for value in matching_values:
            queries[attr_pk] += attr_values[value]
def _clean_product_attributes_date_time_range_filter_input(
    filter_value, database_connection_name
):
    """Return an ``AttributeValue`` queryset matching the given date(-time) ranges.

    *filter_value* is a list of ``(attribute slug, {"gte"/"lte": bound})``
    pairs. A plain ``date`` bound is widened to cover the whole day in UTC.

    NOTE(review): all ranges are merged into one ``filters`` dict, so with
    multiple entries the last range wins and is applied to the values of
    every listed attribute — confirm this is intended.
    """
    attribute_slugs = [slug for slug, _ in filter_value]
    matching_attributes = AttributeValue.objects.using(database_connection_name).filter(
        attribute__slug__in=attribute_slugs
    )
    filters = {}
    for _, val_range in filter_value:
        if lte := val_range.get("lte"):
            if not isinstance(lte, datetime.datetime):
                # Bare date: extend to the end of that day (UTC).
                lte = datetime.datetime.combine(
                    lte, datetime.datetime.max.time(), tzinfo=datetime.UTC
                )
            filters["date_time__lte"] = lte
        if gte := val_range.get("gte"):
            if not isinstance(gte, datetime.datetime):
                # Bare date: extend to the start of that day (UTC).
                gte = datetime.datetime.combine(
                    gte, datetime.datetime.min.time(), tzinfo=datetime.UTC
                )
            filters["date_time__gte"] = gte
    return matching_attributes.filter(**filters)
class KeyValueDict(TypedDict):
    """Per-attribute lookup: the attribute pk plus a boolean-value -> value-pk map."""

    pk: int
    values: dict[bool | None, int]
def _clean_product_attributes_boolean_filter_input(
    filter_value, queries, database_connection_name
):
    """Resolve (attribute slug, boolean) pairs into pk-based *queries*.

    Raises ``ValueError`` for unknown attribute slugs or for booleans with
    no stored value.
    """
    requested_slugs = [slug for slug, _ in filter_value]
    boolean_attributes = (
        Attribute.objects.using(database_connection_name)
        .filter(input_type=AttributeInputType.BOOLEAN, slug__in=requested_slugs)
        .prefetch_related("values")
    )
    lookup: dict[str, KeyValueDict] = {}
    for attribute in boolean_attributes:
        lookup[attribute.slug] = {
            "pk": attribute.pk,
            "values": {value.boolean: value.pk for value in attribute.values.all()},
        }
    for attr_slug, boolean in filter_value:
        if attr_slug not in lookup:
            raise ValueError(f"Unknown attribute name: {attr_slug}")
        entry = lookup[attr_slug]
        attr_pk = entry.get("pk")
        value_pk = entry["values"].get(boolean)
        if not value_pk:
            raise ValueError(f"Requested value for attribute {attr_slug} doesn't exist")
        if attr_pk and value_pk:
            queries[attr_pk] += [value_pk]
def filter_products_by_attributes_values(qs, queries: T_PRODUCT_FILTER_QUERIES):
    """Filter products that satisfy ALL attribute queries in *queries*.

    Each entry maps an attribute pk to a list of accepted value pks; a
    product matches an entry when the product itself — or any of its
    variants — is assigned one of those values. Entries are AND-ed
    together via the collected ``filters``.
    """
    filters = []
    for values in queries.values():
        # Product-level assignment of any of the wanted values.
        assigned_product_attribute_values = AssignedProductAttributeValue.objects.using(
            qs.db
        ).filter(value_id__in=values)
        product_attribute_filter = Q(
            Exists(assigned_product_attribute_values.filter(product_id=OuterRef("pk")))
        )
        # Variant-level chain: value -> assignment -> variant -> product.
        assigned_variant_attribute_values = AssignedVariantAttributeValue.objects.using(
            qs.db
        ).filter(value_id__in=values)
        assigned_variant_attributes = AssignedVariantAttribute.objects.using(
            qs.db
        ).filter(
            Exists(
                assigned_variant_attribute_values.filter(assignment_id=OuterRef("pk"))
            )
        )
        product_variants = ProductVariant.objects.using(qs.db).filter(
            Exists(assigned_variant_attributes.filter(variant_id=OuterRef("pk")))
        )
        variant_attribute_filter = Q(
            Exists(product_variants.filter(product_id=OuterRef("pk")))
        )
        filters.append(product_attribute_filter | variant_attribute_filter)
    return qs.filter(*filters)
def filter_products_by_attributes_values_qs(qs, values_qs):
    """Filter products assigned any value from the *values_qs* queryset.

    Like ``filter_products_by_attributes_values`` but takes a single
    ``AttributeValue`` queryset instead of pk lists; matches via direct
    product assignment or through any of the product's variants.
    """
    # Product-level assignment.
    assigned_product_attribute_values = AssignedProductAttributeValue.objects.using(
        qs.db
    ).filter(value__in=values_qs)
    product_attribute_filter = Q(
        Exists(assigned_product_attribute_values.filter(product_id=OuterRef("pk")))
    )
    # Variant-level chain: value -> assignment -> variant -> product.
    assigned_variant_attribute_values = AssignedVariantAttributeValue.objects.using(
        qs.db
    ).filter(value__in=values_qs)
    assigned_variant_attributes = AssignedVariantAttribute.objects.using(qs.db).filter(
        Exists(assigned_variant_attribute_values.filter(assignment_id=OuterRef("pk")))
    )
    product_variants = ProductVariant.objects.using(qs.db).filter(
        Exists(assigned_variant_attributes.filter(variant_id=OuterRef("pk")))
    )
    variant_attribute_filter = Q(
        Exists(product_variants.filter(product_id=OuterRef("pk")))
    )
    return qs.filter(product_attribute_filter | variant_attribute_filter)
def _filter_products_by_deprecated_attributes_input(
    qs,
    filter_slug_values,
    filter_range_values,
    filter_boolean_values,
    date_range_list,
    date_time_range_list,
):
    """Apply the deprecated attribute-filter inputs to *qs*.

    Slug, numeric-range and boolean inputs are accumulated into pk-based
    *queries* and applied together. Date and date-time ranges return
    early; NOTE(review): this means boolean inputs passed alongside a
    date(-time) range are silently ignored — confirm this is intended.

    Returns an empty queryset when any input references an unknown
    attribute or value (signalled by ``ValueError`` from the helpers).
    """
    queries: dict[int, list[int]] = defaultdict(list)
    try:
        if filter_slug_values:
            _clean_product_attributes_filter_input(filter_slug_values, queries, qs.db)
        if filter_range_values:
            _clean_product_attributes_range_filter_input(
                filter_range_values, queries, qs.db
            )
        if date_range_list:
            values_qs = _clean_product_attributes_date_time_range_filter_input(
                date_range_list, qs.db
            )
            return filter_products_by_attributes_values_qs(qs, values_qs)
        if date_time_range_list:
            values_qs = _clean_product_attributes_date_time_range_filter_input(
                date_time_range_list, qs.db
            )
            return filter_products_by_attributes_values_qs(qs, values_qs)
        if filter_boolean_values:
            _clean_product_attributes_boolean_filter_input(
                filter_boolean_values, queries, qs.db
            )
    except ValueError:
        return Product.objects.none()
    return filter_products_by_attributes_values(qs, queries)
def deprecated_filter_attributes(qs, value):
    """Deprecated attribute filtering: bucket each input entry by its kind
    and delegate to ``_filter_products_by_deprecated_attributes_input``.

    An empty input yields an empty queryset.
    """
    if not value:
        return qs.none()
    buckets: dict[str, list] = {
        "values": [],
        "values_range": [],
        "date": [],
        "date_time": [],
        "boolean": [],
    }
    # First matching key wins, mirroring the original if/elif chain.
    for entry in value:
        slug = entry["slug"]
        for key in ("values", "values_range", "date", "date_time", "boolean"):
            if key in entry:
                buckets[key].append((slug, entry[key]))
                break
    return _filter_products_by_deprecated_attributes_input(
        qs,
        buckets["values"],
        buckets["values_range"],
        buckets["boolean"],
        buckets["date"],
        buckets["date_time"],
    )
def _get_assigned_product_attribute_for_attribute_value(
    attribute_values: QuerySet[AttributeValue],
    db_connection_name: str,
):
    """Q expression: the product has an assigned value among *attribute_values*."""
    assigned = AssignedProductAttributeValue.objects.using(db_connection_name).filter(
        Exists(attribute_values.filter(id=OuterRef("value_id"))),
        product_id=OuterRef("id"),
    )
    return Q(Exists(assigned))
def filter_by_slug_or_name(
    attr_id: int | None,
    attr_value: dict,
    db_connection_name: str,
):
    """Q expression for products whose attribute value matches by slug or name."""
    matching_values = get_attribute_values_by_slug_or_name_value(
        attr_id=attr_id,
        attr_value=attr_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_product_attribute_for_attribute_value(
        matching_values,
        db_connection_name,
    )
def filter_by_numeric_attribute(
    attr_id: int | None,
    numeric_value: dict[str, Number | list[Number] | dict[str, Number]],
    db_connection_name: str,
):
    """Q expression for products whose attribute value matches *numeric_value*."""
    matching_values = get_attribute_values_by_numeric_value(
        attr_id=attr_id,
        numeric_value=numeric_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_product_attribute_for_attribute_value(
        matching_values,
        db_connection_name,
    )
def filter_by_boolean_attribute(
    attr_id: int | None,
    boolean_value,
    db_connection_name: str,
):
    """Q expression for products whose attribute value matches *boolean_value*."""
    matching_values = get_attribute_values_by_boolean_value(
        attr_id=attr_id,
        boolean_value=boolean_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_product_attribute_for_attribute_value(
        matching_values,
        db_connection_name,
    )
def filter_by_date_attribute(
    attr_id: int | None,
    date_value,
    db_connection_name: str,
):
    """Q expression for products whose attribute value matches *date_value*."""
    matching_values = get_attribute_values_by_date_value(
        attr_id=attr_id,
        date_value=date_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_product_attribute_for_attribute_value(
        matching_values,
        db_connection_name,
    )
def filter_by_date_time_attribute(
    attr_id: int | None,
    date_value,
    db_connection_name: str,
):
    """Q expression for products whose attribute value matches the date-time filter."""
    matching_values = get_attribute_values_by_date_time_value(
        attr_id=attr_id,
        date_value=date_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_product_attribute_for_attribute_value(
        matching_values,
        db_connection_name,
    )
def _filter_contains_single_expression(
    attr_id: int | None,
    db_connection_name: str,
    referenced_attr_values: QuerySet[AttributeValue],
):
    """Q expression for products assigned any of *referenced_attr_values*,
    optionally narrowed to a single attribute."""
    values = referenced_attr_values
    if attr_id:
        values = values.filter(
            attribute_id=attr_id,
        )
    return _get_assigned_product_attribute_for_attribute_value(
        values,
        db_connection_name,
    )
def filter_by_contains_referenced_page_slugs(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter products based on their references to pages.

    - ``contains_all``: the product must reference every listed page.
    - ``contains_any``: the product must reference at least one listed page.
    """
    all_slugs = attr_value.get("contains_all")
    any_slugs = attr_value.get("contains_any")
    if all_slugs:
        # AND one sub-expression per required page.
        conjunction = Q()
        for page_slug in all_slugs:
            values_for_slug = get_attribute_values_by_referenced_page_slugs(
                slugs=[page_slug], db_connection_name=db_connection_name
            )
            conjunction &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values_for_slug,
            )
        return conjunction
    if any_slugs:
        values_for_any = get_attribute_values_by_referenced_page_slugs(
            slugs=any_slugs, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values_for_any,
        )
    return Q()
def filter_by_contains_referenced_product_slugs(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter products based on their references to products.

    - ``contains_all``: the product must reference every listed product.
    - ``contains_any``: the product must reference at least one listed product.
    """
    all_slugs = attr_value.get("contains_all")
    any_slugs = attr_value.get("contains_any")
    if all_slugs:
        # AND one sub-expression per required product.
        conjunction = Q()
        for product_slug in all_slugs:
            values_for_slug = get_attribute_values_by_referenced_product_slugs(
                slugs=[product_slug], db_connection_name=db_connection_name
            )
            conjunction &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values_for_slug,
            )
        return conjunction
    if any_slugs:
        values_for_any = get_attribute_values_by_referenced_product_slugs(
            slugs=any_slugs, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values_for_any,
        )
    return Q()
def filter_by_contains_referenced_variant_skus(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter products based on their references to variants.

    - ``contains_all``: the product must reference every listed variant.
    - ``contains_any``: the product must reference at least one listed variant.
    """
    all_skus = attr_value.get("contains_all")
    any_skus = attr_value.get("contains_any")
    if all_skus:
        # AND one sub-expression per required variant.
        conjunction = Q()
        for variant_sku in all_skus:
            values_for_sku = get_attribute_values_by_referenced_variant_skus(
                slugs=[variant_sku], db_connection_name=db_connection_name
            )
            conjunction &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values_for_sku,
            )
        return conjunction
    if any_skus:
        values_for_any = get_attribute_values_by_referenced_variant_skus(
            slugs=any_skus, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values_for_any,
        )
    return Q()
def filter_by_contains_referenced_category_slugs(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter products based on their references to categories.

    - ``contains_all``: the product must reference every listed category.
    - ``contains_any``: the product must reference at least one listed category.
    """
    all_slugs = attr_value.get("contains_all")
    any_slugs = attr_value.get("contains_any")
    if all_slugs:
        # AND one sub-expression per required category.
        conjunction = Q()
        for category_slug in all_slugs:
            values_for_slug = get_attribute_values_by_referenced_category_slugs(
                slugs=[category_slug], db_connection_name=db_connection_name
            )
            conjunction &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values_for_slug,
            )
        return conjunction
    if any_slugs:
        values_for_any = get_attribute_values_by_referenced_category_slugs(
            slugs=any_slugs, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values_for_any,
        )
    return Q()
def filter_by_contains_referenced_collection_slugs(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter products based on their references to collections.

    - ``contains_all``: the product must reference every listed collection.
    - ``contains_any``: the product must reference at least one listed collection.
    """
    all_slugs = attr_value.get("contains_all")
    any_slugs = attr_value.get("contains_any")
    if all_slugs:
        # AND one sub-expression per required collection.
        conjunction = Q()
        for collection_slug in all_slugs:
            values_for_slug = get_attribute_values_by_referenced_collection_slugs(
                slugs=[collection_slug], db_connection_name=db_connection_name
            )
            conjunction &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values_for_slug,
            )
        return conjunction
    if any_slugs:
        values_for_any = get_attribute_values_by_referenced_collection_slugs(
            slugs=any_slugs, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values_for_any,
        )
    return Q()
def _filter_by_contains_all_referenced_object_ids(
    variant_ids: set[int],
    product_ids: set[int],
    page_ids: set[int],
    category_ids: set[int],
    collection_ids: set[int],
    attr_id: int | None,
    db_connection_name: str,
) -> Q:
    """AND together a single-reference match for every requested object id.

    Each id contributes its own EXISTS expression, so a row matches only when
    it references every one of the given objects.
    """
    # (ids, fetcher) pairs, in the same evaluation order as before:
    # pages, products, variants, categories, collections.
    id_groups = (
        (page_ids, get_attribute_values_by_referenced_page_ids),
        (product_ids, get_attribute_values_by_referenced_product_ids),
        (variant_ids, get_attribute_values_by_referenced_variant_ids),
        (category_ids, get_attribute_values_by_referenced_category_ids),
        (collection_ids, get_attribute_values_by_referenced_collection_ids),
    )
    expression = Q()
    for ids, fetch_values in id_groups:
        for obj_id in ids:
            referenced_values = fetch_values(
                ids=[obj_id], db_connection_name=db_connection_name
            )
            expression &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=referenced_values,
            )
    return expression
def _filter_by_contains_any_referenced_object_ids(
    variant_ids: set[int],
    product_ids: set[int],
    page_ids: set[int],
    category_ids: set[int],
    collection_ids: set[int],
    attr_id: int | None,
    db_connection_name: str,
) -> Q:
    """OR together per-model match expressions: any referenced id suffices.

    Each non-empty id group contributes one EXISTS expression covering all of
    its ids at once.
    """
    # (ids, fetcher) pairs in the original evaluation order.
    id_groups = (
        (page_ids, get_attribute_values_by_referenced_page_ids),
        (product_ids, get_attribute_values_by_referenced_product_ids),
        (variant_ids, get_attribute_values_by_referenced_variant_ids),
        (category_ids, get_attribute_values_by_referenced_category_ids),
        (collection_ids, get_attribute_values_by_referenced_collection_ids),
    )
    expression = Q()
    for ids, fetch_values in id_groups:
        if not ids:
            continue
        referenced_values = fetch_values(
            ids=list(ids), db_connection_name=db_connection_name
        )
        expression |= _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=referenced_values,
        )
    return expression
def filter_by_contains_referenced_object_ids(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
) -> Q:
    """Filter by referenced global IDs, grouped per referenced model.

    `contains_all` requires every referenced object to be present;
    `contains_any` requires at least one. Returns a no-op `Q()` when
    neither key is provided.
    """
    all_ids = attr_value.get("contains_all")
    any_ids = attr_value.get("contains_any")
    grouped = clean_up_referenced_global_ids(any_ids or all_ids or [])
    shared_kwargs = {
        "variant_ids": grouped["ProductVariant"],
        "product_ids": grouped["Product"],
        "page_ids": grouped["Page"],
        "category_ids": grouped["Category"],
        "collection_ids": grouped["Collection"],
        "attr_id": attr_id,
        "db_connection_name": db_connection_name,
    }
    if all_ids:
        return _filter_by_contains_all_referenced_object_ids(**shared_kwargs)
    if any_ids:
        return _filter_by_contains_any_referenced_object_ids(**shared_kwargs)
    return Q()
def filter_objects_by_reference_attributes(
    attr_id: int | None,
    attr_value: dict[
        Literal[
            "referenced_ids",
            "page_slugs",
            "product_slugs",
            "product_variant_skus",
            "category_slugs",
            "collection_slugs",
        ],
        CONTAINS_TYPING,
    ],
    db_connection_name: str,
):
    """AND together reference-attribute filters for every provided input key.

    Each supported key is delegated to its dedicated handler; keys absent
    from `attr_value` contribute nothing.
    """
    handlers = (
        ("referenced_ids", filter_by_contains_referenced_object_ids),
        ("page_slugs", filter_by_contains_referenced_page_slugs),
        ("product_slugs", filter_by_contains_referenced_product_slugs),
        ("product_variant_skus", filter_by_contains_referenced_variant_skus),
        ("category_slugs", filter_by_contains_referenced_category_slugs),
        ("collection_slugs", filter_by_contains_referenced_collection_slugs),
    )
    filter_expression = Q()
    for key, handler in handlers:
        if key in attr_value:
            filter_expression &= handler(
                attr_id, attr_value[key], db_connection_name
            )
    return filter_expression
def _filter_products_by_attributes(
    qs: QuerySet[Product], value: list[dict]
) -> QuerySet[Product]:
    """Filter products by the `attributes` where-input entries.

    Each entry may target an attribute by `slug` and/or constrain it with a
    `value` sub-input; all entries are ANDed together. Returns an empty
    queryset when a referenced slug does not exist or when no filterable
    condition was produced.
    """
    attribute_slugs = {
        attr_filter["slug"] for attr_filter in value if "slug" in attr_filter
    }
    attributes_map = {
        attr.slug: attr
        for attr in Attribute.objects.using(qs.db).filter(slug__in=attribute_slugs)
    }
    if len(attribute_slugs) != len(attributes_map.keys()):
        # Filter over non existing attribute
        return qs.none()

    attr_filter_expression = Q()

    # Entries that name a slug but give no `value` match any product that has
    # any value assigned for that attribute; they are collected and handled
    # in one combined expression.
    attr_without_values_input = []
    for attr_filter in value:
        if "slug" in attr_filter and "value" not in attr_filter:
            attr_without_values_input.append(attributes_map[attr_filter["slug"]])

    if attr_without_values_input:
        atr_value_qs = AttributeValue.objects.using(qs.db).filter(
            attribute_id__in=[attr.id for attr in attr_without_values_input]
        )
        attr_filter_expression = _get_assigned_product_attribute_for_attribute_value(
            atr_value_qs, qs.db
        )

    for attr_filter in value:
        attr_value = attr_filter.get("value")
        if not attr_value:
            # attrs without value input are handled separately
            continue

        # Resolve the attribute id when a slug narrows the entry; otherwise
        # the value condition applies across all attributes.
        attr_id = None
        if attr_slug := attr_filter.get("slug"):
            attr = attributes_map[attr_slug]
            attr_id = attr.id

        attr_value = attr_filter["value"]
        # Dispatch on the single value-kind key present in the sub-input.
        if "slug" in attr_value or "name" in attr_value:
            attr_filter_expression &= filter_by_slug_or_name(
                attr_id,
                attr_value,
                qs.db,
            )
        elif "numeric" in attr_value:
            attr_filter_expression &= filter_by_numeric_attribute(
                attr_id,
                attr_value["numeric"],
                qs.db,
            )
        elif "boolean" in attr_value:
            attr_filter_expression &= filter_by_boolean_attribute(
                attr_id,
                attr_value["boolean"],
                qs.db,
            )
        elif "date" in attr_value:
            attr_filter_expression &= filter_by_date_attribute(
                attr_id,
                attr_value["date"],
                qs.db,
            )
        elif "date_time" in attr_value:
            attr_filter_expression &= filter_by_date_time_attribute(
                attr_id,
                attr_value["date_time"],
                qs.db,
            )
        elif "reference" in attr_value:
            attr_filter_expression &= filter_objects_by_reference_attributes(
                attr_id,
                attr_value["reference"],
                qs.db,
            )

    # An untouched Q() would match everything; treat it as "no valid input".
    if attr_filter_expression != Q():
        return qs.filter(attr_filter_expression)
    return qs.none()
def validate_attribute_input(attributes: list[dict], db_connection_name: str):
    """Validate the `attributes` filter input.

    Raises:
        GraphQLError: when the input mixes deprecated fields with `value`
            across entries, uses deprecated fields without `slug`, or
            combines `value` with deprecated fields inside one entry.

    Per-value validation is delegated to `validate_attribute_value_input`.
    """
    value_used_with_deprecated_input = []
    missing_slug_for_deprecated_input: list[str] = []
    used_deprecated_filter = [
        # When input contains other fields than slug and value
        bool(set(attr_data.keys()).difference({"slug", "value"}))
        for attr_data in attributes
    ]
    mixed_filter_usage = len(set(used_deprecated_filter)) > 1
    if mixed_filter_usage:
        # Fix: the original message concatenated "fields" + "and" without a
        # separating space ("...deprecated fieldsand `value`...").
        raise GraphQLError(
            "The provided `attributes` input contains a mix of deprecated fields "
            "and `value`. Please use either the `AttributeInput.value` field "
            "exclusively or only the deprecated fields."
        )
    for index, attr_data in enumerate(attributes):
        if set(attr_data.keys()).difference({"slug", "value"}):
            # Input contains deprecated input values
            if not attr_data.get("slug"):
                missing_slug_for_deprecated_input.append(str(index))
                continue
            if "value" in attr_data:
                value_used_with_deprecated_input.append(str(index))
                continue
    if missing_slug_for_deprecated_input:
        raise GraphQLError(
            "Attribute `slug` is required when using deprecated fields in "
            "the `attributes` input. "
            f"Missing at indices: {', '.join(missing_slug_for_deprecated_input)}."
        )
    if value_used_with_deprecated_input:
        raise GraphQLError(
            "The `value` field cannot be used with deprecated fields in "
            "the `attributes` input. "
            f"Used at indices: {', '.join(value_used_with_deprecated_input)}."
        )
    validate_attribute_value_input(attributes, db_connection_name)
def filter_products_by_attributes(
qs: QuerySet[Product], value: list[dict[str, str | dict | list | bool]]
) -> QuerySet[Product]:
if not value:
return qs.none()
if set(value[0].keys()).difference({"slug", "value"}):
return deprecated_filter_attributes(qs, value)
return _filter_products_by_attributes(qs, value)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/filters/product_attributes.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 876,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/product/filters/product_helpers.py | import datetime
from django.db.models import Exists, OuterRef, Q, Subquery, Sum
from django.db.models.expressions import ExpressionWrapper
from django.db.models.fields import IntegerField
from django.db.models.functions import Coalesce
from django.utils import timezone
from ....channel.models import Channel
from ....core.search import prefix_search
from ....product import ProductTypeKind
from ....product.models import (
Category,
CollectionProduct,
ProductChannelListing,
ProductType,
ProductVariant,
ProductVariantChannelListing,
)
from ....warehouse.models import Allocation, Reservation, Stock, Warehouse
from ...utils import resolve_global_ids_to_primary_keys
from ...utils.filters import (
filter_range_field,
filter_where_range_field_with_conditions,
)
from ...warehouse import types as warehouse_types
from .. import types as product_types
from ..enums import (
StockAvailability,
)
def filter_products_by_variant_price(qs, channel_slug, price_lte=None, price_gte=None):
    """Filter products to those with a variant priced within the given bounds.

    Variants with no price set (`price_amount` NULL) always pass either bound.
    Fix: bounds are now compared against `None` instead of truthiness, so a
    legitimate bound of 0 is no longer silently ignored.
    """
    channels = Channel.objects.using(qs.db).filter(slug=channel_slug).values("pk")
    product_variant_channel_listings = ProductVariantChannelListing.objects.using(
        qs.db
    ).filter(Exists(channels.filter(pk=OuterRef("channel_id"))))
    if price_lte is not None:
        product_variant_channel_listings = product_variant_channel_listings.filter(
            Q(price_amount__lte=price_lte) | Q(price_amount__isnull=True)
        )
    if price_gte is not None:
        product_variant_channel_listings = product_variant_channel_listings.filter(
            Q(price_amount__gte=price_gte) | Q(price_amount__isnull=True)
        )
    product_variant_channel_listings = product_variant_channel_listings.values(
        "variant_id"
    )
    variants = (
        ProductVariant.objects.using(qs.db)
        .filter(
            Exists(product_variant_channel_listings.filter(variant_id=OuterRef("pk")))
        )
        .values("product_id")
    )
    return qs.filter(Exists(variants.filter(product_id=OuterRef("pk"))))
def filter_products_by_minimal_price(
    qs, channel_slug, minimal_price_lte=None, minimal_price_gte=None
):
    """Filter products by their discounted (minimal) price in the channel.

    Returns `qs` unchanged when the channel does not exist.
    Fix: bounds are compared against `None` instead of truthiness, so a
    legitimate bound of 0 is no longer silently ignored.
    """
    channel = Channel.objects.using(qs.db).filter(slug=channel_slug).first()
    if not channel:
        return qs
    product_channel_listings = ProductChannelListing.objects.using(qs.db).filter(
        channel_id=channel.id
    )
    if minimal_price_lte is not None:
        product_channel_listings = product_channel_listings.filter(
            discounted_price_amount__lte=minimal_price_lte
        )
    if minimal_price_gte is not None:
        product_channel_listings = product_channel_listings.filter(
            discounted_price_amount__gte=minimal_price_gte
        )
    product_channel_listings = product_channel_listings.values("product_id")
    return qs.filter(Exists(product_channel_listings.filter(product_id=OuterRef("pk"))))
def filter_products_by_categories(qs, category_ids):
    """Limit products to the given categories, including all their descendants."""
    selected = Category.objects.using(qs.db).filter(pk__in=category_ids)
    category_tree = (
        Category.tree.get_queryset_descendants(selected, include_self=True)
        .using(qs.db)
        .values("pk")
    )
    return qs.filter(Exists(category_tree.filter(pk=OuterRef("category_id"))))
def filter_products_by_collections(qs, collection_pks):
    """Limit products to those assigned to any of the given collections."""
    memberships = (
        CollectionProduct.objects.using(qs.db)
        .filter(collection_id__in=collection_pks)
        .values("product_id")
    )
    return qs.filter(Exists(memberships.filter(product_id=OuterRef("pk"))))
def filter_products_by_stock_availability(qs, stock_availability, channel_slug):
    """Filter products by IN_STOCK / OUT_OF_STOCK status in the given channel.

    A product counts as "in stock" when any of its variants has a stock row,
    in a warehouse serving the channel, whose `quantity` exceeds the sum of
    its active allocations and unexpired reservations.
    """
    # Sum of positive allocations per stock row (correlated on stock pk).
    allocations = (
        Allocation.objects.using(qs.db)
        .values("stock_id")
        .filter(quantity_allocated__gt=0, stock_id=OuterRef("pk"))
        .values_list(Sum("quantity_allocated"))
    )
    allocated_subquery = Subquery(queryset=allocations, output_field=IntegerField())
    # Sum of positive, still-valid reservations per stock row.
    reservations = (
        Reservation.objects.using(qs.db)
        .values("stock_id")
        .filter(
            quantity_reserved__gt=0,
            stock_id=OuterRef("pk"),
            reserved_until__gt=timezone.now(),
        )
        .values_list(Sum("quantity_reserved"))
    )
    reservation_subquery = Subquery(queryset=reservations, output_field=IntegerField())
    warehouse_pks = list(
        Warehouse.objects.using(qs.db)
        .for_channel_with_active_shipping_zone_or_cc(channel_slug)
        .values_list("pk", flat=True)
    )
    # Stock rows with a positive remainder after allocations + reservations;
    # Coalesce covers stocks with no allocation/reservation rows at all.
    stocks = (
        Stock.objects.using(qs.db)
        .filter(
            warehouse_id__in=warehouse_pks,
            quantity__gt=Coalesce(allocated_subquery, 0)
            + Coalesce(reservation_subquery, 0),
        )
        .values("product_variant_id")
    )
    variants = (
        ProductVariant.objects.using(qs.db)
        .filter(Exists(stocks.filter(product_variant_id=OuterRef("pk"))))
        .values("product_id")
    )
    if stock_availability == StockAvailability.IN_STOCK.value:  # type: ignore[attr-defined]
        qs = qs.filter(Exists(variants.filter(product_id=OuterRef("pk"))))
    if stock_availability == StockAvailability.OUT_OF_STOCK.value:  # type: ignore[attr-defined]
        qs = qs.filter(~Exists(variants.filter(product_id=OuterRef("pk"))))
    return qs
def filter_categories(qs, _, value):
    """Resolve category global IDs and filter products by them (with subtrees)."""
    if not value:
        return qs
    _, category_pks = resolve_global_ids_to_primary_keys(
        value, product_types.Category
    )
    return filter_products_by_categories(qs, category_pks)
def filter_product_types(qs, _, value):
    """Filter products by product-type global IDs; no IDs means no filtering."""
    if value:
        _, product_type_pks = resolve_global_ids_to_primary_keys(
            value, product_types.ProductType
        )
        qs = qs.filter(product_type_id__in=product_type_pks)
    return qs
def filter_has_category(qs, _, value):
    """`value=True` keeps products that have a category; falsy keeps those without."""
    wants_category = bool(value)
    return qs.filter(category__isnull=not wants_category)
def filter_has_preordered_variants(qs, _, value):
    """Filter products by whether they have any variant in active preorder.

    A preorder is active while it has no end date or the end date is in the
    future.
    """
    active_preorder_variants = (
        ProductVariant.objects.using(qs.db)
        .filter(is_preorder=True)
        .filter(
            Q(preorder_end_date__isnull=True) | Q(preorder_end_date__gt=timezone.now())
        )
        .values("product_id")
    )
    variant_exists = Exists(active_preorder_variants.filter(product_id=OuterRef("pk")))
    return qs.filter(variant_exists) if value else qs.filter(~variant_exists)
def filter_collections(qs, _, value):
    """Resolve collection global IDs and filter products by membership."""
    if not value:
        return qs
    _, collection_pks = resolve_global_ids_to_primary_keys(
        value, product_types.Collection
    )
    return filter_products_by_collections(qs, collection_pks)
def filter_products_is_published(qs, _, value, channel_slug):
    """Filter products by their `is_published` state in the given channel.

    Regardless of the requested state, products must also have at least one
    variant with a price set in that channel to match.
    """
    channel = Channel.objects.using(qs.db).filter(slug=channel_slug).values("pk")
    product_channel_listings = (
        ProductChannelListing.objects.using(qs.db)
        .filter(Exists(channel.filter(pk=OuterRef("channel_id"))), is_published=value)
        .values("product_id")
    )

    # Filter out product for which there is no variant with price
    variant_channel_listings = (
        ProductVariantChannelListing.objects.using(qs.db)
        .filter(
            Exists(channel.filter(pk=OuterRef("channel_id"))),
            price_amount__isnull=False,
        )
        .values("id")
    )
    variants = (
        ProductVariant.objects.using(qs.db)
        .filter(Exists(variant_channel_listings.filter(variant_id=OuterRef("pk"))))
        .values("product_id")
    )

    # Both EXISTS conditions must hold: the listing state and a priced variant.
    return qs.filter(
        Exists(product_channel_listings.filter(product_id=OuterRef("pk"))),
        Exists(variants.filter(product_id=OuterRef("pk"))),
    )
def filter_products_is_available(qs, _, value, channel_slug):
    """Filter products by availability for purchase in the given channel.

    `value=True` keeps products whose availability date has passed; falsy
    keeps those with a future date or no date at all.
    """
    channel = Channel.objects.using(qs.db).filter(slug=channel_slug).values("pk")
    now = datetime.datetime.now(tz=datetime.UTC)
    if value:
        availability = Q(available_for_purchase_at__lte=now)
    else:
        availability = Q(available_for_purchase_at__gt=now) | Q(
            available_for_purchase_at__isnull=True
        )
    listings = (
        ProductChannelListing.objects.using(qs.db)
        .filter(Exists(channel.filter(pk=OuterRef("channel_id"))), availability)
        .values("product_id")
    )
    return qs.filter(Exists(listings.filter(product_id=OuterRef("pk"))))
def filter_products_channel_field_from_date(qs, _, value, channel_slug, field):
    """Keep products whose channel-listing `field` date is on or before `value`."""
    channel = Channel.objects.using(qs.db).filter(slug=channel_slug).values("pk")
    listings = (
        ProductChannelListing.objects.using(qs.db)
        .filter(
            Exists(channel.filter(pk=OuterRef("channel_id"))),
            **{f"{field}__lte": value},
        )
        .values("product_id")
    )
    return qs.filter(Exists(listings.filter(product_id=OuterRef("pk"))))
def filter_products_visible_in_listing(qs, _, value, channel_slug):
    """Filter products by the `visible_in_listings` flag of their channel listing."""
    channel = Channel.objects.using(qs.db).filter(slug=channel_slug).values("pk")
    listings = (
        ProductChannelListing.objects.using(qs.db)
        .filter(
            Exists(channel.filter(pk=OuterRef("channel_id"))),
            visible_in_listings=value,
        )
        .values("product_id")
    )
    return qs.filter(Exists(listings.filter(product_id=OuterRef("pk"))))
def filter_variant_price(qs, _, value, channel_slug):
    """Delegate variant-price range filtering to the channel-aware helper."""
    return filter_products_by_variant_price(
        qs, channel_slug, price_lte=value.get("lte"), price_gte=value.get("gte")
    )
def filter_minimal_price(qs, _, value, channel_slug):
    """Delegate discounted-price range filtering to the channel-aware helper."""
    return filter_products_by_minimal_price(
        qs,
        channel_slug,
        minimal_price_lte=value.get("lte"),
        minimal_price_gte=value.get("gte"),
    )
def filter_stock_availability(qs, _, value, channel_slug):
    """Filter products by stock availability; no value means no filtering."""
    if not value:
        return qs
    return filter_products_by_stock_availability(qs, value, channel_slug)
def filter_search(qs, _, value):
    """Prefix-search the queryset with the given term via `prefix_search`."""
    return prefix_search(qs, value)
def filter_gift_card(qs, _, value):
    """Keep (value is True) or exclude (otherwise) gift-card products."""
    gift_card_types = ProductType.objects.using(qs.db).filter(
        kind=ProductTypeKind.GIFT_CARD
    )
    is_gift_card = Exists(gift_card_types.filter(id=OuterRef("product_type_id")))
    if value is True:
        return qs.filter(is_gift_card)
    return qs.exclude(is_gift_card)
def filter_stocks(qs, _, value):
    """Filter products by warehouse membership and/or aggregated stock quantity."""
    warehouse_ids = value.get("warehouse_ids")
    quantity = value.get("quantity")
    if warehouse_ids and not quantity:
        return filter_warehouses(qs, _, warehouse_ids)
    if quantity:
        # With warehouses present the aggregation is scoped to them.
        return filter_quantity(qs, quantity, warehouse_ids or None)
    return qs
def filter_warehouses(qs, _, value):
    """Limit products to those with stock in any of the given warehouses."""
    if not value:
        return qs
    _, warehouse_pks = resolve_global_ids_to_primary_keys(
        value, warehouse_types.Warehouse
    )
    warehouses = (
        Warehouse.objects.using(qs.db).filter(pk__in=warehouse_pks).values("pk")
    )
    stocked_variant_ids = (
        Stock.objects.using(qs.db)
        .filter(Exists(warehouses.filter(pk=OuterRef("warehouse"))))
        .values("product_variant_id")
    )
    variants = (
        ProductVariant.objects.using(qs.db)
        .filter(id__in=stocked_variant_ids)
        .values("pk")
    )
    return qs.filter(Exists(variants.filter(product=OuterRef("pk"))))
def filter_quantity(qs, quantity_value, warehouse_ids=None):
    """Filter products queryset by product variants quantity.

    Return product queryset which contains at least one variant with aggregated quantity
    between given range. If warehouses is given, it aggregates quantity only
    from stocks which are in given warehouses.
    """
    stocks = Stock.objects.using(qs.db).all()
    if warehouse_ids:
        _, warehouse_pks = resolve_global_ids_to_primary_keys(
            warehouse_ids, warehouse_types.Warehouse
        )
        stocks = stocks.filter(warehouse_id__in=warehouse_pks)
    # Correlated subquery: total stock quantity per variant.
    stocks = stocks.values("product_variant_id").filter(
        product_variant_id=OuterRef("pk")
    )
    stocks = Subquery(stocks.values_list(Sum("quantity")))
    variants = ProductVariant.objects.using(qs.db).annotate(
        total_quantity=ExpressionWrapper(stocks, output_field=IntegerField())
    )
    # Materialize the matching product ids, then filter the product queryset.
    variants = list(
        filter_range_field(variants, "total_quantity", quantity_value).values_list(
            "product_id", flat=True
        )
    )
    return qs.filter(pk__in=variants)
def where_filter_products_is_available(qs, _, value, channel_slug):
    """Where-variant of the availability filter; `None` matches nothing."""
    if value is None:
        return qs.none()
    channel = Channel.objects.using(qs.db).filter(slug=channel_slug).values("pk")
    now = datetime.datetime.now(tz=datetime.UTC)
    if value:
        availability = Q(available_for_purchase_at__lte=now)
    else:
        availability = Q(available_for_purchase_at__gt=now) | Q(
            available_for_purchase_at__isnull=True
        )
    listings = (
        ProductChannelListing.objects.using(qs.db)
        .filter(Exists(channel.filter(pk=OuterRef("channel_id"))), availability)
        .values("product_id")
    )
    return qs.filter(Exists(listings.filter(product_id=OuterRef("pk"))))
def where_filter_products_channel_field_from_date(qs, _, value, channel_slug, field):
    """Where-variant of the channel date filter; `None` matches nothing."""
    if value is None:
        return qs.none()
    channel = Channel.objects.using(qs.db).filter(slug=channel_slug).values("pk")
    listings = (
        ProductChannelListing.objects.using(qs.db)
        .filter(
            Exists(channel.filter(pk=OuterRef("channel_id"))),
            **{f"{field}__lte": value},
        )
        .values("product_id")
    )
    return qs.filter(Exists(listings.filter(product_id=OuterRef("pk"))))
def where_filter_has_category(qs, _, value):
    """Where-variant of the has-category filter; `None` matches nothing."""
    if value is None:
        return qs.none()
    wants_category = bool(value)
    return qs.filter(category__isnull=not wants_category)
def where_filter_stocks(qs, _, value):
    """Where-variant of the stocks filter; empty or partial input matches nothing."""
    if not value:
        return qs.none()
    warehouse_ids = value.get("warehouse_ids")
    quantity = value.get("quantity")
    if warehouse_ids and not quantity:
        return where_filter_warehouses(qs, _, warehouse_ids)
    if quantity:
        return where_filter_quantity(qs, quantity, warehouse_ids or None)
    return qs.none()
def where_filter_warehouses(qs, _, value):
    """Limit products to those stocked in the given warehouses; empty matches nothing."""
    if not value:
        return qs.none()
    _, warehouse_pks = resolve_global_ids_to_primary_keys(
        value, warehouse_types.Warehouse
    )
    warehouses = (
        Warehouse.objects.using(qs.db).filter(pk__in=warehouse_pks).values("pk")
    )
    stocked_variant_ids = (
        Stock.objects.using(qs.db)
        .filter(Exists(warehouses.filter(pk=OuterRef("warehouse"))))
        .values("product_variant_id")
    )
    variants = (
        ProductVariant.objects.using(qs.db)
        .filter(id__in=stocked_variant_ids)
        .values("pk")
    )
    return qs.filter(Exists(variants.filter(product=OuterRef("pk"))))
def where_filter_quantity(qs, quantity_value, warehouse_ids=None):
    """Filter products queryset by product variants quantity.

    Return product queryset which contains at least one variant with aggregated quantity
    between given range. If warehouses is given, it aggregates quantity only
    from stocks which are in given warehouses.
    """
    stocks = Stock.objects.using(qs.db).all()
    if warehouse_ids:
        _, warehouse_pks = resolve_global_ids_to_primary_keys(
            warehouse_ids, warehouse_types.Warehouse
        )
        stocks = stocks.filter(warehouse_id__in=warehouse_pks)
    # Correlated subquery: total stock quantity per variant.
    stocks = stocks.values("product_variant_id").filter(
        product_variant_id=OuterRef("pk")
    )
    stocks = Subquery(stocks.values_list(Sum("quantity")))
    variants = ProductVariant.objects.using(qs.db).annotate(
        total_quantity=ExpressionWrapper(stocks, output_field=IntegerField())
    )
    # `_filter_range` (unlike `filter_range_field`) yields nothing when the
    # range is empty — where-filter semantics.
    variants = list(
        _filter_range(variants, "total_quantity", quantity_value).values_list(
            "product_id", flat=True
        )
    )
    return qs.filter(pk__in=variants)
def _filter_range(qs, field, value):
gte, lte = value.get("gte"), value.get("lte")
if gte is None and lte is None:
return qs.none()
return filter_range_field(qs, field, value)
def where_filter_stock_availability(qs, _, value, channel_slug):
    """Where-variant of stock availability; missing value matches nothing."""
    if not value:
        return qs.none()
    return filter_products_by_stock_availability(qs, value, channel_slug)
def where_filter_gift_card(qs, _, value):
    """Where-variant of the gift-card filter; `None` matches nothing."""
    if value is None:
        return qs.none()
    gift_card_types = ProductType.objects.using(qs.db).filter(
        kind=ProductTypeKind.GIFT_CARD
    )
    is_gift_card = Exists(gift_card_types.filter(id=OuterRef("product_type_id")))
    return qs.filter(is_gift_card) if value is True else qs.exclude(is_gift_card)
def where_filter_has_preordered_variants(qs, _, value):
    """Where-variant of the preordered-variants filter; `None` matches nothing."""
    if value is None:
        return qs.none()
    active_preorder_variants = (
        ProductVariant.objects.using(qs.db)
        .filter(is_preorder=True)
        .filter(
            Q(preorder_end_date__isnull=True) | Q(preorder_end_date__gt=timezone.now())
        )
        .values("product_id")
    )
    variant_exists = Exists(active_preorder_variants.filter(product_id=OuterRef("pk")))
    return qs.filter(variant_exists) if value else qs.filter(~variant_exists)
def where_filter_updated_at_range(qs, _, value):
    """Filter products by an `updated_at` datetime range; `None` matches nothing."""
    if value is None:
        return qs.none()
    return filter_where_range_field_with_conditions(qs, "updated_at", value)
def where_filter_by_categories(qs, value):
    """Filter products by categories and subcategories of provided categories."""
    if not value:
        return qs.none()
    eq = value.get("eq")
    one_of = value.get("one_of")
    selected = None
    if isinstance(eq, str) and eq:
        _, selected = resolve_global_ids_to_primary_keys([eq], "Category", True)
    if one_of:
        # `one_of` takes precedence over `eq` when both are supplied.
        _, selected = resolve_global_ids_to_primary_keys(one_of, "Category", True)
    if not selected:
        return qs.none()
    return filter_products_by_categories(qs, selected)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/filters/product_helpers.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 463,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/product/filters/product_type.py | import django_filters
import graphene
from django.db.models import Q
from saleor.graphql.warehouse.types import DEPRECATED_IN_3X_INPUT
from ....product.models import ProductType
from ...core.doc_category import DOC_CATEGORY_PRODUCTS
from ...core.filters import (
EnumFilter,
FilterInputObjectType,
GlobalIDMultipleChoiceFilter,
ListObjectTypeFilter,
MetadataFilterBase,
)
from ...utils.filters import filter_slug_list
from ..enums import (
ProductTypeConfigurable,
ProductTypeEnum,
ProductTypeKindEnum,
)
def filter_product_type_configurable(qs, _, value):
    """Filter product types by the legacy configurable/simple distinction."""
    if value == ProductTypeConfigurable.CONFIGURABLE:
        return qs.filter(has_variants=True)
    if value == ProductTypeConfigurable.SIMPLE:
        return qs.filter(has_variants=False)
    return qs
def filter_product_type(qs, _, value):
    """Filter product types by digital / shippable classification."""
    if value == ProductTypeEnum.DIGITAL:
        return qs.filter(is_digital=True)
    if value == ProductTypeEnum.SHIPPABLE:
        return qs.filter(is_shipping_required=True)
    return qs
def filter_product_type_kind(qs, _, value):
    """Narrow product types to a specific kind when one is provided."""
    return qs.filter(kind=value) if value else qs
class ProductTypeFilter(MetadataFilterBase):
    """Filterset for product types: search, configurable (deprecated), kind, ids, slugs."""

    search = django_filters.CharFilter(method="filter_product_type_searchable")
    # Deprecated: documented as having no effect on API behavior (see help_text).
    configurable = EnumFilter(
        input_class=ProductTypeConfigurable,
        method=filter_product_type_configurable,
        help_text=(
            f"{DEPRECATED_IN_3X_INPUT} The field has no effect on the API behavior. "
            "This is a leftover from the past Simple/Configurable product distinction. "
            "Products can have multiple variants regardless of this setting. "
        ),
    )
    product_type = EnumFilter(input_class=ProductTypeEnum, method=filter_product_type)
    kind = EnumFilter(input_class=ProductTypeKindEnum, method=filter_product_type_kind)
    ids = GlobalIDMultipleChoiceFilter(field_name="id")
    slugs = ListObjectTypeFilter(input_class=graphene.String, method=filter_slug_list)

    class Meta:
        model = ProductType
        fields = ["search", "configurable", "product_type"]

    @classmethod
    def filter_product_type_searchable(cls, queryset, _name, value):
        # Match the term against either name or slug.
        # NOTE(review): `__ilike` is not a built-in Django lookup — presumably
        # registered elsewhere in the project; confirm before relying on it.
        if not value:
            return queryset
        name_slug_qs = Q(name__ilike=value) | Q(slug__ilike=value)
        return queryset.filter(name_slug_qs)
class ProductTypeFilterInput(FilterInputObjectType):
    """GraphQL filter input type backed by `ProductTypeFilter`."""

    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = ProductTypeFilter
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/filters/product_type.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/product/filters/product_variant.py | from typing import Literal
import django_filters
import graphene
from django.db.models import Exists, OuterRef, Q
from django.db.models.query import QuerySet
from django.utils import timezone
from ....attribute.models import (
AssignedVariantAttribute,
AssignedVariantAttributeValue,
Attribute,
AttributeValue,
)
from ....product.models import Product, ProductVariant
from ...attribute.shared_filters import (
CONTAINS_TYPING,
AssignedAttributeWhereInput,
clean_up_referenced_global_ids,
get_attribute_values_by_boolean_value,
get_attribute_values_by_date_time_value,
get_attribute_values_by_date_value,
get_attribute_values_by_numeric_value,
get_attribute_values_by_referenced_category_ids,
get_attribute_values_by_referenced_category_slugs,
get_attribute_values_by_referenced_collection_ids,
get_attribute_values_by_referenced_collection_slugs,
get_attribute_values_by_referenced_page_ids,
get_attribute_values_by_referenced_page_slugs,
get_attribute_values_by_referenced_product_ids,
get_attribute_values_by_referenced_product_slugs,
get_attribute_values_by_referenced_variant_ids,
get_attribute_values_by_referenced_variant_skus,
get_attribute_values_by_slug_or_name_value,
validate_attribute_value_input,
)
from ...core.descriptions import ADDED_IN_322
from ...core.doc_category import DOC_CATEGORY_PRODUCTS
from ...core.filters import (
FilterInputObjectType,
GlobalIDMultipleChoiceWhereFilter,
ListObjectTypeFilter,
ListObjectTypeWhereFilter,
MetadataFilterBase,
MetadataWhereFilterBase,
ObjectTypeFilter,
ObjectTypeWhereFilter,
)
from ...core.filters.where_input import StringFilterInput, WhereInputObjectType
from ...core.types import DateTimeRangeInput
from ...utils.filters import (
Number,
filter_by_ids,
filter_where_by_range_field,
filter_where_by_value_field,
)
from .shared import filter_updated_at_range
def filter_sku_list(qs, _, value):
    """Keep only variants whose SKU appears in the provided list."""
    return qs.filter(sku__in=value)
def filter_is_preorder(qs, _, value):
    """Filter variants by preorder state.

    A preorder is active while it has no end date or its end date has not yet
    passed; `value=False` matches non-preorders and already-ended preorders.
    """
    now = timezone.now()
    if value:
        return qs.filter(is_preorder=True).filter(
            Q(preorder_end_date__isnull=True) | Q(preorder_end_date__gte=now)
        )
    ended_preorder = Q(is_preorder=True) & Q(preorder_end_date__lt=now)
    return qs.filter(Q(is_preorder=False) | ended_preorder)
def filter_by_slug_or_name(
    attr_id: int | None,
    attr_value: dict,
    db_connection_name: str,
):
    """Match variants whose assigned attribute value satisfies the slug/name input."""
    matching_values = get_attribute_values_by_slug_or_name_value(
        attr_id=attr_id,
        attr_value=attr_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_variant_attribute_for_attribute_value_qs(
        matching_values, db_connection_name
    )
def filter_by_numeric_attribute(
    attr_id: int | None,
    numeric_value: dict[str, Number | list[Number] | dict[str, Number]],
    db_connection_name: str,
):
    """Match variants whose assigned attribute value satisfies the numeric input."""
    matching_values = get_attribute_values_by_numeric_value(
        attr_id=attr_id,
        numeric_value=numeric_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_variant_attribute_for_attribute_value_qs(
        matching_values, db_connection_name
    )
def filter_by_boolean_attribute(
    attr_id: int | None,
    boolean_value,
    db_connection_name: str,
):
    """Match variants whose assigned attribute value satisfies the boolean input."""
    matching_values = get_attribute_values_by_boolean_value(
        attr_id=attr_id,
        boolean_value=boolean_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_variant_attribute_for_attribute_value_qs(
        matching_values, db_connection_name
    )
def filter_by_date_attribute(
    attr_id: int | None,
    date_value,
    db_connection_name: str,
):
    """Return a Q matching variants whose date attribute falls in the range."""
    matching_values = get_attribute_values_by_date_value(
        attr_id=attr_id,
        date_value=date_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_variant_attribute_for_attribute_value_qs(
        matching_values, db_connection_name
    )
def filter_by_date_time_attribute(
    attr_id: int | None,
    date_value,
    db_connection_name: str,
):
    """Return a Q matching variants whose datetime attribute falls in the range."""
    matching_values = get_attribute_values_by_date_time_value(
        attr_id=attr_id,
        date_value=date_value,
        db_connection_name=db_connection_name,
    )
    return _get_assigned_variant_attribute_for_attribute_value_qs(
        matching_values, db_connection_name
    )
def _get_assigned_variant_attribute_for_attribute_value_qs(
    attribute_values: QuerySet[AttributeValue],
    db_connection_name: str,
):
    """Wrap *attribute_values* into a Q selecting the variants that hold them."""
    # Value rows that belong to the attribute assignment under inspection.
    value_assignments = AssignedVariantAttributeValue.objects.using(
        db_connection_name
    ).filter(value__in=attribute_values, assignment_id=OuterRef("id"))
    # Variants owning at least one assignment with such a value.
    variant_assignments = AssignedVariantAttribute.objects.using(
        db_connection_name
    ).filter(Exists(value_assignments), variant_id=OuterRef("pk"))
    return Q(Exists(variant_assignments))
def _filter_contains_single_expression(
    attr_id: int | None,
    db_connection_name: str,
    referenced_attr_values: QuerySet[AttributeValue],
):
    """Return a Q selecting variants assigned any of *referenced_attr_values*."""
    candidate_values = referenced_attr_values
    if attr_id:
        # Narrow the candidates down to the single attribute in the filter.
        candidate_values = candidate_values.filter(attribute_id=attr_id)
    value_assignments = AssignedVariantAttributeValue.objects.using(
        db_connection_name
    ).filter(value__in=candidate_values, assignment_id=OuterRef("id"))
    variant_assignments = AssignedVariantAttribute.objects.using(
        db_connection_name
    ).filter(Exists(value_assignments), variant_id=OuterRef("pk"))
    return Q(Exists(variant_assignments))
def filter_by_contains_referenced_page_slugs(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter variants based on their references to pages.

    - If `contains_all` is provided, only variants that reference all of the
      specified pages will match.
    - If `contains_any` is provided, variants that reference at least one of
      the specified pages will match.
    """
    if required_slugs := attr_value.get("contains_all"):
        # AND together one sub-expression per required page.
        combined = Q()
        for slug in required_slugs:
            values = get_attribute_values_by_referenced_page_slugs(
                slugs=[slug], db_connection_name=db_connection_name
            )
            combined &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values,
            )
        return combined
    if optional_slugs := attr_value.get("contains_any"):
        values = get_attribute_values_by_referenced_page_slugs(
            slugs=optional_slugs, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values,
        )
    return Q()
def filter_by_contains_referenced_category_slugs(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter variants based on their references to categories.

    - If `contains_all` is provided, only variants that reference all of the
      specified categories will match.
    - If `contains_any` is provided, variants that reference at least one of
      the specified categories will match.
    """
    if required_slugs := attr_value.get("contains_all"):
        # AND together one sub-expression per required category.
        combined = Q()
        for slug in required_slugs:
            values = get_attribute_values_by_referenced_category_slugs(
                slugs=[slug], db_connection_name=db_connection_name
            )
            combined &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values,
            )
        return combined
    if optional_slugs := attr_value.get("contains_any"):
        values = get_attribute_values_by_referenced_category_slugs(
            slugs=optional_slugs, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values,
        )
    return Q()
def filter_by_contains_referenced_collection_slugs(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter variants based on their references to collections.

    - If `contains_all` is provided, only variants that reference all of the
      specified collections will match.
    - If `contains_any` is provided, variants that reference at least one of
      the specified collections will match.
    """
    if required_slugs := attr_value.get("contains_all"):
        # AND together one sub-expression per required collection.
        combined = Q()
        for slug in required_slugs:
            values = get_attribute_values_by_referenced_collection_slugs(
                slugs=[slug], db_connection_name=db_connection_name
            )
            combined &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values,
            )
        return combined
    if optional_slugs := attr_value.get("contains_any"):
        values = get_attribute_values_by_referenced_collection_slugs(
            slugs=optional_slugs, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values,
        )
    return Q()
def filter_by_contains_referenced_product_slugs(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter variants based on their references to products.

    - If `contains_all` is provided, only variants that reference all of the
      specified products will match.
    - If `contains_any` is provided, variants that reference at least one of
      the specified products will match.
    """
    if required_slugs := attr_value.get("contains_all"):
        # AND together one sub-expression per required product.
        combined = Q()
        for slug in required_slugs:
            values = get_attribute_values_by_referenced_product_slugs(
                slugs=[slug], db_connection_name=db_connection_name
            )
            combined &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values,
            )
        return combined
    if optional_slugs := attr_value.get("contains_any"):
        values = get_attribute_values_by_referenced_product_slugs(
            slugs=optional_slugs, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values,
        )
    return Q()
def filter_by_contains_referenced_variant_skus(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
):
    """Build an expression to filter variants based on their references to variants.

    - If `contains_all` is provided, only variants that reference all of the
      specified variants will match.
    - If `contains_any` is provided, variants that reference at least one of
      the specified variants will match.
    """
    if required_skus := attr_value.get("contains_all"):
        # AND together one sub-expression per required variant SKU.
        combined = Q()
        for sku in required_skus:
            values = get_attribute_values_by_referenced_variant_skus(
                slugs=[sku], db_connection_name=db_connection_name
            )
            combined &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values,
            )
        return combined
    if optional_skus := attr_value.get("contains_any"):
        values = get_attribute_values_by_referenced_variant_skus(
            slugs=optional_skus, db_connection_name=db_connection_name
        )
        return _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values,
        )
    return Q()
def _filter_by_contains_all_referenced_object_ids(
    variant_ids: set[int],
    product_ids: set[int],
    page_ids: set[int],
    category_ids: set[int],
    collection_ids: set[int],
    attr_id: int | None,
    db_connection_name: str,
) -> Q:
    """AND together one sub-expression per referenced object ID.

    The variant must reference every single one of the given objects to match.
    """
    # (ids, value-fetcher) pairs; the order mirrors the original per-type checks.
    id_groups = (
        (page_ids, get_attribute_values_by_referenced_page_ids),
        (product_ids, get_attribute_values_by_referenced_product_ids),
        (variant_ids, get_attribute_values_by_referenced_variant_ids),
        (category_ids, get_attribute_values_by_referenced_category_ids),
        (collection_ids, get_attribute_values_by_referenced_collection_ids),
    )
    expression = Q()
    for ids, fetch_values in id_groups:
        for object_id in ids:
            values = fetch_values(
                ids=[object_id], db_connection_name=db_connection_name
            )
            expression &= _filter_contains_single_expression(
                attr_id=attr_id,
                db_connection_name=db_connection_name,
                referenced_attr_values=values,
            )
    return expression
def _filter_by_contains_any_referenced_object_ids(
    variant_ids: set[int],
    product_ids: set[int],
    page_ids: set[int],
    category_ids: set[int],
    collection_ids: set[int],
    attr_id: int | None,
    db_connection_name: str,
) -> Q:
    """OR together one sub-expression per referenced object type.

    A variant matches when it references at least one of the given objects.
    """
    # (ids, value-fetcher) pairs; the order mirrors the original per-type checks.
    id_groups = (
        (page_ids, get_attribute_values_by_referenced_page_ids),
        (product_ids, get_attribute_values_by_referenced_product_ids),
        (variant_ids, get_attribute_values_by_referenced_variant_ids),
        (category_ids, get_attribute_values_by_referenced_category_ids),
        (collection_ids, get_attribute_values_by_referenced_collection_ids),
    )
    expression = Q()
    for ids, fetch_values in id_groups:
        if not ids:
            continue
        values = fetch_values(ids=list(ids), db_connection_name=db_connection_name)
        expression |= _filter_contains_single_expression(
            attr_id=attr_id,
            db_connection_name=db_connection_name,
            referenced_attr_values=values,
        )
    return expression
def filter_by_contains_referenced_object_ids(
    attr_id: int | None,
    attr_value: CONTAINS_TYPING,
    db_connection_name: str,
) -> Q:
    """Dispatch a `contains_all`/`contains_any` global-ID filter to its handler."""
    contains_all = attr_value.get("contains_all")
    contains_any = attr_value.get("contains_any")
    # Only one of the two keys is expected; whichever is present supplies the IDs.
    grouped = clean_up_referenced_global_ids(contains_any or contains_all or [])
    shared_kwargs = {
        "variant_ids": grouped["ProductVariant"],
        "product_ids": grouped["Product"],
        "page_ids": grouped["Page"],
        "category_ids": grouped["Category"],
        "collection_ids": grouped["Collection"],
        "attr_id": attr_id,
        "db_connection_name": db_connection_name,
    }
    if contains_all:
        return _filter_by_contains_all_referenced_object_ids(**shared_kwargs)
    if contains_any:
        return _filter_by_contains_any_referenced_object_ids(**shared_kwargs)
    return Q()
def filter_objects_by_reference_attributes(
    attr_id: int | None,
    attr_value: dict[
        Literal[
            "referenced_ids",
            "page_slugs",
            "product_slugs",
            "product_variant_skus",
            "category_slugs",
            "collection_slugs",
        ],
        CONTAINS_TYPING,
    ],
    db_connection_name: str,
):
    """AND together one sub-filter per reference input key present in *attr_value*."""
    # Dispatch table keeps the per-key handling uniform; order matches the
    # original sequential `if` checks.
    handlers = (
        ("referenced_ids", filter_by_contains_referenced_object_ids),
        ("page_slugs", filter_by_contains_referenced_page_slugs),
        ("product_slugs", filter_by_contains_referenced_product_slugs),
        ("product_variant_skus", filter_by_contains_referenced_variant_skus),
        ("category_slugs", filter_by_contains_referenced_category_slugs),
        ("collection_slugs", filter_by_contains_referenced_collection_slugs),
    )
    filter_expression = Q()
    for key, handler in handlers:
        if key in attr_value:
            filter_expression &= handler(
                attr_id,
                attr_value[key],
                db_connection_name,
            )
    return filter_expression
def filter_variants_by_attributes(
    qs: QuerySet[ProductVariant], value: list[dict]
) -> QuerySet[ProductVariant]:
    """Filter variants by a list of attribute `where` inputs.

    Each entry may carry an attribute ``slug``, a ``value`` sub-filter, or
    both. Entries with a slug but no value match variants that have *any*
    value for that attribute; entries with a value sub-filter are ANDed
    together by value type (slug/name, numeric, boolean, date, datetime,
    reference). Returns ``qs.none()`` when a referenced slug does not exist.
    """
    attribute_slugs = {
        attr_filter["slug"] for attr_filter in value if "slug" in attr_filter
    }
    attributes_map = {
        attr.slug: attr
        for attr in Attribute.objects.using(qs.db).filter(slug__in=attribute_slugs)
    }
    if len(attribute_slugs) != len(attributes_map.keys()):
        # Filter over non existing attribute
        return qs.none()
    attr_filter_expression = Q()
    # First pass: collect attributes given without a value sub-filter; these
    # match variants having any value assigned for the attribute.
    attr_without_values_input = []
    for attr_filter in value:
        if "slug" in attr_filter and "value" not in attr_filter:
            attr_without_values_input.append(attributes_map[attr_filter["slug"]])
    if attr_without_values_input:
        atr_value_qs = AttributeValue.objects.using(qs.db).filter(
            attribute_id__in=[attr.id for attr in attr_without_values_input]
        )
        # NOTE: this replaces (not ANDs into) the empty Q() initialized above.
        attr_filter_expression = _get_assigned_variant_attribute_for_attribute_value_qs(
            atr_value_qs,
            qs.db,
        )
    # Second pass: AND in one expression per value sub-filter, dispatched on
    # the single value-type key each input is expected to carry.
    for attr_filter in value:
        attr_value = attr_filter.get("value")
        if not attr_value:
            # attrs without value input are handled separately
            continue
        attr_id = None
        if attr_slug := attr_filter.get("slug"):
            attr = attributes_map[attr_slug]
            attr_id = attr.id
        attr_value = attr_filter["value"]
        if "slug" in attr_value or "name" in attr_value:
            attr_filter_expression &= filter_by_slug_or_name(
                attr_id,
                attr_value,
                qs.db,
            )
        elif "numeric" in attr_value:
            attr_filter_expression &= filter_by_numeric_attribute(
                attr_id,
                attr_value["numeric"],
                qs.db,
            )
        elif "boolean" in attr_value:
            attr_filter_expression &= filter_by_boolean_attribute(
                attr_id,
                attr_value["boolean"],
                qs.db,
            )
        elif "date" in attr_value:
            attr_filter_expression &= filter_by_date_attribute(
                attr_id,
                attr_value["date"],
                qs.db,
            )
        elif "date_time" in attr_value:
            attr_filter_expression &= filter_by_date_time_attribute(
                attr_id,
                attr_value["date_time"],
                qs.db,
            )
        elif "reference" in attr_value:
            attr_filter_expression &= filter_objects_by_reference_attributes(
                attr_id,
                attr_value["reference"],
                qs.db,
            )
    return qs.filter(attr_filter_expression)
class ProductVariantFilter(MetadataFilterBase):
    # Free-text search across variant name/SKU and parent product name.
    search = django_filters.CharFilter(method="product_variant_filter_search")
    sku = ListObjectTypeFilter(input_class=graphene.String, method=filter_sku_list)
    is_preorder = django_filters.BooleanFilter(method=filter_is_preorder)
    updated_at = ObjectTypeFilter(
        input_class=DateTimeRangeInput, method=filter_updated_at_range
    )

    class Meta:
        model = ProductVariant
        fields = ["search", "sku"]

    def product_variant_filter_search(self, queryset, _name, value):
        if not value:
            return queryset
        # Variants match directly on their own name/SKU, or indirectly
        # through the name of the parent product.
        matching_products = (
            Product.objects.using(queryset.db).filter(name__ilike=value).values("pk")
        )
        search_expression = (
            Q(name__ilike=value)
            | Q(sku__ilike=value)
            | Q(Exists(matching_products.filter(variants=OuterRef("pk"))))
        )
        return queryset.filter(search_expression)
class ProductVariantWhere(MetadataWhereFilterBase):
    ids = GlobalIDMultipleChoiceWhereFilter(method=filter_by_ids("ProductVariant"))
    sku = ObjectTypeWhereFilter(
        input_class=StringFilterInput,
        method="filter_product_sku",
        help_text="Filter by product SKU.",
    )
    updated_at = ObjectTypeWhereFilter(
        input_class=DateTimeRangeInput,
        method="filter_updated_at",
        help_text="Filter by when was the most recent update.",
    )
    attributes = ListObjectTypeWhereFilter(
        input_class=AssignedAttributeWhereInput,
        method="filter_attributes",
        help_text="Filter by attributes associated with the variant." + ADDED_IN_322,
    )

    class Meta:
        model = ProductVariant
        fields = []

    @staticmethod
    def filter_product_sku(qs, _, value):
        return filter_where_by_value_field(qs, "sku", value)

    @staticmethod
    def filter_updated_at(qs, _, value):
        return filter_where_by_range_field(qs, "updated_at", value)

    @staticmethod
    def filter_attributes(qs, _, value):
        # Empty attribute input means "no constraint".
        return filter_variants_by_attributes(qs, value) if value else qs

    def is_valid(self):
        # Validate attribute inputs eagerly so malformed filters surface a
        # precise GraphQL error before any query is built.
        attributes = self.data.get("attributes")
        if attributes:
            validate_attribute_value_input(attributes, self.queryset.db)
        return super().is_valid()
class ProductVariantFilterInput(FilterInputObjectType):
    # GraphQL input wrapper exposing ProductVariantFilter as a `filter` argument.
    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = ProductVariantFilter
class ProductVariantWhereInput(WhereInputObjectType):
    # GraphQL input wrapper exposing ProductVariantWhere as a `where` argument.
    class Meta:
        doc_category = DOC_CATEGORY_PRODUCTS
        filterset_class = ProductVariantWhere
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/filters/product_variant.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 691,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/attribute/shared_filters.py | from typing import Literal, TypedDict, cast
import graphene
from django.db.models import Exists, OuterRef, QuerySet
from graphql import GraphQLError
from ...attribute import AttributeInputType
from ...attribute.models import (
AssignedPageAttributeValue,
AssignedProductAttributeValue,
Attribute,
AttributeValue,
)
from ...page import models as page_models
from ...product import models as product_models
from ..core.filters import DecimalFilterInput
from ..core.filters.where_input import ContainsFilterInput, StringFilterInput
from ..core.types import DateRangeInput, DateTimeRangeInput
from ..core.types.base import BaseInputObjectType
from ..core.utils import from_global_id_or_error
from ..utils.filters import (
Number,
filter_range_field,
filter_where_by_numeric_field,
filter_where_by_value_field,
)
# GraphQL input for filtering by reference-type attribute values. Each field
# accepts a `containsAll`/`containsAny` payload; no class docstring is added
# so graphene's auto-generated schema description stays unchanged.
class AssignedAttributeReferenceInput(BaseInputObjectType):
    referenced_ids = ContainsFilterInput(
        description="Returns objects with a reference pointing to an object identified by the given ID.",
    )
    page_slugs = ContainsFilterInput(
        description="Returns objects with a reference pointing to a page identified by the given slug.",
    )
    product_slugs = ContainsFilterInput(
        description=(
            "Returns objects with a reference pointing to a product identified by the given slug."
        )
    )
    product_variant_skus = ContainsFilterInput(
        description=(
            "Returns objects with a reference pointing "
            "to a product variant identified by the given sku."
        )
    )
    category_slugs = ContainsFilterInput(
        description=(
            "Returns objects with a reference pointing "
            "to a category identified by the given slug."
        )
    )
    collection_slugs = ContainsFilterInput(
        description=(
            "Returns objects with a reference pointing "
            "to a collection identified by the given slug."
        )
    )
# GraphQL input describing a single attribute-value filter. Exactly one of
# the fields is expected per input (enforced by validate_attribute_value_input).
class AssignedAttributeValueInput(BaseInputObjectType):
    slug = StringFilterInput(
        description="Filter by slug assigned to AttributeValue.",
    )
    name = StringFilterInput(
        description="Filter by name assigned to AttributeValue.",
    )
    numeric = DecimalFilterInput(
        required=False,
        description="Filter by numeric value for attributes of numeric type.",
    )
    date = DateRangeInput(
        required=False,
        description="Filter by date value for attributes of date type.",
    )
    date_time = DateTimeRangeInput(
        required=False,
        description="Filter by date time value for attributes of date time type.",
    )
    boolean = graphene.Boolean(
        required=False,
        description="Filter by boolean value for attributes of boolean type.",
    )
    reference = AssignedAttributeReferenceInput(
        required=False,
        description="Filter by reference attribute value.",
    )
# Top-level attribute `where` input: an optional attribute slug plus an
# optional single-key value sub-filter.
class AssignedAttributeWhereInput(BaseInputObjectType):
    slug = graphene.String(description="Filter by attribute slug.", required=False)
    value = AssignedAttributeValueInput(
        required=False,
        description=(
            "Filter by value of the attribute. Only one value input field is allowed. "
            "If provided more than one, the error will be raised."
        ),
    )
# Shape of a "contains" filter payload; at most one of the two keys is expected.
CONTAINS_TYPING = dict[Literal["contains_any", "contains_all"], list[str]]
class SharedContainsFilterParams(TypedDict):
    """Keyword arguments shared by page/product "contains" filter helpers."""

    attr_id: int | None
    db_connection_name: str
    # Assignment model joining the owning object to its attribute values.
    assigned_attr_model: type[
        AssignedPageAttributeValue | AssignedProductAttributeValue
    ]
    # FK field on the assignment model pointing at the owning object.
    assigned_id_field_name: Literal["page_id", "product_id"]
    # Field used to identify the referenced object in the filter input.
    identifier_field_name: Literal["slug", "id", "sku"]
def get_attribute_values_by_slug_or_name_value(
    attr_id: int | None,
    attr_value: dict,
    db_connection_name: str,
) -> QuerySet[AttributeValue]:
    """Return attribute values matching the provided slug and/or name filters."""
    attr_scope = {"attribute_id": attr_id} if attr_id else {}
    values = AttributeValue.objects.using(db_connection_name).filter(**attr_scope)
    # Apply slug first, then name — matching the original chaining order.
    for field in ("slug", "name"):
        if field in attr_value:
            values = filter_where_by_value_field(values, field, attr_value[field])
    return values
def get_attribute_values_by_numeric_value(
    attr_id: int | None,
    numeric_value: dict[str, Number | list[Number] | dict[str, Number]],
    db_connection_name: str,
) -> QuerySet[AttributeValue]:
    """Return values of numeric-type attributes matching the numeric filter."""
    attr_scope = {"attribute_id": attr_id} if attr_id else {}
    candidates = AttributeValue.objects.using(db_connection_name).filter(
        attribute__input_type=AttributeInputType.NUMERIC, **attr_scope
    )
    return filter_where_by_numeric_field(candidates, "numeric", numeric_value)
def get_attribute_values_by_boolean_value(
    attr_id: int | None,
    boolean_value: bool,
    db_connection_name: str,
) -> QuerySet[AttributeValue]:
    """Return values of boolean-type attributes equal to *boolean_value*."""
    attr_scope = {"attribute_id": attr_id} if attr_id else {}
    return AttributeValue.objects.using(db_connection_name).filter(
        attribute__input_type=AttributeInputType.BOOLEAN,
        boolean=boolean_value,
        **attr_scope,
    )
def get_attribute_values_by_date_value(
    attr_id: int | None,
    date_value: dict[str, str],
    db_connection_name: str,
) -> QuerySet[AttributeValue]:
    """Return values of date-type attributes whose date falls in the range."""
    attr_scope = {"attribute_id": attr_id} if attr_id else {}
    candidates = AttributeValue.objects.using(db_connection_name).filter(
        attribute__input_type=AttributeInputType.DATE, **attr_scope
    )
    # Date attributes store a datetime; compare only its date component.
    return filter_range_field(candidates, "date_time__date", date_value)
def get_attribute_values_by_date_time_value(
    attr_id: int | None,
    date_value: dict[str, str],
    db_connection_name: str,
) -> QuerySet[AttributeValue]:
    """Return values of datetime-type attributes falling in the range."""
    attr_scope = {"attribute_id": attr_id} if attr_id else {}
    candidates = AttributeValue.objects.using(db_connection_name).filter(
        attribute__input_type=AttributeInputType.DATE_TIME, **attr_scope
    )
    return filter_range_field(candidates, "date_time", date_value)
def _get_attribute_values_by_referenced_page_identifiers(
    field_name: str,
    identifiers: list[str] | list[int],
    db_connection_name: str,
):
    """Return attribute values referencing pages matched by *field_name*."""
    matched_pages = page_models.Page.objects.using(db_connection_name).filter(
        **{f"{field_name}__in": identifiers}
    )
    return AttributeValue.objects.using(db_connection_name).filter(
        Exists(matched_pages.filter(id=OuterRef("reference_page_id"))),
    )


def get_attribute_values_by_referenced_page_slugs(
    slugs: list[str], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing pages with the given slugs."""
    return _get_attribute_values_by_referenced_page_identifiers(
        field_name="slug", identifiers=slugs, db_connection_name=db_connection_name
    )


def get_attribute_values_by_referenced_page_ids(
    ids: list[int], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing pages with the given primary keys."""
    return _get_attribute_values_by_referenced_page_identifiers(
        field_name="id", identifiers=ids, db_connection_name=db_connection_name
    )
def _get_attribute_values_by_referenced_product_identifiers(
    field_name: str,
    identifiers: list[str] | list[int],
    db_connection_name: str,
) -> QuerySet[AttributeValue]:
    """Return attribute values referencing products matched by *field_name*."""
    matched_products = product_models.Product.objects.using(
        db_connection_name
    ).filter(**{f"{field_name}__in": identifiers})
    return AttributeValue.objects.using(db_connection_name).filter(
        Exists(matched_products.filter(id=OuterRef("reference_product_id"))),
    )
def get_attribute_values_by_referenced_category_slugs(
    slugs: list[str], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing categories with the given slugs."""
    return _get_attribute_values_by_referenced_category_identifiers(
        field_name="slug", identifiers=slugs, db_connection_name=db_connection_name
    )


def get_attribute_values_by_referenced_category_ids(
    ids: list[int], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing categories with the given primary keys."""
    return _get_attribute_values_by_referenced_category_identifiers(
        field_name="id", identifiers=ids, db_connection_name=db_connection_name
    )
def _get_attribute_values_by_referenced_category_identifiers(
    field_name: str,
    identifiers: list[str] | list[int],
    db_connection_name: str,
) -> QuerySet[AttributeValue]:
    """Return attribute values referencing categories matched by *field_name*."""
    matched_categories = product_models.Category.objects.using(
        db_connection_name
    ).filter(**{f"{field_name}__in": identifiers})
    return AttributeValue.objects.using(db_connection_name).filter(
        Exists(matched_categories.filter(id=OuterRef("reference_category_id"))),
    )
def get_attribute_values_by_referenced_collection_slugs(
    slugs: list[str], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing collections with the given slugs."""
    return _get_attribute_values_by_referenced_collection_identifiers(
        field_name="slug", identifiers=slugs, db_connection_name=db_connection_name
    )


def get_attribute_values_by_referenced_collection_ids(
    ids: list[int], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing collections with the given primary keys."""
    return _get_attribute_values_by_referenced_collection_identifiers(
        field_name="id", identifiers=ids, db_connection_name=db_connection_name
    )
def _get_attribute_values_by_referenced_collection_identifiers(
    field_name: str,
    identifiers: list[str] | list[int],
    db_connection_name: str,
) -> QuerySet[AttributeValue]:
    """Return attribute values referencing collections matched by *field_name*."""
    matched_collections = product_models.Collection.objects.using(
        db_connection_name
    ).filter(**{f"{field_name}__in": identifiers})
    return AttributeValue.objects.using(db_connection_name).filter(
        Exists(matched_collections.filter(id=OuterRef("reference_collection_id"))),
    )
def get_attribute_values_by_referenced_product_slugs(
    slugs: list[str], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing products with the given slugs."""
    return _get_attribute_values_by_referenced_product_identifiers(
        field_name="slug", identifiers=slugs, db_connection_name=db_connection_name
    )


def get_attribute_values_by_referenced_product_ids(
    ids: list[int], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing products with the given primary keys."""
    return _get_attribute_values_by_referenced_product_identifiers(
        field_name="id", identifiers=ids, db_connection_name=db_connection_name
    )
def _get_attribute_values_by_referenced_variant_identifiers(
    field_name: str,
    identifiers: list[str] | list[int],
    db_connection_name: str,
):
    """Return attribute values referencing variants matched by *field_name*."""
    matched_variants = product_models.ProductVariant.objects.using(
        db_connection_name
    ).filter(**{f"{field_name}__in": identifiers})
    return AttributeValue.objects.using(db_connection_name).filter(
        Exists(matched_variants.filter(id=OuterRef("reference_variant_id"))),
    )


def get_attribute_values_by_referenced_variant_skus(
    slugs: list[str], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing variants with the given SKUs."""
    # NOTE: the parameter is historically named `slugs` although it carries SKUs.
    return _get_attribute_values_by_referenced_variant_identifiers(
        field_name="sku", identifiers=slugs, db_connection_name=db_connection_name
    )


def get_attribute_values_by_referenced_variant_ids(
    ids: list[int], db_connection_name: str
) -> QuerySet[AttributeValue]:
    """Attribute values referencing variants with the given primary keys."""
    return _get_attribute_values_by_referenced_variant_identifiers(
        field_name="id", identifiers=ids, db_connection_name=db_connection_name
    )
def clean_up_referenced_global_ids(global_ids: list[str]) -> dict[str, set[int]]:
    """Group decoded global IDs by their GraphQL type name.

    IDs of unrecognized types are silently dropped.
    """
    grouped_ids: dict[str, set[int]] = {
        "Product": set(),
        "ProductVariant": set(),
        "Page": set(),
        "Category": set(),
        "Collection": set(),
    }
    for global_id in global_ids:
        type_name, raw_id = graphene.Node.from_global_id(global_id)
        if type_name not in grouped_ids:
            continue
        # NOTE(review): `cast` only satisfies the type checker; the decoded id
        # is presumably a string at runtime and coerced later by ORM lookups —
        # confirm before relying on the annotated `set[int]`.
        grouped_ids[type_name].add(cast(int, raw_id))
    return grouped_ids
def _has_valid_reference_global_id(global_id: "str") -> bool:
    """Return True when *global_id* decodes to a supported reference type."""
    supported_types = (
        "Page",
        "Product",
        "ProductVariant",
        "Category",
        "Collection",
    )
    try:
        obj_type, _ = from_global_id_or_error(global_id)
    except GraphQLError:
        return False
    return obj_type in supported_types
def _has_valid_reference_global_ids(
    single_key_value: CONTAINS_TYPING,
) -> bool:
    """Check every global ID provided under either `contains_*` key."""
    candidates = list(single_key_value.get("contains_all", [])) + list(
        single_key_value.get("contains_any", [])
    )
    return all(_has_valid_reference_global_id(gid) for gid in candidates)
def validate_attribute_value_reference_input(
    index_with_values: list[
        tuple[
            str,
            dict[
                Literal[
                    "referenced_ids",
                    "page_slugs",
                    "product_slugs",
                    "product_variant_skus",
                    "category_slugs",
                    "collection_slugs",
                ],
                CONTAINS_TYPING,
            ]
            | None,
        ]
    ],
):
    """Validate the input for reference attributes.

    This function checks if the input for reference attributes is valid.
    It raises a GraphQLError if the input is invalid: a null/empty value,
    an empty `contains_*` list, both `contains_all` and `contains_any`
    provided together, or a malformed global ID under `referenced_ids`.
    """
    duplicated_error = set()
    empty_input_value_error = set()
    invalid_input_type_error = set()
    invalid_reference_global_id_error = set()
    for index, value in index_with_values:
        if not value:
            # Null/empty reference payload for this input position.
            invalid_input_type_error.add(index)
            continue
        for key in value:
            single_key_value = value[key]
            if (
                "contains_all" in single_key_value
                and "contains_any" in single_key_value
            ):
                duplicated_error.add(index)
                continue
            if (
                "contains_all" in single_key_value
                and not single_key_value["contains_all"]
            ):
                empty_input_value_error.add(index)
                continue
            if (
                "contains_any" in single_key_value
                and not single_key_value["contains_any"]
            ):
                empty_input_value_error.add(index)
                # Fix: skip further checks for this key, mirroring the
                # `contains_all` branch above — an empty list has no IDs
                # worth validating.
                continue
            if key == "referenced_ids":
                if not _has_valid_reference_global_ids(single_key_value):
                    invalid_reference_global_id_error.add(index)
    # Fix: sort the collected positions before joining — sets iterate in an
    # unstable order, which previously made error messages nondeterministic.
    if invalid_reference_global_id_error:
        raise GraphQLError(
            message=(
                "Invalid input for reference attributes. For attribute input on positions: "
                f"{', '.join(sorted(invalid_reference_global_id_error))}. "
                "Provided values must contain valid global IDs."
            )
        )
    if invalid_input_type_error:
        raise GraphQLError(
            message=(
                "Invalid input for reference attributes. For attribute input on positions: "
                f"{', '.join(sorted(invalid_input_type_error))}. "
                "Provided values must contains 'containsAll' or 'containsAny' key."
            )
        )
    if empty_input_value_error:
        raise GraphQLError(
            message=(
                "Invalid input for reference attributes. For attribute input on positions: "
                f"{', '.join(sorted(empty_input_value_error))}. "
                "Provided values cannot be null or empty."
            )
        )
    if duplicated_error:
        raise GraphQLError(
            message=(
                "Invalid input for reference attributes. For attribute input on positions: "
                f"{', '.join(sorted(duplicated_error))}. "
                "Cannot provide both 'containsAll' and 'containsAny' for the same reference filter."
            )
        )
def validate_attribute_value_input(attributes: list[dict], db_connection_name: str):
    """Validate the attribute `where`-filter input list.

    Collects per-entry validation problems (keyed by the entry's position in
    ``attributes``) and raises a ``GraphQLError`` for the first non-empty error
    category, in a fixed priority order: duplicated slugs, reference-value
    errors (delegated), empty/null values, multiple value keys, and finally
    value keys that do not match the attribute's input type.

    Args:
        attributes: List of attribute filter dicts, each optionally containing
            a "slug" and a "value" mapping with exactly one filter key.
        db_connection_name: Database alias used to resolve attribute slugs.

    Raises:
        GraphQLError: If any entry in the input is invalid.
    """
    # Only entries that explicitly provided a "slug" key participate in the
    # duplicate-slug check (a slug explicitly set to None is still counted).
    slug_list = [attr.get("slug") for attr in attributes if "slug" in attr]
    value_as_empty_or_null_list = []
    value_more_than_one_list = []
    invalid_input_type_list = []
    reference_value_list = []
    if len(slug_list) != len(set(slug_list)):
        raise GraphQLError(
            message="Duplicated attribute slugs in attribute 'where' input are not allowed."
        )
    # Maps attribute slug -> (entry index as str, type-specific value key),
    # used below to verify the key against the attribute's input type.
    type_specific_value_with_attr_slug_list = {}
    for index, attr in enumerate(attributes):
        # Entirely empty entry: neither a usable value nor a slug.
        if not attr.get("value") and not attr.get("slug"):
            value_as_empty_or_null_list.append(str(index))
            continue
        attr_slug = attr.get("slug")
        # "slug": None was explicitly passed — treated as an empty input.
        attr_slug_provided_as_none = attr_slug is None and "slug" in attr
        if attr_slug_provided_as_none:
            value_as_empty_or_null_list.append(str(index))
            continue
        # "value" key present but falsy (None / empty dict).
        value_as_empty = "value" in attr and not attr["value"]
        if value_as_empty:
            value_as_empty_or_null_list.append(str(index))
            continue
        value = attr.get("value")
        if not value:
            # Slug-only filter (attribute existence check) — nothing to validate.
            continue
        value_keys = value.keys()
        if len(value_keys) > 1:
            # Exactly one filter key is allowed per "value" object.
            value_more_than_one_list.append(str(index))
            continue
        value_key = list(value_keys)[0]
        # Type-specific keys (numeric, date, dateTime, boolean, reference)
        # can only be checked against the DB when a slug was provided.
        if value_key not in ["slug", "name"] and attr_slug:
            type_specific_value_with_attr_slug_list[attr_slug] = (str(index), value_key)
        if value[value_key] is None:
            value_as_empty_or_null_list.append(str(index))
            continue
        if value_key == "reference":
            # Reference payloads get dedicated validation below.
            reference_value_list.append((str(index), value["reference"]))
    if type_specific_value_with_attr_slug_list:
        # Resolve the referenced attributes in one query to compare the
        # provided value key with each attribute's configured input type.
        attribute_input_type_map = Attribute.objects.using(db_connection_name).in_bulk(
            type_specific_value_with_attr_slug_list.keys(),
            field_name="slug",
        )
        for attr_slug, (
            index_str,
            value_key,
        ) in type_specific_value_with_attr_slug_list.items():
            if attr_slug not in attribute_input_type_map:
                # Unknown slug: not an input-type mismatch, so skip silently.
                continue
            input_type = attribute_input_type_map[attr_slug].input_type
            if "numeric" == value_key and input_type != AttributeInputType.NUMERIC:
                invalid_input_type_list.append(index_str)
            if "date" == value_key and input_type != AttributeInputType.DATE:
                invalid_input_type_list.append(index_str)
            if "date_time" == value_key and input_type != AttributeInputType.DATE_TIME:
                invalid_input_type_list.append(index_str)
            if "boolean" == value_key and input_type != AttributeInputType.BOOLEAN:
                invalid_input_type_list.append(index_str)
            if "reference" == value_key and input_type not in [
                AttributeInputType.REFERENCE,
                AttributeInputType.SINGLE_REFERENCE,
            ]:
                invalid_input_type_list.append(index_str)
    # May raise on its own before the error categories below are reported.
    validate_attribute_value_reference_input(reference_value_list)
    if value_as_empty_or_null_list:
        raise GraphQLError(
            message=(
                f"Incorrect input for attributes on position: {','.join(value_as_empty_or_null_list)}. "
                "Provided 'value' cannot be empty or null."
            )
        )
    if value_more_than_one_list:
        raise GraphQLError(
            message=(
                f"Incorrect input for attributes on position: {','.join(value_more_than_one_list)}. "
                "Provided 'value' must have only one input key."
            )
        )
    if invalid_input_type_list:
        raise GraphQLError(
            message=(
                f"Incorrect input for attributes on position: {','.join(invalid_input_type_list)}. "
                "Provided 'value' do not match the attribute input type."
            )
        )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/attribute/shared_filters.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 493,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/shared.py | QUERY_PAGES_WITH_WHERE = """
query ($where: PageWhereInput) {
pages(first: 5, where:$where) {
totalCount
edges {
node {
id
}
}
}
}
"""
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/shared.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes.py | import graphene
import pytest
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
def test_pages_query_with_attribute_slug(
    staff_api_client, page_list, page_type, size_page_attribute
):
    """Filtering pages by attribute slug alone returns only pages carrying it."""
    # given: attach the size attribute (with its first value) to the first page
    page_type.page_attributes.add(size_page_attribute)
    first_value = size_page_attribute.values.first()
    associate_attribute_values_to_instance(
        page_list[0], {size_page_attribute.pk: [first_value]}
    )
    where_input = {"where": {"attributes": [{"slug": size_page_attribute.slug}]}}

    # when
    response = staff_api_client.post_graphql(QUERY_PAGES_WITH_WHERE, where_input)

    # then: exactly the first page is matched
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    expected_id = graphene.Node.to_global_id("Page", page_list[0].pk)
    assert len(edges) == 1
    assert edges[0]["node"]["id"] == expected_id
@pytest.mark.parametrize(
    ("attribute_input", "expected_count"),
    [
        ({"value": {"slug": {"eq": "test-slug-1"}}}, 1),
        ({"value": {"slug": {"oneOf": ["test-slug-1", "test-slug-2"]}}}, 2),
        ({"slug": "size_page_attribute", "value": {"slug": {"eq": "test-slug-1"}}}, 1),
        (
            {
                "slug": "size_page_attribute",
                "value": {"slug": {"oneOf": ["test-slug-1", "test-slug-2"]}},
            },
            2,
        ),
    ],
)
def test_pages_query_with_attribute_value_slug(
    attribute_input,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    size_page_attribute,
):
    """Pages can be filtered by attribute value slug via `eq` and `oneOf`."""
    # given: two pages, each assigned a distinct value of the size attribute
    size_page_attribute.slug = "size_page_attribute"
    size_page_attribute.save()
    page_type.page_attributes.add(size_page_attribute)

    first_value = size_page_attribute.values.first()
    first_value.slug = "test-slug-1"
    first_value.save()
    second_value = size_page_attribute.values.last()
    second_value.slug = "test-slug-2"
    second_value.save()

    for page, value in zip(page_list[:2], (first_value, second_value)):
        associate_attribute_values_to_instance(
            page, {size_page_attribute.pk: [value]}
        )

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": [attribute_input]}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == expected_count
@pytest.mark.parametrize(
    ("attribute_input", "expected_count"),
    [
        ({"value": {"name": {"eq": "test-name-1"}}}, 1),
        ({"value": {"name": {"oneOf": ["test-name-1", "test-name-2"]}}}, 2),
        ({"slug": "size_page_attribute", "value": {"name": {"eq": "test-name-1"}}}, 1),
        (
            {
                "slug": "size_page_attribute",
                "value": {"name": {"oneOf": ["test-name-1", "test-name-2"]}},
            },
            2,
        ),
    ],
)
def test_pages_query_with_attribute_value_name(
    attribute_input,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    size_page_attribute,
):
    """Pages can be filtered by attribute value name via `eq` and `oneOf`."""
    # given: two pages, each assigned a distinctly named value of the attribute
    size_page_attribute.slug = "size_page_attribute"
    size_page_attribute.save()
    page_type.page_attributes.add(size_page_attribute)

    first_value = size_page_attribute.values.first()
    first_value.name = "test-name-1"
    first_value.save()
    second_value = size_page_attribute.values.last()
    second_value.name = "test-name-2"
    second_value.save()

    for page, value in zip(page_list[:2], (first_value, second_value)):
        associate_attribute_values_to_instance(
            page, {size_page_attribute.pk: [value]}
        )

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": [attribute_input]}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes_boolean.py | import graphene
import pytest
from ......attribute import AttributeInputType, AttributeType
from ......attribute.models import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    "boolean_input",
    [
        {"value": {"boolean": True}},
        {"value": {"name": {"eq": "True-name"}}},
        {"value": {"slug": {"eq": "true_slug"}}},
        {"value": {"name": {"oneOf": ["True-name", "True-name-2"]}}},
        {"value": {"slug": {"oneOf": ["true_slug"]}}},
        {"slug": "b_s", "value": {"boolean": True}},
        {"slug": "b_s", "value": {"name": {"eq": "True-name"}}},
        {"slug": "b_s", "value": {"slug": {"eq": "true_slug"}}},
        {"slug": "b_s", "value": {"name": {"oneOf": ["True-name", "True-name-2"]}}},
        {"slug": "b_s", "value": {"slug": {"oneOf": ["true_slug"]}}},
    ],
)
def test_pages_query_with_attribute_value_boolean(
    boolean_input,
    staff_api_client,
    page_list,
    page_type,
    boolean_attribute,
):
    """Boolean attribute filters match only the page with the True value."""
    # given: page 0 holds a True value; page 1 holds a False value of a
    # second boolean attribute, so every filter variant must match page 0 only
    boolean_attribute.slug = "b_s"
    boolean_attribute.type = "PAGE_TYPE"
    boolean_attribute.save()
    second_attribute = Attribute.objects.create(
        slug="s_boolean",
        name="Boolean",
        type=AttributeType.PAGE_TYPE,
        input_type=AttributeInputType.BOOLEAN,
    )
    page_type.page_attributes.add(boolean_attribute, second_attribute)

    true_value = boolean_attribute.values.filter(boolean=True).first()
    true_value.name = "True-name"
    true_value.slug = "true_slug"
    true_value.save()
    associate_attribute_values_to_instance(
        page_list[0], {boolean_attribute.pk: [true_value]}
    )

    false_value = AttributeValue.objects.create(
        attribute=second_attribute,
        name=f"{second_attribute.name}: Yes",
        slug=f"{second_attribute.id}_false",
        boolean=False,
    )
    associate_attribute_values_to_instance(
        page_list[1], {second_attribute.pk: [false_value]}
    )

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": [boolean_input]}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["id"] == graphene.Node.to_global_id(
        "Page", page_list[0].pk
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes_boolean.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes_date.py | import datetime
import pytest
from ......attribute import AttributeInputType, AttributeType
from ......attribute.models import Attribute
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    ("date_input", "expected_count"),
    [
        ({"slug": "date", "value": {"date": {"gte": "2021-01-01"}}}, 1),
        ({"slug": "date", "value": {"name": {"eq": "date-name-1"}}}, 1),
        ({"slug": "date", "value": {"slug": {"eq": "date-slug-1"}}}, 1),
        (
            {
                "slug": "date",
                "value": {"name": {"oneOf": ["date-name-1", "date-name-2"]}},
            },
            1,
        ),
        (
            {
                "slug": "date",
                "value": {"slug": {"oneOf": ["date-slug-1", "date-slug-2"]}},
            },
            1,
        ),
        (
            {
                "slug": "date",
                "value": {"date": {"gte": "2021-01-02", "lte": "2021-01-03"}},
            },
            1,
        ),
        ({"value": {"date": {"gte": "2021-01-01"}}}, 2),
        ({"value": {"name": {"eq": "date-name-1"}}}, 1),
        ({"value": {"slug": {"eq": "date-slug-1"}}}, 1),
        ({"value": {"name": {"oneOf": ["date-name-1", "date-name-2"]}}}, 2),
        ({"value": {"slug": {"oneOf": ["date-slug-1", "date-slug-2"]}}}, 2),
        ({"value": {"date": {"gte": "2021-01-01", "lte": "2021-01-02"}}}, 1),
    ],
)
def test_pages_query_with_attribute_value_date(
    date_input,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    date_attribute,
):
    """Date attribute filters: slug-scoped variants match one attribute,
    slug-less variants match values across both date attributes."""
    # given: page 0 carries a value of the "date" attribute, page 1 a value
    # of a second, independent date attribute
    date_attribute.type = "PAGE_TYPE"
    date_attribute.slug = "date"
    date_attribute.save()
    second_date_attribute = Attribute.objects.create(
        slug="second_date",
        name="Second date",
        type=AttributeType.PAGE_TYPE,
        input_type=AttributeInputType.DATE,
    )
    page_type.page_attributes.add(date_attribute, second_date_attribute)

    first_value = date_attribute.values.first()
    first_value.date_time = datetime.datetime(2021, 1, 3, tzinfo=datetime.UTC)
    first_value.name = "date-name-1"
    first_value.slug = "date-slug-1"
    first_value.save()
    associate_attribute_values_to_instance(
        page_list[0], {date_attribute.pk: [first_value]}
    )

    other_value = second_date_attribute.values.create(
        date_time=datetime.datetime(2021, 1, 2, tzinfo=datetime.UTC),
        name="date-name-2",
        slug="date-slug-2",
    )
    associate_attribute_values_to_instance(
        page_list[1], {second_date_attribute.pk: [other_value]}
    )

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": [date_input]}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes_date.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes_datetime.py | import datetime
import pytest
from ......attribute import AttributeInputType, AttributeType
from ......attribute.models import Attribute
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    ("date_time_input", "expected_count"),
    [
        ({"slug": "dt", "value": {"name": {"eq": "datetime-name-1"}}}, 1),
        ({"slug": "dt", "value": {"slug": {"eq": "datetime-slug-1"}}}, 1),
        (
            {
                "slug": "dt",
                "value": {"name": {"oneOf": ["datetime-name-1", "datetime-name-2"]}},
            },
            2,
        ),
        (
            {
                "slug": "dt",
                "value": {"slug": {"oneOf": ["datetime-slug-1", "datetime-slug-2"]}},
            },
            2,
        ),
        ({"slug": "dt", "value": {"dateTime": {"gte": "2021-01-01T00:00:00Z"}}}, 2),
        (
            {
                "slug": "dt",
                "value": {
                    "dateTime": {
                        "gte": "2021-01-01T00:00:00Z",
                        "lte": "2021-01-02T00:00:00Z",
                    }
                },
            },
            1,
        ),
        ({"value": {"name": {"eq": "datetime-name-1"}}}, 1),
        ({"value": {"slug": {"eq": "datetime-slug-1"}}}, 1),
        ({"value": {"name": {"oneOf": ["datetime-name-1", "datetime-name-2"]}}}, 2),
        ({"value": {"slug": {"oneOf": ["datetime-slug-1", "datetime-slug-2"]}}}, 2),
        ({"value": {"dateTime": {"gte": "2021-01-01T00:00:00Z"}}}, 3),
        (
            {
                "value": {
                    "dateTime": {
                        "gte": "2021-01-01T00:00:00Z",
                        "lte": "2021-01-02T00:00:00Z",
                    }
                }
            },
            2,
        ),
    ],
)
def test_pages_query_with_attribute_value_date_time(
    date_time_input,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    date_time_attribute,
):
    """DateTime attribute filters: slug-scoped variants only see the "dt"
    attribute's two values; slug-less variants also see the second attribute."""
    # given: pages 0 and 1 carry values of the "dt" attribute (Jan 3 and
    # Jan 1), page 2 carries a value of an independent datetime attribute
    date_time_attribute.slug = "dt"
    date_time_attribute.type = "PAGE_TYPE"
    date_time_attribute.save()
    second_date_attribute = Attribute.objects.create(
        slug="second_dt",
        name="Second dt",
        type=AttributeType.PAGE_TYPE,
        input_type=AttributeInputType.DATE_TIME,
    )
    page_type.page_attributes.add(date_time_attribute, second_date_attribute)

    first_value = date_time_attribute.values.first()
    first_value.date_time = datetime.datetime(2021, 1, 3, tzinfo=datetime.UTC)
    first_value.name = "datetime-name-1"
    first_value.slug = "datetime-slug-1"
    first_value.save()
    associate_attribute_values_to_instance(
        page_list[0], {date_time_attribute.pk: [first_value]}
    )

    second_value = date_time_attribute.values.last()
    second_value.date_time = datetime.datetime(2021, 1, 1, tzinfo=datetime.UTC)
    second_value.name = "datetime-name-2"
    second_value.slug = "datetime-slug-2"
    second_value.save()
    associate_attribute_values_to_instance(
        page_list[1], {date_time_attribute.pk: [second_value]}
    )

    other_attr_value = second_date_attribute.values.create(
        date_time=datetime.datetime(2021, 1, 1, tzinfo=datetime.UTC),
        name="second-datetime-name",
        slug="second-datetime-slug",
    )
    associate_attribute_values_to_instance(
        page_list[2], {second_date_attribute.pk: [other_attr_value]}
    )

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": [date_time_input]}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes_datetime.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes_numeric.py | import pytest
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    ("numeric_input", "expected_count"),
    [
        ({"slug": "num-slug", "value": {"numeric": {"eq": 1.2}}}, 1),
        ({"slug": "num-slug", "value": {"numeric": {"oneOf": [1.2, 2]}}}, 2),
        (
            {"slug": "num-slug", "value": {"numeric": {"range": {"gte": 1, "lte": 2}}}},
            2,
        ),
        ({"slug": "num-slug", "value": {"name": {"eq": "1.2"}}}, 1),
        ({"slug": "num-slug", "value": {"slug": {"eq": "1.2"}}}, 1),
        ({"slug": "num-slug", "value": {"name": {"oneOf": ["1.2", "2"]}}}, 2),
        ({"slug": "num-slug", "value": {"slug": {"oneOf": ["1.2", "2"]}}}, 2),
        ({"value": {"numeric": {"eq": 1.2}}}, 1),
        ({"value": {"numeric": {"oneOf": [1.2, 2]}}}, 2),
        ({"value": {"numeric": {"range": {"gte": 1, "lte": 2}}}}, 2),
        ({"value": {"numeric": {"range": {"gte": 1}}}}, 3),
        ({"value": {"name": {"eq": "1.2"}}}, 1),
        ({"value": {"slug": {"eq": "1.2"}}}, 1),
        ({"value": {"name": {"oneOf": ["1.2", "2"]}}}, 2),
        ({"value": {"slug": {"oneOf": ["1.2", "2"]}}}, 2),
    ],
)
def test_pages_query_with_attribute_value_numeric(
    numeric_input,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    numeric_attribute_without_unit,
    numeric_attribute,
):
    """Numeric attribute filters: eq / oneOf / range, scoped and unscoped."""
    # given: pages 0 and 1 hold values 1.2 and 2 of the "num-slug" attribute;
    # page 2 holds a value of an unrelated numeric attribute
    numeric_attribute_without_unit.slug = "num-slug"
    numeric_attribute_without_unit.type = "PAGE_TYPE"
    numeric_attribute_without_unit.save()
    page_type.page_attributes.add(numeric_attribute_without_unit, numeric_attribute)

    low_value = numeric_attribute_without_unit.values.first()
    low_value.name = "1.2"
    low_value.slug = "1.2"
    low_value.numeric = 1.2
    low_value.save()
    high_value = numeric_attribute_without_unit.values.last()
    high_value.name = "2"
    high_value.slug = "2"
    high_value.numeric = 2
    high_value.save()
    other_value = numeric_attribute.values.first()

    associate_attribute_values_to_instance(
        page_list[0],
        {
            numeric_attribute_without_unit.pk: [low_value],
        },
    )
    associate_attribute_values_to_instance(
        page_list[1], {numeric_attribute_without_unit.pk: [high_value]}
    )
    associate_attribute_values_to_instance(
        page_list[2], {numeric_attribute.pk: [other_value]}
    )

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": [numeric_input]}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_attributes_numeric.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_ids.py | import graphene
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
def test_pages_query_with_where_by_ids(
    staff_api_client, permission_manage_pages, page_list, page_list_unpublished
):
    """The `ids` where-filter matches pages regardless of publication status."""
    # given: a staff user with manage-pages permission, selecting one
    # published and one unpublished page by global ID
    staff_api_client.user.user_permissions.add(permission_manage_pages)
    selected_pages = [page_list[0], page_list_unpublished[-1]]
    page_ids = [
        graphene.Node.to_global_id("Page", page.pk) for page in selected_pages
    ]

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE, {"where": {"ids": page_ids}}
    )

    # then
    content = get_graphql_content(response)
    assert content["data"]["pages"]["totalCount"] == len(page_ids)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_ids.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_metadata.py | import graphene
import pytest
from ......page.models import Page
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    ("metadata", "expected_indexes"),
    [
        ({"key": "foo"}, [0, 1]),
        ({"key": "foo", "value": {"eq": "bar"}}, [0]),
        ({"key": "foo", "value": {"eq": "baz"}}, []),
        ({"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}, [0, 1]),
        ({"key": "notfound"}, []),
        ({"key": "foo", "value": {"eq": None}}, []),
        ({"key": "foo", "value": {"oneOf": []}}, []),
        (None, []),
    ],
)
def test_pages_with_where_metadata(
    metadata,
    expected_indexes,
    page_list,
    page_type,
    staff_api_client,
):
    """Metadata where-filter: key presence, eq, and oneOf comparisons."""
    # given: two pages with a "foo" metadata key and one page without metadata
    page_list[0].metadata = {"foo": "bar"}
    page_list[1].metadata = {"foo": "zaz"}
    Page.objects.bulk_update(page_list, ["metadata"])
    empty_metadata_page = Page.objects.create(
        title="Test page",
        slug="test-url-3",
        is_published=True,
        page_type=page_type,
        metadata={},
    )
    page_list.append(empty_metadata_page)

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"metadata": metadata}},
    )

    # then: exactly the expected pages (by index) are returned
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    returned_ids = {edge["node"]["id"] for edge in edges}
    expected_ids = {
        graphene.Node.to_global_id("Page", page_list[i].pk) for i in expected_indexes
    }
    assert len(edges) == len(expected_indexes)
    assert returned_ids == expected_ids
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_metadata.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_multiple_arguments.py | import pytest
from ......attribute.utils import associate_attribute_values_to_instance
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    "attribute_filter",
    [
        # Non-existing attribute slug
        [{"slug": "non-existing-attribute"}],
        # Existing attribute with non-existing value name
        [{"slug": "tag", "value": {"name": {"eq": "Non-existing Name"}}}],
        [{"value": {"name": {"eq": "Non-existing Name"}}}],
        # Existing numeric attribute with out-of-range value
        [{"slug": "count", "value": {"numeric": {"eq": 999}}}],
        [{"value": {"numeric": {"eq": 999}}}],
        # Existing boolean attribute with no matching boolean value
        [{"slug": "boolean", "value": {"boolean": False}}],
        [{"value": {"boolean": False}}],
        # Multiple attributes where one doesn't exist
        [
            {"slug": "page-size", "value": {"slug": {"eq": "10"}}},
            {"slug": "non-existing-attr", "value": {"slug": {"eq": "some-value"}}},
        ],
        [
            {"value": {"slug": {"eq": "10"}}},
            {"slug": "non-existing-attr", "value": {"slug": {"eq": "some-value"}}},
        ],
    ],
)
def test_pages_query_with_non_matching_records(
    attribute_filter,
    staff_api_client,
    page_list,
    page_type,
    size_page_attribute,
    tag_page_attribute,
    boolean_attribute,
    numeric_attribute_without_unit,
    date_attribute,
    date_time_attribute,
):
    """Filters that reference unknown attributes or unmatched values return
    no pages, even when the page carries other matching attributes."""
    # given: one fully-attributed page; every parametrized filter must miss it
    boolean_attribute.type = "PAGE_TYPE"
    boolean_attribute.save()
    numeric_attribute_without_unit.type = "PAGE_TYPE"
    numeric_attribute_without_unit.save()
    page_type.page_attributes.add(
        size_page_attribute,
        tag_page_attribute,
        boolean_attribute,
        numeric_attribute_without_unit,
        date_attribute,
        date_time_attribute,
    )

    size_value = size_page_attribute.values.get(slug="10")
    tag_value = tag_page_attribute.values.get(name="About")
    boolean_value = boolean_attribute.values.filter(boolean=True).first()
    numeric_value = numeric_attribute_without_unit.values.first()
    date_time_value = date_time_attribute.values.first()
    date_value = date_attribute.values.first()

    date_attribute.slug = "date"
    date_attribute.save()
    date_time_attribute.slug = "date_time"
    date_time_attribute.save()

    associate_attribute_values_to_instance(
        page_list[0],
        {
            size_page_attribute.pk: [size_value],
            tag_page_attribute.pk: [tag_value],
            boolean_attribute.pk: [boolean_value],
            numeric_attribute_without_unit.pk: [numeric_value],
            date_attribute.pk: [date_value],
            date_time_attribute.pk: [date_time_value],
        },
    )

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": attribute_filter}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == 0
@pytest.mark.parametrize(
    ("attribute_where_input", "expected_count_result"),
    [
        (
            [
                {"slug": "page-size", "value": {"slug": {"eq": "10"}}},
                {"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
                {"slug": "author", "value": {"slug": {"oneOf": ["test-author-1"]}}},
                {"slug": "boolean", "value": {"boolean": True}},
            ],
            1,
        ),
        (
            [
                {"slug": "page-size", "value": {"slug": {"eq": "10"}}},
                {"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
            ],
            1,
        ),
        (
            [
                {"slug": "page-size", "value": {"slug": {"eq": "10"}}},
                {"slug": "boolean", "value": {"boolean": False}},
            ],
            0,
        ),
        (
            [
                {"slug": "tag", "value": {"name": {"eq": "About"}}},
                {"slug": "page-size", "value": {"slug": {"eq": "10"}}},
            ],
            1,
        ),
        (
            [
                {"slug": "page-size", "value": {"slug": {"eq": "15"}}},
                {"slug": "tag", "value": {"name": {"eq": "Help"}}},
                {"slug": "boolean", "value": {"boolean": False}},
            ],
            0,
        ),
        (
            [
                {
                    "slug": "author",
                    "value": {"slug": {"oneOf": ["test-author-1", "test-author-2"]}},
                },
                {"slug": "page-size", "value": {"slug": {"eq": "10"}}},
            ],
            1,
        ),
        (
            [
                {"slug": "page-size", "value": {"slug": {"eq": "10"}}},
                {"slug": "author", "value": {"name": {"eq": "Test author 1"}}},
            ],
            1,
        ),
        (
            [
                {"slug": "page-size", "value": {"slug": {"eq": "10"}}},
                {"slug": "tag", "value": {"name": {"eq": "About"}}},
                {"slug": "author", "value": {"slug": {"eq": "test-author-1"}}},
            ],
            1,
        ),
        (
            [
                {"slug": "page-size", "value": {"slug": {"oneOf": ["10", "15"]}}},
                {"slug": "tag", "value": {"name": {"oneOf": ["About", "Help"]}}},
            ],
            2,
        ),
        (
            [
                {"slug": "page-size", "value": {"slug": {"oneOf": ["10", "15"]}}},
                {"slug": "boolean", "value": {"boolean": True}},
            ],
            1,
        ),
        ([{"value": {"slug": {"oneOf": ["test-author-1", "test-author-2"]}}}], 2),
        (
            [
                {"value": {"slug": {"oneOf": ["10", "15"]}}},
                {"value": {"boolean": True}},
            ],
            1,
        ),
    ],
)
def test_pages_query_with_multiple_attribute_filters(
    attribute_where_input,
    expected_count_result,
    staff_api_client,
    page_list,
    page_type,
    size_page_attribute,
    tag_page_attribute,
    author_page_attribute,
    boolean_attribute,
):
    """Multiple attribute filter entries are AND-ed together per page."""
    # given: page 0 matches size=10 / tag=About / author-1 / boolean=True,
    # page 1 matches size=15 / tag=Help / author-2 (no boolean)
    boolean_attribute.type = "PAGE_TYPE"
    boolean_attribute.save()
    page_type.page_attributes.add(
        size_page_attribute,
        tag_page_attribute,
        author_page_attribute,
        boolean_attribute,
    )

    first_page_values = {
        size_page_attribute.pk: [size_page_attribute.values.get(slug="10")],
        tag_page_attribute.pk: [tag_page_attribute.values.get(name="About")],
        author_page_attribute.pk: [
            author_page_attribute.values.get(slug="test-author-1")
        ],
        boolean_attribute.pk: [
            boolean_attribute.values.filter(boolean=True).first()
        ],
    }
    associate_attribute_values_to_instance(page_list[0], first_page_values)

    second_page_values = {
        size_page_attribute.pk: [size_page_attribute.values.get(slug="15")],
        tag_page_attribute.pk: [tag_page_attribute.values.get(name="Help")],
        author_page_attribute.pk: [
            author_page_attribute.values.get(slug="test-author-2")
        ],
    }
    associate_attribute_values_to_instance(page_list[1], second_page_values)

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": attribute_where_input}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == expected_count_result
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_multiple_arguments.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_page_type.py | import graphene
from ......page.models import Page, PageType
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
def test_pages_with_where_page_type_eq(staff_api_client, page_type_list):
    """Filtering by `pageType.eq` returns every page of that page type."""
    # given: pick an arbitrary page and filter by its page type
    sample_page = Page.objects.first()
    target_type = sample_page.page_type
    type_global_id = graphene.Node.to_global_id("PageType", sample_page.page_type.pk)
    expected_total = Page.objects.filter(page_type=target_type).count()
    # sanity: other page types exist and the target type has pages
    assert PageType.objects.exclude(pk=target_type.pk).count() != 0
    assert expected_total != 0

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"pageType": {"eq": type_global_id}}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == expected_total
def test_pages_with_where_page_type_one_of(staff_api_client, page_type_list):
    """Filtering by `pageType.oneOf` returns every page of the listed types."""
    # given: pick an arbitrary page and filter by a one-element type list
    sample_page = Page.objects.first()
    target_type = sample_page.page_type
    type_global_id = graphene.Node.to_global_id("PageType", sample_page.page_type.pk)
    expected_total = Page.objects.filter(page_type=target_type).count()
    # sanity: other page types exist and the target type has pages
    assert PageType.objects.exclude(pk=target_type.pk).count() != 0
    assert expected_total != 0

    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"pageType": {"oneOf": [type_global_id]}}},
    )

    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == expected_total
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_page_type.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_references_pages.py | import pytest
from ......attribute import AttributeEntityType, AttributeInputType, AttributeType
from ......attribute.models import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from ......page.models import Page
from .....core.utils import to_global_id_or_none
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    ("filter_type", "expected_count"), [("containsAny", 2), ("containsAll", 1)]
)
def test_pages_query_with_attr_slug_and_attribute_value_reference_to_pages(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_page_reference_attribute,
):
    """Filter pages by referenced-page slugs under an explicit attribute slug.

    One page references both created pages and another references only the
    second, so ``containsAny`` matches 2 pages while ``containsAll`` matches 1.
    """
    # given
    page_type.page_attributes.add(page_type_page_reference_attribute)
    reference_page_1_slug = "referenced-page-1"
    reference_page_2_slug = "referenced-page-2"
    # The two pages that will be pointed at by the reference attribute values.
    referenced_page_1, referenced_page_2 = Page.objects.bulk_create(
        [
            Page(
                title="Referenced Page 1",
                slug=reference_page_1_slug,
                page_type=page_type,
                is_published=True,
            ),
            Page(
                title="Referenced Page 2",
                slug=reference_page_2_slug,
                page_type=page_type,
                is_published=True,
            ),
        ]
    )
    attribute_value_1, attribute_value_2 = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=page_type_page_reference_attribute,
                name=f"Page {referenced_page_1.pk}",
                slug=f"page-{referenced_page_1.pk}",
                reference_page=referenced_page_1,
            ),
            AttributeValue(
                attribute=page_type_page_reference_attribute,
                name=f"Page {referenced_page_2.pk}",
                slug=f"page-{referenced_page_2.pk}",
                reference_page=referenced_page_2,
            ),
        ]
    )
    # page_list[0] references both pages; page_list[1] references only one.
    page_with_both_references = page_list[0]
    associate_attribute_values_to_instance(
        page_with_both_references,
        {page_type_page_reference_attribute.pk: [attribute_value_1, attribute_value_2]},
    )
    page_with_single_reference = page_list[1]
    associate_attribute_values_to_instance(
        page_with_single_reference,
        {page_type_page_reference_attribute.pk: [attribute_value_2]},
    )
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": "page-reference",
                    "value": {
                        "reference": {
                            "pageSlugs": {
                                filter_type: [
                                    reference_page_1_slug,
                                    reference_page_2_slug,
                                ]
                            }
                        }
                    },
                }
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    assert len(pages_nodes) == expected_count
@pytest.mark.parametrize(
    ("filter_type", "expected_count"), [("containsAny", 2), ("containsAll", 1)]
)
def test_pages_query_with_attribute_value_reference_to_pages(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_page_reference_attribute,
):
    """Filter pages by referenced-page slugs without pinning an attribute slug.

    References are spread across two distinct attributes, so the filter must
    match values regardless of which reference attribute holds them.
    """
    # given
    # A second reference attribute so the slug-less filter spans attributes.
    second_page_reference_attribute = Attribute.objects.create(
        slug="second-page-reference",
        name="Page reference",
        type=AttributeType.PAGE_TYPE,
        input_type=AttributeInputType.REFERENCE,
        entity_type=AttributeEntityType.PAGE,
    )
    page_type.page_attributes.add(page_type_page_reference_attribute)
    page_type.page_attributes.add(second_page_reference_attribute)
    reference_1 = "referenced-page-1"
    reference_2 = "referenced-page-2"
    referenced_page_1, referenced_page_2 = Page.objects.bulk_create(
        [
            Page(
                title="Referenced Page 1",
                slug=reference_1,
                page_type=page_type,
                is_published=True,
            ),
            Page(
                title="Referenced Page 2",
                slug=reference_2,
                page_type=page_type,
                is_published=True,
            ),
        ]
    )
    # Each value lives on a different reference attribute.
    attribute_value_1, attribute_value_2 = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=page_type_page_reference_attribute,
                name=f"Page {referenced_page_1.pk}",
                slug=f"page-{referenced_page_1.pk}",
                reference_page=referenced_page_1,
            ),
            AttributeValue(
                attribute=second_page_reference_attribute,
                name=f"Page {referenced_page_2.pk}",
                slug=f"page-{referenced_page_2.pk}",
                reference_page=referenced_page_2,
            ),
        ]
    )
    # page_list[0] holds both references (via two attributes); page_list[1] one.
    page_with_both_references = page_list[0]
    associate_attribute_values_to_instance(
        page_with_both_references,
        {
            page_type_page_reference_attribute.pk: [attribute_value_1],
            second_page_reference_attribute.pk: [attribute_value_2],
        },
    )
    page_with_single_reference = page_list[1]
    associate_attribute_values_to_instance(
        page_with_single_reference,
        {second_page_reference_attribute.pk: [attribute_value_2]},
    )
    # No "slug" key here — the filter applies to all reference attributes.
    variables = {
        "where": {
            "attributes": [
                {
                    "value": {
                        "reference": {
                            "pageSlugs": {filter_type: [reference_1, reference_2]}
                        }
                    },
                }
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    assert len(pages_nodes) == expected_count
@pytest.mark.parametrize(
    ("filter_type", "expected_count"), [("containsAny", 3), ("containsAll", 2)]
)
def test_pages_query_with_attr_slug_and_attribute_value_referenced_page_ids(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_page_reference_attribute,
):
    """Filter pages by referenced-page global IDs under an explicit attribute slug.

    Two pages reference all three created pages and a third references only the
    first one, so ``containsAny`` matches 3 pages while ``containsAll`` matches 2.
    """
    # given
    page_type.page_attributes.add(
        page_type_page_reference_attribute,
    )
    referenced_first_page, referenced_second_page, referenced_third_page = (
        Page.objects.bulk_create(
            [
                Page(
                    title="Referenced Page",
                    slug="referenced-page",
                    page_type=page_type,
                    is_published=True,
                ),
                Page(
                    title="Referenced Page",
                    slug="referenced-page2",
                    page_type=page_type,
                    is_published=True,
                ),
                Page(
                    title="Referenced Page",
                    slug="referenced-page3",
                    page_type=page_type,
                    is_published=True,
                ),
            ]
        )
    )
    first_attr_value, second_attr_value, third_attr_value = (
        AttributeValue.objects.bulk_create(
            [
                AttributeValue(
                    attribute=page_type_page_reference_attribute,
                    name=f"Page {referenced_first_page.pk}",
                    slug=f"page-{referenced_first_page.pk}",
                    reference_page=referenced_first_page,
                ),
                AttributeValue(
                    attribute=page_type_page_reference_attribute,
                    name=f"Page {referenced_second_page.pk}",
                    slug=f"page-{referenced_second_page.pk}",
                    reference_page=referenced_second_page,
                ),
                AttributeValue(
                    attribute=page_type_page_reference_attribute,
                    name=f"Page {referenced_third_page.pk}",
                    slug=f"page-{referenced_third_page.pk}",
                    reference_page=referenced_third_page,
                ),
            ]
        )
    )
    # Renamed from the original "fist_page_with_all_ids" (typo).
    first_page_with_all_ids = page_list[0]
    second_page_with_all_ids = page_list[1]
    page_with_single_id = page_list[2]
    associate_attribute_values_to_instance(
        first_page_with_all_ids,
        {
            page_type_page_reference_attribute.pk: [
                first_attr_value,
                second_attr_value,
                third_attr_value,
            ],
        },
    )
    associate_attribute_values_to_instance(
        second_page_with_all_ids,
        {
            page_type_page_reference_attribute.pk: [
                first_attr_value,
                second_attr_value,
                third_attr_value,
            ],
        },
    )
    associate_attribute_values_to_instance(
        page_with_single_id,
        {page_type_page_reference_attribute.pk: [first_attr_value]},
    )
    referenced_first_global_id = to_global_id_or_none(referenced_first_page)
    referenced_second_global_id = to_global_id_or_none(referenced_second_page)
    referenced_third_global_id = to_global_id_or_none(referenced_third_page)
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": page_type_page_reference_attribute.slug,
                    "value": {
                        "reference": {
                            "referencedIds": {
                                filter_type: [
                                    referenced_first_global_id,
                                    referenced_second_global_id,
                                    referenced_third_global_id,
                                ]
                            }
                        }
                    },
                }
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    # Some pages from the fixture must be filtered out for the test to be
    # meaningful.
    assert len(page_list) > len(pages_nodes)
    assert len(pages_nodes) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_references_pages.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_references_products.py | import graphene
import pytest
from ......attribute import AttributeEntityType, AttributeInputType, AttributeType
from ......attribute.models import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from .....core.utils import to_global_id_or_none
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    ("filter_type", "expected_count"),
    [("containsAny", 2), ("containsAll", 1)],
)
def test_pages_query_with_attr_slug_and_attribute_value_reference_to_products(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_product_reference_attribute,
    product_list,
):
    """Filter pages by referenced-product slugs under an explicit attribute slug.

    One page references both products and another references only the second,
    so ``containsAny`` matches 2 pages while ``containsAll`` matches 1.
    """
    # given
    page_type.page_attributes.add(page_type_product_reference_attribute)
    first_product = product_list[0]
    second_product = product_list[1]
    attribute_value_1, attribute_value_2 = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=page_type_product_reference_attribute,
                name=f"Product {first_product.pk}",
                slug=f"product-{first_product.pk}",
                reference_product=first_product,
            ),
            AttributeValue(
                attribute=page_type_product_reference_attribute,
                name=f"Product {second_product.pk}",
                slug=f"product-{second_product.pk}",
                reference_product=second_product,
            ),
        ]
    )
    # page_list[0] references both products; page_list[1] references only one.
    page_with_both_references = page_list[0]
    associate_attribute_values_to_instance(
        page_with_both_references,
        {
            page_type_product_reference_attribute.pk: [
                attribute_value_1,
                attribute_value_2,
            ]
        },
    )
    page_with_single_reference = page_list[1]
    associate_attribute_values_to_instance(
        page_with_single_reference,
        {page_type_product_reference_attribute.pk: [attribute_value_2]},
    )
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": "product-reference",
                    "value": {
                        "reference": {
                            "productSlugs": {
                                filter_type: [first_product.slug, second_product.slug]
                            }
                        }
                    },
                }
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    assert len(pages_nodes) == expected_count
    # The page holding both references is always part of the result.
    assert pages_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
        "Page", page_list[0].pk
    )
@pytest.mark.parametrize(
    ("filter_type", "expected_count"),
    [("containsAny", 2), ("containsAll", 1)],
)
def test_pages_query_with_attribute_value_reference_to_products(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_product_reference_attribute,
    product_list,
):
    """Filter pages by referenced-product slugs without pinning an attribute slug.

    References are spread across two distinct attributes, so the filter must
    match values regardless of which reference attribute holds them.
    """
    # given
    # A second reference attribute so the slug-less filter spans attributes.
    second_product_reference_attribute = Attribute.objects.create(
        slug="second-product-reference",
        name="Product reference",
        type=AttributeType.PRODUCT_TYPE,
        input_type=AttributeInputType.REFERENCE,
        entity_type=AttributeEntityType.PRODUCT,
    )
    page_type.page_attributes.add(page_type_product_reference_attribute)
    page_type.page_attributes.add(second_product_reference_attribute)
    first_product = product_list[0]
    second_product = product_list[1]
    # Each value lives on a different reference attribute.
    attribute_value_1, attribute_value_2 = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=page_type_product_reference_attribute,
                name=f"Product {first_product.pk}",
                slug=f"product-{first_product.pk}",
                reference_product=first_product,
            ),
            AttributeValue(
                attribute=second_product_reference_attribute,
                name=f"Product {second_product.pk}",
                slug=f"product-{second_product.pk}",
                reference_product=second_product,
            ),
        ]
    )
    page_with_both_references = page_list[0]
    associate_attribute_values_to_instance(
        page_with_both_references,
        {
            page_type_product_reference_attribute.pk: [
                attribute_value_1,
            ],
            second_product_reference_attribute.pk: [attribute_value_2],
        },
    )
    page_with_single_reference = page_list[1]
    associate_attribute_values_to_instance(
        page_with_single_reference,
        {second_product_reference_attribute.pk: [attribute_value_2]},
    )
    # No "slug" key here — the filter applies to all reference attributes.
    variables = {
        "where": {
            "attributes": [
                {
                    "value": {
                        "reference": {
                            "productSlugs": {
                                filter_type: [first_product.slug, second_product.slug]
                            }
                        }
                    },
                }
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    assert len(pages_nodes) == expected_count
    # The page holding both references is always part of the result.
    assert pages_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
        "Page", page_list[0].pk
    )
@pytest.mark.parametrize(
    ("filter_type", "expected_count"), [("containsAny", 3), ("containsAll", 2)]
)
def test_pages_query_with_attr_slug_and_attribute_value_referenced_product_ids(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_product_reference_attribute,
    product_list,
):
    """Filter pages by referenced-product global IDs under an explicit attribute slug.

    Two pages reference all three products and a third references only the
    first one, so ``containsAny`` matches 3 pages while ``containsAll`` matches 2.
    """
    # given
    page_type.page_attributes.add(
        page_type_product_reference_attribute,
    )
    first_product = product_list[0]
    second_product = product_list[1]
    third_product = product_list[2]
    first_attr_value, second_attr_value, third_attr_value = (
        AttributeValue.objects.bulk_create(
            [
                AttributeValue(
                    attribute=page_type_product_reference_attribute,
                    name=f"Product {first_product.pk}",
                    # Slugs normalized to lowercase; the original mixed
                    # "Product-"/"product-" casing was inconsistent (the slugs
                    # are never filtered on in this test).
                    slug=f"product-{first_product.pk}",
                    reference_product=first_product,
                ),
                AttributeValue(
                    attribute=page_type_product_reference_attribute,
                    name=f"Product {second_product.pk}",
                    slug=f"product-{second_product.pk}",
                    reference_product=second_product,
                ),
                AttributeValue(
                    attribute=page_type_product_reference_attribute,
                    name=f"Product {third_product.pk}",
                    slug=f"product-{third_product.pk}",
                    reference_product=third_product,
                ),
            ]
        )
    )
    # Renamed from the original "fist_page_with_all_ids" (typo).
    first_page_with_all_ids = page_list[0]
    second_page_with_all_ids = page_list[1]
    page_with_single_id = page_list[2]
    associate_attribute_values_to_instance(
        first_page_with_all_ids,
        {
            page_type_product_reference_attribute.pk: [
                first_attr_value,
                second_attr_value,
                third_attr_value,
            ],
        },
    )
    associate_attribute_values_to_instance(
        second_page_with_all_ids,
        {
            page_type_product_reference_attribute.pk: [
                first_attr_value,
                second_attr_value,
                third_attr_value,
            ],
        },
    )
    associate_attribute_values_to_instance(
        page_with_single_id,
        {
            page_type_product_reference_attribute.pk: [
                first_attr_value,
            ],
        },
    )
    referenced_first_global_id = to_global_id_or_none(first_product)
    referenced_second_global_id = to_global_id_or_none(second_product)
    referenced_third_global_id = to_global_id_or_none(third_product)
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": page_type_product_reference_attribute.slug,
                    "value": {
                        "reference": {
                            "referencedIds": {
                                filter_type: [
                                    referenced_first_global_id,
                                    referenced_second_global_id,
                                    referenced_third_global_id,
                                ]
                            }
                        }
                    },
                },
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    # Some pages from the fixture must be filtered out for the test to be
    # meaningful.
    assert len(page_list) > len(pages_nodes)
    assert len(pages_nodes) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_references_products.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_references_variants.py | import graphene
import pytest
from ......attribute import AttributeEntityType, AttributeInputType, AttributeType
from ......attribute.models import Attribute, AttributeValue
from ......attribute.utils import associate_attribute_values_to_instance
from .....core.utils import to_global_id_or_none
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    ("filter_type", "expected_count"), [("containsAny", 2), ("containsAll", 1)]
)
def test_pages_query_with_attribute_value_reference_to_product_variants(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_variant_reference_attribute,
    product_variant_list,
):
    """Filter pages by referenced-variant SKUs without pinning an attribute slug.

    References are spread across two distinct attributes, so the filter must
    match values regardless of which reference attribute holds them.
    """
    # given
    page_type.page_attributes.add(page_type_variant_reference_attribute)
    # NOTE(review): slug/name say "product" but the entity type is
    # PRODUCT_VARIANT — looks like copy-paste from the products test; also this
    # attribute is never added to page_type.page_attributes, unlike the
    # analogous page/product tests. Confirm whether that is intentional.
    second_variant_reference_attribute = Attribute.objects.create(
        slug="second-product-reference",
        name="Product reference",
        type=AttributeType.PRODUCT_TYPE,
        input_type=AttributeInputType.REFERENCE,
        entity_type=AttributeEntityType.PRODUCT_VARIANT,
    )
    first_variant_sku = "test-variant-1"
    second_variant_sku = "test-variant-2"
    # Give the referenced variants deterministic SKUs to filter on.
    first_variant = product_variant_list[0]
    first_variant.sku = first_variant_sku
    first_variant.save()
    second_variant = product_variant_list[1]
    second_variant.sku = second_variant_sku
    second_variant.save()
    # Each value lives on a different reference attribute.
    attribute_value_1, attribute_value_2 = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=page_type_variant_reference_attribute,
                name=f"Variant {first_variant.pk}",
                slug=f"variant-{first_variant.pk}",
                reference_variant=first_variant,
            ),
            AttributeValue(
                attribute=second_variant_reference_attribute,
                name=f"Variant {second_variant.pk}",
                slug=f"variant-{second_variant.pk}",
                reference_variant=second_variant,
            ),
        ]
    )
    page_with_both_references = page_list[0]
    associate_attribute_values_to_instance(
        page_with_both_references,
        {
            page_type_variant_reference_attribute.pk: [attribute_value_1],
            second_variant_reference_attribute.pk: [attribute_value_2],
        },
    )
    page_with_single_reference = page_list[1]
    associate_attribute_values_to_instance(
        page_with_single_reference,
        {second_variant_reference_attribute.pk: [attribute_value_2]},
    )
    # No "slug" key here — the filter applies to all reference attributes.
    variables = {
        "where": {
            "attributes": [
                {
                    "value": {
                        "reference": {
                            "productVariantSkus": {
                                filter_type: [
                                    first_variant_sku,
                                    second_variant_sku,
                                ]
                            }
                        }
                    },
                }
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    assert len(pages_nodes) == expected_count
    # The page holding both references is always part of the result.
    assert pages_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
        "Page", page_list[0].pk
    )
@pytest.mark.parametrize(
    ("filter_type", "expected_count"), [("containsAny", 2), ("containsAll", 1)]
)
def test_pages_query_with_attr_slug_and_attribute_value_reference_to_product_variants(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_variant_reference_attribute,
    product_variant_list,
):
    """Filter pages by referenced-variant SKUs under an explicit attribute slug.

    One page references both variants and another references only the second,
    so ``containsAny`` matches 2 pages while ``containsAll`` matches 1.
    """
    # given
    page_type.page_attributes.add(page_type_variant_reference_attribute)
    first_variant_sku = "test-variant-1"
    second_variant_sku = "test-variant-2"
    # Give the referenced variants deterministic SKUs to filter on.
    first_variant = product_variant_list[0]
    first_variant.sku = first_variant_sku
    first_variant.save()
    second_variant = product_variant_list[1]
    second_variant.sku = second_variant_sku
    second_variant.save()
    attribute_value_1, attribute_value_2 = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=page_type_variant_reference_attribute,
                name=f"Variant {first_variant.pk}",
                slug=f"variant-{first_variant.pk}",
                reference_variant=first_variant,
            ),
            AttributeValue(
                attribute=page_type_variant_reference_attribute,
                name=f"Variant {second_variant.pk}",
                slug=f"variant-{second_variant.pk}",
                reference_variant=second_variant,
            ),
        ]
    )
    # page_list[0] references both variants; page_list[1] references only one.
    page_with_both_references = page_list[0]
    associate_attribute_values_to_instance(
        page_with_both_references,
        {
            page_type_variant_reference_attribute.pk: [
                attribute_value_1,
                attribute_value_2,
            ]
        },
    )
    page_with_single_reference = page_list[1]
    associate_attribute_values_to_instance(
        page_with_single_reference,
        {page_type_variant_reference_attribute.pk: [attribute_value_2]},
    )
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": "variant-reference",
                    "value": {
                        "reference": {
                            "productVariantSkus": {
                                filter_type: [
                                    first_variant_sku,
                                    second_variant_sku,
                                ]
                            }
                        }
                    },
                }
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    assert len(pages_nodes) == expected_count
    # The page holding both references is always part of the result.
    assert pages_nodes[0]["node"]["id"] == graphene.Node.to_global_id(
        "Page", page_list[0].pk
    )
@pytest.mark.parametrize(
    ("filter_type", "expected_count"), [("containsAny", 3), ("containsAll", 2)]
)
def test_pages_query_with_attr_slug_attribute_value_referenced_variant_ids(
    filter_type,
    expected_count,
    staff_api_client,
    page_list,
    page_type,
    page_type_variant_reference_attribute,
    product_variant_list,
):
    """Filter pages by referenced-variant global IDs under an explicit attribute slug.

    Two pages reference all three variants and a third references only the
    first one, so ``containsAny`` matches 3 pages while ``containsAll`` matches 2.
    """
    # given
    page_type.page_attributes.add(
        page_type_variant_reference_attribute,
    )
    first_variant = product_variant_list[0]
    second_variant = product_variant_list[1]
    third_variant = product_variant_list[2]
    first_attr_value, second_attr_value, third_attr_value = (
        AttributeValue.objects.bulk_create(
            [
                AttributeValue(
                    attribute=page_type_variant_reference_attribute,
                    name=f"Variant {first_variant.pk}",
                    slug=f"variant-{first_variant.pk}",
                    reference_variant=first_variant,
                ),
                AttributeValue(
                    attribute=page_type_variant_reference_attribute,
                    name=f"Variant {second_variant.pk}",
                    slug=f"variant-{second_variant.pk}",
                    reference_variant=second_variant,
                ),
                AttributeValue(
                    attribute=page_type_variant_reference_attribute,
                    name=f"Variant {third_variant.pk}",
                    slug=f"variant-{third_variant.pk}",
                    reference_variant=third_variant,
                ),
            ]
        )
    )
    # Renamed from the original "fist_page_with_all_ids" (typo).
    first_page_with_all_ids = page_list[0]
    second_page_with_all_ids = page_list[1]
    page_with_single_id = page_list[2]
    associate_attribute_values_to_instance(
        first_page_with_all_ids,
        {
            page_type_variant_reference_attribute.pk: [
                first_attr_value,
                second_attr_value,
                third_attr_value,
            ],
        },
    )
    associate_attribute_values_to_instance(
        second_page_with_all_ids,
        {
            page_type_variant_reference_attribute.pk: [
                first_attr_value,
                second_attr_value,
                third_attr_value,
            ],
        },
    )
    associate_attribute_values_to_instance(
        page_with_single_id,
        {page_type_variant_reference_attribute.pk: [first_attr_value]},
    )
    referenced_first_global_id = to_global_id_or_none(first_variant)
    referenced_second_global_id = to_global_id_or_none(second_variant)
    referenced_third_global_id = to_global_id_or_none(third_variant)
    variables = {
        "where": {
            "attributes": [
                {
                    "slug": page_type_variant_reference_attribute.slug,
                    "value": {
                        "reference": {
                            "referencedIds": {
                                filter_type: [
                                    referenced_first_global_id,
                                    referenced_second_global_id,
                                    referenced_third_global_id,
                                ]
                            }
                        }
                    },
                }
            ]
        }
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        variables,
    )
    # then
    content = get_graphql_content(response)
    pages_nodes = content["data"]["pages"]["edges"]
    # Some pages from the fixture must be filtered out for the test to be
    # meaningful.
    assert len(page_list) > len(pages_nodes)
    assert len(pages_nodes) == expected_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_references_variants.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_slug.py | import pytest
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    ("where", "pages_count"),
    [
        ({"slug": {"eq": "test-url-1"}}, 1),
        ({"slug": {"oneOf": ["test-url-1", "test-url-2"]}}, 2),
    ],
)
def test_pages_with_where_slug(where, pages_count, staff_api_client, page_list):
    """Filtering pages by ``slug`` (eq / oneOf) returns the expected number of pages."""
    # given / when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": where},
    )
    # then
    edges = get_graphql_content(response)["data"]["pages"]["edges"]
    assert len(edges) == pages_count
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_slug.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/page/tests/queries/pages_with_where/test_with_where_validation.py | import pytest
from .....tests.utils import get_graphql_content
from .shared import QUERY_PAGES_WITH_WHERE
@pytest.mark.parametrize(
    "attribute_value_filter",
    [{"numeric": None}, {"name": None}, {"slug": None}, {"boolean": False}],
)
def test_pages_query_failed_filter_validation_for_numeric_with_slug_input(
    attribute_value_filter, staff_api_client, numeric_attribute_without_unit, page_type
):
    """Value filters incompatible with a numeric attribute yield a GraphQL error."""
    # given
    attr_slug = "numeric"
    numeric_attribute_without_unit.slug = attr_slug
    numeric_attribute_without_unit.save()
    page_type.page_attributes.add(numeric_attribute_without_unit)
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {
            "where": {
                "attributes": [
                    {"slug": attr_slug, "value": attribute_value_filter}
                ]
            }
        },
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert "errors" in content
    assert content["data"]["pages"] is None
@pytest.mark.parametrize(
    "attribute_value_filter",
    [{"boolean": None}, {"name": None}, {"slug": None}, {"numeric": {"eq": 1.2}}],
)
def test_pages_query_failed_filter_validation_for_boolean_with_slug_input(
    attribute_value_filter, staff_api_client, boolean_attribute, page_type
):
    """Value filters incompatible with a boolean attribute yield a GraphQL error."""
    # given
    attr_slug = "boolean"
    boolean_attribute.slug = attr_slug
    boolean_attribute.save()
    page_type.page_attributes.add(boolean_attribute)
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {
            "where": {
                "attributes": [
                    {"slug": attr_slug, "value": attribute_value_filter}
                ]
            }
        },
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert "errors" in content
    assert content["data"]["pages"] is None
@pytest.mark.parametrize(
    "attribute_value_filter",
    [
        {"dateTime": None},
        {"name": None},
        {"slug": None},
        {"numeric": {"eq": 1.2}},
        {"reference": {"referencedIds": {"containsAll": ["global-id-1"]}}},
    ],
)
def test_pages_query_failed_filter_validation_for_date_attribute_with_slug_input(
    attribute_value_filter, staff_api_client, date_attribute, page_type
):
    """Value filters incompatible with a date attribute yield a GraphQL error."""
    # given
    attr_slug = "date"
    date_attribute.slug = attr_slug
    date_attribute.save()
    page_type.page_attributes.add(date_attribute)
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {
            "where": {
                "attributes": [
                    {"slug": attr_slug, "value": attribute_value_filter}
                ]
            }
        },
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert "errors" in content
    assert content["data"]["pages"] is None
@pytest.mark.parametrize(
    "attribute_value_filter",
    [
        {"dateTime": None},
        {"name": None},
        {"slug": None},
        {"numeric": {"eq": 1.2}},
        {"date": None},
        {"reference": {"referencedIds": {"containsAll": ["global-id-1"]}}},
    ],
)
def test_pages_query_failed_filter_validation_for_datetime_attribute_with_slug_input(
    attribute_value_filter, staff_api_client, date_time_attribute, page_type
):
    """Value filters incompatible with a datetime attribute yield a GraphQL error."""
    # given
    attr_slug = "date_time"
    date_time_attribute.slug = attr_slug
    date_time_attribute.save()
    page_type.page_attributes.add(date_time_attribute)
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {
            "where": {
                "attributes": [
                    {"slug": attr_slug, "value": attribute_value_filter}
                ]
            }
        },
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert "errors" in content
    assert content["data"]["pages"] is None
@pytest.mark.parametrize(
    "attribute_value_filter",
    [
        {"slug": None, "value": None},
        {"slug": None, "value": {"name": {"eq": "name"}}},
    ],
)
def test_pages_query_failed_filter_validation_null_in_input(
    attribute_value_filter,
    staff_api_client,
):
    """A null attribute slug in the where input is rejected with a GraphQL error."""
    # given / when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": {"attributes": [attribute_value_filter]}},
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert "errors" in content
    assert content["data"]["pages"] is None
@pytest.mark.parametrize(
    "attribute_value_filter",
    [
        {"slug": None},
        {"name": None},
        {
            "slug": {"eq": "true_slug"},
            "name": {"eq": "name"},
        },
        {
            "slug": {"oneOf": ["true_slug"]},
            "name": {"oneOf": ["name"]},
        },
    ],
)
def test_pages_query_failed_filter_validation_for_basic_value_fields_with_attr_slug(
    attribute_value_filter,
    staff_api_client,
):
    """Null or mutually exclusive name/slug value filters yield a GraphQL error."""
    # given / when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {
            "where": {
                "attributes": [
                    {"slug": "page-size", "value": attribute_value_filter}
                ]
            }
        },
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert "errors" in content
    assert content["data"]["pages"] is None
def test_pages_query_failed_filter_validation_for_duplicated_attr_slug(
    staff_api_client,
):
    """Repeating the same attribute slug in the where input yields a GraphQL error."""
    # given
    duplicated_slug = "page-size"
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {
            "where": {
                "attributes": [
                    {"slug": duplicated_slug},
                    {"slug": duplicated_slug},
                ]
            }
        },
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert "errors" in content
    assert content["data"]["pages"] is None
@pytest.mark.parametrize(
    "attribute_value_filter",
    [
        {},
        {"reference": {}},
        {"reference": None},
        {"reference": {"referencedIds": {"containsAll": []}}},
        {"reference": {"pageSlugs": {"containsAll": []}}},
        {"reference": {"productSlugs": {"containsAll": []}}},
        {"reference": {"productVariantSkus": {"containsAll": []}}},
        {"reference": {"pageSlugs": {"containsAny": []}}},
        {"reference": {"productSlugs": {"containsAny": []}}},
        {"reference": {"productVariantSkus": {"containsAny": []}}},
        {"reference": {"referencedIds": {"containsAny": []}}},
        {"reference": {"pageSlugs": {"containsAny": [], "containsAll": []}}},
        {"reference": {"productSlugs": {"containsAny": [], "containsAll": []}}},
        {"reference": {"productVariantSkus": {"containsAny": [], "containsAll": []}}},
        {"reference": {"referencedIds": {"containsAny": [], "containsAll": []}}},
        {"reference": {"referencedIds": {"containsAll": None}}},
        {"reference": {"pageSlugs": {"containsAll": None}}},
        {"reference": {"productSlugs": {"containsAll": None}}},
        {"reference": {"productVariantSkus": {"containsAll": None}}},
        {"reference": {"pageSlugs": {"containsAny": None}}},
        {"reference": {"productSlugs": {"containsAny": None}}},
        {"reference": {"productVariantSkus": {"containsAny": None}}},
        {"reference": {"referencedIds": {"containsAny": None}}},
        {"reference": {"referencedIds": {"containsAny": ["non-existing-id"]}}},
        {"reference": {"referencedIds": {"containsAll": ["non-existing-id"]}}},
        # ID of not valid object
        {"reference": {"referencedIds": {"containsAny": ["T3JkZXI6MQ=="]}}},
        {"reference": {"referencedIds": {"containsAll": ["T3JkZXI6MQ=="]}}},
    ],
)
def test_pages_query_failed_filter_validation_for_reference_attribute_with_slug_input(
    attribute_value_filter,
    staff_api_client,
    page_type,
    page_type_product_reference_attribute,
):
    """Empty, null, or malformed reference value filters yield a GraphQL error."""
    # given
    where_input = {
        "attributes": [
            {
                "slug": "reference-product",
                "value": attribute_value_filter,
            }
        ]
    }
    # when
    response = staff_api_client.post_graphql(
        QUERY_PAGES_WITH_WHERE,
        {"where": where_input},
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert "errors" in content
    assert content["data"]["pages"] is None
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/page/tests/queries/pages_with_where/test_with_where_validation.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/attribute/lock_objects.py | from django.db.models import QuerySet
from .models.base import Attribute, AttributeValue
def attribute_value_qs_select_for_update() -> QuerySet[AttributeValue]:
    """Return attribute values in a stable order, row-locked on this table only."""
    ordered_values = AttributeValue.objects.order_by("sort_order", "pk")
    return ordered_values.select_for_update(of=["self"])
def attribute_reference_product_types_qs_select_for_update() -> QuerySet:
    """Return attribute↔product-type through rows, pk-ordered and row-locked."""
    through_model = Attribute.reference_product_types.through
    return through_model.objects.order_by("pk").select_for_update(of=["self"])
def attribute_reference_page_types_qs_select_for_update() -> QuerySet:
    """Return attribute↔page-type through rows, pk-ordered and row-locked."""
    through_model = Attribute.reference_page_types.through
    return through_model.objects.order_by("pk").select_for_update(of=["self"])
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/attribute/lock_objects.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/attribute/tests/type_handlers/test_reference_handler.py | from collections import defaultdict
import graphene
from django.utils.text import slugify
from text_unidecode import unidecode
from ...enums import AttributeValueBulkActionEnum
from ...utils.shared import AttrValuesInput
from ...utils.type_handlers import AttributeInputErrors, ReferenceAttributeHandler
def test_reference_handler_clean_and_validate_product_reference(
    product_type_product_reference_attribute, product_list
):
    """Valid product references validate cleanly and resolve to objects."""
    # given
    attr = product_type_product_reference_attribute
    refs = [graphene.Node.to_global_id("Product", prod.id) for prod in product_list]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=refs,
    )
    errors = defaultdict(list)
    handler = ReferenceAttributeHandler(attr, values)
    # when
    handler.clean_and_validate(errors)
    # then
    assert not errors
    assert values.references
    assert values.reference_objects
    assert set(values.reference_objects) == set(product_list)
def test_reference_handler_clean_and_validate_page_reference(
    product_type_page_reference_attribute, page_list
):
    """Valid page references validate cleanly and resolve to page objects."""
    # given
    attr = product_type_page_reference_attribute
    refs = [graphene.Node.to_global_id("Page", page.id) for page in page_list]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=refs,
    )
    errors = defaultdict(list)
    handler = ReferenceAttributeHandler(attr, values)
    # when
    handler.clean_and_validate(errors)
    # then
    assert not errors
    assert values.references
    assert values.reference_objects
    assert set(values.reference_objects) == set(page_list)
def test_reference_handler_clean_and_validate_variant_reference(
    page_type_variant_reference_attribute, product_variant_list
):
    """Valid variant references validate cleanly and resolve to variants."""
    # given
    attr = page_type_variant_reference_attribute
    refs = [
        graphene.Node.to_global_id("ProductVariant", variant.id)
        for variant in product_variant_list
    ]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=refs,
    )
    errors = defaultdict(list)
    handler = ReferenceAttributeHandler(attr, values)
    # when
    handler.clean_and_validate(errors)
    # then
    assert not errors
    assert values.references
    assert values.reference_objects
    assert set(values.reference_objects) == set(product_variant_list)
def test_reference_handler_clean_and_validate_product_reference_with_reference_types(
    product_type_product_reference_attribute, product_list, product_type
):
    """Product references pass when their type is in reference_product_types."""
    # given
    attr = product_type_product_reference_attribute
    attr.reference_product_types.add(product_type)
    refs = [graphene.Node.to_global_id("Product", prod.id) for prod in product_list]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=refs,
    )
    errors = defaultdict(list)
    handler = ReferenceAttributeHandler(attr, values)
    # when
    handler.clean_and_validate(errors)
    # then
    assert not errors
    assert values.references
    assert values.reference_objects
    assert set(values.reference_objects) == set(product_list)
def test_reference_handler_clean_and_validate_page_reference_with_reference_types(
    product_type_page_reference_attribute, page_list, page_type
):
    """Page references pass when their type is in reference_page_types."""
    # given
    attr = product_type_page_reference_attribute
    attr.reference_page_types.add(page_type)
    refs = [graphene.Node.to_global_id("Page", page.id) for page in page_list]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=refs,
    )
    errors = defaultdict(list)
    handler = ReferenceAttributeHandler(attr, values)
    # when
    handler.clean_and_validate(errors)
    # then
    assert not errors
    assert values.references
    assert values.reference_objects
    assert set(values.reference_objects) == set(page_list)
def test_reference_handler_clean_and_validate_variant_reference_with_reference_types(
    page_type_variant_reference_attribute, product_variant_list, product_type
):
    """Variant references pass when their product type is an allowed reference type."""
    # given
    attr = page_type_variant_reference_attribute
    attr.reference_product_types.add(product_type)
    refs = [
        graphene.Node.to_global_id("ProductVariant", variant.id)
        for variant in product_variant_list
    ]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=refs,
    )
    errors = defaultdict(list)
    handler = ReferenceAttributeHandler(attr, values)
    # when
    handler.clean_and_validate(errors)
    # then
    assert not errors
    assert values.references
    assert values.reference_objects
    assert set(values.reference_objects) == set(product_variant_list)
def test_reference_handler_clean_and_validate_category_reference(
    product_type_category_reference_attribute, category_list
):
    """Valid category references validate cleanly and resolve to categories."""
    # given
    attr = product_type_category_reference_attribute
    refs = [graphene.Node.to_global_id("Category", cat.id) for cat in category_list]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=refs,
    )
    errors = defaultdict(list)
    handler = ReferenceAttributeHandler(attr, values)
    # when
    handler.clean_and_validate(errors)
    # then
    assert not errors
    assert values.references
    assert values.reference_objects
    assert set(values.reference_objects) == set(category_list)
def test_reference_handler_clean_and_validate_collection_reference(
    product_type_collection_reference_attribute, collection_list
):
    """Valid collection references validate cleanly and resolve to collections."""
    # given
    attr = product_type_collection_reference_attribute
    refs = [
        graphene.Node.to_global_id("Collection", coll.id) for coll in collection_list
    ]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=refs,
    )
    errors = defaultdict(list)
    handler = ReferenceAttributeHandler(attr, values)
    # when
    handler.clean_and_validate(errors)
    # then
    assert not errors
    assert values.references
    assert values.reference_objects
    assert set(values.reference_objects) == set(collection_list)
def test_reference_handler_clean_and_validate_invalid_product_reference_type(
    product_type_product_reference_attribute,
    product,
    product_type_with_variant_attributes,
):
    """A product whose type is not an allowed reference type is rejected."""
    # given
    attr = product_type_product_reference_attribute
    attr.reference_product_types.add(product_type_with_variant_attributes)
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=[graphene.Node.to_global_id("Product", product.id)],
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.INVALID_REFERENCE_TYPE]
def test_reference_handler_clean_and_validate_invalid_product_variant_ref_type(
    product_type_variant_reference_attribute,
    variant,
    product_type_with_variant_attributes,
):
    """A variant whose product type is not an allowed reference type is rejected."""
    # given
    attr = product_type_variant_reference_attribute
    attr.reference_product_types.add(product_type_with_variant_attributes)
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=[graphene.Node.to_global_id("ProductVariant", variant.id)],
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.INVALID_REFERENCE_TYPE]
def test_reference_handler_clean_and_validate_invalid_page_reference_type(
    product_type_page_reference_attribute, page, page_type_list
):
    """A page whose type is not an allowed reference page type is rejected."""
    # given
    attr = product_type_page_reference_attribute
    attr.reference_page_types.add(page_type_list[1])
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=[graphene.Node.to_global_id("Page", page.id)],
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.INVALID_REFERENCE_TYPE]
def test_single_reference_handler_clean_and_validate_page_reference(
    product_type_page_single_reference_attribute, page
):
    """A single page reference validates cleanly and resolves to the page."""
    # given
    attr = product_type_page_single_reference_attribute
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Page", page.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert not errors
    assert values.reference_objects == [page]
def test_single_reference_handler_clean_and_validate_variant_reference(
    product_type_variant_single_reference_attribute, variant
):
    """A single variant reference validates cleanly and resolves to the variant."""
    # given
    attr = product_type_variant_single_reference_attribute
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("ProductVariant", variant.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert not errors
    assert values.reference_objects == [variant]
def test_single_reference_handler_clean_and_validate_category_reference(
    product_type_category_single_reference_attribute, category
):
    """A single category reference validates cleanly and resolves to the category."""
    # given
    attr = product_type_category_single_reference_attribute
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Category", category.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert not errors
    assert values.reference_objects == [category]
def test_single_reference_handler_clean_and_validate_collection_reference(
    page_type_collection_single_reference_attribute, collection
):
    """A single collection reference validates cleanly and resolves to it."""
    # given
    attr = page_type_collection_single_reference_attribute
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Collection", collection.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert not errors
    assert values.reference_objects == [collection]
def test_single_reference_handler_clean_and_validate_product_ref_with_reference_types(
    product_type_product_single_reference_attribute, product, product_type
):
    """A single product reference passes when its type is an allowed reference type."""
    # given
    attr = product_type_product_single_reference_attribute
    attr.reference_product_types.add(product_type)
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Product", product.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert not errors
    assert values.reference
    assert values.reference_objects == [product]
def test_single_reference_handler_clean_and_validate_page_ref_with_reference_types(
    product_type_page_single_reference_attribute, page, page_type
):
    """A single page reference passes when its type is an allowed page type."""
    # given
    attr = product_type_page_single_reference_attribute
    attr.reference_page_types.add(page_type)
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Page", page.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert not errors
    assert values.reference
    assert values.reference_objects == [page]
def test_single_reference_handler_clean_and_validate_variant_ref_with_reference_types(
    page_type_variant_single_reference_attribute, variant, product_type
):
    """A single variant reference passes when its product type is allowed."""
    # given
    attr = page_type_variant_single_reference_attribute
    attr.reference_product_types.add(product_type)
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("ProductVariant", variant.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert not errors
    assert values.reference
    assert values.reference_objects == [variant]
def test_single_reference_handler_clean_and_validate_success(
    product_type_product_single_reference_attribute, product
):
    """A single product reference validates cleanly and resolves to the product."""
    # given
    attr = product_type_product_single_reference_attribute
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Product", product.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert not errors
    assert values.reference_objects == [product]
def test_reference_handler_clean_and_validate_value_required(
    product_type_product_reference_attribute,
):
    """An empty references list on a value-required attribute raises an error."""
    # given
    attr = product_type_product_reference_attribute
    attr.value_required = True
    attr.save(update_fields=["value_required"])
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id), references=[]
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.REFERENCE_REQUIRED]
def test_single_reference_handlers_clean_and_validate_value_required(
    product_type_product_single_reference_attribute,
):
    """A missing single reference on a value-required attribute raises an error."""
    # given
    attr = product_type_product_single_reference_attribute
    attr.value_required = True
    attr.save(update_fields=["value_required"])
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id), reference=None
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.REFERENCE_REQUIRED]
def test_reference_handler_clean_and_validate_invalid_reference(
    product_type_product_reference_attribute, product
):
    """Wrong-typed, nonexistent, and malformed reference IDs are all rejected."""
    # given
    attr = product_type_product_reference_attribute
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        references=[
            graphene.Node.to_global_id("ProductVariant", product.id),
            graphene.Node.to_global_id("Product", "123"),
            "ABC",
        ],
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.INVALID_REFERENCE]
def test_single_reference_handler_clean_and_validate_invalid_reference(
    product_type_product_single_reference_attribute, product
):
    """A single reference pointing at an unsupported type is rejected."""
    # given
    attr = product_type_product_single_reference_attribute
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Order", product.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.INVALID_REFERENCE]
def test_single_reference_handler_clean_and_validate_invalid_product_reference_type(
    product_type_product_single_reference_attribute,
    product,
    product_type_with_variant_attributes,
):
    """A single product reference with a disallowed product type is rejected."""
    # given
    attr = product_type_product_single_reference_attribute
    attr.reference_product_types.add(product_type_with_variant_attributes)
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Product", product.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.INVALID_REFERENCE_TYPE]
def test_single_reference_handler_clean_and_validate_invalid_product_variant_ref_type(
    product_type_variant_single_reference_attribute,
    variant,
    product_type_with_variant_attributes,
):
    """A single variant reference with a disallowed product type is rejected."""
    # given
    attr = product_type_variant_single_reference_attribute
    attr.reference_product_types.add(product_type_with_variant_attributes)
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("ProductVariant", variant.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.INVALID_REFERENCE_TYPE]
def test_single_reference_handler_clean_and_validate_invalid_page_reference_type(
    product_type_page_single_reference_attribute, page, page_type_list
):
    """A single page reference with a disallowed page type is rejected."""
    # given
    attr = product_type_page_single_reference_attribute
    attr.reference_page_types.add(page_type_list[1])
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference=graphene.Node.to_global_id("Page", page.id),
    )
    errors = defaultdict(list)
    # when
    ReferenceAttributeHandler(attr, values).clean_and_validate(errors)
    # then
    assert errors[AttributeInputErrors.INVALID_REFERENCE_TYPE]
def test_reference_handler_pre_save_value(
    product_type_product_reference_attribute, product_list, product
):
    """pre_save_value yields one GET_OR_CREATE payload per referenced product."""
    # given
    attr = product_type_product_reference_attribute
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference_objects=product_list,
    )
    handler = ReferenceAttributeHandler(attr, values)
    # when
    result = handler.pre_save_value(product)
    # then
    assert len(result) == len(product_list)
    for (action, value_data), referenced in zip(result, product_list):
        assert action == AttributeValueBulkActionEnum.GET_OR_CREATE
        assert value_data["attribute"] == attr
        assert value_data["slug"] == slugify(
            unidecode(f"{product.id}_{referenced.id}")
        )
        assert value_data["defaults"]["name"] == referenced.name
        assert value_data["reference_product"] == referenced
def test_single_reference_handler_pre_save_value(
    product_type_product_single_reference_attribute, product_list, product
):
    """pre_save_value builds a single GET_OR_CREATE payload for one reference."""
    # given
    attr = product_type_product_single_reference_attribute
    referenced = product_list[0]
    values = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", attr.id),
        reference_objects=[referenced],
    )
    handler = ReferenceAttributeHandler(attr, values)
    # when
    result = handler.pre_save_value(product)
    # then
    assert len(result) == 1
    action, value_data = result[0]
    assert action == AttributeValueBulkActionEnum.GET_OR_CREATE
    assert value_data["attribute"] == attr
    assert value_data["slug"] == slugify(unidecode(f"{product.id}_{referenced.id}"))
    assert value_data["defaults"]["name"] == referenced.name
    assert value_data["reference_product"] == referenced
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/attribute/tests/type_handlers/test_reference_handler.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 508,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/attribute/utils/attribute_assignment.py | from collections import defaultdict
from typing import TYPE_CHECKING, cast
import graphene
from django.core.exceptions import ValidationError
from django.db.models import Prefetch, Q
from django.db.models.expressions import Exists, OuterRef
from graphql.error import GraphQLError
from ....attribute import AttributeInputType
from ....attribute import models as attribute_models
from ....attribute.models import AttributeValue
from ....attribute.utils import associate_attribute_values_to_instance
from ....page import models as page_models
from ....page.error_codes import PageErrorCode
from ....product import models as product_models
from ....product.error_codes import ProductErrorCode
from ...core.utils import from_global_id_or_error
from ...core.validators import validate_one_of_args_is_in_mutation
from ..enums import AttributeValueBulkActionEnum
from .shared import (
T_ERROR_DICT,
T_INSTANCE,
AttrValuesInput,
get_assignment_model_and_fk,
)
from .type_handlers import (
AttributeTypeHandler,
BooleanAttributeHandler,
DateTimeAttributeHandler,
FileAttributeHandler,
LegacyValuesHandler,
MultiSelectableAttributeHandler,
NumericAttributeHandler,
PlainTextAttributeHandler,
ReferenceAttributeHandler,
RichTextAttributeHandler,
SelectableAttributeHandler,
)
if TYPE_CHECKING:
from django.db.models import QuerySet
# (attribute, parsed values input) pairs, as produced by
# AttributeAssignmentMixin.clean_input.
T_INPUT_MAP = list[tuple["attribute_models.Attribute", "AttrValuesInput"]]
# bulk action -> attribute -> list of prepared value payloads, as built by
# AttributeAssignmentMixin.pre_save_values.
T_PRE_SAVE_BULK = dict[
    AttributeValueBulkActionEnum, dict[attribute_models.Attribute, list]
]
class AttributeAssignmentMixin:
"""Handles cleaning, validation, and saving of attribute data."""
HANDLER_MAPPING = {
AttributeInputType.DROPDOWN: SelectableAttributeHandler,
AttributeInputType.SWATCH: SelectableAttributeHandler,
AttributeInputType.MULTISELECT: MultiSelectableAttributeHandler,
AttributeInputType.FILE: FileAttributeHandler,
AttributeInputType.REFERENCE: ReferenceAttributeHandler,
AttributeInputType.SINGLE_REFERENCE: ReferenceAttributeHandler,
AttributeInputType.RICH_TEXT: RichTextAttributeHandler,
AttributeInputType.PLAIN_TEXT: PlainTextAttributeHandler,
AttributeInputType.NUMERIC: NumericAttributeHandler,
AttributeInputType.DATE: DateTimeAttributeHandler,
AttributeInputType.DATE_TIME: DateTimeAttributeHandler,
AttributeInputType.BOOLEAN: BooleanAttributeHandler,
}
@classmethod
def _resolve_attribute_nodes(
cls,
qs: "QuerySet",
error_class,
*,
id_map: dict[int, str],
ext_ref_set: set[str],
):
"""Retrieve attributes nodes from given identifiers."""
nodes = qs.filter(
Q(pk__in=id_map.keys()) | Q(external_reference__in=ext_ref_set)
)
nodes = list(
nodes.prefetch_related(
Prefetch(
"reference_product_types",
queryset=product_models.ProductType.objects.only("id"),
),
Prefetch(
"reference_page_types",
queryset=page_models.PageType.objects.only("id"),
),
)
)
resolved_pks = {node.pk for node in nodes}
resolved_ext_refs = {node.external_reference for node in nodes}
missing_pks = [gid for pk, gid in id_map.items() if pk not in resolved_pks]
missing_ext_refs = list(ext_ref_set - resolved_ext_refs)
if missing_pks or missing_ext_refs:
missing = [f"ID: {gid}" for gid in missing_pks] + [
f"ExtRef: {ref}" for ref in missing_ext_refs
]
raise ValidationError(
f"Could not resolve attributes: {', '.join(missing)}.",
code=error_class.NOT_FOUND.value,
)
return nodes
@classmethod
def _clear_prefetch_cache(cls, attributes: list[attribute_models.Attribute]):
"""Clear prefetched objects cache.
After validation is complete, the prefetched relations are no longer needed.
"""
for attribute in attributes:
if hasattr(attribute, "_prefetched_objects_cache"):
attribute._prefetched_objects_cache.clear()
@classmethod
def clean_input(
cls,
raw_input: list[dict],
attributes_qs: "QuerySet",
creation: bool = True,
is_page_attributes: bool = False,
) -> T_INPUT_MAP:
"""Resolve, validate, and prepare attribute input."""
error_class = PageErrorCode if is_page_attributes else ProductErrorCode
id_to_values_input_map, ext_ref_to_values_input_map = (
cls._prepare_attribute_input_maps(raw_input, error_class)
)
attributes = cls._resolve_attribute_nodes(
attributes_qs,
error_class,
id_map={pk: v.global_id for pk, v in id_to_values_input_map.items()}, # type: ignore[misc]
ext_ref_set=set(ext_ref_to_values_input_map.keys()),
)
cleaned_input = cls._validate_and_clean_attributes(
attributes,
attributes_qs,
id_to_values_input_map,
ext_ref_to_values_input_map,
error_class,
creation,
)
# Clear prefetched cache after validation to prevent memory leaks
cls._clear_prefetch_cache(attributes)
return cleaned_input
@classmethod
def _prepare_attribute_input_maps(
cls, raw_input: list[dict], error_class
) -> tuple[dict[int, AttrValuesInput], dict[str, AttrValuesInput]]:
"""Prepare maps for attribute input based on IDs and external references."""
id_map: dict[int, AttrValuesInput] = {}
ext_ref_map: dict[str, AttrValuesInput] = {}
for attr_input in raw_input:
gid = attr_input.pop("id", None)
ext_ref = attr_input.pop("external_reference", None)
try:
validate_one_of_args_is_in_mutation(
"id", gid, "external_reference", ext_ref, use_camel_case=True
)
except ValidationError as e:
raise ValidationError(e.message, code=error_class.REQUIRED.value) from e
values = AttrValuesInput(
global_id=gid,
external_reference=ext_ref,
values=attr_input.pop("values", []),
file_url=attr_input.pop("file", None),
**attr_input,
)
if gid:
pk = cls._resolve_attribute_global_id(error_class, gid)
id_map[int(pk)] = values
if ext_ref:
ext_ref_map[ext_ref] = values
return id_map, ext_ref_map
@classmethod
def _validate_and_clean_attributes(
cls,
attributes: list[attribute_models.Attribute],
attributes_qs: "QuerySet[attribute_models.Attribute]",
id_to_values_input_map: dict[int, AttrValuesInput],
ext_ref_to_values_input_map: dict[str, AttrValuesInput],
error_class,
creation: bool,
) -> T_INPUT_MAP:
"""Validate and clean attribute inputs."""
cleaned_input = []
attribute_errors: T_ERROR_DICT = defaultdict(list)
for attribute in attributes:
values_input = id_to_values_input_map.get(
attribute.pk
) or ext_ref_to_values_input_map.get(attribute.external_reference) # type: ignore[arg-type]
values_input = cast(AttrValuesInput, values_input)
is_legacy_path = values_input.values and attribute.input_type in {
AttributeInputType.DROPDOWN,
AttributeInputType.MULTISELECT,
AttributeInputType.SWATCH,
AttributeInputType.NUMERIC,
}
handler_class: type[LegacyValuesHandler | AttributeTypeHandler]
if is_legacy_path:
handler_class = LegacyValuesHandler
else:
handler_class = cls.HANDLER_MAPPING[attribute.input_type]
if handler_class:
handler = handler_class(attribute, values_input)
handler.clean_and_validate(attribute_errors)
cleaned_input.append((attribute, values_input))
errors = cls.prepare_error_list_from_error_attribute_mapping(
attribute_errors, error_class
)
if creation:
cls._validate_required_attributes(attributes_qs, cleaned_input, errors)
if errors:
raise ValidationError(errors)
return cleaned_input
@classmethod
def _resolve_attribute_global_id(cls, error_class, global_id: str) -> int:
"""Resolve an Attribute global ID into an internal ID (int)."""
try:
graphene_type, internal_id = from_global_id_or_error(
global_id, only_type="Attribute"
)
except GraphQLError as e:
raise ValidationError(str(e), code=error_class.GRAPHQL_ERROR.value) from e
if not internal_id.isnumeric():
raise ValidationError(
f"An invalid ID value was passed: {global_id}",
code=error_class.INVALID.value,
)
return int(internal_id)
@staticmethod
def prepare_error_list_from_error_attribute_mapping(
attribute_errors: T_ERROR_DICT, error_code_enum
):
errors = []
for error_data, attributes in attribute_errors.items():
error_msg, error_type = error_data
error = ValidationError(
error_msg,
code=getattr(error_code_enum, error_type).value,
params={"attributes": attributes},
)
errors.append(error)
return errors
@classmethod
def _validate_required_attributes(
cls,
attributes_qs: "QuerySet[attribute_models.Attribute]",
cleaned_input: T_INPUT_MAP,
errors: list[ValidationError],
):
"""Validate that all required attributes are provided."""
supplied_pks = {attr.pk for attr, _ in cleaned_input}
missing_required = attributes_qs.filter(
Q(value_required=True) & ~Q(pk__in=supplied_pks)
)
if missing_required:
missing_ids = [
graphene.Node.to_global_id("Attribute", attr.pk)
for attr in missing_required
]
error = ValidationError(
"All attributes flagged as having a value required must be supplied.",
code=ProductErrorCode.REQUIRED.value,
params={"attributes": missing_ids},
)
errors.append(error)
@classmethod
def pre_save_values(
cls, instance: T_INSTANCE, cleaned_input: T_INPUT_MAP
) -> T_PRE_SAVE_BULK:
"""Prepare attribute values data for bulk database operations.
Example return structure:
{
AttributeValueBulkActionEnum.CREATE: {
<Attribute>: [<AttributeValue>, <AttributeValue>],
...
},
AttributeValueBulkActionEnum.UPDATE_OR_CREATE: {
<Attribute>: [
{
"attribute": <Attribute>,
"slug": "instance_id_attribute_id",
"defaults": {"name": "...", "plain_text": "..."}
}
],
...
},
AttributeValueBulkActionEnum.GET_OR_CREATE: {
<Attribute>: [
{
"attribute": <Attribute>,
"slug": "attribute_id_True",
"defaults": {"name": "Attribute: Yes", "boolean": True}
}
],
...
},
AttributeValueBulkActionEnum.NONE: {
<Attribute>: [<AttributeValue>, <AttributeValue>],
...
},
}
"""
pre_save_bulk: T_PRE_SAVE_BULK = defaultdict(lambda: defaultdict(list))
for attribute, values_input in cleaned_input:
is_legacy_path = values_input.values and attribute.input_type in {
AttributeInputType.DROPDOWN,
AttributeInputType.MULTISELECT,
AttributeInputType.SWATCH,
AttributeInputType.NUMERIC,
}
handler_class: type[AttributeTypeHandler] | None = None
if is_legacy_path:
handler_class = LegacyValuesHandler
else:
handler_class = cls.HANDLER_MAPPING.get(attribute.input_type)
if not handler_class:
continue
handler = handler_class(attribute, values_input)
prepared_values = handler.pre_save_value(instance)
if not prepared_values:
pre_save_bulk[AttributeValueBulkActionEnum.NONE].setdefault(
attribute, []
)
else:
for action, value_data in prepared_values:
pre_save_bulk[action][attribute].append(value_data)
return pre_save_bulk
@classmethod
def save(
cls,
instance: T_INSTANCE,
cleaned_input: T_INPUT_MAP,
pre_save_bulk: T_PRE_SAVE_BULK | None = None,
):
"""Save the cleaned input against the given instance."""
if pre_save_bulk is None:
pre_save_bulk = cls.pre_save_values(instance, cleaned_input)
attribute_and_values = cls._bulk_create_pre_save_values(pre_save_bulk)
attr_val_map = defaultdict(list)
clean_assignment_pks = []
for attribute, values in attribute_and_values.items():
if not values:
clean_assignment_pks.append(attribute.pk)
else:
attr_val_map[attribute.pk].extend(values)
associate_attribute_values_to_instance(instance, attr_val_map)
cls._clean_assignments(instance, clean_assignment_pks)
@classmethod
def _clean_assignments(cls, instance: T_INSTANCE, clean_assignment_pks: list[int]):
    """Remove the instance's assignments for the given attribute pks."""
    if not clean_assignment_pks:
        return
    if isinstance(instance, product_models.ProductVariant):
        # Variants keep the legacy assignment structure and need a
        # dedicated deletion path.
        cls._clean_variants_assignment(instance, clean_assignment_pks)
        return
    stale_values = attribute_models.AttributeValue.objects.filter(
        attribute_id__in=clean_assignment_pks
    )
    assignment_model, instance_fk = get_assignment_model_and_fk(instance)
    assignment_model.objects.filter(
        Exists(stale_values.filter(id=OuterRef("value_id"))),
        **{instance_fk: instance.pk},
    ).delete()
@classmethod
def _clean_variants_assignment(cls, instance: T_INSTANCE, attribute_ids: list[int]):
    """Drop the variant's assignments for the given attribute ids.

    Variants still use the legacy relation structure
    (``AttributeVariant`` -> ``AssignedVariantAttribute``), so deletion goes
    through the assignment model instead of the value link tables used for
    products and pages.
    """
    # Match assignment rows whose AttributeVariant belongs to one of the
    # attributes being cleared.
    attribute_variant = Exists(
        attribute_models.AttributeVariant.objects.filter(
            pk=OuterRef("assignment_id"),
            attribute_id__in=attribute_ids,
        )
    )
    attribute_models.AssignedVariantAttribute.objects.filter(
        attribute_variant
    ).filter(
        variant_id=instance.id,
    ).delete()
@classmethod
def _bulk_create_pre_save_values(cls, pre_save_bulk):
    """Execute bulk database operations based on prepared data.

    ``pre_save_bulk`` maps a bulk action to ``{attribute: [value_data, ...]}``.
    Returns a mapping of attribute to the resulting ``AttributeValue``
    instances. Attributes that carry no values (the ``NONE`` action branch)
    still get an empty entry so ``save`` can remove their stale assignments.
    """
    results: dict[attribute_models.Attribute, list[AttributeValue]] = defaultdict(
        list
    )
    for action, attribute_data in pre_save_bulk.items():
        for attribute, values in attribute_data.items():
            if action == AttributeValueBulkActionEnum.CREATE:
                values = AttributeValue.objects.bulk_create(values)
            elif action == AttributeValueBulkActionEnum.UPDATE_OR_CREATE:
                values = AttributeValue.objects.bulk_update_or_create(values)
            elif action == AttributeValueBulkActionEnum.GET_OR_CREATE:
                values = AttributeValue.objects.bulk_get_or_create(values)
            else:
                # ensuring that empty values will be added to results,
                # so assignments will be removed properly in that case
                results.setdefault(attribute, [])
            results[attribute].extend(values)
    return results
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/attribute/utils/attribute_assignment.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/attribute/utils/shared.py | import datetime
from collections import defaultdict
from dataclasses import dataclass
from typing import TYPE_CHECKING, NamedTuple, cast
import orjson
from django.db.models import Model
from django.db.models.expressions import Exists, OuterRef
from ....attribute import AttributeEntityType, AttributeInputType
from ....attribute import models as attribute_models
from ....page import models as page_models
from ....product import models as product_models
from ..enums import AttributeValueBulkActionEnum
if TYPE_CHECKING:
from ....attribute.models import Attribute
T_INSTANCE = product_models.Product | product_models.ProductVariant | page_models.Page
T_ERROR_DICT = dict[tuple[str, str], list]
T_REFERENCE = (
product_models.Product
| product_models.ProductVariant
| product_models.Category
| product_models.Collection
| page_models.Page
)
@dataclass
class AssignedAttributeData:
    """Context for an attribute assigned to a product, page, or variant."""

    attribute: attribute_models.Attribute
    channel_slug: str | None
    # presumably exactly one of the ids below identifies the owner — TODO confirm
    product_id: int | None = None
    page_id: int | None = None
    variant_id: int | None = None
@dataclass
class AttrValuesForSelectableFieldInput:
    """One selectable (dropdown/swatch/multiselect) value input.

    A value can be referenced by global ``id``, by ``external_reference``,
    or supplied as a raw ``value`` string to be resolved or created.
    """

    id: str | None = None
    external_reference: str | None = None
    value: str | None = None
@dataclass
class AttrValuesInput:
    """Normalized mutation input for a single attribute.

    Typically only the field matching the attribute's input type is set —
    TODO confirm against callers.
    """

    # Identifier of the attribute itself.
    global_id: str | None
    external_reference: str | None = None
    # Deprecated `values: [String!]` input (dropdown/swatch/multiselect/numeric).
    values: list[str] | None = None
    # Selectable inputs.
    dropdown: AttrValuesForSelectableFieldInput | None = None
    swatch: AttrValuesForSelectableFieldInput | None = None
    multiselect: list[AttrValuesForSelectableFieldInput] | None = None
    numeric: str | None = None
    # Reference inputs (single / multiple) and the model instances resolved
    # from them during validation.
    reference: str | None = None
    references: list[str] | None = None
    reference_objects: list[T_REFERENCE] | None = None
    # File inputs.
    file_url: str | None = None
    content_type: str | None = None
    rich_text: dict | None = None
    plain_text: str | None = None
    boolean: bool | None = None
    date: datetime.date | None = None
    date_time: datetime.datetime | None = None
class EntityTypeData(NamedTuple):
    """Defines metadata for a referenceable entity type."""

    model: type[Model]  # Django model backing the entity type
    name_field: str  # model attribute used as the display name
    value_field: str  # AttributeValue field pointing at the referenced entity
# Registry of referenceable entity types: maps each AttributeEntityType to the
# model it points at, its display-name field, and the AttributeValue field that
# stores the reference. Consumed by the reference attribute handlers.
ENTITY_TYPE_MAPPING = {
    AttributeEntityType.PAGE: EntityTypeData(
        page_models.Page, "title", "reference_page"
    ),
    AttributeEntityType.PRODUCT: EntityTypeData(
        product_models.Product, "name", "reference_product"
    ),
    AttributeEntityType.PRODUCT_VARIANT: EntityTypeData(
        product_models.ProductVariant, "name", "reference_variant"
    ),
    AttributeEntityType.CATEGORY: EntityTypeData(
        product_models.Category, "name", "reference_category"
    ),
    AttributeEntityType.COLLECTION: EntityTypeData(
        product_models.Collection, "name", "reference_collection"
    ),
}
def get_assignment_model_and_fk(instance: T_INSTANCE):
    """Return the (assignment model, FK field name) pair for *instance*.

    Raises NotImplementedError for instance types without an assignment
    model (variants are handled through a separate legacy path).
    """
    dispatch = (
        (page_models.Page, attribute_models.AssignedPageAttributeValue, "page_id"),
        (
            product_models.Product,
            attribute_models.AssignedProductAttributeValue,
            "product_id",
        ),
    )
    for model_cls, assignment_model, fk_name in dispatch:
        if isinstance(instance, model_cls):
            return assignment_model, fk_name
    raise NotImplementedError(
        f"Assignment for {type(instance).__name__} not implemented."
    )
def get_assigned_attribute_value_if_exists(
    instance: T_INSTANCE, attribute: "Attribute", lookup_field: str, value
):
    """Unified method to find an existing assigned value.

    Returns the first ``AttributeValue`` of *attribute* that is assigned to
    *instance* and matches ``{lookup_field: value}``, or ``None``.
    """
    if isinstance(instance, product_models.ProductVariant):
        # variant has old attribute structure so need to handle it differently
        return get_variant_assigned_attribute_value_if_exists(
            instance, attribute, lookup_field, value
        )
    assignment_model, instance_fk = get_assignment_model_and_fk(instance)
    # Values linked to this instance through its assignment table.
    assigned_values = assignment_model.objects.filter(**{instance_fk: instance.pk})
    return attribute_models.AttributeValue.objects.filter(
        Exists(assigned_values.filter(value_id=OuterRef("id"))),
        attribute_id=attribute.pk,
        **{lookup_field: value},
    ).first()
def get_variant_assigned_attribute_value_if_exists(
    instance: T_INSTANCE, attribute: "Attribute", lookup_field: str, value: str
):
    """Find an assigned value on a variant via the legacy relation structure.

    Mirrors ``get_assigned_attribute_value_if_exists`` but traverses
    ``AttributeVariant`` -> ``AssignedVariantAttribute`` instead of a direct
    value link table.
    """
    variant = cast(product_models.ProductVariant, instance)
    # AttributeVariant rows for this attribute, matched by assignment id.
    attribute_variant = Exists(
        attribute_models.AttributeVariant.objects.filter(
            pk=OuterRef("assignment_id"),
            attribute_id=attribute.pk,
        )
    )
    # Assignments of this variant whose m2m values include the outer value row.
    assigned_variant = Exists(
        attribute_models.AssignedVariantAttribute.objects.filter(
            attribute_variant
        ).filter(
            variant_id=variant.pk,
            values=OuterRef("pk"),
        )
    )
    return attribute_models.AttributeValue.objects.filter(
        assigned_variant, **{lookup_field: value}
    ).first()
def has_input_modified_attribute_values(
    variant: product_models.ProductVariant,
    pre_save_bulk_data: dict[
        AttributeValueBulkActionEnum, dict[attribute_models.Attribute, list]
    ],
) -> bool:
    """Compare already assigned attribute values with the input values.

    The change in the attribute values order is also considered a modification.

    Return:
        `False` if the attribute values are equal, otherwise `True`.

    """
    # Variants without a product are never treated as modified.
    if variant.product_id is None:
        return False
    currently_assigned = get_attribute_to_values_map_for_variant(variant)
    incoming = get_values_from_pre_save_bulk_data(pre_save_bulk_data)
    return incoming != currently_assigned
def get_attribute_to_values_map_for_variant(
    variant: product_models.ProductVariant,
) -> dict[int, list]:
    """Create a dict of attributes values for variant.

    Values are normalized per input type so the result can be compared
    against ``get_values_from_pre_save_bulk_data``: plain text -> the text,
    rich text -> canonical JSON string, numeric -> str, date/date-time ->
    the stored ``date_time``, anything else -> slug.

    Sample result is:
    {
        <attribute_pk>: [<value>, <value>],
        <attribute_pk>: [<value>]
    }
    """
    attribute_values: defaultdict[int, list[str | None | datetime.datetime]] = (
        defaultdict(list)
    )
    for assigned_variant_attribute in variant.attributes.all():
        attribute = assigned_variant_attribute.attribute
        attribute_id = attribute.pk
        for attr_value in assigned_variant_attribute.values.all():
            if attribute.input_type == AttributeInputType.PLAIN_TEXT:
                attribute_values[attribute_id].append(attr_value.plain_text)
            elif attribute.input_type == AttributeInputType.RICH_TEXT:
                # Serialize with UTC "Z" suffix to match the input-side encoding.
                attribute_values[attribute_id].append(
                    orjson.dumps(attr_value.rich_text, option=orjson.OPT_UTC_Z).decode(
                        "utf-8"
                    )
                )
            elif attribute.input_type == AttributeInputType.NUMERIC:
                attribute_values[attribute_id].append(str(attr_value.numeric))
            elif attribute.input_type in [
                AttributeInputType.DATE,
                AttributeInputType.DATE_TIME,
            ]:
                attribute_values[attribute_id].append(attr_value.date_time)
            else:
                attribute_values[attribute_id].append(attr_value.slug)
    return attribute_values
def get_values_from_pre_save_bulk_data(
    pre_save_bulk_data: dict[
        AttributeValueBulkActionEnum, dict[attribute_models.Attribute, list]
    ],
) -> dict[int, list[str | None | datetime.datetime]]:
    """Extract comparable values from prepared bulk data.

    Normalization mirrors ``get_attribute_to_values_map_for_variant`` so the
    two results can be compared for equality.
    """
    # Maps input type -> (field inside "defaults", optional transform).
    input_type_to_field_and_action = {
        AttributeInputType.PLAIN_TEXT: ("plain_text", None),
        AttributeInputType.RICH_TEXT: (
            "rich_text",
            lambda x: orjson.dumps(x, option=orjson.OPT_UTC_Z).decode("utf-8"),
        ),
        AttributeInputType.NUMERIC: ("numeric", str),
        AttributeInputType.DATE: ("date_time", None),
        AttributeInputType.DATE_TIME: ("date_time", None),
    }
    input_attribute_values: defaultdict[int, list[str | None | datetime.datetime]] = (
        defaultdict(list)
    )
    for action, attributes in pre_save_bulk_data.items():
        for attr, values_data in attributes.items():
            values = []
            if action == AttributeValueBulkActionEnum.GET_OR_CREATE:
                # values_data holds dicts with a "slug" key.
                values = [value["slug"] for value in values_data]
            elif action == AttributeValueBulkActionEnum.UPDATE_OR_CREATE:
                field_name, transform = input_type_to_field_and_action.get(
                    attr.input_type, (None, None)
                )
                if field_name:
                    values = [
                        transform(value["defaults"][field_name])
                        if transform
                        else value["defaults"][field_name]
                        for value in values_data
                    ]
            else:
                # values_data holds AttributeValue instances.
                values = [value.slug for value in values_data]
            input_attribute_values[attr.pk].extend(values)
    return input_attribute_values
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/attribute/utils/shared.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/attribute/utils/type_handlers.py | import abc
import datetime
import re
from collections.abc import Sequence
from typing import TYPE_CHECKING
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils.text import slugify
from graphql.error import GraphQLError
from text_unidecode import unidecode
from ....attribute import AttributeEntityType, AttributeInputType
from ....attribute import models as attribute_models
from ....attribute.models import AttributeValue
from ....core.utils import (
generate_unique_slug,
prepare_unique_slug,
)
from ....core.utils.editorjs import clean_editor_js
from ....core.utils.text import safe_truncate
from ....core.utils.url import get_default_storage_root_url
from ...core.utils import from_global_id_or_error, get_duplicated_values
from ...utils import get_nodes
from ..enums import AttributeValueBulkActionEnum
from .shared import (
ENTITY_TYPE_MAPPING,
T_ERROR_DICT,
T_INSTANCE,
AttrValuesForSelectableFieldInput,
AttrValuesInput,
get_assigned_attribute_value_if_exists,
)
if TYPE_CHECKING:
from ....attribute.models import Attribute
class AttributeInputErrors:
    """Defines error messages and codes for attribute validation.

    Each constant is a ``(message, error_code)`` tuple; handlers use them as
    keys of the error dict (``T_ERROR_DICT``) that collects offending
    attribute identifiers.
    """

    # General Errors
    VALUE_REQUIRED = ("Attribute expects a value but none were given.", "REQUIRED")
    BLANK_VALUE = ("Attribute values cannot be blank.", "REQUIRED")
    DUPLICATED_VALUES = (
        "Duplicated attribute values are provided.",
        "DUPLICATED_INPUT_ITEM",
    )
    INVALID_INPUT = ("Invalid value provided for attribute.", "INVALID")
    MORE_THAN_ONE_VALUE = (
        "Attribute must take only one value.",
        "INVALID",
    )
    ID_AND_VALUE_PROVIDED = (
        "Attribute values cannot be assigned by both id and value.",
        "INVALID",
    )
    ID_AND_EXTERNAL_REFERENCE_PROVIDED = (
        "Attribute values cannot be assigned by both id and external reference",
        "INVALID",
    )
    MAX_LENGTH_EXCEEDED = ("Attribute value length is exceeded.", "INVALID")
    # File Errors
    FILE_URL_REQUIRED = ("A file URL is required for this attribute.", "REQUIRED")
    INVALID_FILE_URL = (
        "The file_url must be the path to the default storage.",
        "INVALID",
    )
    # Reference Errors
    REFERENCE_REQUIRED = ("A reference is required for this attribute.", "REQUIRED")
    INVALID_REFERENCE = ("Invalid reference type.", "INVALID")
    INVALID_REFERENCE_TYPE = (
        "Invalid reference, must be an object from available choices defined by "
        "reference types on attribute.",
        "INVALID",
    )
    # Numeric Errors
    ERROR_NUMERIC_VALUE_REQUIRED = ("Numeric value is required.", "INVALID")
class AttributeTypeHandler(abc.ABC):
    """Abstract base class for attribute type-specific logic."""

    def __init__(
        self,
        attribute: "Attribute",
        values_input: AttrValuesInput,
    ):
        self.attribute = attribute
        self.values_input = values_input
        # Identifier reported in validation errors: global ID when given,
        # otherwise the external reference.
        self.attribute_identifier = (
            values_input.global_id or values_input.external_reference
        )
        # NOTE(review): duplicate of `attribute_identifier`; both names are
        # referenced by subclasses — consider consolidating.
        self.attr_identifier = values_input.global_id or values_input.external_reference

    @abc.abstractmethod
    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Clean, resolve, and validate input values."""
        raise NotImplementedError

    @abc.abstractmethod
    def pre_save_value(
        self, instance: T_INSTANCE
    ) -> list[tuple[AttributeValueBulkActionEnum, dict | AttributeValue]]:
        """Prepare attribute value data for bulk database operations."""
        raise NotImplementedError

    def _update_or_create_value(
        self,
        instance: T_INSTANCE,
        value_defaults: dict,
    ):
        """Build a single UPDATE_OR_CREATE action for this instance/attribute.

        The slug is derived from the instance and attribute ids, so each
        instance/attribute pair owns at most one such value row.
        """
        slug = slugify(unidecode(f"{instance.id}_{self.attribute.id}"))
        value = {
            "attribute": self.attribute,
            "slug": slug,
            "defaults": value_defaults,
        }
        return [
            (AttributeValueBulkActionEnum.UPDATE_OR_CREATE, value),
        ]

    @classmethod
    def prepare_attribute_values(
        cls, attribute: attribute_models.Attribute, values: list[str]
    ) -> list[tuple]:
        """Resolve raw strings to existing values or CREATE actions.

        Each input string is matched against existing values by slug or
        name; unmatched strings yield new ``AttributeValue`` objects with
        slugs kept unique against both the DB and this batch.
        """
        slug_to_value_map = {}
        name_to_value_map = {}
        for val in attribute.values.filter(Q(name__in=values) | Q(slug__in=values)):
            slug_to_value_map[val.slug] = val
            name_to_value_map[val.name] = val
        # Slugs already taken that could collide with slugs generated below.
        existing_slugs = cls.get_existing_slugs(attribute, values)
        results = []
        for value_str in values:
            value_obj = slug_to_value_map.get(value_str) or name_to_value_map.get(
                value_str
            )
            if value_obj:
                results.append((AttributeValueBulkActionEnum.NONE, value_obj))
            else:
                # If no existing value is found, prepare a new one for creation.
                unique_slug = prepare_unique_slug(
                    slugify(unidecode(value_str)), existing_slugs
                )
                new_value = AttributeValue(
                    attribute=attribute, name=value_str, slug=unique_slug
                )
                results.append((AttributeValueBulkActionEnum.CREATE, new_value))
                # the set of existing slugs must be updated to not generate
                # accidentally the same slug for two or more values
                existing_slugs.add(unique_slug)
                # extend name to slug value to not create two elements with the same name
                name_to_value_map[new_value.name] = new_value
        return results

    def prepare_attribute_values_with_external_reference(
        self, values: list[tuple[str, str]]
    ) -> list[tuple]:
        """Prepare CREATE actions for (external_reference, value) pairs."""
        existing_slugs = self.get_existing_slugs(self.attribute, [v[1] for v in values])
        results = []
        for ext_ref, value_str in values:
            unique_slug = prepare_unique_slug(
                slugify(unidecode(value_str)), existing_slugs
            )
            new_value = AttributeValue(
                attribute=self.attribute,
                name=value_str,
                slug=unique_slug,
                external_reference=ext_ref,
            )
            results.append((AttributeValueBulkActionEnum.CREATE, new_value))
            # Keep the slug pool current so this batch stays collision-free.
            existing_slugs.add(unique_slug)
        return results

    @staticmethod
    def get_existing_slugs(attribute: attribute_models.Attribute, values: list[str]):
        """Return this attribute's slugs that could collide with *values*."""
        lookup = Q()
        for value in values:
            lookup |= Q(slug__startswith=slugify(unidecode(value)))
        existing_slugs = set(
            attribute.values.filter(lookup).values_list("slug", flat=True)
        )
        return existing_slugs
class SelectableAttributeHandler(AttributeTypeHandler):
    """Handler for Dropdown and Swatch attribute types."""

    def get_selectable_input(self) -> AttrValuesForSelectableFieldInput | None:
        """Get the specific input object for dropdown or swatch."""
        if self.attribute.input_type == AttributeInputType.DROPDOWN:
            return self.values_input.dropdown
        if self.attribute.input_type == AttributeInputType.SWATCH:
            return self.values_input.swatch
        return None

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        selectable_input = self.get_selectable_input()
        if not selectable_input:
            if self.attribute.value_required:
                attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                    self.attribute_identifier
                )
            return
        self._validate_selectable_field(
            selectable_input,
            value_required=self.attribute.value_required,
            attribute_errors=attribute_errors,
        )

    def _validate_selectable_field(
        self,
        attr_value_input: AttrValuesForSelectableFieldInput,
        value_required: bool,
        attribute_errors: T_ERROR_DICT,
    ):
        """Validate a single input for a selectable field.

        Rules: `id` is mutually exclusive with both `externalReference` and
        `value`; at least one must be given when the attribute requires a
        value; a raw `value` must be non-blank and within the name field's
        max length; an id/external reference must be non-blank.
        """
        id = attr_value_input.id
        value = attr_value_input.value
        external_reference = attr_value_input.external_reference
        if id and external_reference:
            attribute_errors[
                AttributeInputErrors.ID_AND_EXTERNAL_REFERENCE_PROVIDED
            ].append(self.attr_identifier)
            return
        if id and value:
            attribute_errors[AttributeInputErrors.ID_AND_VALUE_PROVIDED].append(
                self.attr_identifier
            )
            return
        if not id and not external_reference and not value and value_required:
            attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                self.attr_identifier
            )
            return
        if value:
            # Max length of AttributeValue.name limits raw value strings.
            max_length = self.attribute.values.model.name.field.max_length
            if not value.strip():
                attribute_errors[AttributeInputErrors.BLANK_VALUE].append(
                    self.attr_identifier
                )
            elif max_length and len(value) > max_length:
                attribute_errors[AttributeInputErrors.MAX_LENGTH_EXCEEDED].append(
                    self.attr_identifier
                )
        value_identifier = id or external_reference
        if value_identifier:
            if not value_identifier.strip():
                attribute_errors[AttributeInputErrors.BLANK_VALUE].append(
                    self.attr_identifier
                )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Resolve the selectable input to existing or to-be-created values.

        Raises ValidationError when a referenced value cannot be found.
        """
        selectable_input = self.get_selectable_input()
        if not selectable_input:
            return []
        id, value, ext_ref = (
            selectable_input.id,
            selectable_input.value,
            selectable_input.external_reference,
        )
        if ext_ref and value:
            # Get-or-create semantics keyed on the external reference.
            return self._parse_external_reference_and_value(ext_ref, value)
        if ext_ref:
            value_instance = attribute_models.AttributeValue.objects.filter(
                external_reference=ext_ref
            ).first()
            if not value_instance:
                raise ValidationError(
                    "Attribute value with given externalReference can't be found"
                )
            return [(AttributeValueBulkActionEnum.NONE, value_instance)]
        if id:
            _, attr_value_id = from_global_id_or_error(id)
            value_instance = attribute_models.AttributeValue.objects.filter(
                pk=attr_value_id
            ).first()
            if not value_instance:
                raise ValidationError("Attribute value with given ID can't be found")
            return [(AttributeValueBulkActionEnum.NONE, value_instance)]
        if value:
            return self.prepare_attribute_values(self.attribute, [value])
        return []

    def _parse_external_reference_and_value(
        self, external_reference: str, attr_value: str | None
    ) -> list[tuple[AttributeValueBulkActionEnum, AttributeValue]]:
        """Get or create an AttributeValue by external reference."""
        value_instance = attribute_models.AttributeValue.objects.filter(
            external_reference=external_reference
        ).first()
        if value_instance:
            # An existing reference must carry the same name; a conflicting
            # name is rejected rather than silently overwritten.
            if value_instance.name != attr_value:
                raise ValidationError(
                    f"Attribute value with external reference '{external_reference}' already exists "
                    f"with different value '{value_instance.name}'."
                )
            return [
                (
                    AttributeValueBulkActionEnum.NONE,
                    value_instance,
                )
            ]
        if not attr_value:
            raise ValidationError(
                f"Attribute value with given external reference can't be found: {external_reference}"
            )
        return self.prepare_attribute_values_with_external_reference(
            [(external_reference, attr_value or external_reference)]
        )
class MultiSelectableAttributeHandler(SelectableAttributeHandler):
    """Handler for Multiselect attribute type."""

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Validate the list of multiselect inputs.

        Checks the id/value exclusivity, the value-required rule, duplicate
        ids/values/external references across the list, and then validates
        each entry with the single-value selectable rules.
        """
        multi_values = self.values_input.multiselect
        if not multi_values:
            if self.attribute.value_required:
                attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                    self.attribute_identifier
                )
            return
        ids = [value.id for value in multi_values if value.id is not None]
        values = [value.value for value in multi_values if value.value is not None]
        external_refs = [
            value.external_reference
            for value in multi_values
            if value.external_reference is not None
        ]
        if ids and values:
            attribute_errors[AttributeInputErrors.ID_AND_VALUE_PROVIDED].append(
                self.attribute_identifier
            )
        elif (
            not ids
            and not external_refs
            and not values
            and self.attribute.value_required
        ):
            attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                self.attribute_identifier
            )
        elif (
            len(ids) > len(set(ids))
            or len(values) > len(set(values))
            or len(external_refs) > len(set(external_refs))
        ):
            attribute_errors[AttributeInputErrors.DUPLICATED_VALUES].append(
                self.attribute_identifier
            )
        for value_input in multi_values:
            self._validate_selectable_field(
                value_input,
                value_required=self.attribute.value_required,
                attribute_errors=attribute_errors,
            )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Resolve each multiselect entry to an existing or new value.

        Entries given by id or plain external reference must already exist;
        entries given by (external reference, value) are matched or created;
        raw values are matched by slug/name or created.

        Raises ValidationError listing all unresolved ids/references.
        """
        multi_values = self.values_input.multiselect
        if not multi_values:
            return []
        ext_refs = [
            value.external_reference
            for value in multi_values
            if value.external_reference
        ]
        ids = [
            from_global_id_or_error(v_input.id, "AttributeValue")[1]
            for v_input in multi_values
            if v_input.id
        ]
        # Resolve all referenced values in two queries up front.
        ext_ref_to_value_map = self.attribute.values.filter(
            external_reference__in=ext_refs
        ).in_bulk(field_name="external_reference")
        id_to_value_map = self.attribute.values.filter(id__in=ids).in_bulk()
        results = []
        values_to_create = []
        external_refs_with_value_to_create = []
        invalid_ids = []
        invalid_ext_refs = []
        invalid_ext_refs_with_current_value = []
        for v_input in multi_values:
            if v_input.id:
                _, pk = from_global_id_or_error(v_input.id, "AttributeValue")
                if value_instance := id_to_value_map.get(int(pk)):
                    results.append((AttributeValueBulkActionEnum.NONE, value_instance))
                else:
                    invalid_ids.append(v_input.id)
            elif v_input.external_reference and not v_input.value:
                if value_instance := ext_ref_to_value_map.get(
                    v_input.external_reference
                ):
                    results.append((AttributeValueBulkActionEnum.NONE, value_instance))
                else:
                    invalid_ext_refs.append(v_input.external_reference)
            elif v_input.external_reference and v_input.value:
                if value_instance := ext_ref_to_value_map.get(
                    v_input.external_reference
                ):
                    # An existing reference with a conflicting name is an error.
                    if value_instance.name != v_input.value:
                        invalid_ext_refs_with_current_value.append(
                            (v_input.external_reference, value_instance.name)
                        )
                        continue
                    results.append((AttributeValueBulkActionEnum.NONE, value_instance))
                    continue
                external_refs_with_value_to_create.append(
                    (v_input.external_reference, v_input.value)
                )
            elif v_input.value:
                values_to_create.append(v_input.value)
        if invalid_ids:
            raise ValidationError(
                f"Attribute value(s) with given ID(s) can't be found: {', '.join(invalid_ids)}"
            )
        if invalid_ext_refs:
            raise ValidationError(
                f"Attribute value(s) with given external reference(s) can't be found: {', '.join(invalid_ext_refs)}"
            )
        if invalid_ext_refs_with_current_value:
            error_messages = [
                f"Attribute value with external reference '{ext_ref}' already exists with different value '{existing_value}'."
                for ext_ref, existing_value in invalid_ext_refs_with_current_value
            ]
            # Bug fix: was "/n".join(...) — the literal characters "/n", not a
            # newline — so multiple messages were glued together unreadably.
            raise ValidationError("\n".join(error_messages))
        # Prepare new values
        if values_to_create:
            results.extend(
                self.prepare_attribute_values(self.attribute, values_to_create)
            )
        # Prepare new values with external references
        if external_refs_with_value_to_create:
            results.extend(
                self.prepare_attribute_values_with_external_reference(
                    external_refs_with_value_to_create
                )
            )
        return results
class FileAttributeHandler(AttributeTypeHandler):
    """Handler for File attribute type."""

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Validate the file URL and strip the storage root prefix.

        The URL must be non-blank when the attribute requires a value and
        must point into the default storage; the stored value keeps only the
        path relative to the storage root.
        """
        storage_root_url = get_default_storage_root_url()
        file_url = self.values_input.file_url
        if self.attribute.value_required and (not file_url or not file_url.strip()):
            attribute_errors[AttributeInputErrors.FILE_URL_REQUIRED].append(
                self.attribute_identifier
            )
        if file_url and not file_url.startswith(storage_root_url):
            attribute_errors[AttributeInputErrors.INVALID_FILE_URL].append(
                self.attribute_identifier
            )
        # Bug fix: previously `re.sub(storage_root_url, "", file_url)` treated
        # the storage URL as a regular expression ("." matched any character,
        # and every occurrence anywhere in the string was removed). Strip the
        # literal prefix instead, matching the `startswith` check above.
        self.values_input.file_url = (
            file_url.removeprefix(storage_root_url)
            if file_url is not None
            else file_url
        )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Prepare a value for the file URL, reusing an identical assignment."""
        file_url = self.values_input.file_url
        if not file_url:
            return []
        # File attributes should be unique per assignment, so we create a new value
        # unless this exact URL is already assigned to this instance.
        value = get_assigned_attribute_value_if_exists(
            instance, self.attribute, "file_url", file_url
        )
        if value:
            return [(AttributeValueBulkActionEnum.NONE, value)]
        # The file's basename doubles as the value name and slug base.
        name = file_url.split("/")[-1]
        value_obj = AttributeValue(
            attribute=self.attribute,
            file_url=file_url,
            name=name,
            content_type=self.values_input.content_type,
        )
        value_obj.slug = generate_unique_slug(value_obj, name)
        return [(AttributeValueBulkActionEnum.CREATE, value_obj)]
class ReferenceAttributeHandler(AttributeTypeHandler):
    """Handler for Reference and Single Reference attribute type."""

    def get_references(self) -> Sequence[str]:
        """Return the raw reference IDs, normalized to a list."""
        if self.attribute.input_type == AttributeInputType.SINGLE_REFERENCE:
            return [self.values_input.reference] if self.values_input.reference else []
        return self.values_input.references or []

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Resolve Graphene IDs and then validate the result.

        Modifies `self.values_input.references` in place.
        """
        references = self.get_references()
        if not references:
            if self.attribute.value_required:
                attribute_errors[AttributeInputErrors.REFERENCE_REQUIRED].append(
                    self.attribute_identifier
                )
            return
        # A reference attribute without an entity type cannot be resolved.
        if not self.attribute.entity_type:
            attribute_errors[AttributeInputErrors.INVALID_INPUT].append(
                self.attribute_identifier
            )
            return
        entity_data = ENTITY_TYPE_MAPPING[self.attribute.entity_type]
        # Variant names are prefixed with their product name in pre_save_value,
        # so the product must be prefetched along with the variant.
        prefetch_related = (
            ["product"]
            if self.attribute.entity_type == AttributeEntityType.PRODUCT_VARIANT
            else []
        )
        try:
            ref_instances = get_nodes(
                references,
                self.attribute.entity_type,
                model=entity_data.model,
                prefetch_related=prefetch_related,
            )
        except GraphQLError:
            attribute_errors[AttributeInputErrors.INVALID_REFERENCE].append(
                self.attribute_identifier
            )
            return
        invalid_refs = self.get_references_with_invalid_reference_types(
            ref_instances, attribute_errors
        )
        if invalid_refs:
            attribute_errors[AttributeInputErrors.INVALID_REFERENCE_TYPE].append(
                self.attribute_identifier
            )
            return
        # Resolved model instances are consumed later by pre_save_value.
        self.values_input.reference_objects = ref_instances

    def get_references_with_invalid_reference_types(
        self, ref_instances: list, attribute_errors: T_ERROR_DICT
    ):
        """Validate that all references are of the correct type.

        For `PRODUCT` and `PRODUCT_VARIANT` entity types, check if the
        references belong to the reference product types defined in the attribute.
        For `PAGE` entity type, check if the references belong to the reference page
        types defined in the attribute.
        """
        # `reference_product_types` and `reference_page_types` are pre-fetched
        # in `AttributeAssignmentMixin._resolve_attribute_nodes`
        if reference_product_types := self.attribute.reference_product_types.all():
            ref_product_type_ids = set()
            if self.attribute.entity_type == AttributeEntityType.PRODUCT:
                ref_product_type_ids = {ref.product_type_id for ref in ref_instances}
            elif self.attribute.entity_type == AttributeEntityType.PRODUCT_VARIANT:
                # product is pre-fetched in `get_nodes`
                ref_product_type_ids = {
                    ref.product.product_type_id for ref in ref_instances
                }
            attribute_product_type_ids = {pt.id for pt in reference_product_types}
            invalid_refs = ref_product_type_ids - attribute_product_type_ids
            return invalid_refs
        if reference_page_types := self.attribute.reference_page_types.all():
            ref_page_type_ids = {ref.page_type_id for ref in ref_instances}
            # `reference_page_types` are pre-fetched in `AttributeAssignmentMixin._resolve_attribute_nodes`
            attribute_page_type_ids = {pt.id for pt in reference_page_types}
            invalid_refs = ref_page_type_ids - attribute_page_type_ids
            return invalid_refs
        # No type restrictions configured on the attribute — nothing invalid.
        return {}

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Build GET_OR_CREATE actions for the resolved reference objects."""
        references = self.values_input.reference_objects
        entity_type = self.attribute.entity_type
        if not references or not entity_type:
            return []
        entity_data = ENTITY_TYPE_MAPPING[entity_type]
        results = []
        for ref in references:
            name = getattr(ref, entity_data.name_field)
            if entity_type == AttributeEntityType.PRODUCT_VARIANT:
                name = f"{ref.product.name}: {name}"  # type: ignore[union-attr]
            # Reference values are unique per referenced entity
            slug = slugify(unidecode(f"{instance.id}_{ref.id}"))
            defaults = {"name": name}
            value_data = {
                "attribute": self.attribute,
                "slug": slug,
                "defaults": defaults,
                entity_data.value_field: ref,
            }
            results.append((AttributeValueBulkActionEnum.GET_OR_CREATE, value_data))
        return results
class PlainTextAttributeHandler(AttributeTypeHandler):
    """Handler for Plain Text attribute type."""

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Flag a missing/blank text when the attribute requires a value."""
        text = self.values_input.plain_text
        is_blank = not text or not text.strip()
        if is_blank and self.attribute.value_required:
            attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                self.attribute_identifier
            )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Prepare an update-or-create action storing the text; the value
        name is the text truncated to 200 characters."""
        text = self.values_input.plain_text
        if text is None:
            return []
        return self._update_or_create_value(
            instance,
            {"plain_text": text, "name": safe_truncate(text, 200)},
        )
class RichTextAttributeHandler(AttributeTypeHandler):
    """Handler for Rich Text attribute type."""

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Flag editor content that renders to blank text when required."""
        rendered = clean_editor_js(self.values_input.rich_text or {}, to_string=True)
        if self.attribute.value_required and not rendered.strip():
            attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                self.attribute_identifier
            )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Prepare an update-or-create action storing the editor JSON; the
        value name is the rendered text truncated to 200 characters."""
        editor_data = self.values_input.rich_text
        if editor_data is None:
            return []
        rendered = clean_editor_js(editor_data, to_string=True)
        return self._update_or_create_value(
            instance,
            {"rich_text": editor_data, "name": safe_truncate(rendered, 200)},
        )
class NumericAttributeHandler(AttributeTypeHandler):
    """Handler for Numeric attribute type."""

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Require a value when mandated; reject non-numeric and boolean input."""
        raw = self.values_input.numeric
        if raw is None:
            if self.attribute.value_required:
                attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                    self.attribute_identifier
                )
            return
        # bool subclasses int, so float(True) would succeed — reject it explicitly.
        is_valid = not isinstance(raw, bool)
        if is_valid:
            try:
                float(raw)
            except (ValueError, TypeError):
                is_valid = False
        if not is_valid:
            attribute_errors[AttributeInputErrors.ERROR_NUMERIC_VALUE_REQUIRED].append(
                self.attribute_identifier
            )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Store the parsed float, keeping the raw input as the value name."""
        raw = self.values_input.numeric
        if raw is None:
            return []
        return self._update_or_create_value(
            instance, {"name": raw, "numeric": float(raw)}
        )
class DateTimeAttributeHandler(AttributeTypeHandler):
    """Handler for Date and DateTime attribute types."""

    def _input_value(self):
        # DATE attributes read the `date` field; DATE_TIME ones read `date_time`.
        if self.attribute.input_type == AttributeInputType.DATE:
            return self.values_input.date
        return self.values_input.date_time

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Flag a missing date/datetime when the attribute requires a value."""
        if self.attribute.value_required and not self._input_value():
            attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                self.attribute_identifier
            )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Store the value in the shared `date_time` column.

        Plain dates are promoted to midnight UTC so both input types share
        one storage field; the value name is the original input's string form.
        """
        value = self._input_value()
        if not value:
            return []
        if self.attribute.input_type == AttributeInputType.DATE:
            stored = datetime.datetime.combine(
                value, datetime.time.min, tzinfo=datetime.UTC
            )
        else:
            stored = value
        return self._update_or_create_value(
            instance, {"name": str(value), "date_time": stored}
        )
class BooleanAttributeHandler(AttributeTypeHandler):
    """Handler for Boolean attribute type."""

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Flag a missing boolean when the attribute requires a value."""
        if self.values_input.boolean is None and self.attribute.value_required:
            attribute_errors[AttributeInputErrors.VALUE_REQUIRED].append(
                self.attribute_identifier
            )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Prepare a GET_OR_CREATE action for the attribute-wide True/False value.

        Boolean attributes share one value row per attribute and truth value,
        keyed by a slug built from the attribute id and the flag.
        """
        raw = self.values_input.boolean
        if raw is None:
            return []
        flag = bool(raw)
        label = "Yes" if flag else "No"
        value_data = {
            "attribute": self.attribute,
            "slug": slugify(unidecode(f"{self.attribute.id}_{flag}")),
            "defaults": {
                "name": f"{self.attribute.name}: {label}",
                "boolean": flag,
            },
        }
        return [(AttributeValueBulkActionEnum.GET_OR_CREATE, value_data)]
class LegacyValuesHandler(AttributeTypeHandler):
    """Handler for the deprecated `values: [String!]` field.

    Applicable for Dropdown, Swatch, Multiselect, and Numeric attribute types.
    """

    def clean_and_validate(self, attribute_errors: T_ERROR_DICT):
        """Validate a list of raw string values.

        Error entries use ``self.attribute_identifier`` for consistency with
        the other ``AttributeTypeHandler`` subclasses in this module; the
        previous ``self.attr_identifier`` name is not used by any handler in
        view (TODO confirm it is not defined on the base class).
        """
        values = self.values_input.values or []
        # Validation for single-select types
        if (
            self.attribute.input_type not in [AttributeInputType.MULTISELECT]
            and len(values) > 1
        ):
            attribute_errors[AttributeInputErrors.MORE_THAN_ONE_VALUE].append(
                self.attribute_identifier
            )
        # Shared validation
        if get_duplicated_values(values):
            attribute_errors[AttributeInputErrors.DUPLICATED_VALUES].append(
                self.attribute_identifier
            )
        is_numeric = self.attribute.input_type == AttributeInputType.NUMERIC
        # max_length of the AttributeValue.name column, for the length check.
        name_field = self.attribute.values.model.name.field
        for value in values:
            # None is always invalid; blank strings are invalid for
            # non-numeric types.
            if value is None or (not is_numeric and not str(value).strip()):
                attribute_errors[AttributeInputErrors.BLANK_VALUE].append(
                    self.attribute_identifier
                )
                continue
            if is_numeric:
                try:
                    float(value)
                except (ValueError, TypeError):
                    attribute_errors[AttributeInputErrors.INVALID_INPUT].append(
                        self.attribute_identifier
                    )
            elif name_field.max_length and len(value) > name_field.max_length:
                attribute_errors[AttributeInputErrors.MAX_LENGTH_EXCEEDED].append(
                    self.attribute_identifier
                )

    def pre_save_value(self, instance: T_INSTANCE) -> list[tuple]:
        """Build save actions for the legacy plain-string values."""
        if not self.values_input.values:
            return []
        if self.attribute.input_type == AttributeInputType.NUMERIC:
            # Numeric attributes take a single value: raw string as the
            # display name, parsed float as the numeric amount.
            value = self.values_input.values[0]
            defaults = {
                "name": value,
                "numeric": float(value),
            }
            return self._update_or_create_value(instance, defaults)
        return self.prepare_attribute_values(self.attribute, self.values_input.values)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/attribute/utils/type_handlers.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 704,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/core/types/context.py | from typing import Generic, TypeVar, cast
from django.db.models import Model
from graphene.types.resolver import get_default_resolver
from ...translations.resolvers import resolve_translation
from .. import ResolveInfo
from ..context import ChannelContext
from .base import BaseObjectType
from .model import ModelObjectType
# Model type wrapped by ChannelContext for ChannelContextTypeForObjectType.
N = TypeVar("N", bound=Model)
class ChannelContextTypeForObjectType(Generic[N], BaseObjectType):
    """A Graphene type that supports resolvers' root as ChannelContext objects."""

    class Meta:
        abstract = True

    @staticmethod
    def resolver_with_context(
        attname, default_value, root: ChannelContext[N], info: ResolveInfo, **args
    ):
        """Resolve ``attname`` against the wrapped node, not the ChannelContext."""
        resolver = get_default_resolver()
        # Unwrap the ChannelContext and delegate to graphene's default resolver.
        return resolver(attname, default_value, root.node, info, **args)

    @staticmethod
    def resolve_translation(
        root: ChannelContext[N], info: ResolveInfo, *, language_code
    ):
        # Resolver for TranslationField; needs to be manually specified.
        return resolve_translation(root.node, info, language_code=language_code)
# Model type wrapped by ChannelContext for ChannelContextType.
T = TypeVar("T", bound=Model)
class ChannelContextType(ChannelContextTypeForObjectType[T], ModelObjectType[T]):
    """A Graphene type that supports resolvers' root as ChannelContext objects."""

    class Meta:
        abstract = True

    @staticmethod
    def resolve_id(root: ChannelContext[T], _info: ResolveInfo):
        """Expose the wrapped model's primary key as the node id."""
        return root.node.pk

    @classmethod
    def is_type_of(cls, root: ChannelContext[T] | T, _info: ResolveInfo) -> bool:
        """Tell graphene whether ``root`` belongs to this type."""
        # Unwrap node from ChannelContext if it didn't happen already
        node = root.node if isinstance(root, ChannelContext) else root
        if isinstance(node, cls):
            return True
        # Proxy models are compared as-is; otherwise compare against the
        # instance's concrete model.
        if cls._meta.model._meta.proxy:
            candidate = node._meta.model
        else:
            candidate = cast(type[Model], node._meta.model._meta.concrete_model)
        return candidate == cls._meta.model
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/core/types/context.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/product/tests/mutations/test_collection_add_products.py | from unittest.mock import patch
import graphene
from .....discount.utils.promotion import get_active_catalogue_promotion_rules
from .....product.error_codes import CollectionErrorCode
from ....tests.utils import (
get_graphql_content,
)
COLLECTION_ADD_PRODUCTS_MUTATION = """
mutation collectionAddProducts(
$id: ID!, $products: [ID!]!) {
collectionAddProducts(collectionId: $id, products: $products) {
collection {
products {
totalCount
}
}
errors {
field
message
code
}
}
}
"""
def test_add_products_to_collection(
    staff_api_client,
    collection,
    product_list,
    permission_manage_products,
):
    """Adding products grows the collection and dirties promotion rules."""
    # given
    collection_gid = graphene.Node.to_global_id("Collection", collection.id)
    product_gids = [
        graphene.Node.to_global_id("Product", item.pk) for item in product_list
    ]
    count_before = collection.products.count()
    variables = {"id": collection_gid, "products": product_gids}

    # when
    response = staff_api_client.post_graphql(
        COLLECTION_ADD_PRODUCTS_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )

    # then
    content = get_graphql_content(response)
    collection_data = content["data"]["collectionAddProducts"]["collection"]
    assert collection_data["products"]["totalCount"] == count_before + len(product_gids)
    for rule in get_active_catalogue_promotion_rules():
        assert rule.variants_dirty is True
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_add_products_to_collection_trigger_product_updated_webhook(
    product_updated_mock,
    staff_api_client,
    collection,
    product_list,
    permission_manage_products,
):
    """Each product added to a collection emits a product_updated webhook."""
    # given
    collection_gid = graphene.Node.to_global_id("Collection", collection.id)
    product_gids = [
        graphene.Node.to_global_id("Product", item.pk) for item in product_list
    ]
    count_before = collection.products.count()
    variables = {"id": collection_gid, "products": product_gids}

    # when
    response = staff_api_client.post_graphql(
        COLLECTION_ADD_PRODUCTS_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )

    # then
    content = get_graphql_content(response)
    collection_data = content["data"]["collectionAddProducts"]["collection"]
    assert collection_data["products"]["totalCount"] == count_before + len(product_gids)
    assert product_updated_mock.call_count == len(product_list)
def test_add_products_to_collection_on_sale_trigger_discounted_price_recalculation(
    staff_api_client, collection, product_list, permission_manage_products
):
    """Add products to a collection and verify the returned product count.

    NOTE(review): despite the name, nothing here asserts that discounted
    prices are recalculated (e.g. promotion rules marked dirty) — confirm
    whether an assertion is missing.
    """
    query = COLLECTION_ADD_PRODUCTS_MUTATION
    collection_id = graphene.Node.to_global_id("Collection", collection.id)
    product_ids = [
        graphene.Node.to_global_id("Product", product.pk) for product in product_list
    ]
    products_before = collection.products.count()
    variables = {"id": collection_id, "products": product_ids}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    data = content["data"]["collectionAddProducts"]["collection"]
    assert data["products"]["totalCount"] == products_before + len(product_ids)
def test_add_products_to_collection_with_product_without_variants(
    staff_api_client, collection, product_list, permission_manage_products
):
    """Products without variants cannot be attached to a collection."""
    # given: strip all variants from the first product
    product_list[0].variants.all().delete()
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection.id),
        "products": [
            graphene.Node.to_global_id("Product", item.pk) for item in product_list
        ],
    }

    # when
    response = staff_api_client.post_graphql(
        COLLECTION_ADD_PRODUCTS_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )

    # then
    content = get_graphql_content(response)
    error = content["data"]["collectionAddProducts"]["errors"][0]
    expected_code = CollectionErrorCode.CANNOT_MANAGE_PRODUCT_WITHOUT_VARIANT.name
    assert error["code"] == expected_code
    assert error["message"] == "Cannot manage products without variants."
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/mutations/test_collection_add_products.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/mutations/test_collection_create.py | import os
from unittest.mock import patch
import graphene
import pytest
from .....product.models import Collection
from .....product.tests.utils import create_image
from .....tests.utils import dummy_editorjs
from ....tests.utils import (
get_graphql_content,
get_multipart_request_body,
)
CREATE_COLLECTION_MUTATION = """
mutation createCollection(
$name: String!, $slug: String,
$description: JSONString, $products: [ID!],
$backgroundImage: Upload, $backgroundImageAlt: String
$metadata: [MetadataInput!], $privateMetadata: [MetadataInput!]) {
collectionCreate(
input: {
name: $name,
slug: $slug,
description: $description,
products: $products,
backgroundImage: $backgroundImage,
backgroundImageAlt: $backgroundImageAlt
metadata: $metadata
privateMetadata: $privateMetadata
}) {
collection {
name
slug
description
products {
totalCount
}
backgroundImage{
alt
}
metadata {
key
value
}
privateMetadata {
key
value
}
}
errors {
field
message
code
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.collection_updated")
@patch("saleor.plugins.manager.PluginsManager.collection_created")
def test_create_collection(
    created_webhook_mock,
    updated_webhook_mock,
    monkeypatch,
    staff_api_client,
    product_list,
    media_root,
    permission_manage_products,
):
    """Happy path: collection created with image, products and metadata;
    only the collection_created webhook fires."""
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)
    product_ids = [
        graphene.Node.to_global_id("Product", product.pk) for product in product_list
    ]
    image_file, image_name = create_image()
    image_alt = "Alt text for an image."
    name = "test-name"
    slug = "test-slug"
    description = dummy_editorjs("description", True)
    metadata_key = "md key"
    metadata_value = "md value"
    variables = {
        "name": name,
        "slug": slug,
        "description": description,
        "products": product_ids,
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
        "metadata": [{"key": metadata_key, "value": metadata_value}],
        "privateMetadata": [{"key": metadata_key, "value": metadata_value}],
    }
    body = get_multipart_request_body(
        CREATE_COLLECTION_MUTATION, variables, image_file, image_name
    )

    # when
    response = staff_api_client.post_multipart(body)
    content = get_graphql_content(response)
    data = content["data"]["collectionCreate"]["collection"]

    # then
    assert data["name"] == name
    assert data["slug"] == slug
    assert data["description"] == description
    assert data["products"]["totalCount"] == len(product_ids)
    collection = Collection.objects.get(slug=slug)
    assert collection.background_image.file
    # `ext` rather than `format` — avoid shadowing the builtin `format`.
    img_name, ext = os.path.splitext(image_file._name)
    file_name = collection.background_image.name
    # Storage may de-duplicate file names, so only prefix/suffix are stable.
    assert file_name != image_file._name
    assert file_name.startswith(f"collection-backgrounds/{img_name}")
    assert file_name.endswith(ext)
    assert data["backgroundImage"]["alt"] == image_alt
    assert collection.metadata == {metadata_key: metadata_value}
    assert collection.private_metadata == {metadata_key: metadata_value}
    created_webhook_mock.assert_called_once()
    updated_webhook_mock.assert_not_called()
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_create_collection_trigger_product_update_webhook(
    product_updated_mock,
    staff_api_client,
    product_list,
    media_root,
    permission_manage_products,
):
    """Creating a collection with products emits product_updated per product."""
    # given
    product_gids = [
        graphene.Node.to_global_id("Product", item.pk) for item in product_list
    ]
    name = "test-name"
    slug = "test-slug"
    description = dummy_editorjs("description", True)
    variables = {
        "name": name,
        "slug": slug,
        "description": description,
        "products": product_gids,
    }

    # when
    response = staff_api_client.post_graphql(
        CREATE_COLLECTION_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["collectionCreate"]["collection"]
    assert data["name"] == name
    assert data["slug"] == slug
    assert data["description"] == description
    assert data["products"]["totalCount"] == len(product_gids)
    assert product_updated_mock.call_count == len(product_gids)
def test_create_collection_without_background_image(
    monkeypatch, staff_api_client, product_list, permission_manage_products
):
    """The backgroundImage input is optional."""
    # given
    slug = "test-slug"
    variables = {"name": "test-name", "slug": slug}

    # when
    response = staff_api_client.post_graphql(
        CREATE_COLLECTION_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    data = get_graphql_content(response)["data"]["collectionCreate"]
    assert not data["errors"]
    assert data["collection"]["slug"] == slug
@pytest.mark.parametrize(
    ("input_slug", "expected_slug"),
    [
        ("test-slug", "test-slug"),
        (None, "test-collection"),
        ("", "test-collection"),
        ("わたし-わ-にっぽん-です", "わたし-わ-にっぽん-です"),
    ],
)
def test_create_collection_with_given_slug(
    staff_api_client, permission_manage_products, input_slug, expected_slug, channel_USD
):
    """Given slugs are kept; missing/blank slugs are derived from the name."""
    # given
    variables = {"name": "Test collection", "slug": input_slug}

    # when
    response = staff_api_client.post_graphql(
        CREATE_COLLECTION_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    data = get_graphql_content(response)["data"]["collectionCreate"]
    assert not data["errors"]
    assert data["collection"]["slug"] == expected_slug
def test_create_collection_name_with_unicode(
    staff_api_client, permission_manage_products, channel_USD
):
    """Unicode names are stored verbatim and transliterated into the slug."""
    # given
    name = "わたし わ にっぽん です"

    # when
    response = staff_api_client.post_graphql(
        CREATE_COLLECTION_MUTATION,
        {"name": name},
        permissions=[permission_manage_products],
    )

    # then
    data = get_graphql_content(response)["data"]["collectionCreate"]
    assert not data["errors"]
    assert data["collection"]["name"] == name
    assert data["collection"]["slug"] == "watasi-wa-nitupon-desu"
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/mutations/test_collection_create.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/mutations/test_collection_delete.py | from unittest.mock import MagicMock, patch
import graphene
import pytest
from django.core.files import File
from .....attribute.models import AttributeValue
from .....attribute.utils import associate_attribute_values_to_instance
from .....discount.utils.promotion import get_active_catalogue_promotion_rules
from .....thumbnail.models import Thumbnail
from ....tests.utils import (
get_graphql_content,
)
DELETE_COLLECTION_MUTATION = """
mutation deleteCollection($id: ID!) {
collectionDelete(id: $id) {
collection {
name
}
errors {
field
message
code
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.collection_deleted")
def test_delete_collection(
    deleted_webhook_mock,
    staff_api_client,
    collection,
    product_list,
    permission_manage_products,
):
    """Deletion removes the collection, fires the webhook, dirties rules."""
    # given
    collection.products.set(product_list)
    variables = {"id": graphene.Node.to_global_id("Collection", collection.id)}

    # when
    response = staff_api_client.post_graphql(
        DELETE_COLLECTION_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["collectionDelete"]["collection"]
    assert data["name"] == collection.name
    with pytest.raises(collection._meta.model.DoesNotExist):
        collection.refresh_from_db()
    deleted_webhook_mock.assert_called_once()
    for rule in get_active_catalogue_promotion_rules():
        assert rule.variants_dirty is True
@patch("saleor.core.tasks.delete_from_storage_task.delay")
def test_delete_collection_with_background_image(
    delete_from_storage_task_mock,
    staff_api_client,
    collection_with_image,
    permission_manage_products,
):
    """Deleting a collection schedules deletion of its image files too."""
    # given
    query = DELETE_COLLECTION_MUTATION
    collection = collection_with_image
    # Two thumbnails of the existing background image.
    thumbnail_mock = MagicMock(spec=File)
    thumbnail_mock.name = "thumbnail_image.jpg"
    Thumbnail.objects.create(collection=collection, size=128, image=thumbnail_mock)
    Thumbnail.objects.create(collection=collection, size=200, image=thumbnail_mock)

    collection_id = collection.id
    variables = {"id": graphene.Node.to_global_id("Collection", collection.id)}

    # when
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["collectionDelete"]["collection"]
    assert data["name"] == collection.name
    with pytest.raises(collection._meta.model.DoesNotExist):
        collection.refresh_from_db()

    # ensure all related thumbnails has been deleted
    assert not Thumbnail.objects.filter(collection_id=collection_id)
    # 3 storage deletions: both thumbnails plus the background image itself.
    assert delete_from_storage_task_mock.call_count == 3
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_delete_collection_trigger_product_updated_webhook(
    product_updated_mock,
    staff_api_client,
    collection,
    product_list,
    permission_manage_products,
):
    """Every product of a deleted collection gets a product_updated webhook."""
    # given
    collection.products.add(*product_list)
    variables = {"id": graphene.Node.to_global_id("Collection", collection.id)}

    # when
    response = staff_api_client.post_graphql(
        DELETE_COLLECTION_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["collectionDelete"]["collection"]
    assert data["name"] == collection.name
    with pytest.raises(collection._meta.model.DoesNotExist):
        collection.refresh_from_db()
    assert product_updated_mock.call_count == len(product_list)
def test_collection_delete_removes_reference_to_product(
    staff_api_client,
    collection,
    product_type_product_reference_attribute,
    product_type,
    product,
    permission_manage_products,
):
    """Deleting a collection drops product attribute values referencing it."""
    # given
    product_type.product_attributes.add(product_type_product_reference_attribute)
    attr_value = AttributeValue.objects.create(
        attribute=product_type_product_reference_attribute,
        name=collection.name,
        slug=f"{product.pk}_{collection.pk}",
        reference_collection=collection,
    )
    associate_attribute_values_to_instance(
        product, {product_type_product_reference_attribute.pk: [attr_value]}
    )
    variables = {"id": graphene.Node.to_global_id("Collection", collection.pk)}

    # when
    response = staff_api_client.post_graphql(
        DELETE_COLLECTION_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    data = get_graphql_content(response)["data"]["collectionDelete"]
    with pytest.raises(attr_value._meta.model.DoesNotExist):
        attr_value.refresh_from_db()
    with pytest.raises(collection._meta.model.DoesNotExist):
        collection.refresh_from_db()
    assert not data["errors"]
def test_collection_delete_removes_reference_to_product_variant(
    staff_api_client,
    collection,
    product_type_product_reference_attribute,
    product_type,
    product_list,
    permission_manage_products,
):
    """Deleting a collection drops variant attribute values referencing it."""
    # given
    variant = product_list[0].variants.first()
    product_type.variant_attributes.set([product_type_product_reference_attribute])
    attr_value = AttributeValue.objects.create(
        attribute=product_type_product_reference_attribute,
        name=collection.name,
        slug=f"{variant.pk}_{collection.pk}",
        reference_collection=collection,
    )
    associate_attribute_values_to_instance(
        variant, {product_type_product_reference_attribute.pk: [attr_value]}
    )
    variables = {"id": graphene.Node.to_global_id("Collection", collection.pk)}

    # when
    response = staff_api_client.post_graphql(
        DELETE_COLLECTION_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    data = get_graphql_content(response)["data"]["collectionDelete"]
    with pytest.raises(attr_value._meta.model.DoesNotExist):
        attr_value.refresh_from_db()
    with pytest.raises(collection._meta.model.DoesNotExist):
        collection.refresh_from_db()
    assert not data["errors"]
def test_collection_delete_removes_reference_to_page(
    staff_api_client,
    collection,
    page,
    page_type_product_reference_attribute,
    permission_manage_products,
):
    """Deleting a collection drops page attribute values referencing it."""
    # given
    page_type = page.page_type
    page_type.page_attributes.add(page_type_product_reference_attribute)
    attr_value = AttributeValue.objects.create(
        attribute=page_type_product_reference_attribute,
        name=page.title,
        slug=f"{page.pk}_{collection.pk}",
        reference_collection=collection,
    )
    associate_attribute_values_to_instance(
        page, {page_type_product_reference_attribute.pk: [attr_value]}
    )
    variables = {"id": graphene.Node.to_global_id("Collection", collection.pk)}

    # when
    response = staff_api_client.post_graphql(
        DELETE_COLLECTION_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    data = get_graphql_content(response)["data"]["collectionDelete"]
    with pytest.raises(attr_value._meta.model.DoesNotExist):
        attr_value.refresh_from_db()
    with pytest.raises(collection._meta.model.DoesNotExist):
        collection.refresh_from_db()
    assert not data["errors"]
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/mutations/test_collection_delete.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/mutations/test_collection_remove_products.py | from unittest.mock import patch
import graphene
from .....discount.utils.promotion import get_active_catalogue_promotion_rules
from ....tests.utils import (
get_graphql_content,
)
COLLECTION_REMOVE_PRODUCTS_MUTATION = """
mutation collectionRemoveProducts(
$id: ID!, $products: [ID!]!) {
collectionRemoveProducts(collectionId: $id, products: $products) {
collection {
products {
totalCount
}
}
}
}
"""
def test_remove_products_from_collection(
    staff_api_client,
    collection,
    product_list,
    permission_manage_products,
):
    """Removing products shrinks the collection and dirties promotion rules."""
    # given
    collection.products.add(*product_list)
    count_before = collection.products.count()
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection.id),
        "products": [
            graphene.Node.to_global_id("Product", item.pk) for item in product_list
        ],
    }

    # when
    response = staff_api_client.post_graphql(
        COLLECTION_REMOVE_PRODUCTS_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )

    # then
    content = get_graphql_content(response)
    collection_data = content["data"]["collectionRemoveProducts"]["collection"]
    assert collection_data["products"]["totalCount"] == count_before - len(product_list)
    for rule in get_active_catalogue_promotion_rules():
        assert rule.variants_dirty is True
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_remove_products_from_collection_trigger_product_updated_webhook(
    product_updated_mock,
    staff_api_client,
    collection,
    product_list,
    permission_manage_products,
):
    """Each removed product emits a product_updated webhook."""
    # given
    collection.products.add(*product_list)
    count_before = collection.products.count()
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection.id),
        "products": [
            graphene.Node.to_global_id("Product", item.pk) for item in product_list
        ],
    }

    # when
    response = staff_api_client.post_graphql(
        COLLECTION_REMOVE_PRODUCTS_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )

    # then
    content = get_graphql_content(response)
    collection_data = content["data"]["collectionRemoveProducts"]["collection"]
    assert collection_data["products"]["totalCount"] == count_before - len(product_list)
    assert product_updated_mock.call_count == len(product_list)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/mutations/test_collection_remove_products.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/mutations/test_collection_update.py | from unittest.mock import MagicMock, Mock, patch
import graphene
import pytest
from django.core.files import File
from .....product.error_codes import ProductErrorCode
from .....product.models import Collection
from .....product.tests.utils import create_image, create_zip_file_with_image_ext
from .....tests.utils import dummy_editorjs
from .....thumbnail.models import Thumbnail
from ....tests.utils import (
get_graphql_content,
get_multipart_request_body,
)
@patch("saleor.plugins.manager.PluginsManager.collection_updated")
@patch("saleor.plugins.manager.PluginsManager.collection_created")
def test_update_collection(
    created_webhook_mock,
    updated_webhook_mock,
    monkeypatch,
    staff_api_client,
    collection,
    permission_manage_products,
):
    """Updating a collection changes its fields, merges metadata, and fires
    only the collection_updated webhook. (Removed a redundant
    `description = description` self-assignment.)"""
    # given
    query = """
        mutation updateCollection(
            $name: String!, $slug: String!, $description: JSONString, $id: ID!,
            $metadata: [MetadataInput!], $privateMetadata: [MetadataInput!]
        ) {
            collectionUpdate(
                id: $id, input: {
                    name: $name, slug: $slug, description: $description,
                    metadata: $metadata, privateMetadata: $privateMetadata
                }) {
                collection {
                    name
                    slug
                    description
                    metadata {
                        key
                        value
                    }
                    privateMetadata {
                        key
                        value
                    }
                }
            }
        }
    """
    description = dummy_editorjs("test description", True)
    old_meta = {"old": "meta"}
    collection.store_value_in_metadata(items=old_meta)
    collection.store_value_in_private_metadata(items=old_meta)
    collection.save(update_fields=["metadata", "private_metadata"])
    metadata_key = "md key"
    metadata_value = "md value"
    name = "new-name"
    slug = "new-slug"
    variables = {
        "name": name,
        "slug": slug,
        "description": description,
        "id": graphene.Node.to_global_id("Collection", collection.id),
        "metadata": [{"key": metadata_key, "value": metadata_value}],
        "privateMetadata": [{"key": metadata_key, "value": metadata_value}],
    }

    # when
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    data = content["data"]["collectionUpdate"]["collection"]
    collection.refresh_from_db()

    # then
    assert data["name"] == name
    assert data["slug"] == slug
    # Metadata updates merge with, not replace, pre-existing entries.
    assert collection.metadata == {metadata_key: metadata_value, **old_meta}
    assert collection.private_metadata == {metadata_key: metadata_value, **old_meta}
    created_webhook_mock.assert_not_called()
    updated_webhook_mock.assert_called_once()
def test_update_collection_metadata_marks_prices_to_recalculate(
    staff_api_client,
    collection,
    permission_manage_products,
    catalogue_promotion,
    product,
):
    """A metadata-only update still marks promotion rule variants as dirty."""
    # given
    query = """
        mutation updateCollection(
            $id: ID!,
            $metadata: [MetadataInput!]
        ) {
            collectionUpdate(
                id: $id, input: {
                    metadata: $metadata,
                }) {
                collection {
                    name
                    slug
                    description
                    metadata {
                        key
                        value
                    }
                    privateMetadata {
                        key
                        value
                    }
                }
            }
        }
    """
    collection.products.set([product])
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection.id),
        "metadata": [{"key": "md key", "value": "md value"}],
    }

    # when
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    get_graphql_content(response)
    collection.refresh_from_db()

    # then
    assert not catalogue_promotion.rules.filter(variants_dirty=False).exists()
MUTATION_UPDATE_COLLECTION_WITH_BACKGROUND_IMAGE = """
mutation updateCollection($name: String!, $slug: String!, $id: ID!,
$backgroundImage: Upload, $backgroundImageAlt: String) {
collectionUpdate(
id: $id, input: {
name: $name,
slug: $slug,
backgroundImage: $backgroundImage,
backgroundImageAlt: $backgroundImageAlt,
}
) {
collection {
slug
backgroundImage(size: 0) {
alt
url
}
}
errors {
field
message
}
}
}"""
@patch("saleor.core.tasks.delete_from_storage_task.delay")
def test_update_collection_with_background_image(
    delete_from_storage_task_mock,
    staff_api_client,
    collection_with_image,
    permission_manage_products,
    media_root,
):
    """Replacing the background image removes the old image's thumbnails."""
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)

    image_file, image_name = create_image()
    image_alt = "Alt text for an image."

    collection = collection_with_image

    # A thumbnail of the *old* background image; it must be deleted (and its
    # file scheduled for storage deletion) once the image is replaced.
    size = 128
    thumbnail_mock = MagicMock(spec=File)
    thumbnail_mock.name = "thumbnail_image.jpg"
    thumbnail = Thumbnail.objects.create(
        collection=collection, size=size, image=thumbnail_mock
    )
    img_path = thumbnail.image.name

    variables = {
        "name": "new-name",
        "slug": "new-slug",
        "id": graphene.Node.to_global_id("Collection", collection.id),
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
    }
    body = get_multipart_request_body(
        MUTATION_UPDATE_COLLECTION_WITH_BACKGROUND_IMAGE,
        variables,
        image_file,
        image_name,
    )

    # when
    response = staff_api_client.post_multipart(body)

    # then
    content = get_graphql_content(response)
    data = content["data"]["collectionUpdate"]
    assert not data["errors"]
    slug = data["collection"]["slug"]
    collection = Collection.objects.get(slug=slug)
    assert data["collection"]["backgroundImage"]["alt"] == image_alt
    assert data["collection"]["backgroundImage"]["url"].startswith(
        f"https://example.com/media/collection-backgrounds/{image_name}"
    )

    # ensure that thumbnails for old background image has been deleted
    assert not Thumbnail.objects.filter(collection_id=collection.id)
    delete_from_storage_task_mock.assert_called_once_with(img_path)
@patch("saleor.core.tasks.delete_from_storage_task.delay")
def test_update_collection_invalid_background_image_content_type(
    delete_from_storage_task_mock,
    staff_api_client,
    collection,
    permission_manage_products,
    media_root,
):
    """A non-image upload is rejected and existing thumbnails are kept."""
    # given: a zip file disguised with an image extension
    image_file, image_name = create_zip_file_with_image_ext()
    image_alt = "Alt text for an image."

    thumbnail_mock = MagicMock(spec=File)
    thumbnail_mock.name = "thumbnail_image.jpg"
    Thumbnail.objects.create(collection=collection, size=128, image=thumbnail_mock)

    variables = {
        "name": "new-name",
        "slug": "new-slug",
        "id": graphene.Node.to_global_id("Collection", collection.id),
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
    }
    body = get_multipart_request_body(
        MUTATION_UPDATE_COLLECTION_WITH_BACKGROUND_IMAGE,
        variables,
        image_file,
        image_name,
    )

    # when
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products]
    )

    # then
    data = get_graphql_content(response)["data"]["collectionUpdate"]
    first_error = data["errors"][0]
    assert first_error["field"] == "backgroundImage"
    assert first_error["message"] == "Invalid file type."
    # Thumbnails of the old background image must remain untouched.
    assert Thumbnail.objects.filter(collection_id=collection.id)
    delete_from_storage_task_mock.assert_not_called()
@patch("saleor.core.tasks.delete_from_storage_task.delay")
def test_update_collection_invalid_background_image(
    delete_from_storage_task_mock,
    monkeypatch,
    staff_api_client,
    collection,
    permission_manage_products,
    media_root,
):
    """A corrupted image upload is rejected and old thumbnails are kept."""
    # given
    image_file, image_name = create_image()
    image_alt = "Alt text for an image."

    # Force the validator's Image.open to fail, simulating a file with an
    # image extension but corrupt content.
    error_msg = "Test syntax error"
    image_file_mock = Mock(side_effect=SyntaxError(error_msg))
    monkeypatch.setattr(
        "saleor.graphql.core.validators.file.Image.open", image_file_mock
    )

    size = 128
    thumbnail_mock = MagicMock(spec=File)
    thumbnail_mock.name = "thumbnail_image.jpg"
    Thumbnail.objects.create(collection=collection, size=size, image=thumbnail_mock)

    variables = {
        "name": "new-name",
        "slug": "new-slug",
        "id": graphene.Node.to_global_id("Collection", collection.id),
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
    }
    body = get_multipart_request_body(
        MUTATION_UPDATE_COLLECTION_WITH_BACKGROUND_IMAGE,
        variables,
        image_file,
        image_name,
    )

    # when
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["collectionUpdate"]
    assert data["errors"][0]["field"] == "backgroundImage"
    # The validator surfaces the underlying exception message.
    assert error_msg in data["errors"][0]["message"]
    # ensure that thumbnails for old background image hasn't been deleted
    assert Thumbnail.objects.filter(collection_id=collection.id)
    delete_from_storage_task_mock.assert_not_called()
UPDATE_COLLECTION_SLUG_MUTATION = """
mutation($id: ID!, $slug: String) {
collectionUpdate(
id: $id
input: {
slug: $slug
}
) {
collection{
name
slug
}
errors {
field
message
code
}
}
}
"""
@pytest.mark.parametrize(
    ("input_slug", "expected_slug", "error_message"),
    [
        ("test-slug", "test-slug", None),
        ("", "", "Slug value cannot be blank."),
        (None, "", "Slug value cannot be blank."),
    ],
)
def test_update_collection_slug(
    staff_api_client,
    collection,
    permission_manage_products,
    input_slug,
    expected_slug,
    error_message,
):
    """Valid slugs are saved; blank/null slugs raise a REQUIRED error."""
    assert collection.slug != input_slug
    variables = {
        "slug": input_slug,
        "id": graphene.Node.to_global_id("Collection", collection.id),
    }
    response = staff_api_client.post_graphql(
        UPDATE_COLLECTION_SLUG_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )
    data = get_graphql_content(response)["data"]["collectionUpdate"]
    errors = data["errors"]
    if error_message:
        assert errors
        assert errors[0]["field"] == "slug"
        assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
    else:
        assert not errors
        assert data["collection"]["slug"] == expected_slug
def test_update_collection_slug_exists(
    staff_api_client, collection, permission_manage_products
):
    """Updating to a slug already used by another collection fails UNIQUE."""
    input_slug = "test-slug"
    # Clone the fixture collection so the target slug is already taken.
    duplicate = Collection.objects.get(pk=collection.pk)
    duplicate.pk = None
    duplicate.slug = input_slug
    duplicate.name = "Second collection"
    duplicate.save()
    assert collection.slug != input_slug
    variables = {
        "slug": input_slug,
        "id": graphene.Node.to_global_id("Collection", collection.id),
    }
    response = staff_api_client.post_graphql(
        UPDATE_COLLECTION_SLUG_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )
    errors = get_graphql_content(response)["data"]["collectionUpdate"]["errors"]
    assert errors
    assert errors[0]["field"] == "slug"
    assert errors[0]["code"] == ProductErrorCode.UNIQUE.name
@pytest.mark.parametrize(
    ("input_slug", "expected_slug", "input_name", "error_message", "error_field"),
    [
        ("test-slug", "test-slug", "New name", None, None),
        ("", "", "New name", "Slug value cannot be blank.", "slug"),
        (None, "", "New name", "Slug value cannot be blank.", "slug"),
        ("test-slug", "", None, "This field cannot be blank.", "name"),
        ("test-slug", "", "", "This field cannot be blank.", "name"),
        (None, None, None, "Slug value cannot be blank.", "slug"),
    ],
)
def test_update_collection_slug_and_name(
    staff_api_client,
    collection,
    permission_manage_products,
    input_slug,
    expected_slug,
    input_name,
    error_message,
    error_field,
):
    """Updating name and slug together validates both fields.

    Blank/None values produce a REQUIRED error on the offending field;
    valid values are persisted and echoed back in the payload.
    """
    query = """
            mutation($id: ID!, $name: String, $slug: String) {
                collectionUpdate(
                    id: $id
                    input: {
                        name: $name
                        slug: $slug
                    }
                ) {
                    collection{
                        name
                        slug
                    }
                    errors {
                        field
                        message
                        code
                    }
                }
            }
        """
    old_name = collection.name
    old_slug = collection.slug
    assert input_slug != old_slug
    assert input_name != old_name
    node_id = graphene.Node.to_global_id("Collection", collection.id)
    variables = {"slug": input_slug, "name": input_name, "id": node_id}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    collection.refresh_from_db()
    data = content["data"]["collectionUpdate"]
    errors = data["errors"]
    if not error_message:
        # Assert on errors first: an unexpected error would otherwise show
        # up as a TypeError on the null ``collection`` payload below.
        assert not errors
        assert data["collection"]["name"] == input_name == collection.name
        assert data["collection"]["slug"] == input_slug == collection.slug
    else:
        assert errors
        assert errors[0]["field"] == error_field
        assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
def test_update_collection_mutation_remove_background_image(
    staff_api_client, collection_with_image, permission_manage_products
):
    """Passing a null backgroundImage clears the collection's image."""
    query = """
        mutation updateCollection($id: ID!, $backgroundImage: Upload) {
            collectionUpdate(
                id: $id, input: {
                    backgroundImage: $backgroundImage
                }
            ) {
                collection {
                    backgroundImage{
                        url
                    }
                }
                errors {
                    field
                    message
                }
            }
        }
    """
    # Sanity check: the fixture must start out with an image to remove.
    assert collection_with_image.background_image
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection_with_image.id),
        "backgroundImage": None,
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    assert not content["data"]["collectionUpdate"]["collection"]["backgroundImage"]
    collection_with_image.refresh_from_db()
    assert not collection_with_image.background_image
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/mutations/test_collection_update.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 461,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/test_collection_query.py | import logging
from unittest.mock import MagicMock
import graphene
from django.core.files import File
from .....product.tests.utils import create_image
from .....thumbnail.models import Thumbnail
from ....core.enums import LanguageCodeEnum, ThumbnailFormatEnum
from ....tests.utils import get_graphql_content, get_graphql_content_from_response
QUERY_COLLECTION = """
query ($id: ID, $slug: String, $channel: String, $slugLanguageCode: LanguageCodeEnum){
collection(
id: $id,
slug: $slug,
channel: $channel,
slugLanguageCode: $slugLanguageCode,
) {
id
name
}
}
"""
def test_collection_query_by_id(user_api_client, published_collection, channel_USD):
    """A published collection can be fetched by its global ID."""
    collection_gid = graphene.Node.to_global_id("Collection", published_collection.pk)
    response = user_api_client.post_graphql(
        QUERY_COLLECTION,
        variables={"id": collection_gid, "channel": channel_USD.slug},
    )
    data = get_graphql_content(response)["data"]["collection"]
    assert data is not None
    assert data["name"] == published_collection.name
def test_collection_query_unpublished_collection_by_id_as_app(
    app_api_client, unpublished_collection, permission_manage_products, channel_USD
):
    """An app holding manage-products may read unpublished collections."""
    # given
    collection_gid = graphene.Node.to_global_id(
        "Collection", unpublished_collection.pk
    )
    # when
    response = app_api_client.post_graphql(
        QUERY_COLLECTION,
        variables={"id": collection_gid, "channel": channel_USD.slug},
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )
    # then
    data = get_graphql_content(response)["data"]["collection"]
    assert data is not None
    assert data["name"] == unpublished_collection.name
def test_collection_query_by_slug(user_api_client, published_collection, channel_USD):
    """A published collection can be fetched by its slug."""
    response = user_api_client.post_graphql(
        QUERY_COLLECTION,
        variables={
            "slug": published_collection.slug,
            "channel": channel_USD.slug,
        },
    )
    data = get_graphql_content(response)["data"]["collection"]
    assert data is not None
    assert data["name"] == published_collection.name
def test_collection_query_by_translated_slug(
    user_api_client, published_collection, collection_translation_fr, channel_USD
):
    """A collection resolves by its translated slug when the language is set."""
    response = user_api_client.post_graphql(
        QUERY_COLLECTION,
        variables={
            "slug": collection_translation_fr.slug,
            "channel": channel_USD.slug,
            "slugLanguageCode": LanguageCodeEnum.FR.name,
        },
    )
    data = get_graphql_content(response)["data"]["collection"]
    assert data is not None
    assert data["name"] == published_collection.name
def test_collection_query_unpublished_collection_by_slug_as_staff(
    staff_api_client, unpublished_collection, permission_manage_products, channel_USD
):
    """Staff with manage-products permission can see unpublished collections."""
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)
    variables = {"slug": unpublished_collection.slug, "channel": channel_USD.slug}
    # when
    response = staff_api_client.post_graphql(QUERY_COLLECTION, variables=variables)
    # then
    data = get_graphql_content(response)["data"]["collection"]
    assert data is not None
    assert data["name"] == unpublished_collection.name
def test_collection_query_unpublished_collection_by_slug_and_anonymous_user(
    api_client, unpublished_collection, channel_USD
):
    """Anonymous users must not see unpublished collections."""
    response = api_client.post_graphql(
        QUERY_COLLECTION,
        variables={
            "slug": unpublished_collection.slug,
            "channel": channel_USD.slug,
        },
    )
    assert get_graphql_content(response)["data"]["collection"] is None
def test_collection_query_error_when_id_and_slug_provided(
    user_api_client,
    collection,
    graphql_log_handler,
):
    """Passing both ``id`` and ``slug`` yields a single handled GraphQL error."""
    # given
    logging.getLogger("saleor.graphql.errors.handled").setLevel(logging.DEBUG)
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection.pk),
        "slug": collection.slug,
    }
    # when
    response = user_api_client.post_graphql(QUERY_COLLECTION, variables=variables)
    # then
    assert graphql_log_handler.messages == [
        "saleor.graphql.errors.handled[DEBUG].GraphQLError"
    ]
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["errors"]) == 1
def test_collection_query_error_when_no_param(
    user_api_client,
    collection,
    graphql_log_handler,
):
    """Omitting both ``id`` and ``slug`` yields a single handled GraphQL error."""
    # given
    logging.getLogger("saleor.graphql.errors.handled").setLevel(logging.DEBUG)
    # when
    response = user_api_client.post_graphql(QUERY_COLLECTION, variables={})
    # then
    assert graphql_log_handler.messages == [
        "saleor.graphql.errors.handled[DEBUG].GraphQLError"
    ]
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["errors"]) == 1
GET_FILTERED_PRODUCTS_COLLECTION_QUERY = """
query CollectionProducts(
$id: ID!,
$channel: String,
$filters: ProductFilterInput,
$where: ProductWhereInput,
$search: String,
) {
collection(id: $id, channel: $channel) {
products(first: 10, filter: $filters, where: $where, search: $search) {
edges {
node {
id
assignedAttributes(limit:10) {
attribute {
choices(first: 10) {
edges {
node {
slug
}
}
}
}
}
attributes {
attribute {
choices(first: 10) {
edges {
node {
slug
}
}
}
}
}
}
}
}
}
}
"""
def test_filter_collection_products(
    user_api_client, product_list, published_collection, channel_USD, channel_PLN
):
    """Filtering collection products by name search returns the match."""
    # given
    published_collection.products.add(*product_list)
    target = product_list[0]
    variables = {
        "id": graphene.Node.to_global_id("Collection", published_collection.pk),
        "filters": {"search": target.name},
        "channel": channel_USD.slug,
    }
    # when
    response = user_api_client.post_graphql(
        GET_FILTERED_PRODUCTS_COLLECTION_QUERY, variables
    )
    # then
    content = get_graphql_content(response)
    node = content["data"]["collection"]["products"]["edges"][0]["node"]
    assert node["id"] == graphene.Node.to_global_id("Product", target.pk)
def test_filter_collection_published_products(
    user_api_client, product_list, published_collection, channel_USD, channel_PLN
):
    """The isPublished filter hides products with an unpublished listing."""
    # given
    published_collection.products.add(*product_list)
    unpublished_product = product_list[0]
    listing = unpublished_product.channel_listings.first()
    listing.is_published = False
    listing.save(update_fields=["is_published"])
    hidden_id = graphene.Node.to_global_id("Product", unpublished_product.id)
    variables = {
        "id": graphene.Node.to_global_id("Collection", published_collection.pk),
        "filters": {"isPublished": True},
        "channel": channel_USD.slug,
    }
    # when
    response = user_api_client.post_graphql(
        GET_FILTERED_PRODUCTS_COLLECTION_QUERY, variables
    )
    # then
    edges = get_graphql_content(response)["data"]["collection"]["products"]["edges"]
    assert len(edges) == len(product_list) - 1
    assert hidden_id not in {edge["node"]["id"] for edge in edges}
def test_filter_collection_products_by_multiple_attributes(
    user_api_client,
    published_collection,
    product_with_two_variants,
    product_with_multiple_values_attributes,
    channel_USD,
):
    """Filtering by an attribute value returns only the product carrying it.

    The matching attribute choices must be exposed identically on both the
    ``attributes`` and ``assignedAttributes`` fields.
    """
    # given
    published_collection.products.set(
        [product_with_two_variants, product_with_multiple_values_attributes]
    )
    assert published_collection.products.count() == 2
    filters = {
        "attributes": [{"slug": "modes", "values": ["eco"]}],
    }
    variables = {
        "id": graphene.Node.to_global_id("Collection", published_collection.pk),
        "filters": filters,
        "channel": channel_USD.slug,
    }
    # when
    response = user_api_client.post_graphql(
        GET_FILTERED_PRODUCTS_COLLECTION_QUERY, variables
    )
    # then
    content = get_graphql_content(response)
    products_data = content["data"]["collection"]["products"]["edges"]
    # Check the count before indexing so a miss fails with a clear assertion.
    assert len(products_data) == 1
    product = products_data[0]["node"]
    assert product["id"] == graphene.Node.to_global_id(
        "Product", product_with_multiple_values_attributes.pk
    )
    # Both attribute fields must report the same choices.
    expected_attribute_data = [
        {
            "attribute": {
                "choices": {
                    "edges": [
                        {"node": {"slug": "eco"}},
                        {"node": {"slug": "power"}},
                    ]
                }
            }
        }
    ]
    assert product["attributes"] == expected_attribute_data
    assert product["assignedAttributes"] == expected_attribute_data
def test_filter_where_collection_products(
    user_api_client, product_list, published_collection, channel_USD, channel_PLN
):
    """AND-combined ``where`` clauses narrow products by slug and price."""
    # given
    published_collection.products.add(*product_list)
    where_clause = {
        "AND": [
            {"slug": {"oneOf": ["test-product-a", "test-product-b"]}},
            {"price": {"range": {"gte": 15}}},
        ]
    }
    variables = {
        "id": graphene.Node.to_global_id("Collection", published_collection.pk),
        "channel": channel_USD.slug,
        "where": where_clause,
    }
    # when
    response = user_api_client.post_graphql(
        GET_FILTERED_PRODUCTS_COLLECTION_QUERY, variables
    )
    # then
    edges = get_graphql_content(response)["data"]["collection"]["products"]["edges"]
    assert len(edges) == 1
    expected_id = graphene.Node.to_global_id("Product", product_list[1].pk)
    assert edges[0]["node"]["id"] == expected_id
def test_search_collection_products(
    user_api_client, product_list, published_collection, channel_USD, channel_PLN
):
    """The ``search`` argument finds a collection product by name."""
    # given
    published_collection.products.add(*product_list)
    target = product_list[0]
    variables = {
        "id": graphene.Node.to_global_id("Collection", published_collection.pk),
        "search": target.name,
        "channel": channel_USD.slug,
    }
    # when
    response = user_api_client.post_graphql(
        GET_FILTERED_PRODUCTS_COLLECTION_QUERY, variables
    )
    # then
    content = get_graphql_content(response)
    node = content["data"]["collection"]["products"]["edges"][0]["node"]
    assert node["id"] == graphene.Node.to_global_id("Product", target.pk)
FETCH_COLLECTION_IMAGE_QUERY = """
query fetchCollection(
$id: ID!, $channel: String, $size: Int, $format: ThumbnailFormatEnum
){
collection(id: $id, channel: $channel) {
name
backgroundImage(size: $size, format: $format) {
url
alt
}
}
}
"""
def test_collection_image_query_with_size_and_format_proxy_url_returned(
    user_api_client, published_collection, media_root, channel_USD
):
    """Requesting a sized image with an explicit format returns a proxy URL.

    The requested size 120 shows up as 128 in the URL — presumably mapped to
    the nearest supported thumbnail size; confirm against the thumbnail utils.
    """
    # given
    alt_text = "Alt text for an image."
    collection = published_collection
    background_mock = MagicMock(spec=File)
    background_mock.name = "image.jpg"
    collection.background_image = background_mock
    collection.background_image_alt = alt_text
    collection.save(update_fields=["background_image", "background_image_alt"])
    format = ThumbnailFormatEnum.WEBP.name
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)
    variables = {
        "id": collection_id,
        "channel": channel_USD.slug,
        "size": 120,
        "format": format,
    }
    # when
    response = user_api_client.post_graphql(FETCH_COLLECTION_IMAGE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    data = content["data"]["collection"]
    assert data["backgroundImage"]["alt"] == alt_text
    expected_url = (
        f"https://example.com/thumbnail/{collection_id}/128/{format.lower()}/"
    )
    assert data["backgroundImage"]["url"] == expected_url
def test_collection_image_query_with_size_proxy_url_returned(
    user_api_client, published_collection, media_root, channel_USD
):
    """Without a stored thumbnail, a sized request returns a proxy URL."""
    # given
    alt_text = "Alt text for an image."
    collection = published_collection
    image_mock = MagicMock(spec=File)
    image_mock.name = "image.jpg"
    collection.background_image = image_mock
    collection.background_image_alt = alt_text
    collection.save(update_fields=["background_image", "background_image_alt"])
    requested_size = 128
    collection_gid = graphene.Node.to_global_id("Collection", collection.pk)
    variables = {
        "id": collection_gid,
        "channel": channel_USD.slug,
        "size": requested_size,
    }
    # when
    response = user_api_client.post_graphql(FETCH_COLLECTION_IMAGE_QUERY, variables)
    # then
    background = get_graphql_content(response)["data"]["collection"]["backgroundImage"]
    assert background["alt"] == alt_text
    assert background["url"] == (
        f"https://example.com/thumbnail/{collection_gid}/{requested_size}/"
    )
def test_collection_image_query_with_size_thumbnail_url_returned(
    user_api_client, published_collection, media_root, channel_USD
):
    """When a stored thumbnail covers the requested size, its media URL is
    returned directly instead of a proxy URL."""
    # given
    alt_text = "Alt text for an image."
    collection = published_collection
    image_mock = MagicMock(spec=File)
    image_mock.name = "image.jpg"
    collection.background_image = image_mock
    collection.background_image_alt = alt_text
    collection.save(update_fields=["background_image", "background_image_alt"])
    thumbnail_file = MagicMock(spec=File)
    thumbnail_file.name = "thumbnail_image.jpg"
    Thumbnail.objects.create(collection=collection, size=128, image=thumbnail_file)
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection.pk),
        "channel": channel_USD.slug,
        "size": 120,
    }
    # when
    response = user_api_client.post_graphql(FETCH_COLLECTION_IMAGE_QUERY, variables)
    # then
    background = get_graphql_content(response)["data"]["collection"]["backgroundImage"]
    assert background["alt"] == alt_text
    assert background["url"] == (
        f"https://example.com/media/thumbnails/{thumbnail_file.name}"
    )
def test_collection_image_query_zero_size_custom_format_provided(
    user_api_client, published_collection, media_root, channel_USD
):
    """Size 0 returns the original image URL even when a format is given."""
    # given
    alt_text = "Alt text for an image."
    collection = published_collection
    image_mock = MagicMock(spec=File)
    image_mock.name = "image.jpg"
    collection.background_image = image_mock
    collection.background_image_alt = alt_text
    collection.save(update_fields=["background_image", "background_image_alt"])
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection.pk),
        "channel": channel_USD.slug,
        "format": ThumbnailFormatEnum.WEBP.name,
        "size": 0,
    }
    # when
    response = user_api_client.post_graphql(FETCH_COLLECTION_IMAGE_QUERY, variables)
    # then
    background = get_graphql_content(response)["data"]["collection"]["backgroundImage"]
    assert background["alt"] == alt_text
    assert background["url"] == (
        f"https://example.com/media/collection-backgrounds/{image_mock.name}"
    )
def test_collection_image_query_zero_size_value_original_image_returned(
    user_api_client, published_collection, media_root, channel_USD
):
    """Size 0 returns the original, unresized background image URL."""
    # given
    alt_text = "Alt text for an image."
    collection = published_collection
    image_mock = MagicMock(spec=File)
    image_mock.name = "image.jpg"
    collection.background_image = image_mock
    collection.background_image_alt = alt_text
    collection.save(update_fields=["background_image", "background_image_alt"])
    variables = {
        "id": graphene.Node.to_global_id("Collection", collection.pk),
        "channel": channel_USD.slug,
        "size": 0,
    }
    # when
    response = user_api_client.post_graphql(FETCH_COLLECTION_IMAGE_QUERY, variables)
    # then
    background = get_graphql_content(response)["data"]["collection"]["backgroundImage"]
    assert background["alt"] == alt_text
    assert background["url"] == (
        f"https://example.com/media/collection-backgrounds/{image_mock.name}"
    )
def test_collection_image_query_without_associated_file(
    user_api_client, published_collection, channel_USD
):
    """A collection with no background image resolves it to null."""
    # given
    variables = {
        "id": graphene.Node.to_global_id("Collection", published_collection.pk),
        "channel": channel_USD.slug,
    }
    # when
    response = user_api_client.post_graphql(FETCH_COLLECTION_IMAGE_QUERY, variables)
    # then
    data = get_graphql_content(response)["data"]["collection"]
    assert data["name"] == published_collection.name
    assert data["backgroundImage"] is None
def test_collection_query_invalid_id(
    user_api_client, published_collection, channel_USD
):
    """A malformed global ID yields one descriptive error and null data."""
    malformed_id = "'"
    response = user_api_client.post_graphql(
        FETCH_COLLECTION_IMAGE_QUERY,
        {"id": malformed_id, "channel": channel_USD.slug},
    )
    content = get_graphql_content_from_response(response)
    assert len(content["errors"]) == 1
    expected_message = f"Invalid ID: {malformed_id}. Expected: Collection."
    assert content["errors"][0]["message"] == expected_message
    assert content["data"]["collection"] is None
def test_collection_query_object_with_given_id_does_not_exist(
    user_api_client, published_collection, channel_USD
):
    """A well-formed ID pointing at no collection resolves to null."""
    missing_id = graphene.Node.to_global_id("Collection", -1)
    response = user_api_client.post_graphql(
        FETCH_COLLECTION_IMAGE_QUERY,
        {"id": missing_id, "channel": channel_USD.slug},
    )
    assert get_graphql_content(response)["data"]["collection"] is None
def test_collection_query_object_with_invalid_object_type(
    user_api_client, published_collection, channel_USD
):
    """An ID of a different node type (Product) resolves to null."""
    wrong_type_id = graphene.Node.to_global_id("Product", published_collection.pk)
    response = user_api_client.post_graphql(
        FETCH_COLLECTION_IMAGE_QUERY,
        {"id": wrong_type_id, "channel": channel_USD.slug},
    )
    assert get_graphql_content(response)["data"]["collection"] is None
def _fetch_collection(client, collection, channel_slug, permissions=None):
    """Fetch a collection's name and channel listings via the given client."""
    query = """
    query fetchCollection($id: ID!, $channel: String){
        collection(id: $id, channel: $channel) {
            name,
            channelListings {
                isPublished
            }
        }
    }
    """
    collection_gid = graphene.Node.to_global_id("Collection", collection.id)
    response = client.post_graphql(
        query,
        {"id": collection_gid, "channel": channel_slug},
        permissions=permissions,
        check_no_permissions=False,
    )
    return get_graphql_content(response)["data"]["collection"]
def test_fetch_unpublished_collection_staff_user(
    staff_api_client, unpublished_collection, permission_manage_products, channel_USD
):
    """Staff with permission see the unpublished collection and its listing."""
    data = _fetch_collection(
        staff_api_client,
        unpublished_collection,
        channel_USD.slug,
        permissions=[permission_manage_products],
    )
    assert data["name"] == unpublished_collection.name
    assert data["channelListings"][0]["isPublished"] is False
def test_fetch_unpublished_collection_customer(
    user_api_client, unpublished_collection, channel_USD
):
    """Logged-in customers cannot see unpublished collections."""
    assert (
        _fetch_collection(user_api_client, unpublished_collection, channel_USD.slug)
        is None
    )
def test_fetch_unpublished_collection_anonymous_user(
    api_client, unpublished_collection, channel_USD
):
    """Anonymous users cannot see unpublished collections."""
    assert (
        _fetch_collection(api_client, unpublished_collection, channel_USD.slug)
        is None
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/test_collection_query.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 577,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/test_collections_query.py | import datetime
import graphene
import pytest
from .....product.models import Collection, CollectionChannelListing, Product
from .....tests.utils import dummy_editorjs
from ....tests.utils import (
get_graphql_content,
)
def test_collections_query(
    user_api_client,
    published_collection,
    unpublished_collection,
    permission_manage_products,
    channel_USD,
):
    """Regular users see only published collections, with all basic fields."""
    query = """
        query Collections ($channel: String) {
            collections(first:2, channel: $channel) {
                edges {
                    node {
                        name
                        slug
                        description
                        descriptionJson
                        products {
                            totalCount
                        }
                    }
                }
            }
        }
    """
    # Regular users may only see publicly visible collections.
    response = user_api_client.post_graphql(query, {"channel": channel_USD.slug})
    edges = get_graphql_content(response)["data"]["collections"]["edges"]
    assert len(edges) == 1
    node = edges[0]["node"]
    expected_description = dummy_editorjs("Test description.", json_format=True)
    assert node["name"] == published_collection.name
    assert node["slug"] == published_collection.slug
    assert node["description"] == expected_description
    assert node["descriptionJson"] == expected_description
    assert node["products"]["totalCount"] == published_collection.products.count()
def test_collections_query_without_description(
    user_api_client,
    published_collection,
    unpublished_collection,
    permission_manage_products,
    channel_USD,
):
    """A collection with no description returns null/empty JSON fields."""
    query = """
        query Collections ($channel: String) {
            collections(first:2, channel: $channel) {
                edges {
                    node {
                        name
                        slug
                        description
                        descriptionJson
                    }
                }
            }
        }
    """
    collection = published_collection
    collection.description = None
    collection.save()
    # Regular users may only see publicly visible collections.
    response = user_api_client.post_graphql(query, {"channel": channel_USD.slug})
    edges = get_graphql_content(response)["data"]["collections"]["edges"]
    assert len(edges) == 1
    node = edges[0]["node"]
    assert node["name"] == collection.name
    assert node["slug"] == collection.slug
    assert node["description"] is None
    assert node["descriptionJson"] == "{}"
def test_collections_query_as_staff(
    staff_api_client,
    published_collection,
    unpublished_collection_PLN,
    permission_manage_products,
    channel_USD,
):
    """With a channel given, staff see only that channel's collections."""
    query = """
        query Collections($channel: String) {
            collections(first: 2, channel: $channel) {
                edges {
                    node {
                        name
                        slug
                        description
                        products {
                            totalCount
                        }
                    }
                }
            }
        }
    """
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(query, {"channel": channel_USD.slug})
    edges = get_graphql_content(response)["data"]["collections"]["edges"]
    # Only the USD-channel collection is visible when a channel is given.
    assert len(edges) == 1
def test_collections_query_as_staff_without_channel(
    staff_api_client,
    published_collection,
    unpublished_collection_PLN,
    permission_manage_products,
    channel_USD,
):
    """Without a channel, staff with permission see collections of all channels."""
    query = """
        query Collections($channel: String) {
            collections(first: 2, channel: $channel) {
                edges {
                    node {
                        name
                        slug
                        description
                        products {
                            totalCount
                        }
                    }
                }
            }
        }
    """
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(query)
    edges = get_graphql_content(response)["data"]["collections"]["edges"]
    # Both the USD and the PLN collections are visible with no channel filter.
    assert len(edges) == 2
NOT_EXISTS_IDS_COLLECTIONS_QUERY = """
query ($filter: CollectionFilterInput!, $channel: String) {
collections(first: 5, filter: $filter, channel: $channel) {
edges {
node {
id
name
}
}
}
}
"""
def test_collections_query_ids_not_exists(
    user_api_client, published_collection, channel_USD
):
    """Malformed global IDs in the ids filter produce one handled error."""
    variables = {
        "filter": {"ids": ["ncXc5tP7kmV6pxE=", "yMyDVE5S2LWWTqK="]},
        "channel": channel_USD.slug,
    }
    response = user_api_client.post_graphql(
        NOT_EXISTS_IDS_COLLECTIONS_QUERY, variables
    )
    content = get_graphql_content(response, ignore_errors=True)
    expected_message = '{"ids":[{"message":"Invalid ID specified.","code":""}]}'
    assert len(content["errors"]) == 1
    assert content["errors"][0]["message"] == expected_message
    assert content["data"]["collections"] is None
GET_SORTED_PRODUCTS_COLLECTION_QUERY = """
query CollectionProducts($id: ID!, $channel: String, $sortBy: ProductOrder) {
collection(id: $id, channel: $channel) {
products(first: 10, sortBy: $sortBy) {
edges {
node {
id
}
}
}
}
}
"""
def test_sort_collection_products_by_name(
    staff_api_client, published_collection, product_list, channel_USD
):
    """Products within a collection can be sorted by name, descending."""
    # given
    published_collection.products.add(*product_list)
    variables = {
        "id": graphene.Node.to_global_id("Collection", published_collection.pk),
        "sortBy": {"direction": "DESC", "field": "NAME"},
        "channel": channel_USD.slug,
    }
    # when
    response = staff_api_client.post_graphql(
        GET_SORTED_PRODUCTS_COLLECTION_QUERY, variables
    )
    # then
    edges = get_graphql_content(response)["data"]["collection"]["products"]["edges"]
    expected_ids = [
        graphene.Node.to_global_id("Product", product.pk)
        for product in Product.objects.order_by("-name")
    ]
    assert [edge["node"]["id"] for edge in edges] == expected_ids
GET_SORTED_COLLECTION_QUERY = """
query Collections($sortBy: CollectionSortingInput) {
collections(first: 10, sortBy: $sortBy) {
edges {
node {
id
publicationDate
}
}
}
}
"""
def test_query_collection_for_federation(api_client, published_collection, channel_USD):
    """Collections resolve through the Apollo Federation ``_entities`` field."""
    collection_id = graphene.Node.to_global_id("Collection", published_collection.pk)
    query = """
      query GetCollectionInFederation($representations: [_Any!]!) {
        _entities(representations: $representations) {
          __typename
          ... on Collection {
            id
            name
          }
        }
      }
    """
    representations = [
        {
            "__typename": "Collection",
            "id": collection_id,
            "channel": channel_USD.slug,
        },
    ]
    response = api_client.post_graphql(query, {"representations": representations})
    content = get_graphql_content(response)
    assert content["data"]["_entities"] == [
        {
            "__typename": "Collection",
            "id": collection_id,
            "name": published_collection.name,
        }
    ]
QUERY_COLLECTIONS_WITH_SORT = """
query ($sort_by: CollectionSortingInput!, $channel: String) {
collections(first:5, sortBy: $sort_by, channel: $channel) {
edges{
node{
name
}
}
}
}
"""
@pytest.mark.parametrize(
    ("collection_sort", "result_order"),
    [
        ({"field": "NAME", "direction": "ASC"}, ["Coll1", "Coll2", "Coll3"]),
        ({"field": "NAME", "direction": "DESC"}, ["Coll3", "Coll2", "Coll1"]),
        ({"field": "AVAILABILITY", "direction": "ASC"}, ["Coll2", "Coll1", "Coll3"]),
        ({"field": "AVAILABILITY", "direction": "DESC"}, ["Coll3", "Coll1", "Coll2"]),
        ({"field": "PRODUCT_COUNT", "direction": "ASC"}, ["Coll1", "Coll3", "Coll2"]),
        ({"field": "PRODUCT_COUNT", "direction": "DESC"}, ["Coll2", "Coll3", "Coll1"]),
    ],
)
def test_collections_query_with_sort(
    collection_sort,
    result_order,
    staff_api_client,
    permission_manage_products,
    product,
    channel_USD,
):
    """Collections sort by name, availability, and product count."""
    # given
    created = Collection.objects.bulk_create(
        [
            Collection(name="Coll1", slug="collection-1"),
            Collection(name="Coll2", slug="collection-2"),
            Collection(name="Coll3", slug="collection-3"),
        ]
    )
    # Coll2 is unpublished; Coll1 and Coll3 are published.
    published_flags = (True, False, True)
    CollectionChannelListing.objects.bulk_create(
        [
            CollectionChannelListing(
                channel=channel_USD,
                collection=coll,
                is_published=published_flags[idx],
            )
            for idx, coll in enumerate(created)
        ]
    )
    # Only Coll2 contains a product (drives the PRODUCT_COUNT ordering).
    product.collections.add(Collection.objects.get(name="Coll2"))
    staff_api_client.user.user_permissions.add(permission_manage_products)
    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORT,
        {"sort_by": collection_sort, "channel": channel_USD.slug},
    )
    # then
    edges = get_graphql_content(response)["data"]["collections"]["edges"]
    for position, expected_name in enumerate(result_order):
        assert edges[position]["node"]["name"] == expected_name
QUERY_PAGINATED_SORTED_COLLECTIONS = """
query (
$first: Int, $sort_by: CollectionSortingInput!, $after: String, $channel: String
) {
collections(first: $first, sortBy: $sort_by, after: $after, channel: $channel) {
edges{
node{
slug
}
}
pageInfo{
startCursor
endCursor
hasNextPage
hasPreviousPage
}
}
}
"""
def test_pagination_for_sorting_collections_by_published_at_date(
    api_client, channel_USD
):
    """Cursor pagination stays consistent when sorting by PUBLISHED_AT DESC."""
    # given: each consecutive collection is published one day earlier, so
    # DESC order matches creation order (Coll1, Coll2, Coll3).
    collections = Collection.objects.bulk_create(
        [
            Collection(name="Coll1", slug="collection-1"),
            Collection(name="Coll2", slug="collection-2"),
            Collection(name="Coll3", slug="collection-3"),
        ]
    )
    now = datetime.datetime.now(tz=datetime.UTC)
    CollectionChannelListing.objects.bulk_create(
        [
            CollectionChannelListing(
                channel=channel_USD,
                collection=collection,
                is_published=True,
                published_at=now - datetime.timedelta(days=num),
            )
            for num, collection in enumerate(collections)
        ]
    )
    first = 2
    variables = {
        "sort_by": {"direction": "DESC", "field": "PUBLISHED_AT"},
        "channel": channel_USD.slug,
        "first": first,
    }

    # first request: expect the first page and a cursor to continue from
    response = api_client.post_graphql(QUERY_PAGINATED_SORTED_COLLECTIONS, variables)
    content = get_graphql_content(response)
    data = content["data"]["collections"]
    assert len(data["edges"]) == first
    assert [node["node"]["slug"] for node in data["edges"]] == [
        collection.slug for collection in collections[:first]
    ]
    end_cursor = data["pageInfo"]["endCursor"]
    variables["after"] = end_cursor

    # when
    # second request: resume after the cursor
    response = api_client.post_graphql(QUERY_PAGINATED_SORTED_COLLECTIONS, variables)

    # then
    content = get_graphql_content(response)
    data = content["data"]["collections"]
    expected_count = len(collections) - first
    assert len(data["edges"]) == expected_count
    assert [node["node"]["slug"] for node in data["edges"]] == [
        collection.slug for collection in collections[first:]
    ]
def test_collections_query_return_error_with_sort_by_rank_without_search(
    staff_api_client, published_collection, product_list, channel_USD
):
    """Sorting collection products by RANK without a search filter must fail."""
    # given
    published_collection.products.add(*product_list)
    variables = {
        "id": graphene.Node.to_global_id("Collection", published_collection.pk),
        "sortBy": {"direction": "DESC", "field": "RANK"},
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(
        GET_SORTED_PRODUCTS_COLLECTION_QUERY, variables
    )
    content = get_graphql_content(response, ignore_errors=True)

    # then
    errors = content["errors"]
    assert len(errors) == 1
    assert errors[0]["message"] == (
        "Sorting by RANK is available only when using a search filter."
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/test_collections_query.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/test_collections_query_with_filter_and_sort.py | import datetime
import graphene
import pytest
from .....product.models import Collection, CollectionChannelListing
from .....tests.utils import dummy_editorjs
from ....tests.utils import (
assert_graphql_error_with_message,
get_graphql_content,
)
@pytest.mark.parametrize(
    ("collection_filter", "count"),
    [
        ({"published": "PUBLISHED"}, 2),
        ({"published": "HIDDEN"}, 1),
        ({"search": "-published1"}, 1),
        ({"search": "Collection3"}, 1),
        (
            {
                "ids": [
                    graphene.Node.to_global_id("Collection", 2),
                    graphene.Node.to_global_id("Collection", 3),
                ]
            },
            2,
        ),
    ],
)
def test_collections_query_with_filter(
    collection_filter,
    count,
    channel_USD,
    staff_api_client,
    permission_manage_products,
):
    """Filtering collections returns the expected number of matches."""
    query = """
        query ($filter: CollectionFilterInput!, $channel: String) {
            collections(first:5, filter: $filter, channel: $channel) {
                edges{
                    node{
                        id
                        name
                    }
                }
            }
        }
    """
    # Explicit primary keys so the parametrized global IDs above resolve.
    collections = Collection.objects.bulk_create(
        [
            Collection(
                id=1,
                name="Collection1",
                slug="collection-published1",
                description=dummy_editorjs("Test description"),
            ),
            Collection(
                id=2,
                name="Collection2",
                slug="collection-published2",
                description=dummy_editorjs("Test description"),
            ),
            Collection(
                id=3,
                name="Collection3",
                slug="collection-unpublished",
                description=dummy_editorjs("Test description"),
            ),
        ]
    )
    published = (True, True, False)
    CollectionChannelListing.objects.bulk_create(
        [
            CollectionChannelListing(
                channel=channel_USD, collection=collection, is_published=published[num]
            )
            for num, collection in enumerate(collections)
        ]
    )
    variables = {
        "filter": collection_filter,
        "channel": channel_USD.slug,
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    collections = content["data"]["collections"]["edges"]
    assert len(collections) == count
@pytest.fixture
def collections_for_sorting_with_channels(channel_USD, channel_PLN):
    """Five collections with per-channel listings for sort/filter tests.

    USD lists Collection1-4 (only Collection1 published); PLN lists
    Collection1, 2, 3 and 5 (only Collection2 published). The published_at
    values are staggered to exercise PUBLISHED_AT / AVAILABILITY sorting.
    """
    collections = Collection.objects.bulk_create(
        [
            Collection(name="Collection1", slug="collection1"),
            Collection(name="Collection2", slug="collection2"),
            Collection(name="Collection3", slug="collection3"),
            Collection(name="Collection4", slug="collection4"),
            Collection(name="Collection5", slug="collection5"),
        ]
    )
    CollectionChannelListing.objects.bulk_create(
        [
            CollectionChannelListing(
                collection=collections[0],
                published_at=None,
                is_published=True,
                channel=channel_USD,
            ),
            CollectionChannelListing(
                collection=collections[1],
                published_at=None,
                is_published=False,
                channel=channel_USD,
            ),
            CollectionChannelListing(
                collection=collections[2],
                published_at=datetime.datetime(2004, 1, 1, tzinfo=datetime.UTC),
                is_published=False,
                channel=channel_USD,
            ),
            CollectionChannelListing(
                collection=collections[3],
                published_at=datetime.datetime(2003, 1, 1, tzinfo=datetime.UTC),
                is_published=False,
                channel=channel_USD,
            ),
            # second channel
            CollectionChannelListing(
                collection=collections[0],
                published_at=None,
                is_published=False,
                channel=channel_PLN,
            ),
            CollectionChannelListing(
                collection=collections[1],
                published_at=None,
                is_published=True,
                channel=channel_PLN,
            ),
            CollectionChannelListing(
                collection=collections[2],
                published_at=datetime.datetime(2002, 1, 1, tzinfo=datetime.UTC),
                is_published=False,
                channel=channel_PLN,
            ),
            CollectionChannelListing(
                collection=collections[4],
                published_at=datetime.datetime(2001, 1, 1, tzinfo=datetime.UTC),
                is_published=False,
                channel=channel_PLN,
            ),
        ]
    )
    return collections
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING = """
query (
$sortBy: CollectionSortingInput,
$filter: CollectionFilterInput, $channel: String
){
collections (
first: 10, sortBy: $sortBy, filter: $filter, channel: $channel
) {
edges {
node {
name
slug
}
}
}
}
"""
@pytest.mark.parametrize(
    "sort_by",
    [
        {"field": "AVAILABILITY", "direction": "ASC"},
        {"field": "PUBLISHED_AT", "direction": "DESC"},
    ],
)
def test_collections_with_sorting_and_without_channel(
    sort_by,
    staff_api_client,
    permission_manage_products,
):
    """Channel-dependent sorting without a channel argument raises an error."""
    # given
    variables = {"sortBy": sort_by}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    assert_graphql_error_with_message(response, "A default channel does not exist.")
@pytest.mark.parametrize(
    ("sort_by", "collections_order"),
    [
        (
            {"field": "AVAILABILITY", "direction": "ASC"},
            ["Collection2", "Collection3", "Collection4", "Collection1"],
        ),
        (
            {"field": "AVAILABILITY", "direction": "DESC"},
            ["Collection1", "Collection4", "Collection3", "Collection2"],
        ),
        (
            {"field": "PUBLISHED_AT", "direction": "ASC"},
            ["Collection4", "Collection3", "Collection1", "Collection2"],
        ),
        (
            {"field": "PUBLISHED_AT", "direction": "DESC"},
            ["Collection2", "Collection1", "Collection3", "Collection4"],
        ),
    ],
)
def test_collections_with_sorting_and_channel_USD(
    sort_by,
    collections_order,
    staff_api_client,
    permission_manage_products,
    collections_for_sorting_with_channels,
    channel_USD,
):
    """Sorting in the USD channel returns collections in the expected order."""
    # given
    variables = {"sortBy": sort_by, "channel": channel_USD.slug}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    content = get_graphql_content(response)
    collections_nodes = content["data"]["collections"]["edges"]
    for index, collection_name in enumerate(collections_order):
        assert collection_name == collections_nodes[index]["node"]["name"]
@pytest.mark.parametrize(
    ("sort_by", "collections_order"),
    [
        (
            {"field": "AVAILABILITY", "direction": "ASC"},
            ["Collection1", "Collection3", "Collection5", "Collection2"],
        ),
        (
            {"field": "AVAILABILITY", "direction": "DESC"},
            ["Collection2", "Collection5", "Collection3", "Collection1"],
        ),
        (
            {"field": "PUBLISHED_AT", "direction": "ASC"},
            ["Collection5", "Collection3", "Collection1", "Collection2"],
        ),
        (
            {"field": "PUBLISHED_AT", "direction": "DESC"},
            ["Collection2", "Collection1", "Collection3", "Collection5"],
        ),
    ],
)
def test_collections_with_sorting_and_channel_PLN(
    sort_by,
    collections_order,
    staff_api_client,
    permission_manage_products,
    collections_for_sorting_with_channels,
    channel_PLN,
):
    """Sorting in the PLN channel returns collections in the expected order."""
    # given
    variables = {"sortBy": sort_by, "channel": channel_PLN.slug}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    content = get_graphql_content(response)
    collections_nodes = content["data"]["collections"]["edges"]
    for index, collection_name in enumerate(collections_order):
        assert collection_name == collections_nodes[index]["node"]["name"]
@pytest.mark.parametrize(
    "sort_by",
    [
        {"field": "AVAILABILITY", "direction": "ASC"},
        {"field": "PUBLISHED_AT", "direction": "ASC"},
    ],
)
def test_collections_with_sorting_and_not_existing_channel_asc(
    sort_by,
    staff_api_client,
    permission_manage_products,
    collections_for_sorting_with_channels,
    channel_USD,
):
    """Sorting ascending against an unknown channel yields an empty result."""
    # given
    variables = {"sortBy": sort_by, "channel": "Not-existing"}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    content = get_graphql_content(response)
    assert not content["data"]["collections"]["edges"]
@pytest.mark.parametrize(
    "sort_by",
    [
        {"field": "AVAILABILITY", "direction": "DESC"},
        {"field": "PUBLISHED_AT", "direction": "DESC"},
    ],
)
def test_collections_with_sorting_and_not_existing_channel_desc(
    sort_by,
    staff_api_client,
    permission_manage_products,
    collections_for_sorting_with_channels,
    channel_USD,
):
    """Sorting descending against an unknown channel yields an empty result."""
    # given
    variables = {"sortBy": sort_by, "channel": "Not-existing"}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    content = get_graphql_content(response)
    assert not content["data"]["collections"]["edges"]
def test_collections_with_filtering_without_channel(
    staff_api_client, permission_manage_products
):
    """Publication filtering without a channel argument raises an error."""
    # given
    variables = {"filter": {"published": "PUBLISHED"}}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    assert_graphql_error_with_message(response, "A default channel does not exist.")
@pytest.mark.parametrize(
    ("filter_by", "collections_count"),
    [
        ({"published": "PUBLISHED"}, 1),
        ({"published": "HIDDEN"}, 3),
        ({"slugs": ["collection1"]}, 1),
        ({"slugs": ["collection2", "collection3"]}, 2),
        # an empty slugs list matches all collections listed in the channel
        ({"slugs": []}, 4),
    ],
)
def test_collections_with_filtering_with_channel_USD(
    filter_by,
    collections_count,
    staff_api_client,
    permission_manage_products,
    collections_for_sorting_with_channels,
    channel_USD,
):
    """Filtering in the USD channel matches the expected number of collections."""
    # given
    variables = {"filter": filter_by, "channel": channel_USD.slug}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    content = get_graphql_content(response)
    collections_nodes = content["data"]["collections"]["edges"]
    assert len(collections_nodes) == collections_count
@pytest.mark.parametrize(
    ("filter_by", "collections_count"),
    [({"published": "PUBLISHED"}, 1), ({"published": "HIDDEN"}, 3)],
)
def test_collections_with_filtering_with_channel_PLN(
    filter_by,
    collections_count,
    staff_api_client,
    permission_manage_products,
    collections_for_sorting_with_channels,
    channel_PLN,
):
    """Filtering in the PLN channel matches the expected number of collections."""
    # given
    variables = {"filter": filter_by, "channel": channel_PLN.slug}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    content = get_graphql_content(response)
    collections_nodes = content["data"]["collections"]["edges"]
    assert len(collections_nodes) == collections_count
@pytest.mark.parametrize(
    "filter_by",
    [
        {"published": "PUBLISHED"},
        {"published": "HIDDEN"},
        {"slugs": ["collection1"]},
    ],
)
def test_collections_with_filtering_and_not_existing_channel(
    filter_by,
    staff_api_client,
    permission_manage_products,
    collections_for_sorting_with_channels,
    channel_USD,
):
    """Filtering against an unknown channel slug returns no collections."""
    # given
    variables = {"filter": filter_by, "channel": "Not-existing"}

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
        variables,
        permissions=[permission_manage_products],
        check_no_permissions=False,
    )

    # then
    content = get_graphql_content(response)
    collections_nodes = content["data"]["collections"]["edges"]
    assert len(collections_nodes) == 0
COLLECTION_WHERE_QUERY = """
query($where: CollectionWhereInput!, $channel: String) {
collections(first: 10, where: $where, channel: $channel) {
edges {
node {
id
slug
}
}
}
}
"""
def test_collections_where_by_ids(api_client, collection_list, channel_USD):
    """`where: {ids: [...]}` returns exactly the requested collections."""
    # given
    wanted = collection_list[:2]
    ids = [graphene.Node.to_global_id("Collection", c.pk) for c in wanted]
    variables = {"channel": channel_USD.slug, "where": {"AND": [{"ids": ids}]}}

    # when
    response = api_client.post_graphql(COLLECTION_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["collections"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["slug"] for edge in edges} == {c.slug for c in wanted}
def test_collections_where_by_none_as_ids(api_client, collection_list, channel_USD):
    """`where: {ids: null}` matches nothing."""
    # given
    variables = {"channel": channel_USD.slug, "where": {"AND": [{"ids": None}]}}

    # when
    response = api_client.post_graphql(COLLECTION_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert not content["data"]["collections"]["edges"]
def test_collections_where_by_ids_empty_list(api_client, collection_list, channel_USD):
    """`where: {ids: []}` matches nothing."""
    # given
    variables = {"channel": channel_USD.slug, "where": {"ids": []}}

    # when
    response = api_client.post_graphql(COLLECTION_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert not content["data"]["collections"]["edges"]
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/test_collections_query_with_filter_and_sort.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 475,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/mutations/test_category_create.py | import json
import os
from unittest.mock import patch
import graphene
import pytest
from django.utils.functional import SimpleLazyObject
from django.utils.text import slugify
from freezegun import freeze_time
from .....core.utils.json_serializer import CustomJsonEncoder
from .....product.models import Category
from .....product.tests.utils import create_image
from .....tests.utils import dummy_editorjs
from .....webhook.event_types import WebhookEventAsyncType
from .....webhook.payloads import generate_meta, generate_requestor
from ....tests.utils import (
get_graphql_content,
get_multipart_request_body,
)
CATEGORY_CREATE_MUTATION = """
mutation(
$name: String, $slug: String,
$description: JSONString, $backgroundImage: Upload,
$backgroundImageAlt: String, $parentId: ID,
$metadata: [MetadataInput!], $privateMetadata: [MetadataInput!]) {
categoryCreate(
input: {
name: $name
slug: $slug
description: $description
backgroundImage: $backgroundImage
backgroundImageAlt: $backgroundImageAlt
metadata: $metadata
privateMetadata: $privateMetadata
},
parent: $parentId
) {
category {
id
name
slug
description
parent {
name
id
}
backgroundImage{
alt
}
metadata {
key
value
}
privateMetadata {
key
value
}
}
errors {
field
code
message
}
}
}
"""
def test_category_create_mutation(
    monkeypatch, staff_api_client, permission_manage_products, media_root
):
    """Create a root category (with background image and metadata), then a child.

    Verifies the mutation payload and the persisted model state: description
    plaintext, stored background-image file name, public/private metadata,
    and the parent linkage for the subcategory.
    """
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)

    category_name = "Test category"
    description = "description"
    category_slug = slugify(category_name)
    category_description = dummy_editorjs(description, True)
    image_file, image_name = create_image()
    image_alt = "Alt text for an image."
    metadata_key = "md key"
    metadata_value = "md value"

    # test creating root category
    variables = {
        "name": category_name,
        "description": category_description,
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
        "slug": category_slug,
        "metadata": [{"key": metadata_key, "value": metadata_value}],
        "privateMetadata": [{"key": metadata_key, "value": metadata_value}],
    }
    body = get_multipart_request_body(
        CATEGORY_CREATE_MUTATION, variables, image_file, image_name
    )

    # when
    response = staff_api_client.post_multipart(body)
    content = get_graphql_content(response)
    data = content["data"]["categoryCreate"]

    # then
    assert data["errors"] == []
    assert data["category"]["name"] == category_name
    assert data["category"]["description"] == category_description
    assert not data["category"]["parent"]
    category = Category.objects.get(name=category_name)
    assert category.description_plaintext == description
    assert category.background_image.file
    # Stored file keeps the original base name and extension, but lives under
    # the category-backgrounds/ prefix.  Unpack into `base_name`/`ext` rather
    # than `format`, which would shadow the builtin.
    base_name, ext = os.path.splitext(image_file._name)
    file_name = category.background_image.name
    assert file_name != image_file._name
    assert file_name.startswith(f"category-backgrounds/{base_name}")
    assert file_name.endswith(ext)
    assert data["category"]["backgroundImage"]["alt"] == image_alt
    assert category.metadata == {metadata_key: metadata_value}
    assert category.private_metadata == {metadata_key: metadata_value}

    # test creating subcategory
    parent_id = data["category"]["id"]
    variables = {
        "name": category_name,
        "description": category_description,
        "parentId": parent_id,
        "slug": f"{category_slug}-2",
    }
    response = staff_api_client.post_graphql(CATEGORY_CREATE_MUTATION, variables)
    content = get_graphql_content(response)
    data = content["data"]["categoryCreate"]
    assert data["errors"] == []
    assert data["category"]["parent"]["id"] == parent_id
@freeze_time("2022-05-12 12:00:00")
@patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_category_create_trigger_webhook(
    mocked_webhook_trigger,
    mocked_get_webhooks_for_event,
    any_webhook,
    monkeypatch,
    staff_api_client,
    permission_manage_products,
    media_root,
    settings,
):
    """Creating a category triggers CATEGORY_CREATED with the expected payload."""
    staff_api_client.user.user_permissions.add(permission_manage_products)
    query = CATEGORY_CREATE_MUTATION

    mocked_get_webhooks_for_event.return_value = [any_webhook]
    settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]

    category_name = "Test category"
    description = "description"
    category_slug = slugify(category_name)
    category_description = dummy_editorjs(description, True)
    image_file, image_name = create_image()
    image_alt = "Alt text for an image."

    # test creating root category
    variables = {
        "name": category_name,
        "description": category_description,
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
        "slug": category_slug,
    }

    body = get_multipart_request_body(query, variables, image_file, image_name)
    response = staff_api_client.post_multipart(body)
    content = get_graphql_content(response)
    data = content["data"]["categoryCreate"]
    category = Category.objects.first()

    assert category
    assert data["errors"] == []
    # The payload carries the category's global ID and requestor metadata;
    # the requestor is wrapped in SimpleLazyObject so evaluation is deferred.
    mocked_webhook_trigger.assert_called_once_with(
        json.dumps(
            {
                "id": graphene.Node.to_global_id("Category", category.id),
                "meta": generate_meta(
                    requestor_data=generate_requestor(
                        SimpleLazyObject(lambda: staff_api_client.user)
                    )
                ),
            },
            cls=CustomJsonEncoder,
        ),
        WebhookEventAsyncType.CATEGORY_CREATED,
        [any_webhook],
        category,
        SimpleLazyObject(lambda: staff_api_client.user),
        allow_replica=False,
    )
@pytest.mark.parametrize(
    ("input_slug", "expected_slug"),
    [
        ("test-slug", "test-slug"),
        (None, "test-category"),
        ("", "test-category"),
        ("わたし-わ-にっぽん-です", "わたし-わ-にっぽん-です"),
    ],
)
def test_create_category_with_given_slug(
    staff_api_client, permission_manage_products, input_slug, expected_slug
):
    """An explicit slug is kept; a missing/empty one is derived from the name."""
    # given
    variables = {"name": "Test category", "slug": input_slug}

    # when
    response = staff_api_client.post_graphql(
        CATEGORY_CREATE_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    payload = get_graphql_content(response)["data"]["categoryCreate"]
    assert not payload["errors"]
    assert payload["category"]["slug"] == expected_slug
def test_create_category_name_with_unicode(
    staff_api_client, permission_manage_products
):
    """A non-ASCII name is preserved and transliterated into an ASCII slug."""
    # given
    unicode_name = "わたし-わ にっぽん です"
    variables = {"name": unicode_name}

    # when
    response = staff_api_client.post_graphql(
        CATEGORY_CREATE_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    payload = get_graphql_content(response)["data"]["categoryCreate"]
    assert not payload["errors"]
    assert payload["category"]["name"] == unicode_name
    assert payload["category"]["slug"] == "watasi-wa-nitupon-desu"
def test_category_create_mutation_without_background_image(
    monkeypatch, staff_api_client, permission_manage_products
):
    """A root category can be created without uploading a background image."""
    # given
    name = "Test category"
    variables = {
        "name": name,
        "description": dummy_editorjs("description", True),
        "slug": slugify(name),
    }

    # when
    response = staff_api_client.post_graphql(
        CATEGORY_CREATE_MUTATION, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    assert content["data"]["categoryCreate"]["errors"] == []
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/mutations/test_category_create.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/mutations/test_category_delete.py | import json
from unittest.mock import MagicMock, patch
import graphene
import pytest
from django.core.files import File
from django.utils.functional import SimpleLazyObject
from freezegun import freeze_time
from .....attribute.models import AttributeValue
from .....attribute.utils import associate_attribute_values_to_instance
from .....core.utils.json_serializer import CustomJsonEncoder
from .....discount.utils.promotion import get_active_catalogue_promotion_rules
from .....product.models import Category, ProductChannelListing
from .....thumbnail.models import Thumbnail
from .....webhook.event_types import WebhookEventAsyncType
from .....webhook.payloads import generate_meta, generate_requestor
from ....tests.utils import (
get_graphql_content,
)
MUTATION_CATEGORY_DELETE = """
mutation($id: ID!) {
categoryDelete(id: $id) {
category {
name
}
errors {
field
message
}
}
}
"""
@patch("saleor.core.tasks.delete_from_storage_task.delay")
def test_category_delete_mutation(
    delete_from_storage_task_mock,
    staff_api_client,
    category,
    product_list,
    media_root,
    permission_manage_products,
):
    """Deleting a category removes it, its thumbnails, and dirties promo rules."""
    # given
    thumbnail_mock = MagicMock(spec=File)
    thumbnail_mock.name = "thumbnail_image.jpg"
    Thumbnail.objects.create(category=category, size=128, image=thumbnail_mock)
    Thumbnail.objects.create(category=category, size=200, image=thumbnail_mock)

    category.products.add(*product_list)
    category_id = category.id
    variables = {"id": graphene.Node.to_global_id("Category", category_id)}

    # when
    response = staff_api_client.post_graphql(
        MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["categoryDelete"]
    assert data["category"]["name"] == category.name
    with pytest.raises(category._meta.model.DoesNotExist):
        category.refresh_from_db()

    # ensure all related thumbnails have been deleted
    assert not Thumbnail.objects.filter(category_id=category_id)
    # one storage-deletion task per thumbnail created above
    assert delete_from_storage_task_mock.call_count == 2
    for rule in get_active_catalogue_promotion_rules():
        assert rule.variants_dirty is True
@freeze_time("2022-05-12 12:00:00")
@patch("saleor.product.utils.get_webhooks_for_event")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_category_delete_trigger_webhook(
    mocked_webhook_trigger,
    mocked_get_webhooks_for_event,
    any_webhook,
    staff_api_client,
    category,
    permission_manage_products,
    settings,
):
    """Deleting a category triggers CATEGORY_DELETED with the expected payload."""
    mocked_get_webhooks_for_event.return_value = [any_webhook]
    settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]

    variables = {"id": graphene.Node.to_global_id("Category", category.id)}
    response = staff_api_client.post_graphql(
        MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    data = content["data"]["categoryDelete"]
    assert data["category"]["name"] == category.name
    assert not Category.objects.first()
    # The payload echoes the deleted category's global ID plus requestor
    # metadata, wrapped in SimpleLazyObject for deferred evaluation.
    mocked_webhook_trigger.assert_called_once_with(
        json.dumps(
            {
                "id": variables["id"],
                "meta": generate_meta(
                    requestor_data=generate_requestor(
                        SimpleLazyObject(lambda: staff_api_client.user)
                    )
                ),
            },
            cls=CustomJsonEncoder,
        ),
        WebhookEventAsyncType.CATEGORY_DELETED,
        [any_webhook],
        category,
        SimpleLazyObject(lambda: staff_api_client.user),
        allow_replica=False,
    )
def test_delete_category_with_background_image(
    staff_api_client,
    category_with_image,
    permission_manage_products,
    media_root,
):
    """Ensure deleting category deletes background image from storage."""
    # given
    category = category_with_image
    variables = {"id": graphene.Node.to_global_id("Category", category.id)}

    # when
    response = staff_api_client.post_graphql(
        MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
    )

    # then
    payload = get_graphql_content(response)["data"]["categoryDelete"]
    assert payload["category"]["name"] == category.name
    with pytest.raises(category._meta.model.DoesNotExist):
        category.refresh_from_db()
def test_category_delete_mutation_for_categories_tree(
    staff_api_client,
    categories_tree_with_published_products,
    permission_manage_products,
):
    """Deleting a parent category unpublishes products of the whole subtree."""
    # given
    parent = categories_tree_with_published_products
    parent_product = parent.products.first()
    child_product = parent.children.first().products.first()
    product_list = [child_product, parent_product]

    variables = {"id": graphene.Node.to_global_id("Category", parent.id)}

    # when
    response = staff_api_client.post_graphql(
        MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["categoryDelete"]
    assert data["category"]["name"] == parent.name
    with pytest.raises(parent._meta.model.DoesNotExist):
        parent.refresh_from_db()

    # all channel listings for both products are unpublished
    product_channel_listings = ProductChannelListing.objects.filter(
        product__in=product_list
    )
    for product_channel_listing in product_channel_listings:
        assert product_channel_listing.is_published is False
        assert not product_channel_listing.published_at

    assert product_channel_listings.count() == 4
    for rule in get_active_catalogue_promotion_rules():
        assert rule.variants_dirty is True
def test_category_delete_mutation_for_children_from_categories_tree(
    staff_api_client,
    categories_tree_with_published_products,
    permission_manage_products,
):
    """Deleting a child category leaves the parent's products untouched."""
    # given
    parent = categories_tree_with_published_products
    child = parent.children.first()
    parent_product = parent.products.first()
    child_product = child.products.first()
    variables = {"id": graphene.Node.to_global_id("Category", child.id)}

    # when
    response = staff_api_client.post_graphql(
        MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["categoryDelete"]
    assert data["category"]["name"] == child.name
    with pytest.raises(child._meta.model.DoesNotExist):
        child.refresh_from_db()
    for rule in get_active_catalogue_promotion_rules():
        assert rule.variants_dirty is True

    # parent's product keeps its category and stays published
    parent_product.refresh_from_db()
    assert parent_product.category
    product_channel_listings = ProductChannelListing.objects.filter(
        product=parent_product
    )
    for product_channel_listing in product_channel_listings:
        assert product_channel_listing.is_published is True
        assert product_channel_listing.published_at

    # child's product loses its category and is unpublished
    child_product.refresh_from_db()
    assert not child_product.category
    product_channel_listings = ProductChannelListing.objects.filter(
        product=child_product
    )
    for product_channel_listing in product_channel_listings:
        assert product_channel_listing.is_published is False
        assert not product_channel_listing.published_at
def test_category_delete_removes_reference_to_product(
    staff_api_client,
    category,
    product_type_product_reference_attribute,
    product_type,
    product,
    permission_manage_products,
):
    """Deleting a category also deletes attribute values referencing it (product)."""
    # given
    query = MUTATION_CATEGORY_DELETE

    product_type.product_attributes.add(product_type_product_reference_attribute)

    attr_value = AttributeValue.objects.create(
        attribute=product_type_product_reference_attribute,
        name=category.name,
        slug=f"{product.pk}_{category.pk}",
        reference_category=category,
    )

    associate_attribute_values_to_instance(
        product, {product_type_product_reference_attribute.pk: [attr_value]}
    )

    reference_id = graphene.Node.to_global_id("Category", category.pk)

    variables = {"id": reference_id}

    # when
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["categoryDelete"]

    # both the referencing attribute value and the category itself are gone
    with pytest.raises(attr_value._meta.model.DoesNotExist):
        attr_value.refresh_from_db()
    with pytest.raises(category._meta.model.DoesNotExist):
        category.refresh_from_db()

    assert not data["errors"]
def test_category_delete_removes_reference_to_product_variant(
    staff_api_client,
    category,
    product_type_product_reference_attribute,
    product_type,
    product_list,
    permission_manage_products,
):
    """Deleting a category also deletes attribute values referencing it (variant)."""
    # given
    query = MUTATION_CATEGORY_DELETE

    variant = product_list[0].variants.first()
    product_type.variant_attributes.set([product_type_product_reference_attribute])

    attr_value = AttributeValue.objects.create(
        attribute=product_type_product_reference_attribute,
        name=category.name,
        slug=f"{variant.pk}_{category.pk}",
        reference_category=category,
    )

    associate_attribute_values_to_instance(
        variant, {product_type_product_reference_attribute.pk: [attr_value]}
    )

    reference_id = graphene.Node.to_global_id("Category", category.pk)

    variables = {"id": reference_id}

    # when
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["categoryDelete"]

    # both the referencing attribute value and the category itself are gone
    with pytest.raises(attr_value._meta.model.DoesNotExist):
        attr_value.refresh_from_db()
    with pytest.raises(category._meta.model.DoesNotExist):
        category.refresh_from_db()

    assert not data["errors"]
def test_category_delete_removes_reference_to_page(
    staff_api_client,
    category,
    page,
    page_type_product_reference_attribute,
    permission_manage_products,
):
    """Deleting a category also deletes attribute values referencing it from pages."""
    # given: a page whose reference attribute points at the category
    page.page_type.page_attributes.add(page_type_product_reference_attribute)

    reference_value = AttributeValue.objects.create(
        attribute=page_type_product_reference_attribute,
        name=page.title,
        slug=f"{page.pk}_{category.pk}",
        reference_category=category,
    )
    associate_attribute_values_to_instance(
        page, {page_type_product_reference_attribute.pk: [reference_value]}
    )

    variables = {"id": graphene.Node.to_global_id("Category", category.pk)}

    # when
    response = staff_api_client.post_graphql(
        MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
    )

    # then
    payload = get_graphql_content(response)["data"]["categoryDelete"]
    assert not payload["errors"]
    # both the category and the value referencing it must be gone
    with pytest.raises(reference_value._meta.model.DoesNotExist):
        reference_value.refresh_from_db()
    with pytest.raises(category._meta.model.DoesNotExist):
        category.refresh_from_db()
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/mutations/test_category_delete.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/mutations/test_category_update.py | import json
from unittest.mock import MagicMock, Mock, patch
import graphene
import pytest
from django.core.files import File
from django.utils import timezone
from django.utils.functional import SimpleLazyObject
from django.utils.text import slugify
from freezegun import freeze_time
from .....core.utils.json_serializer import CustomJsonEncoder
from .....product.error_codes import ProductErrorCode
from .....product.models import Category
from .....product.tests.utils import create_image, create_zip_file_with_image_ext
from .....tests.utils import dummy_editorjs
from .....thumbnail.models import Thumbnail
from .....webhook.event_types import WebhookEventAsyncType
from .....webhook.payloads import generate_meta, generate_requestor
from ....tests.utils import (
get_graphql_content,
get_multipart_request_body,
)
MUTATION_CATEGORY_UPDATE_MUTATION = """
mutation($id: ID!, $name: String, $slug: String,
$backgroundImage: Upload, $backgroundImageAlt: String,
$description: JSONString,
$metadata: [MetadataInput!], $privateMetadata: [MetadataInput!]) {
categoryUpdate(
id: $id
input: {
name: $name
description: $description
backgroundImage: $backgroundImage
backgroundImageAlt: $backgroundImageAlt
slug: $slug
metadata: $metadata
privateMetadata: $privateMetadata
}
) {
category {
id
name
description
updatedAt
parent {
id
}
backgroundImage(size: 0) {
alt
url
}
}
errors {
field
message
}
}
}
"""
def test_category_update_mutation(
    monkeypatch, staff_api_client, category, permission_manage_products, media_root
):
    """Happy path: update name, description, slug, image and metadata at once.

    The mutation targets a child category; its parent must stay unchanged and
    new metadata entries must be merged with pre-existing ones, not replace them.
    """
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)
    # create child category and test that the update mutation won't change
    # its parent
    child_category = category.children.create(name="child")

    category_name = "Updated name"
    description = "description"
    category_slug = slugify(category_name)
    category_description = dummy_editorjs(description, True)

    image_file, image_name = create_image()
    image_alt = "Alt text for an image."

    # pre-existing metadata must survive the update (merge, not overwrite)
    old_meta = {"old": "meta"}
    child_category.store_value_in_metadata(items=old_meta)
    child_category.store_value_in_private_metadata(items=old_meta)
    child_category.save(update_fields=["metadata", "private_metadata"])

    metadata_key = "md key"
    metadata_value = "md value"

    category_id = graphene.Node.to_global_id("Category", child_category.pk)
    variables = {
        "name": category_name,
        "description": category_description,
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
        "id": category_id,
        "slug": category_slug,
        "metadata": [{"key": metadata_key, "value": metadata_value}],
        "privateMetadata": [{"key": metadata_key, "value": metadata_value}],
    }
    # file uploads require a multipart request instead of a plain JSON POST
    body = get_multipart_request_body(
        MUTATION_CATEGORY_UPDATE_MUTATION, variables, image_file, image_name
    )

    # when
    response = staff_api_client.post_multipart(body)
    content = get_graphql_content(response)
    data = content["data"]["categoryUpdate"]

    # then
    assert data["errors"] == []
    assert data["category"]["id"] == category_id
    assert data["category"]["name"] == category_name
    assert data["category"]["description"] == category_description

    # parent relation untouched by the update
    parent_id = graphene.Node.to_global_id("Category", category.pk)
    assert data["category"]["parent"]["id"] == parent_id
    category = Category.objects.get(name=category_name)
    assert category.description_plaintext == description
    assert category.background_image.file
    assert data["category"]["backgroundImage"]["alt"] == image_alt
    # new metadata key merged alongside the old entries
    assert category.metadata == {metadata_key: metadata_value, **old_meta}
    assert category.private_metadata == {metadata_key: metadata_value, **old_meta}
def test_category_update_mutation_marks_prices_to_recalculate(
    staff_api_client, category, permission_manage_products, catalogue_promotion, product
):
    """Updating a category marks every promotion rule's variants as dirty."""
    # given: a product in the category so the promotion is affected
    product.category = category
    product.save()
    staff_api_client.user.user_permissions.add(permission_manage_products)

    variables = {
        "id": graphene.Node.to_global_id("Category", category.pk),
        "name": "Updated name",
        "slug": "slug",
        "metadata": [{"key": "md key", "value": "md value"}],
    }

    # when
    response = staff_api_client.post_graphql(
        MUTATION_CATEGORY_UPDATE_MUTATION, variables
    )

    # then — no rule may remain with variants_dirty=False
    get_graphql_content(response)
    assert not catalogue_promotion.rules.filter(variants_dirty=False).exists()
@freeze_time("2023-09-01 12:00:00")
def test_category_update_mutation_with_update_at_field(
    monkeypatch, staff_api_client, category, permission_manage_products, media_root
):
    """With time frozen, ``updatedAt`` reflects exactly the mutation moment."""
    # given: update a child category so the parent relation is also exercised
    child = category.children.create(name="child")
    new_name = "Updated name"
    new_description = dummy_editorjs("description", True)

    variables = {
        "id": graphene.Node.to_global_id("Category", child.pk),
        "name": new_name,
        "description": new_description,
        "slug": slugify(new_name),
    }

    # when
    response = staff_api_client.post_graphql(
        MUTATION_CATEGORY_UPDATE_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )
    data = get_graphql_content(response)["data"]["categoryUpdate"]

    # then
    assert data["category"]["id"] == variables["id"]
    assert data["category"]["name"] == new_name
    assert data["category"]["description"] == new_description
    # frozen clock: the API timestamp equals "now"
    assert data["category"]["updatedAt"] == timezone.now().isoformat()
@freeze_time("2022-05-12 12:00:00")
@patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_category_update_trigger_webhook(
    mocked_webhook_trigger,
    mocked_get_webhooks_for_event,
    any_webhook,
    monkeypatch,
    staff_api_client,
    category,
    permission_manage_products,
    media_root,
    settings,
):
    """A successful update triggers exactly one CATEGORY_UPDATED webhook.

    The async trigger must be called with the expected JSON payload (node id
    plus requestor metadata), the event type, the matched webhooks, the
    category instance and the requestor, with replicas disallowed.
    """
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)
    mocked_get_webhooks_for_event.return_value = [any_webhook]
    # enable the webhook plugin so the mutation actually dispatches events
    settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]

    category_name = "Updated name"
    description = "description"
    category_slug = slugify(category_name)
    category_description = dummy_editorjs(description, True)

    image_file, image_name = create_image()
    image_alt = "Alt text for an image."
    variables = {
        "name": category_name,
        "description": category_description,
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
        "id": graphene.Node.to_global_id("Category", category.pk),
        "slug": category_slug,
    }
    # image upload requires a multipart request
    body = get_multipart_request_body(
        MUTATION_CATEGORY_UPDATE_MUTATION, variables, image_file, image_name
    )

    # when
    response = staff_api_client.post_multipart(body)
    content = get_graphql_content(response)
    data = content["data"]["categoryUpdate"]

    # then
    assert data["errors"] == []
    mocked_webhook_trigger.assert_called_once_with(
        json.dumps(
            {
                "id": variables["id"],
                "meta": generate_meta(
                    requestor_data=generate_requestor(
                        SimpleLazyObject(lambda: staff_api_client.user)
                    )
                ),
            },
            cls=CustomJsonEncoder,
        ),
        WebhookEventAsyncType.CATEGORY_UPDATED,
        [any_webhook],
        category,
        SimpleLazyObject(lambda: staff_api_client.user),
        allow_replica=False,
    )
@patch("saleor.core.tasks.delete_from_storage_task.delay")
def test_category_update_background_image_mutation(
    delete_from_storage_task_mock,
    monkeypatch,
    staff_api_client,
    category,
    permission_manage_products,
    media_root,
):
    """Replacing a background image drops old thumbnails and deletes old files.

    The category already has a background image with a generated thumbnail;
    uploading a new image must remove the thumbnail rows and schedule storage
    deletion for the superseded thumbnail file.
    """
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)

    # existing background image on the category
    alt_text = "Alt text for an image."
    background_mock = MagicMock(spec=File)
    background_mock.name = "image.jpg"
    category.background_image = background_mock
    category.background_image_alt = alt_text
    category.save(update_fields=["background_image", "background_image_alt"])

    # pre-existing thumbnail that should be invalidated by the update
    size = 128
    thumbnail_mock = MagicMock(spec=File)
    thumbnail_mock.name = "thumbnail_image.jpg"
    thumbnail = Thumbnail.objects.create(
        category=category, size=size, image=thumbnail_mock
    )
    img_path = thumbnail.image.name

    category_name = "Updated name"
    image_file, image_name = create_image()
    image_alt = "Alt text for an image."

    category_slug = slugify(category_name)
    category_id = graphene.Node.to_global_id("Category", category.pk)
    variables = {
        "name": category_name,
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
        "id": category_id,
        "slug": category_slug,
    }
    body = get_multipart_request_body(
        MUTATION_CATEGORY_UPDATE_MUTATION, variables, image_file, image_name
    )

    # when
    response = staff_api_client.post_multipart(body)

    # then
    content = get_graphql_content(response)
    data = content["data"]["categoryUpdate"]
    assert data["errors"] == []
    assert data["category"]["id"] == category_id

    category = Category.objects.get(name=category_name)
    assert category.background_image.file
    assert data["category"]["backgroundImage"]["alt"] == image_alt
    assert data["category"]["backgroundImage"]["url"].startswith(
        f"https://example.com/media/category-backgrounds/{image_name}"
    )

    # ensure that thumbnails for old background image have been deleted
    assert not Thumbnail.objects.filter(category_id=category.id)
    delete_from_storage_task_mock.assert_called_once_with(img_path)
@patch("saleor.core.tasks.delete_from_storage_task.delay")
def test_category_update_mutation_invalid_background_image_content_type(
    delete_from_storage_task_mock,
    staff_api_client,
    category,
    permission_manage_products,
    media_root,
):
    """A non-image file with an image extension is rejected; thumbnails survive."""
    # given: a ZIP archive disguised with an image file extension
    image_file, image_name = create_zip_file_with_image_ext()
    image_alt = "Alt text for an image."

    size = 128
    thumbnail_mock = MagicMock(spec=File)
    thumbnail_mock.name = "thumbnail_image.jpg"
    Thumbnail.objects.create(category=category, size=size, image=thumbnail_mock)

    variables = {
        "name": "new-name",
        "slug": "new-slug",
        "id": graphene.Node.to_global_id("Category", category.id),
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
        # NOTE(review): "isPublished" is not declared by the mutation —
        # presumably ignored by the server; confirm before relying on it
        "isPublished": True,
    }
    body = get_multipart_request_body(
        MUTATION_CATEGORY_UPDATE_MUTATION, variables, image_file, image_name
    )

    # when
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products]
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["categoryUpdate"]
    assert data["errors"][0]["field"] == "backgroundImage"
    assert data["errors"][0]["message"] == "Invalid file type."

    # ensure that thumbnails for old background image haven't been deleted
    assert Thumbnail.objects.filter(category_id=category.id)
    delete_from_storage_task_mock.assert_not_called()
@patch("saleor.core.tasks.delete_from_storage_task.delay")
def test_category_update_mutation_invalid_background_image(
    delete_from_storage_task_mock,
    monkeypatch,
    staff_api_client,
    category,
    permission_manage_products,
    media_root,
):
    """An image Pillow cannot parse is rejected and old thumbnails survive."""
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)
    image_file, image_name = create_image()
    image_alt = "Alt text for an image."

    # make Pillow's Image.open raise, simulating a corrupted upload
    error_msg = "Test syntax error"
    image_file_mock = Mock(side_effect=SyntaxError(error_msg))
    monkeypatch.setattr(
        "saleor.graphql.core.validators.file.Image.open", image_file_mock
    )

    size = 128
    thumbnail_mock = MagicMock(spec=File)
    thumbnail_mock.name = "thumbnail_image.jpg"
    Thumbnail.objects.create(category=category, size=size, image=thumbnail_mock)

    variables = {
        "name": "new-name",
        "slug": "new-slug",
        "id": graphene.Node.to_global_id("Category", category.id),
        "backgroundImage": image_name,
        "backgroundImageAlt": image_alt,
        # NOTE(review): "isPublished" is not declared by the mutation —
        # presumably ignored by the server; confirm before relying on it
        "isPublished": True,
    }
    body = get_multipart_request_body(
        MUTATION_CATEGORY_UPDATE_MUTATION, variables, image_file, image_name
    )

    # when
    response = staff_api_client.post_multipart(body)

    # then
    content = get_graphql_content(response)
    data = content["data"]["categoryUpdate"]
    assert data["errors"][0]["field"] == "backgroundImage"
    assert error_msg in data["errors"][0]["message"]

    # ensure that thumbnails for old background image haven't been deleted
    assert Thumbnail.objects.filter(category_id=category.id)
    delete_from_storage_task_mock.assert_not_called()
def test_category_update_mutation_without_background_image(
    monkeypatch, staff_api_client, category, permission_manage_products
):
    """Updating name/description/slug without any image input succeeds."""
    # local query without the Upload variable — no multipart request needed
    query = """
        mutation($id: ID!, $name: String, $slug: String, $description: JSONString) {
            categoryUpdate(
                id: $id
                input: {
                    name: $name
                    description: $description
                    slug: $slug
                }
            ) {
                errors {
                    field
                    message
                }
            }
        }
    """
    category_name = "Updated name"
    variables = {
        "id": graphene.Node.to_global_id(
            "Category", category.children.create(name="child").pk
        ),
        "name": category_name,
        "description": dummy_editorjs("description", True),
        "slug": slugify(category_name),
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    data = content["data"]["categoryUpdate"]
    assert data["errors"] == []
UPDATE_CATEGORY_SLUG_MUTATION = """
mutation($id: ID!, $slug: String) {
categoryUpdate(
id: $id
input: {
slug: $slug
}
) {
category{
name
slug
}
errors {
field
message
code
}
}
}
"""
@pytest.mark.parametrize(
    ("input_slug", "expected_slug", "error_message"),
    [
        ("test-slug", "test-slug", None),
        ("", "", "Slug value cannot be blank."),
        (None, "", "Slug value cannot be blank."),
    ],
)
def test_update_category_slug(
    staff_api_client,
    category,
    permission_manage_products,
    input_slug,
    expected_slug,
    error_message,
):
    """A valid slug is saved; blank/None slugs fail with a REQUIRED error.

    NOTE(review): ``error_message`` only selects the branch — the response
    message text itself is never compared against it.
    """
    query = UPDATE_CATEGORY_SLUG_MUTATION
    old_slug = category.slug

    # sanity: the parametrized slug actually differs from the current one
    assert old_slug != input_slug

    node_id = graphene.Node.to_global_id("Category", category.id)
    variables = {"slug": input_slug, "id": node_id}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    data = content["data"]["categoryUpdate"]
    errors = data["errors"]
    if not error_message:
        assert not errors
        assert data["category"]["slug"] == expected_slug
    else:
        assert errors
        assert errors[0]["field"] == "slug"
        assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
def test_update_category_slug_exists(
    staff_api_client, category, permission_manage_products
):
    """Requesting a slug already taken by another category yields a UNIQUE error."""
    # given: clone the category under the slug we are about to request
    taken_slug = "test-slug"
    clone = Category.objects.get(pk=category.pk)
    clone.pk = None
    clone.slug = taken_slug
    clone.save()
    assert taken_slug != category.slug

    variables = {
        "slug": taken_slug,
        "id": graphene.Node.to_global_id("Category", category.id),
    }

    # when
    response = staff_api_client.post_graphql(
        UPDATE_CATEGORY_SLUG_MUTATION,
        variables,
        permissions=[permission_manage_products],
    )

    # then
    errors = get_graphql_content(response)["data"]["categoryUpdate"]["errors"]
    assert errors
    assert errors[0]["field"] == "slug"
    assert errors[0]["code"] == ProductErrorCode.UNIQUE.name
@pytest.mark.parametrize(
    ("input_slug", "expected_slug", "input_name", "error_message", "error_field"),
    [
        ("test-slug", "test-slug", "New name", None, None),
        ("", "", "New name", "Slug value cannot be blank.", "slug"),
        (None, "", "New name", "Slug value cannot be blank.", "slug"),
        ("test-slug", "", None, "This field cannot be blank.", "name"),
        ("test-slug", "", "", "This field cannot be blank.", "name"),
        (None, None, None, "Slug value cannot be blank.", "slug"),
    ],
)
def test_update_category_slug_and_name(
    staff_api_client,
    category,
    permission_manage_products,
    input_slug,
    expected_slug,
    input_name,
    error_message,
    error_field,
):
    """Updating slug and name together validates each field independently.

    NOTE(review): ``error_message`` only selects the branch and ``error_field``
    pins which field fails; the message text itself is never asserted.
    """
    query = """
            mutation($id: ID!, $name: String, $slug: String) {
            categoryUpdate(
                id: $id
                input: {
                    name: $name
                    slug: $slug
                }
            ) {
                category{
                    name
                    slug
                }
                errors {
                    field
                    message
                    code
                }
            }
        }
    """
    old_name = category.name
    old_slug = category.slug

    # sanity: the parametrized values actually differ from current state
    assert input_slug != old_slug
    assert input_name != old_name

    node_id = graphene.Node.to_global_id("Category", category.id)
    variables = {"slug": input_slug, "name": input_name, "id": node_id}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    category.refresh_from_db()
    data = content["data"]["categoryUpdate"]
    errors = data["errors"]
    if not error_message:
        # response and database must both reflect the new values
        assert data["category"]["name"] == input_name == category.name
        assert data["category"]["slug"] == input_slug == category.slug
    else:
        assert errors
        assert errors[0]["field"] == error_field
        assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
def test_update_category_mutation_remove_background_image(
    staff_api_client, category_with_image, permission_manage_products
):
    """Passing ``backgroundImage: null`` clears an existing background image."""
    query = """
        mutation updateCategory($id: ID!, $backgroundImage: Upload) {
            categoryUpdate(
                id: $id, input: {
                    backgroundImage: $backgroundImage
                }
            ) {
                category {
                    backgroundImage{
                        url
                    }
                }
                errors {
                    field
                    message
                }
            }
        }
    """
    # precondition: the fixture category actually has an image to remove
    assert category_with_image.background_image
    variables = {
        "id": graphene.Node.to_global_id("Category", category_with_image.id),
        "backgroundImage": None,
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    data = content["data"]["categoryUpdate"]["category"]
    # image gone from both the API response and the database row
    assert not data["backgroundImage"]
    category_with_image.refresh_from_db()
    assert not category_with_image.background_image
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/mutations/test_category_update.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 563,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/test_categories_query.py | import pytest
from .....product.models import Category, Product
from .....tests.utils import dummy_editorjs
from ....tests.utils import (
get_graphql_content,
)
LEVELED_CATEGORIES_QUERY = """
query leveled_categories($level: Int) {
categories(level: $level, first: 20) {
edges {
node {
name
parent {
name
}
}
}
}
}
"""
def test_category_level(user_api_client, category):
    """``level`` filters by tree depth: 0 returns roots, 1 their children."""
    child = Category.objects.create(name="child", slug="chi-ld", parent=category)

    def _first_node(level):
        # helper: run the leveled query and return the first returned node
        response = user_api_client.post_graphql(
            LEVELED_CATEGORIES_QUERY, {"level": level}
        )
        return get_graphql_content(response)["data"]["categories"]["edges"][0]["node"]

    root_node = _first_node(0)
    assert root_node["name"] == category.name
    assert root_node["parent"] is None

    child_node = _first_node(1)
    assert child_node["name"] == child.name
    assert child_node["parent"]["name"] == category.name
NOT_EXISTS_IDS_CATEGORIES_QUERY = """
query ($filter: CategoryFilterInput!) {
categories(first: 5, filter: $filter) {
edges {
node {
id
name
}
}
}
}
"""
def test_categories_query_ids_not_exists(user_api_client, category):
    """Malformed global IDs in the ``ids`` filter produce a single GraphQL error."""
    variables = {"filter": {"ids": ["W3KATGDn3fq3ZH4=", "zH9pYmz7yWD3Hy8="]}}

    response = user_api_client.post_graphql(NOT_EXISTS_IDS_CATEGORIES_QUERY, variables)
    content = get_graphql_content(response, ignore_errors=True)

    expected_error = '{"ids":[{"message":"Invalid ID specified.","code":""}]}'
    assert len(content["errors"]) == 1
    assert content["errors"][0]["message"] == expected_error
    assert content["data"]["categories"] is None
QUERY_CATEGORIES_WITH_SORT = """
query ($sort_by: CategorySortingInput!) {
categories(first:5, sortBy: $sort_by) {
edges{
node{
name
}
}
}
}
"""
@pytest.mark.parametrize(
    ("category_sort", "result_order"),
    [
        (
            {"field": "NAME", "direction": "ASC"},
            ["Cat1", "Cat2", "SubCat", "SubSubCat"],
        ),
        (
            {"field": "NAME", "direction": "DESC"},
            ["SubSubCat", "SubCat", "Cat2", "Cat1"],
        ),
        (
            {"field": "SUBCATEGORY_COUNT", "direction": "ASC"},
            ["Cat2", "SubSubCat", "Cat1", "SubCat"],
        ),
        (
            {"field": "SUBCATEGORY_COUNT", "direction": "DESC"},
            ["SubCat", "Cat1", "SubSubCat", "Cat2"],
        ),
        (
            {"field": "PRODUCT_COUNT", "direction": "ASC"},
            ["Cat2", "SubCat", "SubSubCat", "Cat1"],
        ),
        (
            {"field": "PRODUCT_COUNT", "direction": "DESC"},
            ["Cat1", "SubSubCat", "SubCat", "Cat2"],
        ),
    ],
)
def test_categories_query_with_sort(
    category_sort,
    result_order,
    staff_api_client,
    permission_manage_products,
    product_type,
):
    """Sorting by NAME, SUBCATEGORY_COUNT and PRODUCT_COUNT orders as expected.

    Fixture tree built below:
        Cat1 (1 product) -> SubCat -> SubSubCat (1 product)
        Cat2 (no children, no products)
    NOTE(review): tie-breaking between categories with equal counts is assumed
    stable per the parametrized expectations — confirm against the sorter.
    """
    cat1 = Category.objects.create(
        name="Cat1",
        slug="slug_category1",
        description=dummy_editorjs("Description cat1."),
    )
    # one product directly in Cat1
    Product.objects.create(
        name="Test",
        slug="test",
        product_type=product_type,
        category=cat1,
    )
    # Cat2: leaf with no products
    Category.objects.create(
        name="Cat2",
        slug="slug_category2",
        description=dummy_editorjs("Description cat2."),
    )
    # SubCat under Cat1
    Category.objects.create(
        name="SubCat",
        slug="slug_subcategory1",
        parent=Category.objects.get(name="Cat1"),
        description=dummy_editorjs("Subcategory_description of cat1."),
    )
    # SubSubCat under SubCat, with one product
    subsubcat = Category.objects.create(
        name="SubSubCat",
        slug="slug_subcategory2",
        parent=Category.objects.get(name="SubCat"),
        description=dummy_editorjs("Subcategory_description of cat1."),
    )
    Product.objects.create(
        name="Test2",
        slug="test2",
        product_type=product_type,
        category=subsubcat,
    )
    variables = {"sort_by": category_sort}
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(QUERY_CATEGORIES_WITH_SORT, variables)
    content = get_graphql_content(response)
    categories = content["data"]["categories"]["edges"]

    # result must match the parametrized ordering exactly, position by position
    for order, category_name in enumerate(result_order):
        assert categories[order]["node"]["name"] == category_name
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/test_categories_query.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/tests/queries/test_category_query.py | import logging
from unittest.mock import MagicMock
import graphene
from django.core.files import File
from .....product.models import Category, Product
from .....product.utils.costs import get_product_costs_data
from .....tests.utils import dummy_editorjs
from .....thumbnail.models import Thumbnail
from ....core.enums import LanguageCodeEnum, ThumbnailFormatEnum
from ....tests.utils import (
get_graphql_content,
get_graphql_content_from_response,
)
QUERY_CATEGORY = """
query ($id: ID, $slug: String, $channel: String, $slugLanguageCode: LanguageCodeEnum){
category(
id: $id,
slug: $slug,
slugLanguageCode: $slugLanguageCode
) {
id
name
ancestors(first: 20) {
edges {
node {
name
}
}
}
children(first: 20) {
edges {
node {
name
}
}
}
products(first: 10, channel: $channel) {
edges {
node {
id
}
}
}
}
}
"""
def test_category_query_by_id(user_api_client, product, channel_USD):
    """Fetching a category by global ID returns name, ancestors and children."""
    category = Category.objects.first()
    variables = {
        "id": graphene.Node.to_global_id("Category", category.pk),
        "channel": channel_USD.slug,
    }

    response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
    node = get_graphql_content(response)["data"]["category"]

    assert node is not None
    assert node["name"] == category.name
    # ancestor/child edge counts mirror the database tree
    assert len(node["ancestors"]["edges"]) == category.get_ancestors().count()
    assert len(node["children"]["edges"]) == category.get_children().count()
def test_category_query_with_ancestors(user_api_client, product, channel_USD):
    """Querying a child category exposes its ancestor chain."""
    # given: a fresh child under the first existing category
    parent = Category.objects.first()
    child = Category.objects.create(
        name="Child Category", slug="child-category", parent=parent
    )

    # when
    variables = {
        "id": graphene.Node.to_global_id("Category", child.pk),
        "channel": channel_USD.slug,
    }
    response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
    node = get_graphql_content(response)["data"]["category"]

    # then
    assert node is not None
    assert len(node["ancestors"]["edges"]) == child.get_ancestors().count()
    assert len(node["children"]["edges"]) == child.get_children().count()
def test_category_query_invalid_id(user_api_client, product, channel_USD):
    """A syntactically invalid ID yields a descriptive top-level error."""
    bad_id = "'"

    response = user_api_client.post_graphql(
        QUERY_CATEGORY, {"id": bad_id, "channel": channel_USD.slug}
    )
    content = get_graphql_content_from_response(response)

    assert len(content["errors"]) == 1
    assert (
        content["errors"][0]["message"]
        == f"Invalid ID: {bad_id}. Expected: Category."
    )
    assert content["data"]["category"] is None
def test_category_query_object_with_given_id_does_not_exist(
    user_api_client, product, channel_USD
):
    """A well-formed ID for a nonexistent category resolves to null."""
    missing_id = graphene.Node.to_global_id("Category", -1)

    response = user_api_client.post_graphql(
        QUERY_CATEGORY, {"id": missing_id, "channel": channel_USD.slug}
    )

    content = get_graphql_content(response)
    assert content["data"]["category"] is None
def test_category_query_object_with_invalid_object_type(
    user_api_client, product, channel_USD
):
    """An ID encoded for a different type (Product) resolves to null."""
    category = Category.objects.first()
    wrong_type_id = graphene.Node.to_global_id("Product", category.pk)

    response = user_api_client.post_graphql(
        QUERY_CATEGORY, {"id": wrong_type_id, "channel": channel_USD.slug}
    )

    content = get_graphql_content(response)
    assert content["data"]["category"] is None
def test_category_query_doesnt_show_not_available_products(
    user_api_client, product, channel_USD
):
    """Products rendered unavailable (no price) are hidden from the category."""
    category = Category.objects.first()
    # Drop the price so the only product becomes invisible in the channel.
    product.variants.get().channel_listings.update(price_amount=None)

    variables = {
        "id": graphene.Node.to_global_id("Category", category.pk),
        "channel": channel_USD.slug,
    }
    response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
    node = get_graphql_content(response)["data"]["category"]

    assert node is not None
    assert node["name"] == category.name
    assert not node["products"]["edges"]
def test_category_query_description(user_api_client, product, channel_USD):
    """Both ``description`` and deprecated ``descriptionJson`` return the JSON."""
    category = Category.objects.first()
    # expected value is the JSON-string form; stored value is the dict form
    description = dummy_editorjs("Test description.", json_format=True)
    category.description = dummy_editorjs("Test description.")
    category.save()
    variables = {
        "id": graphene.Node.to_global_id("Category", category.pk),
        "channel": channel_USD.slug,
    }
    query = """
        query ($id: ID, $slug: String){
            category(
                id: $id,
                slug: $slug,
            ) {
                id
                name
                description
                descriptionJson
            }
        }
    """
    response = user_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    category_data = content["data"]["category"]
    assert category_data["description"] == description
    assert category_data["descriptionJson"] == description
def test_category_query_without_description(user_api_client, product, channel_USD):
    """With no description set, ``description`` is null and ``descriptionJson`` is "{}"."""
    category = Category.objects.first()
    category.save()
    variables = {
        "id": graphene.Node.to_global_id("Category", category.pk),
        "channel": channel_USD.slug,
    }
    query = """
        query ($id: ID, $slug: String){
            category(
                id: $id,
                slug: $slug,
            ) {
                id
                name
                description
                descriptionJson
            }
        }
    """
    response = user_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    category_data = content["data"]["category"]
    assert category_data["description"] is None
    # deprecated field falls back to an empty JSON object rather than null
    assert category_data["descriptionJson"] == "{}"
def test_category_query_by_slug(user_api_client, product, channel_USD):
    """Looking a category up by its slug returns the same data as by ID."""
    category = Category.objects.first()

    response = user_api_client.post_graphql(
        QUERY_CATEGORY, variables={"slug": category.slug, "channel": channel_USD.slug}
    )
    node = get_graphql_content(response)["data"]["category"]

    assert node is not None
    assert node["name"] == category.name
    assert len(node["ancestors"]["edges"]) == category.get_ancestors().count()
    assert len(node["children"]["edges"]) == category.get_children().count()
def test_category_query_by_translated_slug(
    user_api_client, category, category_translation_with_slug_pl, channel_USD
):
    """A translated slug resolves the category when ``slugLanguageCode`` matches."""
    variables = {
        "slug": category_translation_with_slug_pl.slug,
        "channel": channel_USD.slug,
        "slugLanguageCode": LanguageCodeEnum.PL.name,
    }

    response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
    node = get_graphql_content(response)["data"]["category"]

    assert node is not None
    assert node["name"] == category.name
def test_category_query_error_when_id_and_slug_provided(
    user_api_client, product, graphql_log_handler, channel_USD
):
    """Supplying both ``id`` and ``slug`` is rejected with one handled error."""
    # given: capture handled GraphQL errors at DEBUG level
    logging.getLogger("saleor.graphql.errors.handled").setLevel(logging.DEBUG)
    category = Category.objects.first()
    variables = {
        "id": graphene.Node.to_global_id("Category", category.pk),
        "slug": category.slug,
        "channel": channel_USD.slug,
    }

    # when
    response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)

    # then
    assert graphql_log_handler.messages == [
        "saleor.graphql.errors.handled[DEBUG].GraphQLError"
    ]
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["errors"]) == 1
def test_category_query_error_when_no_param(
    user_api_client, product, graphql_log_handler
):
    """Supplying neither ``id`` nor ``slug`` is rejected with one handled error."""
    # given: capture handled GraphQL errors at DEBUG level
    logging.getLogger("saleor.graphql.errors.handled").setLevel(logging.DEBUG)

    # when
    response = user_api_client.post_graphql(QUERY_CATEGORY, variables={})

    # then
    assert graphql_log_handler.messages == [
        "saleor.graphql.errors.handled[DEBUG].GraphQLError"
    ]
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["errors"]) == 1
def test_query_category_product_only_visible_in_listings_as_customer(
    user_api_client, product_list, channel_USD
):
    """Customers don't see products whose listing has visible_in_listings=False."""
    # given: hide the first product from listings in every channel
    category = Category.objects.first()
    product_list[0].channel_listings.all().update(visible_in_listings=False)

    product_count = Product.objects.count()

    variables = {
        "id": graphene.Node.to_global_id("Category", category.pk),
        "channel": channel_USD.slug,
    }

    # when
    response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)

    # then: the hidden product is excluded from the category's product edges
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["data"]["category"]["products"]["edges"]) == product_count - 1
def test_query_category_product_visible_in_listings_as_staff_without_manage_products(
    staff_api_client, product_list, channel_USD
):
    """Staff WITHOUT manage-products perm also don't see listing-hidden products."""
    # given: hide the first product from listings in every channel
    category = Category.objects.first()
    product_list[0].channel_listings.all().update(visible_in_listings=False)

    product_count = Product.objects.count()

    variables = {
        "id": graphene.Node.to_global_id("Category", category.pk),
        "channel": channel_USD.slug,
    }

    # when
    response = staff_api_client.post_graphql(QUERY_CATEGORY, variables=variables)

    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert (
        len(content["data"]["category"]["products"]["edges"]) == product_count - 1
    )  # invisible doesn't count
def test_query_category_product_only_visible_in_listings_as_staff_with_perm(
    staff_api_client, product_list, permission_manage_products
):
    """Staff with manage-products see listing-hidden products as well."""
    # given
    staff_api_client.user.user_permissions.add(permission_manage_products)
    first_category = Category.objects.first()
    product_list[0].channel_listings.all().update(visible_in_listings=False)
    all_products = Product.objects.count()
    category_id = graphene.Node.to_global_id("Category", first_category.pk)
    # when
    response = staff_api_client.post_graphql(
        QUERY_CATEGORY, variables={"id": category_id}
    )
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["data"]["category"]["products"]["edges"]) == all_products
def test_query_category_product_only_visible_in_listings_as_app_without_manage_products(
    app_api_client, product_list, channel_USD
):
    """Apps lacking manage-products cannot see listing-hidden products."""
    # given
    first_category = Category.objects.first()
    product_list[0].channel_listings.all().update(visible_in_listings=False)
    total_products = Product.objects.count()
    variables = {
        "id": graphene.Node.to_global_id("Category", first_category.pk),
        "channel": channel_USD.slug,
    }
    # when
    response = app_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
    # then
    content = get_graphql_content(response, ignore_errors=True)
    edges = content["data"]["category"]["products"]["edges"]
    # invisible doesn't count
    assert len(edges) == total_products - 1
def test_query_category_product_only_visible_in_listings_as_app_with_perm(
    app_api_client, product_list, permission_manage_products
):
    """Apps holding manage-products see listing-hidden products as well."""
    # given
    app_api_client.app.permissions.add(permission_manage_products)
    first_category = Category.objects.first()
    product_list[0].channel_listings.all().update(visible_in_listings=False)
    all_products = Product.objects.count()
    category_id = graphene.Node.to_global_id("Category", first_category.pk)
    # when
    response = app_api_client.post_graphql(QUERY_CATEGORY, variables={"id": category_id})
    # then
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["data"]["category"]["products"]["edges"]) == all_products
FETCH_CATEGORY_IMAGE_QUERY = """
query fetchCategory($id: ID!, $size: Int, $format: ThumbnailFormatEnum){
category(id: $id) {
name
backgroundImage(size: $size, format: $format) {
url
alt
}
}
}
"""
def test_category_image_query_with_size_and_format_proxy_url_returned(
    user_api_client, non_default_category, media_root
):
    """A sized WEBP request returns a proxy URL with the nearest available size."""
    # given
    category = non_default_category
    image_alt = "Alt text for an image."
    image_file = MagicMock(spec=File)
    image_file.name = "image.jpg"
    category.background_image = image_file
    category.background_image_alt = image_alt
    category.save(update_fields=["background_image", "background_image_alt"])
    thumb_format = ThumbnailFormatEnum.WEBP.name
    category_id = graphene.Node.to_global_id("Category", category.pk)
    variables = {"id": category_id, "size": 120, "format": thumb_format}
    # when
    response = user_api_client.post_graphql(FETCH_CATEGORY_IMAGE_QUERY, variables)
    # then
    data = get_graphql_content(response)["data"]["category"]
    assert data["backgroundImage"]["alt"] == image_alt
    # requested 120px is rounded up to the supported 128px bucket
    expected_url = (
        f"https://example.com/thumbnail/{category_id}/128/{thumb_format.lower()}/"
    )
    assert data["backgroundImage"]["url"] == expected_url
def test_category_image_query_with_size_proxy_url_returned(
    user_api_client, non_default_category, media_root
):
    """A size-only request yields a format-less thumbnail proxy URL."""
    # given
    category = non_default_category
    image_alt = "Alt text for an image."
    image_file = MagicMock(spec=File)
    image_file.name = "image.jpg"
    category.background_image = image_file
    category.background_image_alt = image_alt
    category.save(update_fields=["background_image", "background_image_alt"])
    requested_size = 128
    category_id = graphene.Node.to_global_id("Category", category.pk)
    # when
    response = user_api_client.post_graphql(
        FETCH_CATEGORY_IMAGE_QUERY, {"id": category_id, "size": requested_size}
    )
    # then
    data = get_graphql_content(response)["data"]["category"]
    assert data["backgroundImage"]["alt"] == image_alt
    assert data["backgroundImage"]["url"] == (
        f"https://example.com/thumbnail/{category_id}/{requested_size}/"
    )
def test_category_image_query_with_size_thumbnail_url_returned(
    user_api_client, non_default_category, media_root
):
    """When a thumbnail of the nearest size exists, its media URL is returned."""
    # given
    category = non_default_category
    image_alt = "Alt text for an image."
    image_file = MagicMock(spec=File)
    image_file.name = "image.jpg"
    category.background_image = image_file
    category.background_image_alt = image_alt
    category.save(update_fields=["background_image", "background_image_alt"])
    thumbnail_file = MagicMock(spec=File)
    thumbnail_file.name = "thumbnail_image.jpg"
    # pre-create a 128px thumbnail; a 120px request rounds up to it
    Thumbnail.objects.create(category=category, size=128, image=thumbnail_file)
    category_id = graphene.Node.to_global_id("Category", category.pk)
    # when
    response = user_api_client.post_graphql(
        FETCH_CATEGORY_IMAGE_QUERY, {"id": category_id, "size": 120}
    )
    # then
    data = get_graphql_content(response)["data"]["category"]
    assert data["backgroundImage"]["alt"] == image_alt
    assert data["backgroundImage"]["url"] == (
        f"https://example.com/media/thumbnails/{thumbnail_file.name}"
    )
def test_category_image_query_zero_size_custom_format_provided_original_image_returned(
    user_api_client, non_default_category, media_root
):
    """size=0 returns the original image URL even when a format is requested."""
    # given
    category = non_default_category
    image_alt = "Alt text for an image."
    image_file = MagicMock(spec=File)
    image_file.name = "image.jpg"
    category.background_image = image_file
    category.background_image_alt = image_alt
    category.save(update_fields=["background_image", "background_image_alt"])
    thumb_format = ThumbnailFormatEnum.WEBP.name
    category_id = graphene.Node.to_global_id("Category", category.pk)
    variables = {"id": category_id, "format": thumb_format, "size": 0}
    # when
    response = user_api_client.post_graphql(FETCH_CATEGORY_IMAGE_QUERY, variables)
    # then
    data = get_graphql_content(response)["data"]["category"]
    assert data["backgroundImage"]["alt"] == image_alt
    assert data["backgroundImage"]["url"] == (
        f"https://example.com/media/category-backgrounds/{image_file.name}"
    )
def test_category_image_query_zero_size_value_original_image_returned(
    user_api_client, non_default_category, media_root
):
    """size=0 with no format also falls back to the original image URL."""
    # given
    category = non_default_category
    image_alt = "Alt text for an image."
    image_file = MagicMock(spec=File)
    image_file.name = "image.jpg"
    category.background_image = image_file
    category.background_image_alt = image_alt
    category.save(update_fields=["background_image", "background_image_alt"])
    category_id = graphene.Node.to_global_id("Category", category.pk)
    # when
    response = user_api_client.post_graphql(
        FETCH_CATEGORY_IMAGE_QUERY, {"id": category_id, "size": 0}
    )
    # then
    data = get_graphql_content(response)["data"]["category"]
    assert data["backgroundImage"]["alt"] == image_alt
    assert data["backgroundImage"]["url"] == (
        f"https://example.com/media/category-backgrounds/{image_file.name}"
    )
def test_category_image_query_without_associated_file(
    user_api_client, non_default_category
):
    """A category with no background image resolves backgroundImage to null."""
    # given
    category_id = graphene.Node.to_global_id("Category", non_default_category.pk)
    # when
    response = user_api_client.post_graphql(
        FETCH_CATEGORY_IMAGE_QUERY, {"id": category_id}
    )
    # then
    data = get_graphql_content(response)["data"]["category"]
    assert data["name"] == non_default_category.name
    assert data["backgroundImage"] is None
def test_query_category_for_federation(api_client, non_default_category):
    """Category nodes resolve through the Apollo Federation `_entities` field."""
    # given
    category_id = graphene.Node.to_global_id("Category", non_default_category.pk)
    query = """
      query GetCategoryInFederation($representations: [_Any!]!) {
        _entities(representations: $representations) {
          __typename
          ... on Category {
            id
            name
          }
        }
      }
    """
    variables = {
        "representations": [{"__typename": "Category", "id": category_id}],
    }
    # when
    response = api_client.post_graphql(query, variables)
    # then
    content = get_graphql_content(response)
    expected_entity = {
        "__typename": "Category",
        "id": category_id,
        "name": non_default_category.name,
    }
    assert content["data"]["_entities"] == [expected_entity]
def test_query_products_no_channel_shipping_zones(
    staff_api_client, product, permission_manage_products, stock, channel_USD
):
    """Without shipping zones on the channel, products resolve as unavailable."""
    # given
    channel_USD.shipping_zones.clear()
    category = Category.objects.first()
    first_product = category.products.first()
    query = """
    query CategoryProducts($id: ID, $channel: String, $address: AddressInput) {
        category(id: $id) {
            products(first: 20, channel: $channel) {
                edges {
                    node {
                        id
                        name
                        isAvailable(address: $address)
                    }
                }
            }
        }
    }
    """
    staff_api_client.user.user_permissions.add(permission_manage_products)
    variables = {
        "id": graphene.Node.to_global_id("Category", category.id),
        "channel": channel_USD.slug,
        "address": {"country": "US"},
    }
    # when
    response = staff_api_client.post_graphql(query, variables)
    # then
    content = get_graphql_content(response)
    category_data = content["data"]["category"]
    assert category_data is not None
    edges = category_data["products"]["edges"]
    assert len(edges) == category.products.count()
    node = edges[0]["node"]
    assert node["name"] == first_product.name
    # no shipping zone -> nothing can be delivered to the given address
    assert node["isAvailable"] is False
def test_fetch_product_from_category_query(
    staff_api_client, product, permission_manage_products, stock, channel_USD
):
    """Fetch a category's products with listings and verify cost/margin data.

    Asserts that purchase cost and margin computed locally via
    get_product_costs_data match the values the API reports, and that the
    variant-level cost price matches its channel listing.
    """
    category = Category.objects.first()
    # rebind to the category's own first product (shadows the fixture on purpose)
    product = category.products.first()
    query = """
    query CategoryProducts($id: ID, $channel: String, $address: AddressInput) {
        category(id: $id) {
            products(first: 20, channel: $channel) {
                edges {
                    node {
                        id
                        name
                        slug
                        thumbnail{
                            url
                            alt
                        }
                        media {
                            url
                        }
                        variants {
                            name
                            channelListings {
                                costPrice {
                                    amount
                                }
                            }
                        }
                        channelListings {
                            purchaseCost {
                                start {
                                    amount
                                }
                                stop {
                                    amount
                                }
                            }
                            margin {
                                start
                                stop
                            }
                        }
                        isAvailable(address: $address)
                        pricing(address: $address) {
                            priceRange {
                                start {
                                    gross {
                                        amount
                                        currency
                                    }
                                    net {
                                        amount
                                        currency
                                    }
                                    currency
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    """
    staff_api_client.user.user_permissions.add(permission_manage_products)
    variables = {
        "id": graphene.Node.to_global_id("Category", category.id),
        "channel": channel_USD.slug,
        "address": {"country": "US"},
    }
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    assert content["data"]["category"] is not None
    product_edges_data = content["data"]["category"]["products"]["edges"]
    assert len(product_edges_data) == category.products.count()
    product_data = product_edges_data[0]["node"]
    assert product_data["name"] == product.name
    assert product_data["slug"] == product.slug
    variant = product.variants.first()
    # queryset (not a single row): get_product_costs_data aggregates over it
    variant_channel_listing = variant.channel_listings.filter(channel_id=channel_USD.id)
    purchase_cost, margin = get_product_costs_data(
        variant_channel_listing, channel_USD.currency_code
    )
    cost_start = product_data["channelListings"][0]["purchaseCost"]["start"]["amount"]
    cost_stop = product_data["channelListings"][0]["purchaseCost"]["stop"]["amount"]
    assert purchase_cost.start.amount == cost_start
    assert purchase_cost.stop.amount == cost_stop
    assert product_data["isAvailable"] is True
    assert margin[0] == product_data["channelListings"][0]["margin"]["start"]
    assert margin[1] == product_data["channelListings"][0]["margin"]["stop"]
    # cross-check the variant-level cost price against its single channel listing
    variant = product.variants.first()
    variant_channel_listing = variant.channel_listings.get(channel_id=channel_USD.id)
    variant_channel_data = product_data["variants"][0]["channelListings"][0]
    variant_cost = variant_channel_data["costPrice"]["amount"]
    assert variant_channel_listing.cost_price.amount == variant_cost
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/tests/queries/test_category_query.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 646,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/order/tests/queries/test_draft_order_with_where.py | import datetime
import graphene
import pytest
from django.utils import timezone
from freezegun import freeze_time
from .....account.models import Address
from .....order import (
OrderAuthorizeStatus,
OrderChargeStatus,
OrderEvents,
OrderStatus,
)
from .....order.models import Order, OrderEvent, OrderLine
from ....tests.utils import get_graphql_content, get_graphql_content_from_response
def test_order_query_with_filter_and_where(
    staff_api_client,
    permission_group_manage_orders,
    orders,
    draft_order,
):
    """`filter` and `where` are mutually exclusive on the draftOrders query."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    query = """
    query ($where: DraftOrderWhereInput!, $filter: OrderDraftFilterInput!) {
        draftOrders(first: 10, where: $where, filter: $filter) {
            totalCount
            edges {
                node {
                    id
                }
            }
        }
    }
    """
    variables = {
        "where": {"number": {"eq": draft_order.number}},
        "filter": {"search": "test"},
    }
    # when
    response = staff_api_client.post_graphql(query, variables)
    # then
    content = get_graphql_content_from_response(response)
    assert content["errors"][0]["message"] == (
        "Only one filtering argument (filter or where) can be specified."
    )
    assert not content["data"]["draftOrders"]
DRAFT_ORDERS_WHERE_QUERY = """
query($where: DraftOrderWhereInput!) {
draftOrders(first: 10, where: $where) {
edges {
node {
id
number
created
updatedAt
}
}
}
}
"""
def test_draft_order_filter_by_ids(
    staff_api_client, permission_group_manage_orders, draft_order_list
):
    """Filtering by a list of global IDs returns exactly those draft orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    targets = draft_order_list[:2]
    global_ids = [graphene.Node.to_global_id("Order", order.pk) for order in targets]
    variables = {"where": {"ids": global_ids}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order.number) for order in targets
    }
def test_draft_order_filter_by_none_as_ids(
    staff_api_client, permission_group_manage_orders, draft_order_list
):
    """`ids: null` matches no draft orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"ids": None}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert edges == []
def test_draft_order_filter_by_ids_empty_list(
    staff_api_client, permission_group_manage_orders, draft_order_list
):
    """An empty `ids` list matches no draft orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"ids": []}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert edges == []
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        # closed range catching the two backdated orders
        (
            {
                "gte": (timezone.now() - datetime.timedelta(days=10)).isoformat(),
                "lte": (timezone.now() - datetime.timedelta(days=3)).isoformat(),
            },
            [1, 2],
        ),
        # open lower bound: only the freshly created draft_order
        (
            {
                "gte": (timezone.now() - datetime.timedelta(days=3)).isoformat(),
            },
            [0],
        ),
        # open upper bound in the future: everything matches
        (
            {
                "lte": (timezone.now() + datetime.timedelta(days=1)).isoformat(),
            },
            [0, 1, 2],
        ),
        # upper bound before all orders existed: nothing matches
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=15)).isoformat(),
            },
            [],
        ),
        # null / empty range inputs all yield an empty result
        (None, []),
        ({"gte": None}, []),
        ({"lte": None}, []),
        ({"lte": None, "gte": None}, []),
        ({}, []),
    ],
)
def test_draft_orders_filter_by_created_at(
    where,
    indexes,
    draft_order,
    order_generator,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filter draft orders by `createdAt` ranges against three known timestamps."""
    # given
    # backdate two extra drafts (5 and 10 days old) via freeze_time
    with freeze_time((timezone.now() - datetime.timedelta(days=5)).isoformat()):
        order_2 = order_generator(status=OrderStatus.DRAFT)
    with freeze_time((timezone.now() - datetime.timedelta(days=10)).isoformat()):
        order_3 = order_generator(status=OrderStatus.DRAFT)
    order_list = [draft_order, order_2, order_3]
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"createdAt": where}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        # closed future range catching orders updated +15d and +3d
        (
            {
                "gte": (timezone.now() + datetime.timedelta(days=3)).isoformat(),
                "lte": (timezone.now() + datetime.timedelta(days=25)).isoformat(),
            },
            [0, 1],
        ),
        # open lower bound: only the +15d order
        (
            {
                "gte": (timezone.now() + datetime.timedelta(days=5)).isoformat(),
            },
            [0],
        ),
        # generous upper bound: everything matches
        (
            {
                "lte": (timezone.now() + datetime.timedelta(days=25)).isoformat(),
            },
            [0, 1, 2],
        ),
        # upper bound in the distant past: nothing matches
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=25)).isoformat(),
            },
            [],
        ),
        # null / empty range inputs all yield an empty result
        (None, []),
        ({"gte": None}, []),
        ({"lte": None}, []),
        ({"lte": None, "gte": None}, []),
        ({}, []),
    ],
)
def test_draft_orders_filter_by_updated_at(
    where,
    indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filter draft orders by `updatedAt` ranges against three known timestamps."""
    # given
    # stagger updated_at into the future: +15, +3 and +1 days
    draft_order_list[0].updated_at = timezone.now() + datetime.timedelta(days=15)
    draft_order_list[1].updated_at = timezone.now() + datetime.timedelta(days=3)
    draft_order_list[2].updated_at = timezone.now() + datetime.timedelta(days=1)
    Order.objects.bulk_update(draft_order_list, ["updated_at"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"updatedAt": where}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[index].number) for index in indexes}
def test_draft_order_filter_by_users(
    staff_api_client, permission_group_manage_orders, draft_order_list, user_list
):
    """`user: {oneOf: [...]}` matches orders belonging to any listed user."""
    # given
    for order, owner in zip(draft_order_list, user_list):
        order.user = owner
    Order.objects.bulk_update(draft_order_list, ["user"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    selected_ids = [
        graphene.Node.to_global_id("User", owner.pk) for owner in user_list[:2]
    ]
    variables = {"where": {"user": {"oneOf": selected_ids}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order.number) for order in draft_order_list[:2]
    }
def test_draft_order_filter_by_user(
    staff_api_client, permission_group_manage_orders, draft_order_list, user_list
):
    """`user: {eq: <id>}` returns the single order owned by that user."""
    # given
    owner = user_list[0]
    target = draft_order_list[0]
    target.user = owner
    target.save(update_fields=["user"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {
        "where": {"user": {"eq": graphene.Node.to_global_id("User", owner.pk)}},
    }
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(target.number)
def test_draft_order_filter_by_none_as_user(
    staff_api_client, permission_group_manage_orders, draft_order_list, user_list
):
    """`user: {eq: null}` matches nothing even when an order has a user."""
    # given
    draft_order_list[0].user = user_list[0]
    draft_order_list[0].save(update_fields=["user"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"user": {"eq": None}}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert edges == []
def test_draft_order_filter_by_user_email(
    staff_api_client, permission_group_manage_orders, draft_order_list, user_list
):
    """`userEmail: {eq: ...}` matches the order carrying that email."""
    # given
    target = draft_order_list[1]
    target.user_email = user_list[0].email
    target.save(update_fields=["user_email"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"userEmail": {"eq": user_list[0].email}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(target.number)
def test_draft_order_filter_by_none_as_user_email(
    staff_api_client, permission_group_manage_orders, draft_order_list, user_list
):
    """`userEmail: {eq: null}` matches nothing even when an email is set."""
    # given
    draft_order_list[0].user_email = user_list[0].email
    draft_order_list[0].save(update_fields=["user_email"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"userEmail": {"eq": None}}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert edges == []
def test_draft_order_filter_by_numbers_range(
    staff_api_client, permission_group_manage_orders, draft_order_list
):
    """A `number.range` with only `lte` returns the lowest-numbered orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    by_number = Order.objects.order_by("number")
    upper_bound = by_number[1].number
    variables = {"where": {"number": {"range": {"lte": upper_bound}}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(draft_order_list[0].number),
        str(draft_order_list[1].number),
    }
def test_draft_order_filter_by_number(
    staff_api_client, permission_group_manage_orders, draft_order_list, draft_order
):
    """`number: {eq: ...}` returns exactly the matching draft order."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"number": {"eq": draft_order.number}}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(draft_order.number)
def test_draft_order_filter_by_none_as_number(
    staff_api_client, permission_group_manage_orders, draft_order_list
):
    """`number: {eq: null}` matches no draft orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"number": {"eq": None}}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert edges == []
def test_draft_order_filter_by_number_nothing_returned(
    staff_api_client, permission_group_manage_orders, draft_order_list
):
    """A number matching no existing order yields an empty result."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"number": {"eq": "999"}}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert edges == []
def test_draft_order_filter_by_channel_id(
    staff_api_client,
    permission_group_manage_orders,
    draft_order_list,
    channel_USD,
    channel_PLN,
):
    """`channelId: {eq: ...}` returns only orders in that channel."""
    # given
    assigned_channels = [channel_USD, channel_PLN, channel_USD]
    for order, channel in zip(draft_order_list, assigned_channels):
        order.channel = channel
    Order.objects.bulk_update(draft_order_list, ["channel"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    usd_channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
    variables = {"where": {"channelId": {"eq": usd_channel_id}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(draft_order_list[0].number),
        str(draft_order_list[2].number),
    }
def test_draft_order_filter_by_channel_ids(
    staff_api_client,
    permission_group_manage_orders,
    draft_order_list,
    channel_USD,
    channel_PLN,
):
    """`channelId: {oneOf: [...]}` matches orders in any of the listed channels."""
    # given
    assigned_channels = [channel_USD, channel_PLN, channel_USD]
    for order, channel in zip(draft_order_list, assigned_channels):
        order.channel = channel
    Order.objects.bulk_update(draft_order_list, ["channel"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    channel_ids = [
        graphene.Node.to_global_id("Channel", channel.id)
        for channel in (channel_USD, channel_PLN)
    ]
    variables = {"where": {"channelId": {"oneOf": channel_ids}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 3
    assert {edge["node"]["number"] for edge in edges} == {
        str(order.number) for order in draft_order_list
    }
def test_draft_order_filter_by_channel_id_none(
    staff_api_client,
    permission_group_manage_orders,
    draft_order_list,
    channel_USD,
    channel_PLN,
):
    """`channelId: {eq: null}` matches no draft orders."""
    # given
    assigned_channels = [channel_USD, channel_PLN, channel_USD]
    for order, channel in zip(draft_order_list, assigned_channels):
        order.channel = channel
    Order.objects.bulk_update(draft_order_list, ["channel"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"channelId": {"eq": None}}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert edges == []
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        ({"eq": OrderAuthorizeStatus.FULL.upper()}, [0]),
        ({"eq": OrderAuthorizeStatus.PARTIAL.upper()}, [1]),
        ({"oneOf": [OrderAuthorizeStatus.NONE.upper()]}, [2]),
        (
            {
                "oneOf": [
                    OrderAuthorizeStatus.FULL.upper(),
                    OrderAuthorizeStatus.PARTIAL.upper(),
                ]
            },
            [0, 1],
        ),
        ({"oneOf": [OrderAuthorizeStatus.FULL.upper()]}, [0]),
        # empty / null filters match nothing
        ({}, []),
        ({"oneOf": []}, []),
        ({"eq": None}, []),
        (None, []),
    ],
)
def test_draft_orders_filter_by_authorize_status(
    where,
    indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filter draft orders by `authorizeStatus` using `eq` and `oneOf` variants."""
    # given
    # one order per authorize status: FULL, PARTIAL, NONE
    draft_order_list[0].authorize_status = OrderAuthorizeStatus.FULL
    draft_order_list[1].authorize_status = OrderAuthorizeStatus.PARTIAL
    draft_order_list[2].authorize_status = OrderAuthorizeStatus.NONE
    Order.objects.bulk_update(draft_order_list, ["authorize_status"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"authorizeStatus": where}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        # no order has FULL in the fixture setup below
        ({"eq": OrderChargeStatus.FULL.upper()}, []),
        ({"eq": OrderChargeStatus.PARTIAL.upper()}, [1]),
        ({"oneOf": [OrderChargeStatus.NONE.upper()]}, [2]),
        (
            {
                "oneOf": [
                    OrderChargeStatus.FULL.upper(),
                    OrderChargeStatus.PARTIAL.upper(),
                ]
            },
            [1],
        ),
        ({"eq": OrderChargeStatus.OVERCHARGED.upper()}, [0]),
        # empty / null filters match nothing
        ({}, []),
        ({"oneOf": []}, []),
        ({"eq": None}, []),
        (None, []),
    ],
)
def test_draft_orders_filter_by_charge_status(
    where,
    indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filter draft orders by `chargeStatus` using `eq` and `oneOf` variants."""
    # given
    # one order per charge status: OVERCHARGED, PARTIAL, NONE (no FULL)
    draft_order_list[0].charge_status = OrderChargeStatus.OVERCHARGED
    draft_order_list[1].charge_status = OrderChargeStatus.PARTIAL
    draft_order_list[2].charge_status = OrderChargeStatus.NONE
    Order.objects.bulk_update(draft_order_list, ["charge_status"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"chargeStatus": where}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[index].number) for index in indexes}
def test_draft_order_filter_is_click_and_collect_true(
    staff_api_client,
    permission_group_manage_orders,
    draft_order_list,
    warehouse_for_cc,
):
    """isClickAndCollect=True matches orders with a collection point or its name."""
    # given
    cc_order = draft_order_list[0]
    cc_order.collection_point = warehouse_for_cc
    cc_order.collection_point_name = warehouse_for_cc.name
    name_only_order = draft_order_list[1]
    name_only_order.collection_point_name = warehouse_for_cc.name
    Order.objects.bulk_update(
        [cc_order, name_only_order], ["collection_point", "collection_point_name"]
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"isClickAndCollect": True}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 2
    expected_ids = {
        graphene.Node.to_global_id("Order", order.pk)
        for order in (cc_order, name_only_order)
    }
    assert {edge["node"]["id"] for edge in edges} == expected_ids
def test_draft_order_filter_is_click_and_collect_false(
    staff_api_client,
    permission_group_manage_orders,
    draft_order_list,
    warehouse_for_cc,
):
    """isClickAndCollect=False matches only orders with no click-and-collect data."""
    # given
    first, second = draft_order_list[0], draft_order_list[1]
    first.collection_point = warehouse_for_cc
    first.collection_point_name = warehouse_for_cc.name
    second.collection_point = warehouse_for_cc
    Order.objects.bulk_update(
        [first, second], ["collection_point", "collection_point_name"]
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"isClickAndCollect": False}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 1
    plain_order_id = graphene.Node.to_global_id("Order", draft_order_list[2].pk)
    assert edges[0]["node"]["id"] == plain_order_id
def test_draft_order_filter_is_click_and_collect_none(
    staff_api_client,
    permission_group_manage_orders,
    draft_order_list,
    warehouse_for_cc,
):
    """isClickAndCollect=null matches no draft orders."""
    # given
    first, second = draft_order_list[0], draft_order_list[1]
    first.collection_point = warehouse_for_cc
    first.collection_point_name = warehouse_for_cc.name
    second.collection_point = warehouse_for_cc
    Order.objects.bulk_update(
        [first, second], ["collection_point", "collection_point_name"]
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(
        DRAFT_ORDERS_WHERE_QUERY, {"where": {"isClickAndCollect": None}}
    )
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert edges == []
def test_draft_order_filter_by_voucher_code_eq(
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
    voucher_with_many_codes,
):
    """`voucherCode: {eq: ...}` matches the single order using that code."""
    # given
    codes = voucher_with_many_codes.codes.all()
    for order, code in zip(draft_order_list, codes):
        order.voucher_code = code.code
    # only the middle order is also linked to the voucher object itself
    draft_order_list[1].voucher = voucher_with_many_codes
    Order.objects.bulk_update(draft_order_list, ["voucher_code", "voucher"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"voucherCode": {"eq": codes[0].code}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(draft_order_list[0].number)
def test_draft_order_filter_by_voucher_code_one_of(
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
    voucher_with_many_codes,
):
    """`voucherCode: {oneOf: [...]}` matches orders using any listed code."""
    # given
    codes = voucher_with_many_codes.codes.all()
    for order, code in zip(draft_order_list, codes):
        order.voucher_code = code.code
    # only the middle order is also linked to the voucher object itself
    draft_order_list[1].voucher = voucher_with_many_codes
    Order.objects.bulk_update(draft_order_list, ["voucher_code", "voucher"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"voucherCode": {"oneOf": [codes[1].code, codes[2].code]}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    edges = get_graphql_content(response)["data"]["draftOrders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(draft_order_list[1].number),
        str(draft_order_list[2].number),
    }
# Empty/None filter values must match nothing rather than everything.
@pytest.mark.parametrize(
    "where",
    [
        {},
        {"oneOf": []},
        {"eq": None},
        None,
    ],
)
def test_draft_order_filter_by_voucher_code_empty_value(
    where,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
    voucher_with_many_codes,
):
    """An empty/None ``voucherCode`` filter returns no draft orders."""
    # given
    codes = voucher_with_many_codes.codes.all()
    draft_order_list[0].voucher_code = codes[0].code
    draft_order_list[1].voucher_code = codes[1].code
    draft_order_list[1].voucher = voucher_with_many_codes
    draft_order_list[2].voucher_code = codes[2].code
    Order.objects.bulk_update(draft_order_list, ["voucher_code", "voucher"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"voucherCode": where}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == 0
# Each case: list of per-line metadata predicates (AND-ed together) and the
# indexes of draft_order_list expected to match.
@pytest.mark.parametrize(
    ("filter_input", "expected_indexes"),
    [
        ([{"metadata": {"key": "foo"}}], [0, 1]),
        ([{"metadata": {"key": "foo", "value": {"eq": "bar"}}}], [0]),
        ([{"metadata": {"key": "foo", "value": {"eq": "baz"}}}], []),
        ([{"metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}}], [0, 1]),
        ([{"metadata": {"key": "notfound"}}], []),
        ([{"metadata": {"key": "foo", "value": {"eq": None}}}], []),
        ([{"metadata": {"key": "foo", "value": {"oneOf": []}}}], []),
        (None, []),
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "foo", "value": {"eq": "bar"}}},
            ],
            [0],
        ),
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "baz", "value": {"eq": "zaz"}}},
            ],
            [0, 1],
        ),
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "foo", "value": {"eq": "baz"}}},
            ],
            [],
        ),
    ],
)
def test_draft_orders_filter_by_lines_metadata(
    filter_input,
    expected_indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering draft orders by metadata stored on their order lines."""
    # given
    lines = []
    # One line per draft order; the third order's line carries no metadata.
    metadata_values = [
        {
            "foo": "bar",
            "baz": "zaz",
        },
        {
            "foo": "zaz",
            "baz": "zaz",
        },
        {},
    ]
    for order, metadata_value in zip(draft_order_list, metadata_values, strict=True):
        lines.append(
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                metadata=metadata_value,
            )
        )
    OrderLine.objects.bulk_create(lines)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"lines": filter_input}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[i].number) for i in expected_indexes}
# Orders get lines_count 1, 2, 3 respectively; cases cover range/eq/oneOf and
# empty/None inputs (which must match nothing).
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        ({"range": {"gte": 2, "lte": 4}}, [1, 2]),
        ({"range": {"gte": 3}}, [2]),
        ({"range": {"lte": 2}}, [0, 1]),
        ({"eq": 2}, [1]),
        ({"oneOf": [1, 3]}, [0, 2]),
        ({"eq": 99}, []),
        ({}, []),
        ({"range": {"gte": None}}, []),
        ({"range": {"lte": None}}, []),
        ({"eq": None}, []),
        ({"oneOf": []}, []),
        (None, []),
    ],
)
def test_draft_orders_filter_by_lines_count(
    where,
    indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering draft orders by their denormalized ``lines_count`` field."""
    # given
    draft_order_list[0].lines_count = 1
    draft_order_list[1].lines_count = 2
    draft_order_list[2].lines_count = 3
    Order.objects.bulk_update(draft_order_list, ["lines_count"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"linesCount": where}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[index].number) for index in indexes}
# Amount filters are scoped to a currency; a None currency matches across
# currencies, and empty/None amount inputs must match nothing.
@pytest.mark.parametrize(
    ("currency", "where", "indexes"),
    [
        ("USD", {"range": {"gte": "100.00", "lte": "200.00"}}, [0, 1]),
        ("USD", {"range": {"gte": "150.00"}}, [1]),
        ("PLN", {"range": {"gte": "150.00"}}, [2]),
        (None, {"range": {"gte": "150.00"}}, [1, 2]),
        ("USD", {"range": {"lte": "120.00"}}, [0]),
        ("USD", {"eq": "150.00"}, [1]),
        ("PLN", {"eq": "150.00"}, []),
        ("USD", {"oneOf": ["100.00", "110.00"]}, [0]),
        ("USD", {}, []),
        (None, {"range": {"gte": None}}, []),
        ("USD", {"range": {"lte": None}}, []),
        ("USD", {"eq": None}, []),
        (None, {"eq": None}, []),
    ],
)
def test_draft_orders_filter_by_total_gross(
    currency,
    where,
    indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering draft orders by ``totalGross`` amount, optionally per currency."""
    # given
    draft_order_list[0].total_gross_amount = "110.00"
    draft_order_list[0].currency = "USD"
    draft_order_list[1].total_gross_amount = "150.00"
    draft_order_list[1].currency = "USD"
    draft_order_list[2].total_gross_amount = "200.00"
    draft_order_list[2].currency = "PLN"
    Order.objects.bulk_update(draft_order_list, ["total_gross_amount", "currency"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {
        "where": {
            "totalGross": {
                "currency": currency,
                "amount": where,
            }
        }
    }
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[index].number) for index in indexes}
# Mirrors the totalGross cases above, exercised against the net amount.
@pytest.mark.parametrize(
    ("currency", "where", "indexes"),
    [
        ("USD", {"range": {"gte": "100.00", "lte": "200.00"}}, [0, 1]),
        ("USD", {"range": {"gte": "150.00"}}, [1]),
        ("PLN", {"range": {"gte": "150.00"}}, [2]),
        (None, {"range": {"gte": "150.00"}}, [1, 2]),
        ("USD", {"range": {"lte": "120.00"}}, [0]),
        ("USD", {"eq": "150.00"}, [1]),
        ("PLN", {"eq": "150.00"}, []),
        ("USD", {"oneOf": ["100.00", "110.00"]}, [0]),
        ("USD", {}, []),
        (None, {"range": {"gte": None}}, []),
        ("USD", {"range": {"lte": None}}, []),
        ("USD", {"eq": None}, []),
        (None, {"eq": None}, []),
    ],
)
def test_draft_orders_filter_by_total_net(
    currency,
    where,
    indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering draft orders by ``totalNet`` amount, optionally per currency."""
    # given
    draft_order_list[0].total_net_amount = "110.00"
    draft_order_list[0].currency = "USD"
    draft_order_list[1].total_net_amount = "150.00"
    draft_order_list[1].currency = "USD"
    draft_order_list[2].total_net_amount = "200.00"
    draft_order_list[2].currency = "PLN"
    Order.objects.bulk_update(draft_order_list, ["total_net_amount", "currency"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {
        "where": {
            "totalNet": {
                "currency": currency,
                "amount": where,
            }
        }
    }
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[index].number) for index in indexes}
# Metadata filter on the order itself: key presence, exact value, oneOf, and
# empty/None inputs (which must match nothing).
@pytest.mark.parametrize(
    ("metadata", "expected_indexes"),
    [
        ({"key": "foo"}, [0, 1]),
        ({"key": "foo", "value": {"eq": "bar"}}, [0]),
        ({"key": "foo", "value": {"eq": "baz"}}, []),
        ({"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}, [0, 1]),
        ({"key": "notfound"}, []),
        ({"key": "foo", "value": {"eq": None}}, []),
        ({"key": "foo", "value": {"oneOf": []}}, []),
        (None, []),
    ],
)
def test_draft_orders_filter_by_metadata(
    metadata,
    expected_indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering draft orders by their own ``metadata`` key/value pairs."""
    # given
    draft_order_list[0].metadata = {"foo": "bar"}
    draft_order_list[1].metadata = {"foo": "zaz"}
    draft_order_list[2].metadata = {}
    Order.objects.bulk_update(draft_order_list, ["metadata"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"metadata": metadata}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[i].number) for i in expected_indexes}
def test_draft_orders_filter_by_product_type_id(
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """``productTypeId`` ``eq`` filter matches orders whose lines reference that type."""
    # given
    lines = []
    # One order line per draft order, each pointing at a distinct product type.
    product_type_ids = [3, 4, 5]
    for order, product_type_id in zip(draft_order_list, product_type_ids, strict=True):
        lines.append(
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                product_type_id=product_type_id,
            )
        )
    OrderLine.objects.bulk_create(lines)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    product_type_id = graphene.Node.to_global_id("ProductType", 4)
    variables = {"where": {"productTypeId": {"eq": product_type_id}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == 1
    assert str(draft_order_list[1].number) == orders[0]["node"]["number"]
def test_draft_orders_filter_by_product_type_ids(
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """``productTypeId`` ``oneOf`` filter matches orders for any listed product type."""
    # given
    lines = []
    product_type_ids = [3, 4, 5]
    for order, product_type_id in zip(draft_order_list, product_type_ids, strict=True):
        lines.append(
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                product_type_id=product_type_id,
            )
        )
    OrderLine.objects.bulk_create(lines)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # Query for the first two product types only.
    product_type_ids = [
        graphene.Node.to_global_id("ProductType", id) for id in product_type_ids[:2]
    ]
    variables = {"where": {"productTypeId": {"oneOf": product_type_ids}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(draft_order_list[:2])
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order.number) for order in draft_order_list[:2]}
def test_draft_orders_filter_by_product_type_ids_nothing_match(
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """``productTypeId`` ``oneOf`` with only unknown ids returns no orders."""
    # given
    lines = []
    product_type_ids = [3, 4, 5]
    for order, product_type_id in zip(draft_order_list, product_type_ids, strict=True):
        lines.append(
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                product_type_id=product_type_id,
            )
        )
    OrderLine.objects.bulk_create(lines)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # Ids 6 and 7 are not referenced by any created line.
    product_type_ids = [graphene.Node.to_global_id("ProductType", id) for id in [6, 7]]
    variables = {"where": {"productTypeId": {"oneOf": product_type_ids}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == 0
def test_draft_orders_filter_by_product_type_none(
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """``productTypeId`` ``eq: None`` matches nothing (null is not a valid id)."""
    # given
    lines = []
    product_type_ids = [3, 4, 5]
    for order, product_type_id in zip(draft_order_list, product_type_ids, strict=True):
        lines.append(
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                product_type_id=product_type_id,
            )
        )
    OrderLine.objects.bulk_create(lines)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"productTypeId": {"eq": None}}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == 0
# Events fixture (set up in the test body): all three orders get a NOTE_ADDED
# event at 2025-01-01; the first two also get ORDER_FULLY_PAID at 2025-02-02.
# Multiple entries in the events list are AND-ed across an order's events.
@pytest.mark.parametrize(
    ("event_input", "expected_indexes"),
    [
        (
            [
                {
                    "date": {"gte": "2025-01-01T00:00:00Z"},
                    "type": {"eq": OrderEvents.NOTE_ADDED.upper()},
                }
            ],
            [0, 1, 2],
        ),
        (
            [
                {
                    "date": {"gte": "2025-01-01T00:00:00Z"},
                    "type": {"eq": OrderEvents.ORDER_FULLY_PAID.upper()},
                }
            ],
            [0, 1],
        ),
        (
            [
                {
                    "date": {"gte": "2026-01-01T00:00:00Z"},
                }
            ],
            [],
        ),
        (
            [
                {
                    "date": {"gte": "2020-01-01T00:00:00Z"},
                }
            ],
            [0, 1, 2],
        ),
        (
            [
                {
                    "type": {
                        "oneOf": [
                            OrderEvents.NOTE_ADDED.upper(),
                            OrderEvents.ORDER_FULLY_PAID.upper(),
                        ]
                    },
                }
            ],
            [0, 1, 2],
        ),
        (
            [
                {
                    "type": {"eq": OrderEvents.NOTE_ADDED.upper()},
                },
                {
                    "type": {"eq": OrderEvents.ORDER_FULLY_PAID.upper()},
                },
            ],
            [0, 1],
        ),
        (
            [
                {
                    "date": {"gte": "2025-01-01T00:00:00Z"},
                    "type": {"oneOf": [OrderEvents.NOTE_ADDED.upper()]},
                },
                {
                    "date": {"gte": "2025-02-01T00:00:00Z"},
                    "type": {"oneOf": [OrderEvents.ORDER_FULLY_PAID.upper()]},
                },
            ],
            [0, 1],
        ),
        (
            [
                {
                    "date": {"gte": "2025-01-01T00:00:00Z"},
                    "type": {"eq": OrderEvents.NOTE_ADDED.upper()},
                },
                {
                    "date": {"gte": "2025-02-02T00:00:00Z"},
                },
            ],
            [0, 1],
        ),
    ],
)
def test_draft_orders_filter_by_order_events(
    event_input,
    expected_indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering draft orders by attached order events' type and date."""
    # given
    with freeze_time("2025-01-01T00:00:00Z"):
        OrderEvent.objects.bulk_create(
            [
                OrderEvent(order=order, type=OrderEvents.NOTE_ADDED)
                for order in draft_order_list
            ]
        )
    with freeze_time("2025-02-02T00:00:00Z"):
        OrderEvent.objects.bulk_create(
            [
                OrderEvent(order=order, type=OrderEvents.ORDER_FULLY_PAID)
                for order in draft_order_list[:2]
            ]
        )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"events": event_input}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[i].number) for i in expected_indexes}
# Transactions fixture (set up in the test body): order 0 has two CARD
# transactions (brands "Brand" and "Brand2"), order 1 has an OTHER
# transaction with no card data, order 2 has a CARD transaction ("Brand4").
# Multiple entries in the transactions list are AND-ed per order.
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "type": {"eq": "CARD"},
                        }
                    }
                ]
            },
            [0, 2],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "type": {"eq": "OTHER"},
                        }
                    }
                ]
            },
            [1],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "card": {
                                "brand": {"eq": "Brand"},
                            }
                        }
                    }
                ]
            },
            [0],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "card": {
                                "brand": {"eq": "Brand4"},
                            }
                        }
                    }
                ]
            },
            [2],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "card": {
                                "brand": {"eq": "Brand2"},
                            }
                        }
                    }
                ]
            },
            [0],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "type": {"oneOf": ["CARD", "OTHER"]},
                        }
                    }
                ]
            },
            [0, 1, 2],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "card": {
                                "brand": {"oneOf": ["Brand2", "Brand4"]},
                            }
                        }
                    }
                ]
            },
            [0, 2],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "type": {"eq": "CARD"},
                        }
                    },
                    {
                        "paymentMethodDetails": {
                            "card": {"brand": {"eq": "Brand"}},
                        }
                    },
                ]
            },
            [0],
        ),
    ],
)
def test_draft_orders_filter_by_transaction_payment_details(
    where,
    indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
    transaction_item_generator,
):
    """Filtering draft orders by their transactions' payment-method details."""
    # given
    # first_transaction
    transaction_item_generator(
        order_id=draft_order_list[0].pk,
        charged_value=draft_order_list[0].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Credit card",
        cc_brand="Brand",
        cc_first_digits="1234",
        cc_last_digits="5678",
        cc_exp_month=12,
        cc_exp_year=2025,
    )
    # second_transaction
    transaction_item_generator(
        order_id=draft_order_list[0].pk,
        charged_value=draft_order_list[0].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Second Credit card",
        cc_brand="Brand2",
        cc_first_digits="1234",
        cc_last_digits="5678",
        cc_exp_month=12,
        cc_exp_year=2025,
    )
    # third_transaction
    transaction_item_generator(
        order_id=draft_order_list[1].pk,
        charged_value=draft_order_list[1].total.gross.amount,
        payment_method_type="other",
        payment_method_name="Third payment method",
        cc_brand=None,
        cc_first_digits=None,
        cc_last_digits=None,
        cc_exp_month=None,
        cc_exp_year=None,
    )
    # fourth_transaction
    transaction_item_generator(
        order_id=draft_order_list[2].pk,
        charged_value=draft_order_list[2].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Fourth Credit card",
        cc_brand="Brand4",
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": where}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[index].number) for index in indexes}
# Transactions fixture (set up in the test body): order 0 has transactions
# with metadata {"foo": "bar"} and {}, order 1 has {"foo": "zaz"}, order 2
# has {}. Multiple metadata predicates are AND-ed per order.
@pytest.mark.parametrize(
    ("metadata_list", "expected_indexes"),
    [
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}},
            ],
            [0, 1],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "bar"}}},
                {"metadata": {"key": "foo", "value": {"eq": "zaz"}}},
            ],
            [],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "bar"}}},
                {"metadata": {"key": "notfound"}},
            ],
            [],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "zaz"}}},
                {"metadata": {"key": "foo"}},
            ],
            [1],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "baz"}}},
                {"metadata": {"key": "notfound"}},
            ],
            [],
        ),
    ],
)
def test_draft_orders_filter_by_transaction_metadata(
    metadata_list,
    expected_indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
    transaction_item_generator,
):
    """Filtering draft orders by metadata stored on their transactions."""
    # given
    transaction_item_generator(
        order_id=draft_order_list[0].pk,
        charged_value=draft_order_list[0].total.gross.amount,
        metadata={"foo": "bar"},
    )
    transaction_item_generator(
        order_id=draft_order_list[0].pk,
        charged_value=draft_order_list[0].total.gross.amount,
        metadata={},
    )
    transaction_item_generator(
        order_id=draft_order_list[1].pk,
        charged_value=draft_order_list[1].total.gross.amount,
        metadata={"foo": "zaz"},
    )
    transaction_item_generator(
        order_id=draft_order_list[2].pk,
        charged_value=draft_order_list[2].total.gross.amount,
        metadata={},
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"transactions": metadata_list}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[i].number) for i in expected_indexes}
# Combines metadata predicates with payment-method-details predicates in the
# same transactions filter list (AND-ed per order).
@pytest.mark.parametrize(
    ("transaction_filters", "expected_indexes"),
    [
        (
            [
                {"metadata": {"key": "foo"}},
                {"paymentMethodDetails": {"type": {"eq": "CARD"}}},
            ],
            [0, 2],
        ),
        (
            [
                {"metadata": {"key": "foo"}},
                {"paymentMethodDetails": {"type": {"eq": "OTHER"}}},
            ],
            [1],
        ),
        (
            [
                {"metadata": {"key": "notfound"}},
                {"paymentMethodDetails": {"type": {"eq": "OTHER"}}},
            ],
            [],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "baz"}}},
                {"paymentMethodDetails": {"type": {"eq": "CARD"}}},
            ],
            [0],
        ),
    ],
)
def test_draft_orders_filter_by_transactions_with_mixed_conditions(
    transaction_filters,
    expected_indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
    transaction_item_generator,
):
    """Filtering draft orders by mixed transaction metadata + payment details."""
    # given
    transaction_item_generator(
        order_id=draft_order_list[0].pk,
        charged_value=draft_order_list[0].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Credit card",
        cc_brand="Brand",
        cc_first_digits="1234",
        cc_last_digits="5678",
        cc_exp_month=12,
        cc_exp_year=2025,
        metadata={},
    )
    # second_transaction
    transaction_item_generator(
        order_id=draft_order_list[0].pk,
        charged_value=draft_order_list[0].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Second Credit card",
        cc_brand="Brand2",
        cc_first_digits="1234",
        cc_last_digits="5678",
        cc_exp_month=12,
        cc_exp_year=2025,
        metadata={"foo": "baz"},
    )
    # third_transaction
    transaction_item_generator(
        order_id=draft_order_list[1].pk,
        charged_value=draft_order_list[1].total.gross.amount,
        payment_method_type="other",
        payment_method_name="Third payment method",
        cc_brand=None,
        cc_first_digits=None,
        cc_last_digits=None,
        cc_exp_month=None,
        cc_exp_year=None,
        metadata={"foo": "zaz"},
    )
    # fourth_transaction
    transaction_item_generator(
        order_id=draft_order_list[2].pk,
        charged_value=draft_order_list[2].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Fourth Credit card",
        cc_brand="Brand4",
        metadata={"foo": "bar"},
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"transactions": transaction_filters}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[i].number) for i in expected_indexes}
# Address fixture (set up in the test body): orders 0..2 get billing
# addresses with phones +48123456789/+1987654321/+86555555555 and countries
# GE/US/CN respectively. phoneNumber and country conditions are AND-ed.
@pytest.mark.parametrize(
    ("address_filter", "expected_indexes"),
    [
        ({"phoneNumber": {"eq": "+48123456789"}}, [0]),
        ({"phoneNumber": {"eq": "+1987654321"}}, [1]),
        ({"phoneNumber": {"eq": "notfound"}}, []),
        ({"phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]}}, [0, 2]),
        ({"phoneNumber": {"oneOf": ["notfound"]}}, []),
        ({"country": {"eq": "GE"}}, [0]),
        ({"country": {"eq": "US"}}, [1]),
        ({"country": {"eq": "CN"}}, [2]),
        ({"country": {"eq": "JP"}}, []),
        ({"country": {"oneOf": ["GE", "CN"]}}, [0, 2]),
        ({"country": {"oneOf": ["JP"]}}, []),
        ({"country": {"notOneOf": ["GE", "CN", "PL"]}}, [1]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "GE"}}, [0]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "US"}}, []),
        (
            {
                "phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]},
                "country": {"notOneOf": ["GE"]},
            },
            [2],
        ),
        (None, []),
        ({"phoneNumber": {"eq": None}}, []),
        ({"phoneNumber": {"oneOf": []}}, []),
        ({"country": {"eq": None}}, []),
        ({"country": {"oneOf": []}}, []),
    ],
)
def test_draft_orders_filter_by_billing_address(
    address_filter,
    expected_indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering draft orders by billing-address phone number and country."""
    # given
    phones = [
        "+48123456789",
        "+1987654321",
        "+86555555555",
    ]
    countries = ["GE", "US", "CN"]
    addresses = [
        Address.objects.create(
            first_name="John",
            last_name="Doe",
            company_name="Mirumee Software",
            street_address_1="Tęczowa 7",
            city="WROCŁAW",
            postal_code="53-601",
            country=country,
            phone=phone,
        )
        for phone, country in zip(phones, countries, strict=True)
    ]
    for order, address in zip(draft_order_list, addresses, strict=True):
        order.billing_address = address
    Order.objects.bulk_update(draft_order_list, ["billing_address"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"billingAddress": address_filter}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[i].number) for i in expected_indexes}
# Mirrors the billing-address cases above, exercised against shippingAddress.
@pytest.mark.parametrize(
    ("address_filter", "expected_indexes"),
    [
        ({"phoneNumber": {"eq": "+48123456789"}}, [0]),
        ({"phoneNumber": {"eq": "+1987654321"}}, [1]),
        ({"phoneNumber": {"eq": "notfound"}}, []),
        ({"phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]}}, [0, 2]),
        ({"phoneNumber": {"oneOf": ["notfound"]}}, []),
        ({"country": {"eq": "GE"}}, [0]),
        ({"country": {"eq": "US"}}, [1]),
        ({"country": {"eq": "CN"}}, [2]),
        ({"country": {"eq": "JP"}}, []),
        ({"country": {"oneOf": ["GE", "CN"]}}, [0, 2]),
        ({"country": {"oneOf": ["JP"]}}, []),
        ({"country": {"notOneOf": ["GE", "CN", "PL"]}}, [1]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "GE"}}, [0]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "US"}}, []),
        (
            {
                "phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]},
                "country": {"notOneOf": ["GE"]},
            },
            [2],
        ),
        (None, []),
        ({"phoneNumber": {"eq": None}}, []),
        ({"phoneNumber": {"oneOf": []}}, []),
        ({"country": {"eq": None}}, []),
        ({"country": {"oneOf": []}}, []),
    ],
)
def test_draft_orders_filter_by_shipping_address(
    address_filter,
    expected_indexes,
    draft_order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering draft orders by shipping-address phone number and country."""
    # given
    phones = [
        "+48123456789",
        "+1987654321",
        "+86555555555",
    ]
    countries = ["GE", "US", "CN"]
    addresses = [
        Address.objects.create(
            first_name="John",
            last_name="Doe",
            company_name="Mirumee Software",
            street_address_1="Tęczowa 7",
            city="WROCŁAW",
            postal_code="53-601",
            country=country,
            phone=phone,
        )
        for phone, country in zip(phones, countries, strict=True)
    ]
    for order, address in zip(draft_order_list, addresses, strict=True):
        order.shipping_address = address
    Order.objects.bulk_update(draft_order_list, ["shipping_address"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"shippingAddress": address_filter}}
    # when
    response = staff_api_client.post_graphql(DRAFT_ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["draftOrders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(draft_order_list[i].number) for i in expected_indexes}
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/order/tests/queries/test_draft_order_with_where.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1719,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/checkout/lock_objects.py | from django.db.models import QuerySet
from .models import Checkout, CheckoutLine
def checkout_qs_select_for_update() -> QuerySet[Checkout]:
    """Return a ``Checkout`` queryset that row-locks with ``SELECT ... FOR UPDATE``.

    ``order_by("pk")`` fixes the row order — presumably so concurrent
    transactions acquire locks in a consistent sequence (TODO confirm intent).
    ``of=["self"]`` limits the lock to the ``Checkout`` rows themselves,
    not any joined tables.
    """
    # Drop the redundant parentheses around the `of` list for consistency
    # with the sibling lock helpers (e.g. order/payment lock_objects).
    return Checkout.objects.order_by("pk").select_for_update(of=["self"])
def checkout_lines_qs_select_for_update() -> QuerySet[CheckoutLine]:
    """Return a ``CheckoutLine`` queryset that row-locks with ``SELECT ... FOR UPDATE``.

    Rows are ordered by ``pk`` and only the ``CheckoutLine`` rows themselves
    are locked (``of=["self"]``).
    """
    # Redundant parentheses around the `of` list removed for consistency
    # with the other lock_objects helpers in the project.
    return CheckoutLine.objects.order_by("pk").select_for_update(of=["self"])
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/lock_objects.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/order/lock_objects.py | from django.db.models import QuerySet
from .models import Order, OrderLine
def order_lines_qs_select_for_update() -> QuerySet[OrderLine]:
    """Return an ``OrderLine`` queryset that row-locks with ``SELECT ... FOR UPDATE``.

    Rows are ordered by ``pk``; ``of=["self"]`` locks only the ``OrderLine``
    rows themselves.
    """
    return OrderLine.objects.order_by("pk").select_for_update(of=["self"])
def order_qs_select_for_update() -> QuerySet[Order]:
    """Return an ``Order`` queryset that row-locks with ``SELECT ... FOR UPDATE``.

    Rows are ordered by ``pk``; ``of=["self"]`` locks only the ``Order`` rows
    themselves.
    """
    # Redundant parentheses around the `of` list removed, matching
    # order_lines_qs_select_for_update above.
    return Order.objects.order_by("pk").select_for_update(of=["self"])
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/order/lock_objects.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/payment/lock_objects.py | from typing import TYPE_CHECKING, Optional
from uuid import UUID
from django.db.models import QuerySet
from ..checkout.lock_objects import checkout_qs_select_for_update
from ..order.lock_objects import order_qs_select_for_update
from .models import TransactionItem
if TYPE_CHECKING:
from ..checkout.models import Checkout
from ..order.models import Order
def transaction_item_qs_select_for_update() -> QuerySet[TransactionItem]:
    """Return a ``TransactionItem`` queryset that row-locks with ``SELECT ... FOR UPDATE``.

    Rows are ordered by ``pk``; ``of=["self"]`` locks only the
    ``TransactionItem`` rows themselves.
    """
    return TransactionItem.objects.order_by("pk").select_for_update(of=["self"])
def get_order_and_transaction_item_locked_for_update(
    order_id: UUID, transaction_item_id: int
) -> tuple["Order", TransactionItem]:
    """Fetch and row-lock an order together with one of its transaction items.

    Both rows are locked via ``SELECT ... FOR UPDATE`` (order first, then the
    transaction item). Raises ``DoesNotExist`` if either id is unknown.
    NOTE(review): must be called inside an open transaction — confirm callers.
    """
    order = order_qs_select_for_update().get(pk=order_id)
    transaction_item = transaction_item_qs_select_for_update().get(
        pk=transaction_item_id
    )
    return order, transaction_item
def get_checkout_and_transaction_item_locked_for_update(
    checkout_id: UUID, transaction_item_id: int
) -> tuple[Optional["Checkout"], TransactionItem]:
    """Fetch and row-lock a checkout (if it still exists) with a transaction item.

    Unlike the order variant above, a missing checkout yields ``None``
    (``.filter(...).first()``) instead of raising; the transaction item lookup
    still raises ``DoesNotExist`` for an unknown id.
    """
    checkout = checkout_qs_select_for_update().filter(pk=checkout_id).first()
    transaction_item = transaction_item_qs_select_for_update().get(
        pk=transaction_item_id
    )
    return checkout, transaction_item
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/payment/lock_objects.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/warehouse/lock_objects.py | from .models import Allocation, Stock
def stock_select_for_update_for_existing_qs(qs):
    """Apply ``SELECT ... FOR UPDATE`` row-locking to an existing ``Stock`` queryset.

    Rows are ordered by ``pk``; ``of=["self"]`` locks only the rows of the
    given queryset's model, not joined tables.
    """
    # Redundant parentheses around the `of` list removed for consistency
    # with the other lock_objects helpers in the project.
    return qs.order_by("pk").select_for_update(of=["self"])
def stock_qs_select_for_update():
    """Return a row-locked queryset over all ``Stock`` rows."""
    return stock_select_for_update_for_existing_qs(Stock.objects.all())
def allocation_with_stock_qs_select_for_update():
    """Return an ``Allocation`` queryset that locks allocations and their stocks.

    ``select_related("stock")`` joins the stock row so that
    ``of=("self", "stock")`` can lock both tables in one query; rows are
    ordered by ``stock__pk``.
    """
    return (
        Allocation.objects.select_related("stock")
        .select_for_update(
            of=(
                "self",
                "stock",
            )
        )
        .order_by("stock__pk")
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/warehouse/lock_objects.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/account/tests/queries/test_customers_where_filtering.py | import datetime
import graphene
import pytest
from django.utils import timezone
from freezegun import freeze_time
from .....account.models import Address, User
from .....account.tests.fixtures.user import dangerously_create_test_user
from .....order import OrderOrigin
from ....tests.utils import get_graphql_content
QUERY_CUSTOMERS_WITH_WHERE = """
query ($where: CustomerWhereInput!, ) {
customers(first: 5, where: $where) {
totalCount
edges {
node {
id
email
lastName
firstName
}
}
}
}
"""
def test_customers_filter_by_ids(
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """Filtering customers by a list of global ids returns exactly those users."""
    # given
    permission_group_manage_users.user_set.add(staff_api_client.user)
    ids = [graphene.Node.to_global_id("User", user.pk) for user in customer_users[:2]]
    variables = {"where": {"ids": ids}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    data = get_graphql_content(response)
    customers = data["data"]["customers"]["edges"]
    assert len(customers) == 2
    returned_ids = {node["node"]["id"] for node in customers}
    returned_emails = {node["node"]["email"] for node in customers}
    expected_emails = {user.email for user in customer_users[:2]}
    assert returned_ids == set(ids)
    assert returned_emails == expected_emails
# Customers join "now" (fixture), 5 days ago and 2 days ago; cases exercise
# gte/lte bounds plus empty/None inputs, which must match nothing.
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        (
            {
                "gte": (timezone.now() - datetime.timedelta(days=7)).isoformat(),
                "lte": (timezone.now() - datetime.timedelta(days=1)).isoformat(),
            },
            [1, 2],
        ),
        (
            {
                "gte": (timezone.now() - datetime.timedelta(days=4)).isoformat(),
            },
            [0, 2],
        ),
        (
            {
                "lte": (timezone.now() + datetime.timedelta(days=1)).isoformat(),
            },
            [0, 1, 2],
        ),
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=25)).isoformat(),
            },
            [],
        ),
        (None, []),
        ({"gte": None}, []),
        ({"lte": None}, []),
        ({"lte": None, "gte": None}, []),
        ({}, []),
    ],
)
def test_customers_filter_by_date_joined(
    where,
    indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_user,
):
    """Filtering customers by the ``dateJoined`` range filter."""
    # given
    with freeze_time((timezone.now() - datetime.timedelta(days=5)).isoformat()):
        customer_2 = dangerously_create_test_user(
            "test2@example.com",
            "password",
            first_name="Leslie",
            last_name="Wade",
        )
    with freeze_time((timezone.now() - datetime.timedelta(days=2)).isoformat()):
        customer_3 = dangerously_create_test_user(
            "test3@example.com",
            "password",
            first_name="John",
            last_name="Lee",
        )
    customer_list = [customer_user, customer_2, customer_3]
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"dateJoined": where}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_list[index].email for index in indexes}
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        (
            {
                "gte": (timezone.now() - datetime.timedelta(days=6)).isoformat(),
                "lte": (timezone.now() - datetime.timedelta(days=4)).isoformat(),
            },
            [0],
        ),
        (
            {
                "gte": (timezone.now() - datetime.timedelta(days=3)).isoformat(),
            },
            [1, 2],
        ),
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=2)).isoformat(),
            },
            [0, 1],
        ),
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=7)).isoformat(),
            },
            [],
        ),
        (None, []),
        ({"gte": None}, []),
        ({"lte": None}, []),
        ({"lte": None, "gte": None}, []),
        ({}, []),
    ],
)
def test_customers_filter_by_updated_at(
    where,
    indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """updatedAt range filter matches customers updated within the bounds.

    The three fixture users are back-dated to 5, 3 and 2 days ago via
    ``bulk_update`` (which bypasses any auto-now behavior on save).
    """
    # given
    now = timezone.now()
    updated_at_dates = [
        now - datetime.timedelta(days=5),
        now - datetime.timedelta(days=3),
        now - datetime.timedelta(days=2),
    ]
    for user, updated_at in zip(customer_users, updated_at_dates, strict=True):
        user.updated_at = updated_at
    User.objects.bulk_update(customer_users, ["updated_at"])
    customer_list = customer_users
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"updatedAt": where}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_list[index].email for index in indexes}
@pytest.mark.parametrize(
    ("metadata", "expected_indexes"),
    [
        ({"key": "foo"}, [0, 1]),
        ({"key": "foo", "value": {"eq": "bar"}}, [0]),
        ({"key": "foo", "value": {"eq": "baz"}}, []),
        ({"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}, [0, 1]),
        ({"key": "notfound"}, []),
        ({"key": "foo", "value": {"eq": None}}, []),
        ({"key": "foo", "value": {"oneOf": []}}, []),
        (None, []),
    ],
)
def test_customers_filter_by_metadata(
    metadata,
    expected_indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """metadata filter matches by key presence and optional value predicate.

    User metadata is seeded as ``{"foo": "bar"}``, ``{"foo": "zaz"}`` and
    empty for users 0, 1 and 2 respectively.
    """
    # given
    metadata_values = [
        {"foo": "bar"},
        {"foo": "zaz"},
        {},
    ]
    for user, metadata_value in zip(customer_users, metadata_values, strict=True):
        user.metadata = metadata_value
    User.objects.bulk_update(customer_users, ["metadata"])
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"metadata": metadata}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(expected_indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_users[i].email for i in expected_indexes}
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        (
            {
                "gte": (timezone.now() - datetime.timedelta(days=10)).isoformat(),
                "lte": (timezone.now() - datetime.timedelta(days=5)).isoformat(),
            },
            [0],
        ),
        (
            {
                "gte": (timezone.now() - datetime.timedelta(days=4)).isoformat(),
            },
            [1, 2],
        ),
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=6)).isoformat(),
            },
            [0],
        ),
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=15)).isoformat(),
            },
            [],
        ),
        (None, []),
        ({"gte": None}, []),
        ({"lte": None}, []),
        ({"lte": None, "gte": None}, []),
        ({}, []),
    ],
)
def test_customers_filter_by_placed_orders_at(
    where,
    indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
    address,
    channel_USD,
):
    """placedOrdersAt filter matches customers by their orders' creation time.

    Each user gets one order, back-dated to 7, 3 and 2 days ago respectively;
    freeze_time back-dates the order's creation timestamp set on ``create``.
    """
    # given
    now = timezone.now()
    placed_orders_dates = [
        now - datetime.timedelta(days=7),
        now - datetime.timedelta(days=3),
        now - datetime.timedelta(days=2),
    ]
    for user, placed_at in zip(customer_users, placed_orders_dates, strict=True):
        with freeze_time(placed_at.isoformat()):
            user.orders.create(
                billing_address=address,
                user_email=user.email,
                channel=channel_USD,
                origin=OrderOrigin.CHECKOUT,
                lines_count=0,
            )
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"placedOrdersAt": where}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_users[index].email for index in indexes}
@pytest.mark.parametrize(
    ("email_filter", "expected_indexes"),
    [
        ({"eq": "test1@example.com"}, [0]),
        ({"eq": "test2@example.com"}, [1]),
        ({"eq": "notfound@example.com"}, []),
        ({"oneOf": ["test1@example.com", "test3@example.com"]}, [0, 2]),
        ({"oneOf": ["notfound@example.com"]}, []),
        (None, []),
        ({"eq": None}, []),
        ({"oneOf": []}, []),
    ],
)
def test_customers_filter_by_email(
    email_filter,
    expected_indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """email filter supports exact (`eq`) and set (`oneOf`) matching."""
    # given
    customer_emails = [
        "test1@example.com",
        "test2@example.com",
        "test3@example.com",
    ]
    for user, email in zip(customer_users, customer_emails, strict=True):
        user.email = email
    User.objects.bulk_update(customer_users, ["email"])
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"email": email_filter}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(expected_indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_users[i].email for i in expected_indexes}
@pytest.mark.parametrize(
    ("is_active_filter", "expected_indexes"),
    [
        (True, [0, 1]),
        (False, [2]),
        (None, []),
    ],
)
def test_customers_filter_by_is_active(
    is_active_filter,
    expected_indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """isActive filter matches only customers with the given activity flag."""
    # given
    flags = [True, True, False]
    for user, flag in zip(customer_users, flags, strict=True):
        user.is_active = flag
    User.objects.bulk_update(customer_users, ["is_active"])
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"isActive": is_active_filter}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    edges = content["data"]["customers"]["edges"]
    assert len(edges) == len(expected_indexes)
    expected_emails = {customer_users[i].email for i in expected_indexes}
    assert {edge["node"]["email"] for edge in edges} == expected_emails
@pytest.mark.parametrize(
    ("address_filter", "expected_indexes"),
    [
        ({"phoneNumber": {"eq": "+48123456789"}}, [0]),
        ({"phoneNumber": {"eq": "+1987654321"}}, [1]),
        ({"phoneNumber": {"eq": "notfound"}}, []),
        ({"phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]}}, [0, 2]),
        ({"phoneNumber": {"oneOf": ["notfound"]}}, []),
        ({"country": {"eq": "GE"}}, [0]),
        ({"country": {"eq": "US"}}, [1]),
        ({"country": {"eq": "CN"}}, [2]),
        ({"country": {"eq": "JP"}}, []),
        ({"country": {"oneOf": ["GE", "CN"]}}, [0, 2]),
        ({"country": {"oneOf": ["JP"]}}, []),
        ({"country": {"notOneOf": ["GE", "CN", "PL"]}}, [1]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "GE"}}, [0]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "US"}}, []),
        (
            {
                "phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]},
                "country": {"notOneOf": ["GE"]},
            },
            [2],
        ),
        (None, []),
        ({"phoneNumber": {"eq": None}}, []),
        ({"phoneNumber": {"oneOf": []}}, []),
        ({"country": {"eq": None}}, []),
        ({"country": {"oneOf": []}}, []),
    ],
)
def test_customers_filter_by_addresses(
    address_filter,
    expected_indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """addresses filter matches on phone number and/or country of any address.

    Each user gets exactly one address; users 0/1/2 have countries GE/US/CN
    and the phone numbers listed below, so combined predicates AND together.
    """
    # given
    phones = [
        "+48123456789",
        "+1987654321",
        "+86555555555",
    ]
    countries = ["GE", "US", "CN"]
    addresses = [
        Address.objects.create(
            first_name="John",
            last_name="Doe",
            company_name="Mirumee Software",
            street_address_1="Tęczowa 7",
            city="WROCŁAW",
            postal_code="53-601",
            country=country,
            phone=phone,
        )
        for phone, country in zip(phones, countries, strict=True)
    ]
    for user, address in zip(customer_users, addresses, strict=True):
        user.addresses.add(address)
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"addresses": address_filter}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(expected_indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_users[i].email for i in expected_indexes}
@pytest.mark.parametrize(
    ("orders_filter", "expected_indexes"),
    [
        ({"range": {"gte": 2}}, [0, 1]),
        ({"range": {"lte": 1}}, [2]),
        ({"range": {"gte": 2, "lte": 3}}, [0, 1]),
        ({"eq": 3}, [0]),
        ({"eq": 0}, []),
        ({"range": {"gte": 4}}, []),
        ({"oneOf": [3, 1]}, [0, 2]),
        ({"oneOf": [2]}, [1]),
        ({"oneOf": [4, 5]}, []),
        ({"oneOf": []}, []),
        (None, []),
        ({"range": {"gte": None}}, []),
        ({"range": {"lte": None}}, []),
        ({"eq": None}, []),
    ],
)
def test_customers_filter_by_number_of_orders(
    orders_filter,
    expected_indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """numberOfOrders filter supports `range`, `eq` and `oneOf` predicates.

    The denormalized ``number_of_orders`` counter is set directly (3, 2, 1)
    rather than by creating orders.
    """
    # given
    orders_counts = [3, 2, 1]
    for user, orders_count in zip(customer_users, orders_counts, strict=True):
        user.number_of_orders = orders_count
    User.objects.bulk_update(customer_users, ["number_of_orders"])
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"numberOfOrders": orders_filter}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(expected_indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_users[i].email for i in expected_indexes}
@pytest.mark.parametrize(
    ("first_name_filter", "expected_indexes"),
    [
        ({"eq": "John"}, [0]),
        ({"eq": "Leslie"}, [1]),
        ({"eq": "NotFound"}, []),
        ({"oneOf": ["John", "Jane"]}, [0, 2]),
        ({"oneOf": ["NotFound"]}, []),
        (None, []),
        ({"eq": None}, []),
        ({"oneOf": []}, []),
    ],
)
def test_customers_filter_by_first_name(
    first_name_filter,
    expected_indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """firstName filter supports exact (`eq`) and set (`oneOf`) matching."""
    # given
    first_names = [
        "John",
        "Leslie",
        "Jane",
    ]
    for user, first_name in zip(customer_users, first_names, strict=True):
        user.first_name = first_name
    User.objects.bulk_update(customer_users, ["first_name"])
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"firstName": first_name_filter}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(expected_indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_users[i].email for i in expected_indexes}
@pytest.mark.parametrize(
    ("last_name_filter", "expected_indexes"),
    [
        ({"eq": "Doe"}, [0]),
        ({"eq": "Wade"}, [1]),
        ({"eq": "NotFound"}, []),
        ({"oneOf": ["Doe", "Smith"]}, [0, 2]),
        ({"oneOf": ["NotFound"]}, []),
        (None, []),
        ({"eq": None}, []),
        ({"oneOf": []}, []),
    ],
)
def test_customers_filter_by_last_name(
    last_name_filter,
    expected_indexes,
    staff_api_client,
    permission_group_manage_users,
    customer_users,
):
    """lastName filter supports exact (`eq`) and set (`oneOf`) matching."""
    # given
    last_names = [
        "Doe",
        "Wade",
        "Smith",
    ]
    for user, last_name in zip(customer_users, last_names, strict=True):
        user.last_name = last_name
    User.objects.bulk_update(customer_users, ["last_name"])
    permission_group_manage_users.user_set.add(staff_api_client.user)
    variables = {"where": {"lastName": last_name_filter}}
    # when
    response = staff_api_client.post_graphql(QUERY_CUSTOMERS_WITH_WHERE, variables)
    # then
    content = get_graphql_content(response)
    customers = content["data"]["customers"]["edges"]
    assert len(customers) == len(expected_indexes)
    emails = {node["node"]["email"] for node in customers}
    assert emails == {customer_users[i].email for i in expected_indexes}
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/account/tests/queries/test_customers_where_filtering.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 542,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/payment/mutations/transaction/shared.py | import graphene
from django.core.exceptions import ValidationError
from .....payment import PaymentMethodType
from .....payment.error_codes import (
TransactionCreateErrorCode,
TransactionEventReportErrorCode,
TransactionUpdateErrorCode,
)
from .....payment.interface import PaymentMethodDetails
from ....core.descriptions import ADDED_IN_322
from ....core.types.base import BaseInputObjectType
from ....core.validators import validate_one_of_args_is_in_mutation
# Graphene input describing a card used for a transaction. The limits stated
# in the field descriptions are enforced by
# `validate_card_payment_method_details_input`, not by graphene itself.
# NOTE: deliberately no class docstring — graphene would expose it as the
# schema-level type description.
class CardPaymentMethodDetailsInput(BaseInputObjectType):
    name = graphene.String(
        description="Name of the payment method used for the transaction. Max length is 256 characters.",
        required=True,
    )
    brand = graphene.String(
        description="Brand of the payment method used for the transaction. Max length is 40 characters.",
        required=False,
    )
    first_digits = graphene.String(
        description="First digits of the card used for the transaction. Max length is 4 characters.",
        required=False,
    )
    last_digits = graphene.String(
        description="Last digits of the card used for the transaction. Max length is 4 characters.",
        required=False,
    )
    exp_month = graphene.Int(
        description="Expiration month of the card used for the transaction. Value must be between 1 and 12.",
        required=False,
    )
    exp_year = graphene.Int(
        description="Expiration year of the card used for the transaction. Value must be between 2000 and 9999.",
        required=False,
    )
# Graphene input for a non-card payment method; carries only a display name.
# The 256-char limit is enforced in `validate_payment_method_details_input`.
class OtherPaymentMethodDetailsInput(BaseInputObjectType):
    name = graphene.String(
        description="Name of the payment method used for the transaction.",
        required=True,
    )
# One-of wrapper around the card/other payment method detail inputs. The
# "exactly one of `card`/`other`" rule is enforced at runtime by
# `validate_payment_method_details_input`.
class PaymentMethodDetailsInput(BaseInputObjectType):
    card = graphene.Field(
        CardPaymentMethodDetailsInput,
        required=False,
        description="Details of the card payment method used for the transaction.",
    )
    other = graphene.Field(
        OtherPaymentMethodDetailsInput,
        required=False,
        description="Details of the non-card payment method used for this transaction.",
    )
    class Meta:
        description = (
            "Details of the payment method used for the transaction. "
            "One of `card` or `other` is required." + ADDED_IN_322
        )
def validate_card_payment_method_details_input(
    card_method_details_input: CardPaymentMethodDetailsInput,
    error_code_class: type[TransactionEventReportErrorCode]
    | type[TransactionCreateErrorCode]
    | type[TransactionUpdateErrorCode],
):
    """Validate card payment method details against the advertised limits.

    Checks the max lengths of `name`, `brand`, `firstDigits` and `lastDigits`
    and the allowed ranges of `expMonth` (1-12) and `expYear` (2000-9999),
    mirroring the limits stated in `CardPaymentMethodDetailsInput` field
    descriptions.

    Returns a list of `{field_name: ValidationError}` dicts, one entry per
    failed check; an empty list means the input is valid.
    """
    errors = []
    if len(card_method_details_input.name) > 256:
        errors.append(
            {
                "name": ValidationError(
                    "The `name` field must be less than 256 characters.",
                    code=error_code_class.INVALID.value,
                )
            }
        )
    if card_method_details_input.brand and len(card_method_details_input.brand) > 40:
        errors.append(
            {
                "brand": ValidationError(
                    "The `brand` field must be less than 40 characters.",
                    code=error_code_class.INVALID.value,
                )
            }
        )
    if (
        card_method_details_input.first_digits
        and len(card_method_details_input.first_digits) > 4
    ):
        errors.append(
            {
                "first_digits": ValidationError(
                    "The `firstDigits` field must be less than 4 characters.",
                    code=error_code_class.INVALID.value,
                )
            }
        )
    if (
        card_method_details_input.last_digits
        and len(card_method_details_input.last_digits) > 4
    ):
        errors.append(
            {
                "last_digits": ValidationError(
                    "The `lastDigits` field must be less than 4 characters.",
                    code=error_code_class.INVALID.value,
                )
            }
        )
    # Use `is not None` for the expiration fields: with a plain truthiness
    # check, an (invalid) value of 0 is falsy and silently bypassed the
    # range validation.
    if card_method_details_input.exp_month is not None and not (
        1 <= card_method_details_input.exp_month <= 12
    ):
        errors.append(
            {
                "exp_month": ValidationError(
                    "The `expMonth` field must be between 1 and 12.",
                    code=error_code_class.INVALID.value,
                )
            }
        )
    if card_method_details_input.exp_year is not None and not (
        2000 <= card_method_details_input.exp_year <= 9999
    ):
        errors.append(
            {
                "exp_year": ValidationError(
                    "The `expYear` field must be between 2000 and 9999.",
                    code=error_code_class.INVALID.value,
                )
            }
        )
    return errors
def validate_payment_method_details_input(
    payment_method_details_input: PaymentMethodDetailsInput,
    error_code_class: type[TransactionEventReportErrorCode]
    | type[TransactionCreateErrorCode]
    | type[TransactionUpdateErrorCode],
):
    """Validate a `PaymentMethodDetailsInput` one-of wrapper.

    Ensures exactly one of `card`/`other` is provided, then validates the
    chosen branch against its field limits. Raises `ValidationError` keyed
    under `payment_method_details`; returns None when the input is valid.
    """
    # Reject the "neither provided" case up front with a specific message.
    if (
        payment_method_details_input.card is None
        and payment_method_details_input.other is None
    ):
        raise ValidationError(
            {
                "payment_method_details": ValidationError(
                    "One of `card` or `other` is required.",
                    code=error_code_class.INVALID.value,
                )
            }
        )
    # Guards against `card` and `other` being supplied together; the raised
    # error is re-keyed under `payment_method_details` with this mutation's
    # error code.
    try:
        validate_one_of_args_is_in_mutation(
            "card",
            payment_method_details_input.card,
            "other",
            payment_method_details_input.other,
        )
    except ValidationError as e:
        e.code = error_code_class.INVALID.value
        raise ValidationError({"payment_method_details": e}) from e
    errors = []
    if payment_method_details_input.card:
        errors.extend(
            validate_card_payment_method_details_input(
                payment_method_details_input.card, error_code_class
            )
        )
    elif payment_method_details_input.other:
        # `other` has only a name; enforce the same 256-char cap as card names.
        if len(payment_method_details_input.other.name) > 256:
            errors.append(
                {
                    "name": ValidationError(
                        "The `name` field must be less than 256 characters.",
                        code=error_code_class.INVALID.value,
                    )
                }
            )
    if errors:
        raise ValidationError({"payment_method_details": errors})
def get_payment_method_details(
    payment_method_details_input: PaymentMethodDetailsInput | None,
) -> PaymentMethodDetails | None:
    """Build a `PaymentMethodDetails` dataclass from the mutation input.

    Returns None when no input is given; otherwise maps the populated
    `card` or `other` branch onto the dataclass.
    """
    if not payment_method_details_input:
        return None
    card = payment_method_details_input.card
    if card:
        return PaymentMethodDetails(
            type=PaymentMethodType.CARD,
            name=card.name,
            brand=card.brand,
            first_digits=card.first_digits,
            last_digits=card.last_digits,
            exp_month=card.exp_month,
            exp_year=card.exp_year,
        )
    other = payment_method_details_input.other
    if other:
        return PaymentMethodDetails(
            type=PaymentMethodType.OTHER,
            name=other.name,
        )
    return None
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/payment/mutations/transaction/shared.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/order/tests/queries/test_orders_search.py | from decimal import Decimal
import graphene
import pytest
from prices import Money, TaxedMoney
from .....core.postgres import FlatConcatSearchVector
from .....discount.models import OrderDiscount
from .....invoice.models import Invoice
from .....order import OrderEvents
from .....order.models import Order, Payment
from .....order.search import prepare_order_search_vector_value
from ....tests.utils import get_graphql_content
ORDERS_QUERY_WITH_SEARCH = """
query ($search: String) {
orders(first: 10, search:$search) {
totalCount
edges {
node {
id
number
}
}
}
}
"""
def update_orders_search_vector(orders):
    """Recompute and persist the denormalized search vector of each order."""
    for order in orders:
        vector_values = prepare_order_search_vector_value(order)
        order.search_vector = FlatConcatSearchVector(*vector_values)
    Order.objects.bulk_update(orders, ["search_vector"])
@pytest.mark.parametrize(
    ("search_value", "count"),
    [
        ("discount name", 2),
        ("Some other", 1),
        ("translated", 1),
        ("test@mirumee.com", 1),
        ("example.com", 3),
        ("mirumee.com", 1),
        ("Leslie", 1),
        ("Wade", 1),
        ("Leslie Wade", 1),
        ("", 3),
        ("ExternalID", 1),
        ("SKU_A", 1),
    ],
)
def test_orders_query_with_search(
    search_value,
    count,
    staff_api_client,
    permission_group_manage_orders,
    customer_user,
    channel_USD,
    product,
    variant,
):
    """Order search matches discounts, emails, names, payment refs and SKUs.

    Fixture layout: order 0 belongs to `customer_user` (test@mirumee.com) and
    has a discount with a translated name; order 1 has a payment with psp
    reference "ExternalID"; order 2 has a second discount and an order line
    (searchable by SKU). "Leslie Wade" is assumed to be the `customer_user`
    fixture's name — confirm against the fixture.
    """
    # given
    orders = Order.objects.bulk_create(
        [
            Order(
                user=customer_user,
                user_email="test@mirumee.com",
                channel=channel_USD,
                lines_count=0,
            ),
            Order(
                user_email="user_email1@example.com",
                channel=channel_USD,
                lines_count=0,
            ),
            Order(
                user_email="user_email2@example.com",
                channel=channel_USD,
                lines_count=0,
            ),
        ]
    )
    OrderDiscount.objects.bulk_create(
        [
            OrderDiscount(
                order=orders[0],
                name="Some discount name",
                value=Decimal(1),
                amount_value=Decimal(1),
                translated_name="translated",
            ),
            OrderDiscount(
                order=orders[2],
                name="Some other discount name",
                value=Decimal(10),
                amount_value=Decimal(10),
                translated_name="PL_name",
            ),
        ]
    )
    order_with_payment = orders[1]
    payment = Payment.objects.create(
        order=order_with_payment, psp_reference="ExternalID"
    )
    payment.transactions.create(gateway_response={}, is_success=True)
    # Give order 2 a line so product/variant/SKU terms become searchable.
    order_with_orderline = orders[2]
    channel = order_with_orderline.channel
    channel_listing = variant.channel_listings.get(channel=channel)
    net = variant.get_price(channel_listing)
    currency = net.currency
    gross = Money(amount=net.amount * Decimal(1.23), currency=currency)
    unit_price = TaxedMoney(net=net, gross=gross)
    order_with_orderline.lines.create(
        product_name=str(product),
        variant_name=str(variant),
        product_sku=variant.sku,
        product_variant_id=variant.get_global_id(),
        is_shipping_required=variant.is_shipping_required(),
        is_gift_card=variant.is_gift_card(),
        quantity=3,
        variant=variant,
        unit_price=unit_price,
        total_price=unit_price * 3,
        undiscounted_unit_price=unit_price,
        undiscounted_total_price=unit_price * 3,
        tax_rate=Decimal("0.23"),
    )
    update_orders_search_vector(orders)
    variables = {"search": search_value}
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["totalCount"] == count
def test_orders_query_with_search_by_order_id(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
):
    """Searching by an order's global ID returns exactly that order."""
    # given
    update_orders_search_vector(order_list)
    target = order_list[1]
    order_global_id = graphene.Node.to_global_id("Order", target.pk)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"search": order_global_id}
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    orders_data = content["data"]["orders"]
    assert orders_data["totalCount"] == 1
    assert orders_data["edges"][0]["node"]["id"] == order_global_id
def test_orders_query_with_search_by_invoice_id(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
):
    """Searching by an invoice global ID returns the invoice's order."""
    # given
    invoices = Invoice.objects.bulk_create(
        [Invoice(order=order, number=f"INV-{order.pk}") for order in order_list]
    )
    update_orders_search_vector(order_list)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"search": graphene.Node.to_global_id("Invoice", invoices[2].pk)}
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    orders_data = content["data"]["orders"]
    expected_id = graphene.Node.to_global_id("Order", order_list[2].pk)
    assert orders_data["totalCount"] == 1
    assert orders_data["edges"][0]["node"]["id"] == expected_id
def test_orders_query_with_search_by_order_event_message(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
):
    """Order event messages are searchable once the vector is rebuilt."""
    # given
    target_order = order_list[0]
    target_order.events.create(
        type=OrderEvents.NOTE_ADDED,
        user=None,
        parameters={"message": "Special event message for search"},
    )
    update_orders_search_vector(order_list)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"search": "Special event message"}
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    orders_data = content["data"]["orders"]
    expected_id = graphene.Node.to_global_id("Order", target_order.pk)
    assert orders_data["totalCount"] == 1
    assert orders_data["edges"][0]["node"]["id"] == expected_id
@pytest.mark.parametrize(
    ("search_value", "expected_count"),
    [
        ("match in", 1),
        ("note", 2),
        ("partial", 1),
        ("unrelated", 0),
    ],
)
def test_orders_query_with_search_by_partial_customer_note(
    search_value,
    expected_count,
    staff_api_client,
    permission_group_manage_orders,
    order_list,
):
    """Customer notes are searchable by partial phrases; empty notes match nothing."""
    # given
    notes = [
        "This is a match in the customer note",
        "This note has a partial match",
        "",
    ]
    for order, note in zip(order_list, notes, strict=True):
        order.customer_note = note
    Order.objects.bulk_update(order_list, ["customer_note"])
    update_orders_search_vector(order_list)
    variables = {"search": search_value}
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["totalCount"] == expected_count
def test_orders_query_with_search_by_product_name(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    product,
    variant,
):
    """An order line's product name makes the order searchable by that name."""
    # given
    order = order_list[0]
    channel = order.channel
    channel_listing = variant.channel_listings.get(channel=channel)
    net = variant.get_price(channel_listing)
    currency = net.currency
    gross = Money(amount=net.amount * Decimal(1.23), currency=currency)
    unit_price = TaxedMoney(net=net, gross=gross)
    product_name = str(product)
    order.lines.create(
        product_name=product_name,
        variant_name=str(variant),
        product_sku=variant.sku,
        product_variant_id=variant.get_global_id(),
        is_shipping_required=variant.is_shipping_required(),
        is_gift_card=variant.is_gift_card(),
        quantity=2,
        variant=variant,
        unit_price=unit_price,
        total_price=unit_price * 2,
        undiscounted_unit_price=unit_price,
        undiscounted_total_price=unit_price * 2,
        tax_rate=Decimal("0.23"),
    )
    update_orders_search_vector(order_list)
    variables = {"search": product_name}
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["totalCount"] == 1
    assert content["data"]["orders"]["edges"][0]["node"][
        "id"
    ] == graphene.Node.to_global_id("Order", order.pk)
def test_orders_query_with_search_by_variant_name(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    product,
    variant,
):
    """An order line's variant name makes the order searchable by that name."""
    # given
    order = order_list[1]
    channel = order.channel
    channel_listing = variant.channel_listings.get(channel=channel)
    net = variant.get_price(channel_listing)
    currency = net.currency
    gross = Money(amount=net.amount * Decimal(1.23), currency=currency)
    unit_price = TaxedMoney(net=net, gross=gross)
    variant_name = str(variant)
    order.lines.create(
        product_name=str(product),
        variant_name=variant_name,
        product_sku=variant.sku,
        product_variant_id=variant.get_global_id(),
        is_shipping_required=variant.is_shipping_required(),
        is_gift_card=variant.is_gift_card(),
        quantity=1,
        variant=variant,
        unit_price=unit_price,
        total_price=unit_price,
        undiscounted_unit_price=unit_price,
        undiscounted_total_price=unit_price,
        tax_rate=Decimal("0.23"),
    )
    update_orders_search_vector(order_list)
    variables = {"search": variant_name}
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["totalCount"] == 1
    assert content["data"]["orders"]["edges"][0]["node"][
        "id"
    ] == graphene.Node.to_global_id("Order", order.pk)
def test_orders_query_with_search_by_product_sku(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    product,
    variant,
):
    """An order line's product SKU makes the order searchable by that SKU."""
    # given
    order = order_list[2]
    channel = order.channel
    channel_listing = variant.channel_listings.get(channel=channel)
    net = variant.get_price(channel_listing)
    currency = net.currency
    gross = Money(amount=net.amount * Decimal(1.23), currency=currency)
    unit_price = TaxedMoney(net=net, gross=gross)
    sku = variant.sku
    order.lines.create(
        product_name=str(product),
        variant_name=str(variant),
        product_sku=sku,
        product_variant_id=variant.get_global_id(),
        is_shipping_required=variant.is_shipping_required(),
        is_gift_card=variant.is_gift_card(),
        quantity=4,
        variant=variant,
        unit_price=unit_price,
        total_price=unit_price * 4,
        undiscounted_unit_price=unit_price,
        undiscounted_total_price=unit_price * 4,
        tax_rate=Decimal("0.23"),
    )
    update_orders_search_vector(order_list)
    variables = {"search": sku}
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["totalCount"] == 1
    assert content["data"]["orders"]["edges"][0]["node"][
        "id"
    ] == graphene.Node.to_global_id("Order", order.pk)
@pytest.mark.parametrize(
    ("search_value", "expected_count"),
    [
        ("First", 1),
        ("Last", 1),
        ("First Last", 1),
        ("Billing Street", 1),
        ("PL", 1),
        ("US", 2),
        ("Nonexistent", 0),
    ],
)
def test_orders_query_with_search_by_billing_address_fields(
    search_value,
    expected_count,
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    address,
    address_usa,
):
    """Billing address fields feed the order search vector.

    Order 0 gets a customized PL address; the remaining orders share the
    US address fixture.
    """
    # given
    address.first_name = "First"
    address.last_name = "Last"
    address.street_address_1 = "Billing Street"
    address.country = "PL"
    address.save()
    order_list[0].billing_address = address
    for other_order in order_list[1:]:
        other_order.billing_address = address_usa
    Order.objects.bulk_update(order_list, ["billing_address"])
    update_orders_search_vector(order_list)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"search": search_value}
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["totalCount"] == expected_count
@pytest.mark.parametrize(
    ("search_value", "expected_count"),
    [
        ("First", 1),
        ("Last", 1),
        ("First Last", 1),
        ("Shipping Street", 1),
        ("JP", 1),
        ("US", 2),
        ("Nonexistent", 0),
    ],
)
def test_orders_query_with_search_by_shipping_address_fields(
    search_value,
    expected_count,
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    address,
    address_usa,
):
    """Shipping address fields feed the order search vector.

    Order 0 gets a customized JP address; the remaining orders share the
    US address fixture.
    """
    # given
    address.first_name = "First"
    address.last_name = "Last"
    address.street_address_1 = "Shipping Street"
    address.country = "JP"
    address.save()
    order_list[0].shipping_address = address
    for other_order in order_list[1:]:
        other_order.shipping_address = address_usa
    Order.objects.bulk_update(order_list, ["shipping_address"])
    update_orders_search_vector(order_list)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"search": search_value}
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["totalCount"] == expected_count
@pytest.mark.parametrize(
    ("search_value", "expected_order_idxes"),
    [
        ("EXT-REF-12345", [0]),
        ("REF", [0, 1]),
        ("ANOTHER-REF-67890", [1]),
        ("nonexistent-ref", []),
    ],
)
def test_orders_query_with_search_by_external_reference(
    search_value,
    expected_order_idxes,
    staff_api_client,
    permission_group_manage_orders,
    order_list,
):
    """External references are searchable, including by partial token ("REF")."""
    # given
    external_references = ["EXT-REF-12345", "ANOTHER-REF-67890", ""]
    for order, ext_ref in zip(order_list, external_references, strict=True):
        order.external_reference = ext_ref
    Order.objects.bulk_update(order_list, ["external_reference"])
    update_orders_search_vector(order_list)
    variables = {"search": search_value}
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["totalCount"] == len(expected_order_idxes)
    returned_numbers = [
        edge["node"]["number"] for edge in content["data"]["orders"]["edges"]
    ]
    expected_numbers = [str(order_list[idx].number) for idx in expected_order_idxes]
    assert set(returned_numbers) == set(expected_numbers)
def test_orders_search_by_not_searchable_value(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
):
    """A search phrase with no searchable tokens (":::") should match nothing."""
    # given
    update_orders_search_vector(order_list)
    variables = {"search": ":::"}
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert not content["data"]["orders"]["edges"]
def test_orders_search_by_none_value(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
):
    """A null search value should be ignored and all orders returned."""
    # given
    update_orders_search_vector(order_list)
    variables = {"search": None}
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # when
    response = staff_api_client.post_graphql(ORDERS_QUERY_WITH_SEARCH, variables)
    # then
    content = get_graphql_content(response)
    assert len(content["data"]["orders"]["edges"]) == len(order_list)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/order/tests/queries/test_orders_search.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 493,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/core/filters/filter_input.py | import itertools
from django.db import models
from django_filters.filterset import FILTER_FOR_DBFIELD_DEFAULTS, BaseFilterSet
from graphene import Argument, InputField, String
from graphene.types.inputobjecttype import InputObjectTypeOptions
from graphene.types.utils import yank_fields_from_attrs
from ..descriptions import DEPRECATED_IN_3X_INPUT
from ..types.base import BaseInputObjectType
from .shared_filters import GlobalIDFilter, GlobalIDMultipleChoiceFilter
# Filter overrides for model fields holding primary keys or relations;
# these accept GraphQL global IDs instead of raw database IDs.
GLOBAL_ID_FILTERS = {
    models.AutoField: {"filter_class": GlobalIDFilter},
    models.OneToOneField: {"filter_class": GlobalIDFilter},
    models.ForeignKey: {"filter_class": GlobalIDFilter},
    models.ManyToManyField: {"filter_class": GlobalIDMultipleChoiceFilter},
    models.ManyToOneRel: {"filter_class": GlobalIDMultipleChoiceFilter},
    models.ManyToManyRel: {"filter_class": GlobalIDMultipleChoiceFilter},
}
class GraphQLFilterSetMixin(BaseFilterSet):
    """FilterSet mixin whose field defaults understand GraphQL global IDs."""

    # django-filter's stock per-field defaults, with the ID/relation entries
    # overridden by the global-ID aware filter classes (later keys win).
    FILTER_DEFAULTS = {**FILTER_FOR_DBFIELD_DEFAULTS, **GLOBAL_ID_FILTERS}
def get_filterset_class(filterset_class=None):
    """Wrap *filterset_class* so it uses the GraphQL filter defaults.

    Returns a new subclass combining the given FilterSet with
    ``GraphQLFilterSetMixin`` so ID/relation fields accept global IDs.

    Raises:
        ValueError: when no filterset class is provided.
    """
    if filterset_class is None:
        # Fail with a clear message instead of an AttributeError on __name__.
        raise ValueError("A filterset class must be provided.")
    return type(
        f"GraphQL{filterset_class.__name__}",
        (filterset_class, GraphQLFilterSetMixin),
        {},
    )
class FilterInputObjectType(BaseInputObjectType):
    """Class for storing and serving django-filters as graphQL input.

    FilterSet class which inherits from django-filters.FilterSet should be
    provided using the filterset_class argument.
    """

    @classmethod
    def __init_subclass_with_meta__(  # type: ignore[override]
        cls, _meta=None, model=None, filterset_class=None, fields=None, **options
    ):
        # Remember the user-provided FilterSet; the GraphQL-aware subclass is
        # built lazily in get_filtering_args_from_filterset.
        cls.custom_filterset_class = filterset_class
        cls.filterset_class = None
        cls.fields = fields
        cls.model = model
        if not _meta:
            _meta = InputObjectTypeOptions(cls)
        # Expose every filter of the FilterSet as an input field.
        fields = cls.get_filtering_args_from_filterset()
        fields = yank_fields_from_attrs(fields, _as=InputField)
        if _meta.fields:
            _meta.fields.update(fields)
        else:
            _meta.fields = fields
        super().__init_subclass_with_meta__(_meta=_meta, **options)

    @classmethod
    def get_filtering_args_from_filterset(cls):
        """Retrieve the filtering arguments from the filterset.

        Inspect a FilterSet and produce the arguments to pass to a Graphene field.
        These arguments will be available to filter against in the GraphQL.
        """
        # Imported lazily to avoid a circular import with ..types.converter.
        from ..types.converter import convert_form_field

        if not cls.custom_filterset_class:
            raise ValueError("Provide filterset class")
        cls.filterset_class = get_filterset_class(cls.custom_filterset_class)
        args = {}
        for name, filter_field in cls.filterset_class.base_filters.items():
            input_class = getattr(filter_field, "input_class", None)
            if input_class:
                # Custom filters declare the Graphene input type directly.
                field_type = convert_form_field(filter_field)
            else:
                # Plain django-filter filters are converted via their form field.
                field_type = convert_form_field(filter_field.field)
            field_type.description = getattr(filter_field, "help_text", "")
            # Ensure the converted field always carries a kwargs mapping.
            kwargs = getattr(field_type, "kwargs", {})
            field_type.kwargs = kwargs
            args[name] = field_type
        return args
class ChannelFilterInputObjectType(FilterInputObjectType):
    # Deprecated channel selector kept for backward compatibility; new
    # queries should pass the channel at the root level instead.
    channel = Argument(
        String,
        description=(
            "Specifies the channel by which the data should be filtered. "
            f"{DEPRECATED_IN_3X_INPUT} Use root-level channel argument instead."
        ),
    )
    class Meta:
        abstract = True
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/core/filters/filter_input.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/core/filters/filters.py | import django_filters
import graphene
from django.core.exceptions import ValidationError
from django.forms import Field, MultipleChoiceField
from ...utils.filters import filter_range_field
from ..enums import JobStatusEnum
from ..types import DateTimeRangeInput
from .shared_filters import filter_metadata
class DefaultMultipleChoiceField(MultipleChoiceField):
    """Multiple-choice form field accepting a bare value or a list.

    Unlike the stock MultipleChoiceField, values are not checked against
    declared choices - any list of values passes validation.
    """
    default_error_messages = {"invalid_list": "Enter a list of values."}
    def to_python(self, value):
        # Normalize: empty input -> [], a single scalar -> one-element list.
        if not value:
            return []
        if not isinstance(value, list):
            value = [value]
        return value
    def validate(self, value):
        """Validate that the input is a list or tuple."""
        if self.required and not value:
            raise ValidationError(self.error_messages["required"], code="required")
        if not isinstance(value, list | tuple):
            raise ValidationError(
                self.error_messages["invalid_list"], code="invalid_list"
            )
        return True
class EnumFilter(django_filters.CharFilter):
    """Filter class for Graphene enum object.

    enum_class needs to be passed explicitly as well as the method.
    """
    def __init__(self, input_class, *args, **kwargs):
        # NOTE(review): `assert` is stripped under `python -O`, so this check
        # silently disappears in optimized runs; kept as-is to preserve behavior.
        assert kwargs.get("method"), (
            "Providing exact filter method is required for EnumFilter"
        )
        # Graphene input type used when exposing this filter in the schema.
        self.input_class = input_class
        super().__init__(*args, **kwargs)
class ListObjectTypeFilter(django_filters.MultipleChoiceFilter):
    # Filter accepting a list of values of the given Graphene input type.
    field_class = DefaultMultipleChoiceField
    def __init__(self, input_class, *args, **kwargs):
        # Graphene input type used when exposing this filter in the schema.
        self.input_class = input_class
        super().__init__(*args, **kwargs)
class ObjectTypeFilter(django_filters.Filter):
    # Filter accepting a single value of the given Graphene input type.
    def __init__(self, input_class, *args, **kwargs):
        self.input_class = input_class
        super().__init__(*args, **kwargs)
class DefaultOperationField(Field):
    def validate(self, value):
        # At most one operation may be selected at a time.
        if value and len(value) > 1:
            raise ValidationError("Only one option can be specified.", code="invalid")
        return super().validate(value)
class OperationObjectTypeFilter(django_filters.Filter):
    # Filter whose form field rejects more than one selected operation.
    field_class = DefaultOperationField
    def __init__(self, input_class, *args, **kwargs):
        self.input_class = input_class
        super().__init__(*args, **kwargs)
class MetadataFilter(graphene.InputObjectType):
    # GraphQL input describing one metadata key and an optional value.
    key = graphene.String(required=True, description="Key of a metadata item.")
    value = graphene.String(required=False, description="Value of a metadata item.")
class MetadataFilterBase(django_filters.FilterSet):
    # Base FilterSet adding metadata key/value filtering to subclasses.
    metadata = ListObjectTypeFilter(input_class=MetadataFilter, method=filter_metadata)
    class Meta:
        abstract = True
def filter_created_at(qs, _, value):
    """Filter the queryset by a ``created_at`` date-time range."""
    return filter_range_field(qs, "created_at", value)
def filter_updated_at(qs, _, value):
    """Filter the queryset by an ``updated_at`` date-time range."""
    return filter_range_field(qs, "updated_at", value)
def filter_status(qs, _, value):
    """Limit *qs* to rows whose ``status`` equals *value*; no-op for empty input."""
    return qs.filter(status=value) if value else qs
class BaseJobFilter(django_filters.FilterSet):
    # Common filters for Job-like models: creation/update time ranges and status.
    created_at = ObjectTypeFilter(
        input_class=DateTimeRangeInput, method=filter_created_at
    )
    updated_at = ObjectTypeFilter(
        input_class=DateTimeRangeInput, method=filter_updated_at
    )
    status = EnumFilter(input_class=JobStatusEnum, method=filter_status)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/core/filters/filters.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/core/filters/shared_filters.py | from django.core.exceptions import ValidationError
from django.forms import CharField, Field, MultipleChoiceField
from django_filters import Filter, MultipleChoiceFilter
from graphql_relay import from_global_id
class GlobalIDFormField(Field):
    """Form field validating a single GraphQL global ID (base64 "Type:id")."""
    default_error_messages = {"invalid": "Invalid ID specified."}
    def clean(self, value):
        if not value and not self.required:
            return None
        try:
            _type, _id = from_global_id(value)
        except (TypeError, ValueError) as e:
            raise ValidationError(self.error_messages["invalid"]) from e
        # Both decoded parts must themselves be valid non-empty strings.
        try:
            CharField().clean(_id)
            CharField().clean(_type)
        except ValidationError as e:
            raise ValidationError(self.error_messages["invalid"]) from e
        # Return the original (still-encoded) global ID, not the decoded parts.
        return value
class GlobalIDFilter(Filter):
    # Single-value filter that decodes a global ID into a database PK.
    field_class = GlobalIDFormField
    def filter(self, qs, value):
        """Convert the filter value to a primary key before filtering."""
        _id = None
        if value is not None:
            _, _id = from_global_id(value)
        return super().filter(qs, _id)
class GlobalIDMultipleChoiceField(MultipleChoiceField):
    """Form field accepting a list of GraphQL global IDs."""

    default_error_messages = {
        "invalid_choice": "One of the specified IDs was invalid (%(value)s).",
        "invalid_list": "Enter a list of values.",
    }

    # The previous `to_python` override only delegated to super() and added
    # nothing; it was removed as dead code - the inherited behavior is used.

    def valid_value(self, value):
        # Clean will raise a validation error if there is a problem
        GlobalIDFormField().clean(value)
        return True
class GlobalIDMultipleChoiceFilter(MultipleChoiceFilter):
    # Multi-value filter that decodes each global ID into a database PK.
    field_class = GlobalIDMultipleChoiceField
    def filter(self, qs, value):
        # Keep only the raw PK portion of each global ID before delegating.
        gids = [from_global_id(v)[1] for v in value]
        return super().filter(qs, gids)
def filter_metadata(qs, _, value):
    """Narrow *qs* by a list of metadata requirements.

    Each item must provide a ``key`` and may provide a ``value``. With a
    (truthy) value the row must store exactly that key/value pair; without
    one, only the presence of the key is required.
    """
    for item in value:
        key = item.get("key")
        wanted = item.get("value")
        if wanted:
            lookup = {"metadata__contains": {key: wanted}}
        else:
            lookup = {"metadata__has_key": key}
        qs = qs.filter(**lookup)
    return qs
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/core/filters/shared_filters.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/core/filters/where_filters.py | import django_filters
from django.db import models
from django.forms import CharField, NullBooleanField
from django_filters import Filter, MultipleChoiceFilter
from django_filters.filters import FilterMethod
from graphql_relay import from_global_id
from .filters import (
DefaultMultipleChoiceField,
DefaultOperationField,
ListObjectTypeFilter,
MetadataFilter,
)
from .shared_filters import (
GlobalIDFilter,
GlobalIDFormField,
GlobalIDMultipleChoiceField,
filter_metadata,
)
from .where_input import MetadataFilterInput
class WhereFilterSet(django_filters.FilterSet):
    """Implementation of FilterSet for where filtering.
    Should be used for all where FilterSet classes.
    """
    def filter_queryset(self, queryset):
        """Filter the queryset.
        Filter the queryset with the underlying form's `cleaned_data`. You must
        call `is_valid()` or `errors` before calling this method.
        This method should be overridden if additional filtering needs to be
        applied to the queryset before it is cached.
        """
        for name, value in self.form.cleaned_data.items():
            # Ensure that we do not filter by fields that were not provided in
            # the input. The cleaned_data has all filter fields, but we only
            # want to filter by those that were specified in the query.
            if name not in self.form.data:
                continue
            queryset = self.filters[name].filter(queryset, value)
            assert isinstance(queryset, models.QuerySet), (
                f"Expected '{type(self).__name__}.{name}' to return a QuerySet, but got a {type(queryset).__name__} instead."
            )
        return queryset
class WhereFilter(Filter):
    """Base filter for ``where`` filtering; empty values are filtered on, not skipped."""
    @property
    def method(self):
        # Filter method needs to be lazily resolved, as it may be dependent on
        # the 'parent' FilterSet.
        return self._method
    @method.setter
    def method(self, value):
        self._method = value
        # clear existing FilterMethod
        if isinstance(self.filter, WhereFilterMethod):
            del self.filter
        # override filter w/ FilterMethod.
        if value is not None:
            self.filter = WhereFilterMethod(self)  # type: ignore[method-assign]
    def filter(self, qs, value):
        # NOTE(review): unlike django-filter's base Filter, this override does
        # not skip empty values - presumably intentional for where-filters;
        # confirm against django-filter's Filter.filter implementation.
        if self.distinct:
            qs = qs.distinct()
        lookup = f"{self.field_name}__{self.lookup_expr}"
        qs = self.get_method(qs)(**{lookup: value})
        return qs
class WhereFilterMethod(FilterMethod):
    def __call__(self, qs, value):
        """Override the default FilterMethod to allow filtering by empty values."""
        # Delegate straight to the user-supplied method, passing the field
        # name, without any empty-value short-circuit.
        return self.method(qs, self.f.field_name, value)
class ObjectTypeWhereFilter(WhereFilter):
    # Where-filter accepting a single value of the given Graphene input type.
    def __init__(self, input_class, *args, **kwargs):
        self.input_class = input_class
        super().__init__(*args, **kwargs)
class OperationObjectTypeWhereFilter(WhereFilter):
    # Where-filter whose form field allows at most one operation at a time.
    field_class = DefaultOperationField
    def __init__(self, input_class, *args, **kwargs):
        self.input_class = input_class
        super().__init__(*args, **kwargs)
class ListObjectTypeWhereFilter(MultipleChoiceFilter, WhereFilter):
    # Where-filter accepting a list of values of the given Graphene input type.
    field_class = DefaultMultipleChoiceField
    def __init__(self, input_class, *args, **kwargs):
        self.input_class = input_class
        super().__init__(*args, **kwargs)
class BooleanWhereFilter(WhereFilter):
    # Where-filter accepting a true/false/null boolean value.
    field_class = NullBooleanField
class CharWhereFilter(WhereFilter):
    # Where-filter accepting a plain string value.
    field_class = CharField
class EnumWhereFilter(CharWhereFilter):
    """Where filter class for Graphene enum object.

    enum_class needs to be passed explicitly as well as the method.
    """
    def __init__(self, input_class, *args, **kwargs):
        # NOTE(review): `assert` is stripped under `python -O`; kept as-is to
        # preserve behavior.
        assert kwargs.get("method"), (
            "Providing exact filter method is required for EnumFilter"
        )
        self.input_class = input_class
        super().__init__(*args, **kwargs)
class GlobalIDMultipleChoiceWhereFilter(MultipleChoiceFilter, WhereFilter):
    # Where-filter that decodes each GraphQL global ID into a database PK.
    field_class = GlobalIDMultipleChoiceField
    def filter(self, qs, value):
        # Keep only the raw PK portion of each global ID before delegating.
        gids = [from_global_id(v)[1] for v in value]
        return super().filter(qs, gids)
class GlobalIDWhereFilter(WhereFilter):
    # Where-filter that decodes a single GraphQL global ID into a database PK.
    field_class = GlobalIDFormField

    def filter(self, qs, value):
        """Convert the filter value to a primary key before filtering."""
        _id = None
        if value is not None:
            _, _id = from_global_id(value)
        # Fix: the previous `super(GlobalIDFilter, self).filter(...)` raised
        # TypeError at runtime, because this class does not inherit from
        # GlobalIDFilter (two-argument super() requires isinstance(self, type)).
        # Delegate through the normal WhereFilter chain instead, matching
        # GlobalIDMultipleChoiceWhereFilter above.
        return super().filter(qs, _id)
class MetadataWhereFilterBase(WhereFilterSet):
    # Base where-FilterSet adding legacy-style metadata key/value filtering.
    metadata = ListObjectTypeFilter(input_class=MetadataFilter, method=filter_metadata)
    class Meta:
        abstract = True
def filter_where_metadata(qs, _, value):
    """Filter queryset by metadata.

    Supported modes:
    - key existence: only ``key`` given -> rows where the key exists,
    - ``eq``: rows where the key stores exactly the given value,
    - ``one_of``: rows where the key stores any of the listed values.
    Empty/missing input yields an empty queryset.
    """
    if not value:
        return qs.none()
    key = value["key"]
    constraints = value.get("value")
    if not constraints:
        # No value constraint - match on key presence alone.
        return qs.filter(metadata__has_key=key)
    eq_value = constraints.get("eq")
    if eq_value:
        return qs.filter(metadata__contains={key: eq_value})
    options = constraints.get("one_of")
    if options:
        # OR together one containment predicate per candidate value.
        predicate = models.Q()
        for option in options:
            predicate |= models.Q(metadata__contains={key: option})
        return qs.filter(predicate)
    return qs.none()
class MetadataWhereBase(WhereFilterSet):
    # Base where-FilterSet exposing structured metadata filtering.
    metadata = ObjectTypeWhereFilter(
        input_class=MetadataFilterInput,
        method=filter_where_metadata,
        help_text="Filter by metadata fields.",
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/core/filters/where_filters.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/app/tests/test_app_by_token_loader_use_cache.py | from unittest.mock import patch
from django.utils import timezone
from ....app.models import App, AppToken
from ...context import SaleorContext
from ..dataloaders.app import AppByTokenLoader, create_app_cache_key_from_token
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_cache_token_calculation(
    mocked_cache, app, setup_mock_for_cache
):
    """Loading an app by token should cache the (app_id, token_id) pair under the token key."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token"
    token, _ = app.tokens.create(
        name="test_token",
        auth_token=raw_token,
    )
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token])
    fetched_app = loaded_apps[0]
    # then
    cached_app_id, token_id = mocked_cache.get(expected_cache_key)
    assert token.id == token_id
    assert fetched_app.id == app.id == cached_app_id
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_invalid_token(mocked_cache, app, setup_mock_for_cache):
    """An unknown token should resolve to None and leave the cache untouched."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token"
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token])
    fetched_app = loaded_apps[0]
    # then
    cached_data = mocked_cache.get(expected_cache_key)
    assert fetched_app is None
    assert cached_data is None
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_use_cached_app(mocked_cache, app, setup_mock_for_cache):
    """A pre-populated cache entry should be reused without being re-written."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token"
    token, _ = app.tokens.create(
        name="test_token",
        auth_token=raw_token,
    )
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    mocked_cache.set(expected_cache_key, (app.id, token.id), 123)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token])
    fetched_app = loaded_apps[0]
    # then
    cached_app_id, cached_token_id = mocked_cache.get(expected_cache_key)
    assert token.id == cached_token_id
    assert fetched_app.id == app.id == cached_app_id
    # Check that the cache was set only once during given test section
    mocked_cache.set.assert_called_once_with(
        expected_cache_key, (app.id, token.id), 123
    )
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_cached_app_not_active(
    mocked_cache, app, setup_mock_for_cache
):
    """A cached but deactivated app should not be returned by the loader."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token"
    token, _ = app.tokens.create(
        name="test_token",
        auth_token=raw_token,
    )
    app.is_active = False
    app.save(update_fields=["is_active"])
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    mocked_cache.set(expected_cache_key, (app.id, token.id), 123)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token])
    fetched_app = loaded_apps[0]
    # then
    cached_app_id, cached_token_id = mocked_cache.get(expected_cache_key)
    assert token.id == cached_token_id
    assert app.id == cached_app_id
    # Check that the app was not fetched from the database
    assert fetched_app is None
    # Check that the cache was set only once during given test section
    mocked_cache.set.assert_called_once_with(
        expected_cache_key, (app.id, token.id), 123
    )
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_cached_app_marked_as_removed(
    mocked_cache, app, setup_mock_for_cache
):
    """A cached app marked as removed (removed_at set) should not be returned."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token"
    token, _ = app.tokens.create(
        name="test_token",
        auth_token=raw_token,
    )
    app.removed_at = timezone.now()
    app.save(update_fields=["removed_at"])
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    mocked_cache.set(expected_cache_key, (app.id, token.id), 123)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token])
    fetched_app = loaded_apps[0]
    # then
    cached_app_id, cached_token_id = mocked_cache.get(expected_cache_key)
    assert token.id == cached_token_id
    assert app.id == cached_app_id
    # Check that the app was not fetched from the database
    assert fetched_app is None
    # Check that the cache was set only once during given test section
    mocked_cache.set.assert_called_once_with(
        expected_cache_key, (app.id, token.id), 123
    )
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_missing_app(mocked_cache, app, setup_mock_for_cache):
    """A stale cache entry pointing at a deleted app should be evicted."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token"
    token, _ = app.tokens.create(
        name="test_token",
        auth_token=raw_token,
    )
    deleted_app_id = app.id
    app.delete()
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    mocked_cache.set(expected_cache_key, (deleted_app_id, token.id), 123)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token])
    fetched_app = loaded_apps[0]
    # then
    # Check that the app was removed from the database
    assert not App.objects.exists()
    # Check that the app was not fetched from the database
    assert fetched_app is None
    # Check that the cache was set only once during given test section
    mocked_cache.set.assert_called_once_with(
        expected_cache_key, (deleted_app_id, token.id), 123
    )
    # Check if the token was removed from the cache
    assert mocked_cache.get(expected_cache_key) is None
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_removed_token(mocked_cache, app, setup_mock_for_cache):
    """A stale cache entry pointing at a deleted token should be evicted."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token"
    token, _ = app.tokens.create(
        name="test_token",
        auth_token=raw_token,
    )
    token_id = token.id
    token.delete()
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    mocked_cache.set(expected_cache_key, (app.id, token_id), 123)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token])
    fetched_app = loaded_apps[0]
    # then
    # Check that the token was removed from the database
    assert not AppToken.objects.exists()
    # Check that the app was not fetched from the database
    assert fetched_app is None
    # Check that the cache was set only once during given test section
    mocked_cache.set.assert_called_once_with(
        expected_cache_key, (app.id, token_id), 123
    )
    # Check if the token was removed from the cache
    assert mocked_cache.get(expected_cache_key) is None
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_one_of_tokens_in_cache(
    mocked_cache, app, setup_mock_for_cache
):
    """Batch-loading a cached and an uncached token should resolve both correctly."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token"
    token, _ = app.tokens.create(
        name="test_token",
        auth_token=raw_token,
    )
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    mocked_cache.set(expected_cache_key, (app.id, token.id), 123)
    raw_token2 = "test_token2"
    token2, _ = app.tokens.create(
        name="test_token2",
        auth_token=raw_token2,
    )
    expected_cache_key2 = create_app_cache_key_from_token(raw_token2)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token, raw_token2])
    fetched_app = loaded_apps[0]
    fetched_app2 = loaded_apps[1]
    # then
    cached_app_id, cached_token_id = mocked_cache.get(expected_cache_key)
    assert token.id == cached_token_id
    assert fetched_app.id == app.id == cached_app_id
    cached_app_id2, cached_token_id2 = mocked_cache.get(expected_cache_key2)
    assert token2.id == cached_token_id2
    assert fetched_app2.id == app.id == cached_app_id2
    # Check that the cache was set once during given test section and second time inside dataloader
    assert mocked_cache.set.call_count == 2
@patch("saleor.graphql.app.dataloaders.app.cache")
def test_app_by_token_loader_tokens_with_same_last_4(
    mocked_cache, app, app_with_token, setup_mock_for_cache
):
    """Tokens sharing the same last 4 characters must resolve to their own apps."""
    # given
    dummy_cache = {}
    setup_mock_for_cache(dummy_cache, mocked_cache)
    raw_token = "test_token1234"
    token, _ = app.tokens.create(
        name="test_token",
        auth_token=raw_token,
    )
    expected_cache_key = create_app_cache_key_from_token(raw_token)
    app2 = app_with_token
    # Second token deliberately ends with the same "1234" suffix.
    raw_token2 = "test2_token1234"
    token2, _ = app2.tokens.create(
        name="test_token2",
        auth_token=raw_token2,
    )
    expected_cache_key2 = create_app_cache_key_from_token(raw_token2)
    # when
    context = SaleorContext()
    app_by_token_loader = AppByTokenLoader(context)
    loaded_apps = app_by_token_loader.batch_load([raw_token, raw_token2])
    fetched_app = loaded_apps[0]
    fetched_app2 = loaded_apps[1]
    # then
    assert token.token_last_4 == token2.token_last_4
    cached_app_id, cached_token_id = mocked_cache.get(expected_cache_key)
    assert token.id == cached_token_id
    assert fetched_app.id == app.id == cached_app_id
    cached_app_id2, cached_token_id2 = mocked_cache.get(expected_cache_key2)
    assert token2.id == cached_token_id2
    assert fetched_app2.id == app2.id == cached_app_id2
    # Check that the cache was set once during given test section and second time inside dataloader
    assert mocked_cache.set.call_count == 2
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/app/tests/test_app_by_token_loader_use_cache.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/payment/tests/test_utils/test_create_transaction_event_for_transaction_session.py | import datetime
import logging
from decimal import Decimal
from unittest.mock import patch
import pytest
from freezegun import freeze_time
from ....order import OrderAuthorizeStatus
from ... import TransactionEventType
from ...models import TransactionEvent
from ...utils import (
create_transaction_event_for_transaction_session,
create_transaction_event_from_request_and_webhook_response,
)
@patch("saleor.plugins.manager.PluginsManager.order_updated")
@patch("saleor.plugins.manager.PluginsManager.order_fully_paid")
@freeze_time("2018-05-31 12:00:01")
def test_create_transaction_event_from_request_triggers_webhooks_when_authorized(
    mock_order_fully_paid,
    mock_order_updated,
    transaction_item_generator,
    app,
    order_with_lines,
    django_capture_on_commit_callbacks,
    plugins_manager,
):
    """Fully authorizing an order should emit order_updated but not order_fully_paid."""
    # given
    order = order_with_lines
    transaction = transaction_item_generator(order_id=order.pk)
    request_event = TransactionEvent.objects.create(
        type=TransactionEventType.AUTHORIZATION_REQUEST,
        amount_value=order.total.gross.amount,
        currency="USD",
        transaction_id=transaction.id,
    )
    # Authorize exactly the order total so the authorize status becomes FULL.
    event_amount = order.total.gross.amount
    event_type = TransactionEventType.AUTHORIZATION_SUCCESS
    expected_psp_reference = "psp:122:222"
    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
    }
    # when
    with django_capture_on_commit_callbacks(execute=True):
        create_transaction_event_for_transaction_session(
            request_event, app, plugins_manager, response_data
        )
    # then
    order.refresh_from_db()
    assert order_with_lines.authorize_status == OrderAuthorizeStatus.FULL
    assert not mock_order_fully_paid.called
    mock_order_updated.assert_called_once_with(order_with_lines, webhooks=set())
@freeze_time("2018-05-31 12:00:01")
def test_create_transaction_event_from_request_updates_order_authorize(
    transaction_item_generator, app, order_with_lines, plugins_manager
):
    """Authorization success should update the order's authorized amount and set PARTIAL status."""
    # given
    order = order_with_lines
    transaction = transaction_item_generator(order_id=order.pk)
    request_event = TransactionEvent.objects.create(
        type=TransactionEventType.AUTHORIZATION_REQUEST,
        amount_value=Decimal(11.00),
        currency="USD",
        transaction_id=transaction.id,
    )
    # The webhook reports a different amount than requested; the reported
    # amount is the one expected to be stored on the order.
    event_amount = 12.00
    event_type = TransactionEventType.AUTHORIZATION_SUCCESS
    expected_psp_reference = "psp:122:222"
    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
    }
    # when
    create_transaction_event_for_transaction_session(
        request_event, app, plugins_manager, response_data
    )
    # then
    order.refresh_from_db()
    assert order.total_authorized_amount == Decimal(event_amount)
    assert order.authorize_status == OrderAuthorizeStatus.PARTIAL
@pytest.mark.parametrize(
    ("response_result", "transaction_amount_field_name"),
    [
        (TransactionEventType.AUTHORIZATION_REQUEST, "authorize_pending_value"),
        (TransactionEventType.AUTHORIZATION_SUCCESS, "authorized_value"),
        (TransactionEventType.CHARGE_REQUEST, "charge_pending_value"),
        (TransactionEventType.CHARGE_SUCCESS, "charged_value"),
    ],
)
def test_create_transaction_event_for_transaction_session_success_response(
    response_result,
    transaction_amount_field_name,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """A success-type result should create an event counted in calculations and update the matching amount field."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    assert response_event.include_in_calculations
    assert response_event.amount_value == expected_amount
    transaction.refresh_from_db()
    assert getattr(transaction, transaction_amount_field_name) == expected_amount
@pytest.mark.parametrize(
    ("response_result", "transaction_amount_field_name"),
    [
        (TransactionEventType.AUTHORIZATION_REQUEST, "authorize_pending_value"),
        (TransactionEventType.AUTHORIZATION_SUCCESS, "authorized_value"),
        (TransactionEventType.CHARGE_REQUEST, "charge_pending_value"),
        (TransactionEventType.CHARGE_SUCCESS, "charged_value"),
    ],
)
def test_create_transaction_event_for_transaction_session_success_response_with_no_amount(
    response_result,
    transaction_amount_field_name,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """When the webhook response has no amount, the request event's amount should be used."""
    # given
    request_event_amount = Decimal(12)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = None
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        amount_value=request_event_amount,
        type=TransactionEventType.CHARGE_REQUEST,
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    assert response_event.include_in_calculations
    assert response_event.amount_value == request_event_amount
    transaction.refresh_from_db()
    assert getattr(transaction, transaction_amount_field_name) == request_event_amount
@pytest.mark.parametrize(
    ("response_result", "transaction_amount_field_name"),
    [
        (TransactionEventType.AUTHORIZATION_REQUEST, "authorize_pending_value"),
        (TransactionEventType.AUTHORIZATION_SUCCESS, "authorized_value"),
        (TransactionEventType.CHARGE_REQUEST, "charge_pending_value"),
        (TransactionEventType.CHARGE_SUCCESS, "charged_value"),
    ],
)
def test_create_transaction_event_for_transaction_session_success_response_with_0(
    response_result,
    transaction_amount_field_name,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """A zero amount in the webhook response should be honored, not treated as missing."""
    # given
    expected_amount = Decimal(0)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    assert response_event.include_in_calculations
    assert response_event.amount_value == expected_amount
    transaction.refresh_from_db()
    assert getattr(transaction, transaction_amount_field_name) == expected_amount
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
        TransactionEventType.REFUND_FAILURE,
        TransactionEventType.REFUND_SUCCESS,
    ],
)
def test_create_transaction_event_for_transaction_session_not_success_events(
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """Non-success results do not change any of the transaction's amount fields."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        amount_value=expected_amount,
        type=TransactionEventType.CHARGE_REQUEST,
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    # results invalid for a CHARGE_REQUEST flow may be coerced to CHARGE_FAILURE
    assert response_event.amount_value == expected_amount
    assert response_event.type in [response_result, TransactionEventType.CHARGE_FAILURE]
    transaction.refresh_from_db()
    assert transaction.authorized_value == Decimal(0)
    assert transaction.charged_value == Decimal(0)
    assert transaction.authorize_pending_value == Decimal(0)
    assert transaction.charge_pending_value == Decimal(0)
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
        TransactionEventType.REFUND_FAILURE,
        TransactionEventType.REFUND_SUCCESS,
    ],
)
def test_create_transaction_event_for_transaction_session_not_success_events_with_no_amount(
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """Non-success results with amount=None fall back to the request amount and keep totals at 0."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = None
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        amount_value=expected_amount,
        type=TransactionEventType.CHARGE_REQUEST,
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    assert response_event.amount_value == expected_amount
    assert response_event.type in [response_result, TransactionEventType.CHARGE_FAILURE]
    transaction.refresh_from_db()
    assert transaction.authorized_value == Decimal(0)
    assert transaction.charged_value == Decimal(0)
    assert transaction.authorize_pending_value == Decimal(0)
    assert transaction.charge_pending_value == Decimal(0)
@pytest.mark.parametrize(
    ("response_result", "message"),
    [
        (
            TransactionEventType.AUTHORIZATION_SUCCESS,
            "Missing value for field: pspReference.",
        ),
        (
            TransactionEventType.CHARGE_SUCCESS,
            "Missing value for field: pspReference.",
        ),
        (
            TransactionEventType.CHARGE_FAILURE,
            "Message related to the payment",
        ),
        (
            TransactionEventType.CHARGE_REQUEST,
            "Missing value for field: pspReference.",
        ),
        (
            TransactionEventType.AUTHORIZATION_REQUEST,
            "Missing value for field: pspReference.",
        ),
    ],
)
def test_create_transaction_event_for_transaction_session_missing_psp_reference(
    response_result,
    message,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """Without a pspReference the response is stored as CHARGE_FAILURE with an explanatory message."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    del response["pspReference"]
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        amount_value=expected_amount,
        type=TransactionEventType.CHARGE_REQUEST,
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    assert response_event.amount_value == expected_amount
    assert response_event.type == TransactionEventType.CHARGE_FAILURE
    assert message in response_event.message
    transaction.refresh_from_db()
    assert transaction.authorized_value == Decimal(0)
    assert transaction.charged_value == Decimal(0)
    assert transaction.authorize_pending_value == Decimal(0)
    assert transaction.charge_pending_value == Decimal(0)
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
    ],
)
def test_create_transaction_event_for_transaction_session_missing_reference_with_action(
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """ACTION_REQUIRED results are valid without a pspReference and keep the result type."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    del response["pspReference"]
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        amount_value=expected_amount,
        type=TransactionEventType.CHARGE_REQUEST,
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    assert response_event.amount_value == expected_amount
    assert response_event.type == response_result
    transaction.refresh_from_db()
    assert transaction.authorized_value == Decimal(0)
    assert transaction.charged_value == Decimal(0)
    assert transaction.authorize_pending_value == Decimal(0)
    assert transaction.charge_pending_value == Decimal(0)
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_SUCCESS,
    ],
)
@patch("saleor.plugins.manager.PluginsManager.order_updated")
@patch("saleor.plugins.manager.PluginsManager.order_fully_paid")
def test_create_transaction_event_for_transaction_session_call_webhook_order_updated(
    mock_order_fully_paid,
    mock_order_updated,
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
    order_with_lines,
    django_capture_on_commit_callbacks,
):
    """A partial success on an order transaction triggers order_updated but not order_fully_paid."""
    # given
    # amount is less than the order total, so the order is not fully paid
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    transaction = transaction_item_generator(order_id=order_with_lines.pk)
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    with django_capture_on_commit_callbacks(execute=True):
        create_transaction_event_for_transaction_session(
            request_event,
            webhook_app,
            manager=plugins_manager,
            transaction_webhook_response=response,
        )
    # then
    order_with_lines.refresh_from_db()
    assert not mock_order_fully_paid.called
    mock_order_updated.assert_called_once_with(order_with_lines, webhooks=set())
@patch("saleor.plugins.manager.PluginsManager.order_updated")
@patch("saleor.plugins.manager.PluginsManager.order_fully_paid")
def test_create_transaction_event_for_transaction_session_call_webhook_for_fully_paid(
    mock_order_fully_paid,
    mock_order_updated,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
    order_with_lines,
    django_capture_on_commit_callbacks,
):
    """Charging exactly the order total triggers both order_fully_paid and order_updated."""
    # given
    response = transaction_session_response.copy()
    response["result"] = TransactionEventType.CHARGE_SUCCESS.upper()
    response["amount"] = order_with_lines.total.gross.amount
    transaction = transaction_item_generator(order_id=order_with_lines.pk)
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    with django_capture_on_commit_callbacks(execute=True):
        create_transaction_event_for_transaction_session(
            request_event,
            webhook_app,
            manager=plugins_manager,
            transaction_webhook_response=response,
        )
    # then
    order_with_lines.refresh_from_db()
    mock_order_fully_paid.assert_called_once_with(order_with_lines, webhooks=set())
    mock_order_updated.assert_called_once_with(order_with_lines, webhooks=set())
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_REQUEST,
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_REQUEST,
        TransactionEventType.CHARGE_SUCCESS,
    ],
)
def test_create_transaction_event_for_transaction_session_success_sets_actions(
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """Successful results copy the response `actions` list (deduplicated) onto the transaction."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    # "CANCEL" appears twice on purpose: available_actions must be deduplicated
    response["actions"] = ["CANCEL", "CANCEL", "CHARGE", "REFUND"]
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    transaction.refresh_from_db()
    assert len(transaction.available_actions) == 3
    assert set(transaction.available_actions) == {"refund", "charge", "cancel"}
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
        TransactionEventType.REFUND_FAILURE,
        TransactionEventType.REFUND_SUCCESS,
    ],
)
def test_create_transaction_event_for_transaction_session_failure_doesnt_set_actions(
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """Non-success results leave the transaction's existing available_actions untouched."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    response["actions"] = ["CANCEL", "CHARGE", "REFUND"]
    transaction = transaction_item_generator(available_actions=["charge"])
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        amount_value=expected_amount,
        type=TransactionEventType.CHARGE_REQUEST,
    )
    # when
    create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    transaction.refresh_from_db()
    assert transaction.available_actions == ["charge"]
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_REQUEST,
        TransactionEventType.CHARGE_REQUEST,
    ],
)
def test_create_transaction_event_for_transaction_session_request_events_as_response(
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """A `*_REQUEST` result updates the existing request event in place instead of creating a new one."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when the response event is the `*_REQUEST` event
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then the response event should update the request event with the values
    # from the response
    assert response_event.id == request_event.id
    assert response_event.type == response_result
    assert response_event.include_in_calculations is True
    assert response_event.amount_value == expected_amount
    assert response_event.message == response["message"]
    assert response_event.external_url == response["externalUrl"]
    assert response_event.created_at == datetime.datetime.fromisoformat(
        response["time"]
    )
    assert response_event.psp_reference == response["pspReference"]
@freeze_time("2018-05-31 12:00:01")
def test_create_transaction_event_updates_transaction_modified_at(
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
    checkout,
):
    """Processing a response refreshes modified_at on both the transaction and its checkout."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["amount"] = expected_amount
    transaction = transaction_item_generator(checkout_id=checkout.pk)
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    # freeze a later time so the update timestamp is distinguishable from creation
    with freeze_time("2023-03-18 12:00:00"):
        calculation_time = datetime.datetime.now(tz=datetime.UTC)
        create_transaction_event_for_transaction_session(
            request_event,
            webhook_app,
            manager=plugins_manager,
            transaction_webhook_response=response,
        )
    # then
    transaction.refresh_from_db()
    checkout.refresh_from_db()
    assert transaction.modified_at == calculation_time
    assert checkout.last_transaction_modified_at == calculation_time
def test_create_transaction_event_for_transaction_session_failure_set_psp_reference(
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """A failure response that includes a pspReference stores it on the event and the transaction."""
    # given
    expected_psp_reference = "ABC"
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = TransactionEventType.CHARGE_FAILURE.upper()
    response["amount"] = expected_amount
    response["pspReference"] = expected_psp_reference
    transaction = transaction_item_generator(available_actions=["charge"])
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        amount_value=expected_amount,
        type=TransactionEventType.CHARGE_REQUEST,
    )
    # when
    create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    transaction.refresh_from_db()
    assert transaction.events.count() == 2
    failure_event = transaction.events.last()
    assert failure_event.psp_reference == expected_psp_reference
    assert failure_event.type == TransactionEventType.CHARGE_FAILURE
    assert transaction.psp_reference == expected_psp_reference
def test_create_transaction_event_for_transaction_session_when_psp_ref_missing(
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """A null pspReference in the response does not overwrite the transaction's existing one."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = TransactionEventType.CHARGE_ACTION_REQUIRED.upper()
    response["amount"] = expected_amount
    response["pspReference"] = None
    transaction = transaction_item_generator(available_actions=["charge"])
    current_psp_reference = transaction.psp_reference
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        amount_value=expected_amount,
        type=TransactionEventType.CHARGE_REQUEST,
    )
    # when
    create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    transaction.refresh_from_db()
    assert transaction.events.count() == 2
    assert transaction.psp_reference == current_psp_reference
@freeze_time("2018-05-31 12:00:01")
def test_create_transaction_event_updates_transaction_modified_at_for_failure(
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
    checkout,
):
    """Even a failure response refreshes modified_at on the transaction and its checkout."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["amount"] = expected_amount
    response["result"] = TransactionEventType.CHARGE_FAILURE.upper()
    transaction = transaction_item_generator(checkout_id=checkout.pk)
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    # freeze a later time so the update timestamp is distinguishable from creation
    with freeze_time("2023-03-18 12:00:00"):
        calculation_time = datetime.datetime.now(tz=datetime.UTC)
        create_transaction_event_for_transaction_session(
            request_event,
            webhook_app,
            manager=plugins_manager,
            transaction_webhook_response=response,
        )
    # then
    transaction.refresh_from_db()
    checkout.refresh_from_db()
    assert transaction.modified_at == calculation_time
    assert checkout.last_transaction_modified_at == calculation_time
def test_create_transaction_event_message_limit_exceeded(
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
    checkout,
    caplog,
):
    """Messages over the 512-char field limit are truncated with an ellipsis and a warning is logged."""
    # given
    message = "m" * 1000
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["amount"] = expected_amount
    response["message"] = message
    transaction = transaction_item_generator(checkout_id=checkout.pk)
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    transaction.refresh_from_db()
    assert transaction.events.count() == 2
    event = transaction.events.last()
    # 511 chars are kept, the 512th is the ellipsis
    assert event.message == message[:511] + "…"
    assert len(caplog.records) == 1
    assert caplog.records[0].message == (
        "Value for field: message in response of transaction action webhook "
        "exceeds the character field limit. Message has been truncated."
    )
    assert caplog.records[0].levelno == logging.WARNING
@pytest.mark.parametrize(
    ("input_message", "expected_message"),
    [("m" * 512, "m" * 512), (None, ""), ("", ""), (5, "5"), ("你好世界", "你好世界")],
)
def test_create_transaction_event_with_message(
    input_message,
    expected_message,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
    checkout,
):
    """Response messages are stringified; None/empty become "" and non-str values are coerced."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["amount"] = expected_amount
    response["message"] = input_message
    transaction = transaction_item_generator(checkout_id=checkout.pk)
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    transaction.refresh_from_db()
    assert transaction.events.count() == 2
    event = transaction.events.last()
    assert event.message == expected_message
def test_create_transaction_event_with_invalid_message(
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
    checkout,
    caplog,
):
    """A message that cannot be stringified is stored as "" and an error is logged."""
    # given
    # str() on this object always raises: the ASCII-encode of a non-ASCII
    # string fails (UnicodeEncodeError) before the `raise` even executes.
    class NonParsableObject:
        def __str__(self):
            raise "こんにちは".encode("ascii")
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["amount"] = expected_amount
    response["message"] = NonParsableObject()
    transaction = transaction_item_generator(checkout_id=checkout.pk)
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    transaction.refresh_from_db()
    assert transaction.events.count() == 2
    event = transaction.events.last()
    assert event.message == ""
    assert (
        "Incorrect value for field: message in response of transaction action webhook."
    ) in (record.message for record in caplog.records)
def test_create_transaction_event_from_request_and_webhook_response_incorrect_data(
    transaction_item_generator,
    app,
):
    """Unparseable response data produces a failure event mirroring the request's amount/currency."""
    # given
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        type=TransactionEventType.CHARGE_REQUEST,
        amount_value=Decimal(11.00),
        currency="USD",
        transaction_id=transaction.id,
    )
    response_data = {"wrong-data": "psp:122:222"}
    # when
    failed_event = create_transaction_event_from_request_and_webhook_response(
        request_event, app, response_data
    )
    # then
    request_event.refresh_from_db()
    assert TransactionEvent.objects.count() == 2
    assert failed_event
    assert failed_event.type == TransactionEventType.CHARGE_FAILURE
    assert failed_event.amount_value == request_event.amount_value
    assert failed_event.currency == request_event.currency
    assert failed_event.transaction_id == transaction.id
@freeze_time("2018-05-31 12:00:01")
def test_create_transaction_event_for_transaction_session_twice_auth(
    transaction_item_generator,
    app,
    plugins_manager,
):
    """A second AUTHORIZATION_SUCCESS on the same transaction is stored as AUTHORIZATION_FAILURE."""
    # given
    transaction = transaction_item_generator()
    # an authorization already exists on the transaction
    transaction.events.create(
        type=TransactionEventType.AUTHORIZATION_SUCCESS,
        amount_value=Decimal(22.00),
        currency="USD",
    )
    request_event = TransactionEvent.objects.create(
        type=TransactionEventType.AUTHORIZATION_REQUEST,
        amount_value=Decimal(11.00),
        currency="USD",
        transaction_id=transaction.id,
    )
    event_amount = 12.00
    event_type = TransactionEventType.AUTHORIZATION_SUCCESS
    event_time = "2022-11-18T13:25:58.169685+00:00"
    event_url = "http://localhost:3000/event/ref123"
    expected_psp_reference = "psp:122:222"
    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
    }
    # when
    failed_event = create_transaction_event_for_transaction_session(
        request_event, app, plugins_manager, response_data
    )
    # then
    assert TransactionEvent.objects.count() == 3
    assert failed_event
    assert failed_event.psp_reference == request_event.psp_reference
    assert failed_event.type == TransactionEventType.AUTHORIZATION_FAILURE
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_REQUEST,
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_REQUEST,
        TransactionEventType.CHARGE_SUCCESS,
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
    ],
)
@pytest.mark.parametrize(
    "payment_method_details",
    [
        {
            "type": "CARD",
            "name": "Test Card",
            "brand": "Brand",
            "firstDigits": "1234",
            "lastDigits": "5678",
            "expMonth": 12,
            "expYear": 2025,
        },
        {
            "type": "CARD",
            "name": "Test Card",
        },
        {
            "type": "CARD",
            "name": "Test Card",
            "brand": "Brand",
            "lastDigits": "5678",
        },
        {
            "type": "OTHER",
            "name": "Test Other",
        },
    ],
)
def test_create_transaction_event_for_transaction_session_sets_payment_method_details(
    payment_method_details,
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """Valid paymentMethodDetails are persisted on the transaction for every result type."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    response["paymentMethodDetails"] = payment_method_details
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction, include_in_calculations=False
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    assert response_event
    transaction.refresh_from_db()
    # optional card fields default to None when absent from the payload
    assert transaction.payment_method_name == payment_method_details["name"]
    assert transaction.payment_method_type == payment_method_details["type"].lower()
    assert transaction.cc_brand == payment_method_details.get("brand")
    assert transaction.cc_first_digits == payment_method_details.get("firstDigits")
    assert transaction.cc_last_digits == payment_method_details.get("lastDigits")
    assert transaction.cc_exp_month == payment_method_details.get("expMonth")
    assert transaction.cc_exp_year == payment_method_details.get("expYear")
@pytest.mark.parametrize(
    "response_result",
    [
        TransactionEventType.AUTHORIZATION_REQUEST,
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_REQUEST,
        TransactionEventType.CHARGE_SUCCESS,
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
    ],
)
@pytest.mark.parametrize(
    "payment_method_details",
    [
        # unknown type
        {
            "type": "WRONG-TYPE",
            "name": "Test Card",
        },
        # Missing name
        {
            "type": "CARD",
        },
        # Missing type
        {
            "name": "Test Card",
        },
    ],
)
def test_create_transaction_event_for_transaction_session_invalid_payment_method_details(
    payment_method_details,
    response_result,
    transaction_item_generator,
    transaction_session_response,
    webhook_app,
    plugins_manager,
):
    """Invalid paymentMethodDetails cause a failure event and persist nothing on the transaction."""
    # given
    expected_amount = Decimal(15)
    response = transaction_session_response.copy()
    response["result"] = response_result.upper()
    response["amount"] = expected_amount
    response["paymentMethodDetails"] = payment_method_details
    transaction = transaction_item_generator()
    request_event = TransactionEvent.objects.create(
        transaction=transaction,
        include_in_calculations=False,
        type=TransactionEventType.AUTHORIZATION_REQUEST,
    )
    # when
    response_event = create_transaction_event_for_transaction_session(
        request_event,
        webhook_app,
        manager=plugins_manager,
        transaction_webhook_response=response,
    )
    # then
    # failure type matches the AUTHORIZATION_REQUEST flow of the request event
    assert response_event
    assert response_event.type == TransactionEventType.AUTHORIZATION_FAILURE
    assert "paymentMethodDetails" in response_event.message
    transaction.refresh_from_db()
    assert not transaction.payment_method_name
    assert not transaction.payment_method_type
    assert not transaction.cc_brand
    assert not transaction.cc_first_digits
    assert not transaction.cc_last_digits
    assert not transaction.cc_exp_month
    assert not transaction.cc_exp_year
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/payment/tests/test_utils/test_create_transaction_event_for_transaction_session.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1027,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/payment/tests/test_utils/test_parse_transaction_action_data_for_action_webhook.py | import datetime
from decimal import Decimal
import pytest
from freezegun import freeze_time
from ... import TransactionEventType
from ...interface import (
TransactionRequestEventResponse,
TransactionRequestResponse,
)
from ...utils import (
parse_transaction_action_data_for_action_webhook,
)
def test_parse_transaction_action_data_for_action_webhook_with_only_psp_reference():
    """A payload holding nothing but pspReference parses, with no event attached."""
    # given a minimal webhook payload
    psp_reference = "psp:122:222"
    payload = {"pspReference": psp_reference}
    # when
    result, _ = parse_transaction_action_data_for_action_webhook(
        payload, TransactionEventType.CHARGE_REQUEST, Decimal(10.00)
    )
    # then the psp reference is captured but no event is built
    assert isinstance(result, TransactionRequestResponse)
    assert result.psp_reference == psp_reference
    assert result.event is None
@pytest.mark.parametrize(
    ("event_time", "expected_datetime"),
    [
        (
            "2023-10-17T10:18:28.111Z",
            datetime.datetime(2023, 10, 17, 10, 18, 28, 111000, tzinfo=datetime.UTC),
        ),
        ("2011-11-04", datetime.datetime(2011, 11, 4, 0, 0, tzinfo=datetime.UTC)),
        (
            "2011-11-04T00:05:23",
            datetime.datetime(2011, 11, 4, 0, 5, 23, tzinfo=datetime.UTC),
        ),
        (
            "2011-11-04T00:05:23Z",
            datetime.datetime(2011, 11, 4, 0, 5, 23, tzinfo=datetime.UTC),
        ),
        (
            "20111104T000523",
            datetime.datetime(2011, 11, 4, 0, 5, 23, tzinfo=datetime.UTC),
        ),
        (
            "2011-W01-2T00:05:23.283",
            datetime.datetime(2011, 1, 4, 0, 5, 23, 283000, tzinfo=datetime.UTC),
        ),
        (
            "2011-11-04 00:05:23.283",
            datetime.datetime(2011, 11, 4, 0, 5, 23, 283000, tzinfo=datetime.UTC),
        ),
        (
            "2011-11-04 00:05:23.283+00:00",
            datetime.datetime(2011, 11, 4, 0, 5, 23, 283000, tzinfo=datetime.UTC),
        ),
        (
            "1994-11-05T13:15:30Z",
            datetime.datetime(1994, 11, 5, 13, 15, 30, tzinfo=datetime.UTC),
        ),
    ],
)
def test_parse_transaction_action_data_for_action_webhook_with_provided_time(
    event_time, expected_datetime
):
    """ISO-8601 `time` strings in many variants are parsed into UTC-aware datetimes."""
    # given
    request_event_amount = Decimal(10.00)
    expected_psp_reference = "psp:122:222"
    event_amount = 12.00
    event_type = TransactionEventType.CHARGE_SUCCESS
    event_url = "http://localhost:3000/event/ref123"
    event_cause = "No cause"
    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
        "message": event_cause,
    }
    # when
    parsed_data, error_msg = parse_transaction_action_data_for_action_webhook(
        response_data, TransactionEventType.CHARGE_REQUEST, request_event_amount
    )
    # then
    assert isinstance(parsed_data, TransactionRequestResponse)
    assert parsed_data.event is not None
    assert parsed_data.event.time == expected_datetime
def test_parse_transaction_action_data_for_action_webhook_with_event_all_fields_provided():
    """A complete payload parses into an event carrying every provided field verbatim."""
    # given
    request_event_amount = Decimal(10.00)
    expected_psp_reference = "psp:122:222"
    event_amount = 12.00
    event_type = TransactionEventType.CHARGE_SUCCESS
    event_time = "2022-11-18T13:25:58.169685+00:00"
    event_url = "http://localhost:3000/event/ref123"
    event_cause = "No cause"
    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
        "message": event_cause,
    }
    # when
    parsed_data, error_msg = parse_transaction_action_data_for_action_webhook(
        response_data, TransactionEventType.CHARGE_REQUEST, request_event_amount
    )
    # then
    assert isinstance(parsed_data, TransactionRequestResponse)
    assert error_msg is None
    assert parsed_data.psp_reference == expected_psp_reference
    assert isinstance(parsed_data.event, TransactionRequestEventResponse)
    assert parsed_data.event.psp_reference == expected_psp_reference
    assert parsed_data.event.amount == event_amount
    assert parsed_data.event.time == datetime.datetime.fromisoformat(event_time)
    assert parsed_data.event.external_url == event_url
    assert parsed_data.event.message == event_cause
    assert parsed_data.event.type == event_type
def test_parse_transaction_action_data_for_action_webhook_with_incorrect_result():
    """A result that doesn't match the request flow yields no data and an error message."""
    # given
    request_event_amount = Decimal(10.00)
    expected_psp_reference = "psp:122:222"
    event_amount = 12.00
    # CHARGE_SUCCESS is not a valid result for a REFUND_REQUEST flow
    event_type = TransactionEventType.CHARGE_SUCCESS
    event_time = "2022-11-18T13:25:58.169685+00:00"
    event_url = "http://localhost:3000/event/ref123"
    event_cause = "No cause"
    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
        "message": event_cause,
    }
    # when
    parsed_data, error_msg = parse_transaction_action_data_for_action_webhook(
        response_data, TransactionEventType.REFUND_REQUEST, request_event_amount
    )
    # then
    assert parsed_data is None
    assert (
        error_msg
        == f"Missing or invalid value for `result`: {response_data['result']}. Possible values: {TransactionEventType.REFUND_SUCCESS.upper()}, {TransactionEventType.REFUND_FAILURE.upper()}."
    )
@freeze_time("2018-05-31 12:00:01")
def test_parse_transaction_action_data_for_action_webhook_with_event_only_mandatory_fields():
    """Only pspReference and result are required; missing fields fall back to defaults."""
    # given
    expected_psp_reference = "psp:122:222"
    expected_amount = Decimal("10.00")
    response_data = {
        "pspReference": expected_psp_reference,
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
    }
    # when
    parsed_data, _ = parse_transaction_action_data_for_action_webhook(
        response_data, TransactionEventType.CHARGE_REQUEST, expected_amount
    )
    # then
    # amount defaults to the request amount; time defaults to "now" (frozen above)
    assert isinstance(parsed_data, TransactionRequestResponse)
    assert parsed_data.psp_reference == expected_psp_reference
    assert isinstance(parsed_data.event, TransactionRequestEventResponse)
    assert parsed_data.event.psp_reference == expected_psp_reference
    assert parsed_data.event.type == TransactionEventType.CHARGE_SUCCESS
    assert parsed_data.event.amount == expected_amount
    assert parsed_data.event.time == datetime.datetime.now(tz=datetime.UTC)
    assert parsed_data.event.external_url == ""
    assert parsed_data.event.message == ""
def test_parse_transaction_action_data_for_action_webhook_use_provided_amount_when_event_amount_is_missing():
    """When the payload has no `amount`, the request amount is used."""
    # given
    # String literal avoids the Decimal-from-float precision pitfall.
    request_event_amount = Decimal("10.00")

    response_data = {
        "pspReference": "123",
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
    }

    # when
    parsed_data, _ = parse_transaction_action_data_for_action_webhook(
        response_data, TransactionEventType.CHARGE_REQUEST, request_event_amount
    )

    # then
    assert isinstance(parsed_data, TransactionRequestResponse)
    assert parsed_data.event is not None
    assert parsed_data.event.amount == request_event_amount
def test_parse_transaction_action_data_for_action_webhook_skips_input_amount_when_event_has_amount():
    """When the payload carries its own `amount`, it overrides the request amount."""
    # given
    # String literals avoid the Decimal-from-float precision pitfall.
    request_event_amount = Decimal("10.00")
    expected_amount = Decimal("12.00")
    assert request_event_amount != expected_amount

    response_data = {
        "pspReference": "123",
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
        "amount": expected_amount,
    }

    # when
    parsed_data, _ = parse_transaction_action_data_for_action_webhook(
        response_data, TransactionEventType.CHARGE_REQUEST, request_event_amount
    )

    # then
    assert isinstance(parsed_data, TransactionRequestResponse)
    assert parsed_data.event is not None
    assert parsed_data.event.amount == expected_amount
@freeze_time("2018-05-31 12:00:01")
def test_parse_transaction_action_data_for_action_webhook_with_missing_psp_reference():
    """An empty payload (no pspReference) cannot be parsed."""
    # given
    response_data = {}

    # when
    parsed_data, _ = parse_transaction_action_data_for_action_webhook(
        # String literal avoids the Decimal-from-float precision pitfall.
        response_data,
        TransactionEventType.AUTHORIZATION_REQUEST,
        Decimal("10.00"),
    )

    # then
    assert parsed_data is None
def test_parse_transaction_action_data_for_action_webhook_with_missing_optional_psp_reference():
    """Failure results do not require a pspReference, so parsing succeeds."""
    # given
    response_data = {"result": TransactionEventType.CHARGE_FAILURE.upper()}

    # when
    parsed_data, _ = parse_transaction_action_data_for_action_webhook(
        response_data, TransactionEventType.CHARGE_REQUEST, Decimal("10.00")
    )

    # then
    assert parsed_data
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/payment/tests/test_utils/test_parse_transaction_action_data_for_action_webhook.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/payment/tests/test_utils/test_parse_transaction_action_data_for_session_webhook.py | import datetime
from decimal import Decimal
import pytest
from freezegun import freeze_time
from ... import TransactionEventType
from ...interface import TransactionRequestEventResponse, TransactionSessionResponse
from ...utils import (
parse_transaction_action_data_for_session_webhook,
)
@pytest.mark.parametrize(
    ("event_time", "expected_datetime"),
    [
        (
            "2023-10-17T10:18:28.111Z",
            datetime.datetime(2023, 10, 17, 10, 18, 28, 111000, tzinfo=datetime.UTC),
        ),
        ("2011-11-04", datetime.datetime(2011, 11, 4, 0, 0, tzinfo=datetime.UTC)),
        (
            "2011-11-04T00:05:23",
            datetime.datetime(2011, 11, 4, 0, 5, 23, tzinfo=datetime.UTC),
        ),
        (
            "2011-11-04T00:05:23Z",
            datetime.datetime(2011, 11, 4, 0, 5, 23, tzinfo=datetime.UTC),
        ),
        (
            "20111104T000523",
            datetime.datetime(2011, 11, 4, 0, 5, 23, tzinfo=datetime.UTC),
        ),
        (
            "2011-W01-2T00:05:23.283",
            datetime.datetime(2011, 1, 4, 0, 5, 23, 283000, tzinfo=datetime.UTC),
        ),
        (
            "2011-11-04 00:05:23.283",
            datetime.datetime(2011, 11, 4, 0, 5, 23, 283000, tzinfo=datetime.UTC),
        ),
        (
            "2011-11-04 00:05:23.283+00:00",
            datetime.datetime(2011, 11, 4, 0, 5, 23, 283000, tzinfo=datetime.UTC),
        ),
        (
            "1994-11-05T13:15:30Z",
            datetime.datetime(1994, 11, 5, 13, 15, 30, tzinfo=datetime.UTC),
        ),
    ],
)
def test_parse_transaction_action_data_for_session_webhook_with_provided_time(
    event_time, expected_datetime
):
    """Every accepted ISO-8601 flavour in `time` parses to an aware UTC datetime."""
    # given
    # String literal avoids the Decimal-from-float precision pitfall.
    request_event_amount = Decimal("10.00")
    expected_psp_reference = "psp:122:222"
    event_amount = 12.00  # raw float on purpose: mimics a JSON number payload
    event_type = TransactionEventType.CHARGE_SUCCESS
    event_url = "http://localhost:3000/event/ref123"
    event_cause = "No cause"

    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
        "message": event_cause,
    }

    # when
    # The error message is irrelevant here — only the parsed time is checked.
    parsed_data, _ = parse_transaction_action_data_for_session_webhook(
        response_data, request_event_amount
    )

    # then
    assert isinstance(parsed_data, TransactionSessionResponse)
    assert parsed_data.event.time == expected_datetime
def test_parse_transaction_action_data_for_session_webhook_with_event_all_fields_provided():
    """A fully-populated payload round-trips into the parsed event unchanged."""
    # given
    # String literal avoids the Decimal-from-float precision pitfall.
    request_event_amount = Decimal("10.00")
    expected_psp_reference = "psp:122:222"
    event_amount = 12.00  # raw float on purpose: mimics a JSON number payload
    event_type = TransactionEventType.CHARGE_SUCCESS
    event_time = "2022-11-18T13:25:58.169685+00:00"
    event_url = "http://localhost:3000/event/ref123"
    event_cause = "No cause"

    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
        "message": event_cause,
    }

    # when
    parsed_data, error_msg = parse_transaction_action_data_for_session_webhook(
        response_data, request_event_amount
    )

    # then
    assert isinstance(parsed_data, TransactionSessionResponse)
    assert error_msg is None
    assert parsed_data.psp_reference == expected_psp_reference
    assert isinstance(parsed_data.event, TransactionRequestEventResponse)
    assert parsed_data.event.psp_reference == expected_psp_reference
    assert parsed_data.event.amount == event_amount
    assert parsed_data.event.time == datetime.datetime.fromisoformat(event_time)
    assert parsed_data.event.external_url == event_url
    assert parsed_data.event.message == event_cause
    assert parsed_data.event.type == event_type
def test_parse_transaction_action_data_for_session_webhook_with_incorrect_result():
    """A result type not valid for a session webhook is rejected with an error."""
    # given
    # String literal avoids the Decimal-from-float precision pitfall.
    request_event_amount = Decimal("10.00")
    expected_psp_reference = "psp:122:222"
    event_amount = 12.00  # raw float on purpose: mimics a JSON number payload
    event_type = TransactionEventType.REFUND_SUCCESS
    event_time = "2022-11-18T13:25:58.169685+00:00"
    event_url = "http://localhost:3000/event/ref123"
    event_cause = "No cause"

    response_data = {
        "pspReference": expected_psp_reference,
        "amount": event_amount,
        "result": event_type.upper(),
        "time": event_time,
        "externalUrl": event_url,
        "message": event_cause,
    }

    # when
    parsed_data, error_msg = parse_transaction_action_data_for_session_webhook(
        response_data, request_event_amount
    )

    # then
    assert parsed_data is None
    assert (
        f"Missing or invalid value for `result`: {response_data['result']}" in error_msg
    )
@freeze_time("2018-05-31 12:00:01")
def test_parse_transaction_action_data_for_session_webhook_with_event_only_mandatory_fields():
    """Only pspReference + result given; optional event fields get defaults."""
    # given
    psp_reference = "psp:122:222"
    amount = Decimal("10.00")
    response_data = {
        "pspReference": psp_reference,
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
    }

    # when
    parsed_data, _ = parse_transaction_action_data_for_session_webhook(
        response_data, amount
    )

    # then
    assert isinstance(parsed_data, TransactionSessionResponse)
    assert parsed_data.psp_reference == psp_reference
    event = parsed_data.event
    assert isinstance(event, TransactionRequestEventResponse)
    assert event.psp_reference == psp_reference
    assert event.type == TransactionEventType.CHARGE_SUCCESS
    assert event.amount == amount
    # freeze_time pins "now", so the default event time is deterministic.
    assert event.time == datetime.datetime.now(tz=datetime.UTC)
    assert event.external_url == ""
    assert event.message == ""
def test_parse_transaction_action_data_for_session_webhook_use_provided_amount_when_event_amount_is_missing():
    """When the payload has no `amount`, the request amount is used."""
    # given
    # String literal avoids the Decimal-from-float precision pitfall.
    request_event_amount = Decimal("10.00")

    response_data = {
        "pspReference": "123",
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
    }

    # when
    parsed_data, _ = parse_transaction_action_data_for_session_webhook(
        response_data, request_event_amount
    )

    # then
    assert isinstance(parsed_data, TransactionSessionResponse)
    assert parsed_data.event.amount == request_event_amount
def test_parse_transaction_action_data_for_session_webhook_skips_input_amount_when_event_has_amount():
    """When the payload carries its own `amount`, it overrides the request amount."""
    # given
    # String literals avoid the Decimal-from-float precision pitfall.
    request_event_amount = Decimal("10.00")
    expected_amount = Decimal("12.00")
    assert request_event_amount != expected_amount

    response_data = {
        "pspReference": "123",
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
        "amount": expected_amount,
    }

    # when
    parsed_data, _ = parse_transaction_action_data_for_session_webhook(
        response_data, request_event_amount
    )

    # then
    assert isinstance(parsed_data, TransactionSessionResponse)
    assert parsed_data.event.amount == expected_amount
@freeze_time("2018-05-31 12:00:01")
def test_parse_transaction_action_data_for_session_webhook_with_empty_response():
    """An empty payload cannot be parsed into a session response."""
    # given
    response_data = {}

    # when
    parsed_data, _ = parse_transaction_action_data_for_session_webhook(
        # String literal avoids the Decimal-from-float precision pitfall.
        response_data,
        Decimal("10.00"),
    )

    # then
    assert parsed_data is None
def test_parse_transaction_action_data_for_session_webhook_with_missing_optional_psp_reference():
    """Failure results do not require a pspReference, so parsing succeeds."""
    # given
    response_data = {"result": TransactionEventType.CHARGE_FAILURE.upper()}

    # when
    parsed_data, _ = parse_transaction_action_data_for_session_webhook(
        response_data, Decimal("10.00")
    )

    # then
    assert parsed_data
def test_parse_transaction_action_data_with_missing_mandatory_event_fields():
    """A payload with a pspReference but no `result` cannot be parsed."""
    # given
    response_data = {"pspReference": "psp:122:222"}

    # when
    parsed_data, _ = parse_transaction_action_data_for_session_webhook(
        response_data, Decimal("10.00")
    )

    # then
    assert parsed_data is None
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/payment/tests/test_utils/test_parse_transaction_action_data_for_session_webhook.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/core/management/commands/generate_json_schemas.py | import json
import os
import shutil
from typing import cast
from django.core.management.base import BaseCommand
from pydantic import BaseModel
from ....webhook.response_schemas import COMBINED_SCHEMAS_TO_EXPORT, SCHEMAS_TO_EXPORT
SCHEMA_OUTPUT_DIR = "saleor/json_schemas"
class Command(BaseCommand):
    """Generate JSON schema files for synchronous webhook responses.

    Rebuilds ``SCHEMA_OUTPUT_DIR`` from scratch on every run: the directory
    is removed, recreated, and repopulated with one ``<Title>.json`` file per
    exported schema (standalone and combined ``anyOf`` unions).
    """

    help = "Generate JSON schemas for synchronous webhooks responses."

    def handle(self, *args, **options):
        # Start from a clean slate so stale schema files never linger.
        self.clear_dir()
        os.makedirs(SCHEMA_OUTPUT_DIR, exist_ok=True)
        self.export_single_schemas()
        self.export_combined_schemas()

    def export_single_schemas(self):
        """Write one JSON file per standalone schema in SCHEMAS_TO_EXPORT."""
        for schema_data in SCHEMAS_TO_EXPORT:
            title, schema_cls = schema_data["title"], schema_data["schema"]
            title = cast(str, title)
            schema_cls = cast(type[BaseModel], schema_cls)
            schema = schema_cls.model_json_schema()
            schema["title"] = self.get_schema_title(schema_cls)
            self.write_schema_to_file(schema, title)

    def export_combined_schemas(self):
        """Write one JSON file per `anyOf` union in COMBINED_SCHEMAS_TO_EXPORT."""
        for combined_schema in COMBINED_SCHEMAS_TO_EXPORT:
            title, schemas_cls = combined_schema["title"], combined_schema["schemas"]
            title = cast(str, title)
            defs: dict[str, dict] = {}
            schemas = self.get_schemas(schemas_cls, defs)
            combined_schema_dict = {
                "title": title,
                "anyOf": schemas,
            }
            # Omit the $defs key entirely when no sub-schema contributed any.
            if defs:
                combined_schema_dict["$defs"] = defs
            self.write_schema_to_file(combined_schema_dict, title)

    def get_schemas(self, schemas_cls, merged_defs):
        """Return JSON schemas for *schemas_cls*, hoisting `$defs` into *merged_defs*.

        NOTE(review): duplicate definition names across schemas overwrite each
        other in *merged_defs* — assumed unique upstream; confirm when adding
        new schemas.
        """
        schemas = []
        for cls in schemas_cls:
            schema = cls.model_json_schema()
            # Move $defs to the top level so the combined document has a
            # single shared definitions section.
            merged_defs.update(schema.pop("$defs", {}))
            schema["title"] = self.get_schema_title(cls)
            schemas.append(schema)
        return schemas

    @staticmethod
    def get_schema_title(schema_cls: type[BaseModel]) -> str:
        """Return the class name with any trailing `Schema` suffix removed."""
        # str.removesuffix is already a no-op when the suffix is absent,
        # so no endswith() guard is needed.
        return schema_cls.__name__.removesuffix("Schema")

    def write_schema_to_file(self, schema: dict, title: str):
        """Serialize *schema* as pretty-printed JSON into `<title>.json`."""
        file_name = f"{title}.json"
        path = os.path.join(SCHEMA_OUTPUT_DIR, file_name)
        with open(path, "w") as f:
            json.dump(schema, f, indent=2)
            f.write("\n")  # trailing newline keeps diffs and linters happy
        self.stdout.write(self.style.SUCCESS(f"Generated {path}"))

    @staticmethod
    def clear_dir():
        """Delete the schema output directory if it exists."""
        if os.path.exists(SCHEMA_OUTPUT_DIR):
            shutil.rmtree(SCHEMA_OUTPUT_DIR)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/core/management/commands/generate_json_schemas.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/webhook/transport/payment.py | import decimal
from typing import Any
from ...app.models import App
from ...payment.interface import (
GatewayResponse,
PaymentData,
PaymentGateway,
PaymentMethodInfo,
)
from .utils import to_payment_app_id
def parse_list_payment_gateways_response(
    response_data: Any, app: "App"
) -> list["PaymentGateway"]:
    """Build PaymentGateway objects from a list-payment-gateways webhook payload.

    Non-list payloads yield an empty result; entries without an ``id`` are
    skipped, since they cannot be addressed as app gateways later.
    """
    if not isinstance(response_data, list):
        return []

    gateways: list[PaymentGateway] = []
    for entry in response_data:
        external_id = entry.get("id")
        if not external_id:
            continue
        gateways.append(
            PaymentGateway(
                id=to_payment_app_id(app, external_id),
                name=entry.get("name"),
                currencies=entry.get("currencies"),
                config=entry.get("config"),
            )
        )
    return gateways
def parse_payment_action_response(
    payment_information: "PaymentData",
    response_data: Any,
    transaction_kind: "str",
) -> "GatewayResponse":
    """Translate a payment-webhook response payload into a GatewayResponse.

    The call is considered successful when the payload carries no ``error``.
    Optional ``payment_method`` details and an overriding ``amount`` are taken
    from the payload when present; all other fields fall back to sensible
    defaults or to ``payment_information``.
    """
    error = response_data.get("error")
    is_success = not error

    payment_method_info = None
    payment_method_data = response_data.get("payment_method")
    if payment_method_data:
        payment_method_info = PaymentMethodInfo(
            brand=payment_method_data.get("brand"),
            exp_month=payment_method_data.get("exp_month"),
            exp_year=payment_method_data.get("exp_year"),
            last_4=payment_method_data.get("last_4"),
            name=payment_method_data.get("name"),
            type=payment_method_data.get("type"),
        )

    amount = payment_information.amount
    if "amount" in response_data:
        try:
            # Convert via str() so float payloads (typical for JSON numbers)
            # do not drag binary-float noise into the Decimal —
            # Decimal(12.1) != Decimal("12.1"). Unparsable values keep the
            # originally requested amount (best-effort, as before).
            amount = decimal.Decimal(str(response_data["amount"]))
        except decimal.DecimalException:
            pass

    return GatewayResponse(
        action_required=response_data.get("action_required", False),
        action_required_data=response_data.get("action_required_data"),
        amount=amount,
        currency=payment_information.currency,
        customer_id=response_data.get("customer_id"),
        error=error,
        is_success=is_success,
        kind=response_data.get("kind", transaction_kind),
        payment_method_info=payment_method_info,
        raw_response=response_data,
        psp_reference=response_data.get("psp_reference"),
        transaction_id=response_data.get("transaction_id", ""),
        transaction_already_processed=response_data.get(
            "transaction_already_processed", False
        ),
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/transport/payment.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/discount/tests/test_utils/test_get_customer_email_for_voucher_usage.py | from ...utils.voucher import get_customer_email_for_voucher_usage
def test_get_customer_email_for_voucher_usage_for_checkout_info_without_user_data(
    checkout_info, customer_user
):
    """No user and no email anywhere on the checkout info -> None."""
    # given: strip every piece of user/email data
    checkout_info.user = None
    checkout_info.checkout.user = None
    checkout_info.checkout.email = None

    # when / then
    assert get_customer_email_for_voucher_usage(checkout_info) is None
def test_get_customer_email_for_voucher_usage_for_checkout_info_with_user(
    checkout_info, customer_user
):
    """When the checkout info carries a user, that user's email is returned."""
    # given
    checkout_info.user = customer_user
    checkout_info.checkout.save()

    # when
    email = get_customer_email_for_voucher_usage(checkout_info)

    # then
    assert email == customer_user.email
def test_get_customer_email_for_voucher_usage_for_checkout_info_without_user(
    checkout_info,
):
    """With no user attached, the checkout's own email is used."""
    # given
    checkout_info.user = None
    checkout_info.checkout.email = "test@example.com"

    # when
    email = get_customer_email_for_voucher_usage(checkout_info)

    # then
    assert email == "test@example.com"
def test_get_customer_email_for_voucher_usage_for_checkout_with_user(
    checkout, customer_user
):
    """The attached user's email takes precedence over the checkout email."""
    # given: both an email and a user are present
    checkout.user = customer_user
    checkout.email = "checkout@example.com"
    checkout.save()

    # when / then
    assert get_customer_email_for_voucher_usage(checkout) == customer_user.email
def test_get_customer_email_for_voucher_usage_for_checkout_without_user(checkout):
    """Without a user, the checkout's own email is returned as-is."""
    # given
    fallback_email = "checkout@example.com"
    checkout.user = None
    checkout.email = fallback_email
    checkout.save()

    # when / then
    assert get_customer_email_for_voucher_usage(checkout) == fallback_email
def test_get_customer_email_for_voucher_usage_for_checkout_without_user_details(
    checkout,
):
    """A checkout with neither user nor email yields None."""
    # given
    checkout.user = None
    checkout.email = None
    checkout.save()

    # when / then
    assert get_customer_email_for_voucher_usage(checkout) is None
def test_get_customer_email_for_voucher_usage_for_order_with_user(order, customer_user):
    """For orders, the attached user's email beats the stored user_email."""
    # given: both a user_email and a user are present
    order.user = customer_user
    order.user_email = "order@example.com"
    order.save()

    # when / then
    assert get_customer_email_for_voucher_usage(order) == customer_user.email
def test_get_customer_email_for_voucher_usage_for_order_without_user(order):
    """Without a user, the order falls back to its user_email field."""
    # given
    fallback_email = "order@example.com"
    order.user = None
    order.user_email = fallback_email
    order.save()

    # when / then
    assert get_customer_email_for_voucher_usage(order) == fallback_email
def test_get_customer_email_for_voucher_usage_for_order_without_user_details(order):
    """An order with no user and an empty user_email yields None."""
    # given
    order.user = None
    order.user_email = ""
    order.save()

    # when / then
    assert get_customer_email_for_voucher_usage(order) is None
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/discount/tests/test_utils/test_get_customer_email_for_voucher_usage.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/checkout/tests/benchmark/test_checkout.py | import pytest
from ....core.utils import to_global_id_or_none
from ....tests.utils import get_graphql_content
CHECKOUT_DETAILS_QUERY = """
query checkout($id: ID){
checkout(
id:$id
) {
shippingMethods{
active
message
}
lines{
id
variant{
product{
name
}
}
totalPrice{
gross{
amount
}
net{
amount
}
}
}
totalPrice{
gross{
amount
}
}
subtotalPrice{
gross{
amount
}
}
shippingPrice{
net{
amount
}
gross{
amount
}
}
}
}
"""
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_user_checkout_details(
    user_api_client,
    checkouts_for_benchmarks,
    count_queries,
):
    """Benchmark: query count for fetching one checkout's full details."""
    # given
    checkout_id = to_global_id_or_none(checkouts_for_benchmarks[0])
    variables = {"id": checkout_id}

    # when
    response = user_api_client.post_graphql(CHECKOUT_DETAILS_QUERY, variables=variables)
    content = get_graphql_content(response)

    # then
    assert content["data"]["checkout"] is not None
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/checkout/tests/benchmark/test_checkout.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/order/tests/queries/test_order_with_where.py | import datetime
from uuid import uuid4
import graphene
import pytest
from django.utils import timezone
from freezegun import freeze_time
from .....account.models import Address
from .....core.postgres import FlatConcatSearchVector
from .....giftcard.events import gift_cards_bought_event, gift_cards_used_in_order_event
from .....invoice.models import Invoice
from .....order import (
FulfillmentStatus,
OrderAuthorizeStatus,
OrderChargeStatus,
OrderEvents,
OrderStatus,
)
from .....order.models import FulfillmentLine, Order, OrderEvent, OrderLine
from .....order.search import prepare_order_search_vector_value
from .....warehouse.models import Stock
from ....core.utils import to_global_id_or_none
from ....tests.utils import get_graphql_content, get_graphql_content_from_response
@pytest.fixture
def orders_with_fulfillments(
    order_list, warehouses, order_lines_generator, product_variant_list
):
    """Give each order in ``order_list`` one fulfillment with two lines.

    Fulfillments get distinct statuses (FULFILLED/REFUNDED/RETURNED) and
    distinct metadata payloads so tests can filter on either; both lines of
    every fulfillment are backed by two shared stocks sized to cover all
    orders.
    """
    statuses = [
        FulfillmentStatus.FULFILLED,
        FulfillmentStatus.REFUNDED,
        FulfillmentStatus.RETURNED,
    ]
    metadata_values = [
        {"foo": "bar"},
        {"foo": "zaz"},
        {},
    ]
    variant_1 = product_variant_list[0]
    variant_2 = product_variant_list[1]
    variant_1_quantity = 10
    variant_2_quantity = 5
    # Stocks sized for every order so quantities can never run dry.
    stock_1, stock_2 = Stock.objects.bulk_create(
        [
            Stock(
                product_variant=variant_1,
                warehouse=warehouses[0],
                quantity=variant_1_quantity * len(order_list),
            ),
            Stock(
                product_variant=variant_2,
                warehouse=warehouses[1],
                quantity=variant_2_quantity * len(order_list),
            ),
        ]
    )
    # strict=True guards against the fixture lists drifting out of sync.
    for order, status, metadata in zip(
        order_list, statuses, metadata_values, strict=True
    ):
        fulfillment = order.fulfillments.create(
            tracking_number="123", status=status, metadata=metadata
        )
        line_1, line_2 = order_lines_generator(
            order,
            [variant_1, variant_2],
            [10, 20],
            [variant_1_quantity, variant_2_quantity],
            create_allocations=False,
        )
        fulfillment.lines.create(
            order_line=line_1, quantity=line_1.quantity, stock=stock_1
        )
        fulfillment.lines.create(
            order_line=line_2, quantity=line_2.quantity, stock=stock_2
        )
    return order_list
def test_order_query_with_filter_and_where(
    staff_api_client,
    permission_group_manage_orders,
    orders,
):
    """`filter` and `where` are mutually exclusive on the orders query."""
    # given
    query = """
        query ($where: OrderWhereInput!, $filter: OrderFilterInput!) {
            orders(first: 10, where: $where, filter: $filter) {
                totalCount
                edges {
                    node {
                        id
                    }
                }
            }
        }
    """
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {
        "where": {
            "status": {
                "eq": OrderStatus.UNFULFILLED.upper(),
            },
        },
        "filter": {
            "search": "test",
        },
    }

    # when
    response = staff_api_client.post_graphql(query, variables)

    # then
    content = get_graphql_content_from_response(response)
    assert (
        content["errors"][0]["message"]
        == "Only one filtering argument (filter or where) can be specified."
    )
    assert not content["data"]["orders"]
ORDERS_WHERE_QUERY = """
query($where: OrderWhereInput!, $search: String) {
orders(first: 10, search: $search, where: $where) {
edges {
node {
id
number
created
updatedAt
}
}
}
}
"""
def test_order_filter_by_ids(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    channel_USD,
    channel_PLN,
):
    """Filtering by a list of global IDs returns exactly those orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    selected = order_list[:2]
    variables = {
        "where": {
            "ids": [graphene.Node.to_global_id("Order", o.pk) for o in selected]
        }
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(o.number) for o in selected
    }
def test_order_filter_by_none_as_ids(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`ids: null` matches no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)

    # when
    response = staff_api_client.post_graphql(
        ORDERS_WHERE_QUERY, {"where": {"ids": None}}
    )

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_by_ids_empty_list(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """An empty `ids` list matches no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)

    # when
    response = staff_api_client.post_graphql(
        ORDERS_WHERE_QUERY, {"where": {"ids": []}}
    )

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
# NOTE(review): the timezone.now() calls in the parametrize data are evaluated
# at collection time, while the orders below are created under freeze_time —
# the expected indexes account for that offset; confirm before editing ranges.
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        (
            {
                "gte": (timezone.now() + datetime.timedelta(days=3)).isoformat(),
                "lte": (timezone.now() + datetime.timedelta(days=25)).isoformat(),
            },
            [1, 2],
        ),
        (
            {
                "gte": (timezone.now() + datetime.timedelta(days=5)).isoformat(),
            },
            [1, 2],
        ),
        (
            {
                "lte": (timezone.now() + datetime.timedelta(days=25)).isoformat(),
            },
            [0, 1, 2],
        ),
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=25)).isoformat(),
            },
            [],
        ),
        # Missing/None bounds make the range filter match nothing.
        (None, []),
        ({"gte": None}, []),
        ({"lte": None}, []),
        ({"lte": None, "gte": None}, []),
        ({}, []),
    ],
)
def test_orders_filter_by_created_at(
    where,
    indexes,
    order,
    order_generator,
    staff_api_client,
    permission_group_manage_orders,
    channel_USD,
):
    """`createdAt` range filtering returns the orders created inside the range."""
    # given: two extra orders created 5 and 10 days in the future
    with freeze_time((timezone.now() + datetime.timedelta(days=5)).isoformat()):
        order_2 = order_generator()

    with freeze_time((timezone.now() + datetime.timedelta(days=10)).isoformat()):
        order_3 = order_generator()

    order_list = [order, order_2, order_3]

    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"createdAt": where}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
# NOTE(review): timezone.now() in the parametrize data runs at collection
# time, slightly before the updated_at values set in the body — the expected
# indexes account for that; confirm before editing ranges.
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        (
            {
                "gte": (timezone.now() + datetime.timedelta(days=3)).isoformat(),
                "lte": (timezone.now() + datetime.timedelta(days=25)).isoformat(),
            },
            [0, 1],
        ),
        (
            {
                "gte": (timezone.now() + datetime.timedelta(days=5)).isoformat(),
            },
            [0],
        ),
        (
            {
                "lte": (timezone.now() + datetime.timedelta(days=25)).isoformat(),
            },
            [0, 1, 2],
        ),
        (
            {
                "lte": (timezone.now() - datetime.timedelta(days=25)).isoformat(),
            },
            [],
        ),
        # Missing/None bounds make the range filter match nothing.
        (None, []),
        ({"gte": None}, []),
        ({"lte": None}, []),
        ({"lte": None, "gte": None}, []),
        ({}, []),
    ],
)
def test_orders_filter_by_updated_at(
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    channel_USD,
):
    """`updatedAt` range filtering returns the orders updated inside the range."""
    # given: spread the three orders' update times 15/3/1 days into the future
    order_list[0].updated_at = timezone.now() + datetime.timedelta(days=15)
    order_list[1].updated_at = timezone.now() + datetime.timedelta(days=3)
    order_list[2].updated_at = timezone.now() + datetime.timedelta(days=1)
    Order.objects.bulk_update(order_list, ["updated_at"])

    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"updatedAt": where}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
def test_order_filter_by_users(
    staff_api_client, permission_group_manage_orders, order_list, user_list, channel_USD
):
    """`user: {oneOf: [...]}` matches orders owned by any of the given users."""
    # given: assign the first three users to the three orders
    # (also avoids the original's loop variable shadowing the `type` builtin)
    for target_order, owner in zip(order_list, user_list[:3]):
        target_order.user = owner
    Order.objects.bulk_update(order_list, ["user"])

    permission_group_manage_orders.user_set.add(staff_api_client.user)
    user_ids = [graphene.Node.to_global_id("User", u.pk) for u in user_list[:2]]
    variables = {"where": {"user": {"oneOf": user_ids}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order_list[0].number),
        str(order_list[1].number),
    }
def test_order_filter_by_user(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD, user_list
):
    """`user: {eq: id}` returns only that user's orders."""
    # given
    owner = user_list[0]
    order_list[0].user = owner
    order_list[0].save(update_fields=["user"])

    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {
        "where": {"user": {"eq": graphene.Node.to_global_id("User", owner.pk)}},
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(order_list[0].number)
def test_order_filter_by_none_as_user(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD, user_list
):
    """`user: {eq: null}` matches no orders."""
    # given
    order_list[0].user = user_list[0]
    order_list[0].save(update_fields=["user"])

    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"user": {"eq": None}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_by_user_emails(
    staff_api_client, permission_group_manage_orders, order_list, user_list, channel_USD
):
    """`userEmail: {oneOf: [...]}` matches orders with any of the given emails."""
    # given: copy the first three users' emails onto the three orders
    for target_order, owner in zip(order_list, user_list[:3]):
        target_order.user_email = owner.email
    Order.objects.bulk_update(order_list, ["user_email"])

    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {
        "where": {"userEmail": {"oneOf": [user_list[1].email, user_list[2].email]}},
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order_list[1].number),
        str(order_list[2].number),
    }
def test_order_filter_by_user_email(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD, user_list
):
    """`userEmail: {eq: ...}` returns only the order with that email."""
    # given
    target_email = user_list[0].email
    order_list[1].user_email = target_email
    order_list[1].save(update_fields=["user_email"])

    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"userEmail": {"eq": target_email}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(order_list[1].number)
def test_order_filter_by_none_as_user_email(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD, user_list
):
    """`userEmail: {eq: null}` matches no orders."""
    # given
    order_list[0].user_email = user_list[0].email
    order_list[0].save(update_fields=["user_email"])

    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"userEmail": {"eq": None}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_by_numbers(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`number: {oneOf: [...]}` matches the orders with the listed numbers."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    selected = order_list[1:3]
    variables = {"where": {"number": {"oneOf": [o.number for o in selected]}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(o.number) for o in selected
    }
def test_order_filter_by_numbers_range(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`number: {range: {lte}}` returns the orders with the lowest numbers."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    second_lowest_number = Order.objects.order_by("number")[1].number
    variables = {"where": {"number": {"range": {"lte": second_lowest_number}}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order_list[0].number),
        str(order_list[1].number),
    }
def test_order_filter_by_number(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    order,
    channel_USD,
    user_list,
):
    """`number: {eq: ...}` returns exactly the order with that number."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"number": {"eq": order.number}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(order.number)
def test_order_filter_by_none_as_number(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`number.eq: null` matches no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"number": {"eq": None}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_by_number_nothing_returned(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`number.eq` with a number no order has returns an empty result."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"number": {"eq": "11111111"}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_by_channel_id(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    channel_USD,
    channel_PLN,
):
    """`channelId.eq` returns only the orders placed in that channel."""
    # given
    order_list[0].channel = channel_USD
    order_list[1].channel = channel_PLN
    order_list[2].channel = channel_USD
    Order.objects.bulk_update(order_list, ["channel"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    usd_channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
    payload = {"where": {"channelId": {"eq": usd_channel_id}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order_list[0].number),
        str(order_list[2].number),
    }
def test_order_filter_by_channel_ids(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    channel_USD,
    channel_PLN,
):
    """`channelId.oneOf` covering every channel returns all orders."""
    # given
    order_list[0].channel = channel_USD
    order_list[1].channel = channel_PLN
    order_list[2].channel = channel_USD
    Order.objects.bulk_update(order_list, ["channel"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    channel_ids = [
        graphene.Node.to_global_id("Channel", channel.id)
        for channel in (channel_USD, channel_PLN)
    ]
    payload = {"where": {"channelId": {"oneOf": channel_ids}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 3
    assert {edge["node"]["number"] for edge in edges} == {
        str(order.number) for order in order_list
    }
def test_order_filter_by_channel_id_none(
    staff_api_client,
    permission_group_manage_orders,
    order_list,
    channel_USD,
    channel_PLN,
):
    """`channelId.eq: null` matches no orders."""
    # given
    order_list[0].channel = channel_USD
    order_list[1].channel = channel_PLN
    order_list[2].channel = channel_USD
    Order.objects.bulk_update(order_list, ["channel"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"channelId": {"eq": None}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        ({"eq": OrderAuthorizeStatus.FULL.upper()}, [0]),
        ({"eq": OrderAuthorizeStatus.PARTIAL.upper()}, [1]),
        ({"oneOf": [OrderAuthorizeStatus.NONE.upper()]}, [2]),
        (
            {
                "oneOf": [
                    OrderAuthorizeStatus.FULL.upper(),
                    OrderAuthorizeStatus.PARTIAL.upper(),
                ]
            },
            [0, 1],
        ),
        ({"oneOf": [OrderAuthorizeStatus.FULL.upper()]}, [0]),
        # empty / null filter variants are expected to match nothing
        ({}, []),
        ({"oneOf": []}, []),
        ({"eq": None}, []),
        (None, []),
    ],
)
def test_orders_filter_by_authorize_status(
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    channel_USD,
):
    """Filtering by `authorizeStatus` returns the orders at `indexes`.

    Each order in `order_list` is given a distinct authorize status
    (FULL, PARTIAL, NONE) so every parametrized case selects a known subset.
    """
    # given
    order_list[0].authorize_status = OrderAuthorizeStatus.FULL
    order_list[1].authorize_status = OrderAuthorizeStatus.PARTIAL
    order_list[2].authorize_status = OrderAuthorizeStatus.NONE
    Order.objects.bulk_update(order_list, ["authorize_status"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"authorizeStatus": where}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        # no order has charge status FULL (order 0 is OVERCHARGED)
        ({"eq": OrderChargeStatus.FULL.upper()}, []),
        ({"eq": OrderChargeStatus.PARTIAL.upper()}, [1]),
        ({"oneOf": [OrderChargeStatus.NONE.upper()]}, [2]),
        (
            {
                "oneOf": [
                    OrderChargeStatus.FULL.upper(),
                    OrderChargeStatus.PARTIAL.upper(),
                ]
            },
            [1],
        ),
        ({"eq": OrderChargeStatus.OVERCHARGED.upper()}, [0]),
        # empty / null filter variants are expected to match nothing
        ({}, []),
        ({"oneOf": []}, []),
        ({"eq": None}, []),
        (None, []),
    ],
)
def test_orders_filter_by_charge_status(
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    channel_USD,
):
    """Filtering by `chargeStatus` returns the orders at `indexes`.

    Orders are assigned OVERCHARGED, PARTIAL and NONE respectively, so every
    parametrized case selects a known subset.
    """
    # given
    order_list[0].charge_status = OrderChargeStatus.OVERCHARGED
    order_list[1].charge_status = OrderChargeStatus.PARTIAL
    order_list[2].charge_status = OrderChargeStatus.NONE
    Order.objects.bulk_update(order_list, ["charge_status"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"chargeStatus": where}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        ({"eq": OrderStatus.UNFULFILLED.upper()}, [0]),
        ({"eq": OrderStatus.UNCONFIRMED.upper()}, [1]),
        ({"oneOf": [OrderStatus.FULFILLED.upper()]}, [2]),
        (
            {"oneOf": [OrderStatus.UNFULFILLED.upper(), OrderStatus.CANCELED.upper()]},
            [0],
        ),
        # no order has the EXPIRED status
        ({"eq": OrderStatus.EXPIRED.upper()}, []),
        # empty / null filter variants are expected to match nothing
        ({}, []),
        ({"oneOf": []}, []),
        ({"eq": None}, []),
        (None, []),
    ],
)
def test_orders_filter_by_status(
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    channel_USD,
):
    """Filtering by `status` returns the orders at `indexes`.

    Orders are set to UNFULFILLED, UNCONFIRMED and FULFILLED respectively,
    so every parametrized case selects a known subset.
    """
    # given
    order_list[0].status = OrderStatus.UNFULFILLED
    order_list[1].status = OrderStatus.UNCONFIRMED
    order_list[2].status = OrderStatus.FULFILLED
    Order.objects.bulk_update(order_list, ["status"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"status": where}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
def test_order_filter_by_checkout_tokens(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`checkoutToken.oneOf` returns only orders with the listed tokens."""
    # given
    tokens = [uuid4() for _ in order_list]
    for order, token in zip(order_list, tokens, strict=True):
        order.checkout_token = token
    Order.objects.bulk_update(order_list, ["checkout_token"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"checkoutToken": {"oneOf": [tokens[0], tokens[2]]}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order_list[0].number),
        str(order_list[2].number),
    }
def test_order_filter_by_checkout_token(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`checkoutToken.eq` returns the one order carrying that token."""
    # given
    checkout_token = uuid4()
    order_list[0].checkout_token = checkout_token
    order_list[0].save(update_fields=["checkout_token"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"checkoutToken": {"eq": checkout_token}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(order_list[0].number)
def test_order_filter_by_none_as_checkout_token(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`checkoutToken.eq: null` matches no orders."""
    # given
    order_list[0].checkout_token = uuid4()
    order_list[0].save(update_fields=["checkout_token"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"checkoutToken": {"eq": None}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_by_checkout_ids(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`checkoutId.oneOf` returns orders whose checkout tokens match the IDs."""
    # given
    tokens = [uuid4() for _ in order_list]
    for order, token in zip(order_list, tokens, strict=True):
        order.checkout_token = token
    Order.objects.bulk_update(order_list, ["checkout_token"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    checkout_ids = [
        graphene.Node.to_global_id("Checkout", token)
        for token in (tokens[0], tokens[2])
    ]
    payload = {"where": {"checkoutId": {"oneOf": checkout_ids}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order_list[0].number),
        str(order_list[2].number),
    }
def test_order_filter_by_checkout_id(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`checkoutId.eq` returns the one order whose checkout token matches."""
    # given
    checkout_token = uuid4()
    order_list[0].checkout_token = checkout_token
    order_list[0].save(update_fields=["checkout_token"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    checkout_id = graphene.Node.to_global_id("Checkout", checkout_token)
    payload = {"where": {"checkoutId": {"eq": checkout_id}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(order_list[0].number)
def test_order_filter_by_none_as_checkout_id(
    staff_api_client, permission_group_manage_orders, order_list, channel_USD
):
    """`checkoutId.eq: null` matches no orders."""
    # given
    order_list[0].checkout_token = uuid4()
    order_list[0].save(update_fields=["checkout_token"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"checkoutId": {"eq": None}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_is_click_and_collect_true(
    staff_api_client,
    permission_group_manage_orders,
    order_list_with_cc_orders,
):
    """`isClickAndCollect: true` returns orders with a collection point set."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"isClickAndCollect": True}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    cc_orders = [
        order
        for order in order_list_with_cc_orders
        if order.collection_point or order.collection_point_name
    ]
    assert len(edges) == len(cc_orders)
    assert {edge["node"]["id"] for edge in edges} == {
        graphene.Node.to_global_id("Order", order.pk) for order in cc_orders
    }
def test_order_filter_is_click_and_collect_false(
    staff_api_client,
    permission_group_manage_orders,
    order_list_with_cc_orders,
):
    """`isClickAndCollect: false` returns non-C&C, non-draft orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"isClickAndCollect": False}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    non_cc_orders = [
        order
        for order in order_list_with_cc_orders
        if not order.collection_point
        and not order.collection_point_name
        and order.status != OrderStatus.DRAFT
    ]
    assert len(edges) == len(non_cc_orders)
    assert {edge["node"]["id"] for edge in edges} == {
        graphene.Node.to_global_id("Order", order.pk) for order in non_cc_orders
    }
def test_order_filter_is_click_and_collect_none(
    staff_api_client,
    permission_group_manage_orders,
    order_list_with_cc_orders,
):
    """`isClickAndCollect: null` matches no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"isClickAndCollect": None}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_gift_card_used_true(
    staff_api_client,
    permission_group_manage_orders,
    gift_card,
    orders,
):
    """`isGiftCardUsed: true` returns only the order with a gift-card-used event."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    order_with_gift_card = orders[0]
    gift_cards_used_in_order_event(
        [(gift_card, 20.0)], order_with_gift_card, staff_api_client.user, None
    )
    payload = {"where": {"isGiftCardUsed": True}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    expected_id = graphene.Node.to_global_id("Order", order_with_gift_card.id)
    assert edges[0]["node"]["id"] == expected_id
def test_order_filter_gift_card_used_false(
    staff_api_client,
    permission_group_manage_orders,
    gift_card,
    orders,
):
    """`isGiftCardUsed: false` excludes the order with a gift-card-used event."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    order_with_gift_card = orders[0]
    excluded_id = graphene.Node.to_global_id("Order", order_with_gift_card.id)
    gift_cards_used_in_order_event(
        [(gift_card, 20.0)], order_with_gift_card, staff_api_client.user, None
    )
    payload = {"where": {"isGiftCardUsed": False}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    returned_ids = {
        edge["node"]["id"] for edge in content["data"]["orders"]["edges"]
    }
    assert excluded_id not in returned_ids
def test_order_filter_gift_card_used_none(
    staff_api_client,
    permission_group_manage_orders,
    gift_card,
    orders,
):
    """`isGiftCardUsed: null` matches no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    order_with_gift_card = orders[0]
    gift_cards_used_in_order_event(
        [(gift_card, 20.0)], order_with_gift_card, staff_api_client.user, None
    )
    payload = {"where": {"isGiftCardUsed": None}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_order_filter_gift_card_bough_true(
    staff_api_client,
    permission_group_manage_orders,
    gift_card,
    orders,
):
    """`isGiftCardBought: true` returns only the order that bought a gift card.

    NOTE(review): the function name has a typo ("bough" -> "bought"); kept
    unchanged to preserve the existing test ID.
    """
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    buying_order = orders[-1]
    gift_cards_bought_event([gift_card], buying_order, staff_api_client.user, None)
    payload = {"where": {"isGiftCardBought": True}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["id"] == graphene.Node.to_global_id(
        "Order", buying_order.id
    )
def test_order_filter_gift_card_bought_false(
    staff_api_client,
    permission_group_manage_orders,
    gift_card,
    orders,
):
    """`isGiftCardBought: false` excludes the order that bought a gift card."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    buying_order = orders[-1]
    excluded_id = graphene.Node.to_global_id("Order", buying_order.id)
    gift_cards_bought_event([gift_card], buying_order, staff_api_client.user, None)
    payload = {"where": {"isGiftCardBought": False}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    returned_ids = {
        edge["node"]["id"] for edge in content["data"]["orders"]["edges"]
    }
    assert excluded_id not in returned_ids
def test_order_filter_gift_card_bought_none(
    staff_api_client,
    permission_group_manage_orders,
    gift_card,
    orders,
):
    """`isGiftCardBought: null` does not return the gift-card-buying order."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    buying_order = orders[-1]
    excluded_id = graphene.Node.to_global_id("Order", buying_order.id)
    gift_cards_bought_event([gift_card], buying_order, staff_api_client.user, None)
    payload = {"where": {"isGiftCardBought": None}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    returned_ids = {
        edge["node"]["id"] for edge in content["data"]["orders"]["edges"]
    }
    assert excluded_id not in returned_ids
def test_order_filter_with_search_and_charge_status(
    staff_api_client,
    permission_group_manage_orders,
    orders,
    customer_user,
):
    """The `search` argument and the `chargeStatus` filter combine with AND."""
    # given
    customer_user.first_name = "Search test Saleor"
    customer_user.save()
    # all but the last order become searchable via the customer's name
    for order in orders[:-1]:
        order.user = customer_user
        order.search_vector = FlatConcatSearchVector(
            *prepare_order_search_vector_value(order)
        )
    fully_charged = [orders[0], orders[2]]
    for order in fully_charged:
        order.charge_status = OrderChargeStatus.FULL
    orders[1].charge_status = OrderChargeStatus.PARTIAL
    # fully charged but not searchable - must be excluded by the search term
    orders[-1].charge_status = OrderChargeStatus.FULL
    Order.objects.bulk_update(orders, ["search_vector", "user", "charge_status"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {
        "search": "test",
        "where": {
            "chargeStatus": {"eq": OrderChargeStatus.FULL.upper()},
        },
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order.number) for order in fully_charged
    }
def test_orders_filter_by_voucher_code_eq(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    voucher_with_many_codes,
):
    """`voucherCode.eq` returns the one order that used that code."""
    # given
    codes = voucher_with_many_codes.codes.all()
    order_list[0].voucher_code = codes[0].code
    order_list[1].voucher_code = codes[1].code
    order_list[1].voucher = voucher_with_many_codes
    order_list[2].voucher_code = codes[2].code
    Order.objects.bulk_update(order_list, ["voucher_code", "voucher"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"voucherCode": {"eq": codes[0].code}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(order_list[0].number)
def test_orders_filter_by_voucher_code_one_of(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    voucher_with_many_codes,
):
    """`voucherCode.oneOf` returns every order that used one of the codes."""
    # given
    codes = voucher_with_many_codes.codes.all()
    order_list[0].voucher_code = codes[0].code
    order_list[1].voucher_code = codes[1].code
    order_list[1].voucher = voucher_with_many_codes
    order_list[2].voucher_code = codes[2].code
    Order.objects.bulk_update(order_list, ["voucher_code", "voucher"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"voucherCode": {"oneOf": [codes[1].code, codes[2].code]}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    assert {edge["node"]["number"] for edge in edges} == {
        str(order_list[1].number),
        str(order_list[2].number),
    }
@pytest.mark.parametrize(
    "where",
    [
        {},
        {"oneOf": []},
        {"eq": None},
        None,
    ],
)
def test_orders_filter_by_voucher_code_empty_value(
    where,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    voucher_with_many_codes,
):
    """Empty / null `voucherCode` filter variants match no orders.

    All three orders do carry voucher codes, so any match would indicate the
    empty filter is not handled as "match nothing".
    """
    # given
    codes = voucher_with_many_codes.codes.all()
    order_list[0].voucher_code = codes[0].code
    order_list[1].voucher_code = codes[1].code
    order_list[1].voucher = voucher_with_many_codes
    order_list[2].voucher_code = codes[2].code
    Order.objects.bulk_update(order_list, ["voucher_code", "voucher"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"voucherCode": where}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == 0
def test_orders_filter_by_has_invoices_true(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """`hasInvoices: true` returns exactly the orders with an invoice."""
    # given
    invoiced_orders = order_list[1:]
    for order in invoiced_orders:
        Invoice.objects.create(order=order)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"hasInvoices": True}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(invoiced_orders)
    assert {edge["node"]["number"] for edge in edges} == {
        str(order.number) for order in invoiced_orders
    }
def test_orders_filter_by_has_invoices_false(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """`hasInvoices: false` returns only the order without an invoice."""
    # given
    for order in order_list[1:]:
        Invoice.objects.create(order=order)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"hasInvoices": False}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert {edge["node"]["number"] for edge in edges} == {str(order_list[0].number)}
def test_orders_filter_by_has_invoices_none(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """`hasInvoices: null` matches no orders."""
    # given
    for order in order_list[1:]:
        Invoice.objects.create(order=order)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"hasInvoices": None}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
# NOTE(review): the timestamps below are evaluated once at collection time,
# while the invoices are created at run time under freeze_time relative to a
# fresh timezone.now() — the small skew is harmless for day-granularity
# bounds, but worth confirming if tighter ranges are ever added.
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        (
            [
                {
                    "createdAt": {
                        "lte": (
                            timezone.now() - datetime.timedelta(days=3)
                        ).isoformat(),
                        "gte": (
                            timezone.now() - datetime.timedelta(days=25)
                        ).isoformat(),
                    }
                },
                {
                    "createdAt": {
                        "gte": (
                            timezone.now() - datetime.timedelta(days=15)
                        ).isoformat(),
                    }
                },
            ],
            [1, 2],
        ),
        (
            [
                {
                    "createdAt": {
                        "lte": (
                            timezone.now() - datetime.timedelta(days=4)
                        ).isoformat(),
                    }
                },
                {
                    "createdAt": {
                        "gte": (
                            timezone.now() - datetime.timedelta(days=9)
                        ).isoformat(),
                    }
                },
            ],
            [1, 2],
        ),
        (
            [
                {
                    "createdAt": {
                        "lte": (
                            timezone.now() - datetime.timedelta(days=9)
                        ).isoformat(),
                    }
                }
            ],
            [2],
        ),
        (
            [
                {
                    "createdAt": {
                        "gte": (
                            timezone.now() - datetime.timedelta(days=2)
                        ).isoformat(),
                    }
                }
            ],
            [0],
        ),
        # empty / null filter variants are expected to match nothing
        (None, []),
        ([{"createdAt": {"gte": None}}], []),
        ([{"createdAt": {"lte": None}}], []),
        ([{"createdAt": {"lte": None, "gte": None}}], []),
        ([{}], []),
    ],
)
def test_orders_filter_by_invoices(
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering by invoice `createdAt` ranges returns the orders at `indexes`.

    Invoice creation times: order 0 now, orders 1 and 2 five days ago, and a
    second invoice for order 2 ten days ago.
    """
    # given
    Invoice.objects.create(order=order_list[0])
    with freeze_time((timezone.now() - datetime.timedelta(days=5)).isoformat()):
        Invoice.objects.create(order=order_list[1])
        Invoice.objects.create(order=order_list[2])
    with freeze_time((timezone.now() - datetime.timedelta(days=10)).isoformat()):
        Invoice.objects.create(order=order_list[2])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"invoices": where}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
def test_orders_filter_by_has_fulfillments_true(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """`hasFulfillments: true` returns exactly the fulfilled orders."""
    # given
    fulfilled_orders = order_list[1:]
    for order in fulfilled_orders:
        order.fulfillments.create(tracking_number="123")
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"hasFulfillments": True}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(fulfilled_orders)
    assert {edge["node"]["number"] for edge in edges} == {
        str(order.number) for order in fulfilled_orders
    }
def test_orders_filter_by_has_fulfillments_false(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """`hasFulfillments: false` returns only the unfulfilled order."""
    # given
    for order in order_list[1:]:
        order.fulfillments.create(tracking_number="123")
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"hasFulfillments": False}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert {edge["node"]["number"] for edge in edges} == {str(order_list[0].number)}
def test_orders_filter_by_has_fulfillments_none(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """`hasFulfillments: null` matches no orders."""
    # given
    for order in order_list[1:]:
        order.fulfillments.create(tracking_number="123")
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    payload = {"where": {"hasFulfillments": None}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        ([{"status": {"eq": FulfillmentStatus.FULFILLED.upper()}}], [0]),
        ([{"status": {"eq": FulfillmentStatus.REFUNDED.upper()}}], [1]),
        ([{"status": {"eq": FulfillmentStatus.RETURNED.upper()}}], [2]),
        (
            [
                {
                    "status": {
                        "oneOf": [
                            FulfillmentStatus.FULFILLED.upper(),
                            FulfillmentStatus.REFUNDED.upper(),
                        ]
                    }
                }
            ],
            [0, 1],
        ),
        (
            [
                {
                    "status": {
                        "oneOf": [
                            FulfillmentStatus.REPLACED.upper(),
                            FulfillmentStatus.CANCELED.upper(),
                        ]
                    }
                }
            ],
            [],
        ),
        # no fulfillment has the WAITING_FOR_APPROVAL status
        ([{"status": {"eq": FulfillmentStatus.WAITING_FOR_APPROVAL.upper()}}], []),
        # empty / null filter variants are expected to match nothing
        ([{}], []),
        ([{"status": {"oneOf": []}}], []),
        ([{"status": {"eq": None}}], []),
        (None, []),
    ],
)
def test_orders_filter_by_fulfillment_status(
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering by fulfillment `status` returns the orders at `indexes`.

    Each order gets one fulfillment with a distinct status (FULFILLED,
    REFUNDED, RETURNED), so every parametrized case selects a known subset.
    """
    # given
    statuses = [
        FulfillmentStatus.FULFILLED,
        FulfillmentStatus.REFUNDED,
        FulfillmentStatus.RETURNED,
    ]
    for order, status in zip(order_list, statuses, strict=True):
        order.fulfillments.create(tracking_number="123", status=status)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"fulfillments": where}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("where", "expected_indexes"),
    [
        ([{"metadata": {"key": "foo"}}], [0, 1]),
        ([{"metadata": {"key": "foo", "value": {"eq": "bar"}}}], [0]),
        ([{"metadata": {"key": "foo", "value": {"eq": "baz"}}}], []),
        ([{"metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}}], [0, 1]),
        ([{"metadata": {"key": "notfound"}}], []),
        ([{"metadata": {"key": "foo", "value": {"eq": None}}}], []),
        ([{"metadata": {"key": "foo", "value": {"oneOf": []}}}], []),
        # multiple entries in the list are combined - an order matches only if
        # every entry is satisfied
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}},
            ],
            [0, 1],
        ),
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "notfound"}},
            ],
            [],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "bar"}}},
                {"metadata": {"key": "baz", "value": {"eq": "zaz"}}},
            ],
            [],
        ),
        (
            [
                {"metadata": {"key": "baz"}},
                {"metadata": {"key": "foo", "value": {"eq": "zaz"}}},
            ],
            [1],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}},
                {"metadata": {"key": "baz"}},
            ],
            [1],
        ),
        (None, []),
    ],
)
def test_orders_filter_by_fulfillment_metadata(
    where,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering by fulfillment `metadata` returns orders at `expected_indexes`.

    Fulfillment metadata per order: 0 -> {foo: bar}, 1 -> {foo: zaz, baz: zaz},
    2 -> {} (empty).
    """
    # given
    metadata_values = [
        {"foo": "bar"},
        {"foo": "zaz", "baz": "zaz"},
        {},
    ]
    for order, metadata_value in zip(order_list, metadata_values, strict=True):
        order.fulfillments.create(tracking_number="123", metadata=metadata_value)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"fulfillments": where}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[i].number) for i in expected_indexes}
@pytest.mark.parametrize(
    ("fulfillment_filter", "expected_indexes"),
    [
        (
            [
                {"status": {"eq": FulfillmentStatus.FULFILLED.upper()}},
                {"metadata": {"key": "foo"}},
            ],
            [0],
        ),
        (
            [
                {"status": {"eq": FulfillmentStatus.REFUNDED.upper()}},
                {"metadata": {"key": "foo", "value": {"eq": "zaz"}}},
            ],
            [1],
        ),
        (
            [
                {"status": {"eq": FulfillmentStatus.RETURNED.upper()}},
                {"metadata": {"key": "baz"}},
            ],
            [],
        ),
        (
            [
                {
                    "status": {
                        "oneOf": [
                            FulfillmentStatus.FULFILLED.upper(),
                            FulfillmentStatus.REFUNDED.upper(),
                        ]
                    }
                },
                {"metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}},
            ],
            [0, 1],
        ),
        (
            [
                {"status": {"eq": FulfillmentStatus.FULFILLED.upper()}},
                {"metadata": {"key": "notfound"}},
            ],
            [],
        ),
        (
            [
                {"status": {"eq": FulfillmentStatus.RETURNED.upper()}},
                {"metadata": {"key": "foo", "value": {"eq": "baz"}}},
            ],
            [],
        ),
        (
            [
                {"status": {}},
                {"metadata": {"key": "foo"}},
            ],
            [0, 1],
        ),
        (
            [],
            [],
        ),
    ],
)
def test_orders_filter_fulfillment_status_and_metadata_both_match(
    fulfillment_filter,
    expected_indexes,
    orders_with_fulfillments,
    staff_api_client,
    permission_group_manage_orders,
):
    """Combined status + metadata fulfillment filters must both be satisfied.

    Relies on the `orders_with_fulfillments` fixture; its fulfillment statuses
    and metadata are defined in the fixture (not visible here).
    """
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"fulfillments": fulfillment_filter}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    # then
    assert len(orders) == len(expected_indexes)
    assert {node["node"]["number"] for node in orders} == {
        str(orders_with_fulfillments[i].number) for i in expected_indexes
    }
def test_orders_filter_fulfillment_status_matches_metadata_not(
    orders_with_fulfillments, staff_api_client, permission_group_manage_orders
):
    """A matching status with non-matching metadata yields no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    fulfillment_filter = [
        {
            "status": {"eq": FulfillmentStatus.FULFILLED.upper()},
            "metadata": {"key": "foo", "value": {"eq": "notfound"}},
        }
    ]
    payload = {"where": {"fulfillments": fulfillment_filter}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)
    content = get_graphql_content(response)

    # then
    assert content["data"]["orders"]["edges"] == []
def test_orders_filter_fulfillment_metadata_matches_status_not(
    orders_with_fulfillments, staff_api_client, permission_group_manage_orders
):
    """Matching metadata with a non-matching status yields no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    fulfillment_filter = [
        {
            "status": {"eq": FulfillmentStatus.REFUNDED.upper()},
            "metadata": {"key": "foo", "value": {"eq": "bar"}},
        }
    ]
    payload = {"where": {"fulfillments": fulfillment_filter}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, payload)
    content = get_graphql_content(response)

    # then
    assert content["data"]["orders"]["edges"] == []
def test_orders_filter_fulfillment_status_and_metadata_both_not_match(
    orders_with_fulfillments, staff_api_client, permission_group_manage_orders
):
    """Neither the status nor the metadata condition matches any fulfillment,
    so the query returns no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    fulfillment_where = {
        "status": {"eq": FulfillmentStatus.RETURNED.upper()},
        "metadata": {"key": "foo", "value": {"eq": "baz"}},
    }
    variables = {"where": {"fulfillments": [fulfillment_where]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_orders_filter_fulfillment_status_matches_metadata_none(
    orders_with_fulfillments, staff_api_client, permission_group_manage_orders
):
    """A fulfillment filter with a matching status and ``"metadata": None``
    returns the single order whose fulfillment has that status."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    fulfillment_where = {
        "status": {"eq": FulfillmentStatus.FULFILLED.upper()},
        "metadata": None,
    }
    variables = {"where": {"fulfillments": [fulfillment_where]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(orders_with_fulfillments[0].number)}
def test_orders_filter_fulfillment_metadata_matches_status_none(
    orders_with_fulfillments, staff_api_client, permission_group_manage_orders
):
    """A fulfillment filter with matching metadata and ``"status": None``
    returns the single order whose fulfillment carries that metadata."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    fulfillment_where = {
        "status": None,
        "metadata": {"key": "foo", "value": {"eq": "bar"}},
    }
    variables = {"where": {"fulfillments": [fulfillment_where]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(orders_with_fulfillments[0].number)}
def test_orders_filter_fulfillment_status_and_metadata_both_none(
    orders_with_fulfillments, staff_api_client, permission_group_manage_orders
):
    """A fulfillment entry with both sub-filters set to ``None`` matches no
    orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    fulfillment_where = {
        "status": None,
        "metadata": None,
    }
    variables = {"where": {"fulfillments": [fulfillment_where]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
def test_orders_filter_fulfillment_status_oneof_metadata_oneof(
    orders_with_fulfillments, staff_api_client, permission_group_manage_orders
):
    """``oneOf`` lists on both status and metadata value return every order
    whose fulfillment satisfies one option from each list."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    status_options = [
        FulfillmentStatus.FULFILLED.upper(),
        FulfillmentStatus.REFUNDED.upper(),
    ]
    fulfillment_where = {
        "status": {"oneOf": status_options},
        "metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}},
    }
    variables = {"where": {"fulfillments": [fulfillment_where]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 2
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {
        str(orders_with_fulfillments[0].number),
        str(orders_with_fulfillments[1].number),
    }
def test_orders_filter_fulfillment_warehouse_id_eq(
    orders_with_fulfillments,
    staff_api_client,
    permission_group_manage_orders,
    fulfilled_order,
):
    """Filtering by the fulfillment warehouse global ID with ``eq`` returns
    exactly the order fulfilled from that warehouse."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    expected_order = fulfilled_order
    warehouse = (
        expected_order.fulfillments.first().lines.first().stock.warehouse
    )
    warehouse_id = to_global_id_or_none(warehouse)
    variables = {
        "where": {"fulfillments": [{"warehouse": {"id": {"eq": warehouse_id}}}]}
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(expected_order.number)
def test_orders_filter_fulfillment_warehouse_id_one_of(
    orders_with_fulfillments,
    staff_api_client,
    permission_group_manage_orders,
    fulfilled_order,
):
    """A one-element ``oneOf`` warehouse-ID list behaves like ``eq`` and
    returns only the matching fulfilled order."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    expected_order = fulfilled_order
    warehouse = (
        expected_order.fulfillments.first().lines.first().stock.warehouse
    )
    warehouse_id = to_global_id_or_none(warehouse)
    variables = {
        "where": {
            "fulfillments": [{"warehouse": {"id": {"oneOf": [warehouse_id]}}}]
        }
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(expected_order.number)
@pytest.mark.parametrize(
    "where_warehouse_slug",
    [
        {"slug": {"eq": "warehouse-to-get"}},
        {"slug": {"oneOf": ["warehouse-to-get"]}},
    ],
)
def test_orders_filter_fulfillment_warehouse_slug(
    where_warehouse_slug,
    orders_with_fulfillments,
    staff_api_client,
    permission_group_manage_orders,
    fulfilled_order,
):
    """Both ``eq`` and ``oneOf`` warehouse-slug filters find the order whose
    fulfillment warehouse has the renamed slug."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    expected_order = fulfilled_order
    fulfillment = expected_order.fulfillments.first()
    # Sanity check: other fulfillment lines exist, so the filter must narrow.
    assert FulfillmentLine.objects.count() > 1
    warehouse = fulfillment.lines.first().stock.warehouse
    warehouse.slug = "warehouse-to-get"
    warehouse.save()
    variables = {"where": {"fulfillments": [{"warehouse": where_warehouse_slug}]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(expected_order.number)
@pytest.mark.parametrize(
    "where_warehouse_external_reference",
    [
        {"externalReference": {"eq": "warehouse-to-get"}},
        {"externalReference": {"oneOf": ["warehouse-to-get"]}},
    ],
)
def test_orders_filter_fulfillment_warehouse_external_reference(
    where_warehouse_external_reference,
    orders_with_fulfillments,
    staff_api_client,
    permission_group_manage_orders,
    fulfilled_order,
):
    """Both ``eq`` and ``oneOf`` external-reference filters find the order
    whose fulfillment warehouse carries the assigned reference."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    expected_order = fulfilled_order
    fulfillment = expected_order.fulfillments.first()
    # Sanity check: other fulfillment lines exist, so the filter must narrow.
    assert FulfillmentLine.objects.count() > 1
    warehouse = fulfillment.lines.first().stock.warehouse
    warehouse.external_reference = "warehouse-to-get"
    warehouse.save()
    variables = {
        "where": {"fulfillments": [{"warehouse": where_warehouse_external_reference}]}
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(expected_order.number)
@pytest.mark.parametrize(
    "where_warehouse_non_existing_input",
    [
        {"externalReference": {"eq": "non-existing-warehouse"}},
        {"externalReference": {"oneOf": ["non-existing-warehouse"]}},
        {"slug": {"eq": "non-existing-warehouse"}},
        {"slug": {"oneOf": ["non-existing-warehouse"]}},
        {
            "id": {
                "eq": "V2FyZWhvdXNlOjJjMGNiODAwLTU0N2ItNDM1ZS04Y2UwLTkyYTFiOTE1ZmFkMQ=="
            }
        },
        {
            "id": {
                "oneOf": [
                    "V2FyZWhvdXNlOjJjMGNiODAwLTU0N2ItNDM1ZS04Y2UwLTkyYTFiOTE1ZmFkMQ=="
                ]
            }
        },
        {
            "slug": {"oneOf": ["non-existing-warehouse"]},
            "externalReference": {"eq": "existing-warehouse-ref"},
        },
    ],
)
def test_orders_filter_fulfillment_warehouse_non_existing(
    where_warehouse_non_existing_input,
    orders_with_fulfillments,
    staff_api_client,
    permission_group_manage_orders,
    fulfilled_order,
):
    """Warehouse sub-filters that point at unknown identifiers return no
    orders — even when combined with an identifier that does exist."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    fulfillment = fulfilled_order.fulfillments.first()
    # Sanity check: fulfillment lines exist that could otherwise match.
    assert FulfillmentLine.objects.count() > 1
    warehouse = fulfillment.lines.first().stock.warehouse
    warehouse.slug = "existing-warehouse-slug"
    warehouse.external_reference = "existing-warehouse-ref"
    warehouse.save()
    variables = {
        "where": {"fulfillments": [{"warehouse": where_warehouse_non_existing_input}]}
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
@pytest.mark.parametrize(
    "where_additional_filters",
    [
        {"status": {"eq": FulfillmentStatus.FULFILLED.upper()}},
        {"metadata": {"key": "notfound"}},
    ],
)
def test_orders_filter_fulfillment_warehouse_with_multiple_filters_with_no_match(
    where_additional_filters,
    staff_api_client,
    permission_group_manage_orders,
    fulfilled_order,
):
    """A matching warehouse ID combined with a non-matching status or
    metadata condition yields no orders."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    fulfillment = fulfilled_order.fulfillments.first()
    # Make sure the extra conditions cannot match this fulfillment.
    fulfillment.status = FulfillmentStatus.WAITING_FOR_APPROVAL
    fulfillment.metadata = {"key": "value"}
    fulfillment.save()
    warehouse = fulfillment.lines.first().stock.warehouse
    fulfillment_where = {
        "warehouse": {"id": {"eq": to_global_id_or_none(warehouse)}},
        **where_additional_filters,
    }
    variables = {"where": {"fulfillments": [fulfillment_where]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
@pytest.mark.parametrize(
    "where_additional_filters",
    [
        {"status": {"eq": FulfillmentStatus.FULFILLED.upper()}},
        {"metadata": {"key": "meta-key"}},
    ],
)
def test_orders_filter_fulfillment_warehouse_multiple_filters(
    where_additional_filters,
    orders_with_fulfillments,
    staff_api_client,
    permission_group_manage_orders,
    fulfilled_order,
):
    """A matching warehouse ID combined with a matching status or metadata
    condition still returns the fulfilled order."""
    # given
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    expected_order = fulfilled_order
    fulfillment = expected_order.fulfillments.first()
    # Arrange the fulfillment so both additional conditions can match.
    fulfillment.status = FulfillmentStatus.FULFILLED
    fulfillment.metadata = {"meta-key": "meta-value"}
    fulfillment.save()
    assert FulfillmentLine.objects.count() > 1
    warehouse = fulfillment.lines.first().stock.warehouse
    # NOTE(review): no filter below uses the external reference; this looks
    # like incidental setup — confirm before removing.
    warehouse.external_reference = "warehouse-to-get"
    warehouse.save()
    fulfillment_where = {
        "warehouse": {"id": {"eq": to_global_id_or_none(warehouse)}},
        **where_additional_filters,
    }
    variables = {"where": {"fulfillments": [fulfillment_where]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(expected_order.number)
@pytest.mark.parametrize(
    ("filter_input", "expected_indexes"),
    [
        ([{"metadata": {"key": "foo"}}], [0, 1]),
        ([{"metadata": {"key": "foo", "value": {"eq": "bar"}}}], [0]),
        ([{"metadata": {"key": "foo", "value": {"eq": "baz"}}}], []),
        ([{"metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}}], [0, 1]),
        ([{"metadata": {"key": "notfound"}}], []),
        ([{"metadata": {"key": "foo", "value": {"eq": None}}}], []),
        ([{"metadata": {"key": "foo", "value": {"oneOf": []}}}], []),
        (None, []),
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "foo", "value": {"eq": "bar"}}},
            ],
            [0],
        ),
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "baz", "value": {"eq": "zaz"}}},
            ],
            [0, 1],
        ),
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "foo", "value": {"eq": "baz"}}},
            ],
            [],
        ),
    ],
)
def test_orders_filter_by_lines_metadata(
    filter_input,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Orders are filtered by metadata on their lines; the parametrized table
    shows that multiple ``lines`` entries must all be satisfied."""
    # given
    metadata_values = [
        {"foo": "bar", "baz": "zaz"},
        {"foo": "zaz", "baz": "zaz"},
        {},
    ]
    OrderLine.objects.bulk_create(
        [
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                metadata=line_metadata,
            )
            for order, line_metadata in zip(order_list, metadata_values, strict=True)
        ]
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"lines": filter_input}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(expected_indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[i].number) for i in expected_indexes}
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        ({"range": {"gte": 2, "lte": 4}}, [1, 2]),
        ({"range": {"gte": 3}}, [2]),
        ({"range": {"lte": 2}}, [0, 1]),
        ({"eq": 2}, [1]),
        ({"oneOf": [1, 3]}, [0, 2]),
        ({"eq": 99}, []),
        ({}, []),
        ({"range": {"gte": None}}, []),
        ({"range": {"lte": None}}, []),
        ({"eq": None}, []),
        ({"oneOf": []}, []),
        (None, []),
    ],
)
def test_orders_filter_by_lines_count(
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """``linesCount`` supports range/eq/oneOf operators; empty or null
    filters match nothing per the parametrized table."""
    # given
    for order, count in zip(order_list, (1, 2, 3)):
        order.lines_count = count
    Order.objects.bulk_update(order_list, ["lines_count"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"linesCount": where}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("currency", "where", "indexes"),
    [
        ("USD", {"range": {"gte": "100.00", "lte": "200.00"}}, [0, 1]),
        ("USD", {"range": {"gte": "150.00"}}, [1]),
        ("PLN", {"range": {"gte": "150.00"}}, [2]),
        (None, {"range": {"gte": "150.00"}}, [1, 2]),
        ("USD", {"range": {"lte": "120.00"}}, [0]),
        ("USD", {"eq": "150.00"}, [1]),
        ("PLN", {"eq": "150.00"}, []),
        ("USD", {"oneOf": ["100.00", "110.00"]}, [0]),
        ("USD", {}, []),
        (None, {"range": {"gte": None}}, []),
        ("USD", {"range": {"lte": None}}, []),
        ("USD", {"eq": None}, []),
        (None, {"eq": None}, []),
    ],
)
def test_orders_filter_by_total_gross(
    currency,
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """``totalGross`` filters by amount (range/eq/oneOf) optionally scoped to
    a currency; a null currency matches across currencies."""
    # given
    totals = [("110.00", "USD"), ("150.00", "USD"), ("200.00", "PLN")]
    for order, (amount, order_currency) in zip(order_list, totals):
        order.total_gross_amount = amount
        order.currency = order_currency
    Order.objects.bulk_update(order_list, ["total_gross_amount", "currency"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"totalGross": {"currency": currency, "amount": where}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("currency", "where", "indexes"),
    [
        ("USD", {"range": {"gte": "100.00", "lte": "200.00"}}, [0, 1]),
        ("USD", {"range": {"gte": "150.00"}}, [1]),
        ("PLN", {"range": {"gte": "150.00"}}, [2]),
        (None, {"range": {"gte": "150.00"}}, [1, 2]),
        ("USD", {"range": {"lte": "120.00"}}, [0]),
        ("USD", {"eq": "150.00"}, [1]),
        ("PLN", {"eq": "150.00"}, []),
        ("USD", {"oneOf": ["100.00", "110.00"]}, [0]),
        ("USD", {}, []),
        (None, {"range": {"gte": None}}, []),
        ("USD", {"range": {"lte": None}}, []),
        ("USD", {"eq": None}, []),
        (None, {"eq": None}, []),
    ],
)
def test_orders_filter_by_total_net(
    currency,
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """``totalNet`` filters by amount (range/eq/oneOf) optionally scoped to a
    currency; mirrors the ``totalGross`` test above."""
    # given
    totals = [("110.00", "USD"), ("150.00", "USD"), ("200.00", "PLN")]
    for order, (amount, order_currency) in zip(order_list, totals):
        order.total_net_amount = amount
        order.currency = order_currency
    Order.objects.bulk_update(order_list, ["total_net_amount", "currency"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"totalNet": {"currency": currency, "amount": where}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("metadata", "expected_indexes"),
    [
        ({"key": "foo"}, [0, 1]),
        ({"key": "foo", "value": {"eq": "bar"}}, [0]),
        ({"key": "foo", "value": {"eq": "baz"}}, []),
        ({"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}, [0, 1]),
        ({"key": "notfound"}, []),
        ({"key": "foo", "value": {"eq": None}}, []),
        ({"key": "foo", "value": {"oneOf": []}}, []),
        (None, []),
    ],
)
def test_orders_filter_by_metadata(
    metadata,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Orders are filtered by their own metadata key/value per the
    parametrized table; null or empty value filters match nothing."""
    # given
    order_metadata_values = ({"foo": "bar"}, {"foo": "zaz"}, {})
    for order, order_metadata in zip(order_list, order_metadata_values):
        order.metadata = order_metadata
    Order.objects.bulk_update(order_list, ["metadata"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"metadata": metadata}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(expected_indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[i].number) for i in expected_indexes}
def test_orders_filter_by_product_type_id(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering ``productTypeId`` with ``eq`` returns only the order whose
    line references the given product type."""
    # given
    product_type_ids = [3, 4, 5]
    OrderLine.objects.bulk_create(
        [
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                product_type_id=type_pk,
            )
            for order, type_pk in zip(order_list, product_type_ids, strict=True)
        ]
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    searched_id = graphene.Node.to_global_id("ProductType", 4)
    variables = {"where": {"productTypeId": {"eq": searched_id}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(order_list[1].number)
def test_orders_filter_by_product_type_ids(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering ``productTypeId`` with ``oneOf`` returns every order that has
    a line referencing one of the given product types.

    Fix: the ID-encoding comprehension previously shadowed the builtin
    ``id``; the loop variable is now ``type_pk``.
    """
    # given
    lines = []
    product_type_ids = [3, 4, 5]
    for order, product_type_id in zip(order_list, product_type_ids, strict=True):
        lines.append(
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                product_type_id=product_type_id,
            )
        )
    OrderLine.objects.bulk_create(lines)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # Search for the first two product types only.
    searched_global_ids = [
        graphene.Node.to_global_id("ProductType", type_pk)
        for type_pk in product_type_ids[:2]
    ]
    variables = {"where": {"productTypeId": {"oneOf": searched_global_ids}}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(order_list[:2])
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order.number) for order in order_list[:2]}
def test_orders_filter_by_product_type_ids_nothing_match(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """``productTypeId`` ``oneOf`` with IDs that no line references returns
    no orders.

    Fix: the ID-encoding comprehension previously shadowed the builtin
    ``id``; the loop variable is now ``type_pk``.
    """
    # given
    lines = []
    product_type_ids = [3, 4, 5]
    for order, product_type_id in zip(order_list, product_type_ids, strict=True):
        lines.append(
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                product_type_id=product_type_id,
            )
        )
    OrderLine.objects.bulk_create(lines)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    # IDs 6 and 7 are deliberately unused by any created line.
    searched_global_ids = [
        graphene.Node.to_global_id("ProductType", type_pk) for type_pk in [6, 7]
    ]
    variables = {"where": {"productTypeId": {"oneOf": searched_global_ids}}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == 0
def test_orders_filter_by_product_type_none(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filtering ``productTypeId`` by ``{"eq": None}`` matches no orders even
    though every order has a line with a product type."""
    # given
    product_type_ids = [3, 4, 5]
    OrderLine.objects.bulk_create(
        [
            OrderLine(
                order=order,
                product_name="Test Product",
                is_shipping_required=True,
                is_gift_card=False,
                quantity=2,
                currency="USD",
                unit_price_net_amount="10.00",
                unit_price_gross_amount="12.30",
                total_price_net_amount="20.00",
                total_price_gross_amount="24.60",
                product_type_id=type_pk,
            )
            for order, type_pk in zip(order_list, product_type_ids, strict=True)
        ]
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"productTypeId": {"eq": None}}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    assert content["data"]["orders"]["edges"] == []
@pytest.mark.parametrize(
    ("event_input", "expected_indexes"),
    [
        (
            [
                {
                    "date": {"gte": "2025-01-01T00:00:00Z"},
                    "type": {"eq": OrderEvents.PLACED.upper()},
                }
            ],
            [0, 1, 2],
        ),
        (
            [
                {
                    "date": {"gte": "2025-01-01T00:00:00Z"},
                    "type": {"eq": OrderEvents.ORDER_FULLY_PAID.upper()},
                }
            ],
            [0, 1],
        ),
        (
            [
                {
                    "date": {"gte": "2026-01-01T00:00:00Z"},
                }
            ],
            [],
        ),
        (
            [
                {
                    "date": {"gte": "2020-01-01T00:00:00Z"},
                }
            ],
            [0, 1, 2],
        ),
        (
            [
                {
                    "type": {
                        "oneOf": [
                            OrderEvents.PLACED.upper(),
                            OrderEvents.ORDER_FULLY_PAID.upper(),
                        ]
                    },
                }
            ],
            [0, 1, 2],
        ),
        # Multiple entries: expected indexes show all entries must be
        # satisfied for an order to match.
        (
            [
                {
                    "type": {"eq": OrderEvents.PLACED.upper()},
                },
                {
                    "type": {"eq": OrderEvents.ORDER_FULLY_PAID.upper()},
                },
            ],
            [0, 1],
        ),
        (
            [
                {
                    "date": {"gte": "2025-01-01T00:00:00Z"},
                    "type": {"oneOf": [OrderEvents.PLACED.upper()]},
                },
                {
                    "date": {"gte": "2025-02-01T00:00:00Z"},
                    "type": {"oneOf": [OrderEvents.ORDER_FULLY_PAID.upper()]},
                },
            ],
            [0, 1],
        ),
        (
            [
                {
                    "date": {"gte": "2025-01-01T00:00:00Z"},
                    "type": {"eq": OrderEvents.PLACED.upper()},
                },
                {
                    "date": {"gte": "2025-02-02T00:00:00Z"},
                },
            ],
            [0, 1],
        ),
    ],
)
def test_orders_filter_by_order_events(
    event_input,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Orders are filtered by their events' date and type.

    Setup: every order gets a PLACED event dated 2025-01-01; the first two
    orders additionally get an ORDER_FULLY_PAID event dated 2025-02-02.
    The parametrized table encodes the expected order indexes per filter.
    """
    # given
    # freeze_time pins OrderEvent.date so the date filters are deterministic.
    with freeze_time("2025-01-01T00:00:00Z"):
        OrderEvent.objects.bulk_create(
            [OrderEvent(order=order, type=OrderEvents.PLACED) for order in order_list]
        )
    with freeze_time("2025-02-02T00:00:00Z"):
        OrderEvent.objects.bulk_create(
            [
                OrderEvent(order=order, type=OrderEvents.ORDER_FULLY_PAID)
                for order in order_list[:2]
            ]
        )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"events": event_input}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[i].number) for i in expected_indexes}
@pytest.mark.parametrize(
    ("where", "indexes"),
    [
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "type": {"eq": "CARD"},
                        }
                    }
                ]
            },
            [0, 2],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "type": {"eq": "OTHER"},
                        }
                    }
                ]
            },
            [1],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "card": {
                                "brand": {"eq": "Brand"},
                            }
                        }
                    }
                ]
            },
            [0],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "card": {
                                "brand": {"eq": "Brand4"},
                            }
                        }
                    }
                ]
            },
            [2],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "card": {
                                "brand": {"eq": "Brand2"},
                            }
                        }
                    }
                ]
            },
            [0],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "type": {"oneOf": ["CARD", "OTHER"]},
                        }
                    }
                ]
            },
            [0, 1, 2],
        ),
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "card": {
                                "brand": {"oneOf": ["Brand2", "Brand4"]},
                            }
                        }
                    }
                ]
            },
            [0, 2],
        ),
        # Two entries: per the expected indexes, an order matches only when
        # both conditions are satisfied (order 0 has both card transactions).
        (
            {
                "transactions": [
                    {
                        "paymentMethodDetails": {
                            "type": {"eq": "CARD"},
                        }
                    },
                    {
                        "paymentMethodDetails": {
                            "card": {"brand": {"eq": "Brand"}},
                        }
                    },
                ]
            },
            [0],
        ),
    ],
)
def test_orders_filter_by_transaction_payment_details(
    where,
    indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    transaction_item_generator,
):
    """Orders are filtered by their transactions' payment-method details.

    Setup: order 0 gets two card transactions (brands "Brand" and "Brand2"),
    order 1 gets an "other"-type transaction with no card data, and order 2
    gets a card transaction with brand "Brand4". The table maps each filter
    to the expected order indexes.
    """
    # given
    # first transaction: card / "Brand" on order 0
    transaction_item_generator(
        order_id=order_list[0].pk,
        charged_value=order_list[0].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Credit card",
        cc_brand="Brand",
        cc_first_digits="1234",
        cc_last_digits="5678",
        cc_exp_month=12,
        cc_exp_year=2025,
    )
    # second transaction: card / "Brand2", also on order 0
    transaction_item_generator(
        order_id=order_list[0].pk,
        charged_value=order_list[0].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Second Credit card",
        cc_brand="Brand2",
        cc_first_digits="1234",
        cc_last_digits="5678",
        cc_exp_month=12,
        cc_exp_year=2025,
    )
    # third transaction: non-card ("other") on order 1, no card details
    transaction_item_generator(
        order_id=order_list[1].pk,
        charged_value=order_list[1].total.gross.amount,
        payment_method_type="other",
        payment_method_name="Third payment method",
        cc_brand=None,
        cc_first_digits=None,
        cc_last_digits=None,
        cc_exp_month=None,
        cc_exp_year=None,
    )
    # fourth transaction: card / "Brand4" on order 2
    transaction_item_generator(
        order_id=order_list[2].pk,
        charged_value=order_list[2].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Fourth Credit card",
        cc_brand="Brand4",
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": where}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[index].number) for index in indexes}
@pytest.mark.parametrize(
    ("metadata_list", "expected_indexes"),
    [
        (
            [
                {"metadata": {"key": "foo"}},
                {"metadata": {"key": "foo", "value": {"oneOf": ["bar", "zaz"]}}},
            ],
            [0, 1],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "bar"}}},
                {"metadata": {"key": "foo", "value": {"eq": "zaz"}}},
            ],
            [],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "bar"}}},
                {"metadata": {"key": "notfound"}},
            ],
            [],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "zaz"}}},
                {"metadata": {"key": "foo"}},
            ],
            [1],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "baz"}}},
                {"metadata": {"key": "notfound"}},
            ],
            [],
        ),
    ],
)
def test_orders_filter_by_transaction_metadata(
    metadata_list,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    transaction_item_generator,
):
    """Orders are filtered by metadata on their transactions; the table shows
    which filter combinations match which orders."""
    # given
    # Order 0 has transactions with {"foo": "bar"} and {}; order 1 has
    # {"foo": "zaz"}; order 2 has only an empty-metadata transaction.
    transaction_specs = [
        (order_list[0], {"foo": "bar"}),
        (order_list[0], {}),
        (order_list[1], {"foo": "zaz"}),
        (order_list[2], {}),
    ]
    for order, transaction_metadata in transaction_specs:
        transaction_item_generator(
            order_id=order.pk,
            charged_value=order.total.gross.amount,
            metadata=transaction_metadata,
        )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"transactions": metadata_list}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(expected_indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[i].number) for i in expected_indexes}
@pytest.mark.parametrize(
    ("transaction_filters", "expected_indexes"),
    [
        (
            [
                {"metadata": {"key": "foo"}},
                {"paymentMethodDetails": {"type": {"eq": "CARD"}}},
            ],
            [0, 2],
        ),
        (
            [
                {"metadata": {"key": "foo"}},
                {"paymentMethodDetails": {"type": {"eq": "OTHER"}}},
            ],
            [1],
        ),
        (
            [
                {"metadata": {"key": "notfound"}},
                {"paymentMethodDetails": {"type": {"eq": "OTHER"}}},
            ],
            [],
        ),
        (
            [
                {"metadata": {"key": "foo", "value": {"eq": "baz"}}},
                {"paymentMethodDetails": {"type": {"eq": "CARD"}}},
            ],
            [0],
        ),
    ],
)
def test_orders_filter_by_transactions_with_mixed_conditions(
    transaction_filters,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    transaction_item_generator,
):
    """Mixes metadata and payment-method-details conditions across multiple
    ``transactions`` entries.

    Setup: order 0 has two card transactions (one with empty metadata, one
    with ``{"foo": "baz"}``); order 1 has an "other"-type transaction with
    ``{"foo": "zaz"}``; order 2 has a card transaction with ``{"foo": "bar"}``.
    Expected indexes come from the parametrized table.
    """
    # given
    # first transaction: card on order 0, no metadata
    transaction_item_generator(
        order_id=order_list[0].pk,
        charged_value=order_list[0].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Credit card",
        cc_brand="Brand",
        cc_first_digits="1234",
        cc_last_digits="5678",
        cc_exp_month=12,
        cc_exp_year=2025,
        metadata={},
    )
    # second_transaction: card on order 0 with {"foo": "baz"}
    transaction_item_generator(
        order_id=order_list[0].pk,
        charged_value=order_list[0].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Second Credit card",
        cc_brand="Brand2",
        cc_first_digits="1234",
        cc_last_digits="5678",
        cc_exp_month=12,
        cc_exp_year=2025,
        metadata={"foo": "baz"},
    )
    # third_transaction: "other" type on order 1 with {"foo": "zaz"}
    transaction_item_generator(
        order_id=order_list[1].pk,
        charged_value=order_list[1].total.gross.amount,
        payment_method_type="other",
        payment_method_name="Third payment method",
        cc_brand=None,
        cc_first_digits=None,
        cc_last_digits=None,
        cc_exp_month=None,
        cc_exp_year=None,
        metadata={"foo": "zaz"},
    )
    # fourth_transaction: card on order 2 with {"foo": "bar"}
    transaction_item_generator(
        order_id=order_list[2].pk,
        charged_value=order_list[2].total.gross.amount,
        payment_method_type="card",
        payment_method_name="Fourth Credit card",
        cc_brand="Brand4",
        metadata={"foo": "bar"},
    )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"transactions": transaction_filters}}
    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)
    # then
    content = get_graphql_content(response)
    orders = content["data"]["orders"]["edges"]
    assert len(orders) == len(expected_indexes)
    numbers = {node["node"]["number"] for node in orders}
    assert numbers == {str(order_list[i].number) for i in expected_indexes}
@pytest.mark.parametrize(
    ("psp_reference_filter", "expected_indexes"),
    [
        ({"eq": "PSP-ref-1"}, [0]),
        ({"eq": "PSP-ref-2"}, [0]),
        ({"eq": "PSP-ref-3"}, [1]),
        ({"eq": "PSP-ref-4"}, [2]),
        ({"eq": "non-existent-ref"}, []),
        ({"oneOf": ["PSP-ref-1", "PSP-ref-3"]}, [0, 1]),
        ({"oneOf": ["PSP-ref-2", "PSP-ref-4"]}, [0, 2]),
        ({"oneOf": ["PSP-ref-1", "PSP-ref-2", "PSP-ref-3", "PSP-ref-4"]}, [0, 1, 2]),
        ({"oneOf": ["non-existent-ref"]}, []),
        ({"oneOf": []}, []),
        ({"eq": None}, []),
        (None, []),
    ],
)
def test_orders_filter_by_transaction_psp_reference(
    psp_reference_filter,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    transaction_item_generator,
):
    """Orders are filtered by the PSP reference of their transactions; order 0
    owns two references, orders 1 and 2 one each."""
    # given
    psp_reference_specs = [
        (order_list[0], "PSP-ref-1"),
        (order_list[0], "PSP-ref-2"),
        (order_list[1], "PSP-ref-3"),
        (order_list[2], "PSP-ref-4"),
    ]
    for order, psp_reference in psp_reference_specs:
        transaction_item_generator(
            order_id=order.pk,
            charged_value=order.total.gross.amount,
            psp_reference=psp_reference,
        )
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"transactions": [{"pspReference": psp_reference_filter}]}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(expected_indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[i].number) for i in expected_indexes}
def test_orders_filter_by_transaction_psp_reference_payment_details_and_metadata(
    order_list,
    staff_api_client,
    permission_group_manage_orders,
    transaction_item_generator,
):
    """Combine pspReference, payment-method-details and metadata transaction filters."""
    # given: only the first transaction matches all three filters at once
    transaction_kwargs = [
        {
            "order_id": order_list[0].pk,
            "charged_value": order_list[0].total.gross.amount,
            "psp_reference": "PSP-ref-1",
            "payment_method_type": "card",
            "cc_brand": "Visa",
            "metadata": {"foo": "bar"},
        },
        {
            "order_id": order_list[0].pk,
            "charged_value": order_list[0].total.gross.amount,
            "psp_reference": "PSP-ref-2",
            "payment_method_type": "other",
            "metadata": {"baz": "qux"},
        },
        {
            "order_id": order_list[1].pk,
            "charged_value": order_list[1].total.gross.amount,
            "psp_reference": "PSP-ref-3",
            "payment_method_type": "card",
            "cc_brand": "Mastercard",
            "metadata": {"foo": "bar"},
        },
        {
            "order_id": order_list[2].pk,
            "charged_value": order_list[2].total.gross.amount,
            "psp_reference": "PSP-ref-4",
            "payment_method_type": "card",
            "cc_brand": "Visa",
            "metadata": {},
        },
    ]
    for kwargs in transaction_kwargs:
        transaction_item_generator(**kwargs)
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {
        "where": {
            "transactions": [
                {"pspReference": {"oneOf": ["PSP-ref-1", "PSP-ref-3", "PSP-ref-4"]}},
                {"paymentMethodDetails": {"card": {"brand": {"eq": "Visa"}}}},
                {"metadata": {"key": "foo"}},
            ]
        }
    }

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == 1
    assert edges[0]["node"]["number"] == str(order_list[0].number)
@pytest.mark.parametrize(
    ("address_filter", "expected_indexes"),
    [
        ({"phoneNumber": {"eq": "+48123456789"}}, [0]),
        ({"phoneNumber": {"eq": "+1987654321"}}, [1]),
        ({"phoneNumber": {"eq": "notfound"}}, []),
        ({"phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]}}, [0, 2]),
        ({"phoneNumber": {"oneOf": ["notfound"]}}, []),
        ({"country": {"eq": "GE"}}, [0]),
        ({"country": {"eq": "US"}}, [1]),
        ({"country": {"eq": "CN"}}, [2]),
        ({"country": {"eq": "JP"}}, []),
        ({"country": {"oneOf": ["GE", "CN"]}}, [0, 2]),
        ({"country": {"oneOf": ["JP"]}}, []),
        ({"country": {"notOneOf": ["GE", "CN", "PL"]}}, [1]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "GE"}}, [0]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "US"}}, []),
        (
            {
                "phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]},
                "country": {"notOneOf": ["GE"]},
            },
            [2],
        ),
        (None, []),
        ({"phoneNumber": {"eq": None}}, []),
        ({"phoneNumber": {"oneOf": []}}, []),
        ({"country": {"eq": None}}, []),
        ({"country": {"oneOf": []}}, []),
    ],
)
def test_orders_filter_by_billing_address(
    address_filter,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filter orders by billing address phone number and/or country."""
    # given: one distinct (phone, country) billing address per order
    contact_data = [
        ("+48123456789", "GE"),
        ("+1987654321", "US"),
        ("+86555555555", "CN"),
    ]
    addresses = [
        Address.objects.create(
            first_name="John",
            last_name="Doe",
            company_name="Mirumee Software",
            street_address_1="Tęczowa 7",
            city="WROCŁAW",
            postal_code="53-601",
            country=country,
            phone=phone,
        )
        for phone, country in contact_data
    ]
    for order, address in zip(order_list, addresses, strict=True):
        order.billing_address = address
    Order.objects.bulk_update(order_list, ["billing_address"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"billingAddress": address_filter}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(expected_indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[i].number) for i in expected_indexes}
@pytest.mark.parametrize(
    ("address_filter", "expected_indexes"),
    [
        ({"phoneNumber": {"eq": "+48123456789"}}, [0]),
        ({"phoneNumber": {"eq": "+1987654321"}}, [1]),
        ({"phoneNumber": {"eq": "notfound"}}, []),
        ({"phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]}}, [0, 2]),
        ({"phoneNumber": {"oneOf": ["notfound"]}}, []),
        ({"country": {"eq": "GE"}}, [0]),
        ({"country": {"eq": "US"}}, [1]),
        ({"country": {"eq": "CN"}}, [2]),
        ({"country": {"eq": "JP"}}, []),
        ({"country": {"oneOf": ["GE", "CN"]}}, [0, 2]),
        ({"country": {"oneOf": ["JP"]}}, []),
        ({"country": {"notOneOf": ["GE", "CN", "PL"]}}, [1]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "GE"}}, [0]),
        ({"phoneNumber": {"eq": "+48123456789"}, "country": {"eq": "US"}}, []),
        (
            {
                "phoneNumber": {"oneOf": ["+48123456789", "+86555555555"]},
                "country": {"notOneOf": ["GE"]},
            },
            [2],
        ),
        (None, []),
        ({"phoneNumber": {"eq": None}}, []),
        ({"phoneNumber": {"oneOf": []}}, []),
        ({"country": {"eq": None}}, []),
        ({"country": {"oneOf": []}}, []),
    ],
)
def test_orders_filter_by_shipping_address(
    address_filter,
    expected_indexes,
    order_list,
    staff_api_client,
    permission_group_manage_orders,
):
    """Filter orders by shipping address phone number and/or country."""
    # given: one distinct (phone, country) shipping address per order
    contact_data = [
        ("+48123456789", "GE"),
        ("+1987654321", "US"),
        ("+86555555555", "CN"),
    ]
    addresses = [
        Address.objects.create(
            first_name="John",
            last_name="Doe",
            company_name="Mirumee Software",
            street_address_1="Tęczowa 7",
            city="WROCŁAW",
            postal_code="53-601",
            country=country,
            phone=phone,
        )
        for phone, country in contact_data
    ]
    for order, address in zip(order_list, addresses, strict=True):
        order.shipping_address = address
    Order.objects.bulk_update(order_list, ["shipping_address"])
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    variables = {"where": {"shippingAddress": address_filter}}

    # when
    response = staff_api_client.post_graphql(ORDERS_WHERE_QUERY, variables)

    # then
    content = get_graphql_content(response)
    edges = content["data"]["orders"]["edges"]
    assert len(edges) == len(expected_indexes)
    returned_numbers = {edge["node"]["number"] for edge in edges}
    assert returned_numbers == {str(order_list[i].number) for i in expected_indexes}
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/order/tests/queries/test_order_with_where.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 3091,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/product/mutations/product/product_cleaner.py | from django.core.exceptions import ValidationError
from .....core.utils.editorjs import clean_editor_js
from .....product.error_codes import ProductErrorCode
from ....core.validators import validate_slug_and_generate_if_needed
def clean_weight(cleaned_input: dict):
    """Raise a ValidationError when the input contains a negative weight."""
    weight = cleaned_input.get("weight")
    if not weight or weight.value >= 0:
        return
    error = ValidationError(
        "Product can't have negative weight.",
        code=ProductErrorCode.INVALID.value,
    )
    raise ValidationError({"weight": error})
def clean_slug(cleaned_input: dict, instance):
    """Ensure the input has a slug, generating one from ``name`` when missing."""
    try:
        validate_slug_and_generate_if_needed(instance, "name", cleaned_input)
    except ValidationError as error:
        # Re-raise as a field-level error with the product-specific code.
        error.code = ProductErrorCode.REQUIRED.value
        raise ValidationError({"slug": error}) from error
def clean_description(cleaned_input: dict):
    """Derive ``description_plaintext`` from the EditorJS description, if given."""
    if "description" not in cleaned_input:
        return
    description = cleaned_input["description"]
    plaintext = ""
    if description:
        plaintext = clean_editor_js(description, to_string=True)
    cleaned_input["description_plaintext"] = plaintext
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/product/mutations/product/product_cleaner.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/webhook/transport/asynchronous/tests/test_send_webhooks_async_for_app.py | from unittest.mock import ANY, patch
from .....core.models import EventDelivery, EventDeliveryAttempt, EventDeliveryStatus
from ..transport import (
WebhookResponse,
send_webhooks_async_for_app,
)
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhook_using_scheme_method"
)
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhooks_async_for_app.apply_async"
)
def test_send_webhooks_async_for_app(
    # NOTE: @patch decorators apply bottom-up, so the innermost patch
    # (apply_async) arrives as the first mock argument.
    mock_send_webhooks_async_for_app_apply_async,
    mock_send_webhook_using_scheme_method,
    app,
    event_delivery,
):
    """A pending delivery is sent successfully, the task re-schedules itself,
    and the delivery row is removed afterwards."""
    # given
    assert EventDelivery.objects.filter(status=EventDeliveryStatus.PENDING).exists()
    mock_send_webhook_using_scheme_method.return_value = WebhookResponse(
        content="", status=EventDeliveryStatus.SUCCESS
    )
    # when
    send_webhooks_async_for_app(app_id=app.id)
    # then
    mock_send_webhook_using_scheme_method.assert_called_once()
    # the task queues itself again for the same app
    mock_send_webhooks_async_for_app_apply_async.assert_called_once_with(
        kwargs={"app_id": app.id, "telemetry_context": ANY},
    )
    # deliveries should be cleared
    assert not EventDelivery.objects.exists()
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhook_using_scheme_method"
)
def test_send_webhooks_async_for_app_no_deliveries(
    mock_send_webhook_using_scheme_method, app
):
    """Nothing is sent when the app has no pending deliveries."""
    # given
    assert not EventDelivery.objects.filter(status=EventDeliveryStatus.PENDING).exists()
    # when
    send_webhooks_async_for_app(app_id=app.id)
    # then
    # `.called == 0` only passed because False == 0; assert the intent directly.
    mock_send_webhook_using_scheme_method.assert_not_called()
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhook_using_scheme_method"
)
def test_send_webhooks_async_for_app_doesnt_pick_failed(
    mock_send_webhook_using_scheme_method, app, event_delivery
):
    """Deliveries already marked FAILED are not retried by the task."""
    # given
    event_delivery.status = EventDeliveryStatus.FAILED
    event_delivery.save()
    assert not EventDelivery.objects.filter(status=EventDeliveryStatus.PENDING).exists()
    # when
    send_webhooks_async_for_app(app_id=app.id)
    # then
    # `.called == 0` only passed because False == 0; assert the intent directly.
    mock_send_webhook_using_scheme_method.assert_not_called()
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhook_using_scheme_method"
)
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhooks_async_for_app.apply_async"
)
def test_send_webhooks_async_for_app_no_payload(
    mock_send_webhooks_async_for_app_apply_async,
    mock_send_webhook_using_scheme_method,
    app,
    event_delivery,
):
    """A delivery without a payload is not sent; a FAILED attempt is recorded
    while the delivery itself stays PENDING."""
    # given
    event_delivery.payload = None
    event_delivery.save()
    assert EventDelivery.objects.filter(status=EventDeliveryStatus.PENDING).exists()
    # when
    send_webhooks_async_for_app(app_id=app.id)
    # then
    mock_send_webhook_using_scheme_method.assert_not_called()
    deliveries = EventDelivery.objects.all()
    assert len(deliveries) == 1
    assert deliveries[0].status == EventDeliveryStatus.PENDING
    # the missing payload is documented by a failed attempt record
    assert EventDeliveryAttempt.objects.filter(
        status=EventDeliveryStatus.FAILED
    ).exists()
    mock_send_webhooks_async_for_app_apply_async.assert_called_once_with(
        kwargs={"app_id": app.id, "telemetry_context": ANY},
    )
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhook_using_scheme_method"
)
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhooks_async_for_app.apply_async"
)
def test_send_webhooks_async_for_app_failed_status(
    mock_send_webhooks_async_for_app_apply_async,
    mock_send_webhook_using_scheme_method,
    app,
    event_delivery,
):
    """A failed send leaves the delivery PENDING (eligible for retry) and
    records a FAILED attempt."""
    # given
    assert EventDelivery.objects.filter(status=EventDeliveryStatus.PENDING).exists()
    mock_send_webhook_using_scheme_method.return_value = WebhookResponse(
        content="", status=EventDeliveryStatus.FAILED
    )
    # when
    send_webhooks_async_for_app(app_id=app.id)
    # then
    mock_send_webhook_using_scheme_method.assert_called_once()
    deliveries = EventDelivery.objects.all()
    assert len(deliveries) == 1
    assert deliveries[0].status == EventDeliveryStatus.PENDING
    assert EventDeliveryAttempt.objects.filter(
        status=EventDeliveryStatus.FAILED
    ).exists()
    mock_send_webhooks_async_for_app_apply_async.assert_called_once_with(
        kwargs={"app_id": app.id, "telemetry_context": ANY},
    )
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhook_using_scheme_method"
)
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhooks_async_for_app.apply_async"
)
def test_send_multiple_webhooks_async_for_app(
    mock_send_webhooks_async_for_app_apply_async,
    mock_send_webhook_using_scheme_method,
    app,
    event_deliveries,
):
    """All pending deliveries of the app are sent and removed afterwards."""
    # given
    pending = EventDelivery.objects.filter(status=EventDeliveryStatus.PENDING)
    assert pending.count() == 3
    mock_send_webhook_using_scheme_method.return_value = WebhookResponse(
        content="", status=EventDeliveryStatus.SUCCESS
    )

    # when
    send_webhooks_async_for_app(app_id=app.id)

    # then
    assert mock_send_webhook_using_scheme_method.call_count == 3
    mock_send_webhooks_async_for_app_apply_async.assert_called_once_with(
        kwargs={"app_id": app.id, "telemetry_context": ANY},
    )
    # deliveries should be cleared
    assert not EventDelivery.objects.exists()
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhook_using_scheme_method"
)
@patch(
    "saleor.webhook.transport.asynchronous.transport.send_webhooks_async_for_app.apply_async"
)
def test_send_webhooks_async_for_app_last_retry_failed(
    mock_send_webhooks_async_for_app_apply_async,
    mock_send_webhook_using_scheme_method,
    app,
    event_delivery,
):
    """When a delivery that already has five failed attempts fails again,
    the delivery itself is marked FAILED instead of staying PENDING."""
    # given
    assert EventDelivery.objects.filter(status=EventDeliveryStatus.PENDING).exists()
    # pre-create five failed attempts so the next failure exceeds the limit
    EventDeliveryAttempt.objects.bulk_create(
        [
            EventDeliveryAttempt(
                delivery=event_delivery, status=EventDeliveryStatus.FAILED
            )
            for _ in range(5)
        ]
    )
    mock_send_webhook_using_scheme_method.return_value = WebhookResponse(
        content="", status=EventDeliveryStatus.FAILED
    )
    # when
    send_webhooks_async_for_app(app_id=app.id)
    # then
    mock_send_webhook_using_scheme_method.assert_called_once()
    deliveries = EventDelivery.objects.all()
    assert len(deliveries) == 1
    assert deliveries[0].status == EventDeliveryStatus.FAILED
    assert (
        len(EventDeliveryAttempt.objects.filter(status=EventDeliveryStatus.FAILED)) == 6
    )
    mock_send_webhooks_async_for_app_apply_async.assert_called_once_with(
        kwargs={"app_id": app.id, "telemetry_context": ANY},
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/transport/asynchronous/tests/test_send_webhooks_async_for_app.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/core/utils/tests/test_update_mutation_manager.py | from ....order import OrderStatus
from ..update_mutation_manager import InstanceTracker
def test_instance_tracker(product):
    """Changing a tracked field is reported by get_modified_fields."""
    # given
    tracker = InstanceTracker(product, ["name", "slug"])

    # when
    product.name = f"{product.name}_updated"
    modified = tracker.get_modified_fields()

    # then
    assert modified == ["name"]
def test_instance_tracker_no_instance_on_init(product):
    """Assigning an instance after init reports all tracked fields as modified."""
    # given
    tracked = ["name", "slug"]
    tracker = InstanceTracker(None, tracked)

    # when
    tracker.instance = product
    modified = tracker.get_modified_fields()

    # then
    assert modified == tracked
def test_instance_tracker_no_instance_on_init_and_on_get_modified_fields():
    """With no instance at all, no fields are reported as modified."""
    # given
    tracker = InstanceTracker(None, ["name", "slug"])

    # when / then
    assert tracker.get_modified_fields() == []
def test_instance_tracker_remove_instance(product):
    """Clearing the instance after init reports all tracked fields as modified."""
    # given
    tracked = ["name", "slug"]
    tracker = InstanceTracker(product, tracked)

    # when
    tracker.instance = None
    modified = tracker.get_modified_fields()

    # then
    assert modified == tracked
def test_instance_tracker_foreign_relation(order_with_lines):
    """Changes on a tracked related object surface via get_foreign_modified_fields."""
    # given
    order = order_with_lines
    address_fields = ["last_name", "first_name"]
    tracker = InstanceTracker(
        order,
        ["status", "shipping_address", "billing_address"],
        foreign_fields_to_track={"shipping_address": address_fields},
    )
    order.status = OrderStatus.FULFILLED
    order.shipping_address.last_name = "new_last_name"

    # when
    modified = tracker.get_modified_fields()
    foreign_modified = tracker.get_foreign_modified_fields()

    # then
    assert modified == ["status", "shipping_address"]
    assert foreign_modified["shipping_address"] == ["last_name"]
def test_instance_tracker_foreign_relation_new_instance(order_with_lines):
    """Swapping in a new related object reports all its tracked fields as modified."""
    # given
    order = order_with_lines
    order.shipping_address = None
    address_fields = ["last_name", "first_name"]
    tracker = InstanceTracker(
        order,
        ["status", "shipping_address", "billing_address"],
        foreign_fields_to_track={"shipping_address": address_fields},
    )
    order.status = OrderStatus.FULFILLED
    order.shipping_address = order.billing_address

    # when
    modified = tracker.get_modified_fields()
    foreign_modified = tracker.get_foreign_modified_fields()

    # then
    assert modified == ["status", "shipping_address"]
    assert foreign_modified["shipping_address"] == address_fields
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/core/utils/tests/test_update_mutation_manager.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/core/utils/update_mutation_manager.py | from copy import deepcopy
from typing import Any, TypeVar
from django.db.models import Model
T = TypeVar("T", bound=Model)
class InstanceTracker:
    """Track modifications made to a model instance.

    A snapshot of the tracked field values is taken at construction time;
    ``get_modified_fields`` compares the instance's current values against
    that snapshot. Related objects can be tracked as well via
    ``foreign_fields_to_track``.
    """

    def __init__(
        self,
        instance: T | None,
        fields_to_track: list[str],
        foreign_fields_to_track: dict[str, list[str]] | None = None,
    ):
        # Instance under observation; may be assigned later when the object
        # only comes into existence after the tracker is created.
        self.instance = instance
        self.fields_to_track = fields_to_track
        # Deep copy so later in-place mutations of mutable field values do
        # not silently update the snapshot as well.
        self.initial_instance_values: dict[str, Any] = (
            deepcopy(self.get_field_values()) if instance else {}
        )
        # Sub-trackers for related objects, keyed by the attribute name on
        # ``instance`` (e.g. "shipping_address").
        self.foreign_instance_relation: dict[str, InstanceTracker] = {}
        # True when the tracked instance did not exist yet at init time.
        self.create = instance is None
        if foreign_fields_to_track:
            for lookup, fields in foreign_fields_to_track.items():
                foreign_instance = getattr(instance, lookup, None)
                self.foreign_instance_relation[lookup] = InstanceTracker(
                    foreign_instance,
                    fields,
                    None,
                )

    def get_field_values(self) -> dict[str, Any]:
        """Return a mapping of tracked field names to current instance values."""
        if not self.instance:
            return {}
        return {field: getattr(self.instance, field) for field in self.fields_to_track}

    def get_modified_fields(self) -> list[str]:
        """Return the tracked fields whose values differ from the snapshot.

        When no initial snapshot exists (the tracker was created without an
        instance), all tracked fields are reported as modified if an instance
        is present now; otherwise an empty list is returned.
        """
        if not self.initial_instance_values:
            if self.instance:
                return self.fields_to_track
            return []
        current_values: dict[str, Any] = self.get_field_values()
        return [
            field
            for field in self.initial_instance_values
            if self.initial_instance_values.get(field) != current_values.get(field)
        ]

    def get_foreign_modified_fields(self) -> dict[str, list[str]]:
        """Return modified fields of tracked related objects, keyed by lookup."""
        modified_fields = {}
        for lookup, tracker in self.foreign_instance_relation.items():
            # Re-read the relation: the related object itself may have been
            # swapped for a different one since initialization.
            tracker.instance = getattr(self.instance, lookup, None)
            foreign_modified = tracker.get_modified_fields()
            if foreign_modified:
                modified_fields[lookup] = foreign_modified
        return modified_fields
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/core/utils/update_mutation_manager.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/account/mutations/account/utils.py | ADDRESS_UPDATE_FIELDS = {
"city",
"city_area",
"company_name",
"country",
"country_area",
"first_name",
"last_name",
"metadata",
"phone",
"postal_code",
"private_metadata",
"street_address_1",
"street_address_2",
"validation_skipped",
}
ACCOUNT_UPDATE_FIELDS = {
"metadata",
"first_name",
"last_name",
"language_code",
}
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/account/mutations/account/utils.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/webhook/transport/tests/test_list_stored_payment_methods.py | import copy
from unittest.mock import patch
import pytest
from ....payment.interface import (
PaymentGateway,
PaymentGatewayInitializeTokenizationResult,
PaymentMethodCreditCardInfo,
PaymentMethodData,
PaymentMethodTokenizationResult,
StoredPaymentMethodRequestDeleteResult,
)
from ...response_schemas.utils.annotations import logger as annotations_logger
from ..list_stored_payment_methods import (
get_list_stored_payment_methods_from_response,
get_response_for_payment_gateway_initialize_tokenization,
get_response_for_payment_method_tokenization,
get_response_for_stored_payment_method_request_delete,
logger,
)
from ..utils import to_payment_app_id
@patch.object(annotations_logger, "warning")
def test_get_list_stored_payment_methods_from_response(mocked_logger, app):
    """Valid entries are parsed into PaymentMethodData; entries that fail
    validation are skipped and reported through the annotations logger."""
    # given
    payment_method_response = {
        "id": "method-1",
        "supportedPaymentFlows": ["INTERACTIVE"],
        "type": "Credit Card",
        "creditCardInfo": {
            "brand": "visa",
            "lastDigits": "1234",
            "expMonth": 1,
            "expYear": 2023,
            "firstDigits": "123456",
        },
        "name": "***1234",
        "data": {"some": "data"},
    }
    # invalid second payment method due to missing id
    second_payment_method = copy.deepcopy(payment_method_response)
    del second_payment_method["id"]
    list_stored_payment_methods_response = {
        "paymentMethods": [payment_method_response, second_payment_method]
    }
    currency = "usd"
    # when
    response = get_list_stored_payment_methods_from_response(
        app, list_stored_payment_methods_response, currency
    )
    # then
    # only the valid first method is returned
    assert len(response) == 1
    assert response[0] == PaymentMethodData(
        id=to_payment_app_id(app, payment_method_response["id"]),
        external_id=payment_method_response["id"],
        supported_payment_flows=[
            flow.lower()
            for flow in payment_method_response.get("supportedPaymentFlows", [])
        ],
        type=payment_method_response["type"],
        credit_card_info=PaymentMethodCreditCardInfo(
            brand=payment_method_response["creditCardInfo"]["brand"],
            last_digits=payment_method_response["creditCardInfo"]["lastDigits"],
            exp_year=payment_method_response["creditCardInfo"]["expYear"],
            exp_month=payment_method_response["creditCardInfo"]["expMonth"],
            first_digits=payment_method_response["creditCardInfo"].get("firstDigits"),
        )
        if payment_method_response.get("creditCardInfo")
        else None,
        name=payment_method_response["name"],
        data=payment_method_response["data"],
        gateway=PaymentGateway(
            id=app.identifier,
            name=app.name,
            currencies=[currency],
            config=[],
        ),
    )
    # the skipped method is logged once with the app id attached
    assert mocked_logger.call_count == 1
    error_msg = mocked_logger.call_args[0][1]
    assert error_msg == "Skipping invalid stored payment method"
    assert mocked_logger.call_args[1]["extra"]["app"] == app.id
def test_get_list_stored_payment_methods_from_response_only_required_fields(app):
    """A payment method carrying only ``id`` and ``type`` is parsed with defaults."""
    # given
    method = {"id": "method-1", "type": "Credit Card"}
    currency = "usd"

    # when
    parsed = get_list_stored_payment_methods_from_response(
        app, {"paymentMethods": [method]}, currency
    )

    # then
    expected = PaymentMethodData(
        id=to_payment_app_id(app, method["id"]),
        external_id=method["id"],
        supported_payment_flows=[],
        type=method["type"],
        credit_card_info=None,
        gateway=PaymentGateway(
            id=app.identifier,
            name=app.name,
            currencies=[currency],
            config=[],
        ),
    )
    assert parsed == [expected]
@patch.object(logger, "warning")
def test_get_list_stored_payment_methods_from_response_invalid_input_data(
    mocked_logger, app
):
    """A malformed (None) webhook response yields no methods and logs a warning."""
    # when
    parsed = get_list_stored_payment_methods_from_response(app, None, "usd")

    # then
    assert parsed == []
    mocked_logger.assert_called_once()
    args, kwargs = mocked_logger.call_args
    assert "Skipping stored payment methods from app" in args[0]
    assert kwargs["extra"]["app"] == app.id
@pytest.mark.parametrize(
    "response_data",
    [
        # Response with SUCCESSFULLY_DELETED result
        {
            "result": StoredPaymentMethodRequestDeleteResult.SUCCESSFULLY_DELETED.name,
            "error": None,
        },
        # Response with FAILED_TO_DELETE result and error
        {
            "result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELETE.name,
            "error": "Some error occurred",
        },
        # Response with FAILED_TO_DELIVER result and error
        {
            "result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELIVER.name,
            "error": "Some error occurred",
        },
        # Response with FAILED_TO_DELETE result no error
        {"result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELETE.name},
        # Response with FAILED_TO_DELIVER result, error as None
        {
            "result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELIVER.name,
            "error": None,
        },
    ],
)
def test_get_response_for_stored_payment_method_request_delete_valid_response(
    response_data,
):
    """Well-formed payloads map result and error straight onto the response."""
    # when
    response = get_response_for_stored_payment_method_request_delete(response_data)
    # then
    assert response.result.name == response_data["result"]
    assert response.error == response_data.get("error")
@pytest.mark.parametrize(
    ("response_data", "expected_error"),
    [
        # Missing `result` in response
        (
            {"error": "Missing result"},
            "Missing value for field: result. Input: {'error': 'Missing result'}.",
        ),
        # Invalid `result` value
        (
            {"result": "INVALID_RESULT", "error": "Invalid result value"},
            "Incorrect value (INVALID_RESULT) for field: result. Error: Value error, "
            "Enum name not found: INVALID_RESULT.",
        ),
    ],
)
def test_get_response_for_stored_payment_method_request_delete_invalid_response(
    response_data, expected_error
):
    """Malformed payloads fall back to FAILED_TO_DELETE with a descriptive error."""
    # when
    response = get_response_for_stored_payment_method_request_delete(response_data)
    # then
    assert (
        response.result.name
        == StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELETE.name
    )
    assert expected_error in response.error
def test_get_response_for_stored_payment_method_request_delete_response_is_none():
    """A missing response (delivery failure) maps to FAILED_TO_DELIVER."""
    # when
    result = get_response_for_stored_payment_method_request_delete(None)

    # then
    assert result.result == StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELIVER
    assert result.error == "Failed to delivery request."
@pytest.mark.parametrize(
    "response_data",
    [
        # Response with SUCCESSFULLY_INITIALIZED result and data
        {
            "result": PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED.name,
            "data": {"foo": "bar"},
        },
        # Response with SUCCESSFULLY_INITIALIZED result and no data
        {
            "result": PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED.name,
        },
        # Response with FAILED_TO_INITIALIZE result and error
        {
            "result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE.name,
            "error": "Some error occurred",
        },
        # Response with FAILED_TO_DELIVER result, error and data as None
        {
            "result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER.name,
            "error": None,
            "data": None,
        },
    ],
)
def test_get_response_for_payment_gateway_initialize_tokenization_valid_response(
    response_data,
):
    """Well-formed payloads map result, error and data straight onto the response."""
    # when
    response = get_response_for_payment_gateway_initialize_tokenization(response_data)
    # then
    assert response.result.name == response_data["result"]
    assert response.error == response_data.get("error")
    assert response.data == response_data.get("data")
@pytest.mark.parametrize(
    ("response_data", "expected_error"),
    [
        # Missing `result` in response
        (
            {"error": "Missing result"},
            "Missing value for field: result. Input: {'error': 'Missing result'}.",
        ),
        # Invalid `result` value
        (
            {"result": "INVALID_RESULT", "error": "Invalid result value"},
            "Incorrect value (INVALID_RESULT) for field: result. Error: Value error, "
            "Enum name not found: INVALID_RESULT.",
        ),
    ],
)
def test_get_response_for_payment_gateway_initialize_tokenization_invalid_response(
    response_data, expected_error
):
    """Malformed payloads fall back to FAILED_TO_INITIALIZE with a descriptive error."""
    # when
    response = get_response_for_payment_gateway_initialize_tokenization(response_data)
    # then
    assert (
        response.result
        == PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE
    )
    assert response.error == expected_error
def test_get_response_for_payment_gateway_initialize_tokenization_response_is_none():
    """A missing response (delivery failure) maps to FAILED_TO_DELIVER."""
    # when
    result = get_response_for_payment_gateway_initialize_tokenization(None)

    # then
    assert result.result == PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER
    assert result.error == "Failed to delivery request."
@pytest.mark.parametrize(
    (
        "response_data",
        "expected_result",
        "expected_error",
        "expected_id",
        "expected_data",
    ),
    [
        # Successfully tokenized
        (
            {
                "id": "123",
                "result": PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED.name,
                "data": {"key": "value"},
                "error": None,
            },
            PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED,
            None,
            "123",
            {"key": "value"},
        ),
        # Additional action required
        (
            {
                "id": "456",
                "result": PaymentMethodTokenizationResult.ADDITIONAL_ACTION_REQUIRED.name,
                "data": {"action": "verify"},
                "error": None,
            },
            PaymentMethodTokenizationResult.ADDITIONAL_ACTION_REQUIRED,
            None,
            "456",
            {"action": "verify"},
        ),
        # Pending
        (
            {
                "result": PaymentMethodTokenizationResult.PENDING.name,
                "data": {"status": "pending"},
            },
            PaymentMethodTokenizationResult.PENDING,
            None,
            None,
            {"status": "pending"},
        ),
        # Failed to tokenize
        (
            {
                "result": PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE.name,
                "error": "Tokenization failed",
            },
            PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE,
            "Tokenization failed",
            None,
            None,
        ),
        # Invalid result
        (
            {"result": "INVALID_RESULT"},
            PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE,
            "Missing or invalid value for `result`: INVALID_RESULT. Possible values: "
            f"{', '.join([value.name for value in PaymentMethodTokenizationResult])}.",
            None,
            None,
        ),
        # No response data
        (
            None,
            PaymentMethodTokenizationResult.FAILED_TO_DELIVER,
            "Failed to delivery request.",
            None,
            None,
        ),
    ],
)
def test_get_response_for_payment_method_tokenization(
    response_data, expected_result, expected_error, expected_id, expected_data, app
):
    """Each webhook payload maps onto result, error, app-scoped id and data."""
    # when
    response = get_response_for_payment_method_tokenization(response_data, app)
    # then
    assert response.result == expected_result
    assert response.error == expected_error
    # the external id, when present, is namespaced with the app identifier
    assert response.id == (to_payment_app_id(app, expected_id) if expected_id else None)
    assert response.data == expected_data
@pytest.mark.parametrize(
    ("response_data", "error_msg"),
    [
        # Missing `id` in success response
        (
            {
                "result": PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED.name,
                "data": {"key": "value"},
            },
            "Missing value for field: id. Input: ",
        ),
        # `id` as int for pending response
        (
            {
                "result": PaymentMethodTokenizationResult.PENDING.name,
                "id": 123,
            },
            "Incorrect value (123) for field: id. Error: Input should be a valid string.",
        ),
        # Invalid `error` in failed response
        (
            {
                "result": PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE.name,
                "error": 123,
            },
            "Incorrect value (123) for field: error. Error: Input should be a valid string.",
        ),
    ],
)
def test_get_response_for_payment_method_tokenization_validation_error(
    response_data, error_msg, app
):
    """Schema violations are reported as FAILED_TO_TOKENIZE with a field error."""
    # when
    result = get_response_for_payment_method_tokenization(response_data, app)

    # then
    assert result.result == PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE
    assert error_msg in result.error
def test_get_response_for_payment_method_tokenization_value_error(app):
    """An unknown ``result`` value is reported as FAILED_TO_TOKENIZE."""
    # given
    invalid_result = "INVALID_RESULT"

    # when
    response = get_response_for_payment_method_tokenization(
        {"result": invalid_result}, app
    )

    # then
    assert response.result == PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE
    expected_prefix = (
        f"Missing or invalid value for `result`: {invalid_result}. Possible values: "
    )
    assert expected_prefix in response.error
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/transport/tests/test_list_stored_payment_methods.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:.semgrep/correctness/django/django-no-default-token-generator.py | def test_using_default_token_generator():
# ruleid: django-no-default-token-generator
from django.contrib.auth.tokens import default_token_generator
def test_using_token_generator_class():
    """Semgrep fixture: importing ``PasswordResetTokenGenerator`` must be flagged."""
    # ruleid: django-no-default-token-generator
    from django.contrib.auth.tokens import PasswordResetTokenGenerator
def test_ok_not_using_django_builtin_default_token_generator():
    """Semgrep fixture: the project-local token generator must NOT match."""
    # ok: django-no-default-token-generator
    from saleor.core.tokens import token_generator
| {
"repo_id": "saleor/saleor",
"file_path": ".semgrep/correctness/django/django-no-default-token-generator.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/core/rlimit.py | import resource
from django.core.exceptions import ImproperlyConfigured
RLIMIT_TYPE = resource.RLIMIT_DATA
def is_soft_limit_set_without_hard_limit(soft_limit_in_MB, hard_limit_in_MB):
    """Return True when only the soft limit was provided."""
    has_soft = soft_limit_in_MB is not None
    has_hard = hard_limit_in_MB is not None
    return has_soft and not has_hard
def is_hard_limit_set_without_soft_limit(soft_limit_in_MB, hard_limit_in_MB):
    """Return True when only the hard limit was provided."""
    has_soft = soft_limit_in_MB is not None
    has_hard = hard_limit_in_MB is not None
    return has_hard and not has_soft
def validate_and_set_rlimit(soft_limit_in_MB, hard_limit_in_MB):
    """Set the memory limit for the process.

    This function sets the soft and hard memory limits for the process using
    the resource module. The limits are specified in megabytes (MB) and
    are converted to bytes before being set. If the limits are not specified,
    the function sets the limits to infinity (no limit).

    If the soft limit is reached, the process will raise a `MemoryError`.

    Raises:
        ImproperlyConfigured: if a limit is not an integer, only one of the
            two limits is provided, a limit is negative, or the soft limit
            exceeds the hard limit.
    """
    try:
        # NOTE(review): falsy inputs ("" or 0) are treated as "not set" here,
        # so a literal 0 MB never reaches setrlimit as a 0-byte limit.
        soft_limit_in_MB = int(soft_limit_in_MB) if soft_limit_in_MB else None
        hard_limit_in_MB = int(hard_limit_in_MB) if hard_limit_in_MB else None
    except ValueError as e:
        raise ImproperlyConfigured(
            "Memory limits must be integers(`SOFT_MEMORY_LIMIT_IN_MB` or `HARD_MEMORY_LIMIT_IN_MB`)."
        ) from e
    # Both limits must be provided together; half-configured setups are
    # rejected rather than silently producing a one-sided rlimit.
    if is_soft_limit_set_without_hard_limit(
        soft_limit_in_MB, hard_limit_in_MB
    ) or is_hard_limit_set_without_soft_limit(soft_limit_in_MB, hard_limit_in_MB):
        raise ImproperlyConfigured(
            "Both `SOFT_MEMORY_LIMIT_IN_MB` and `HARD_MEMORY_LIMIT_IN_MB` must be set to enable memory limits."
        )
    # Decimal megabytes: 1 MB == 1_000_000 bytes; unset means "no limit".
    soft_memory_limit = (
        soft_limit_in_MB * 1000 * 1000 if soft_limit_in_MB else resource.RLIM_INFINITY
    )
    hard_memory_limit = (
        hard_limit_in_MB * 1000 * 1000 if hard_limit_in_MB else resource.RLIM_INFINITY
    )
    if soft_memory_limit > hard_memory_limit:
        raise ImproperlyConfigured(
            "Soft memory limit cannot be greater than hard memory limit."
        )
    # RLIM_INFINITY may be a negative sentinel on some platforms, so the
    # negativity checks must explicitly exclude it.
    if soft_memory_limit < 0 and soft_memory_limit != resource.RLIM_INFINITY:
        raise ImproperlyConfigured(
            "Soft memory limit(SOFT_MEMORY_LIMIT_IN_MB) cannot be negative."
        )
    if hard_memory_limit < 0 and hard_memory_limit != resource.RLIM_INFINITY:
        raise ImproperlyConfigured(
            "Hard memory limit(HARD_MEMORY_LIMIT_IN_MB) cannot be negative."
        )
    resource.setrlimit(RLIMIT_TYPE, (soft_memory_limit, hard_memory_limit))
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/core/rlimit.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/core/tests/test_rlimit.py | import resource
from unittest.mock import patch
import pytest
from django.core.exceptions import ImproperlyConfigured
from ..rlimit import RLIMIT_TYPE, validate_and_set_rlimit
@patch("saleor.core.rlimit.resource.setrlimit")
def test_limit_passed_as_string(mock_setrlimit):
# given
soft_limit = "1000"
hard_limit = "2000"
expected_soft_limit = int(soft_limit) * 1000 * 1000
expected_hard_limit = int(hard_limit) * 1000 * 1000
# when
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_called_once_with(
RLIMIT_TYPE,
(expected_soft_limit, expected_hard_limit),
)
@patch("saleor.core.rlimit.resource.setrlimit")
def test_limit_passed_as_int(mock_setrlimit):
# given
soft_limit = 1000
hard_limit = 2000
expected_soft_limit = int(soft_limit) * 1000 * 1000
expected_hard_limit = int(hard_limit) * 1000 * 1000
# when
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_called_once_with(
RLIMIT_TYPE,
(expected_soft_limit, expected_hard_limit),
)
@patch("saleor.core.rlimit.resource.setrlimit")
def test_no_limits(mock_setrlimit):
# given
soft_limit = None
hard_limit = None
expected_soft_limit = resource.RLIM_INFINITY
expected_hard_limit = resource.RLIM_INFINITY
# when
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_called_once_with(
RLIMIT_TYPE,
(expected_soft_limit, expected_hard_limit),
)
@patch("saleor.core.rlimit.resource.setrlimit")
def test_only_soft_limit_set(mock_setrlimit):
# given
soft_limit = 1000
hard_limit = None
# when
with pytest.raises(ImproperlyConfigured):
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_not_called()
@patch("saleor.core.rlimit.resource.setrlimit")
def test_only_hard_limit_set(mock_setrlimit):
# given
soft_limit = None
hard_limit = 1000
# when
with pytest.raises(ImproperlyConfigured):
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_not_called()
@patch("saleor.core.rlimit.resource.setrlimit")
def test_negative_soft_limit(mock_setrlimit):
# given
soft_limit = -10
hard_limit = 1000
# when
with pytest.raises(ImproperlyConfigured):
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_not_called()
@patch("saleor.core.rlimit.resource.setrlimit")
def test_negative_hard_limit(mock_setrlimit):
# given
soft_limit = 1000
hard_limit = -10
# when
with pytest.raises(ImproperlyConfigured):
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_not_called()
@patch("saleor.core.rlimit.resource.setrlimit")
def test_invalid_soft_limit(mock_setrlimit):
# given
soft_limit = "invalid"
hard_limit = 1000
# when
with pytest.raises(ImproperlyConfigured):
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_not_called()
@patch("saleor.core.rlimit.resource.setrlimit")
def test_invalid_hard_limit(mock_setrlimit):
# given
soft_limit = 1000
hard_limit = "invalid"
# when
with pytest.raises(ImproperlyConfigured):
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_not_called()
@patch("saleor.core.rlimit.resource.setrlimit")
def test_soft_limit_greater_than_hard_limit(mock_setrlimit):
# given
soft_limit = 2000
hard_limit = 1000
# when
with pytest.raises(ImproperlyConfigured):
validate_and_set_rlimit(soft_limit, hard_limit)
# then
mock_setrlimit.assert_not_called()
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/core/tests/test_rlimit.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/webhook/response_schemas/utils/annotations.py | import logging
from datetime import UTC, datetime
from enum import Enum
from typing import Annotated, Any, TypeVar
import pydantic
from pydantic import (
AfterValidator,
BeforeValidator,
Field,
GetCoreSchemaHandler,
TypeAdapter,
ValidationError,
ValidationInfo,
ValidatorFunctionWrapHandler,
WrapValidator,
)
from pydantic_core import PydanticOmit, PydanticUseDefault, core_schema
from ....core.utils.metadata_manager import metadata_is_valid
from ....payment import interface
M = TypeVar("M")
logger = logging.getLogger(__name__)
logger = logging.getLogger(__name__)
def skip_invalid_metadata[M](value: M) -> M:
if not metadata_is_valid(value):
raise PydanticUseDefault()
return value
Metadata = Annotated[dict[str, str], BeforeValidator(skip_invalid_metadata)]
def default_if_none(value: Any) -> Any:
    """Swap an explicit None for the field's declared default."""
    if value is not None:
        return value
    raise PydanticUseDefault()


T = TypeVar("T")

DefaultIfNone = Annotated[T, BeforeValidator(default_if_none)]
def skip_invalid(
    value: Any, handler: ValidatorFunctionWrapHandler, info: ValidationInfo
) -> Any:
    """Run the wrapped validator; on failure, log a warning and omit the value.

    Raises PydanticOmit so the invalid item is dropped from its parent
    container instead of failing the whole model.
    """
    try:
        return handler(value)
    except ValidationError as err:
        context = info.context or {}
        # Callers may supply a custom log message and the calling app
        # through the validation context.
        custom_message = context.get("custom_message", "Skipping invalid value")
        app = context.get("app")
        logger.warning(
            "%s Value: %s Error: %s",
            custom_message,
            value,
            str(err),
            extra={
                "app": app.id if app else None,
            },
        )
        raise PydanticOmit() from err


OnErrorSkip = Annotated[T, WrapValidator(skip_invalid)]
def default_if_invalid(
    value: Any, handler: ValidatorFunctionWrapHandler, info: ValidationInfo
) -> Any:
    """Run the wrapped validator; on failure, log and fall back to the default.

    Raises PydanticUseDefault so the field takes its declared default
    instead of failing the whole model.
    """
    try:
        return handler(value)
    except ValidationError as err:
        context = info.context or {}
        app = context.get("app")
        logger.warning(
            "Skipping invalid value: %s error: %s",
            value,
            str(err),
            extra={
                "app": app.id if app else None,
                "field_name": info.field_name,
            },
        )
        raise PydanticUseDefault() from err


OnErrorDefault = Annotated[T, WrapValidator(default_if_invalid)]

# Converts a parsed datetime to UTC after validation.
# NOTE(review): astimezone() on a *naive* datetime assumes the system local
# timezone — confirm inputs are always offset-aware.
DatetimeUTC = Annotated[datetime, AfterValidator(lambda v: v.astimezone(UTC))]
def skip_invalid_literal[T](value: T, handler: ValidatorFunctionWrapHandler) -> T:
try:
return handler(value)
except ValidationError as err:
logger.warning("Skipping invalid literal value: %s", err)
raise PydanticOmit() from err
OnErrorSkipLiteral = Annotated[T, WrapValidator(skip_invalid_literal)]
class EnumName:
    """Validate and serialize enum by name."""

    def __init__(self, *, ignore_case: bool = False):
        # When True, incoming names are matched case-insensitively.
        self.ignore_case = ignore_case

    def __get_pydantic_core_schema__(
        self, enum_cls: type[Enum], _handler: GetCoreSchemaHandler
    ):
        # Synthetic enum mirroring the *names* of ``enum_cls``; used only so
        # the generated JSON schema advertises member names, not values.
        name_enum = Enum(  # type: ignore[misc]
            "name_enum", {member.name: member.name for member in enum_cls}
        )

        def enum_or_name(value: Enum | str) -> Enum:
            # Already-constructed members pass through untouched.
            if isinstance(value, enum_cls):
                return value
            if isinstance(value, str):
                try:
                    if self.ignore_case:
                        # StopIteration from next() is handled below.
                        return next(
                            member
                            for member in enum_cls
                            if member.name.lower() == value.lower()
                        )
                    return enum_cls[value]
                except (KeyError, StopIteration) as e:
                    raise ValueError(f"Enum name not found: {value}") from e
            raise ValueError(
                f"Expected enum member or name, got {type(value).__name__}: {value}"
            )

        return core_schema.no_info_plain_validator_function(
            enum_or_name,
            json_schema_input_schema=core_schema.enum_schema(
                enum_cls, list(name_enum.__members__.values())
            ),
            ref=enum_cls.__name__,
            # Serialize back to the member's name.
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda e: e.name
            ),
        )


EnumByName = Annotated[T, EnumName()]
class JSONValue:
    """A wrapper to allow Pydantic to generate schema for JsonValue."""

    @classmethod
    def __get_pydantic_core_schema__(
        cls, source_type, handler: GetCoreSchemaHandler
    ) -> core_schema.CoreSchema:
        # Use the original JsonValue schema (full validation)
        json_schema = handler(pydantic.JsonValue)
        return json_schema

    @classmethod
    def __get_pydantic_json_schema__(cls, core_schema, handler):
        # NOTE: the `core_schema` parameter shadows the module-level
        # `pydantic_core.core_schema` import inside this method.
        # Return a standard JSON-compatible schema (no recursion errors)
        Json = Annotated[
            interface.JSONValue,
            Field(title="JsonValue"),
        ]
        return TypeAdapter(Json).json_schema()
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/response_schemas/utils/annotations.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/webhook/response_schemas/utils/helpers.py | from pydantic import ValidationError
def parse_validation_error(error: ValidationError) -> str:
    """Parse pydantic ValidationError to a human-readable message."""
    messages: list[str] = []
    for item in error.errors():
        loc = item["loc"]
        field = str(loc[0]) if loc else ""
        if item.get("type") == "missing":
            messages.append(
                f"Missing value for field: {field}. Input: {item['input']}."
            )
        else:
            messages.append(
                f"Incorrect value ({item['input']}) for field: {field}. Error: {item['msg']}."
            )
    return "\n\n".join(messages)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/response_schemas/utils/helpers.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/webhook/tests/response_schemas/test_helpers.py | from pydantic import BaseModel, ValidationError
from ...response_schemas.utils.helpers import parse_validation_error
class ExampleSchema(BaseModel):
    """Minimal model used to provoke pydantic validation errors."""

    field1: int
    field2: str
def test_parse_validation_error_single_error():
    """A single type error renders as one 'Incorrect value' line."""
    # given
    invalid_data = {"field1": "not_an_int", "field2": "valid_string"}

    # when
    try:
        ExampleSchema.model_validate(invalid_data)
    except ValidationError as error:
        error_msg = parse_validation_error(error)
    else:
        # Previously nothing happened here, leaving `error_msg` unbound and
        # failing later with a confusing NameError; fail explicitly instead.
        raise AssertionError("ValidationError was not raised")

    # then
    assert error_msg == (
        f"Incorrect value ({invalid_data['field1']}) for field: field1. Error: Input should be a valid integer, unable to parse string as an integer."
    )
def test_parse_validation_error_multiple_errors():
    """Multiple errors are joined by a blank line, one entry per field."""
    # given
    invalid_data = {"field1": "not_an_int", "field2": 123}

    # when
    try:
        ExampleSchema.model_validate(invalid_data)
    except ValidationError as error:
        error_msg = parse_validation_error(error)
    else:
        # Previously nothing happened here, leaving `error_msg` unbound and
        # failing later with a confusing NameError; fail explicitly instead.
        raise AssertionError("ValidationError was not raised")

    # then
    assert error_msg == (
        f"Incorrect value ({invalid_data['field1']}) for field: field1. Error: Input should be a valid integer, unable to parse string as an integer.\n\n"
        f"Incorrect value ({invalid_data['field2']}) for field: field2. Error: Input should be a valid string."
    )
def test_parse_validation_error_missing_field_value():
    """A missing required field renders as a 'Missing value' line."""
    # given
    invalid_data = {"field1": 1232}

    # when
    try:
        ExampleSchema.model_validate(invalid_data)
    except ValidationError as error:
        error_msg = parse_validation_error(error)
    else:
        # Previously nothing happened here, leaving `error_msg` unbound and
        # failing later with a confusing NameError; fail explicitly instead.
        raise AssertionError("ValidationError was not raised")

    # then
    assert error_msg == f"Missing value for field: field2. Input: {invalid_data}."
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/tests/response_schemas/test_helpers.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/webhook/response_schemas/payment.py | from enum import Enum
from typing import Annotated, Any, Literal
from pydantic import (
AfterValidator,
BaseModel,
Field,
ValidationInfo,
field_validator,
model_validator,
)
from ...app.models import App
from ...graphql.core.utils import str_to_enum
from ...payment import TokenizedPaymentFlow
from ...payment.interface import (
PaymentGatewayInitializeTokenizationResult,
PaymentMethodTokenizationResult,
StoredPaymentMethodRequestDeleteResult,
)
from .utils.annotations import (
DefaultIfNone,
EnumByName,
JSONValue,
OnErrorDefault,
OnErrorSkip,
)
from .utils.validators import lower_values
# Enum built from the tokenized-payment-flow choices; member names are
# produced by str_to_enum while values keep the original choice strings.
TokenizedPaymentFlowEnum = Enum(  # type: ignore[misc]
    "TokenizedPaymentFlowEnum",
    [(str_to_enum(value), value) for value, _ in TokenizedPaymentFlow.CHOICES],
)
class CreditCardInfoSchema(BaseModel):
    """Credit-card details reported by a payment app webhook."""

    brand: Annotated[str, Field(description="Brand of the credit card.")]
    last_digits: Annotated[
        str,
        Field(
            validation_alias="lastDigits", description="Last digits of the credit card."
        ),
    ]
    exp_year: Annotated[
        int,
        Field(
            validation_alias="expYear",
            description="Expiration year of the credit card.",
        ),
    ]
    exp_month: Annotated[
        int,
        Field(
            validation_alias="expMonth",
            description="Expiration month of the credit card.",
        ),
    ]
    first_digits: Annotated[
        DefaultIfNone[str],
        Field(
            validation_alias="firstDigits",
            description="First digits of the credit card.",
            default=None,
        ),
    ]

    @field_validator("last_digits", "first_digits", mode="before")
    @classmethod
    def clean_digits(cls, value: Any) -> str | None:
        # Digit fields may arrive as ints; normalize to str before validation.
        return str(value) if value is not None else None
class StoredPaymentMethodSchema(BaseModel):
    """A single stored payment method reported by a payment app."""

    id: Annotated[str, Field(description="ID of stored payment method.")]
    supported_payment_flows: Annotated[  # type: ignore[name-defined]
        DefaultIfNone[list[Literal[TokenizedPaymentFlowEnum.INTERACTIVE.name,]]],
        Field(
            validation_alias="supportedPaymentFlows",
            description="Supported flows that can be performed with this payment method.",
            default_factory=list,
        ),
        # Incoming upper-case flow names are exposed lower-cased.
        AfterValidator(lower_values),
    ]
    type: Annotated[
        str,
        Field(description="Type of stored payment method. For example: Credit Card."),
    ]
    name: Annotated[
        DefaultIfNone[str],
        Field(
            description="Name of the payment method. For example: last 4 digits of credit card, obfuscated email.",
            default=None,
        ),
    ]
    data: Annotated[
        DefaultIfNone[JSONValue],
        Field(
            description="JSON data that will be returned to client.",
            default=None,
        ),
    ]
    credit_card_info: Annotated[
        # OnErrorDefault: malformed card info is logged and replaced by None.
        OnErrorDefault[CreditCardInfoSchema],
        Field(
            validation_alias="creditCardInfo",
            description="Credit card information.",
            default=None,
        ),
    ]
class ListStoredPaymentMethodsSchema(BaseModel):
    """Top-level payload of the list-stored-payment-methods webhook response."""

    payment_methods: Annotated[
        # OnErrorSkip drops invalid entries instead of failing the whole list.
        DefaultIfNone[list[OnErrorSkip[StoredPaymentMethodSchema]]],
        Field(
            validation_alias="paymentMethods",
            default_factory=list,
            description="List of stored payment methods.",
        ),
    ]
class StoredPaymentMethodDeleteRequestedSchema(BaseModel):
    """Response payload for a stored-payment-method delete request."""

    result: Annotated[
        # Parsed by enum *name* (e.g. "SUCCESSFULLY_DELETED").
        EnumByName[StoredPaymentMethodRequestDeleteResult],
        Field(
            description="Result of the request to delete the stored payment method.",
        ),
    ]
    error: Annotated[
        str | None,
        Field(
            description="Error message if the request to delete the stored payment method failed that will be passed to the frontend.",
            default=None,
        ),
    ]
class PaymentGatewayInitializeTokenizationSessionSchema(BaseModel):
    """Response payload for payment-gateway tokenization initialization."""

    result: Annotated[
        # Parsed by enum *name* (e.g. "SUCCESSFULLY_INITIALIZED").
        EnumByName[PaymentGatewayInitializeTokenizationResult],
        Field(
            description="Result of the payment gateway initialization.",
        ),
    ]
    data: Annotated[
        DefaultIfNone[JSONValue],
        Field(
            default=None,
            description="A data required to finalize the initialization.",
        ),
    ]
    error: Annotated[
        str | None,
        Field(
            description="Error message that will be passed to the frontend.",
            default=None,
        ),
    ]
def clean_id(payment_method_id: str, info: ValidationInfo) -> str:
    """Prefix the app-local payment method ID with the app identifier.

    Raises:
        RuntimeError: if the validation context does not carry an app.
    """
    # Imported lazily to avoid a circular import at module load time.
    from ..transport.utils import to_payment_app_id

    app: App | None = info.context.get("app", None) if info.context else None
    if not app:
        raise RuntimeError("Missing app in context")
    return to_payment_app_id(app, payment_method_id)
def clean_result(result: str) -> PaymentMethodTokenizationResult:
    """Map an enum *name* string to the tokenization-result enum member."""
    return PaymentMethodTokenizationResult[result]
class PaymentMethodTokenizationSuccessSchema(BaseModel):
    """Successful (or action-required) payment-method tokenization response."""

    id: Annotated[str, Field(description="ID of the payment method.")]
    result: Annotated[  # type: ignore[name-defined]
        Literal[
            PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED.name,
            PaymentMethodTokenizationResult.ADDITIONAL_ACTION_REQUIRED.name,
        ],
        Field(
            description="Result of the payment method tokenization.",
        ),
        # Convert the validated name string into the enum member.
        AfterValidator(clean_result),
    ]
    data: Annotated[
        DefaultIfNone[JSONValue],
        Field(
            description="A data passes to the client.",
            default=None,
        ),
    ]

    @model_validator(mode="after")
    def clean_id(self, info: ValidationInfo):
        # Delegates to the module-level clean_id (which this method shadows)
        # to namespace the raw ID with the app identifier.
        payment_method_id = self.id
        self.id = clean_id(payment_method_id, info)
        return self
class PaymentMethodTokenizationPendingSchema(BaseModel):
    """Pending payment-method tokenization response; `id` may be absent."""

    id: Annotated[
        str | None, Field(description="ID of the payment method.", default=None)
    ]
    result: Annotated[  # type: ignore[name-defined]
        Literal[PaymentMethodTokenizationResult.PENDING.name],
        Field(
            description="Result of the payment method tokenization.",
        ),
        # Convert the validated name string into the enum member.
        AfterValidator(clean_result),
    ]
    data: Annotated[
        DefaultIfNone[JSONValue],
        Field(
            description="A data passes to the client.",
            default=None,
        ),
    ]

    @model_validator(mode="after")
    def clean_id(self, info: ValidationInfo):
        # Delegates to the module-level clean_id (which this method shadows),
        # unless the app has not returned an ID yet.
        payment_method_id = self.id
        if payment_method_id is None:
            return self
        self.id = clean_id(payment_method_id, info)
        return self
class PaymentMethodTokenizationFailedSchema(BaseModel):
    """Failed payment-method tokenization response."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[
            PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE.name,
            PaymentMethodTokenizationResult.FAILED_TO_DELIVER.name,
        ],
        Field(
            description="Result of the payment method tokenization.",
        ),
        # Convert the validated name string into the enum member.
        AfterValidator(clean_result),
    ]
    error: Annotated[
        str | None,
        Field(
            description="Error message that will be passed to the frontend.",
            default=None,
        ),
    ]
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/response_schemas/payment.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/webhook/tests/response_schemas/test_payment.py | from unittest.mock import patch
import pytest
from pydantic import ValidationError
from ....payment.interface import (
PaymentGatewayInitializeTokenizationResult,
PaymentMethodTokenizationResult,
StoredPaymentMethodRequestDeleteResult,
)
from ...response_schemas.payment import (
CreditCardInfoSchema,
ListStoredPaymentMethodsSchema,
PaymentGatewayInitializeTokenizationSessionSchema,
PaymentMethodTokenizationFailedSchema,
PaymentMethodTokenizationPendingSchema,
PaymentMethodTokenizationSuccessSchema,
StoredPaymentMethodDeleteRequestedSchema,
StoredPaymentMethodSchema,
)
from ...response_schemas.utils.annotations import logger as annotations_logger
from ...transport.utils import to_payment_app_id
@pytest.mark.parametrize(
"data",
[
# All fields
{
"brand": "visa",
"lastDigits": "1234",
"expYear": 2023,
"expMonth": 12,
"firstDigits": "123456",
},
# Only required fields
{
"brand": "mastercard",
"lastDigits": "5678",
"expYear": 2025,
"expMonth": 6,
},
# All int fields as strings
{
"brand": "visa",
"lastDigits": "1234",
"expYear": "2023",
"expMonth": "12",
"firstDigits": "123456",
},
# All digit fields as int
{
"brand": "visa",
"lastDigits": 1234,
"expYear": 2023,
"expMonth": 12,
"firstDigits": 123456,
},
],
)
def test_credit_card_info_schema_valid(data):
    """Valid payloads parse; digit fields are normalized to str/int."""
    # when
    schema = CreditCardInfoSchema.model_validate(data)

    # then
    assert schema.brand == data["brand"]
    assert schema.last_digits == str(data["lastDigits"])
    assert schema.exp_year == int(data["expYear"])
    assert schema.exp_month == int(data["expMonth"])
    first_digits = data.get("firstDigits")
    assert schema.first_digits == (str(first_digits) if first_digits else None)
class NonParsableObject:
    """Helper whose str() always fails, used to exercise coercion errors."""

    def __str__(self):
        raise ValueError("Cannot convert to string")
@pytest.mark.parametrize(
("data", "invalid_field"),
[
# Missing `brand` field
(
{
"lastDigits": "1234",
"expYear": 2023,
"expMonth": 12,
"firstDigits": "123456",
},
"brand",
),
# Missing `lastDigits` field
(
{
"brand": "visa",
"expYear": 2023,
"expMonth": 12,
"firstDigits": "123456",
},
"lastDigits",
),
# Missing `expYear` field
(
{
"brand": "visa",
"expMonth": 12,
"lastDigits": "1234",
},
"expYear",
),
# Missing `expMonth` field
(
{
"brand": "visa",
"expYear": 2023,
"lastDigits": "1234",
},
"expMonth",
),
# Not parsable `expYear`
(
{
"brand": "visa",
"lastDigits": "1234",
"expYear": "ABC",
"expMonth": 12,
"firstDigits": "123456",
},
"expYear",
),
# Not parsable `expMonth`
(
{
"brand": "visa",
"lastDigits": "1234",
"expYear": 2023,
"expMonth": "ABC",
"firstDigits": "123456",
},
"expMonth",
),
# Empty string as `expYear`
(
{
"brand": "visa",
"lastDigits": "1234",
"expYear": "",
"expMonth": 12,
"firstDigits": "123456",
},
"expYear",
),
# Empty string as `expMonth`
(
{
"brand": "visa",
"lastDigits": "1234",
"expYear": 2023,
"expMonth": "",
"firstDigits": "123456",
},
"expMonth",
),
# None as `lastDigits`
(
{
"brand": "visa",
"lastDigits": None,
"expYear": 2023,
"expMonth": 12,
"firstDigits": "123456",
},
"lastDigits",
),
# Not parsable as `lastDigits`
(
{
"brand": "visa",
"lastDigits": NonParsableObject(),
"expYear": 2023,
"expMonth": 12,
"firstDigits": "123456",
},
"lastDigits",
),
# Not parsable as `firstDigits`
(
{
"brand": "visa",
"lastDigits": "1234",
"expYear": 2023,
"expMonth": 12,
"firstDigits": NonParsableObject(),
},
"firstDigits",
),
],
)
def test_credit_card_info_schema_invalid(data, invalid_field):
    """Each malformed payload yields exactly one error on the bad field."""
    # when
    with pytest.raises(ValidationError) as exc_info:
        CreditCardInfoSchema.model_validate(data)

    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"][0] == invalid_field
@pytest.mark.parametrize(
"field",
[
"brand",
"lastDigits",
"expYear",
"expMonth",
],
)
def test_credit_card_info_schema_required_field_is_none(field):
    """None is rejected for every required credit-card field."""
    # given
    data = {
        "brand": "visa",
        "lastDigits": "1234",
        "expYear": 2023,
        "expMonth": 12,
        "firstDigits": "123456",
    }
    data[field] = None

    # when
    with pytest.raises(ValidationError) as exc_info:
        CreditCardInfoSchema.model_validate(data)

    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"][0] == field
@pytest.mark.parametrize(
"data",
[
# All fields
{
"id": "method-1",
"supportedPaymentFlows": ["INTERACTIVE"],
"type": "Credit Card",
"name": "Visa ***1234",
"data": {"key": "value"},
"creditCardInfo": {
"brand": "visa",
"lastDigits": "1234",
"expYear": 2023,
"expMonth": 12,
"firstDigits": "123456",
},
},
# Only required fields
{
"id": "method-2",
"type": "Credit Card",
},
# Empty not required fields
{
"id": "method-3",
"supportedPaymentFlows": None,
"type": "Credit Card",
"name": None,
"data": None,
"creditCardInfo": None,
},
# Empty list as supportedPaymentFlows
{
"id": "method-4",
"supportedPaymentFlows": [],
"type": "Credit Card",
},
],
)
def test_stored_payment_method_schema_valid(data):
    """Valid payloads populate all fields; omitted optionals get defaults."""
    # when
    schema = StoredPaymentMethodSchema.model_validate(data)

    # then
    assert schema.id == data["id"]
    # Flows are lower-cased by the schema's AfterValidator.
    assert schema.supported_payment_flows == [
        flow.lower() for flow in data.get("supportedPaymentFlows") or []
    ]
    assert schema.type == data["type"]
    assert schema.name == data.get("name")
    assert schema.data == data.get("data")
    if "creditCardInfo" in data and data["creditCardInfo"]:
        assert schema.credit_card_info.brand == data["creditCardInfo"]["brand"]
        assert (
            schema.credit_card_info.last_digits == data["creditCardInfo"]["lastDigits"]
        )
        assert schema.credit_card_info.exp_year == data["creditCardInfo"]["expYear"]
        assert schema.credit_card_info.exp_month == data["creditCardInfo"]["expMonth"]
        assert (
            schema.credit_card_info.first_digits
            == data["creditCardInfo"]["firstDigits"]
        )
    else:
        assert schema.credit_card_info is None
@pytest.mark.parametrize(
("data", "invalid_field"),
[
# Missing `id` field
(
{
"supportedPaymentFlows": ["INTERACTIVE"],
"type": "Credit Card",
},
"id",
),
# Invalid `supportedPaymentFlows`
(
{
"id": "method-1",
"supportedPaymentFlows": ["INVALID_FLOW"],
"type": "Credit Card",
},
"supportedPaymentFlows",
),
# Missing `type` field
(
{
"id": "method-1",
"supportedPaymentFlows": ["INTERACTIVE"],
},
"type",
),
        # Not parsable `data` field
(
{
"id": "method-1",
"supportedPaymentFlows": ["INTERACTIVE"],
"type": "Credit Card",
"data": object(),
},
"data",
),
],
)
def test_stored_payment_method_schema_invalid(data, invalid_field):
    """Each malformed payload yields exactly one error on the bad field."""
    # when
    with pytest.raises(ValidationError) as exc_info:
        StoredPaymentMethodSchema.model_validate(data)

    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"][0] == invalid_field
@patch.object(annotations_logger, "warning")
def test_stored_payment_method_schema_invalid_credit_card_info_skipped(
    mocked_logger, app
):
    """Broken creditCardInfo is dropped (OnErrorDefault) and logged, not fatal."""
    # given
    id = "method-1"
    type = "Credit Card"

    # when
    schema = StoredPaymentMethodSchema.model_validate(
        {
            "id": id,
            "type": type,
            "creditCardInfo": {
                "brand": "visa",
                "lastDigits": NonParsableObject(),
                "expYear": 2023,
                "expMonth": 12,
                "firstDigits": "123456",
            },
        },
        context={
            "app": app,
        },
    )

    # then
    assert schema.credit_card_info is None
    assert schema.id == id
    assert schema.type == type

    # The skipped field is reported through the annotations-module logger.
    assert mocked_logger.call_count == 1
    error_msg = mocked_logger.call_args[0][0]
    assert "Skipping invalid value" in error_msg
    assert mocked_logger.call_args[1]["extra"]["app"] == app.id
    assert mocked_logger.call_args[1]["extra"]["field_name"] == "credit_card_info"
@pytest.mark.parametrize(
"data",
[
# All fields
{
"paymentMethods": [
{
"id": "method-1",
"supportedPaymentFlows": ["INTERACTIVE"],
"type": "Credit Card",
"name": "Visa ***1234",
"data": {"key": "value"},
"creditCardInfo": {
"brand": "visa",
"lastDigits": "1234",
"expYear": 2023,
"expMonth": 12,
"firstDigits": "123456",
},
},
{
"id": "method-2",
"supportedPaymentFlows": ["INTERACTIVE"],
"type": "Debit Card",
},
]
},
        # Empty list as paymentMethods
{"paymentMethods": []},
# None as paymentMethods
{"paymentMethods": None},
# Only required fields
{
"paymentMethods": [
{
"id": "method-3",
"type": "Credit Card",
}
]
},
],
)
def test_list_stored_payment_methods_schema_valid(data):
    """paymentMethods parses to a list; None and empty both yield []."""
    # when
    schema = ListStoredPaymentMethodsSchema.model_validate(data)

    # then
    assert len(schema.payment_methods) == (
        len(data["paymentMethods"]) if data["paymentMethods"] else 0
    )
@pytest.mark.parametrize(
"data",
[{}, {"test": "invalid"}],
)
def test_list_stored_payment_methods_schema_invalid(data):
    """A missing or unknown paymentMethods key falls back to an empty list."""
    # when
    schema = ListStoredPaymentMethodsSchema.model_validate(data)

    # then
    assert schema.payment_methods == []
@patch.object(annotations_logger, "warning")
def test_list_stored_payment_methods_schema_invalid_element_skipped(mocked_logger):
    """Test when the input has one valid and one invalid stored payment method."""
    # given a list with one valid and one invalid payment method
    data = {
        "paymentMethods": [
            {
                "id": "method-1",
                "supportedPaymentFlows": ["INTERACTIVE"],
                "type": "Credit Card",
                "name": "Visa ***1234",
            },
            # missing type
            {
                "id": "method-2",
                "name": "Visa ***4321",
            },
        ]
    }

    # when
    schema = ListStoredPaymentMethodsSchema.model_validate(data)

    # then only the valid payment method should be included
    assert len(schema.payment_methods) == 1
    assert schema.payment_methods[0].id == data["paymentMethods"][0]["id"]
    assert schema.payment_methods[0].name == data["paymentMethods"][0]["name"]
    # The skipped element is reported through the annotations-module logger.
    assert mocked_logger.call_count == 1
@pytest.mark.parametrize(
"data",
[
# Successfully deleted
{
"result": StoredPaymentMethodRequestDeleteResult.SUCCESSFULLY_DELETED.name,
"error": None,
},
# Failed to delete with error message
{
"result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELETE.name,
"error": "Some error occurred",
},
# Failed to deliver with error message
{
"result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELIVER.name,
"error": "Delivery failed due to network issues",
},
# Failed to delete no error message
{
"result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELETE.name,
},
# Failed to deliver no error message
{
"result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELIVER.name,
},
],
)
def test_stored_payment_method_delete_requested_schema_valid(data):
    """Valid result names validate; error passes through unchanged."""
    # when
    schema = StoredPaymentMethodDeleteRequestedSchema.model_validate(data)

    # then
    assert schema.result == data["result"].lower()
    assert schema.error == data.get("error")
@pytest.mark.parametrize(
("data", "invalid_field"),
[
# Missing `result` field
(
{
"error": "Some error occurred",
},
"result",
),
# Lower value for `result`
(
{
"result": "successfully_deleted",
"error": "Some error occurred",
},
"result",
),
# Invalid `result` value
(
{
"result": "INVALID_RESULT",
"error": "Invalid result value",
},
"result",
),
# Invalid `error` type
(
{
"result": StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELETE.name,
"error": 123, # Should be a string or None
},
"error",
),
],
)
def test_stored_payment_method_delete_requested_schema_invalid(data, invalid_field):
    """Bad result/error inputs produce exactly one error on the bad field."""
    # when
    with pytest.raises(ValidationError) as exc_info:
        StoredPaymentMethodDeleteRequestedSchema.model_validate(data)

    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"][0] == invalid_field
@pytest.mark.parametrize(
"data",
[
# Successfully initialize
{
"result": PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED.name,
"error": None,
"data": {"key": "value"},
},
# Successfully initialize data as string
{
"result": PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED.name,
"data": "Successfully initialized",
},
# Failed to initialize with error message
{
"result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE.name,
"error": "Some error occurred",
"data": None,
},
# Failed to deliver with error message
{
"result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER.name,
"error": "Delivery failed due to network issues",
},
# Failed to initialize no error message
{
"result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE.name,
"data": None,
"error": None,
},
# Failed to deliver no error message
{
"result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER.name,
},
],
)
def test_payment_gateway_initialize_tokenization_session_schema_valid(data):
    """Valid payloads parse; `result` is lower-cased, `data`/`error` fall back to None."""
    # when
    schema = PaymentGatewayInitializeTokenizationSessionSchema.model_validate(data)
    # then
    assert schema.result == data["result"].lower()
    assert schema.data == data.get("data")
    assert schema.error == data.get("error")
@pytest.mark.parametrize(
("data", "invalid_field"),
[
# Missing `result` field
(
{
"error": "Some error occurred",
},
"result",
),
# Lower value for `result`
(
{
"result": PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED.value,
"error": "Some error occurred",
},
"result",
),
# Invalid `result` value
(
{
"result": "INVALID_RESULT",
"error": "Invalid result value",
},
"result",
),
# Invalid `error` type
(
{
"result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE.name,
"error": 123, # Should be a string or None
},
"error",
),
# Not parsable `data`
(
{
"result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE.name,
"error": "error",
"data": object(),
},
"data",
),
],
)
def test_payment_gateway_initialize_tokenization_session_schema_invalid(
    data, invalid_field
):
    """Each malformed payload fails with exactly one error located at `invalid_field`."""
    # when
    with pytest.raises(ValidationError) as exc_info:
        PaymentGatewayInitializeTokenizationSessionSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"][0] == invalid_field
@pytest.mark.parametrize(
"data",
[
# SUCCESSFULLY_TOKENIZED with not data and no error
{
"id": "123",
"result": PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED.name,
},
# ADDITIONAL_ACTION_REQUIRED with data
{
"id": "456",
"result": PaymentMethodTokenizationResult.ADDITIONAL_ACTION_REQUIRED.name,
"data": {"action": "verify"},
},
],
)
def test_payment_method_tokenization_schema_valid(data, app):
    """Valid payloads parse; `id` is rewritten to an app-scoped id via the context app."""
    # when
    schema = PaymentMethodTokenizationSuccessSchema.model_validate(
        data, context={"app": app}
    )
    # then
    assert schema.result == PaymentMethodTokenizationResult[data["result"]]
    assert schema.data == data.get("data")
    assert schema.id == to_payment_app_id(app, data["id"])
def test_payment_method_tokenization_schema_valid_extra_data_in_input(app):
    """Extra keys in the payload (here `error`) do not break success-schema parsing."""
    # given
    payload = {
        "id": "123",
        "result": PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED.name,
        "error": "extra error value",
        "data": {"key": "value"},
    }

    # when
    parsed = PaymentMethodTokenizationSuccessSchema.model_validate(
        payload, context={"app": app}
    )

    # then
    assert parsed.result == PaymentMethodTokenizationResult[payload["result"]]
    assert parsed.data == payload.get("data")
    assert parsed.id == to_payment_app_id(app, payload["id"])
@pytest.mark.parametrize(
("data", "expected_error_field"),
[
# Missing `id` field
(
{
"result": PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED.name,
"data": {"key": "value"},
},
"id",
),
# Missing `result` field
(
{
"id": "123",
"data": {"key": "value"},
"error": None,
},
"result",
),
# Invalid `result` value
(
{
"id": "123",
"result": "INVALID_RESULT",
},
"result",
),
# `id` field with wrong type
(
{
"id": 123,
"result": PaymentMethodTokenizationResult.SUCCESSFULLY_TOKENIZED.name,
"data": {"key": "value"},
"error": None,
},
"id",
),
],
)
def test_payment_method_tokenization_schema_invalid(data, expected_error_field, app):
    """Missing/invalid fields produce exactly one error on the expected field."""
    # when
    with pytest.raises(ValidationError) as exc_info:
        PaymentMethodTokenizationSuccessSchema.model_validate(
            data, context={"app": app}
        )
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"][0] == expected_error_field
@pytest.mark.parametrize(
"data",
[
# All fields provided
{
"id": "123",
"result": PaymentMethodTokenizationResult.PENDING.name,
"data": {"status": "pending"},
},
# `id` is None
{
"id": None,
"result": PaymentMethodTokenizationResult.PENDING.name,
"data": {"status": "pending"},
},
# No data provided
{
"id": "456",
"result": PaymentMethodTokenizationResult.PENDING.name,
},
],
)
def test_payment_method_tokenization_pending_schema_valid(data, app):
    """PENDING payloads parse; a None `id` stays None, otherwise it is app-scoped."""
    # when
    schema = PaymentMethodTokenizationPendingSchema.model_validate(
        data, context={"app": app}
    )
    # then
    assert schema.result == PaymentMethodTokenizationResult.PENDING
    assert schema.data == data.get("data", None)
    assert schema.id == (to_payment_app_id(app, data["id"]) if data["id"] else None)
@pytest.mark.parametrize(
("data", "expected_error_field"),
[
# Missing `result` field
(
{
"id": "123",
"data": {"status": "pending"},
},
"result",
),
# Invalid `result` value
(
{
"id": "123",
"result": "INVALID_RESULT",
"data": {"status": "pending"},
},
"result",
),
# `id` field with wrong type
(
{
"id": 123,
"result": PaymentMethodTokenizationResult.PENDING.name,
"data": {"status": "pending"},
},
"id",
),
],
)
def test_payment_method_tokenization_pending_schema_invalid(data, expected_error_field):
    """Malformed PENDING payloads yield a single error on the expected field."""
    # when
    with pytest.raises(ValidationError) as exc_info:
        PaymentMethodTokenizationPendingSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"][0] == expected_error_field
@pytest.mark.parametrize(
"data",
[
# `FAILED_TO_TOKENIZE`` with error message
{
"result": PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE.name,
"error": "Tokenization failed.",
},
# `FAILED_TO_DELIVER`` with error message
{
"result": PaymentMethodTokenizationResult.FAILED_TO_DELIVER.name,
"error": "Tokenization failed.",
},
# `FAILED_TO_TOKENIZE` without error message
{
"result": PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE.name,
"error": None,
},
# `FAILED_TO_DELIVER` without error message
{
"result": PaymentMethodTokenizationResult.FAILED_TO_DELIVER.name,
},
],
)
def test_payment_method_tokenization_failed_schema_valid(data):
    """Failure results parse; `error` is optional and defaults to None."""
    # when
    schema = PaymentMethodTokenizationFailedSchema.model_validate(data)
    # then
    assert schema.result == PaymentMethodTokenizationResult[data["result"]]
    assert schema.error == data.get("error")
@pytest.mark.parametrize(
("data", "expected_error_field"),
[
# Missing `result` field
(
{
"error": "Tokenization failed due to invalid input.",
},
"result",
),
# Invalid `result` value
(
{
"result": "INVALID_RESULT",
"error": "Invalid result value.",
},
"result",
),
# `error` field with wrong type
(
{
"result": PaymentMethodTokenizationResult.FAILED_TO_TOKENIZE.name,
"error": 123, # Should be a string or None
},
"error",
),
],
)
def test_payment_method_tokenization_failed_schema_invalid(data, expected_error_field):
    """Malformed failure payloads yield a single error on the expected field."""
    # when
    with pytest.raises(ValidationError) as exc_info:
        PaymentMethodTokenizationFailedSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"][0] == expected_error_field
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/tests/response_schemas/test_payment.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 848,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/webhook/tests/response_schemas/test_validators.py | import pytest
from saleor.webhook.response_schemas.utils.validators import lower_values
@pytest.mark.parametrize(
("input_value", "expected_output"),
[
("HELLO", "hello"),
("world", "world"),
(["HELLO", "WORLD"], ["hello", "world"]),
(["Python", "TEST"], ["python", "test"]),
([], []),
(None, None),
(["MiXeD", "CaSe"], ["mixed", "case"]),
],
)
def test_lower_values(input_value, expected_output):
    """`lower_values` lowercases a string or each string in a list; None passes through."""
    result = lower_values(input_value)

    assert result == expected_output
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/tests/response_schemas/test_validators.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/webhook/response_schemas/transaction.py | import logging
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import Annotated, Any, Literal
from django.conf import settings
from django.utils import timezone
from pydantic import BaseModel, Field, HttpUrl, field_validator
from ...graphql.core.utils import str_to_enum
from ...payment import (
OPTIONAL_PSP_REFERENCE_EVENTS,
PaymentMethodType,
TransactionAction,
TransactionEventType,
)
from .utils.annotations import DatetimeUTC, DefaultIfNone, JSONValue, OnErrorSkipLiteral
logger = logging.getLogger(__name__)
# Ad-hoc enums mirroring the model CHOICES: member names are the upper-cased
# forms (via str_to_enum) used as Literal values in the schemas below, while
# member values stay the stored strings.
TransactionActionEnum = Enum(  # type: ignore[misc]
    "TransactionActionEnum",
    [(str_to_enum(value), value) for value, _ in TransactionAction.CHOICES],
)
PaymentMethodTypeEnum = Enum(  # type: ignore[misc]
    "PaymentMethodTypeEnum",
    [(str_to_enum(value), value) for value, _ in PaymentMethodType.CHOICES],
)
class PaymentMethodDetailsBase(BaseModel):
    """Base schema for payment-method details included in a transaction response."""

    type: Annotated[
        DefaultIfNone[str],
        Field(
            description="Type of the payment method used for the transaction.",
            max_length=32,
        ),
    ]
    name: Annotated[
        str,
        Field(
            description="Name of the payment method used for the transaction.",
            max_length=256,
        ),
    ]

    @field_validator("type", mode="after")
    @classmethod
    def clean_type(cls, type_value: str) -> str:
        # Store the type lower-cased (runs after field validation).
        # NOTE(review): the field is DefaultIfNone[str], which suggests None can
        # occur — confirm `.lower()` can never be called on None here.
        return type_value.lower()
class OtherPaymentMethodDetails(PaymentMethodDetailsBase):
    """Payment-method details for the generic OTHER type (discriminator branch)."""

    type: Annotated[  # type: ignore[name-defined]
        Literal[PaymentMethodTypeEnum.OTHER.name],
        Field(
            description="Type of the payment method used for the transaction.",
            max_length=32,
        ),
    ]


class CardPaymentMethodDetails(PaymentMethodDetailsBase):
    """Payment-method details for the CARD type; all card attributes are optional."""

    type: Annotated[  # type: ignore[name-defined]
        Literal[PaymentMethodTypeEnum.CARD.name],
        Field(
            description="Type of the payment method used for the transaction.",
            max_length=32,
        ),
    ]
    brand: Annotated[
        str | None,
        Field(
            description="Brand of the card used for the transaction.",
            max_length=40,
        ),
    ] = None
    first_digits: Annotated[
        str | None,
        Field(
            description="First digits of the card used for the transaction.",
            max_length=4,
            validation_alias="firstDigits",
        ),
    ] = None
    last_digits: Annotated[
        str | None,
        Field(
            description="Last digits of the card used for the transaction.",
            max_length=4,
            validation_alias="lastDigits",
        ),
    ] = None
    exp_month: Annotated[
        int | None,
        Field(
            description="Expiration month of the card used for the transaction.",
            ge=1,
            le=12,
            validation_alias="expMonth",
        ),
    ] = None
    exp_year: Annotated[
        int | None,
        Field(
            description="Expiration year of the card used for the transaction.",
            ge=2000,
            validation_alias="expYear",
        ),
    ] = None
class TransactionBaseSchema(BaseModel):
    """Common schema for transaction webhook response payloads.

    Normalizes incoming data: quantizes ``amount``, parses ``time`` from
    ISO 8601 strings, coerces and truncates ``message``, and lower-cases
    ``actions`` and ``result``.
    """

    psp_reference: Annotated[
        DefaultIfNone[str],
        Field(
            validation_alias="pspReference",
            default=None,
            description=(
                "PSP reference received from payment provider. Optional for the following results: "
                + ", ".join([event.upper() for event in OPTIONAL_PSP_REFERENCE_EVENTS])
            ),
        ),
    ]
    amount: Annotated[
        DefaultIfNone[Decimal],
        Field(
            description="Decimal amount of the processed action",
            default=None,
        ),
    ]
    time: Annotated[
        DefaultIfNone[DatetimeUTC],
        Field(
            description="Time of the action in ISO 8601 format",
            default_factory=timezone.now,
        ),
    ]
    external_url: Annotated[
        DefaultIfNone[HttpUrl],
        Field(
            validation_alias="externalUrl",
            description="External url with action details",
            default="",
        ),
    ]
    message: Annotated[
        DefaultIfNone[str],
        Field(
            description="Message related to the action. The maximum length is 512 characters; any text exceeding this limit will be truncated",
            default="",
        ),
    ]
    actions: (  # type: ignore[name-defined]
        Annotated[
            list[
                OnErrorSkipLiteral[
                    Literal[
                        TransactionActionEnum.CHARGE.name,
                        TransactionActionEnum.REFUND.name,
                        TransactionActionEnum.CANCEL.name,
                    ]
                ]
            ],
            Field(description="List of actions available for the transaction."),
        ]
        | None
    ) = None
    result: Annotated[
        DefaultIfNone[str],
        Field(description="Result of the action"),
    ]

    @field_validator("amount", mode="after")
    @classmethod
    def clean_amount(cls, amount: Decimal | None) -> Decimal | None:
        """Quantize the amount to the configured number of decimal places."""
        if amount is None:
            return None
        amount = amount.quantize(Decimal(10) ** (-settings.DEFAULT_DECIMAL_PLACES))
        return amount

    @field_validator("time", mode="before")
    @classmethod
    def clean_time(cls, time: Any) -> datetime | None:
        """Parse string input as ISO 8601; non-string input is left to pydantic."""
        # pydantic do not support all ISO 8601 formats so in case of string
        # we need to parse it manually; different types are handled by pydantic
        if isinstance(time, str):
            try:
                time = datetime.fromisoformat(time)
            except ValueError as e:
                # Fix: the message was missing the f-prefix, so the literal text
                # "{time}" was raised instead of the offending value.
                raise ValueError(
                    f"Invalid value for field 'time': {time}. Expected ISO 8601 format."
                ) from e
        return time

    @field_validator("message", mode="before")
    @classmethod
    def clean_message(cls, value: Any):
        """Coerce the message to a string and truncate it to the allowed length."""
        from ...payment.utils import (
            TRANSACTION_EVENT_MSG_MAX_LENGTH,
            truncate_transaction_event_message,
        )

        message = value or ""
        try:
            message = str(message)
        except (UnicodeEncodeError, TypeError, ValueError):
            # Best-effort: an unconvertible value is logged and replaced by "".
            invalid_err_msg = "Incorrect value for field: %s in response of transaction action webhook."
            logger.warning(invalid_err_msg, "message")
            message = ""
        if message and len(message) > TRANSACTION_EVENT_MSG_MAX_LENGTH:
            message = truncate_transaction_event_message(message)
            field_limit_exceeded_msg = (
                "Value for field: %s in response of transaction action webhook "
                "exceeds the character field limit. Message has been truncated."
            )
            logger.warning(field_limit_exceeded_msg, "message")
        return message

    @field_validator("actions", mode="after")
    @classmethod
    def clean_actions(cls, actions: list[str] | None) -> list[str] | None:
        """Lower-case the accepted action names."""
        return [action.lower() for action in actions] if actions else actions

    @field_validator("result", mode="after")
    @classmethod
    def clean_result(cls, result: str | None) -> str | None:
        """Lower-case the result name; None stays None."""
        if result is None:
            return None
        return result.lower()
class TransactionAsyncSchema(BaseModel):
    """Async transaction response: a required pspReference plus optional actions."""

    psp_reference: Annotated[
        str,
        Field(
            validation_alias="pspReference",
            description="PSP reference received from payment provider.",
        ),
    ]
    actions: (  # type: ignore[name-defined]
        Annotated[
            list[
                OnErrorSkipLiteral[
                    Literal[
                        TransactionActionEnum.CHARGE.name,
                        TransactionActionEnum.REFUND.name,
                        TransactionActionEnum.CANCEL.name,
                    ]
                ]
            ],
            Field(description="List of actions available for the transaction."),
        ]
        | None
    ) = None


class TransactionSyncFailureSchema(TransactionBaseSchema):
    """Sync failure response: pspReference becomes optional (defaults to None)."""

    psp_reference: Annotated[
        DefaultIfNone[str],
        Field(
            validation_alias="pspReference",
            default=None,
            description="PSP reference received from payment provider.",
        ),
    ]


class TransactionSyncSuccessSchema(TransactionBaseSchema):
    """Sync success response: pspReference is required."""

    psp_reference: Annotated[
        str,
        Field(
            validation_alias="pspReference",
            description="PSP reference received from payment provider.",
        ),
    ]
# Enum mirroring TransactionEventType.CHOICES; its upper-cased member names are
# the Literal values accepted for `result` in the schemas below.
TransactionEventTypeEnum = Enum(  # type: ignore[misc]
    "TransactionEventTypeEnum",
    [(str_to_enum(value), value) for value, _ in TransactionEventType.CHOICES],
)


class TransactionChargeRequestedAsyncSchema(TransactionAsyncSchema):
    """Async variant of the charge-requested response (no extra fields)."""

    pass


class TransactionChargeRequestedSyncSuccessSchema(TransactionSyncSuccessSchema):
    """Sync charge response accepting only CHARGE_SUCCESS as the result."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[TransactionEventTypeEnum.CHARGE_SUCCESS.name,],
        Field(description="Result of the action"),
    ]


class TransactionChargeRequestedSyncFailureSchema(TransactionSyncFailureSchema):
    """Sync charge response accepting only CHARGE_FAILURE as the result."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[TransactionEventTypeEnum.CHARGE_FAILURE.name,],
        Field(description="Result of the action"),
    ]


class TransactionCancelationRequestedAsyncSchema(TransactionAsyncSchema):
    """Async variant of the cancelation-requested response (no extra fields)."""

    pass


class TransactionCancelationRequestedSyncSuccessSchema(TransactionSyncSuccessSchema):
    """Sync cancel response accepting only CANCEL_SUCCESS as the result."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[TransactionEventTypeEnum.CANCEL_SUCCESS.name,],
        Field(description="Result of the action"),
    ]


class TransactionCancelationRequestedSyncFailureSchema(TransactionSyncFailureSchema):
    """Sync cancel response accepting only CANCEL_FAILURE as the result."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[TransactionEventTypeEnum.CANCEL_FAILURE.name,],
        Field(description="Result of the action"),
    ]


class TransactionRefundRequestedAsyncSchema(TransactionAsyncSchema):
    """Async variant of the refund-requested response (no extra fields)."""

    pass


class TransactionRefundRequestedSyncSuccessSchema(TransactionSyncSuccessSchema):
    """Sync refund response accepting only REFUND_SUCCESS as the result."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[TransactionEventTypeEnum.REFUND_SUCCESS.name,],
        Field(description="Result of the action"),
    ]


class TransactionRefundRequestedSyncFailureSchema(TransactionSyncFailureSchema):
    """Sync refund response accepting only REFUND_FAILURE as the result."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[TransactionEventTypeEnum.REFUND_FAILURE.name,],
        Field(description="Result of the action"),
    ]
class TransactionSessionBaseSchema(TransactionBaseSchema):
    """Base for transaction-session responses: adds `data` and payment-method details."""

    data: Annotated[
        DefaultIfNone[JSONValue],
        Field(
            description="The JSON data that will be returned to storefront",
            default=None,
        ),
    ]
    payment_method_details: Annotated[
        OtherPaymentMethodDetails | CardPaymentMethodDetails | None,
        Field(
            validation_alias="paymentMethodDetails",
            default=None,
            description="Details of the payment method used for the transaction.",
            # `type` selects between the Other/Card detail schemas.
            discriminator="type",
        ),
    ] = None
class TransactionSessionFailureSchema(TransactionSessionBaseSchema):
    """Session response for failure results; pspReference is optional."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[
            TransactionEventTypeEnum.AUTHORIZATION_FAILURE.name,
            TransactionEventTypeEnum.CHARGE_FAILURE.name,
        ],
        Field(description="Result of the action"),
    ]
    psp_reference: Annotated[
        DefaultIfNone[str],
        Field(
            validation_alias="pspReference",
            default=None,
            description="PSP reference received from payment provider.",
        ),
    ]


class TransactionSessionCancelSuccessSchema(TransactionSessionBaseSchema):
    """Session response for CANCEL_SUCCESS; pspReference is optional."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[TransactionEventTypeEnum.CANCEL_SUCCESS.name,],
        Field(description="Result of the action"),
    ]
    psp_reference: Annotated[
        DefaultIfNone[str],
        Field(
            validation_alias="pspReference",
            default=None,
            description="PSP reference received from payment provider.",
        ),
    ]


class TransactionSessionActionRequiredSchema(TransactionSessionBaseSchema):
    """Session response for *_ACTION_REQUIRED results; pspReference is optional."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[
            TransactionEventTypeEnum.AUTHORIZATION_ACTION_REQUIRED.name,
            TransactionEventTypeEnum.CHARGE_ACTION_REQUIRED.name,
        ],
        Field(description="Result of the action"),
    ]
    psp_reference: Annotated[
        DefaultIfNone[str],
        Field(
            validation_alias="pspReference",
            default=None,
            description="PSP reference received from payment provider.",
        ),
    ]


class TransactionSessionSuccessSchema(TransactionSessionBaseSchema):
    """Session response for success/request results; pspReference is required."""

    result: Annotated[  # type: ignore[name-defined]
        Literal[
            TransactionEventTypeEnum.AUTHORIZATION_SUCCESS.name,
            TransactionEventTypeEnum.CHARGE_SUCCESS.name,
            TransactionEventTypeEnum.AUTHORIZATION_REQUEST.name,
            TransactionEventTypeEnum.CHARGE_REQUEST.name,
        ],
        Field(description="Result of the action"),
    ]
    psp_reference: Annotated[
        str,
        Field(
            validation_alias="pspReference",
            description="PSP reference received from payment provider.",
        ),
    ]


class PaymentGatewayInitializeSessionSchema(BaseModel):
    """Response schema for payment-gateway-initialize: a single JSON `data` field."""

    data: JSONValue
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/response_schemas/transaction.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 373,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/webhook/tests/response_schemas/test_transaction.py | import math
from datetime import UTC, datetime
from decimal import Decimal
import pytest
from django.utils import timezone
from freezegun import freeze_time
from pydantic import ValidationError
from ....payment import TransactionAction, TransactionEventType
from ...response_schemas.transaction import (
PaymentGatewayInitializeSessionSchema,
TransactionBaseSchema,
TransactionCancelationRequestedAsyncSchema,
TransactionCancelationRequestedSyncFailureSchema,
TransactionCancelationRequestedSyncSuccessSchema,
TransactionChargeRequestedAsyncSchema,
TransactionChargeRequestedSyncFailureSchema,
TransactionChargeRequestedSyncSuccessSchema,
TransactionRefundRequestedAsyncSchema,
TransactionRefundRequestedSyncFailureSchema,
TransactionRefundRequestedSyncSuccessSchema,
TransactionSessionActionRequiredSchema,
TransactionSessionBaseSchema,
TransactionSessionFailureSchema,
TransactionSessionSuccessSchema,
)
def test_transaction_schema_valid_full_data():
    """A fully-populated payload round-trips through TransactionBaseSchema."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "time": "2023-01-01T12:00:00+00:00",
        "externalUrl": "https://example.com/",
        "message": "Transaction completed successfully.",
        "actions": [TransactionAction.CHARGE.upper(), TransactionAction.REFUND.upper()],
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
    }

    # when
    parsed = TransactionBaseSchema.model_validate(payload)

    # then
    assert parsed.psp_reference == payload["pspReference"]
    assert parsed.amount == payload["amount"]
    assert parsed.time.isoformat() == payload["time"]
    assert str(parsed.external_url) == payload["externalUrl"]
    assert parsed.message == payload["message"]
    assert parsed.actions == [action.lower() for action in payload["actions"]]
    assert parsed.result == payload["result"].lower()
@pytest.mark.parametrize(
"data",
[
# Only required fields with values
{
"pspReference": None,
"amount": Decimal("100.50"),
"time": None,
"externalUrl": None,
"message": None,
"actions": None,
"result": TransactionEventType.CHARGE_ACTION_REQUIRED.upper(),
},
# Only required fields with values
{
"amount": Decimal("100.50"),
"result": TransactionEventType.CHARGE_ACTION_REQUIRED.upper(),
},
],
)
@freeze_time("2023-01-01T12:00:00+00:00")
def test_transaction_schema_valid_only_required_fields(data):
    """Omitted/None optional fields fall back to defaults: now(), "", and None."""
    # when
    transaction = TransactionBaseSchema.model_validate(data)
    # then
    assert transaction.psp_reference is None
    assert transaction.amount == data["amount"]
    assert transaction.time.isoformat() == timezone.now().isoformat()
    assert str(transaction.external_url) == ""
    assert transaction.message == ""
    assert transaction.actions is None
    assert transaction.result == data["result"].lower()
@pytest.mark.parametrize(
"amount",
[Decimal("100.50"), 100.50, 100, "100.50"],
)
def test_transaction_schema_with_various_amount_types(amount):
    """`amount` is accepted as Decimal, float, int, or str and coerced to Decimal."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": amount,
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
    }

    # when
    parsed = TransactionBaseSchema.model_validate(payload)

    # then
    assert parsed.amount == Decimal(str(amount))
@pytest.mark.parametrize(
("time", "expected_datetime"),
[
# ISO 8601 format with timezone
("2023-05-05T12:00:00+02:00", datetime(2023, 5, 5, 10, 0, 0, tzinfo=UTC)),
# ISO 8601 format without timezone
("2023-02-04T10:15:22", datetime(2023, 2, 4, 10, 15, 22, tzinfo=UTC)),
# ISO 8601 format with milliseconds
(
"2023-01-01T12:00:00.123+00:00",
datetime(2023, 1, 1, 12, 0, 0, 123000, tzinfo=UTC),
),
# ISO 8601 format with week-based date
("2023-W02-1T12:00:00", datetime(2023, 1, 9, 12, 0, tzinfo=UTC)),
# Time as integer
(1672531400, datetime(2023, 1, 1, 0, 3, 20, tzinfo=UTC)),
# No time provided (should use current time)
(None, datetime(2023, 1, 1, 12, 0, 0, tzinfo=UTC)),
],
)
@freeze_time("2023-01-01T12:00:00+00:00")
def test_transaction_schema_time_valid(time, expected_datetime):
    """`time` accepts several ISO 8601 flavors and ints; None falls back to now()."""
    # given
    data = {
        "pspReference": "123",
        "amount": Decimal("100.00"),
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
        "time": time,
    }
    # when
    transaction = TransactionBaseSchema.model_validate(data)
    # then
    assert transaction.time == expected_datetime
@pytest.mark.parametrize(
("actions", "expected_actions"),
[
# Valid actions
(
[
TransactionAction.CHARGE.upper(),
TransactionAction.REFUND.upper(),
TransactionAction.CANCEL.upper(),
],
[
TransactionAction.CHARGE,
TransactionAction.REFUND,
TransactionAction.CANCEL,
],
),
# Just one action
(
[TransactionAction.CANCEL.upper()],
[TransactionAction.CANCEL],
),
# Invalid actions (should skip invalid ones)
(
["INVALID", TransactionAction.REFUND.upper()],
[TransactionAction.REFUND],
),
# Empty actions list
(
[],
[],
),
# None actions
(
None,
None,
),
],
)
def test_transaction_schema_actions_validation(actions, expected_actions):
    """Valid action names are lower-cased; unrecognized names are skipped silently."""
    # given
    data = {
        "pspReference": "123",
        "amount": Decimal("100.00"),
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
        "actions": actions,
    }
    # when
    transaction = TransactionBaseSchema.model_validate(data)
    # then
    assert transaction.actions == expected_actions
@pytest.mark.parametrize(
("data", "invalid_field"),
[
# Time as a string value
(
{
"pspReference": "123",
"amount": Decimal("100.00"),
"result": TransactionEventType.CHARGE_SUCCESS.upper(),
"time": "invalid-time",
},
"time",
),
# Invalid external URL
(
{
"amount": "100.50",
"result": TransactionEventType.CHARGE_SUCCESS.upper(),
"externalUrl": "invalid-url",
},
"externalUrl",
),
# Infinitive amount
(
{
"pspReference": "123",
"amount": math.inf,
"result": TransactionEventType.CHARGE_SUCCESS.upper(),
},
"amount",
),
],
)
def test_transaction_schema_invalid(data, invalid_field):
    """Bad time/url/amount each yield exactly one error at the aliased field name."""
    # when
    with pytest.raises(ValidationError) as exc_info:
        TransactionBaseSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"] == (invalid_field,)
def test_transaction_charge_requested_sync_success_schema_valid():
    """CHARGE_SUCCESS parses through the charge-requested success schema."""
    # given
    expected_result = TransactionEventType.CHARGE_SUCCESS
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "time": "2023-01-01T12:00:00+00:00",
        "externalUrl": "https://example.com/",
        "message": "Transaction completed successfully.",
        "actions": [TransactionAction.CHARGE.upper(), TransactionAction.REFUND.upper()],
        "result": expected_result.upper(),
    }

    # when
    parsed = TransactionChargeRequestedSyncSuccessSchema.model_validate(payload)

    # then
    assert parsed.result == expected_result
@pytest.mark.parametrize(
"result",
[
TransactionEventType.CANCEL_SUCCESS,
TransactionEventType.REFUND_SUCCESS,
TransactionEventType.CANCEL_FAILURE,
TransactionEventType.REFUND_FAILURE,
],
)
def test_transaction_charge_requested_sync_success_schema_invalid(result):
    """Results other than CHARGE_SUCCESS are rejected with one error on `result`."""
    # given
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
    }
    # when
    with pytest.raises(ValidationError) as exc_info:
        TransactionChargeRequestedSyncSuccessSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"] == ("result",)
def test_transaction_charge_requested_sync_failure_schema_valid():
    """CHARGE_FAILURE parses through the charge-requested failure schema."""
    # given
    result = TransactionEventType.CHARGE_FAILURE
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "time": "2023-01-01T12:00:00+00:00",
        "externalUrl": "https://example.com/",
        "message": "Transaction failed.",
        "actions": [TransactionAction.CHARGE.upper(), TransactionAction.REFUND.upper()],
        "result": result.upper(),
    }
    # when
    transaction = TransactionChargeRequestedSyncFailureSchema.model_validate(data)
    # then
    assert transaction.result == result
@pytest.mark.parametrize(
"result",
[
TransactionEventType.CANCEL_SUCCESS,
TransactionEventType.REFUND_SUCCESS,
TransactionEventType.CANCEL_FAILURE,
TransactionEventType.REFUND_FAILURE,
],
)
def test_transaction_charge_requested_sync_failure_schema_invalid(result):
    """Results other than CHARGE_FAILURE are rejected with one error on `result`."""
    # given
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
    }
    # when
    with pytest.raises(ValidationError) as exc_info:
        TransactionChargeRequestedSyncFailureSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"] == ("result",)
def test_transaction_charge_requested_async_schema_valid():
    """An async charge response only needs a string pspReference."""
    # given
    payload = {
        "pspReference": "psp-async-123",
        "actions": [TransactionAction.CHARGE.upper()],
    }

    # when
    parsed = TransactionChargeRequestedAsyncSchema.model_validate(payload)

    # then
    assert parsed.psp_reference == "psp-async-123"
def test_transaction_charge_requested_async_schema_invalid():
    """A non-string pspReference fails with a single error at the aliased name."""
    # given
    data = {
        "pspReference": 123,
        "actions": [TransactionAction.CHARGE.upper()],
    }
    # when
    with pytest.raises(ValidationError) as exc_info:
        TransactionChargeRequestedAsyncSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"] == ("pspReference",)
def test_transaction_cancel_requested_sync_success_schema_valid():
    """CANCEL_SUCCESS parses through the cancel-requested success schema."""
    # given
    result = TransactionEventType.CANCEL_SUCCESS
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "time": "2023-01-01T12:00:00+00:00",
        "externalUrl": "https://example.com/",
        "message": "Transaction cancelled successfully.",
        "actions": [TransactionAction.CANCEL.upper(), TransactionAction.REFUND.upper()],
        "result": result.upper(),
    }
    # when
    transaction = TransactionCancelationRequestedSyncSuccessSchema.model_validate(data)
    # then
    assert transaction.result == result
@pytest.mark.parametrize(
"result",
[
TransactionEventType.CHARGE_SUCCESS,
TransactionEventType.REFUND_SUCCESS,
TransactionEventType.CHARGE_FAILURE,
TransactionEventType.REFUND_FAILURE,
],
)
def test_transaction_cancel_requested_sync_success_schema_invalid(result):
    """Results other than CANCEL_SUCCESS are rejected with one error on `result`."""
    # given
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
    }
    # when
    with pytest.raises(ValidationError) as exc_info:
        TransactionCancelationRequestedSyncSuccessSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"] == ("result",)
def test_transaction_cancel_requested_sync_failure_schema_valid():
    """CANCEL_FAILURE parses through the cancel-requested failure schema."""
    # given
    result = TransactionEventType.CANCEL_FAILURE
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "time": "2023-01-01T12:00:00+00:00",
        "externalUrl": "https://example.com/",
        "message": "Transaction cancel failed.",
        "actions": [TransactionAction.CANCEL.upper(), TransactionAction.REFUND.upper()],
        "result": result.upper(),
    }
    # when
    transaction = TransactionCancelationRequestedSyncFailureSchema.model_validate(data)
    # then
    assert transaction.result == result
@pytest.mark.parametrize(
"result",
[
TransactionEventType.CHARGE_SUCCESS,
TransactionEventType.REFUND_SUCCESS,
TransactionEventType.CHARGE_FAILURE,
TransactionEventType.REFUND_FAILURE,
],
)
def test_transaction_cancel_requested_sync_failure_schema_invalid(result):
    """Results other than CANCEL_FAILURE are rejected with one error on `result`."""
    # given
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
    }
    # when
    with pytest.raises(ValidationError) as exc_info:
        TransactionCancelationRequestedSyncFailureSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"] == ("result",)
def test_transaction_cancel_requested_async_schema_valid():
    """An async cancel response only needs a string pspReference."""
    # given
    data = {
        "pspReference": "psp-async-123",
        "actions": [TransactionAction.CANCEL.upper()],
    }
    # when
    transaction = TransactionCancelationRequestedAsyncSchema.model_validate(data)
    # then
    assert transaction.psp_reference == "psp-async-123"
def test_transaction_cancel_requested_async_schema_invalid():
    """A non-string pspReference fails with a single error at the aliased name."""
    # given
    data = {
        "pspReference": 123,
        "actions": [TransactionAction.CANCEL.upper()],
    }
    # when
    with pytest.raises(ValidationError) as exc_info:
        TransactionCancelationRequestedAsyncSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"] == ("pspReference",)
def test_transaction_refund_requested_sync_success_schema_valid():
    """REFUND_SUCCESS parses through the refund-requested success schema."""
    # given
    result = TransactionEventType.REFUND_SUCCESS
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "time": "2023-01-01T12:00:00+00:00",
        "externalUrl": "https://example.com/",
        "message": "Transaction refunded successfully.",
        "actions": [TransactionAction.REFUND.upper(), TransactionAction.CHARGE.upper()],
        "result": result.upper(),
    }
    # when
    transaction = TransactionRefundRequestedSyncSuccessSchema.model_validate(data)
    # then
    assert transaction.result == result
@pytest.mark.parametrize(
"result",
[
TransactionEventType.CHARGE_SUCCESS,
TransactionEventType.CANCEL_SUCCESS,
TransactionEventType.CHARGE_FAILURE,
TransactionEventType.CANCEL_FAILURE,
],
)
def test_transaction_refund_requested_sync_success_schema_invalid(result):
    """Results other than REFUND_SUCCESS are rejected with one error on `result`."""
    # given
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
    }
    # when
    with pytest.raises(ValidationError) as exc_info:
        TransactionRefundRequestedSyncSuccessSchema.model_validate(data)
    # then
    assert len(exc_info.value.errors()) == 1
    assert exc_info.value.errors()[0]["loc"] == ("result",)
def test_transaction_refund_requested_sync_failure_schema_valid():
    """REFUND_FAILURE parses through the refund-requested failure schema."""
    # given
    result = TransactionEventType.REFUND_FAILURE
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "time": "2023-01-01T12:00:00+00:00",
        "externalUrl": "https://example.com/",
        "message": "Transaction refund failed.",
        "actions": [TransactionAction.REFUND.upper(), TransactionAction.CHARGE.upper()],
        "result": result.upper(),
    }
    # when
    transaction = TransactionRefundRequestedSyncFailureSchema.model_validate(data)
    # then
    assert transaction.result == result
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.CHARGE_SUCCESS,
        TransactionEventType.CANCEL_SUCCESS,
        TransactionEventType.CHARGE_FAILURE,
        TransactionEventType.CANCEL_FAILURE,
    ],
)
def test_transaction_refund_requested_sync_failure_schema_invalid(result):
    """Results other than refund failure must fail on the ``result`` field."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
    }
    # when
    with pytest.raises(ValidationError) as err:
        TransactionRefundRequestedSyncFailureSchema.model_validate(payload)
    # then
    errors = err.value.errors()
    assert len(errors) == 1
    assert errors[0]["loc"] == ("result",)
def test_transaction_refund_requested_async_schema_valid():
    """A minimal async refund payload validates and preserves pspReference."""
    # given
    payload = {
        "pspReference": "psp-async-123",
        "actions": [TransactionAction.REFUND.upper()],
    }
    # when
    parsed = TransactionRefundRequestedAsyncSchema.model_validate(payload)
    # then
    assert parsed.psp_reference == "psp-async-123"
def test_transaction_refund_requested_async_schema_invalid():
    """A non-string pspReference must be rejected by the async refund schema."""
    # given
    payload = {
        "pspReference": 123,
        "actions": [TransactionAction.REFUND.upper()],
    }
    # when
    with pytest.raises(ValidationError) as err:
        TransactionRefundRequestedAsyncSchema.model_validate(payload)
    # then
    errors = err.value.errors()
    assert len(errors) == 1
    assert errors[0]["loc"] == ("pspReference",)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
    ],
)
def test_transaction_session_action_required_schema_valid_result(result):
    """ACTION_REQUIRED results are accepted by the action-required schema."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
    }
    # when
    parsed = TransactionSessionActionRequiredSchema.model_validate(payload)
    # then
    assert parsed.result == result
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
    ],
)
@pytest.mark.parametrize(
    "payment_method_details",
    [
        # full card details
        {
            "type": "CARD",
            "name": "Test Card",
            "brand": "Brand",
            "firstDigits": "1234",
            "lastDigits": "5678",
            "expMonth": 12,
            "expYear": 2025,
        },
        # card with required fields only
        {"type": "CARD", "name": "Test Card"},
        # card with a subset of optional fields
        {"type": "CARD", "name": "Test Card", "brand": "Brand", "lastDigits": "5678"},
        # non-card method
        {"type": "OTHER", "name": "Test Other"},
        # card with all optional fields explicitly null
        {
            "type": "CARD",
            "name": "Test Card",
            "brand": None,
            "firstDigits": None,
            "lastDigits": None,
            "expMonth": None,
            "expYear": None,
        },
    ],
)
def test_transaction_session_action_required_schema_valid_payment_method_details(
    payment_method_details, result
):
    """Accepted payment method details parse into snake_case attributes."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
        "paymentMethodDetails": payment_method_details,
    }
    # when
    parsed = TransactionSessionActionRequiredSchema.model_validate(payload)
    # then
    assert parsed.result == result
    assert parsed.payment_method_details
    details = parsed.payment_method_details
    assert details.type == payment_method_details["type"].lower()
    assert details.name == payment_method_details["name"]
    # Optional card-only fields: camelCase input keys map to snake_case attrs,
    # absent on either side compares as None.
    for attr, key in [
        ("brand", "brand"),
        ("first_digits", "firstDigits"),
        ("last_digits", "lastDigits"),
        ("exp_month", "expMonth"),
        ("exp_year", "expYear"),
    ]:
        assert getattr(details, attr, None) == payment_method_details.get(key)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
    ],
)
@pytest.mark.parametrize(
    "payment_method_details",
    [
        # unknown type
        {"type": "WRONG-TYPE", "name": "Test Card"},
        # missing name
        {"type": "CARD"},
        # missing type
        {"name": "Test Card"},
    ],
)
def test_transaction_session_action_required_schema_invalid_payment_method_details(
    payment_method_details, result
):
    """Malformed paymentMethodDetails must raise a ValidationError."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
        "paymentMethodDetails": payment_method_details,
    }
    # when & then
    with pytest.raises(ValidationError):
        TransactionSessionActionRequiredSchema.model_validate(payload)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_SUCCESS,
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
        TransactionEventType.AUTHORIZATION_REQUEST,
        TransactionEventType.CHARGE_REQUEST,
        TransactionEventType.REFUND_SUCCESS,
        TransactionEventType.CANCEL_SUCCESS,
    ],
)
def test_transaction_session_action_required_schema_invalid_result(result):
    """Non-ACTION_REQUIRED results must fail on the ``result`` field."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
    }
    # when
    with pytest.raises(ValidationError) as err:
        TransactionSessionActionRequiredSchema.model_validate(payload)
    # then
    errors = err.value.errors()
    assert len(errors) == 1
    assert errors[0]["loc"] == ("result",)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_SUCCESS,
        TransactionEventType.AUTHORIZATION_REQUEST,
        TransactionEventType.CHARGE_REQUEST,
    ],
)
def test_transaction_session_success_schema_valid_result(result):
    """Success and request results are accepted by the success schema."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
    }
    # when
    parsed = TransactionSessionSuccessSchema.model_validate(payload)
    # then
    assert parsed.result == result
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_SUCCESS,
        TransactionEventType.AUTHORIZATION_REQUEST,
        TransactionEventType.CHARGE_REQUEST,
    ],
)
@pytest.mark.parametrize(
    "payment_method_details",
    [
        # full card details
        {
            "type": "CARD",
            "name": "Test Card",
            "brand": "Brand",
            "firstDigits": "1234",
            "lastDigits": "5678",
            "expMonth": 12,
            "expYear": 2025,
        },
        # card with required fields only
        {"type": "CARD", "name": "Test Card"},
        # card with a subset of optional fields
        {"type": "CARD", "name": "Test Card", "brand": "Brand", "lastDigits": "5678"},
        # non-card method
        {"type": "OTHER", "name": "Test Other"},
        # card with all optional fields explicitly null
        {
            "type": "CARD",
            "name": "Test Card",
            "brand": None,
            "firstDigits": None,
            "lastDigits": None,
            "expMonth": None,
            "expYear": None,
        },
    ],
)
def test_transaction_session_success_schema_valid_payment_method_details(
    payment_method_details, result
):
    """Accepted payment method details parse into snake_case attributes."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
        "paymentMethodDetails": payment_method_details,
    }
    # when
    parsed = TransactionSessionSuccessSchema.model_validate(payload)
    # then
    assert parsed.result == result
    assert parsed.payment_method_details
    details = parsed.payment_method_details
    assert details.type == payment_method_details["type"].lower()
    assert details.name == payment_method_details["name"]
    # Optional card-only fields: camelCase input keys map to snake_case attrs,
    # absent on either side compares as None.
    for attr, key in [
        ("brand", "brand"),
        ("first_digits", "firstDigits"),
        ("last_digits", "lastDigits"),
        ("exp_month", "expMonth"),
        ("exp_year", "expYear"),
    ]:
        assert getattr(details, attr, None) == payment_method_details.get(key)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_SUCCESS,
    ],
)
@pytest.mark.parametrize(
    "payment_method_details",
    [
        # unknown type
        {"type": "WRONG-TYPE", "name": "Test Card"},
        # missing name
        {"type": "CARD"},
        # missing type
        {"name": "Test Card"},
    ],
)
def test_transaction_session_success_schema_invalid_payment_method_details(
    payment_method_details, result
):
    """Malformed paymentMethodDetails must raise a ValidationError."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
        "paymentMethodDetails": payment_method_details,
    }
    # when & then
    with pytest.raises(ValidationError):
        TransactionSessionSuccessSchema.model_validate(payload)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
        TransactionEventType.REFUND_SUCCESS,
        TransactionEventType.CANCEL_SUCCESS,
    ],
)
def test_transaction_session_success_schema_invalid_result(result):
    """Results outside the success schema's set must fail on ``result``."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
    }
    # when
    with pytest.raises(ValidationError) as err:
        TransactionSessionSuccessSchema.model_validate(payload)
    # then
    errors = err.value.errors()
    assert len(errors) == 1
    assert errors[0]["loc"] == ("result",)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
    ],
)
def test_transaction_session_failure_schema_valid_result(result):
    """Failure results are accepted by the failure schema."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
    }
    # when
    parsed = TransactionSessionFailureSchema.model_validate(payload)
    # then
    assert parsed.result == result
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
    ],
)
@pytest.mark.parametrize(
    "payment_method_details",
    [
        # full card details
        {
            "type": "CARD",
            "name": "Test Card",
            "brand": "Brand",
            "firstDigits": "1234",
            "lastDigits": "5678",
            "expMonth": 12,
            "expYear": 2025,
        },
        # card with required fields only
        {"type": "CARD", "name": "Test Card"},
        # card with a subset of optional fields
        {"type": "CARD", "name": "Test Card", "brand": "Brand", "lastDigits": "5678"},
        # non-card method
        {"type": "OTHER", "name": "Test Other"},
        # card with all optional fields explicitly null
        {
            "type": "CARD",
            "name": "Test Card",
            "brand": None,
            "firstDigits": None,
            "lastDigits": None,
            "expMonth": None,
            "expYear": None,
        },
    ],
)
def test_transaction_session_failure_schema_valid_payment_method_details(
    payment_method_details, result
):
    """Accepted payment method details parse into snake_case attributes."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
        "paymentMethodDetails": payment_method_details,
    }
    # when
    parsed = TransactionSessionFailureSchema.model_validate(payload)
    # then
    assert parsed.result == result
    assert parsed.payment_method_details
    details = parsed.payment_method_details
    assert details.type == payment_method_details["type"].lower()
    assert details.name == payment_method_details["name"]
    # Optional card-only fields: camelCase input keys map to snake_case attrs,
    # absent on either side compares as None.
    for attr, key in [
        ("brand", "brand"),
        ("first_digits", "firstDigits"),
        ("last_digits", "lastDigits"),
        ("exp_month", "expMonth"),
        ("exp_year", "expYear"),
    ]:
        assert getattr(details, attr, None) == payment_method_details.get(key)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_FAILURE,
        TransactionEventType.CHARGE_FAILURE,
    ],
)
@pytest.mark.parametrize(
    "payment_method_details",
    [
        # unknown type
        {
            "type": "WRONG-TYPE",
            "name": "Test Card",
        },
        # Missing name
        {
            "type": "CARD",
        },
        # Missing type
        {
            "name": "Test Card",
        },
    ],
)
def test_transaction_session_failure_schema_invalid_payment_method_details(
    payment_method_details, result
):
    """Malformed paymentMethodDetails must be rejected by the failure schema.

    The parametrized ``result`` values must be ones the failure schema itself
    accepts (the ``*_FAILURE`` events, mirroring
    ``test_transaction_session_failure_schema_valid_result``). The previous
    ``*_ACTION_REQUIRED`` values were invalid for this schema, so validation
    failed on ``result`` alone and the test passed even for well-formed
    payment method details, never exercising the field under test.
    """
    # given
    data = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
        "paymentMethodDetails": payment_method_details,
    }
    # when & then
    with pytest.raises(ValidationError):
        TransactionSessionFailureSchema.model_validate(data)
@pytest.mark.parametrize(
    "result",
    [
        TransactionEventType.AUTHORIZATION_SUCCESS,
        TransactionEventType.CHARGE_SUCCESS,
        TransactionEventType.AUTHORIZATION_ACTION_REQUIRED,
        TransactionEventType.CHARGE_ACTION_REQUIRED,
        TransactionEventType.AUTHORIZATION_REQUEST,
        TransactionEventType.CHARGE_REQUEST,
        TransactionEventType.REFUND_SUCCESS,
        TransactionEventType.CANCEL_SUCCESS,
    ],
)
def test_transaction_session_failure_schema_invalid_result(result):
    """Non-failure results must fail on the ``result`` field."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": result.upper(),
        "data": "test-data",
    }
    # when
    with pytest.raises(ValidationError) as err:
        TransactionSessionFailureSchema.model_validate(payload)
    # then
    errors = err.value.errors()
    assert len(errors) == 1
    assert errors[0]["loc"] == ("result",)
@pytest.mark.parametrize(
    "data_value",
    [
        # plain mapping
        {"key": "value", "another_key": "another_value"},
        # empty mapping
        {},
        # special characters
        {"key": "!@#$%^&*()_+"},
        # nested structure
        {"nested": {"key": "value"}},
        # list value
        {"list": ["item1", "item2", "item3"]},
        # null
        None,
        # bare string
        "string_data",
        # bare integer
        123,
    ],
)
def test_transaction_session_base_schema_valid_data(data_value):
    """Any JSON-serializable value (or None) is accepted in ``data``."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": TransactionEventType.AUTHORIZATION_SUCCESS.upper(),
        "data": data_value,
    }
    # when
    parsed = TransactionSessionBaseSchema.model_validate(payload)
    # then
    assert parsed.data == data_value
@pytest.mark.parametrize(
    "data_value",
    [
        # arbitrary object - not JSON serializable
        object(),
        # set - not JSON serializable
        {1, 2, 3},
        # function
        lambda x: x,
        # builtin callable (not a file handle - `open` itself)
        open,
    ],
)
def test_transaction_session_base_schema_invalid_data(data_value):
    """Values that cannot be serialized to JSON must fail on ``data``."""
    # given
    payload = {
        "pspReference": "psp-123",
        "amount": Decimal("100.50"),
        "result": TransactionEventType.CHARGE_SUCCESS.upper(),
        "data": data_value,
    }
    # when
    with pytest.raises(ValidationError) as err:
        TransactionSessionBaseSchema.model_validate(payload)
    # then
    errors = err.value.errors()
    assert len(errors) == 1
    assert errors[0]["loc"] == ("data",)
@pytest.mark.parametrize(
    "data_value",
    [
        # plain mapping
        {"key": "value", "another_key": "another_value"},
        # empty mapping
        {},
        # special characters
        {"key": "!@#$%^&*()_+"},
        # nested structure
        {"nested": {"key": "value"}},
        # list value
        {"list": ["item1", "item2", "item3"]},
        # null
        None,
        # bare string
        "string_data",
        # bare integer
        123,
    ],
)
def test_payment_gateway_initialize_schema_valid_data(data_value):
    """Any JSON-serializable value (or None) is accepted in ``data``."""
    # given
    payload = {
        "data": data_value,
    }
    # when
    parsed = PaymentGatewayInitializeSessionSchema.model_validate(payload)
    # then
    assert parsed.data == data_value
@pytest.mark.parametrize(
    "data_value",
    [
        # arbitrary object - not JSON serializable
        object(),
        # set - not JSON serializable
        {1, 2, 3},
        # function
        lambda x: x,
        # builtin callable (not a file handle - `open` itself)
        open,
    ],
)
def test_payment_gateway_initialize_schema_invalid_data(data_value):
    """Values that cannot be serialized to JSON must fail on ``data``."""
    # given
    payload = {
        "data": data_value,
    }
    # when
    with pytest.raises(ValidationError) as err:
        PaymentGatewayInitializeSessionSchema.model_validate(payload)
    # then
    errors = err.value.errors()
    assert len(errors) == 1
    assert errors[0]["loc"] == ("data",)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/tests/response_schemas/test_transaction.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/webhook/transport/metrics.py | from datetime import UTC, datetime
from urllib.parse import urlparse
from opentelemetry.semconv.attributes import error_attributes, server_attributes
from ...app.models import App
from ...core.models import EventDeliveryStatus
from ...core.telemetry import (
DEFAULT_DURATION_BUCKETS,
MetricType,
Scope,
Unit,
meter,
saleor_attributes,
)
from .utils import WebhookResponse
# Initialize metrics
# Counter of outgoing webhook requests, tagged with event type, execution
# mode, app identifier and (on failure) an error type.
METRIC_EXTERNAL_REQUEST_COUNT = meter.create_metric(
    "saleor.external_request.count",
    scope=Scope.SERVICE,
    type=MetricType.COUNTER,
    unit=Unit.REQUEST,
    description="Number of webhook events.",
)
# Histogram of webhook delivery durations, using the shared duration buckets.
METRIC_EXTERNAL_REQUEST_DURATION = meter.create_metric(
    "saleor.external_request.duration",
    scope=Scope.SERVICE,
    type=MetricType.HISTOGRAM,
    unit=Unit.SECOND,
    description="Duration of webhook event delivery.",
    bucket_boundaries=DEFAULT_DURATION_BUCKETS,
)
# Explicit histogram bucket boundaries (in bytes) for webhook payload sizes.
BODY_SIZE_BUCKETS = [
    0,  # 0B
    100,  # 100B
    500,  # 500B
    1000,  # 1KB
    2000,  # 2KB
    4000,  # 4KB
    8000,  # 8KB
    16000,  # 16KB
    32000,  # 32KB
    64000,  # 64KB
    128000,  # 128KB
    256000,  # 256KB
    512000,  # 512KB
    1048576,  # 1MB
    2097152,  # 2MB
    4194304,  # 4MB
]
# Histogram of webhook payload sizes, bucketed per BODY_SIZE_BUCKETS above.
METRIC_EXTERNAL_REQUEST_BODY_SIZE = meter.create_metric(
    "saleor.external_request.body.size",
    scope=Scope.SERVICE,
    type=MetricType.HISTOGRAM,
    unit=Unit.BYTE,
    description="Size of webhook event payloads.",
    bucket_boundaries=BODY_SIZE_BUCKETS,
)
# Histogram of the delay before the first async delivery attempt.
# NOTE(review): declared with Unit.MILLISECOND while the recording site
# (record_first_delivery_attempt_delay) passes a value in Unit.SECOND —
# presumably meter.record converts between units; confirm, otherwise the
# reported delay is off by a factor of 1000.
METRIC_EXTERNAL_REQUEST_FIRST_ATTEMPT_DELAY = meter.create_metric(
    "saleor.external_request.async.first_attempt_delay",
    scope=Scope.CORE,
    type=MetricType.HISTOGRAM,
    unit=Unit.MILLISECOND,
    description="Delay of the first delivery attempt for async webhook.",
)
def record_external_request(
    event_type: str,
    target_url: str,
    webhook_response: WebhookResponse,
    payload_size: int,
    app: App,
    sync: bool,
) -> None:
    """Record count, payload-size and duration metrics for one webhook request.

    All three metrics share the same attribute set: target host, webhook
    event type, sync/async execution mode and app identifier.
    """
    execution_mode = "sync" if sync else "async"
    attributes = {
        server_attributes.SERVER_ADDRESS: urlparse(target_url).hostname or "",
        saleor_attributes.SALEOR_WEBHOOK_EVENT_TYPE: event_type,
        saleor_attributes.SALEOR_WEBHOOK_EXECUTION_MODE: execution_mode,
        saleor_attributes.SALEOR_APP_IDENTIFIER: app.identifier,
    }
    # Tag failed deliveries so they can be filtered separately downstream.
    if webhook_response.status == EventDeliveryStatus.FAILED:
        attributes[error_attributes.ERROR_TYPE] = "request_error"
    meter.record(METRIC_EXTERNAL_REQUEST_COUNT, 1, Unit.REQUEST, attributes=attributes)
    meter.record(
        METRIC_EXTERNAL_REQUEST_BODY_SIZE,
        payload_size,
        Unit.BYTE,
        attributes=attributes,
    )
    meter.record(
        METRIC_EXTERNAL_REQUEST_DURATION,
        webhook_response.duration,
        Unit.SECOND,
        attributes=attributes,
    )
def record_first_delivery_attempt_delay(
    created_at: datetime, event_type: str, app: App
) -> None:
    """Record how long the first async delivery attempt waited since creation.

    ``created_at`` must be timezone-aware, since it is subtracted from an
    aware ``datetime.now(UTC)``.
    """
    # Elapsed time in seconds between delivery creation and now.
    delay = (datetime.now(UTC) - created_at).total_seconds()
    attributes = {
        saleor_attributes.SALEOR_WEBHOOK_EVENT_TYPE: event_type,
        saleor_attributes.SALEOR_APP_IDENTIFIER: app.identifier,
    }
    # NOTE(review): the metric is declared with Unit.MILLISECOND but the value
    # is recorded here in Unit.SECOND — presumably meter.record converts
    # between units; confirm, otherwise the delay is off by a factor of 1000.
    meter.record(
        METRIC_EXTERNAL_REQUEST_FIRST_ATTEMPT_DELAY,
        delay,
        unit=Unit.SECOND,
        attributes=attributes,
    )
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/webhook/transport/metrics.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
scikit-learn/scikit-learn:sklearn/tree/tests/test_split.py | from dataclasses import dataclass
from itertools import product
from operator import itemgetter
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.sparse import csc_array
from scipy.special import xlogy
from sklearn.metrics import mean_poisson_deviance
from sklearn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from sklearn.utils.stats import _weighted_percentile
# Split criteria exercised for classification and regression trees.
CLF_CRITERIONS = ("gini", "log_loss")
REG_CRITERIONS = ("squared_error", "absolute_error", "poisson")
# Estimator registries keyed by class name; "Extra*" trees pick random
# (rather than optimal) thresholds, which the tests account for.
CLF_TREES = {
    "DecisionTreeClassifier": DecisionTreeClassifier,
    "ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "ExtraTreeRegressor": ExtraTreeRegressor,
}
@dataclass
class NaiveSplitter:
    """Brute-force reference splitter used to validate tree split search.

    Evaluates every candidate split exhaustively; `n_classes` is only used
    by the classification criteria.
    """

    criterion: str
    n_classes: int = 0

    def compute_node_value_and_impurity(self, y, w):
        """Return ``(prediction, weighted impurity)`` for the node ``(y, w)``."""
        total_weight = np.sum(w)
        if total_weight < 1e-7:
            # A (nearly) empty node cannot form a valid split.
            return np.nan, np.inf
        if self.criterion in ["gini", "entropy", "log_loss"]:
            # Class probabilities from weighted counts.
            pred = np.bincount(y, weights=w, minlength=self.n_classes) / total_weight
            if self.criterion == "gini":
                # gini = 1 - sum(pk^2)
                loss = 1.0 - np.sum(pred**2)
            else:
                # entropy in bits: -sum(pk * log2(pk))
                loss = -np.sum(xlogy(pred, pred)) / np.log(2)
        elif self.criterion == "squared_error":
            pred = np.average(y, weights=w)
            loss = np.average((y - pred) ** 2, weights=w)
        elif self.criterion == "absolute_error":
            # Weighted median minimizes the absolute error.
            pred = _weighted_percentile(y, w, percentile_rank=50, average=True)
            loss = np.average(np.abs(y - pred), weights=w)
        elif self.criterion == "poisson":
            pred = np.average(y, weights=w)
            loss = mean_poisson_deviance(y, np.repeat(pred, y.size), sample_weight=w)
            loss *= 1 / 2
        else:
            raise ValueError(f"Unknown criterion: {self.criterion}")
        return pred, loss * total_weight

    def compute_split_nodes(self, X, y, w, feature, threshold=None, missing_left=False):
        """Split on ``X[:, feature] <= threshold`` and evaluate both children."""
        column = X[:, feature]
        left_mask = column <= threshold
        if missing_left:
            # Route NaNs to the left child when requested.
            left_mask = left_mask | np.isnan(column)
        right_mask = ~left_mask
        left = self.compute_node_value_and_impurity(y[left_mask], w[left_mask])
        right = self.compute_node_value_and_impurity(y[right_mask], w[right_mask])
        return left, right

    def compute_split_impurity(
        self, X, y, w, feature, threshold=None, missing_left=False
    ):
        """Return the summed children impurity of one candidate split."""
        (_, imp_left), (_, imp_right) = self.compute_split_nodes(
            X, y, w, feature, threshold, missing_left
        )
        return imp_left + imp_right

    def _generate_all_splits(self, X):
        """Yield every candidate split as kwargs for ``compute_split_impurity``."""
        for feature in range(X.shape[1]):
            column = X[:, feature]
            nan_mask = np.isnan(column)
            thresholds = np.unique(column[~nan_mask])
            for threshold in thresholds:
                yield {
                    "feature": feature,
                    "threshold": threshold,
                    "missing_left": False,
                }
            if nan_mask.any():
                # include -inf to test the split with only NaNs on the left node
                for threshold in [*thresholds, -np.inf]:
                    yield {
                        "feature": feature,
                        "threshold": threshold,
                        "missing_left": True,
                    }

    def best_split_naive(self, X, y, w):
        """Exhaustive search; return ``(impurity, split)`` of the best split."""
        candidates = list(self._generate_all_splits(X))
        if not candidates:
            return (np.inf, None)
        scored = [
            (self.compute_split_impurity(X, y, w, **candidate), candidate)
            for candidate in candidates
        ]
        return min(scored, key=itemgetter(0))
def make_simple_dataset(
    n,
    d,
    with_nans,
    is_sparse,
    is_clf,
    n_classes,
    rng,
):
    """Build a random (n, d) dataset for split tests.

    Returns ``(X_dense_float32, X, y, w)`` where ``X`` is either the dense
    array or its ``csc_array`` form, ``y`` is continuous (regression) or
    quantile-binned into ``n_classes`` labels (classification), and ``w``
    holds either small integer or uniform float sample weights.

    NOTE: the exact sequence of ``rng`` draws is part of the behavior — the
    same seed must reproduce the same dataset across calls.
    """
    X_dense = rng.random((n, d))
    # Target correlates with the features so splits are informative.
    y = rng.random(n) + X_dense.sum(axis=1)
    w = rng.integers(0, 5, size=n) if rng.uniform() < 0.5 else rng.random(n)
    # Randomly round features to create duplicate values (tied thresholds).
    with_duplicates = rng.integers(2) == 0
    if with_duplicates:
        X_dense = X_dense.round(1 if n < 50 else 2)
    if with_nans:
        # Inject missing values at a random density.
        nan_density = rng.uniform(0.05, 0.8)
        mask = rng.random(X_dense.shape) < nan_density
        X_dense[mask] = np.nan
    if is_sparse:
        # Center then zero-out entries so the sparse matrix has true zeros.
        density = rng.uniform(0.05, 0.99)
        X_dense -= 0.5
        mask = rng.random(X_dense.shape) > density
        X_dense[mask] = 0
        X = csc_array(X_dense)
    else:
        X = X_dense
    if is_clf:
        # Bin the continuous target into n_classes quantile-based labels.
        q = np.linspace(0, 1, num=n_classes + 1)[1:-1]
        y = np.searchsorted(np.quantile(y, q), y)
    # Trees cast X to float32 internally; match that dtype here to avoid
    # routing/impurity mismatches from rounding with `<=`.
    return X_dense.astype("float32"), X, y, w
@pytest.mark.filterwarnings("ignore:.*friedman_mse.*:FutureWarning")
@pytest.mark.parametrize(
    "Tree, criterion",
    [
        *product(REG_TREES.values(), REG_CRITERIONS),
        *product(CLF_TREES.values(), CLF_CRITERIONS),
    ],
)
@pytest.mark.parametrize(
    "sparse, missing_values",
    [(False, False), (True, False), (False, True)],
    ids=["dense-without_missing", "sparse-without_missing", "dense-with_missing"],
)
def test_split_impurity(Tree, criterion, sparse, missing_values, global_random_seed):
    """Cross-check depth-1 tree splits against the NaiveSplitter reference.

    For many random datasets, verify that the fitted tree's node impurities
    and values match the naive computation, and (for non-Extra trees) that
    the chosen split attains the globally optimal impurity.
    """
    is_clf = criterion in CLF_CRITERIONS
    # TODO: (remove in PR #32119)
    if missing_values and criterion == "absolute_error":
        pytest.skip("AE + missing values not supported yet")
    if missing_values and criterion == "poisson":
        pytest.xfail("Poisson criterion is faulty for now")
    rng = np.random.default_rng(global_random_seed)
    # Many tiny datasets (more edge cases) plus a few larger ones.
    ns = [5] * 5 + [10] * 5 + [20, 30, 50, 100]
    for it, n in enumerate(ns):
        d = rng.integers(1, 4)
        n_classes = rng.integers(2, 5)  # only used for classification
        X_dense, X, y, w = make_simple_dataset(
            n, d, missing_values, sparse, is_clf, n_classes, rng
        )
        naive_splitter = NaiveSplitter(criterion, n_classes)
        # max_depth=1: a single split keeps the comparison tractable.
        tree = Tree(
            criterion=criterion,
            max_depth=1,
            random_state=global_random_seed,
        )
        tree.fit(X, y, sample_weight=w)
        # Convert per-node mean impurity to total (weighted) impurity.
        actual_impurity = tree.tree_.impurity * tree.tree_.weighted_n_node_samples
        actual_value = tree.tree_.value[:, 0]
        # Check root's impurity:
        # The root is 0, left child is 1 and right child is 2.
        root_val, root_impurity = naive_splitter.compute_node_value_and_impurity(y, w)
        assert_allclose(root_impurity, actual_impurity[0], atol=1e-12)
        assert_allclose(root_val, actual_value[0], atol=1e-12)
        if tree.tree_.node_count == 1:
            # if no splits was made assert that either:
            assert (
                "Extra" in Tree.__name__
                or root_impurity < 1e-12  # root impurity is 0
                # or no valid split can be made:
                or naive_splitter.best_split_naive(X_dense, y, w)[0] == np.inf
            )
            continue
        # Check children impurity:
        actual_split = {
            "feature": int(tree.tree_.feature[0]),
            "threshold": tree.tree_.threshold[0],
            "missing_left": bool(tree.tree_.missing_go_to_left[0]),
        }
        nodes = naive_splitter.compute_split_nodes(X_dense, y, w, **actual_split)
        (left_val, left_impurity), (right_val, right_impurity) = nodes
        assert_allclose(left_impurity, actual_impurity[1], atol=1e-12)
        assert_allclose(right_impurity, actual_impurity[2], atol=1e-12)
        assert_allclose(left_val, actual_value[1], atol=1e-12)
        assert_allclose(right_val, actual_value[2], atol=1e-12)
        if "Extra" in Tree.__name__:
            # The remainder of the test checks for optimality of the found split.
            # However, randomized trees are not guaranteed to find an optimal split
            # but only a "better-than-nothing" split.
            # Therefore, end the test here for these models.
            continue
        # Check that the selected split has the same impurity as the best split
        # found by the naive splitter. Note that there could exist multiple splits
        # with the same optimal impurity, so the assertion is made on the impurity
        # value: the split value is only displayed to help debugging in case
        # of assertion failure.
        best_impurity, best_split = naive_splitter.best_split_naive(X_dense, y, w)
        actual_split_impurity = actual_impurity[1:].sum()
        assert np.isclose(best_impurity, actual_split_impurity), (
            best_split,
            actual_split,
        )
| {
"repo_id": "scikit-learn/scikit-learn",
"file_path": "sklearn/tree/tests/test_split.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
scikit-learn/scikit-learn:sklearn/ensemble/_bootstrap.py | """Utility function to get the number of bootstrap samples."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral
from warnings import warn
def _get_n_samples_bootstrap(n_samples, max_samples, sample_weight):
"""
Get the number of samples in a bootstrap sample.
Notes
-----
The frequency semantics of :term:`sample_weight` is guaranteed when
`max_samples` is a float or integer, but not when `max_samples` is None. The
returned `n_samples_bootstrap` will be the same between a weighted dataset
with integer `sample_weights` and a dataset with as many rows repeated when
`max_samples` is a float or integer. They will differ when `max_samples` is
None (the weighted and repeated datasets do not have the same number of rows).
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : None, int or float
The maximum number of samples to draw.
- If None, then draw `n_samples` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * n_samples` unweighted samples or
`max_samples * sample_weight.sum()` weighted samples.
sample_weight : array of shape (n_samples,) or None
Sample weights.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
elif isinstance(max_samples, Integral):
return max_samples
if sample_weight is None:
weighted_n_samples = n_samples
weighted_n_samples_msg = f"the number of samples is {weighted_n_samples} "
else:
weighted_n_samples = sample_weight.sum()
weighted_n_samples_msg = (
f"the total sum of sample weights is {weighted_n_samples} "
)
# max_samples Real fractional value relative to weighted_n_samples
n_samples_bootstrap = max(int(max_samples * weighted_n_samples), 1)
# Warn when number of bootstrap samples is suspiciously small.
# This heuristic for "suspiciously small" might be adapted if found
# unsuitable in practice.
if n_samples_bootstrap < max(10, n_samples ** (1 / 3)):
warn(
f"Using the fractional value {max_samples=} when {weighted_n_samples_msg}"
f"results in a low number ({n_samples_bootstrap}) of bootstrap samples. "
"We recommend passing `max_samples` as an integer instead."
)
return n_samples_bootstrap
| {
"repo_id": "scikit-learn/scikit-learn",
"file_path": "sklearn/ensemble/_bootstrap.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
scikit-learn/scikit-learn:sklearn/ensemble/tests/test_bootstrap.py | """
Testing for the utility function _get_n_samples_bootstrap
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
import pytest
from sklearn.ensemble._bootstrap import _get_n_samples_bootstrap
def test_get_n_samples_bootstrap():
    """Check returned counts and the low-sample-count warning behavior."""
    # max_samples=None: the dataset size is returned unchanged.
    assert _get_n_samples_bootstrap(10, None, "not_used") == 10
    # max_samples as an int is returned as-is.
    assert _get_n_samples_bootstrap(10, 5, "not_used") == 5
    # Small fractional bootstrap sizes must trigger a UserWarning.
    warning_msg = ".+the number of samples.+low number.+max_samples.+as an integer"
    with pytest.warns(UserWarning, match=warning_msg):
        assert _get_n_samples_bootstrap(10, 0.66, None) == int(0.66 * 10)
    with pytest.warns(UserWarning, match=warning_msg):
        assert _get_n_samples_bootstrap(10, 0.01, None) == 1
    # Weighted variant of the warning mentions the sum of sample weights.
    warning_msg_with_weights = (
        ".+the total sum of sample weights.+low number.+max_samples.+as an integer"
    )
    rng = np.random.default_rng(0)
    sample_weight = rng.uniform(size=10)
    with pytest.warns(UserWarning, match=warning_msg_with_weights):
        assert _get_n_samples_bootstrap(10, 0.8, sample_weight) == int(
            0.8 * sample_weight.sum()
        )
    # Large enough bootstrap sizes must not warn at all.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        assert _get_n_samples_bootstrap(100, 30, None) == 30
        sample_weight = rng.uniform(size=100)
        assert _get_n_samples_bootstrap(100, 0.5, sample_weight) == int(
            0.5 * sample_weight.sum()
        )
@pytest.mark.parametrize("max_samples", [None, 1, 5, 1000, 0.1, 1.0, 1.5])
def test_n_samples_bootstrap_repeated_weighted_equivalence(max_samples):
    """Integer-weighted rows and repeated rows agree except for max_samples=None."""
    n_samples = 100
    rng = np.random.RandomState(0)
    sample_weight = rng.randint(2, 5, n_samples)
    n_bootstrap_weighted = _get_n_samples_bootstrap(
        n_samples, max_samples, sample_weight
    )
    # Equivalent dataset where each row is repeated sample_weight[i] times.
    n_bootstrap_repeated = _get_n_samples_bootstrap(
        sample_weight.sum(), max_samples, None
    )
    if max_samples is None:
        # None draws "n_samples", which differs between the two datasets.
        assert n_bootstrap_weighted != n_bootstrap_repeated
    else:
        assert n_bootstrap_weighted == n_bootstrap_repeated
| {
"repo_id": "scikit-learn/scikit-learn",
"file_path": "sklearn/ensemble/tests/test_bootstrap.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.