Dataset schema:

| column | dtype | range / values | nullable (⌀) |
|---|---|---|---|
| hexsha | string | lengths 40-40 | no |
| size | int64 | 1 to 1.03M | no |
| ext | string (class) | 10 values | no |
| lang | string (class) | 1 value | no |
| max_stars_repo_path | string | lengths 3-239 | no |
| max_stars_repo_name | string | lengths 5-130 | no |
| max_stars_repo_head_hexsha | string | lengths 40-78 | no |
| max_stars_repo_licenses | list | lengths 1-10 | no |
| max_stars_count | int64 | 1 to 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 | yes |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 | yes |
| max_issues_repo_path | string | lengths 3-239 | no |
| max_issues_repo_name | string | lengths 5-130 | no |
| max_issues_repo_head_hexsha | string | lengths 40-78 | no |
| max_issues_repo_licenses | list | lengths 1-10 | no |
| max_issues_count | int64 | 1 to 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 | yes |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 | yes |
| max_forks_repo_path | string | lengths 3-239 | no |
| max_forks_repo_name | string | lengths 5-130 | no |
| max_forks_repo_head_hexsha | string | lengths 40-78 | no |
| max_forks_repo_licenses | list | lengths 1-10 | no |
| max_forks_count | int64 | 1 to 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 | yes |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 | yes |
| content | string | lengths 1 to 1.03M | no |
| avg_line_length | float64 | 1 to 958k | no |
| max_line_length | int64 | 1 to 1.03M | no |
| alphanum_fraction | float64 | 0 to 1 | no |
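For orientation, rows with this schema can be streamed with the Hugging Face `datasets` library. A minimal sketch follows; the dataset ID is a placeholder, since this dump does not name the dataset:

```python
# Minimal sketch, assuming the `datasets` library is installed.
# "<dataset-id>" is a placeholder; the dump does not name the dataset.
from datasets import load_dataset

ds = load_dataset("<dataset-id>", split="train", streaming=True)
for row in ds.take(1):
    print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
```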
| hexsha | size | ext | lang |
|---|---|---|---|
| 4a147698daeee0172bfdb9915bb99f41c11597b4 | 8,517 | py | Python |

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | src/users/schema.py | cbsBiram/xarala__ssr | 863e1362c786daa752b942b796f7a015211d2f1b | ["FSFAP"] | null | null | null |
| issues | src/users/schema.py | cbsBiram/xarala__ssr | 863e1362c786daa752b942b796f7a015211d2f1b | ["FSFAP"] | null | null | null |
| forks | src/users/schema.py | cbsBiram/xarala__ssr | 863e1362c786daa752b942b796f7a015211d2f1b | ["FSFAP"] | null | null | null |

content:
from django.db.models.aggregates import Sum
import graphene
import graphql_jwt
from django.conf import settings
from django.contrib.auth.forms import PasswordChangeForm
from django.db.models import Q
from graphene_django import DjangoObjectType
from graphene_django.forms.mutation import DjangoFormMutation
from graphql import GraphQLError
from blog.query_types import PostType
from .models import CustomUser as User
from .models import ResetCode
from .tasks import account_created, send_password_reset_email
from xarala.utils import email_validation_function, get_paginator, save_base_64
class UserType(DjangoObjectType):
class Meta:
model = User
get_user_posts = graphene.List(PostType)
def resolve_get_user_posts(instance, info, **kwargs):
return instance.user_posts()
class UserPaginatedType(graphene.ObjectType):
page = graphene.Int()
pages = graphene.Int()
has_next = graphene.Boolean()
has_prev = graphene.Boolean()
objects = graphene.List(UserType)
class AdminKpisType(graphene.ObjectType):
students_count = graphene.Int()
teachers_count = graphene.Int()
authors_count = graphene.Int()
sales_figures = graphene.Decimal()
class Query(graphene.ObjectType):
me = graphene.Field(UserType)
user = graphene.Field(UserType, id=graphene.Int(required=True))
users = graphene.Field(AdminKpisType)
students = graphene.Field(UserPaginatedType, page=graphene.Int())
teachers = graphene.Field(UserPaginatedType, page=graphene.Int())
authors = graphene.Field(UserPaginatedType, page=graphene.Int())
listTeachers = graphene.List(UserType)
listAuthors = graphene.List(UserType)
def resolve_user(self, info, id):
return User.objects.get(id=id)
def resolve_me(self, info):
user = info.context.user
if user.is_anonymous:
raise GraphQLError("Not loged in!")
return user
def resolve_users(self, info):
user = info.context.user
if not user.is_staff:
raise GraphQLError("You're not admin!")
users = User.objects.all()
students_count = users.filter(is_student=True).count()
teachers_count = users.filter(is_teacher=True).count()
authors_count = users.filter(is_writer=True).count()
students = users.filter(is_student=True).exclude(courses_enrolled=None)
prices_list = [
student.courses_enrolled.aggregate(Sum("price"))["price__sum"]
for student in students
]
sales_figures = sum(prices_list)
return AdminKpisType(
students_count, teachers_count, authors_count, sales_figures
)
def resolve_students(self, info, page):
page_size = 10
user = info.context.user
if not user.is_staff:
raise GraphQLError("You're not admin!")
users = User.objects.filter(is_student=True).order_by("-id")
return get_paginator(users, page_size, page, UserPaginatedType)
def resolve_teachers(self, info, page):
page_size = 10
user = info.context.user
if not user.is_staff:
raise GraphQLError("You're not admin!")
users = User.objects.filter(is_teacher=True).order_by("-id")
return get_paginator(users, page_size, page, UserPaginatedType)
def resolve_authors(self, info, page):
page_size = 10
user = info.context.user
if not user.is_staff:
raise GraphQLError("You're not admin!")
users = User.objects.filter(is_writer=True).order_by("-id")
return get_paginator(users, page_size, page, UserPaginatedType)
def resolve_listTeachers(self, info):
return User.objects.filter(is_teacher=True).order_by("-id")
def resolve_listAuthors(self, info):
return User.objects.filter(is_writer=True).order_by("-id")
class UpdateUser(graphene.Mutation):
user = graphene.Field(UserType)
class Arguments:
firstName = graphene.String()
lastName = graphene.String()
phone = graphene.String()
address = graphene.String()
userId = graphene.Int(required=True)
def mutate(self, info, firstName, lastName, phone, address, userId):
user = User.objects.get(id=userId)
user.first_name = firstName
user.last_name = lastName
user.address = address
user.phone = phone
user.save()
return UpdateUser(user=user)
class UpdateAvatar(graphene.Mutation):
success = graphene.Boolean()
class Arguments:
file = graphene.String()
def mutate(self, info, file):
final_file_url = save_base_64(file)
user = info.context.user
if user.is_anonymous:
raise GraphQLError("Log in to edit user account!")
user.avatar = final_file_url
user.save()
return UpdateAvatar(success=True)
class AuthMutation(graphene.ObjectType):
# django-graphql-jwt inheritances
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
revoke_token = graphql_jwt.Revoke.Field()
class RegisterUser(graphene.Mutation):
user = graphene.Field(UserType)
class Arguments:
email = graphene.String(required=True)
password = graphene.String(required=True)
firstName = graphene.String(required=True)
lastName = graphene.String(required=True)
def mutate(self, info, email, password, firstName, lastName):
mail_to_lower = email_validation_function(email.lower())
user = User(email=mail_to_lower)
user.set_password(password)
user.is_student = True
user.first_name = firstName
user.last_name = lastName
user.save()
try:
account_created.delay(mail_to_lower)
except Exception:
pass
return RegisterUser(user)
class PasswordResetEmail(graphene.Mutation):
user = graphene.Field(UserType)
class Arguments:
email = graphene.String(required=True)
def mutate(self, info, email):
mail_to_lower = email_validation_function(email.lower())
user = ""
try:
user = User.objects.get(email=mail_to_lower)
except User.DoesNotExist:
raise GraphQLError(
"Compte non trouvé, merci de bien vérifier votre adresse email"
)
try:
send_password_reset_email.delay(mail_to_lower)
except Exception:
pass
return PasswordResetEmail(user)
class PasswordReset(graphene.Mutation):
user = graphene.Field(UserType)
class Arguments:
email = graphene.String(required=True)
code = graphene.String(required=True)
newPassword = graphene.String(required=True)
def mutate(self, info, email, code, newPassword):
mail_to_lower = email_validation_function(email.lower())
reset_code = ResetCode.objects.filter(
Q(code=code) & Q(email=mail_to_lower) & Q(expired=False)
).exists()
if reset_code:
try:
user = User.objects.get(email=mail_to_lower)
user.set_password(newPassword)
user.save()
except User.DoesNotExist:
raise GraphQLError(
"Compte non trouvé, merci de bien vérifier votre adresse email"
)
else:
raise GraphQLError("Code non trouvé dans notre système")
ResetCode.objects.filter(code=code).update(expired=True)
user = User.objects.get(email=mail_to_lower)
return PasswordReset(user)
class PasswordChangeForm(PasswordChangeForm):
def __init__(self, *args, **kwargs):
user = kwargs.pop("user", None)
super().__init__(user, *args, **kwargs)
class ChangePassword(DjangoFormMutation):
class Meta:
form_class = PasswordChangeForm
@classmethod
def get_form_kwargs(cls, root, info, **mutation_input):
return {
**super().get_form_kwargs(root, info, **mutation_input),
"user": info.context.user,
}
class Mutation(AuthMutation, graphene.ObjectType):
update_user = UpdateUser.Field()
update_avatar = UpdateAvatar.Field()
register = RegisterUser.Field()
send_password_reset_email = PasswordResetEmail.Field()
reset_password = PasswordReset.Field()
change_password = ChangePassword.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 32.632184 | 83 | 0.669132 |
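A hedged usage sketch for the `src/users/schema.py` file above: executing a query directly against its module-level `schema` object. This assumes a configured Django project with the app installed and a user with id=1; none of that is given by the dump.

```python
# Minimal sketch, assuming Django settings are configured and a user
# with id=1 exists; illustrative only.
result = schema.execute(
    """
    query {
      user(id: 1) {
        email
        firstName
      }
    }
    """
)
print(result.errors)  # None on success
print(result.data)    # e.g. {'user': {'email': '...', 'firstName': '...'}}
```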
| hexsha | size | ext | lang |
|---|---|---|---|
| 4a147753ad2b57bdd5934c49f0b442ebee602c9b | 310 | py | Python |

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | mayan/apps/common/serializers.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | ["Apache-2.0"] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z |
| issues | mayan/apps/common/serializers.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | ["Apache-2.0"] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z |
| forks | mayan/apps/common/serializers.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | ["Apache-2.0"] | 114 | 2015-01-08T20:21:05.000Z | 2018-12-10T19:07:53.000Z |

content:
from django.contrib.contenttypes.models import ContentType
from mayan.apps.rest_api import serializers
class ContentTypeSerializer(serializers.ModelSerializer):
class Meta:
fields = ('app_label', 'id', 'model')
model = ContentType
read_only_fields = ('app_label', 'id', 'model')
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 28.181818 | 58 | 0.712903 |
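A hedged usage sketch for the `ContentTypeSerializer` above. It assumes a configured Django project in which `mayan.apps.rest_api` is importable; the printed values are illustrative.

```python
# Minimal sketch: serialize an existing ContentType row.
# Assumes a configured Django environment; output values are illustrative.
from django.contrib.contenttypes.models import ContentType

content_type = ContentType.objects.first()
serializer = ContentTypeSerializer(content_type)
print(serializer.data)  # e.g. {'app_label': 'auth', 'id': 1, 'model': 'user'}
```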
| hexsha | size | ext | lang |
|---|---|---|---|
| 4a14785d354ff7368551db04b9fd6651886ed842 | 940 | bzl | Python |

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | contest/experimental_defs.bzl | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | ["MIT"] | 6 | 2020-09-03T13:10:49.000Z | 2021-03-10T01:13:49.000Z |
| issues | contest/experimental_defs.bzl | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | ["MIT"] | 11 | 2020-05-22T09:43:29.000Z | 2021-03-24T10:55:49.000Z |
| forks | contest/experimental_defs.bzl | nya3jp/rules_contest | e74a9892785912b11bbd975068641e558aa4a623 | ["MIT"] | null | null | null |

content:
def domjudge_package(name, dataset, domjudge_metadata = None, icpc_metadata = None, statements = [], **kwargs):
out = name + ".zip"
args = [
"'$(execpath @rules_contest//contest/impls:domjudge_package)'",
"--output='$@'",
"--dataset='$(execpath " + dataset + ")'",
]
srcs = [dataset]
if domjudge_metadata:
args.append("--domjudge_metadata='$(execpath " + domjudge_metadata + ")'")
srcs.append(domjudge_metadata)
if icpc_metadata:
args.append("--icpc_metadata='$(execpath " + icpc_metadata + ")'")
srcs.append(icpc_metadata)
for statement in statements:
args.append("--statement='$(execpath " + statement + ")'")
srcs.append(statement)
native.genrule(
name = name,
outs = [out],
srcs = srcs,
tools = ["@rules_contest//contest/impls:domjudge_package"],
cmd = " ".join(args),
**kwargs
)
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 36.153846 | 111 | 0.578723 |
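A hedged BUILD-file sketch (Starlark, which this dataset labels as Python) for the `domjudge_package` macro above. The load label follows the repo layout shown in the metadata; the target and file names are made up for illustration.

```python
# Hypothetical BUILD file; target and file names are illustrative only.
load("@rules_contest//contest:experimental_defs.bzl", "domjudge_package")

domjudge_package(
    name = "aplusb_domjudge",
    dataset = ":aplusb_dataset",
    domjudge_metadata = "domjudge.yaml",
    statements = ["statement.pdf"],
)
```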
| hexsha | size | ext | lang |
|---|---|---|---|
| 4a14787b572170995962a229392f890a31a6f5ce | 1,504 | py | Python |

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | tests/sequence_labelling/tools/checkpoints_test.py | elifesciences/sciencebeam-trainer-delft | 0f7da96cdf32acf1538a5fded192255158883ba0 | ["MIT"] | 5 | 2019-10-19T13:00:34.000Z | 2022-01-16T17:31:42.000Z |
| issues | tests/sequence_labelling/tools/checkpoints_test.py | elifesciences/sciencebeam-trainer-delft | 0f7da96cdf32acf1538a5fded192255158883ba0 | ["MIT"] | 162 | 2019-08-22T10:28:46.000Z | 2022-03-28T17:33:16.000Z |
| forks | tests/sequence_labelling/tools/checkpoints_test.py | elifesciences/sciencebeam-trainer-delft | 0f7da96cdf32acf1538a5fded192255158883ba0 | ["MIT"] | null | null | null |

content:
import json
from pathlib import Path
from sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints import (
main
)
def _create_checkpoint(path: Path, meta: dict):
path.mkdir(exist_ok=True)
path.joinpath('meta.json').write_text(json.dumps(meta))
return path
class TestMain:
def test_should_format_as_json(
self, temp_dir: Path, capsys):
epoch1_path = _create_checkpoint(
temp_dir.joinpath('epoch-1'),
{'f1': 0.1}
)
epoch2_path = _create_checkpoint(
temp_dir.joinpath('epoch-2'),
{'f1': 0.2}
)
epoch3_path = _create_checkpoint(
temp_dir.joinpath('epoch-3'),
{'f1': 0.15}
)
temp_dir.joinpath('checkpoints.json').write_text(json.dumps({
'checkpoints': [{
'path': str(epoch1_path)
}, {
'path': str(epoch2_path)
}, {
'path': str(epoch3_path)
}],
'last_checkpoint': {
'path': str(epoch3_path)
}
}))
main([
'--output-format=json',
'--checkpoint=%s' % temp_dir
])
out, _ = capsys.readouterr()
result_list = json.loads(out)
assert result_list
assert [
item['path']
for item in result_list
] == [
str(epoch1_path),
str(epoch3_path),
str(epoch2_path)
]
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 26.385965 | 76 | 0.510638 |
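The flags exercised in the test above (`--output-format=json`, `--checkpoint=...`) suggest how the checkpoints tool is driven; a hedged sketch of calling it directly, with an illustrative path:

```python
# Hedged sketch mirroring the test above; the path is illustrative.
from sciencebeam_trainer_delft.sequence_labelling.tools.checkpoints import main

main([
    '--output-format=json',
    '--checkpoint=/path/to/model-output',
])
```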
| hexsha | size | ext | lang |
|---|---|---|---|
| 4a14791014dd8b1fcef62ca55c0f029f846ed985 | 367,984 | py | Python |

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | saleor/graphql/product/tests/test_product.py | haoyang09/saleor | 33f17288459012aab28d517c899fac439a07b729 | ["CC-BY-4.0"] | 1 | 2021-08-20T02:19:08.000Z | 2021-08-20T02:19:08.000Z |
| issues | saleor/graphql/product/tests/test_product.py | haoyang09/saleor | 33f17288459012aab28d517c899fac439a07b729 | ["CC-BY-4.0"] | 172 | 2021-05-03T04:34:37.000Z | 2022-03-28T04:41:53.000Z |
| forks | saleor/graphql/product/tests/test_product.py | twocucao/saleor | 308413ed9c19e7938e690fe3cf339b526fd34df2 | ["CC-BY-4.0"] | 1 | 2022-03-24T08:37:58.000Z | 2022-03-24T08:37:58.000Z |

content:
import json
import os
from datetime import datetime, timedelta
from decimal import Decimal
from unittest import mock
from unittest.mock import ANY, Mock, patch
import graphene
import pytest
import pytz
from django.core.exceptions import ValidationError
from django.db.models import Sum
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from django.utils.html import strip_tags
from django.utils.text import slugify
from freezegun import freeze_time
from graphql_relay import to_global_id
from measurement.measures import Weight
from prices import Money, TaxedMoney
from ....attribute import AttributeInputType, AttributeType
from ....attribute.models import Attribute, AttributeValue
from ....attribute.utils import associate_attribute_values_to_instance
from ....core.taxes import TaxType
from ....core.units import WeightUnits
from ....order import OrderEvents, OrderStatus
from ....order.models import OrderEvent, OrderLine
from ....plugins.manager import PluginsManager, get_plugins_manager
from ....product import ProductMediaTypes, ProductTypeKind
from ....product.error_codes import ProductErrorCode
from ....product.models import (
Category,
Collection,
CollectionChannelListing,
Product,
ProductChannelListing,
ProductMedia,
ProductType,
ProductVariant,
ProductVariantChannelListing,
)
from ....product.tasks import update_variants_names
from ....product.tests.utils import create_image, create_pdf_file_with_image_ext
from ....product.utils.availability import get_variant_availability
from ....product.utils.costs import get_product_costs_data
from ....tests.utils import dummy_editorjs, flush_post_commit_hooks
from ....warehouse.models import Allocation, Stock, Warehouse
from ....webhook.event_types import WebhookEventAsyncType
from ....webhook.payloads import generate_product_deleted_payload
from ...core.enums import AttributeErrorCode, ReportingPeriod
from ...tests.utils import (
assert_no_permission,
get_graphql_content,
get_graphql_content_from_response,
get_multipart_request_body,
)
from ..bulk_mutations.products import ProductVariantStocksUpdate
from ..enums import ProductTypeKindEnum, VariantAttributeScope
from ..utils import create_stocks
@pytest.fixture
def query_products_with_filter():
query = """
query ($filter: ProductFilterInput!, $channel: String) {
products(first:5, filter: $filter, channel: $channel) {
edges{
node{
id
name
}
}
}
}
"""
return query
@pytest.fixture
def query_products_with_attributes():
query = """
query {
products(first:5) {
edges{
node{
id
name
attributes {
attribute {
id
}
}
}
}
}
}
"""
return query
@pytest.fixture
def query_collections_with_filter():
query = """
query ($filter: CollectionFilterInput!, $channel: String) {
collections(first:5, filter: $filter, channel: $channel) {
edges{
node{
id
name
}
}
}
}
"""
return query
@pytest.fixture
def query_categories_with_filter():
query = """
query ($filter: CategoryFilterInput!, ) {
categories(first:5, filter: $filter) {
totalCount
edges{
node{
id
name
}
}
}
}
"""
return query
QUERY_FETCH_ALL_PRODUCTS = """
query ($channel:String){
products(first: 10, channel: $channel) {
totalCount
edges {
node {
id
name
variants {
id
}
}
}
}
}
"""
QUERY_PRODUCT = """
query ($id: ID, $slug: String, $channel:String){
product(
id: $id,
slug: $slug,
channel: $channel
) {
id
name
weight {
unit
value
}
availableForPurchase
isAvailableForPurchase
isAvailable
}
}
"""
def test_product_query_by_id_available_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_description(
staff_api_client, permission_manage_products, product, channel_USD
):
query = """
query ($id: ID, $slug: String, $channel:String){
product(
id: $id,
slug: $slug,
channel: $channel
) {
id
name
description
descriptionJson
}
}
"""
description = dummy_editorjs("Test description.", json_format=True)
product.description = dummy_editorjs("Test description.")
product.save()
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
query,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["description"] == description
assert product_data["descriptionJson"] == description
def test_product_query_with_no_description(
staff_api_client, permission_manage_products, product, channel_USD
):
query = """
query ($id: ID, $slug: String, $channel:String){
product(
id: $id,
slug: $slug,
channel: $channel
) {
id
name
description
descriptionJson
}
}
"""
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
query,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["description"] is None
assert product_data["descriptionJson"] == "{}"
def test_product_query_by_id_not_available_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_id_not_existing_in_channel_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_query_by_id_as_staff_user_without_channel_slug(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_id_available_as_app(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
@pytest.mark.parametrize("id", ["'", "abc"])
def test_product_query_by_invalid_id(
id, staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": id,
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content_from_response(response)
assert "errors" in content
assert content["errors"][0]["message"] == (f"Couldn't resolve id: {id}.")
QUERY_PRODUCT_BY_ID = """
query ($id: ID, $channel: String){
product(id: $id, channel: $channel) {
id
variants {
id
}
}
}
"""
def test_product_query_by_id_as_user(
user_api_client, permission_manage_products, product, channel_USD
):
query = QUERY_PRODUCT_BY_ID
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(
query,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
expected_variants = [
{
"id": graphene.Node.to_global_id(
"ProductVariant", product.variants.first().pk
)
}
]
assert product_data["variants"] == expected_variants
def test_product_query_invalid_id(user_api_client, product, channel_USD):
product_id = "'"
variables = {
"id": product_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_PRODUCT_BY_ID, variables)
content = get_graphql_content_from_response(response)
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == f"Couldn't resolve id: {product_id}."
assert content["data"]["product"] is None
def test_product_query_object_with_given_id_does_not_exist(
user_api_client, product, channel_USD
):
product_id = graphene.Node.to_global_id("Product", -1)
variables = {
"id": product_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_PRODUCT_BY_ID, variables)
content = get_graphql_content(response)
assert content["data"]["product"] is None
def test_product_query_with_invalid_object_type(user_api_client, product, channel_USD):
product_id = graphene.Node.to_global_id("Collection", product.pk)
variables = {
"id": product_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_PRODUCT_BY_ID, variables)
content = get_graphql_content(response)
assert content["data"]["product"] is None
def test_product_query_by_id_not_available_as_app(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_id_not_existing_in_channel_as_app(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_query_by_id_as_app_without_channel_slug(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_variants_without_sku_query_by_staff(
staff_api_client, product, channel_USD
):
product.variants.update(sku=None)
product_id = graphene.Node.to_global_id("Product", product.pk)
variables = {
"id": product_id,
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
QUERY_PRODUCT_BY_ID,
variables=variables,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["id"] == product_id
variant = product.variants.first()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
assert product_data["variants"] == [{"id": variant_id}]
def test_product_only_with_variants_without_sku_query_by_customer(
user_api_client, product, channel_USD
):
product.variants.update(sku=None)
product_id = graphene.Node.to_global_id("Product", product.pk)
variables = {
"id": product_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(
QUERY_PRODUCT_BY_ID,
variables=variables,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["id"] == product_id
variant = product.variants.first()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
assert product_data["variants"] == [{"id": variant_id}]
def test_product_only_with_variants_without_sku_query_by_anonymous(
api_client, product, channel_USD
):
product.variants.update(sku=None)
product_id = graphene.Node.to_global_id("Product", product.pk)
variables = {
"id": product_id,
"channel": channel_USD.slug,
}
response = api_client.post_graphql(
QUERY_PRODUCT_BY_ID,
variables=variables,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["id"] == product_id
variant = product.variants.first()
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
assert product_data["variants"] == [{"id": variant_id}]
QUERY_COLLECTION_FROM_PRODUCT = """
query ($id: ID, $channel:String){
product(
id: $id,
channel: $channel
) {
collections {
name
}
}
}
"""
def test_get_collections_from_product_as_staff(
staff_api_client,
permission_manage_products,
product_with_collections,
channel_USD,
):
# given
product = product_with_collections
variables = {"id": graphene.Node.to_global_id("Product", product.pk)}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTION_FROM_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
collections = content["data"]["product"]["collections"]
assert len(collections) == 3
for collection in product.collections.all():
assert {"name": collection.name} in collections
def test_get_collections_from_product_as_app(
app_api_client,
permission_manage_products,
product_with_collections,
channel_USD,
):
# given
product = product_with_collections
variables = {"id": graphene.Node.to_global_id("Product", product.pk)}
# when
response = app_api_client.post_graphql(
QUERY_COLLECTION_FROM_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
collections = content["data"]["product"]["collections"]
assert len(collections) == 3
for collection in product.collections.all():
assert {"name": collection.name} in collections
def test_get_collections_from_product_as_customer(
user_api_client, product_with_collections, channel_USD, published_collection
):
# given
product = product_with_collections
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
# when
response = user_api_client.post_graphql(
QUERY_COLLECTION_FROM_PRODUCT,
variables=variables,
permissions=(),
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
collections = content["data"]["product"]["collections"]
assert len(collections) == 1
assert {"name": published_collection.name} in collections
def test_get_collections_from_product_as_anonymous(
api_client, product_with_collections, channel_USD, published_collection
):
# given
product = product_with_collections
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
# when
response = api_client.post_graphql(
QUERY_COLLECTION_FROM_PRODUCT,
variables=variables,
permissions=(),
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
collections = content["data"]["product"]["collections"]
assert len(collections) == 1
assert {"name": published_collection.name} in collections
def test_product_query_by_id_available_as_customer(
user_api_client, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
# when
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_id_not_available_as_customer(
user_api_client, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_unpublished_query_by_id_as_app(
app_api_client, unavailable_product, permission_manage_products, channel_USD
):
# given
variables = {
"id": graphene.Node.to_global_id("Product", unavailable_product.pk),
"channel": channel_USD.slug,
}
# when
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == unavailable_product.name
def test_product_query_by_id_weight_returned_in_default_unit(
user_api_client, product, site_settings, channel_USD
):
# given
product.weight = Weight(kg=10)
product.save(update_fields=["weight"])
site_settings.default_weight_unit = WeightUnits.LB
site_settings.save(update_fields=["default_weight_unit"])
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
# when
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
assert product_data["weight"]["value"] == 22.046
assert product_data["weight"]["unit"] == WeightUnits.LB.upper()
def test_product_query_by_id_weight_is_rounded(
user_api_client, product, site_settings, channel_USD
):
# given
product.weight = Weight(kg=1.83456)
product.save(update_fields=["weight"])
site_settings.default_weight_unit = WeightUnits.KG
site_settings.save(update_fields=["default_weight_unit"])
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
# when
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
assert product_data["weight"]["value"] == 1.835
assert product_data["weight"]["unit"] == WeightUnits.KG.upper()
def test_product_query_by_slug(user_api_client, product, channel_USD):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_query_by_id_not_existing_in_channel_as_customer(
user_api_client, product, channel_USD
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_query_by_slug_available_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_slug_not_available_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_slug_not_existing_in_channel_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_query_by_slug_as_staff_user_without_channel(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {
"slug": product.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = staff_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_slug_available_as_app(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_slug_not_available_as_app(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_slug_not_existing_in_channel_as_app(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_query_by_slug_as_app_without_channel(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {
"slug": product.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = app_api_client.post_graphql(
QUERY_PRODUCT,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_slug_available_as_customer(
user_api_client, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_by_slug_not_available_as_customer(
user_api_client, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_query_is_available_for_purchase_true(
user_api_client, product, channel_USD
):
# given
available_for_purchase = datetime.today() - timedelta(days=1)
product.channel_listings.update(available_for_purchase=available_for_purchase)
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
# when
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data["availableForPurchase"] == available_for_purchase.strftime(
"%Y-%m-%d"
)
assert product_data["isAvailableForPurchase"] is True
def test_product_query_is_available_for_purchase_false(
user_api_client, product, channel_USD
):
# given
available_for_purchase = datetime.today() + timedelta(days=1)
product.channel_listings.update(available_for_purchase=available_for_purchase)
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
# when
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data["availableForPurchase"] == available_for_purchase.strftime(
"%Y-%m-%d"
)
assert product_data["isAvailableForPurchase"] is False
assert product_data["isAvailable"] is False
def test_product_query_is_available_for_purchase_false_no_available_for_purchase_date(
user_api_client, product, channel_USD
):
# given
product.channel_listings.update(available_for_purchase=None)
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
# when
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert not product_data["availableForPurchase"]
assert product_data["isAvailableForPurchase"] is False
assert product_data["isAvailable"] is False
def test_product_query_unpublished_products_by_slug(
staff_api_client, product, permission_manage_products, channel_USD
):
# given
user = staff_api_client.user
user.user_permissions.add(permission_manage_products)
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_unpublished_products_by_slug_and_anonymous_user(
api_client, product, channel_USD
):
# given
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
# when
response = api_client.post_graphql(QUERY_PRODUCT, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
def test_product_query_by_slug_not_existing_in_channel_as_customer(
user_api_client, product, channel_USD
):
variables = {
"slug": product.slug,
"channel": channel_USD.slug,
}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is None
QUERY_PRODUCT_WITHOUT_CHANNEL = """
query ($id: ID){
product(
id: $id
) {
id
name
}
}
"""
def test_product_query_by_id_without_channel_not_available_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {"id": graphene.Node.to_global_id("Product", product.pk)}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = staff_api_client.post_graphql(
QUERY_PRODUCT_WITHOUT_CHANNEL,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
product_data = content["data"]["product"]
assert product_data is not None
assert product_data["name"] == product.name
def test_product_query_error_when_id_and_slug_provided(
user_api_client,
product,
graphql_log_handler,
):
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"slug": product.slug,
}
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
assert graphql_log_handler.messages == [
"saleor.graphql.errors.handled[INFO].GraphQLError"
]
content = get_graphql_content(response, ignore_errors=True)
assert len(content["errors"]) == 1
def test_product_query_error_when_no_param(
user_api_client,
product,
graphql_log_handler,
):
variables = {}
response = user_api_client.post_graphql(QUERY_PRODUCT, variables=variables)
assert graphql_log_handler.messages == [
"saleor.graphql.errors.handled[INFO].GraphQLError"
]
content = get_graphql_content(response, ignore_errors=True)
assert len(content["errors"]) == 1
def test_fetch_all_products_available_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {"channel": channel_USD.slug}
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
num_products = Product.objects.count()
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_product_variants_available_as_staff_user_with_channel(
staff_api_client, permission_manage_products, product_variant_list, channel_USD
):
variables = {"channel": channel_USD.slug}
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
num_products = Product.objects.count()
num_variants = ProductVariant.objects.count()
assert num_variants > 1
content = get_graphql_content(response)
products = content["data"]["products"]
variants = products["edges"][0]["node"]["variants"]
assert products["totalCount"] == num_products
assert len(products["edges"]) == num_products
assert len(variants) == num_variants - 1
def test_fetch_all_product_variants_available_as_staff_user_without_channel(
staff_api_client, permission_manage_products, product_variant_list, channel_USD
):
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
num_products = Product.objects.count()
num_variants = ProductVariant.objects.count()
assert num_variants > 1
content = get_graphql_content(response)
products = content["data"]["products"]
variants = products["edges"][0]["node"]["variants"]
assert products["totalCount"] == num_products
assert len(products["edges"]) == num_products
assert len(variants) == num_variants
def test_fetch_all_products_not_available_as_staff_user(
staff_api_client, permission_manage_products, product, channel_USD
):
variables = {"channel": channel_USD.slug}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
num_products = Product.objects.count()
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_not_existing_in_channel_as_staff_user(
staff_api_client, permission_manage_products, channel_USD, product_list
):
variables = {"channel": channel_USD.slug}
ProductChannelListing.objects.filter(
product=product_list[0], channel=channel_USD
).delete()
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
# if channel slug is provided we return all products related to this channel
num_products = Product.objects.count() - 1
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_as_staff_user_without_channel_slug(
staff_api_client, permission_manage_products, product_list, channel_USD
):
ProductChannelListing.objects.filter(
product=product_list[0], channel=channel_USD
).delete()
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
num_products = Product.objects.count()
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_available_as_app(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {"channel": channel_USD.slug}
response = app_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
num_products = Product.objects.count()
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_not_available_as_app(
app_api_client, permission_manage_products, product, channel_USD
):
variables = {"channel": channel_USD.slug}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = app_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
num_products = Product.objects.count()
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_not_existing_in_channel_as_app(
app_api_client, permission_manage_products, product_list, channel_USD
):
variables = {"channel": channel_USD.slug}
ProductChannelListing.objects.filter(
product=product_list[0], channel=channel_USD
).delete()
response = app_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
# if channel slug is provided we return all products related to this channel
num_products = Product.objects.count() - 1
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_as_app_without_channel_slug(
app_api_client, permission_manage_products, product_list, channel_USD
):
ProductChannelListing.objects.filter(
product=product_list[0], channel=channel_USD
).delete()
response = app_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
num_products = Product.objects.count()
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_available_as_customer(
user_api_client, product, channel_USD
):
variables = {"channel": channel_USD.slug}
response = user_api_client.post_graphql(QUERY_FETCH_ALL_PRODUCTS, variables)
content = get_graphql_content(response)
num_products = Product.objects.count()
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_not_available_as_customer(
user_api_client, product, channel_USD
):
variables = {"channel": channel_USD.slug}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = user_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
)
content = get_graphql_content(response)
assert content["data"]["products"]["totalCount"] == 0
assert not content["data"]["products"]["edges"]
def test_fetch_all_products_not_existing_in_channel_as_customer(
user_api_client, product, channel_USD
):
variables = {"channel": channel_USD.slug}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = user_api_client.post_graphql(QUERY_FETCH_ALL_PRODUCTS, variables)
content = get_graphql_content(response)
assert content["data"]["products"]["totalCount"] == 0
assert not content["data"]["products"]["edges"]
def test_fetch_all_products_available_as_anonymous(api_client, product, channel_USD):
variables = {"channel": channel_USD.slug}
response = api_client.post_graphql(QUERY_FETCH_ALL_PRODUCTS, variables)
content = get_graphql_content(response)
num_products = Product.objects.count()
assert content["data"]["products"]["totalCount"] == num_products
assert len(content["data"]["products"]["edges"]) == num_products
def test_fetch_all_products_not_available_as_anonymous(
api_client, product, channel_USD
):
variables = {"channel": channel_USD.slug}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
response = api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
)
content = get_graphql_content(response)
assert content["data"]["products"]["totalCount"] == 0
assert not content["data"]["products"]["edges"]
def test_fetch_all_products_not_existing_in_channel_as_anonymous(
api_client, product, channel_USD
):
variables = {"channel": channel_USD.slug}
ProductChannelListing.objects.filter(product=product, channel=channel_USD).delete()
response = api_client.post_graphql(QUERY_FETCH_ALL_PRODUCTS, variables)
content = get_graphql_content(response)
assert content["data"]["products"]["totalCount"] == 0
assert not content["data"]["products"]["edges"]
def test_fetch_all_products_visible_in_listings(
user_api_client, product_list, permission_manage_products, channel_USD
):
# given
product_list[0].channel_listings.update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {"channel": channel_USD.slug}
# when
response = user_api_client.post_graphql(QUERY_FETCH_ALL_PRODUCTS, variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["products"]["edges"]
assert len(product_data) == product_count - 1
products_ids = [product["node"]["id"] for product in product_data]
assert graphene.Node.to_global_id("Product", product_list[0].pk) not in products_ids
def test_fetch_all_products_visible_in_listings_by_staff_with_perm(
staff_api_client, product_list, permission_manage_products, channel_USD
):
# given
product_list[0].channel_listings.update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {"channel": channel_USD.slug}
# when
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
product_data = content["data"]["products"]["edges"]
assert len(product_data) == product_count
def test_fetch_all_products_visible_in_listings_by_staff_without_manage_products(
staff_api_client, product_list, channel_USD
):
# given
product_list[0].channel_listings.update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {"channel": channel_USD.slug}
# when
response = staff_api_client.post_graphql(QUERY_FETCH_ALL_PRODUCTS, variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["products"]["edges"]
assert len(product_data) == product_count - 1 # invisible doesn't count
def test_fetch_all_products_visible_in_listings_by_app_with_perm(
app_api_client, product_list, permission_manage_products, channel_USD
):
# given
product_list[0].channel_listings.update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {"channel": channel_USD.slug}
# when
response = app_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
product_data = content["data"]["products"]["edges"]
assert len(product_data) == product_count
def test_fetch_all_products_visible_in_listings_by_app_without_manage_products(
app_api_client, product_list, channel_USD
):
# given
product_list[0].channel_listings.update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {"channel": channel_USD.slug}
# when
response = app_api_client.post_graphql(QUERY_FETCH_ALL_PRODUCTS, variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["products"]["edges"]
assert len(product_data) == product_count - 1 # invisible doesn't count
def test_fetch_product_from_category_query(
staff_api_client, product, permission_manage_products, stock, channel_USD
):
category = Category.objects.first()
product = category.products.first()
query = """
query CategoryProducts($id: ID, $channel: String, $address: AddressInput) {
category(id: $id) {
products(first: 20, channel: $channel) {
edges {
node {
id
name
slug
thumbnail{
url
alt
}
media {
url
}
variants {
name
channelListings {
costPrice {
amount
}
}
}
channelListings {
purchaseCost {
start {
amount
}
stop {
amount
}
}
margin {
start
stop
}
}
isAvailable(address: $address)
pricing(address: $address) {
priceRange {
start {
gross {
amount
currency
}
net {
amount
currency
}
currency
}
}
}
}
}
}
}
}
"""
staff_api_client.user.user_permissions.add(permission_manage_products)
variables = {
"id": graphene.Node.to_global_id("Category", category.id),
"channel": channel_USD.slug,
"address": {"country": "US"},
}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["category"] is not None
product_edges_data = content["data"]["category"]["products"]["edges"]
assert len(product_edges_data) == category.products.count()
product_data = product_edges_data[0]["node"]
assert product_data["name"] == product.name
assert product_data["slug"] == product.slug
variant = product.variants.first()
variant_channel_listing = variant.channel_listings.filter(channel_id=channel_USD.id)
purchase_cost, margin = get_product_costs_data(
variant_channel_listing, True, channel_USD.currency_code
)
cost_start = product_data["channelListings"][0]["purchaseCost"]["start"]["amount"]
cost_stop = product_data["channelListings"][0]["purchaseCost"]["stop"]["amount"]
assert purchase_cost.start.amount == cost_start
assert purchase_cost.stop.amount == cost_stop
assert product_data["isAvailable"] is True
assert margin[0] == product_data["channelListings"][0]["margin"]["start"]
assert margin[1] == product_data["channelListings"][0]["margin"]["stop"]
variant = product.variants.first()
variant_channel_listing = variant.channel_listings.get(channel_id=channel_USD.id)
variant_channel_data = product_data["variants"][0]["channelListings"][0]
variant_cost = variant_channel_data["costPrice"]["amount"]
assert variant_channel_listing.cost_price.amount == variant_cost
def test_query_products_no_channel_shipping_zones(
staff_api_client, product, permission_manage_products, stock, channel_USD
):
channel_USD.shipping_zones.clear()
category = Category.objects.first()
product = category.products.first()
query = """
query CategoryProducts($id: ID, $channel: String, $address: AddressInput) {
category(id: $id) {
products(first: 20, channel: $channel) {
edges {
node {
id
name
isAvailable(address: $address)
}
}
}
}
}
"""
staff_api_client.user.user_permissions.add(permission_manage_products)
variables = {
"id": graphene.Node.to_global_id("Category", category.id),
"channel": channel_USD.slug,
"address": {"country": "US"},
}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["category"] is not None
product_edges_data = content["data"]["category"]["products"]["edges"]
assert len(product_edges_data) == category.products.count()
product_data = product_edges_data[0]["node"]
assert product_data["name"] == product.name
assert product_data["isAvailable"] is False
QUERY_PRODUCT_IS_AVAILABLE = """
query Product($id: ID, $channel: String, $address: AddressInput) {
product(id: $id, channel: $channel) {
isAvailableNoAddress: isAvailable
isAvailableAddress: isAvailable(address: $address)
}
}
"""
def test_query_product_is_available(
api_client, channel_USD, variant_with_many_stocks_different_shipping_zones
):
# given
variant = variant_with_many_stocks_different_shipping_zones
product = variant.product
variables = {
"id": graphene.Node.to_global_id("Product", product.id),
"channel": channel_USD.slug,
"address": {"country": "PL"},
}
# when
response = api_client.post_graphql(QUERY_PRODUCT_IS_AVAILABLE, variables)
content = get_graphql_content(response)
# then
product_data = content["data"]["product"]
assert product_data["isAvailableNoAddress"] is True
assert product_data["isAvailableAddress"] is True
def test_query_product_is_available_with_one_variant(
api_client, channel_USD, product_with_two_variants
):
# given
product = product_with_two_variants
# remove stock for 2nd variant
variant_2 = product.variants.all()[1]
Stock.objects.filter(product_variant=variant_2).delete()
variables = {
"id": graphene.Node.to_global_id("Product", product.id),
"channel": channel_USD.slug,
"address": {"country": "PL"},
}
# when
response = api_client.post_graphql(QUERY_PRODUCT_IS_AVAILABLE, variables)
content = get_graphql_content(response)
# then
product_data = content["data"]["product"]
assert product_data["isAvailableNoAddress"] is True
assert product_data["isAvailableAddress"] is True
def test_query_product_is_available_no_shipping_zones(
api_client, channel_USD, variant_with_many_stocks_different_shipping_zones
):
# given
channel_USD.shipping_zones.clear()
variant = variant_with_many_stocks_different_shipping_zones
product = variant.product
variables = {
"id": graphene.Node.to_global_id("Product", product.id),
"channel": channel_USD.slug,
"address": {"country": "PL"},
}
# when
response = api_client.post_graphql(QUERY_PRODUCT_IS_AVAILABLE, variables)
content = get_graphql_content(response)
# then
product_data = content["data"]["product"]
assert product_data["isAvailableNoAddress"] is False
assert product_data["isAvailableAddress"] is False
def test_products_query_with_filter_attributes(
query_products_with_filter,
staff_api_client,
product,
permission_manage_products,
channel_USD,
):
product_type = ProductType.objects.create(
name="Custom Type",
slug="custom-type",
has_variants=True,
is_shipping_required=True,
kind=ProductTypeKind.NORMAL,
)
attribute = Attribute.objects.create(slug="new_attr", name="Attr")
attribute.product_types.add(product_type)
attr_value = AttributeValue.objects.create(
attribute=attribute, name="First", slug="first"
)
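    # Cloning idiom: `second_product` is the same Python object as `product`;
    # clearing .id and saving makes Django INSERT a fresh row while the
    # original row stays in place.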
second_product = product
second_product.id = None
second_product.product_type = product_type
second_product.slug = "second-product"
second_product.save()
associate_attribute_values_to_instance(second_product, attribute, attr_value)
variables = {
"filter": {
"attributes": [{"slug": attribute.slug, "values": [attr_value.slug]}],
},
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
second_product_id = graphene.Node.to_global_id("Product", second_product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == second_product_id
assert products[0]["node"]["name"] == second_product.name
@pytest.mark.parametrize(
"gte, lte, expected_products_index",
[
(None, 8, [1]),
(0, 8, [1]),
(7, 8, []),
(5, None, [0, 1]),
(8, 10, [0]),
(12, None, [0]),
(20, None, []),
(20, 8, []),
],
)
def test_products_query_with_filter_numeric_attributes(
gte,
lte,
expected_products_index,
query_products_with_filter,
staff_api_client,
product,
category,
numeric_attribute,
permission_manage_products,
):
product.product_type.product_attributes.add(numeric_attribute)
associate_attribute_values_to_instance(
product, numeric_attribute, *numeric_attribute.values.all()
)
product_type = ProductType.objects.create(
name="Custom Type",
slug="custom-type",
kind=ProductTypeKind.NORMAL,
has_variants=True,
is_shipping_required=True,
)
numeric_attribute.product_types.add(product_type)
second_product = Product.objects.create(
name="Second product",
slug="second-product",
product_type=product_type,
category=category,
)
attr_value = AttributeValue.objects.create(
attribute=numeric_attribute, name="5.2", slug="5_2"
)
associate_attribute_values_to_instance(
second_product, numeric_attribute, attr_value
)
second_product.refresh_from_db()
products_instances = [product, second_product]
products_ids = [
graphene.Node.to_global_id("Product", p.pk) for p in products_instances
]
values_range = {}
    # Explicit None checks: a 0 bound is falsy but still a valid filter value.
    if gte is not None:
        values_range["gte"] = gte
    if lte is not None:
        values_range["lte"] = lte
variables = {
"filter": {
"attributes": [
{"slug": numeric_attribute.slug, "valuesRange": values_range}
]
}
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == len(expected_products_index)
assert set(product["node"]["id"] for product in products) == {
products_ids[index] for index in expected_products_index
}
assert set(product["node"]["name"] for product in products) == {
products_instances[index].name for index in expected_products_index
}
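# The first product is assigned whichever boolean value is currently under
# test, while the second product always carries False - so filtering on False
# matches both products and filtering on True matches only the first.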
@pytest.mark.parametrize(
"filter_value, expected_products_index",
[
(False, [0, 1]),
(True, [0]),
],
)
def test_products_query_with_filter_boolean_attributes(
filter_value,
expected_products_index,
query_products_with_filter,
staff_api_client,
product,
category,
boolean_attribute,
permission_manage_products,
):
product.product_type.product_attributes.add(boolean_attribute)
associate_attribute_values_to_instance(
product, boolean_attribute, boolean_attribute.values.get(boolean=filter_value)
)
product_type = ProductType.objects.create(
name="Custom Type",
slug="custom-type",
kind=ProductTypeKind.NORMAL,
has_variants=True,
is_shipping_required=True,
)
boolean_attribute.product_types.add(product_type)
second_product = Product.objects.create(
name="Second product",
slug="second-product",
product_type=product_type,
category=category,
)
associate_attribute_values_to_instance(
second_product, boolean_attribute, boolean_attribute.values.get(boolean=False)
)
second_product.refresh_from_db()
products_instances = [product, second_product]
products_ids = [
graphene.Node.to_global_id("Product", p.pk) for p in products_instances
]
variables = {
"filter": {
"attributes": [{"slug": boolean_attribute.slug, "boolean": filter_value}]
}
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == len(expected_products_index)
assert set(product["node"]["id"] for product in products) == {
products_ids[index] for index in expected_products_index
}
assert set(product["node"]["name"] for product in products) == {
products_instances[index].name for index in expected_products_index
}
def test_products_query_with_filter_by_attributes_values_and_range(
query_products_with_filter,
staff_api_client,
product,
category,
numeric_attribute,
permission_manage_products,
):
product_attr = product.attributes.first()
attr_value_1 = product_attr.values.first()
product.product_type.product_attributes.add(numeric_attribute)
associate_attribute_values_to_instance(
product, numeric_attribute, *numeric_attribute.values.all()
)
product_type = ProductType.objects.create(
name="Custom Type",
slug="custom-type",
kind=ProductTypeKind.NORMAL,
has_variants=True,
is_shipping_required=True,
)
numeric_attribute.product_types.add(product_type)
second_product = Product.objects.create(
name="Second product",
slug="second-product",
product_type=product_type,
category=category,
)
attr_value_2 = AttributeValue.objects.create(
attribute=numeric_attribute, name="5.2", slug="5_2"
)
associate_attribute_values_to_instance(
second_product, numeric_attribute, attr_value_2
)
second_product.refresh_from_db()
variables = {
"filter": {
"attributes": [
{"slug": numeric_attribute.slug, "valuesRange": {"gte": 2}},
{"slug": attr_value_1.attribute.slug, "values": [attr_value_1.slug]},
]
}
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == graphene.Node.to_global_id(
"Product", product.pk
)
assert products[0]["node"]["name"] == product.name
def test_products_query_with_filter_swatch_attributes(
query_products_with_filter,
staff_api_client,
product,
category,
swatch_attribute,
permission_manage_products,
):
product.product_type.product_attributes.add(swatch_attribute)
associate_attribute_values_to_instance(
product, swatch_attribute, *swatch_attribute.values.all()
)
product_type = ProductType.objects.create(
name="Custom Type",
slug="custom-type",
has_variants=True,
is_shipping_required=True,
)
swatch_attribute.product_types.add(product_type)
second_product = Product.objects.create(
name="Second product",
slug="second-product",
product_type=product_type,
category=category,
)
attr_value = AttributeValue.objects.create(
attribute=swatch_attribute, name="Dark", slug="dark"
)
associate_attribute_values_to_instance(second_product, swatch_attribute, attr_value)
second_product.refresh_from_db()
variables = {
"filter": {
"attributes": [
{"slug": swatch_attribute.slug, "values": [attr_value.slug]},
]
}
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
second_product_id = graphene.Node.to_global_id("Product", second_product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == second_product_id
assert products[0]["node"]["name"] == second_product.name
def test_products_query_filter_by_non_existing_attribute(
query_products_with_filter, api_client, product_list, channel_USD
):
variables = {
"channel": channel_USD.slug,
"filter": {"attributes": [{"slug": "i-do-not-exist", "values": ["red"]}]},
}
response = api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 0
def test_products_query_with_filter_category(
query_products_with_filter, staff_api_client, product, permission_manage_products
):
category = Category.objects.create(name="Custom", slug="custom")
second_product = product
second_product.id = None
second_product.slug = "second-product"
second_product.category = category
second_product.save()
category_id = graphene.Node.to_global_id("Category", category.id)
variables = {"filter": {"categories": [category_id]}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
second_product_id = graphene.Node.to_global_id("Product", second_product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == second_product_id
assert products[0]["node"]["name"] == second_product.name
def test_products_query_with_filter_has_category_false(
query_products_with_filter, staff_api_client, product, permission_manage_products
):
second_product = product
second_product.category = None
second_product.id = None
second_product.slug = "second-product"
second_product.save()
variables = {"filter": {"hasCategory": False}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
second_product_id = graphene.Node.to_global_id("Product", second_product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == second_product_id
assert products[0]["node"]["name"] == second_product.name
def test_products_query_with_filter_has_category_true(
query_products_with_filter,
staff_api_client,
product_without_category,
permission_manage_products,
):
category = Category.objects.create(name="Custom", slug="custom")
second_product = product_without_category
second_product.category = category
second_product.id = None
second_product.slug = "second-product"
second_product.save()
variables = {"filter": {"hasCategory": True}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
second_product_id = graphene.Node.to_global_id("Product", second_product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == second_product_id
assert products[0]["node"]["name"] == second_product.name
def test_products_query_with_filter_collection(
query_products_with_filter,
staff_api_client,
product,
collection,
permission_manage_products,
):
second_product = product
second_product.id = None
second_product.slug = "second-product"
second_product.save()
second_product.collections.add(collection)
collection_id = graphene.Node.to_global_id("Collection", collection.id)
variables = {"filter": {"collections": [collection_id]}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
second_product_id = graphene.Node.to_global_id("Product", second_product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == second_product_id
assert products[0]["node"]["name"] == second_product.name
def test_products_query_with_filter_category_and_search(
query_products_with_filter,
staff_api_client,
product,
permission_manage_products,
):
category = Category.objects.create(name="Custom", slug="custom")
second_product = product
second_product.id = None
second_product.slug = "second-product"
second_product.category = category
product.category = category
second_product.save()
product.save()
category_id = graphene.Node.to_global_id("Category", category.id)
variables = {"filter": {"categories": [category_id], "search": product.name}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
product_id = graphene.Node.to_global_id("Product", product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == product_id
assert products[0]["node"]["name"] == product.name
def test_products_query_with_filter_gift_card_false(
query_products_with_filter,
staff_api_client,
product,
shippable_gift_card_product,
permission_manage_products,
):
# given
variables = {"filter": {"giftCard": False}}
staff_api_client.user.user_permissions.add(permission_manage_products)
# when
response = staff_api_client.post_graphql(query_products_with_filter, variables)
# then
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == graphene.Node.to_global_id(
"Product", product.pk
)
def test_products_query_with_filter_gift_card_true(
query_products_with_filter,
staff_api_client,
product,
shippable_gift_card_product,
permission_manage_products,
):
# given
variables = {"filter": {"giftCard": True}}
staff_api_client.user.user_permissions.add(permission_manage_products)
# when
response = staff_api_client.post_graphql(query_products_with_filter, variables)
# then
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == graphene.Node.to_global_id(
"Product", shippable_gift_card_product.pk
)
def test_products_with_variants_query_as_app(
query_products_with_attributes,
app_api_client,
product_with_multiple_values_attributes,
permission_manage_products,
):
product = product_with_multiple_values_attributes
attribute = product.attributes.first().attribute
attribute.visible_in_storefront = False
attribute.save()
second_product = product
second_product.id = None
second_product.slug = "second-product"
second_product.save()
product.save()
app_api_client.app.permissions.add(permission_manage_products)
response = app_api_client.post_graphql(query_products_with_attributes)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 2
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
for response_product in products:
attrs = response_product["node"]["attributes"]
assert len(attrs) == 1
assert attrs[0]["attribute"]["id"] == attribute_id
@pytest.mark.parametrize(
"products_filter",
[
{"minimalPrice": {"gte": 1.0, "lte": 2.0}},
{"isPublished": False},
{"search": "Juice1"},
],
)
def test_products_query_with_filter(
products_filter,
query_products_with_filter,
staff_api_client,
product,
permission_manage_products,
channel_USD,
):
assert "Juice1" not in product.name
second_product = product
second_product.id = None
second_product.name = "Apple Juice1"
second_product.slug = "apple-juice1"
second_product.save()
variant_second_product = second_product.variants.create(
product=second_product,
sku=second_product.slug,
)
ProductVariantChannelListing.objects.create(
variant=variant_second_product,
channel=channel_USD,
        price_amount=Decimal("1.99"),  # str input keeps the Decimal exact
cost_price_amount=Decimal(1),
currency=channel_USD.currency_code,
)
ProductChannelListing.objects.create(
product=second_product,
        discounted_price_amount=Decimal("1.99"),
channel=channel_USD,
is_published=False,
)
variables = {"filter": products_filter, "channel": channel_USD.slug}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
second_product_id = graphene.Node.to_global_id("Product", second_product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == second_product_id
assert products[0]["node"]["name"] == second_product.name
def test_products_query_with_price_filter_as_staff(
query_products_with_filter,
staff_api_client,
product_list,
permission_manage_products,
channel_USD,
):
product = product_list[0]
product.variants.first().channel_listings.filter().update(price_amount=None)
variables = {
"filter": {"price": {"gte": 9, "lte": 31}},
"channel": channel_USD.slug,
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 3
def test_products_query_with_price_filter_as_user(
query_products_with_filter,
user_api_client,
product_list,
permission_manage_products,
channel_USD,
):
product = product_list[0]
product.variants.first().channel_listings.filter().update(price_amount=None)
second_product_id = graphene.Node.to_global_id("Product", product_list[1].id)
third_product_id = graphene.Node.to_global_id("Product", product_list[2].id)
variables = {
"filter": {"price": {"gte": 9, "lte": 31}},
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 2
assert products[0]["node"]["id"] == second_product_id
assert products[1]["node"]["id"] == third_product_id
@pytest.mark.parametrize("is_published", [(True), (False)])
def test_products_query_with_filter_search_by_sku(
is_published,
query_products_with_filter,
staff_api_client,
product_with_two_variants,
product_with_default_variant,
permission_manage_products,
channel_USD,
):
ProductChannelListing.objects.filter(
product=product_with_default_variant, channel=channel_USD
).update(is_published=is_published)
variables = {"filter": {"search": "1234"}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
product_id = graphene.Node.to_global_id("Product", product_with_default_variant.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == product_id
assert products[0]["node"]["name"] == product_with_default_variant.name
def test_products_query_with_is_published_filter_variants_without_prices(
query_products_with_filter,
staff_api_client,
variant,
permission_manage_products,
channel_USD,
):
ProductVariantChannelListing.objects.filter(
variant__product=variant.product
).update(price_amount=None)
variables = {"channel": channel_USD.slug, "filter": {"isPublished": True}}
response = staff_api_client.post_graphql(
query_products_with_filter,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 0
def test_products_query_with_is_published_filter_one_variant_without_price(
query_products_with_filter,
staff_api_client,
variant,
permission_manage_products,
channel_USD,
):
variant.channel_listings.update(price_amount=None)
variables = {"channel": channel_USD.slug, "filter": {"isPublished": True}}
response = staff_api_client.post_graphql(
query_products_with_filter,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 1
def test_products_query_with_filter_stock_availability_as_staff(
query_products_with_filter,
staff_api_client,
product_list,
order_line,
permission_manage_products,
channel_USD,
):
for product in product_list:
stock = product.variants.first().stocks.first()
Allocation.objects.create(
order_line=order_line, stock=stock, quantity_allocated=stock.quantity
)
product = product_list[0]
product.variants.first().channel_listings.filter(channel=channel_USD).update(
price_amount=None
)
variables = {
"filter": {"stockAvailability": "OUT_OF_STOCK"},
"channel": channel_USD.slug,
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 3
def test_products_query_with_filter_stock_availability_as_user(
query_products_with_filter,
user_api_client,
product_list,
order_line,
permission_manage_products,
channel_USD,
):
for product in product_list:
stock = product.variants.first().stocks.first()
Allocation.objects.create(
order_line=order_line, stock=stock, quantity_allocated=stock.quantity
)
product = product_list[0]
product.variants.first().channel_listings.filter(channel=channel_USD).update(
price_amount=None
)
variables = {
"filter": {"stockAvailability": "OUT_OF_STOCK"},
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
product_id = graphene.Node.to_global_id("Product", product_list[1].id)
second_product_id = graphene.Node.to_global_id("Product", product_list[2].id)
products = content["data"]["products"]["edges"]
assert len(products) == 2
assert products[0]["node"]["id"] == product_id
assert products[0]["node"]["name"] == product_list[1].name
assert products[1]["node"]["id"] == second_product_id
assert products[1]["node"]["name"] == product_list[2].name
def test_products_query_with_filter_stock_availability_channel_without_shipping_zones(
query_products_with_filter,
staff_api_client,
product,
permission_manage_products,
channel_USD,
):
channel_USD.shipping_zones.clear()
variables = {
"filter": {"stockAvailability": "OUT_OF_STOCK"},
"channel": channel_USD.slug,
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
product_id = graphene.Node.to_global_id("Product", product.id)
assert len(products) == 1
assert products[0]["node"]["id"] == product_id
@pytest.mark.parametrize(
"quantity_input, warehouse_indexes, count, indexes_of_products_in_result",
[
({"lte": "80", "gte": "20"}, [1, 2], 1, [1]),
({"lte": "120", "gte": "40"}, [1, 2], 1, [0]),
({"gte": "10"}, [1], 1, [1]),
({"gte": "110"}, [2], 0, []),
(None, [1], 1, [1]),
(None, [2], 2, [0, 1]),
({"lte": "210", "gte": "70"}, [], 1, [0]),
({"lte": "90"}, [], 1, [1]),
({"lte": "90", "gte": "75"}, [], 0, []),
],
)
def test_products_query_with_filter_stocks(
quantity_input,
warehouse_indexes,
count,
indexes_of_products_in_result,
query_products_with_filter,
staff_api_client,
product_with_single_variant,
product_with_two_variants,
warehouse,
channel_USD,
):
product1 = product_with_single_variant
product2 = product_with_two_variants
products = [product1, product2]
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
third_warehouse = Warehouse.objects.get(pk=warehouse.pk)
third_warehouse.slug = "third warehouse"
third_warehouse.pk = None
third_warehouse.save()
warehouses = [warehouse, second_warehouse, third_warehouse]
warehouse_pks = [
graphene.Node.to_global_id("Warehouse", warehouses[index].pk)
for index in warehouse_indexes
]
Stock.objects.bulk_create(
[
Stock(
warehouse=third_warehouse,
product_variant=product1.variants.first(),
quantity=100,
),
Stock(
warehouse=second_warehouse,
product_variant=product2.variants.first(),
quantity=10,
),
Stock(
warehouse=third_warehouse,
product_variant=product2.variants.first(),
quantity=25,
),
Stock(
warehouse=third_warehouse,
product_variant=product2.variants.last(),
quantity=30,
),
]
)
variables = {
"filter": {
"stocks": {"quantity": quantity_input, "warehouseIds": warehouse_pks}
},
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
query_products_with_filter, variables, check_no_permissions=False
)
content = get_graphql_content(response)
products_data = content["data"]["products"]["edges"]
product_ids = {
graphene.Node.to_global_id("Product", products[index].pk)
for index in indexes_of_products_in_result
}
assert len(products_data) == count
assert {node["node"]["id"] for node in products_data} == product_ids
def test_query_products_with_filter_ids(
api_client, product_list, query_products_with_filter, channel_USD
):
# given
product_ids = [
graphene.Node.to_global_id("Product", product.id) for product in product_list
][:2]
variables = {
"filter": {"ids": product_ids},
"channel": channel_USD.slug,
}
# when
response = api_client.post_graphql(query_products_with_filter, variables)
# then
content = get_graphql_content(response)
products_data = content["data"]["products"]["edges"]
assert len(products_data) == 2
assert [node["node"]["id"] for node in products_data] == product_ids
def test_products_query_with_filter_has_preordered_variants_false(
query_products_with_filter,
staff_api_client,
preorder_variant_global_threshold,
product_without_shipping,
permission_manage_products,
):
product = product_without_shipping
variables = {"filter": {"hasPreorderedVariants": False}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
product_id = graphene.Node.to_global_id("Product", product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == product_id
assert products[0]["node"]["name"] == product.name
def test_products_query_with_filter_has_preordered_variants_true(
query_products_with_filter,
staff_api_client,
preorder_variant_global_threshold,
product_without_shipping,
permission_manage_products,
):
product = preorder_variant_global_threshold.product
variables = {"filter": {"hasPreorderedVariants": True}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
product_id = graphene.Node.to_global_id("Product", product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == product_id
assert products[0]["node"]["name"] == product.name
def test_products_query_with_filter_has_preordered_variants_before_end_date(
query_products_with_filter,
staff_api_client,
preorder_variant_global_threshold,
permission_manage_products,
):
variant = preorder_variant_global_threshold
variant.preorder_end_date = timezone.now() + timedelta(days=3)
variant.save(update_fields=["preorder_end_date"])
product = preorder_variant_global_threshold.product
variables = {"filter": {"hasPreorderedVariants": True}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
product_id = graphene.Node.to_global_id("Product", product.id)
products = content["data"]["products"]["edges"]
assert len(products) == 1
assert products[0]["node"]["id"] == product_id
assert products[0]["node"]["name"] == product.name
def test_products_query_with_filter_has_preordered_variants_after_end_date(
query_products_with_filter,
staff_api_client,
preorder_variant_global_threshold,
permission_manage_products,
):
variant = preorder_variant_global_threshold
variant.preorder_end_date = timezone.now() - timedelta(days=3)
variant.save(update_fields=["preorder_end_date"])
variables = {"filter": {"hasPreorderedVariants": True}}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_products_with_filter, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 0
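# mediaById resolves to None for an unknown or wrongly-typed global ID and
# only surfaces a top-level GraphQL error when the ID cannot be decoded at
# all. A global ID is base64("<TypeName>:<pk>"), e.g.
# graphene.Node.to_global_id("ProductMedia", 42).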
QUERY_PRODUCT_MEDIA_BY_ID = """
query productMediaById($mediaId: ID!, $productId: ID!, $channel: String) {
product(id: $productId, channel: $channel) {
mediaById(id: $mediaId) {
id
url(size: 200)
}
}
}
"""
def test_query_product_media_by_id(user_api_client, product_with_image, channel_USD):
query = QUERY_PRODUCT_MEDIA_BY_ID
media = product_with_image.media.first()
variables = {
"productId": graphene.Node.to_global_id("Product", product_with_image.pk),
"mediaId": graphene.Node.to_global_id("ProductMedia", media.pk),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["product"]["mediaById"]["id"]
assert content["data"]["product"]["mediaById"]["url"]
def test_query_product_media_by_id_missing_id(
user_api_client, product_with_image, channel_USD
):
query = QUERY_PRODUCT_MEDIA_BY_ID
variables = {
"productId": graphene.Node.to_global_id("Product", product_with_image.pk),
"mediaId": graphene.Node.to_global_id("ProductMedia", -1),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["product"]["mediaById"] is None
def test_query_product_media_by_id_not_media_id(
user_api_client, product_with_image, channel_USD
):
query = QUERY_PRODUCT_MEDIA_BY_ID
variables = {
"productId": graphene.Node.to_global_id("Product", product_with_image.pk),
"mediaId": graphene.Node.to_global_id("Product", -1),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["product"]["mediaById"] is None
def test_query_product_media_by_invalid_id(
user_api_client, product_with_image, channel_USD
):
query = QUERY_PRODUCT_MEDIA_BY_ID
id = "sks"
variables = {
"productId": graphene.Node.to_global_id("Product", product_with_image.pk),
"mediaId": id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content_from_response(response)
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == f"Couldn't resolve id: {id}."
assert content["data"]["product"]["mediaById"] is None
QUERY_PRODUCT_IMAGE_BY_ID = """
query productImageById($imageId: ID!, $productId: ID!, $channel: String) {
product(id: $productId, channel: $channel) {
imageById(id: $imageId) {
id
url
}
}
}
"""
def test_query_product_image_by_id(user_api_client, product_with_image, channel_USD):
query = QUERY_PRODUCT_IMAGE_BY_ID
media = product_with_image.media.first()
variables = {
"productId": graphene.Node.to_global_id("Product", product_with_image.pk),
"imageId": graphene.Node.to_global_id("ProductImage", media.pk),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["product"]["imageById"]["id"]
assert content["data"]["product"]["imageById"]["url"]
def test_query_product_image_by_id_missing_id(
user_api_client, product_with_image, channel_USD
):
query = QUERY_PRODUCT_IMAGE_BY_ID
variables = {
"productId": graphene.Node.to_global_id("Product", product_with_image.pk),
"imageId": graphene.Node.to_global_id("ProductMedia", -1),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["product"]["imageById"] is None
def test_query_product_image_by_id_not_media_id(
user_api_client, product_with_image, channel_USD
):
query = QUERY_PRODUCT_IMAGE_BY_ID
variables = {
"productId": graphene.Node.to_global_id("Product", product_with_image.pk),
"imageId": graphene.Node.to_global_id("Product", -1),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["product"]["imageById"] is None
def test_query_product_image_by_invalid_id(
user_api_client, product_with_image, channel_USD
):
query = QUERY_PRODUCT_IMAGE_BY_ID
id = "mnb"
variables = {
"productId": graphene.Node.to_global_id("Product", product_with_image.pk),
"imageId": id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content_from_response(response)
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == f"Couldn't resolve id: {id}."
assert content["data"]["product"]["imageById"] is None
def test_product_with_collections(
staff_api_client, product, published_collection, permission_manage_products
):
query = """
query getProduct($productID: ID!) {
product(id: $productID) {
collections {
name
}
}
}
"""
product.collections.add(published_collection)
product.save()
product_id = graphene.Node.to_global_id("Product", product.id)
variables = {"productID": product_id}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["product"]
assert data["collections"][0]["name"] == published_collection.name
assert len(data["collections"]) == 1
def test_get_product_with_sorted_attribute_values(
staff_api_client,
product,
permission_manage_products,
product_type_page_reference_attribute,
page_list,
):
# given
query = """
query getProduct($productID: ID!) {
product(id: $productID) {
attributes {
attribute {
name
}
values {
id
slug
reference
}
}
}
}
"""
product_type = product.product_type
product_type.product_attributes.set([product_type_page_reference_attribute])
attr_value_1 = AttributeValue.objects.create(
attribute=product_type_page_reference_attribute,
name=page_list[0].title,
slug=f"{product.pk}_{page_list[0].pk}",
)
attr_value_2 = AttributeValue.objects.create(
attribute=product_type_page_reference_attribute,
name=page_list[1].title,
slug=f"{product.pk}_{page_list[1].pk}",
)
associate_attribute_values_to_instance(
product, product_type_page_reference_attribute, attr_value_2, attr_value_1
)
product_id = graphene.Node.to_global_id("Product", product.id)
variables = {"productID": product_id}
staff_api_client.user.user_permissions.add(permission_manage_products)
# when
response = staff_api_client.post_graphql(query, variables)
# then
content = get_graphql_content(response)
data = content["data"]["product"]
assert len(data["attributes"]) == 1
values = data["attributes"][0]["values"]
assert len(values) == 2
assert [value["id"] for value in values] == [
graphene.Node.to_global_id("AttributeValue", val.pk)
for val in [attr_value_2, attr_value_1]
]
def test_filter_products_by_wrong_attributes(user_api_client, product, channel_USD):
product_attr = product.product_type.product_attributes.get(slug="color")
attr_value = product.product_type.variant_attributes.get(slug="size").values.first()
query = """
query ($channel: String, $filter: ProductFilterInput){
products(
filter: $filter,
first: 1,
channel: $channel
) {
edges {
node {
name
}
}
}
}
"""
variables = {
"channel": channel_USD.slug,
"filter": {
"attributes": [{"slug": product_attr.slug, "values": [attr_value.slug]}]
},
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert products == []
def test_filter_products_with_unavailable_variants_attributes_as_user(
user_api_client, product_list, channel_USD
):
product_attr = product_list[0].product_type.product_attributes.first()
attr_value = product_attr.values.first()
query = """
query Products($attributesFilter: [AttributeInput], $channel: String) {
products(
first: 5,
filter: {attributes: $attributesFilter},
channel: $channel
) {
edges {
node {
id
}
}
}
}
"""
second_product_id = graphene.Node.to_global_id("Product", product_list[1].id)
third_product_id = graphene.Node.to_global_id("Product", product_list[2].id)
variables = {
"channel": channel_USD.slug,
"attributesFilter": [
{"slug": f"{product_attr.slug}", "values": [f"{attr_value.slug}"]}
],
}
product_list[0].variants.first().channel_listings.filter(
channel=channel_USD
).update(price_amount=None)
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 2
assert products[0]["node"]["id"] == second_product_id
assert products[1]["node"]["id"] == third_product_id
def test_filter_products_with_unavailable_variants_attributes_as_staff(
staff_api_client, product_list, channel_USD, permission_manage_products
):
product_attr = product_list[0].product_type.product_attributes.first()
attr_value = product_attr.values.first()
staff_api_client.user.user_permissions.add(permission_manage_products)
query = """
query Products($attributesFilter: [AttributeInput], $channel: String) {
products(
first: 5,
filter: {attributes: $attributesFilter},
channel: $channel
) {
edges {
node {
name
}
}
}
}
"""
variables = {
"channel": channel_USD.slug,
"attributesFilter": [
{"slug": f"{product_attr.slug}", "values": [f"{attr_value.slug}"]}
],
}
product_list[0].variants.first().channel_listings.filter(
channel=channel_USD
).update(price_amount=None)
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
products = content["data"]["products"]["edges"]
assert len(products) == 3
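# The sortBy argument is spliced in with %-formatting rather than a GraphQL
# variable because the tests inject raw enum literals such as
# "{field: PRICE, direction: ASC}" directly into the query text.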
SORT_PRODUCTS_QUERY = """
query ($channel:String) {
products (
sortBy: %(sort_by_product_order)s, first: 3, channel: $channel
) {
edges {
node {
name
productType{
name
}
pricing {
priceRangeUndiscounted {
start {
gross {
amount
}
}
}
priceRange {
start {
gross {
amount
}
}
}
}
updatedAt
}
}
}
}
"""
def test_sort_products(user_api_client, product, channel_USD):
product.updated_at = datetime.utcnow()
product.save()
product.pk = None
product.slug = "second-product"
product.updated_at = datetime.utcnow()
product.save()
ProductChannelListing.objects.create(
product=product,
channel=channel_USD,
is_published=True,
visible_in_listings=True,
)
variant = ProductVariant.objects.create(product=product, sku="1234")
ProductVariantChannelListing.objects.create(
variant=variant,
channel=channel_USD,
price_amount=Decimal(20),
cost_price_amount=Decimal(2),
currency=channel_USD.currency_code,
)
product.pk = None
product.slug = "third-product"
product.updated_at = datetime.utcnow()
product.save()
ProductChannelListing.objects.create(
product=product,
channel=channel_USD,
is_published=True,
visible_in_listings=True,
)
variant_second = ProductVariant.objects.create(product=product, sku="12345")
ProductVariantChannelListing.objects.create(
variant=variant_second,
channel=channel_USD,
currency=channel_USD.currency_code,
)
variables = {"channel": channel_USD.slug}
query = SORT_PRODUCTS_QUERY
# Test sorting by PRICE, ascending
sort_by = "{field: PRICE, direction: ASC}"
asc_price_query = query % {"sort_by_product_order": sort_by}
response = user_api_client.post_graphql(asc_price_query, variables)
content = get_graphql_content(response)
edges = content["data"]["products"]["edges"]
assert len(edges) == 2
price1 = edges[0]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][
"amount"
]
price2 = edges[1]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][
"amount"
]
assert price1 < price2
# Test sorting by PRICE, descending
sort_by = "{field: PRICE, direction:DESC}"
desc_price_query = query % {"sort_by_product_order": sort_by}
response = user_api_client.post_graphql(desc_price_query, variables)
content = get_graphql_content(response)
edges = content["data"]["products"]["edges"]
price1 = edges[0]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][
"amount"
]
price2 = edges[1]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][
"amount"
]
assert price1 > price2
# Test sorting by MINIMAL_PRICE, ascending
sort_by = "{field: MINIMAL_PRICE, direction:ASC}"
asc_price_query = query % {"sort_by_product_order": sort_by}
response = user_api_client.post_graphql(asc_price_query, variables)
content = get_graphql_content(response)
edges = content["data"]["products"]["edges"]
price1 = edges[0]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"]
price2 = edges[1]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"]
assert price1 < price2
# Test sorting by MINIMAL_PRICE, descending
sort_by = "{field: MINIMAL_PRICE, direction:DESC}"
desc_price_query = query % {"sort_by_product_order": sort_by}
response = user_api_client.post_graphql(desc_price_query, variables)
content = get_graphql_content(response)
edges = content["data"]["products"]["edges"]
price1 = edges[0]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"]
price2 = edges[1]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"]
assert price1 > price2
# Test sorting by DATE, ascending
asc_date_query = query % {"sort_by_product_order": "{field: DATE, direction:ASC}"}
response = user_api_client.post_graphql(asc_date_query, variables)
content = get_graphql_content(response)
date_0 = content["data"]["products"]["edges"][0]["node"]["updatedAt"]
date_1 = content["data"]["products"]["edges"][1]["node"]["updatedAt"]
assert parse_datetime(date_0) < parse_datetime(date_1)
# Test sorting by DATE, descending
desc_date_query = query % {"sort_by_product_order": "{field: DATE, direction:DESC}"}
response = user_api_client.post_graphql(desc_date_query, variables)
content = get_graphql_content(response)
date_0 = content["data"]["products"]["edges"][0]["node"]["updatedAt"]
date_1 = content["data"]["products"]["edges"][1]["node"]["updatedAt"]
assert parse_datetime(date_0) > parse_datetime(date_1)
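# Staff (unlike customers) also get products whose variants have no price;
# such a product resolves pricing as null and, as asserted below, sorts last
# on PRICE ascending and first on PRICE descending.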
def test_sort_products_by_price_as_staff(
staff_api_client, product, channel_USD, permission_manage_products
):
product.updated_at = datetime.utcnow()
product.save()
staff_api_client.user.user_permissions.add(permission_manage_products)
product.pk = None
product.slug = "second-product"
product.updated_at = datetime.utcnow()
product.save()
ProductChannelListing.objects.create(
product=product,
channel=channel_USD,
is_published=True,
visible_in_listings=True,
)
variant = ProductVariant.objects.create(product=product, sku="1234")
ProductVariantChannelListing.objects.create(
variant=variant,
channel=channel_USD,
price_amount=Decimal(20),
cost_price_amount=Decimal(2),
currency=channel_USD.currency_code,
)
product.pk = None
product.slug = "third-product"
product.updated_at = datetime.utcnow()
product.save()
ProductChannelListing.objects.create(
product=product,
channel=channel_USD,
is_published=True,
visible_in_listings=True,
)
variant_second = ProductVariant.objects.create(product=product, sku="12345")
ProductVariantChannelListing.objects.create(
variant=variant_second,
channel=channel_USD,
currency=channel_USD.currency_code,
)
variables = {"channel": channel_USD.slug}
query = SORT_PRODUCTS_QUERY
# Test sorting by PRICE, ascending
sort_by = "{field: PRICE, direction: ASC}"
asc_price_query = query % {"sort_by_product_order": sort_by}
response = staff_api_client.post_graphql(asc_price_query, variables)
content = get_graphql_content(response)
edges = content["data"]["products"]["edges"]
assert len(edges) == 3
price1 = edges[0]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][
"amount"
]
price2 = edges[1]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][
"amount"
]
assert edges[2]["node"]["pricing"] is None
assert price1 < price2
# Test sorting by PRICE, descending
sort_by = "{field: PRICE, direction:DESC}"
desc_price_query = query % {"sort_by_product_order": sort_by}
response = staff_api_client.post_graphql(desc_price_query, variables)
content = get_graphql_content(response)
edges = content["data"]["products"]["edges"]
price1 = edges[1]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][
"amount"
]
price2 = edges[2]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][
"amount"
]
assert edges[0]["node"]["pricing"] is None
assert price1 > price2
def test_sort_products_product_type_name(
user_api_client, product, product_with_default_variant, channel_USD
):
variables = {"channel": channel_USD.slug}
# Test sorting by TYPE, ascending
asc_published_query = SORT_PRODUCTS_QUERY % {
"sort_by_product_order": "{field: TYPE, direction:ASC}"
}
response = user_api_client.post_graphql(asc_published_query, variables)
content = get_graphql_content(response)
edges = content["data"]["products"]["edges"]
product_type_name_0 = edges[0]["node"]["productType"]["name"]
product_type_name_1 = edges[1]["node"]["productType"]["name"]
assert product_type_name_0 < product_type_name_1
    # Test sorting by TYPE, descending
    desc_published_query = SORT_PRODUCTS_QUERY % {
        "sort_by_product_order": "{field: TYPE, direction:DESC}"
    }
    response = user_api_client.post_graphql(desc_published_query, variables)
    content = get_graphql_content(response)
    edges = content["data"]["products"]["edges"]
    product_type_name_0 = edges[0]["node"]["productType"]["name"]
    product_type_name_1 = edges[1]["node"]["productType"]["name"]
    assert product_type_name_0 > product_type_name_1
QUERY_PRODUCT_TYPE = """
query ($id: ID!){
productType(
id: $id,
) {
id
name
weight {
unit
value
}
}
}
"""
def test_product_type_query_by_id_weight_returned_in_default_unit(
user_api_client, product_type, site_settings
):
# given
product_type.weight = Weight(kg=10)
product_type.save(update_fields=["weight"])
site_settings.default_weight_unit = WeightUnits.OZ
site_settings.save(update_fields=["default_weight_unit"])
variables = {"id": graphene.Node.to_global_id("ProductType", product_type.pk)}
# when
response = user_api_client.post_graphql(QUERY_PRODUCT_TYPE, variables=variables)
# then
content = get_graphql_content(response)
product_data = content["data"]["productType"]
assert product_data is not None
assert product_data["name"] == product_type.name
assert product_data["weight"]["value"] == 352.73999999999995
assert product_data["weight"]["unit"] == WeightUnits.OZ.upper()
CREATE_PRODUCT_MUTATION = """
mutation createProduct(
$input: ProductCreateInput!
) {
productCreate(
input: $input) {
product {
id
category {
name
}
description
chargeTaxes
taxType {
taxCode
description
}
name
slug
rating
productType {
name
}
attributes {
attribute {
slug
}
values {
slug
name
reference
richText
boolean
dateTime
date
file {
url
contentType
}
}
}
}
errors {
field
code
message
attributes
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.product_updated")
@patch("saleor.plugins.manager.PluginsManager.product_created")
def test_create_product(
created_webhook_mock,
updated_webhook_mock,
staff_api_client,
product_type,
category,
size_attribute,
description_json,
permission_manage_products,
monkeypatch,
):
query = CREATE_PRODUCT_MUTATION
description_json = json.dumps(description_json)
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
product_charge_taxes = True
product_tax_rate = "STANDARD"
# Mock tax interface with fake response from tax gateway
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(description="", code=product_tax_rate),
)
# Default attribute defined in product_type fixture
color_attr = product_type.product_attributes.get(name="Color")
color_value_slug = color_attr.values.first().slug
color_attr_id = graphene.Node.to_global_id("Attribute", color_attr.id)
# Add second attribute
product_type.product_attributes.add(size_attribute)
size_attr_id = graphene.Node.to_global_id("Attribute", size_attribute.id)
non_existent_attr_value = "The cake is a lie"
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"description": description_json,
"chargeTaxes": product_charge_taxes,
"taxCode": product_tax_rate,
"attributes": [
{"id": color_attr_id, "values": [color_value_slug]},
{"id": size_attr_id, "values": [non_existent_attr_value]},
],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["description"] == description_json
assert data["product"]["chargeTaxes"] == product_charge_taxes
assert data["product"]["taxType"]["taxCode"] == product_tax_rate
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
values = (
data["product"]["attributes"][0]["values"][0]["slug"],
data["product"]["attributes"][1]["values"][0]["slug"],
)
assert slugify(non_existent_attr_value) in values
assert color_value_slug in values
product = Product.objects.first()
created_webhook_mock.assert_called_once_with(product)
updated_webhook_mock.assert_not_called()
def test_create_product_description_plaintext(
staff_api_client,
product_type,
category,
size_attribute,
permission_manage_products,
monkeypatch,
):
query = CREATE_PRODUCT_MUTATION
description = "some test description"
description_json = dummy_editorjs(description, json_format=True)
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
product_charge_taxes = True
product_tax_rate = "STANDARD"
# Mock tax interface with fake response from tax gateway
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(description="", code=product_tax_rate),
)
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"description": description_json,
"chargeTaxes": product_charge_taxes,
"taxCode": product_tax_rate,
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert not data["errors"]
product = Product.objects.all().first()
assert product.description_plaintext == description
def test_create_product_with_rich_text_attribute(
staff_api_client,
product_type,
category,
rich_text_attribute,
color_attribute,
permission_manage_products,
product,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
# Add second attribute
product_type.product_attributes.add(rich_text_attribute)
rich_text_attribute_id = graphene.Node.to_global_id(
"Attribute", rich_text_attribute.id
)
rich_text = json.dumps(dummy_editorjs("test product" * 5))
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [
{
"id": rich_text_attribute_id,
"richText": rich_text,
}
],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
_, product_id = graphene.Node.from_global_id(data["product"]["id"])
expected_attributes_data = [
{"attribute": {"slug": "color"}, "values": []},
{
"attribute": {"slug": "text"},
"values": [
{
"slug": f"{product_id}_{rich_text_attribute.id}",
"name": (
"test producttest producttest producttest producttest product"
),
"reference": None,
"richText": rich_text,
"file": None,
"boolean": None,
"date": None,
"dateTime": None,
}
],
},
]
for attr_data in data["product"]["attributes"]:
assert attr_data in expected_attributes_data
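# The date and date-time attribute tests below freeze time so that the
# ``datetime.now()`` used as the attribute value is deterministic.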
@freeze_time(datetime(2020, 5, 5, 5, 5, 5, tzinfo=pytz.utc))
def test_create_product_with_date_time_attribute(
staff_api_client,
product_type,
date_time_attribute,
color_attribute,
permission_manage_products,
product,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
product_name = "test name"
# Add second attribute
product_type.product_attributes.add(date_time_attribute)
date_time_attribute_id = graphene.Node.to_global_id(
"Attribute", date_time_attribute.id
)
value = datetime.now(tz=pytz.utc)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"name": product_name,
"attributes": [
{
"id": date_time_attribute_id,
"dateTime": value,
}
],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["productType"]["name"] == product_type.name
_, product_id = graphene.Node.from_global_id(data["product"]["id"])
expected_attributes_data = {
"attribute": {"slug": "release-date-time"},
"values": [
{
"slug": f"{product_id}_{date_time_attribute.id}",
"name": str(value),
"reference": None,
"richText": None,
"boolean": None,
"file": None,
"date": None,
"dateTime": str(value.isoformat()),
}
],
}
assert expected_attributes_data in data["product"]["attributes"]
@freeze_time(datetime(2020, 5, 5, 5, 5, 5, tzinfo=pytz.utc))
def test_create_product_with_date_attribute(
staff_api_client,
product_type,
date_attribute,
color_attribute,
permission_manage_products,
product,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
product_name = "test name"
# Add second attribute
product_type.product_attributes.add(date_attribute)
date_attribute_id = graphene.Node.to_global_id("Attribute", date_attribute.id)
value = datetime.now(tz=pytz.utc).date()
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"name": product_name,
"attributes": [
{
"id": date_attribute_id,
"date": value,
}
],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["productType"]["name"] == product_type.name
_, product_id = graphene.Node.from_global_id(data["product"]["id"])
expected_attributes_data = {
"attribute": {"slug": "release-date"},
"values": [
{
"slug": f"{product_id}_{date_attribute.id}",
"name": str(value),
"reference": None,
"richText": None,
"boolean": None,
"file": None,
"date": str(value),
"dateTime": None,
}
],
}
assert expected_attributes_data in data["product"]["attributes"]
def test_create_product_with_boolean_attribute(
staff_api_client,
product_type,
category,
boolean_attribute,
permission_manage_products,
product,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
# Add second attribute
product_type.product_attributes.add(boolean_attribute)
boolean_attribute_id = graphene.Node.to_global_id("Attribute", boolean_attribute.id)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"attributes": [
{
"id": boolean_attribute_id,
"boolean": False,
}
],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
expected_attributes_data = {
"attribute": {"slug": "boolean"},
"values": [
{
"slug": f"{boolean_attribute.id}_false",
"name": "Boolean: No",
"reference": None,
"richText": None,
"boolean": False,
"date": None,
"dateTime": None,
"file": None,
}
],
}
assert expected_attributes_data in data["product"]["attributes"]
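# Full-text search tests: products are matched against both ``name`` and
# ``description_plaintext``, and results can be ordered by search RANK.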
SEARCH_PRODUCTS_QUERY = """
query Products(
$filters: ProductFilterInput,
$sortBy: ProductOrder,
$channel: String,
$after: String,
) {
products(
first: 5,
filter: $filters,
sortBy: $sortBy,
channel: $channel,
after: $after,
) {
edges {
node {
id
name
}
cursor
}
}
}
"""
def test_search_product_by_description(user_api_client, product_list, channel_USD):
variables = {"filters": {"search": "big"}, "channel": channel_USD.slug}
response = user_api_client.post_graphql(SEARCH_PRODUCTS_QUERY, variables)
content = get_graphql_content(response)
assert len(content["data"]["products"]["edges"]) == 2
variables = {"filters": {"search": "small"}, "channel": channel_USD.slug}
response = user_api_client.post_graphql(SEARCH_PRODUCTS_QUERY, variables)
content = get_graphql_content(response)
assert len(content["data"]["products"]["edges"]) == 1
def test_search_product_by_description_and_name(
user_api_client, product_list, product, channel_USD, category, product_type
):
product.description_plaintext = "red big red product"
product.save()
product_2 = product_list[1]
product_2.name = "red product"
product_2.save()
product_1 = product_list[0]
product_1.description_plaintext = "some red product"
product_1.save()
product_3 = product_list[2]
product_3.description_plaintext = "desc without searched word"
product_3.save()
variables = {
"filters": {
"search": "red",
},
"sortBy": {"field": "RANK", "direction": "DESC"},
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(SEARCH_PRODUCTS_QUERY, variables)
content = get_graphql_content(response)
data = content["data"]["products"]["edges"]
assert len(data) == 3
assert data[0]["node"]["name"] == product_2.name
assert data[1]["node"]["name"] == product.name
assert data[2]["node"]["name"] == product_1.name
def test_sort_product_by_rank_without_search(
user_api_client, product_list, product, channel_USD, category, product_type
):
product.description_plaintext = "red big red product"
product.save()
product_2 = product_list[1]
product_2.name = "red product"
product_2.save()
product_1 = product_list[0]
product_1.description_plaintext = "some red product"
product_1.save()
variables = {
"sortBy": {"field": "RANK", "direction": "DESC"},
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(SEARCH_PRODUCTS_QUERY, variables)
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["errors"]) == 1
    assert (
        content["errors"][0]["message"]
        == "Sorting by Rank is available only with searching."
    )
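# An empty or null search value must be rejected for RANK sorting just like a
# missing search filter.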
@pytest.mark.parametrize("search_value", ["", " ", None])
def test_sort_product_by_rank_with_empty_search_value(
search_value,
user_api_client,
product_list,
product,
channel_USD,
category,
product_type,
):
product.description_plaintext = "red big red product"
product.save()
product_2 = product_list[1]
product_2.name = "red product"
product_2.save()
product_1 = product_list[0]
product_1.description_plaintext = "some red product"
product_1.save()
variables = {
"filters": {
"search": search_value,
},
"sortBy": {"field": "RANK", "direction": "DESC"},
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(SEARCH_PRODUCTS_QUERY, variables)
    content = get_graphql_content(response, ignore_errors=True)
    assert len(content["errors"]) == 1
    assert (
        content["errors"][0]["message"]
        == "Sorting by Rank is available only with searching."
    )
def test_search_product_by_description_and_name_without_sort_by(
user_api_client, product_list, product, channel_USD, category, product_type
):
product.description_plaintext = "red big red product"
product.save()
product_2 = product_list[1]
product_2.name = "red product"
product_2.save()
product_1 = product_list[0]
product_1.description_plaintext = "some red product"
product_1.save()
variables = {
"filters": {
"search": "red",
},
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(SEARCH_PRODUCTS_QUERY, variables)
content = get_graphql_content(response)
data = content["data"]["products"]["edges"]
assert data[0]["node"]["name"] == product_2.name
assert data[1]["node"]["name"] == product.name
assert data[2]["node"]["name"] == product_1.name
def test_search_product_by_description_and_name_and_use_cursor(
user_api_client, product_list, product, channel_USD, category, product_type
):
product.description_plaintext = "red big red product"
product.save()
product_2 = product_list[1]
product_2.name = "red product"
product_2.save()
product_1 = product_list[0]
product_1.description_plaintext = "some red product"
product_1.save()
variables = {
"filters": {
"search": "red",
},
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(SEARCH_PRODUCTS_QUERY, variables)
content = get_graphql_content(response)
cursor = content["data"]["products"]["edges"][0]["cursor"]
variables = {
"filters": {
"search": "red",
},
"after": cursor,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(SEARCH_PRODUCTS_QUERY, variables)
content = get_graphql_content(response)
data = content["data"]["products"]["edges"]
assert data[0]["node"]["name"] == product.name
assert data[1]["node"]["name"] == product_1.name
@freeze_time("2020-03-18 12:00:00")
def test_create_product_with_rating(
staff_api_client,
product_type,
category,
permission_manage_products,
settings,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
expected_rating = 4.57
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"rating": expected_rating,
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["rating"] == expected_rating
assert Product.objects.get().rating == expected_rating
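# File attribute tests: passing an existing file URL still creates a new
# attribute value (note the ``-2`` slug suffix and the incremented value
# count asserted below).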
def test_create_product_with_file_attribute(
staff_api_client,
product_type,
category,
file_attribute,
color_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
values_count = file_attribute.values.count()
# Add second attribute
product_type.product_attributes.add(file_attribute)
file_attr_id = graphene.Node.to_global_id("Attribute", file_attribute.id)
existing_value = file_attribute.values.first()
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": file_attr_id, "file": existing_value.file_url}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
assert len(data["product"]["attributes"]) == 2
expected_attributes_data = [
{"attribute": {"slug": color_attribute.slug}, "values": []},
{
"attribute": {"slug": file_attribute.slug},
"values": [
{
"name": existing_value.name,
"slug": f"{existing_value.slug}-2",
"file": {
"url": f"http://testserver/media/{existing_value.file_url}",
"contentType": None,
},
"reference": None,
"richText": None,
"boolean": None,
"date": None,
"dateTime": None,
}
],
},
]
for attr_data in data["product"]["attributes"]:
assert attr_data in expected_attributes_data
file_attribute.refresh_from_db()
assert file_attribute.values.count() == values_count + 1
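# Reference attributes (page/product) store values with a
# ``{product_pk}_{referenced_pk}`` slug and return the referenced object's
# global ID in the ``reference`` field.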
def test_create_product_with_page_reference_attribute(
staff_api_client,
product_type,
category,
color_attribute,
product_type_page_reference_attribute,
permission_manage_products,
page,
):
query = CREATE_PRODUCT_MUTATION
values_count = product_type_page_reference_attribute.values.count()
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
# Add second attribute
product_type.product_attributes.add(product_type_page_reference_attribute)
reference_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.id
)
reference = graphene.Node.to_global_id("Page", page.pk)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": reference_attr_id, "references": [reference]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
_, product_id = graphene.Node.from_global_id(data["product"]["id"])
expected_attributes_data = [
{"attribute": {"slug": color_attribute.slug}, "values": []},
{
"attribute": {"slug": product_type_page_reference_attribute.slug},
"values": [
{
"slug": f"{product_id}_{page.id}",
"name": page.title,
"file": None,
"richText": None,
"boolean": None,
"date": None,
"dateTime": None,
"reference": reference,
}
],
},
]
for attr_data in data["product"]["attributes"]:
assert attr_data in expected_attributes_data
product_type_page_reference_attribute.refresh_from_db()
assert product_type_page_reference_attribute.values.count() == values_count + 1
def test_create_product_with_product_reference_attribute(
staff_api_client,
product_type,
category,
color_attribute,
product_type_product_reference_attribute,
permission_manage_products,
product,
):
query = CREATE_PRODUCT_MUTATION
values_count = product_type_product_reference_attribute.values.count()
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
# Add second attribute
product_type.product_attributes.add(product_type_product_reference_attribute)
reference_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.id
)
reference = graphene.Node.to_global_id("Product", product.pk)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": reference_attr_id, "references": [reference]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
_, product_id = graphene.Node.from_global_id(data["product"]["id"])
expected_attributes_data = [
{"attribute": {"slug": color_attribute.slug}, "values": []},
{
"attribute": {"slug": product_type_product_reference_attribute.slug},
"values": [
{
"slug": f"{product_id}_{product.id}",
"name": product.name,
"file": None,
"richText": None,
"boolean": None,
"date": None,
"dateTime": None,
"reference": reference,
}
],
},
]
for attr_data in data["product"]["attributes"]:
assert attr_data in expected_attributes_data
product_type_product_reference_attribute.refresh_from_db()
assert product_type_product_reference_attribute.values.count() == values_count + 1
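# Reference values must be persisted in the order given in the input, not in
# the referenced objects' pk order.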
def test_create_product_with_product_reference_attribute_values_saved_in_order(
staff_api_client,
product_type,
category,
color_attribute,
product_type_product_reference_attribute,
permission_manage_products,
product_list,
):
query = CREATE_PRODUCT_MUTATION
values_count = product_type_product_reference_attribute.values.count()
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
# Add second attribute
product_type.product_attributes.set([product_type_product_reference_attribute])
reference_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.id
)
reference_1 = graphene.Node.to_global_id("Product", product_list[0].pk)
reference_2 = graphene.Node.to_global_id("Product", product_list[1].pk)
reference_3 = graphene.Node.to_global_id("Product", product_list[2].pk)
# test creating root product
reference_ids = [reference_3, reference_1, reference_2]
reference_instances = [product_list[2], product_list[0], product_list[1]]
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": reference_attr_id, "references": reference_ids}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
_, product_id = graphene.Node.from_global_id(data["product"]["id"])
expected_values = [
{
"slug": f"{product_id}_{product.id}",
"name": product.name,
"file": None,
"richText": None,
"boolean": None,
"date": None,
"dateTime": None,
"reference": reference,
}
for product, reference in zip(reference_instances, reference_ids)
]
assert len(data["product"]["attributes"]) == 1
attribute_data = data["product"]["attributes"][0]
assert (
attribute_data["attribute"]["slug"]
== product_type_product_reference_attribute.slug
)
assert len(attribute_data["values"]) == 3
assert attribute_data["values"] == expected_values
product_type_product_reference_attribute.refresh_from_db()
assert product_type_product_reference_attribute.values.count() == values_count + 3
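# A file URL that does not match any existing value creates a brand new
# attribute value whose slug is the slugified file name.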
def test_create_product_with_file_attribute_new_attribute_value(
staff_api_client,
product_type,
category,
file_attribute,
color_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
values_count = file_attribute.values.count()
# Add second attribute
product_type.product_attributes.add(file_attribute)
file_attr_id = graphene.Node.to_global_id("Attribute", file_attribute.id)
non_existing_value = "new_test.jpg"
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": file_attr_id, "file": non_existing_value}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
assert len(data["product"]["attributes"]) == 2
expected_attributes_data = [
{"attribute": {"slug": color_attribute.slug}, "values": []},
{
"attribute": {"slug": file_attribute.slug},
"values": [
{
"name": non_existing_value,
"slug": slugify(non_existing_value, allow_unicode=True),
"reference": None,
"richText": None,
"boolean": None,
"date": None,
"dateTime": None,
"file": {
"url": "http://testserver/media/" + non_existing_value,
"contentType": None,
},
}
],
},
]
for attr_data in data["product"]["attributes"]:
assert attr_data in expected_attributes_data
file_attribute.refresh_from_db()
assert file_attribute.values.count() == values_count + 1
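# In the two tests below the input uses the generic ``values`` key instead of
# ``file``, so no file URL is provided; the outcome then depends on whether
# the attribute is marked as value_required.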
def test_create_product_with_file_attribute_not_required_no_file_url_given(
staff_api_client,
product_type,
category,
file_attribute,
color_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
file_attribute.value_required = False
file_attribute.save(update_fields=["value_required"])
# Add second attribute
product_type.product_attributes.add(file_attribute)
file_attr_id = graphene.Node.to_global_id("Attribute", file_attribute.id)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": file_attr_id, "values": ["test.txt"]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
assert len(data["product"]["attributes"]) == 2
expected_attributes_data = [
{"attribute": {"slug": color_attribute.slug}, "values": []},
{"attribute": {"slug": file_attribute.slug}, "values": []},
]
for attr_data in data["product"]["attributes"]:
assert attr_data in expected_attributes_data
file_attribute.refresh_from_db()
def test_create_product_with_file_attribute_required_no_file_url_given(
staff_api_client,
product_type,
category,
file_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
file_attribute.value_required = True
file_attribute.save(update_fields=["value_required"])
# Add second attribute
product_type.product_attributes.add(file_attribute)
file_attr_id = graphene.Node.to_global_id("Attribute", file_attribute.id)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": file_attr_id, "values": ["test.txt"]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
errors = data["errors"]
assert not data["product"]
assert len(errors) == 1
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
assert errors[0]["field"] == "attributes"
assert errors[0]["attributes"] == [
graphene.Node.to_global_id("Attribute", file_attribute.pk)
]
def test_create_product_with_page_reference_attribute_required_no_references(
staff_api_client,
product_type,
category,
product_type_page_reference_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_page_reference_attribute.value_required = True
product_type_page_reference_attribute.save(update_fields=["value_required"])
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
# Add second attribute
product_type.product_attributes.add(product_type_page_reference_attribute)
reference_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.id
)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": reference_attr_id, "references": []}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
errors = data["errors"]
assert not data["product"]
assert len(errors) == 1
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
assert errors[0]["field"] == "attributes"
assert errors[0]["attributes"] == [
graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.pk
)
]
def test_create_product_with_product_reference_attribute_required_no_references(
staff_api_client,
product_type,
category,
product_type_product_reference_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_product_reference_attribute.value_required = True
product_type_product_reference_attribute.save(update_fields=["value_required"])
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
# Add second attribute
product_type.product_attributes.add(product_type_product_reference_attribute)
reference_attr_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.id
)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": reference_attr_id, "references": []}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
errors = data["errors"]
assert not data["product"]
assert len(errors) == 1
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
assert errors[0]["field"] == "attributes"
assert errors[0]["attributes"] == [
graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.pk
)
]
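# Supplying a ``file`` payload for a non-file (dropdown) attribute results in
# no values being assigned rather than an error.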
def test_create_product_no_values_given(
staff_api_client,
product_type,
category,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
# Default attribute defined in product_type fixture
color_attr = product_type.product_attributes.get(name="Color")
color_attr_id = graphene.Node.to_global_id("Attribute", color_attr.id)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": color_attr_id, "file": "test.jpg"}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
assert len(data["product"]["attributes"]) == 1
assert data["product"]["attributes"][0]["values"] == []
@pytest.mark.parametrize(
    "value, expected_name",
    [(20.1, "20.1"), (20, "20"), ("1", "1")],
)
def test_create_product_with_numeric_attribute_new_attribute_value(
    value,
    expected_name,
staff_api_client,
product_type,
category,
numeric_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
values_count = numeric_attribute.values.count()
# Add second attribute
product_type.product_attributes.set([numeric_attribute])
attr_id = graphene.Node.to_global_id("Attribute", numeric_attribute.id)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": attr_id, "values": [value]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
product_pk = graphene.Node.from_global_id(data["product"]["id"])[1]
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
assert len(data["product"]["attributes"]) == 1
assert (
data["product"]["attributes"][0]["attribute"]["slug"] == numeric_attribute.slug
)
values = data["product"]["attributes"][0]["values"]
assert len(values) == 1
assert values[0]["name"] == expected_name
assert values[0]["slug"] == f"{product_pk}_{numeric_attribute.id}"
numeric_attribute.refresh_from_db()
assert numeric_attribute.values.count() == values_count + 1
def test_create_product_with_numeric_attribute_existing_value(
staff_api_client,
product_type,
category,
numeric_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
values_count = numeric_attribute.values.count()
# Add second attribute
product_type.product_attributes.set([numeric_attribute])
attr_id = graphene.Node.to_global_id("Attribute", numeric_attribute.id)
existing_value = numeric_attribute.values.first()
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": attr_id, "values": [existing_value.name]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
product_pk = graphene.Node.from_global_id(data["product"]["id"])[1]
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
assert len(data["product"]["attributes"]) == 1
assert (
data["product"]["attributes"][0]["attribute"]["slug"] == numeric_attribute.slug
)
values = data["product"]["attributes"][0]["values"]
assert len(values) == 1
assert values[0]["name"] == existing_value.name
assert values[0]["slug"] == f"{product_pk}_{numeric_attribute.id}"
numeric_attribute.refresh_from_db()
assert numeric_attribute.values.count() == values_count + 1
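# Swatch attributes behave differently: a new name creates a slugified value,
# while an existing name is reused and the value count stays unchanged.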
def test_create_product_with_swatch_attribute_new_attribute_value(
staff_api_client,
product_type,
category,
swatch_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
values_count = swatch_attribute.values.count()
new_value = "Yellow"
# Add second attribute
product_type.product_attributes.set([swatch_attribute])
attr_id = graphene.Node.to_global_id("Attribute", swatch_attribute.id)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": attr_id, "values": [new_value]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
assert len(data["product"]["attributes"]) == 1
assert (
data["product"]["attributes"][0]["attribute"]["slug"] == swatch_attribute.slug
)
values = data["product"]["attributes"][0]["values"]
assert len(values) == 1
assert values[0]["name"] == new_value
assert values[0]["slug"] == slugify(new_value)
swatch_attribute.refresh_from_db()
assert swatch_attribute.values.count() == values_count + 1
def test_create_product_with_swatch_attribute_existing_value(
staff_api_client,
product_type,
category,
swatch_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
values_count = swatch_attribute.values.count()
# Add second attribute
product_type.product_attributes.set([swatch_attribute])
attr_id = graphene.Node.to_global_id("Attribute", swatch_attribute.id)
existing_value = swatch_attribute.values.first()
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": attr_id, "values": [existing_value.name]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
assert len(data["product"]["attributes"]) == 1
assert (
data["product"]["attributes"][0]["attribute"]["slug"] == swatch_attribute.slug
)
values = data["product"]["attributes"][0]["values"]
assert len(values) == 1
assert values[0]["name"] == existing_value.name
assert values[0]["slug"] == existing_value.slug
swatch_attribute.refresh_from_db()
assert swatch_attribute.values.count() == values_count
def test_create_product_with_numeric_attribute_not_numeric_value_given(
staff_api_client,
product_type,
category,
numeric_attribute,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
values_count = numeric_attribute.values.count()
# Add second attribute
product_type.product_attributes.set([numeric_attribute])
attr_id = graphene.Node.to_global_id("Attribute", numeric_attribute.id)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"attributes": [{"id": attr_id, "values": ["abd"]}],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert not data["product"]
assert len(data["errors"]) == 1
assert data["errors"][0]["field"] == "attributes"
assert data["errors"][0]["code"] == AttributeErrorCode.INVALID.name
numeric_attribute.refresh_from_db()
assert numeric_attribute.values.count() == values_count
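# Mutations for managing a product's default variant and variant ordering.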
PRODUCT_VARIANT_SET_DEFAULT_MUTATION = """
mutation Prod($productId: ID!, $variantId: ID!) {
productVariantSetDefault(productId: $productId, variantId: $variantId) {
product {
defaultVariant {
id
}
}
errors {
code
field
}
}
}
"""
REORDER_PRODUCT_VARIANTS_MUTATION = """
mutation ProductVariantReorder($product: ID!, $moves: [ReorderInput]!) {
productVariantReorder(productId: $product, moves: $moves) {
errors {
code
field
}
product {
id
}
}
}
"""
def test_product_variant_set_default(
staff_api_client, permission_manage_products, product_with_two_variants
):
assert not product_with_two_variants.default_variant
first_variant = product_with_two_variants.variants.first()
first_variant_id = graphene.Node.to_global_id("ProductVariant", first_variant.pk)
variables = {
"productId": graphene.Node.to_global_id(
"Product", product_with_two_variants.pk
),
"variantId": first_variant_id,
}
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_SET_DEFAULT_MUTATION,
variables,
permissions=[permission_manage_products],
)
product_with_two_variants.refresh_from_db()
assert product_with_two_variants.default_variant == first_variant
content = get_graphql_content(response)
data = content["data"]["productVariantSetDefault"]
assert not data["errors"]
assert data["product"]["defaultVariant"]["id"] == first_variant_id
def test_product_variant_set_default_invalid_id(
staff_api_client, permission_manage_products, product_with_two_variants
):
assert not product_with_two_variants.default_variant
first_variant = product_with_two_variants.variants.first()
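    # deliberately encode the variant pk with the wrong type ("Product")
    # to trigger a GRAPHQL_ERROR on variantId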
variables = {
"productId": graphene.Node.to_global_id(
"Product", product_with_two_variants.pk
),
"variantId": graphene.Node.to_global_id("Product", first_variant.pk),
}
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_SET_DEFAULT_MUTATION,
variables,
permissions=[permission_manage_products],
)
product_with_two_variants.refresh_from_db()
assert not product_with_two_variants.default_variant
content = get_graphql_content(response)
data = content["data"]["productVariantSetDefault"]
assert data["errors"][0]["code"] == ProductErrorCode.GRAPHQL_ERROR.name
assert data["errors"][0]["field"] == "variantId"
def test_product_variant_set_default_not_products_variant(
staff_api_client,
permission_manage_products,
product_with_two_variants,
product_with_single_variant,
):
assert not product_with_two_variants.default_variant
foreign_variant = product_with_single_variant.variants.first()
variables = {
"productId": graphene.Node.to_global_id(
"Product", product_with_two_variants.pk
),
"variantId": graphene.Node.to_global_id("ProductVariant", foreign_variant.pk),
}
response = staff_api_client.post_graphql(
PRODUCT_VARIANT_SET_DEFAULT_MUTATION,
variables,
permissions=[permission_manage_products],
)
product_with_two_variants.refresh_from_db()
assert not product_with_two_variants.default_variant
content = get_graphql_content(response)
data = content["data"]["productVariantSetDefault"]
assert data["errors"][0]["code"] == ProductErrorCode.NOT_PRODUCTS_VARIANT.name
assert data["errors"][0]["field"] == "variantId"
def test_reorder_variants(
staff_api_client,
product_with_two_variants,
permission_manage_products,
):
default_variants = product_with_two_variants.variants.all()
new_variants = [default_variants[1], default_variants[0]]
variables = {
"product": graphene.Node.to_global_id("Product", product_with_two_variants.pk),
"moves": [
{
"id": graphene.Node.to_global_id("ProductVariant", variant.pk),
"sortOrder": _order + 1,
}
for _order, variant in enumerate(new_variants)
],
}
response = staff_api_client.post_graphql(
REORDER_PRODUCT_VARIANTS_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantReorder"]
assert not data["errors"]
assert list(product_with_two_variants.variants.all()) == new_variants
def test_reorder_variants_invalid_variants(
staff_api_client,
product,
product_with_two_variants,
permission_manage_products,
):
default_variants = product_with_two_variants.variants.all()
new_variants = [product.variants.first(), default_variants[1]]
variables = {
"product": graphene.Node.to_global_id("Product", product_with_two_variants.pk),
"moves": [
{
"id": graphene.Node.to_global_id("ProductVariant", variant.pk),
"sortOrder": _order + 1,
}
for _order, variant in enumerate(new_variants)
],
}
response = staff_api_client.post_graphql(
REORDER_PRODUCT_VARIANTS_MUTATION,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
data = content["data"]["productVariantReorder"]
assert data["errors"][0]["field"] == "moves"
assert data["errors"][0]["code"] == ProductErrorCode.NOT_FOUND.name
@pytest.mark.parametrize("input_slug", ["", None])
def test_create_product_no_slug_in_input(
staff_api_client,
product_type,
category,
description_json,
permission_manage_products,
monkeypatch,
input_slug,
):
query = CREATE_PRODUCT_MUTATION
description_json = json.dumps(description_json)
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_tax_rate = "STANDARD"
# Mock tax interface with fake response from tax gateway
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(description="", code=product_tax_rate),
)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": input_slug,
"taxCode": product_tax_rate,
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == "test-name"
assert data["product"]["taxType"]["taxCode"] == product_tax_rate
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
def test_create_product_no_category_id(
staff_api_client,
product_type,
permission_manage_products,
monkeypatch,
):
query = CREATE_PRODUCT_MUTATION
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
product_name = "test name"
product_tax_rate = "STANDARD"
input_slug = "test-slug"
# Mock tax interface with fake response from tax gateway
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(description="", code=product_tax_rate),
)
variables = {
"input": {
"productType": product_type_id,
"name": product_name,
"slug": input_slug,
"taxCode": product_tax_rate,
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == input_slug
assert data["product"]["taxType"]["taxCode"] == product_tax_rate
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"] is None
def test_create_product_with_negative_weight(
staff_api_client,
product_type,
category,
description_json,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
description_json = json.dumps(description_json)
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"weight": -1,
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
error = data["errors"][0]
assert error["field"] == "weight"
assert error["code"] == ProductErrorCode.INVALID.name
def test_create_product_with_unicode_in_slug_and_name(
staff_api_client,
product_type,
category,
description_json,
permission_manage_products,
):
query = CREATE_PRODUCT_MUTATION
description_json = json.dumps(description_json)
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "わたし-わ にっぽん です"
slug = "わたし-わ-にっぽん-です-2"
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": slug,
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
error = data["errors"]
assert not error
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == slug
def test_create_product_invalid_product_attributes(
staff_api_client,
product_type,
category,
size_attribute,
weight_attribute,
description_json,
permission_manage_products,
monkeypatch,
):
query = CREATE_PRODUCT_MUTATION
description_json = json.dumps(description_json)
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "product-test-slug"
product_charge_taxes = True
product_tax_rate = "STANDARD"
# Mock tax interface with fake response from tax gateway
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(description="", code=product_tax_rate),
)
# Default attribute defined in product_type fixture
color_attr = product_type.product_attributes.get(name="Color")
color_value_slug = color_attr.values.first().slug
color_attr_id = graphene.Node.to_global_id("Attribute", color_attr.id)
# Add second attribute
product_type.product_attributes.add(size_attribute)
size_attr_id = graphene.Node.to_global_id("Attribute", size_attribute.id)
non_existent_attr_value = "The cake is a lie"
# Add third attribute
product_type.product_attributes.add(weight_attribute)
weight_attr_id = graphene.Node.to_global_id("Attribute", weight_attribute.id)
# test creating root product
variables = {
"input": {
"productType": product_type_id,
"category": category_id,
"name": product_name,
"slug": product_slug,
"description": description_json,
"chargeTaxes": product_charge_taxes,
"taxCode": product_tax_rate,
"attributes": [
{"id": color_attr_id, "values": [" "]},
{"id": weight_attr_id, "values": [" "]},
{
"id": size_attr_id,
"values": [non_existent_attr_value, color_value_slug],
},
],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
errors = data["errors"]
assert not data["product"]
assert len(errors) == 2
expected_errors = [
{
"attributes": [color_attr_id, weight_attr_id],
"code": ProductErrorCode.REQUIRED.name,
"field": "attributes",
"message": ANY,
},
{
"attributes": [size_attr_id],
"code": ProductErrorCode.INVALID.name,
"field": "attributes",
"message": ANY,
},
]
for error in expected_errors:
assert error in errors
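# Creating a product for a product type without variants requires only the
# basic fields; the slug is again generated from the name.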
QUERY_CREATE_PRODUCT_WITHOUT_VARIANTS = """
mutation createProduct(
$productTypeId: ID!,
        $categoryId: ID!,
$name: String!)
{
productCreate(
input: {
category: $categoryId,
productType: $productTypeId,
name: $name,
})
{
product {
id
name
slug
rating
category {
name
}
productType {
name
}
}
errors {
message
field
}
}
}
"""
def test_create_product_without_variants(
staff_api_client, product_type_without_variant, category, permission_manage_products
):
query = QUERY_CREATE_PRODUCT_WITHOUT_VARIANTS
product_type = product_type_without_variant
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
product_name = "test name"
product_slug = "test-name"
variables = {
"productTypeId": product_type_id,
"categoryId": category_id,
"name": product_name,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productCreate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["productType"]["name"] == product_type.name
assert data["product"]["category"]["name"] == category.name
def test_product_create_without_product_type(
staff_api_client, category, permission_manage_products
):
query = """
mutation createProduct($categoryId: ID!) {
productCreate(input: {
name: "Product",
productType: "",
category: $categoryId}) {
product {
id
}
errors {
message
field
}
}
}
"""
category_id = graphene.Node.to_global_id("Category", category.id)
response = staff_api_client.post_graphql(
query, {"categoryId": category_id}, permissions=[permission_manage_products]
)
errors = get_graphql_content(response)["data"]["productCreate"]["errors"]
assert errors[0]["field"] == "productType"
assert errors[0]["message"] == "This field cannot be null."
def test_product_create_with_collections_webhook(
staff_api_client,
permission_manage_products,
published_collection,
product_type,
category,
monkeypatch,
):
query = """
mutation createProduct($productTypeId: ID!, $collectionId: ID!, $categoryId: ID!) {
productCreate(input: {
name: "Product",
productType: $productTypeId,
collections: [$collectionId],
category: $categoryId
}) {
product {
id,
collections {
slug
},
category {
slug
}
}
errors {
message
field
}
}
}
"""
def assert_product_has_collections(product):
assert product.collections.count() > 0
assert product.collections.first() == published_collection
monkeypatch.setattr(
"saleor.plugins.manager.PluginsManager.product_created",
lambda _, product: assert_product_has_collections(product),
)
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
category_id = graphene.Node.to_global_id("Category", category.pk)
collection_id = graphene.Node.to_global_id("Collection", published_collection.pk)
response = staff_api_client.post_graphql(
query,
{
"productTypeId": product_type_id,
"categoryId": category_id,
"collectionId": collection_id,
},
permissions=[permission_manage_products],
)
get_graphql_content(response)
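# Tests for the productUpdate mutation: field updates, description handling,
# and attribute value replacement.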
MUTATION_UPDATE_PRODUCT = """
mutation updateProduct($productId: ID!, $input: ProductInput!) {
productUpdate(id: $productId, input: $input) {
product {
category {
name
}
rating
description
chargeTaxes
variants {
name
}
taxType {
taxCode
description
}
name
slug
productType {
name
}
attributes {
attribute {
id
name
}
values {
id
name
slug
boolean
reference
file {
url
contentType
}
}
}
}
errors {
message
field
code
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.product_updated")
@patch("saleor.plugins.manager.PluginsManager.product_created")
def test_update_product(
created_webhook_mock,
updated_webhook_mock,
staff_api_client,
category,
non_default_category,
product,
other_description_json,
permission_manage_products,
monkeypatch,
color_attribute,
):
query = MUTATION_UPDATE_PRODUCT
expected_other_description_json = other_description_json
text = expected_other_description_json["blocks"][0]["data"]["text"]
expected_other_description_json["blocks"][0]["data"]["text"] = strip_tags(text)
other_description_json = json.dumps(other_description_json)
product_id = graphene.Node.to_global_id("Product", product.pk)
category_id = graphene.Node.to_global_id("Category", non_default_category.pk)
product_name = "updated name"
product_slug = "updated-product"
product_charge_taxes = True
product_tax_rate = "STANDARD"
# Mock tax interface with fake response from tax gateway
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(description="", code=product_tax_rate),
)
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)
variables = {
"productId": product_id,
"input": {
"category": category_id,
"name": product_name,
"slug": product_slug,
"description": other_description_json,
"chargeTaxes": product_charge_taxes,
"taxCode": product_tax_rate,
"attributes": [{"id": attribute_id, "values": ["Rainbow"]}],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["description"] == json.dumps(expected_other_description_json)
assert data["product"]["chargeTaxes"] == product_charge_taxes
assert data["product"]["taxType"]["taxCode"] == product_tax_rate
assert not data["product"]["category"]["name"] == category.name
attributes = data["product"]["attributes"]
assert len(attributes) == 1
assert len(attributes[0]["values"]) == 1
assert attributes[0]["attribute"]["id"] == attribute_id
assert attributes[0]["values"][0]["name"] == "Rainbow"
assert attributes[0]["values"][0]["slug"] == "rainbow"
updated_webhook_mock.assert_called_once_with(product)
created_webhook_mock.assert_not_called()
def test_update_and_search_product_by_description(
staff_api_client,
category,
non_default_category,
product,
other_description_json,
permission_manage_products,
color_attribute,
):
query = MUTATION_UPDATE_PRODUCT
other_description_json = json.dumps(other_description_json)
product_id = graphene.Node.to_global_id("Product", product.pk)
category_id = graphene.Node.to_global_id("Category", non_default_category.pk)
product_name = "updated name"
product_slug = "updated-product"
variables = {
"productId": product_id,
"input": {
"category": category_id,
"name": product_name,
"slug": product_slug,
"description": other_description_json,
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert not data["errors"]
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["description"] == other_description_json
def test_update_product_without_description_clear_description_plaintext(
staff_api_client,
category,
non_default_category,
product,
other_description_json,
permission_manage_products,
color_attribute,
):
query = MUTATION_UPDATE_PRODUCT
description_plaintext = "some desc"
product.description_plaintext = description_plaintext
product.save()
product_id = graphene.Node.to_global_id("Product", product.pk)
category_id = graphene.Node.to_global_id("Category", non_default_category.pk)
product_name = "updated name"
product_slug = "updated-product"
variables = {
"productId": product_id,
"input": {
"category": category_id,
"name": product_name,
"slug": product_slug,
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert not data["errors"]
assert data["product"]["name"] == product_name
assert data["product"]["slug"] == product_slug
assert data["product"]["description"] is None
product.refresh_from_db()
assert product.description_plaintext == ""
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_boolean_attribute_value(
updated_webhook_mock,
staff_api_client,
product,
product_type,
boolean_attribute,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id("Attribute", boolean_attribute.pk)
product_type.product_attributes.add(boolean_attribute)
new_value = False
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "boolean": new_value}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
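    # boolean values are rendered with a generated name ("Boolean: No")
    # and an "<attribute_id>_false" slug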
expected_att_data = {
"attribute": {"id": attribute_id, "name": boolean_attribute.name},
"values": [
{
"id": ANY,
"name": "Boolean: No",
"boolean": new_value,
"slug": f"{boolean_attribute.id}_false",
"reference": None,
"file": None,
}
],
}
assert expected_att_data in attributes
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_file_attribute_value(
updated_webhook_mock,
staff_api_client,
file_attribute,
product,
product_type,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id("Attribute", file_attribute.pk)
product_type.product_attributes.add(file_attribute)
new_value = "new_file.json"
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "file": new_value}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
expected_file_att_data = {
"attribute": {"id": attribute_id, "name": file_attribute.name},
"values": [
{
"id": ANY,
"name": new_value,
"slug": slugify(new_value),
"reference": None,
"file": {
"url": "http://testserver/media/" + new_value,
"contentType": None,
},
"boolean": None,
}
],
}
assert expected_file_att_data in attributes
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_file_attribute_value_new_value_is_not_created(
updated_webhook_mock,
staff_api_client,
file_attribute,
product,
product_type,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id("Attribute", file_attribute.pk)
product_type.product_attributes.add(file_attribute)
existing_value = file_attribute.values.first()
associate_attribute_values_to_instance(product, file_attribute, existing_value)
values_count = file_attribute.values.count()
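    # re-submitting the existing file URL should reuse the value rather than create a new one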
variables = {
"productId": product_id,
"input": {
"attributes": [{"id": attribute_id, "file": existing_value.file_url}]
},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
expected_file_att_data = {
"attribute": {"id": attribute_id, "name": file_attribute.name},
"values": [
{
"id": graphene.Node.to_global_id("AttributeValue", existing_value.pk),
"name": existing_value.name,
"slug": existing_value.slug,
"reference": None,
"file": {
"url": f"http://testserver/media/{existing_value.file_url}",
"contentType": existing_value.content_type,
},
"boolean": None,
}
],
}
assert expected_file_att_data in attributes
file_attribute.refresh_from_db()
assert file_attribute.values.count() == values_count
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_numeric_attribute_value(
updated_webhook_mock,
staff_api_client,
product,
product_type,
numeric_attribute,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id("Attribute", numeric_attribute.pk)
product_type.product_attributes.add(numeric_attribute)
new_value = "45.2"
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "values": [new_value]}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
expected_att_data = {
"attribute": {"id": attribute_id, "name": numeric_attribute.name},
"values": [
{
"id": ANY,
"name": new_value,
"slug": slugify(
f"{product.id}_{numeric_attribute.id}", allow_unicode=True
),
"reference": None,
"file": None,
"boolean": None,
}
],
}
assert expected_att_data in attributes
updated_webhook_mock.assert_called_once_with(product)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_numeric_attribute_value_new_value_is_not_created(
updated_webhook_mock,
staff_api_client,
numeric_attribute,
product,
product_type,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id("Attribute", numeric_attribute.pk)
product_type.product_attributes.add(numeric_attribute)
slug_value = slugify(f"{product.id}_{numeric_attribute.id}", allow_unicode=True)
value = AttributeValue.objects.create(
attribute=numeric_attribute, slug=slug_value, name="20.0"
)
associate_attribute_values_to_instance(product, numeric_attribute, value)
value_count = AttributeValue.objects.count()
new_value = "45.2"
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "values": [new_value]}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
expected_att_data = {
"attribute": {"id": attribute_id, "name": numeric_attribute.name},
"values": [
{
"id": ANY,
"name": new_value,
"slug": slug_value,
"reference": None,
"file": None,
"boolean": None,
}
],
}
assert expected_att_data in attributes
assert AttributeValue.objects.count() == value_count
value.refresh_from_db()
assert value.name == new_value
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_clear_attribute_values(
updated_webhook_mock,
staff_api_client,
product,
product_type,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
product_attr = product.attributes.first()
attribute = product_attr.assignment.attribute
attribute.value_required = False
attribute.save(update_fields=["value_required"])
attribute_id = graphene.Node.to_global_id("Attribute", attribute.pk)
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "values": []}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 1
assert not attributes[0]["values"]
with pytest.raises(product_attr._meta.model.DoesNotExist):
product_attr.refresh_from_db()
updated_webhook_mock.assert_called_once_with(product)
@freeze_time("2020-03-18 12:00:00")
def test_update_product_rating(
staff_api_client,
product,
permission_manage_products,
):
query = MUTATION_UPDATE_PRODUCT
product.rating = 5.5
product.save(update_fields=["rating"])
product_id = graphene.Node.to_global_id("Product", product.pk)
expected_rating = 9.57
variables = {"productId": product_id, "input": {"rating": expected_rating}}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
assert data["product"]["rating"] == expected_rating
product.refresh_from_db()
assert product.rating == expected_rating
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_page_reference_attribute_value(
updated_webhook_mock,
staff_api_client,
product_type_page_reference_attribute,
product,
product_type,
page,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.pk
)
product_type.product_attributes.add(product_type_page_reference_attribute)
values_count = product_type_page_reference_attribute.values.count()
reference = graphene.Node.to_global_id("Page", page.pk)
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "references": [reference]}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
expected_file_att_data = {
"attribute": {
"id": attribute_id,
"name": product_type_page_reference_attribute.name,
},
"values": [
{
"id": ANY,
"name": page.title,
"slug": f"{product.id}_{page.id}",
"file": None,
"reference": reference,
"boolean": None,
}
],
}
assert expected_file_att_data in attributes
updated_webhook_mock.assert_called_once_with(product)
product_type_page_reference_attribute.refresh_from_db()
assert product_type_page_reference_attribute.values.count() == values_count + 1
def test_update_product_with_empty_input_collections(
product, permission_manage_products, staff_api_client
):
# given
query = """
mutation updateProduct($productId: ID!, $input: ProductInput!) {
productUpdate(id: $productId, input: $input) {
productErrors {
field
message
code
}
product {
id
}
}
}
"""
product_id = graphene.Node.to_global_id("Product", product.pk)
variables = {
"productId": product_id,
"input": {"collections": [""]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert len(data["productErrors"]) == 1
product_errors = data["productErrors"][0]
assert product_errors["code"] == ProductErrorCode.GRAPHQL_ERROR.name
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_page_reference_attribute_existing_value(
updated_webhook_mock,
staff_api_client,
product_type_page_reference_attribute,
product,
product_type,
page,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.pk
)
product_type.product_attributes.add(product_type_page_reference_attribute)
attr_value = AttributeValue.objects.create(
attribute=product_type_page_reference_attribute,
name=page.title,
slug=f"{product.pk}_{page.pk}",
)
associate_attribute_values_to_instance(
product, product_type_page_reference_attribute, attr_value
)
values_count = product_type_page_reference_attribute.values.count()
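    # re-submitting the same page reference should reuse the existing attribute value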
reference = graphene.Node.to_global_id("Page", page.pk)
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "references": [reference]}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
expected_file_att_data = {
"attribute": {
"id": attribute_id,
"name": product_type_page_reference_attribute.name,
},
"values": [
{
"id": graphene.Node.to_global_id("AttributeValue", attr_value.pk),
"name": page.title,
"slug": f"{product.id}_{page.id}",
"file": None,
"reference": reference,
"boolean": None,
}
],
}
assert expected_file_att_data in attributes
updated_webhook_mock.assert_called_once_with(product)
product_type_page_reference_attribute.refresh_from_db()
assert product_type_page_reference_attribute.values.count() == values_count
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_page_reference_attribute_value_not_given(
updated_webhook_mock,
staff_api_client,
product_type_page_reference_attribute,
product,
product_type,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_type_page_reference_attribute.value_required = True
product_type_page_reference_attribute.save(update_fields=["value_required"])
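    # plain "values" input is not accepted for a required reference attribute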
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.pk
)
product_type.product_attributes.add(product_type_page_reference_attribute)
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "values": ["test"]}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
errors = data["errors"]
assert not data["product"]
assert len(errors) == 1
assert errors[0]["field"] == "attributes"
assert errors[0]["code"] == AttributeErrorCode.REQUIRED.name
updated_webhook_mock.assert_not_called()
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_product_reference_attribute_value(
updated_webhook_mock,
staff_api_client,
product_type_product_reference_attribute,
product_list,
product_type,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product = product_list[0]
product_id = graphene.Node.to_global_id("Product", product.pk)
product_ref = product_list[1]
attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.pk
)
product_type.product_attributes.add(product_type_product_reference_attribute)
values_count = product_type_product_reference_attribute.values.count()
reference = graphene.Node.to_global_id("Product", product_ref.pk)
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "references": [reference]}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
expected_file_att_data = {
"attribute": {
"id": attribute_id,
"name": product_type_product_reference_attribute.name,
},
"values": [
{
"id": ANY,
"name": product_ref.name,
"slug": f"{product.id}_{product_ref.id}",
"file": None,
"reference": reference,
"boolean": None,
}
],
}
assert expected_file_att_data in attributes
updated_webhook_mock.assert_called_once_with(product)
product_type_product_reference_attribute.refresh_from_db()
assert product_type_product_reference_attribute.values.count() == values_count + 1
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_product_reference_attribute_existing_value(
updated_webhook_mock,
staff_api_client,
product_type_product_reference_attribute,
product_list,
product_type,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product = product_list[0]
product_id = graphene.Node.to_global_id("Product", product.pk)
product_ref = product_list[1]
attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.pk
)
product_type.product_attributes.add(product_type_product_reference_attribute)
attr_value = AttributeValue.objects.create(
attribute=product_type_product_reference_attribute,
name=product_ref.name,
slug=f"{product.pk}_{product_ref.pk}",
)
associate_attribute_values_to_instance(
product, product_type_product_reference_attribute, attr_value
)
values_count = product_type_product_reference_attribute.values.count()
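    # re-submitting the same product reference should reuse the existing attribute value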
reference = graphene.Node.to_global_id("Product", product_ref.pk)
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "references": [reference]}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 2
expected_file_att_data = {
"attribute": {
"id": attribute_id,
"name": product_type_product_reference_attribute.name,
},
"values": [
{
"id": graphene.Node.to_global_id("AttributeValue", attr_value.pk),
"name": product_ref.name,
"slug": f"{product.id}_{product_ref.id}",
"file": None,
"reference": reference,
"boolean": None,
}
],
}
assert expected_file_att_data in attributes
updated_webhook_mock.assert_called_once_with(product)
product_type_product_reference_attribute.refresh_from_db()
assert product_type_product_reference_attribute.values.count() == values_count
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_with_product_reference_attribute_value_not_given(
updated_webhook_mock,
staff_api_client,
product_type_product_reference_attribute,
product,
product_type,
permission_manage_products,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_type_product_reference_attribute.value_required = True
product_type_product_reference_attribute.save(update_fields=["value_required"])
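    # sending plain values instead of references for a required attribute must fail with REQUIRED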
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_product_reference_attribute.pk
)
product_type.product_attributes.add(product_type_product_reference_attribute)
variables = {
"productId": product_id,
"input": {"attributes": [{"id": attribute_id, "values": ["test"]}]},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
errors = data["errors"]
assert not data["product"]
assert len(errors) == 1
assert errors[0]["field"] == "attributes"
assert errors[0]["code"] == AttributeErrorCode.REQUIRED.name
updated_webhook_mock.assert_not_called()
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_update_product_change_values_ordering(
updated_webhook_mock,
staff_api_client,
product,
permission_manage_products,
page_list,
product_type_page_reference_attribute,
):
# given
query = MUTATION_UPDATE_PRODUCT
product_id = graphene.Node.to_global_id("Product", product.pk)
attribute_id = graphene.Node.to_global_id(
"Attribute", product_type_page_reference_attribute.pk
)
product_type = product.product_type
product_type.product_attributes.set([product_type_page_reference_attribute])
attr_value_1 = AttributeValue.objects.create(
attribute=product_type_page_reference_attribute,
name=page_list[0].title,
slug=f"{product.pk}_{page_list[0].pk}",
)
attr_value_2 = AttributeValue.objects.create(
attribute=product_type_page_reference_attribute,
name=page_list[1].title,
slug=f"{product.pk}_{page_list[1].pk}",
)
associate_attribute_values_to_instance(
product, product_type_page_reference_attribute, attr_value_2, attr_value_1
)
assert list(
product.attributes.first().productvalueassignment.values_list(
"value_id", flat=True
)
) == [attr_value_2.pk, attr_value_1.pk]
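    # the order of references in the input should override the reversed stored order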
variables = {
"productId": product_id,
"input": {
"attributes": [
{
"id": attribute_id,
"references": [
graphene.Node.to_global_id("Page", page_list[0].pk),
graphene.Node.to_global_id("Page", page_list[1].pk),
],
}
]
},
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
assert data["errors"] == []
attributes = data["product"]["attributes"]
assert len(attributes) == 1
values = attributes[0]["values"]
assert len(values) == 2
assert [value["id"] for value in values] == [
graphene.Node.to_global_id("AttributeValue", val.pk)
for val in [attr_value_1, attr_value_2]
]
product.refresh_from_db()
assert list(
product.attributes.first().productvalueassignment.values_list(
"value_id", flat=True
)
) == [attr_value_1.pk, attr_value_2.pk]
updated_webhook_mock.assert_called_once_with(product)
UPDATE_PRODUCT_SLUG_MUTATION = """
mutation($id: ID!, $slug: String) {
productUpdate(
id: $id
input: {
slug: $slug
}
) {
product{
name
slug
}
errors {
field
message
code
}
}
}
"""
@pytest.mark.parametrize(
"input_slug, expected_slug, error_message",
[
("test-slug", "test-slug", None),
("", "", "Slug value cannot be blank."),
(None, "", "Slug value cannot be blank."),
],
)
def test_update_product_slug(
staff_api_client,
product,
permission_manage_products,
input_slug,
expected_slug,
error_message,
):
query = UPDATE_PRODUCT_SLUG_MUTATION
old_slug = product.slug
assert old_slug != input_slug
node_id = graphene.Node.to_global_id("Product", product.id)
variables = {"slug": input_slug, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
errors = data["errors"]
if not error_message:
assert not errors
assert data["product"]["slug"] == expected_slug
else:
assert errors
assert errors[0]["field"] == "slug"
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
def test_update_product_slug_exists(
staff_api_client, product, permission_manage_products
):
query = UPDATE_PRODUCT_SLUG_MUTATION
input_slug = "test-slug"
second_product = Product.objects.get(pk=product.pk)
second_product.pk = None
second_product.slug = input_slug
second_product.save()
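    # a duplicate of the product now owns the target slug, so the update must fail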
assert input_slug != product.slug
node_id = graphene.Node.to_global_id("Product", product.id)
variables = {"slug": input_slug, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
errors = data["errors"]
assert errors
assert errors[0]["field"] == "slug"
assert errors[0]["code"] == ProductErrorCode.UNIQUE.name
@pytest.mark.parametrize(
"input_slug, expected_slug, input_name, error_message, error_field",
[
("test-slug", "test-slug", "New name", None, None),
("", "", "New name", "Slug value cannot be blank.", "slug"),
(None, "", "New name", "Slug value cannot be blank.", "slug"),
("test-slug", "", None, "This field cannot be blank.", "name"),
("test-slug", "", "", "This field cannot be blank.", "name"),
(None, None, None, "Slug value cannot be blank.", "slug"),
],
)
def test_update_product_slug_and_name(
staff_api_client,
product,
permission_manage_products,
input_slug,
expected_slug,
input_name,
error_message,
error_field,
):
query = """
mutation($id: ID!, $name: String, $slug: String) {
productUpdate(
id: $id
input: {
name: $name
slug: $slug
}
) {
product{
name
slug
}
errors {
field
message
code
}
}
}
"""
old_name = product.name
old_slug = product.slug
assert input_slug != old_slug
assert input_name != old_name
node_id = graphene.Node.to_global_id("Product", product.id)
variables = {"slug": input_slug, "name": input_name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
product.refresh_from_db()
data = content["data"]["productUpdate"]
errors = data["errors"]
if not error_message:
assert data["product"]["name"] == input_name == product.name
assert data["product"]["slug"] == input_slug == product.slug
else:
assert errors
assert errors[0]["field"] == error_field
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
SET_ATTRIBUTES_TO_PRODUCT_QUERY = """
mutation updateProduct($productId: ID!, $attributes: [AttributeValueInput!]) {
productUpdate(id: $productId, input: { attributes: $attributes }) {
errors {
message
field
code
attributes
}
}
}
"""
def test_update_product_can_only_assign_multiple_values_to_valid_input_types(
staff_api_client, product, permission_manage_products, color_attribute
):
"""Ensures you cannot assign multiple values to input types
that are not multi-select. This also ensures multi-select types
can be assigned multiple values as intended."""
staff_api_client.user.user_permissions.add(permission_manage_products)
multi_values_attr = Attribute.objects.create(
name="multi", slug="multi-vals", input_type=AttributeInputType.MULTISELECT
)
multi_values_attr.product_types.add(product.product_type)
multi_values_attr_id = graphene.Node.to_global_id("Attribute", multi_values_attr.id)
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
# Try to assign multiple values from an attribute that does not support such things
variables = {
"productId": graphene.Node.to_global_id("Product", product.pk),
"attributes": [{"id": color_attribute_id, "values": ["red", "blue"]}],
}
data = get_graphql_content(
staff_api_client.post_graphql(SET_ATTRIBUTES_TO_PRODUCT_QUERY, variables)
)["data"]["productUpdate"]
assert data["errors"] == [
{
"field": "attributes",
"code": ProductErrorCode.INVALID.name,
"message": ANY,
"attributes": [color_attribute_id],
}
]
# Try to assign multiple values from a valid attribute
variables["attributes"] = [{"id": multi_values_attr_id, "values": ["a", "b"]}]
data = get_graphql_content(
staff_api_client.post_graphql(SET_ATTRIBUTES_TO_PRODUCT_QUERY, variables)
)["data"]["productUpdate"]
assert not data["errors"]
def test_update_product_with_existing_attribute_value(
staff_api_client, product, permission_manage_products, color_attribute
):
"""Ensure assigning an existing value to a product doesn't create a new
attribute value."""
staff_api_client.user.user_permissions.add(permission_manage_products)
expected_attribute_values_count = color_attribute.values.count()
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
color = color_attribute.values.only("name").first()
    # Assign an already-existing attribute value and check that no new value is created
variables = {
"productId": graphene.Node.to_global_id("Product", product.pk),
"attributes": [{"id": color_attribute_id, "values": [color.name]}],
}
data = get_graphql_content(
staff_api_client.post_graphql(SET_ATTRIBUTES_TO_PRODUCT_QUERY, variables)
)["data"]["productUpdate"]
assert not data["errors"]
assert (
color_attribute.values.count() == expected_attribute_values_count
), "A new attribute value shouldn't have been created"
def test_update_product_without_supplying_required_product_attribute(
staff_api_client, product, permission_manage_products, color_attribute
):
"""Ensure assigning an existing value to a product doesn't create a new
attribute value."""
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = product.product_type
color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
# Create and assign a new attribute requiring a value to be always supplied
required_attribute = Attribute.objects.create(
name="Required One", slug="required-one", value_required=True
)
product_type.product_attributes.add(required_attribute)
required_attribute_id = graphene.Node.to_global_id(
"Attribute", required_attribute.id
)
    # Supply a value for the color attribute only, leaving the required attribute unset
variables = {
"productId": graphene.Node.to_global_id("Product", product.pk),
"attributes": [{"id": color_attribute_id, "values": ["Blue"]}],
}
data = get_graphql_content(
staff_api_client.post_graphql(SET_ATTRIBUTES_TO_PRODUCT_QUERY, variables)
)["data"]["productUpdate"]
assert data["errors"] == [
{
"field": "attributes",
"code": ProductErrorCode.REQUIRED.name,
"message": ANY,
"attributes": [required_attribute_id],
}
]
def test_update_product_with_non_existing_attribute(
staff_api_client, product, permission_manage_products, color_attribute
):
    """Ensure referencing a non-existent attribute returns a NOT_FOUND error."""
    staff_api_client.user.user_permissions.add(permission_manage_products)
    non_existent_attribute_pk = 0
    invalid_attribute_id = graphene.Node.to_global_id(
        "Attribute", non_existent_attribute_pk
    )
    # Reference an attribute ID that does not exist in the database
variables = {
"productId": graphene.Node.to_global_id("Product", product.pk),
"attributes": [{"id": invalid_attribute_id, "values": ["hello"]}],
}
data = get_graphql_content(
staff_api_client.post_graphql(SET_ATTRIBUTES_TO_PRODUCT_QUERY, variables)
)["data"]["productUpdate"]
assert data["errors"] == [
{
"field": "attributes",
"code": ProductErrorCode.NOT_FOUND.name,
"message": ANY,
"attributes": None,
}
]
def test_update_product_with_no_attribute_slug_or_id(
staff_api_client, product, permission_manage_products, color_attribute
):
"""Ensure only supplying values triggers a validation error."""
staff_api_client.user.user_permissions.add(permission_manage_products)
    # Submit a value entry without an attribute id or slug
variables = {
"productId": graphene.Node.to_global_id("Product", product.pk),
"attributes": [{"values": ["Oopsie!"]}],
}
data = get_graphql_content(
staff_api_client.post_graphql(SET_ATTRIBUTES_TO_PRODUCT_QUERY, variables)
)["data"]["productUpdate"]
assert data["errors"] == [
{
"field": "attributes",
"code": ProductErrorCode.REQUIRED.name,
"message": ANY,
"attributes": None,
}
]
def test_update_product_with_negative_weight(
staff_api_client, product_with_default_variant, permission_manage_products, product
):
query = """
mutation updateProduct(
$productId: ID!,
$weight: WeightScalar)
{
productUpdate(
id: $productId,
input: {
weight: $weight
})
{
product {
id
}
errors {
field
message
code
}
}
}
"""
product = product_with_default_variant
product_id = graphene.Node.to_global_id("Product", product.pk)
variables = {"productId": product_id, "weight": -1}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productUpdate"]
error = data["errors"][0]
assert error["field"] == "weight"
assert error["code"] == ProductErrorCode.INVALID.name
UPDATE_PRODUCT = """
mutation updateProduct(
$productId: ID!,
$input: ProductInput!)
{
productUpdate(
id: $productId,
input: $input)
{
product {
id
name
slug
}
errors {
message
field
}
}
}"""
def test_update_product_name(staff_api_client, permission_manage_products, product):
query = UPDATE_PRODUCT
product_slug = product.slug
new_name = "example-product"
assert new_name != product.name
product_id = graphene.Node.to_global_id("Product", product.id)
variables = {"productId": product_id, "input": {"name": new_name}}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
data = get_graphql_content(response)["data"]["productUpdate"]
assert data["product"]["name"] == new_name
assert data["product"]["slug"] == product_slug
def test_update_product_slug_with_existing_value(
staff_api_client, permission_manage_products, product
):
query = UPDATE_PRODUCT
second_product = Product.objects.get(pk=product.pk)
second_product.id = None
second_product.slug = "second-product"
second_product.save()
assert product.slug != second_product.slug
product_id = graphene.Node.to_global_id("Product", product.id)
variables = {"productId": product_id, "input": {"slug": second_product.slug}}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
data = get_graphql_content(response)["data"]["productUpdate"]
errors = data["errors"]
assert errors
assert errors[0]["field"] == "slug"
assert errors[0]["message"] == "Product with this Slug already exists."
DELETE_PRODUCT_MUTATION = """
mutation DeleteProduct($id: ID!) {
productDelete(id: $id) {
product {
name
id
attributes {
values {
value
name
}
}
}
errors {
field
message
}
}
}
"""
@patch("saleor.order.tasks.recalculate_orders_task.delay")
def test_delete_product(
mocked_recalculate_orders_task,
staff_api_client,
product,
permission_manage_products,
):
query = DELETE_PRODUCT_MUTATION
node_id = graphene.Node.to_global_id("Product", product.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productDelete"]
assert data["product"]["name"] == product.name
with pytest.raises(product._meta.model.DoesNotExist):
product.refresh_from_db()
assert node_id == data["product"]["id"]
mocked_recalculate_orders_task.assert_not_called()
@patch("saleor.product.signals.delete_versatile_image")
@patch("saleor.order.tasks.recalculate_orders_task.delay")
def test_delete_product_with_image(
mocked_recalculate_orders_task,
delete_versatile_image_mock,
staff_api_client,
product_with_image,
variant_with_image,
permission_manage_products,
media_root,
):
"""Ensure deleting product delete also product and variants images from storage."""
# given
query = DELETE_PRODUCT_MUTATION
product = product_with_image
variant = product.variants.first()
node_id = graphene.Node.to_global_id("Product", product.id)
product_img_paths = [media.image for media in product.media.all()]
variant_img_paths = [media.image for media in variant.media.all()]
images = product_img_paths + variant_img_paths
variables = {"id": node_id}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content(response)
data = content["data"]["productDelete"]
assert data["product"]["name"] == product.name
with pytest.raises(product._meta.model.DoesNotExist):
product.refresh_from_db()
assert node_id == data["product"]["id"]
assert delete_versatile_image_mock.call_count == len(images)
assert {
call_args.args[0] for call_args in delete_versatile_image_mock.call_args_list
} == set(images)
mocked_recalculate_orders_task.assert_not_called()
@freeze_time("1914-06-28 10:50")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_for_event.delay")
@patch("saleor.order.tasks.recalculate_orders_task.delay")
def test_delete_product_trigger_webhook(
mocked_recalculate_orders_task,
mocked_webhook_trigger,
staff_api_client,
product,
permission_manage_products,
settings,
):
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
query = DELETE_PRODUCT_MUTATION
node_id = graphene.Node.to_global_id("Product", product.id)
variants_id = list(product.variants.all().values_list("id", flat=True))
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productDelete"]
assert data["product"]["name"] == product.name
with pytest.raises(product._meta.model.DoesNotExist):
product.refresh_from_db()
assert node_id == data["product"]["id"]
expected_data = generate_product_deleted_payload(
product, variants_id, staff_api_client.user
)
mocked_webhook_trigger.assert_called_once_with(
WebhookEventAsyncType.PRODUCT_DELETED, expected_data
)
mocked_recalculate_orders_task.assert_not_called()
@patch("saleor.attribute.signals.delete_from_storage_task.delay")
@patch("saleor.order.tasks.recalculate_orders_task.delay")
def test_delete_product_with_file_attribute(
mocked_recalculate_orders_task,
delete_from_storage_task_mock,
staff_api_client,
product,
permission_manage_products,
file_attribute,
):
query = DELETE_PRODUCT_MUTATION
product_type = product.product_type
product_type.product_attributes.add(file_attribute)
existing_value = file_attribute.values.first()
associate_attribute_values_to_instance(product, file_attribute, existing_value)
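    # deleting the product should also delete the value and schedule its file
    # for removal from storage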
node_id = graphene.Node.to_global_id("Product", product.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productDelete"]
assert data["product"]["name"] == product.name
with pytest.raises(product._meta.model.DoesNotExist):
product.refresh_from_db()
assert node_id == data["product"]["id"]
mocked_recalculate_orders_task.assert_not_called()
with pytest.raises(existing_value._meta.model.DoesNotExist):
existing_value.refresh_from_db()
delete_from_storage_task_mock.assert_called_once_with(existing_value.file_url)
def test_delete_product_removes_checkout_lines(
staff_api_client,
checkout_with_items,
permission_manage_products,
settings,
):
query = DELETE_PRODUCT_MUTATION
checkout = checkout_with_items
line = checkout.lines.first()
product = line.variant.product
node_id = graphene.Node.to_global_id("Product", product.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productDelete"]
assert data["product"]["name"] == product.name
with pytest.raises(product._meta.model.DoesNotExist):
product.refresh_from_db()
with pytest.raises(line._meta.model.DoesNotExist):
line.refresh_from_db()
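    # the remaining checkout lines (for other products) are left untouched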
assert checkout.lines.all().exists()
checkout.refresh_from_db()
assert node_id == data["product"]["id"]
@patch("saleor.order.tasks.recalculate_orders_task.delay")
def test_delete_product_variant_in_draft_order(
mocked_recalculate_orders_task,
staff_api_client,
product_with_two_variants,
permission_manage_products,
order_list,
channel_USD,
):
query = DELETE_PRODUCT_MUTATION
product = product_with_two_variants
not_draft_order = order_list[1]
draft_order = order_list[0]
draft_order.status = OrderStatus.DRAFT
draft_order.save(update_fields=["status"])
draft_order_lines_pks = []
not_draft_order_lines_pks = []
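    # create a line for every variant in both a draft and a non-draft order;
    # deleting the product should only remove the draft-order lines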
for variant in product.variants.all():
variant_channel_listing = variant.channel_listings.get(channel=channel_USD)
net = variant.get_price(product, [], channel_USD, variant_channel_listing, None)
gross = Money(amount=net.amount, currency=net.currency)
unit_price = TaxedMoney(net=net, gross=gross)
quantity = 3
total_price = unit_price * quantity
order_line = OrderLine.objects.create(
variant=variant,
order=draft_order,
product_name=str(variant.product),
variant_name=str(variant),
product_sku=variant.sku,
product_variant_id=variant.get_global_id(),
is_shipping_required=variant.is_shipping_required(),
is_gift_card=variant.is_gift_card(),
            unit_price=unit_price,
total_price=total_price,
quantity=quantity,
)
draft_order_lines_pks.append(order_line.pk)
order_line_not_draft = OrderLine.objects.create(
variant=variant,
order=not_draft_order,
product_name=str(variant.product),
variant_name=str(variant),
product_sku=variant.sku,
product_variant_id=variant.get_global_id(),
is_shipping_required=variant.is_shipping_required(),
is_gift_card=variant.is_gift_card(),
            unit_price=unit_price,
total_price=total_price,
quantity=quantity,
)
not_draft_order_lines_pks.append(order_line_not_draft.pk)
node_id = graphene.Node.to_global_id("Product", product.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productDelete"]
assert data["product"]["name"] == product.name
with pytest.raises(product._meta.model.DoesNotExist):
product.refresh_from_db()
assert node_id == data["product"]["id"]
assert not OrderLine.objects.filter(pk__in=draft_order_lines_pks).exists()
assert OrderLine.objects.filter(pk__in=not_draft_order_lines_pks).exists()
mocked_recalculate_orders_task.assert_called_once_with([draft_order.id])
event = OrderEvent.objects.filter(
type=OrderEvents.ORDER_LINE_PRODUCT_DELETED
).last()
assert event
assert event.order == draft_order
assert event.user == staff_api_client.user
expected_params = [
{
"item": str(line),
"line_pk": line.pk,
"quantity": line.quantity,
}
for line in draft_order.lines.all()
]
for param in expected_params:
assert param in event.parameters
def test_product_type(user_api_client, product_type, channel_USD):
query = """
query ($channel: String){
productTypes(first: 20) {
totalCount
edges {
node {
id
name
products(first: 1, channel: $channel) {
edges {
node {
id
}
}
}
}
}
}
}
"""
variables = {"channel": channel_USD.slug}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
no_product_types = ProductType.objects.count()
assert content["data"]["productTypes"]["totalCount"] == no_product_types
assert len(content["data"]["productTypes"]["edges"]) == no_product_types
PRODUCT_TYPE_QUERY = """
query getProductType(
$id: ID!, $variantSelection: VariantAttributeScope, $channel: String
) {
productType(id: $id) {
name
variantAttributes(variantSelection: $variantSelection) {
slug
}
products(first: 20, channel:$channel) {
totalCount
edges {
node {
name
}
}
}
taxType {
taxCode
description
}
}
}
"""
def test_product_type_query(
user_api_client,
staff_api_client,
product_type,
file_attribute_with_file_input_type_without_values,
product,
permission_manage_products,
monkeypatch,
channel_USD,
):
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(code="123", description="Standard Taxes"),
)
query = PRODUCT_TYPE_QUERY
no_products = Product.objects.count()
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
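    # the unpublished product is hidden from regular users but still visible to staff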
product_type.variant_attributes.add(
file_attribute_with_file_input_type_without_values
)
variant_attributes_count = product_type.variant_attributes.count()
variables = {
"id": graphene.Node.to_global_id("ProductType", product_type.id),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]
assert data["productType"]["products"]["totalCount"] == no_products - 1
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]
assert data["productType"]["products"]["totalCount"] == no_products
assert data["productType"]["taxType"]["taxCode"] == "123"
assert data["productType"]["taxType"]["description"] == "Standard Taxes"
assert len(data["productType"]["variantAttributes"]) == variant_attributes_count
def test_product_type_query_invalid_id(
staff_api_client, product, channel_USD, permission_manage_products
):
product_type_id = "'"
variables = {
"id": product_type_id,
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(PRODUCT_TYPE_QUERY, variables)
content = get_graphql_content_from_response(response)
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == f"Couldn't resolve id: {product_type_id}."
assert content["data"]["productType"] is None
def test_product_type_query_object_with_given_id_does_not_exist(
staff_api_client, product, channel_USD, permission_manage_products
):
product_type_id = graphene.Node.to_global_id("ProductType", -1)
variables = {
"id": product_type_id,
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(PRODUCT_TYPE_QUERY, variables)
content = get_graphql_content(response)
assert content["data"]["productType"] is None
def test_product_type_query_with_invalid_object_type(
staff_api_client, product, channel_USD, permission_manage_products
):
product_type_id = graphene.Node.to_global_id("Product", product.product_type.pk)
variables = {
"id": product_type_id,
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(PRODUCT_TYPE_QUERY, variables)
content = get_graphql_content(response)
assert content["data"]["productType"] is None
@pytest.mark.parametrize(
"variant_selection",
[
VariantAttributeScope.ALL.name,
VariantAttributeScope.VARIANT_SELECTION.name,
VariantAttributeScope.NOT_VARIANT_SELECTION.name,
],
)
def test_product_type_query_only_variant_selections_value_set(
variant_selection,
user_api_client,
staff_api_client,
product_type,
file_attribute_with_file_input_type_without_values,
author_page_attribute,
product_type_page_reference_attribute,
product,
permission_manage_products,
monkeypatch,
channel_USD,
):
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(code="123", description="Standard Taxes"),
)
query = PRODUCT_TYPE_QUERY
no_products = Product.objects.count()
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
product_type.variant_attributes.add(
file_attribute_with_file_input_type_without_values,
author_page_attribute,
product_type_page_reference_attribute,
)
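    # VARIANT_SELECTION returns only dropdown product-type attributes,
    # NOT_VARIANT_SELECTION returns the remaining ones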
variables = {
"id": graphene.Node.to_global_id("ProductType", product_type.id),
"variantSelection": variant_selection,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]
assert data["productType"]["products"]["totalCount"] == no_products - 1
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]
assert data["productType"]["products"]["totalCount"] == no_products
assert data["productType"]["taxType"]["taxCode"] == "123"
assert data["productType"]["taxType"]["description"] == "Standard Taxes"
if variant_selection == VariantAttributeScope.VARIANT_SELECTION.name:
assert (
len(data["productType"]["variantAttributes"])
== product_type.variant_attributes.filter(
input_type=AttributeInputType.DROPDOWN, type=AttributeType.PRODUCT_TYPE
).count()
)
elif variant_selection == VariantAttributeScope.NOT_VARIANT_SELECTION.name:
assert (
len(data["productType"]["variantAttributes"])
== product_type.variant_attributes.exclude(
input_type=AttributeInputType.DROPDOWN, type=AttributeType.PRODUCT_TYPE
).count()
)
else:
assert (
len(data["productType"]["variantAttributes"])
== product_type.variant_attributes.count()
)
PRODUCT_TYPE_QUERY_ASSIGNED_VARIANT_ATTRIBUTES = """
query getProductType(
$id: ID!, $variantSelection: VariantAttributeScope, $channel: String
) {
productType(id: $id) {
name
assignedVariantAttributes(variantSelection: $variantSelection) {
attribute {
slug
}
variantSelection
}
products(first: 20, channel:$channel) {
totalCount
edges {
node {
name
}
}
}
taxType {
taxCode
description
}
}
}
"""
@pytest.mark.parametrize(
"variant_selection",
[
VariantAttributeScope.ALL.name,
VariantAttributeScope.VARIANT_SELECTION.name,
VariantAttributeScope.NOT_VARIANT_SELECTION.name,
],
)
def test_product_type_query_only_assigned_variant_selections_value_set(
variant_selection,
user_api_client,
staff_api_client,
product_type,
file_attribute_with_file_input_type_without_values,
author_page_attribute,
product_type_page_reference_attribute,
product,
permission_manage_products,
monkeypatch,
channel_USD,
):
monkeypatch.setattr(
PluginsManager,
"get_tax_code_from_object_meta",
lambda self, x: TaxType(code="123", description="Standard Taxes"),
)
query = PRODUCT_TYPE_QUERY_ASSIGNED_VARIANT_ATTRIBUTES
no_products = Product.objects.count()
ProductChannelListing.objects.filter(product=product, channel=channel_USD).update(
is_published=False
)
product_type.variant_attributes.add(
file_attribute_with_file_input_type_without_values,
author_page_attribute,
product_type_page_reference_attribute,
)
variables = {
"id": graphene.Node.to_global_id("ProductType", product_type.id),
"variantSelection": variant_selection,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]
assert data["productType"]["products"]["totalCount"] == no_products - 1
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]
assert data["productType"]["products"]["totalCount"] == no_products
assert data["productType"]["taxType"]["taxCode"] == "123"
assert data["productType"]["taxType"]["description"] == "Standard Taxes"
if variant_selection == VariantAttributeScope.VARIANT_SELECTION.name:
assert (
len(data["productType"]["assignedVariantAttributes"])
== product_type.variant_attributes.filter(
input_type=AttributeInputType.DROPDOWN, type=AttributeType.PRODUCT_TYPE
).count()
)
assert all(
assign["variantSelection"]
for assign in data["productType"]["assignedVariantAttributes"]
)
elif variant_selection == VariantAttributeScope.NOT_VARIANT_SELECTION.name:
assert (
len(data["productType"]["assignedVariantAttributes"])
== product_type.variant_attributes.exclude(
input_type=AttributeInputType.DROPDOWN, type=AttributeType.PRODUCT_TYPE
).count()
)
assert not any(
assign["variantSelection"]
for assign in data["productType"]["assignedVariantAttributes"]
)
else:
assert (
len(data["productType"]["assignedVariantAttributes"])
== product_type.variant_attributes.count()
)
PRODUCT_TYPE_CREATE_MUTATION = """
mutation createProductType(
$name: String,
$slug: String,
$kind: ProductTypeKindEnum,
$taxCode: String,
$hasVariants: Boolean,
$isShippingRequired: Boolean,
$productAttributes: [ID],
$variantAttributes: [ID],
$weight: WeightScalar) {
productTypeCreate(
input: {
name: $name,
slug: $slug,
kind: $kind,
taxCode: $taxCode,
hasVariants: $hasVariants,
isShippingRequired: $isShippingRequired,
productAttributes: $productAttributes,
variantAttributes: $variantAttributes,
weight: $weight}) {
productType {
name
slug
kind
isShippingRequired
hasVariants
variantAttributes {
name
choices(first: 10) {
edges {
node {
name
}
}
}
}
productAttributes {
name
choices(first: 10) {
edges {
node {
name
richText
boolean
date
dateTime
}
}
}
}
}
errors {
field
message
code
attributes
}
}
}
"""
def test_product_type_create_mutation(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
monkeypatch,
setup_vatlayer,
):
manager = PluginsManager(plugins=setup_vatlayer.PLUGINS)
query = PRODUCT_TYPE_CREATE_MUTATION
product_type_name = "test type"
slug = "test-type"
kind = ProductTypeKindEnum.NORMAL.name
has_variants = True
require_shipping = True
product_attributes = product_type.product_attributes.all()
product_attributes_ids = [
graphene.Node.to_global_id("Attribute", att.id) for att in product_attributes
]
variant_attributes = product_type.variant_attributes.all()
variant_attributes_ids = [
graphene.Node.to_global_id("Attribute", att.id) for att in variant_attributes
]
variables = {
"name": product_type_name,
"slug": slug,
"kind": kind,
"hasVariants": has_variants,
"taxCode": "wine",
"isShippingRequired": require_shipping,
"productAttributes": product_attributes_ids,
"variantAttributes": variant_attributes_ids,
}
initial_count = ProductType.objects.count()
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
assert ProductType.objects.count() == initial_count + 1
data = content["data"]["productTypeCreate"]["productType"]
assert data["name"] == product_type_name
assert data["slug"] == slug
assert data["kind"] == kind
assert data["hasVariants"] == has_variants
assert data["isShippingRequired"] == require_shipping
pa = product_attributes[0]
assert data["productAttributes"][0]["name"] == pa.name
pa_values = data["productAttributes"][0]["choices"]["edges"]
assert sorted([value["node"]["name"] for value in pa_values]) == sorted(
[value.name for value in pa.values.all()]
)
va = variant_attributes[0]
assert data["variantAttributes"][0]["name"] == va.name
va_values = data["variantAttributes"][0]["choices"]["edges"]
assert sorted([value["node"]["name"] for value in va_values]) == sorted(
[value.name for value in va.values.all()]
)
new_instance = ProductType.objects.latest("pk")
tax_code = manager.get_tax_code_from_object_meta(new_instance).code
assert tax_code == "wine"
def test_create_gift_card_product_type(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
monkeypatch,
setup_vatlayer,
):
manager = PluginsManager(plugins=setup_vatlayer.PLUGINS)
query = PRODUCT_TYPE_CREATE_MUTATION
product_type_name = "test type"
slug = "test-type"
kind = ProductTypeKindEnum.GIFT_CARD.name
has_variants = True
require_shipping = True
product_attributes = product_type.product_attributes.all()
product_attributes_ids = [
graphene.Node.to_global_id("Attribute", att.id) for att in product_attributes
]
variant_attributes = product_type.variant_attributes.all()
variant_attributes_ids = [
graphene.Node.to_global_id("Attribute", att.id) for att in variant_attributes
]
variables = {
"name": product_type_name,
"slug": slug,
"kind": kind,
"hasVariants": has_variants,
"taxCode": "wine",
"isShippingRequired": require_shipping,
"productAttributes": product_attributes_ids,
"variantAttributes": variant_attributes_ids,
}
initial_count = ProductType.objects.count()
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
assert ProductType.objects.count() == initial_count + 1
data = content["data"]["productTypeCreate"]["productType"]
assert data["name"] == product_type_name
assert data["slug"] == slug
assert data["kind"] == kind
assert data["hasVariants"] == has_variants
assert data["isShippingRequired"] == require_shipping
pa = product_attributes[0]
assert data["productAttributes"][0]["name"] == pa.name
pa_values = data["productAttributes"][0]["choices"]["edges"]
assert sorted([value["node"]["name"] for value in pa_values]) == sorted(
[value.name for value in pa.values.all()]
)
va = variant_attributes[0]
assert data["variantAttributes"][0]["name"] == va.name
va_values = data["variantAttributes"][0]["choices"]["edges"]
assert sorted([value["node"]["name"] for value in va_values]) == sorted(
[value.name for value in va.values.all()]
)
new_instance = ProductType.objects.latest("pk")
tax_code = manager.get_tax_code_from_object_meta(new_instance).code
assert tax_code == "wine"
def test_create_product_type_with_rich_text_attribute(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
rich_text_attribute,
):
query = PRODUCT_TYPE_CREATE_MUTATION
product_type_name = "test type"
slug = "test-type"
product_type.product_attributes.add(rich_text_attribute)
product_attributes_ids = [
graphene.Node.to_global_id("Attribute", attr.id)
for attr in product_type.product_attributes.all()
]
variables = {
"name": product_type_name,
"slug": slug,
"kind": ProductTypeKindEnum.NORMAL.name,
"productAttributes": product_attributes_ids,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeCreate"]["productType"]
errors = content["data"]["productTypeCreate"]["errors"]
assert not errors
assert data["name"] == product_type_name
assert data["slug"] == slug
expected_attributes = [
{
"name": "Color",
"choices": {
"edges": [
{
"node": {
"name": "Red",
"richText": None,
"boolean": None,
"date": None,
"dateTime": None,
}
},
{
"node": {
"name": "Blue",
"richText": None,
"boolean": None,
"date": None,
"dateTime": None,
}
},
]
},
},
{
"name": "Text",
"choices": {"edges": []},
},
]
for attribute in data["productAttributes"]:
assert attribute in expected_attributes
def test_create_product_type_with_date_attribute(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
date_attribute,
date_time_attribute,
):
query = PRODUCT_TYPE_CREATE_MUTATION
product_type_name = "test type"
slug = "test-type"
kind = ProductTypeKindEnum.NORMAL.name
product_type.product_attributes.add(date_attribute)
product_type.product_attributes.add(date_time_attribute)
product_attributes_ids = [
graphene.Node.to_global_id("Attribute", attr.id)
for attr in product_type.product_attributes.all()
]
variables = {
"name": product_type_name,
"slug": slug,
"kind": kind,
"productAttributes": product_attributes_ids,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeCreate"]["productType"]
errors = content["data"]["productTypeCreate"]["errors"]
expected_attribute = [
{"choices": {"edges": []}, "name": "Release date"},
{"choices": {"edges": []}, "name": "Release date time"},
]
assert not errors
assert data["name"] == product_type_name
assert data["slug"] == slug
assert data["kind"] == kind
for attribute in expected_attribute:
assert attribute in data["productAttributes"]
def test_create_product_type_with_boolean_attribute(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
boolean_attribute,
):
query = PRODUCT_TYPE_CREATE_MUTATION
product_type_name = "test type"
slug = "test-type"
kind = ProductTypeKindEnum.NORMAL.name
product_type.product_attributes.add(boolean_attribute)
product_attributes_ids = [
graphene.Node.to_global_id("Attribute", attr.id)
for attr in product_type.product_attributes.all()
]
variables = {
"name": product_type_name,
"slug": slug,
"kind": kind,
"productAttributes": product_attributes_ids,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeCreate"]["productType"]
errors = content["data"]["productTypeCreate"]["errors"]
assert not errors
assert data["name"] == product_type_name
assert data["slug"] == slug
assert data["kind"] == kind
assert {"choices": {"edges": []}, "name": "Boolean"} in data["productAttributes"]
@pytest.mark.parametrize(
"input_slug, expected_slug",
(
("test-slug", "test-slug"),
(None, "test-product-type"),
("", "test-product-type"),
("わたし-わ-にっぽん-です", "わたし-わ-にっぽん-です"),
),
)
def test_create_product_type_with_given_slug(
staff_api_client,
permission_manage_product_types_and_attributes,
input_slug,
expected_slug,
):
query = PRODUCT_TYPE_CREATE_MUTATION
name = "Test product type"
variables = {
"name": name,
"slug": input_slug,
"kind": ProductTypeKindEnum.NORMAL.name,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeCreate"]
assert not data["errors"]
assert data["productType"]["slug"] == expected_slug
def test_create_product_type_with_unicode_in_name(
staff_api_client, permission_manage_product_types_and_attributes
):
query = PRODUCT_TYPE_CREATE_MUTATION
name = "わたし わ にっぽん です"
kind = ProductTypeKindEnum.NORMAL.name
variables = {
"name": name,
"kind": kind,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeCreate"]
assert not data["errors"]
assert data["productType"]["name"] == name
assert data["productType"]["slug"] == "わたし-わ-にっぽん-です"
assert data["productType"]["kind"] == kind
def test_product_type_create_with_negative_weight(
staff_api_client, permission_manage_product_types_and_attributes
):
query = PRODUCT_TYPE_CREATE_MUTATION
name = "Test product type"
variables = {
"name": name,
"weight": -1.1,
"type": ProductTypeKindEnum.NORMAL.name,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeCreate"]
error = data["errors"][0]
assert error["field"] == "weight"
assert error["code"] == ProductErrorCode.INVALID.name
def test_product_type_create_mutation_not_valid_attributes(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
monkeypatch,
setup_vatlayer,
):
# given
query = PRODUCT_TYPE_CREATE_MUTATION
product_type_name = "test type"
slug = "test-type"
has_variants = True
require_shipping = True
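    # Turn the last product and variant attributes into PAGE_TYPE attributes
    # so the mutation rejects them as invalid for a product type.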
product_attributes = product_type.product_attributes.all()
product_page_attribute = product_attributes.last()
product_page_attribute.type = AttributeType.PAGE_TYPE
product_page_attribute.save(update_fields=["type"])
variant_attributes = product_type.variant_attributes.all()
variant_page_attribute = variant_attributes.last()
variant_page_attribute.type = AttributeType.PAGE_TYPE
variant_page_attribute.save(update_fields=["type"])
product_attributes_ids = [
graphene.Node.to_global_id("Attribute", att.id) for att in product_attributes
]
variant_attributes_ids = [
graphene.Node.to_global_id("Attribute", att.id) for att in variant_attributes
]
variables = {
"name": product_type_name,
"slug": slug,
"kind": ProductTypeKindEnum.NORMAL.name,
"hasVariants": has_variants,
"taxCode": "wine",
"isShippingRequired": require_shipping,
"productAttributes": product_attributes_ids,
"variantAttributes": variant_attributes_ids,
}
initial_count = ProductType.objects.count()
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
# then
content = get_graphql_content(response)
data = content["data"]["productTypeCreate"]
errors = data["errors"]
assert len(errors) == 2
expected_errors = [
{
"code": ProductErrorCode.INVALID.name,
"field": "productAttributes",
"message": ANY,
"attributes": [
graphene.Node.to_global_id("Attribute", product_page_attribute.pk)
],
},
{
"code": ProductErrorCode.INVALID.name,
"field": "variantAttributes",
"message": ANY,
"attributes": [
graphene.Node.to_global_id("Attribute", variant_page_attribute.pk)
],
},
]
for error in errors:
assert error in expected_errors
assert initial_count == ProductType.objects.count()
PRODUCT_TYPE_UPDATE_MUTATION = """
mutation updateProductType(
$id: ID!,
$name: String!,
$hasVariants: Boolean!,
$isShippingRequired: Boolean!,
$productAttributes: [ID],
) {
productTypeUpdate(
id: $id,
input: {
name: $name,
hasVariants: $hasVariants,
isShippingRequired: $isShippingRequired,
productAttributes: $productAttributes
}) {
productType {
name
slug
isShippingRequired
hasVariants
variantAttributes {
id
}
productAttributes {
id
}
}
errors {
code
field
attributes
}
}
}
"""
def test_product_type_update_mutation(
staff_api_client, product_type, permission_manage_product_types_and_attributes
):
query = PRODUCT_TYPE_UPDATE_MUTATION
product_type_name = "test type updated"
slug = product_type.slug
has_variants = True
require_shipping = False
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
# Test scenario: remove all product attributes using [] as input
# but do not change variant attributes
product_attributes = []
product_attributes_ids = [
graphene.Node.to_global_id("Attribute", att.id) for att in product_attributes
]
variant_attributes = product_type.variant_attributes.all()
variables = {
"id": product_type_id,
"name": product_type_name,
"hasVariants": has_variants,
"isShippingRequired": require_shipping,
"productAttributes": product_attributes_ids,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeUpdate"]["productType"]
assert data["name"] == product_type_name
assert data["slug"] == slug
assert data["hasVariants"] == has_variants
assert data["isShippingRequired"] == require_shipping
assert not data["productAttributes"]
assert len(data["variantAttributes"]) == (variant_attributes.count())
def test_product_type_update_mutation_not_valid_attributes(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
size_page_attribute,
):
# given
query = PRODUCT_TYPE_UPDATE_MUTATION
product_type_name = "test type updated"
has_variants = True
require_shipping = False
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
# Test scenario: adding page attribute raise error
page_attribute_id = graphene.Node.to_global_id("Attribute", size_page_attribute.id)
product_attributes_ids = [
page_attribute_id,
graphene.Node.to_global_id(
"Attribute", product_type.product_attributes.first().pk
),
]
variables = {
"id": product_type_id,
"name": product_type_name,
"hasVariants": has_variants,
"isShippingRequired": require_shipping,
"productAttributes": product_attributes_ids,
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
# then
content = get_graphql_content(response)
data = content["data"]["productTypeUpdate"]
errors = data["errors"]
assert len(errors) == 1
assert errors[0]["field"] == "productAttributes"
assert errors[0]["code"] == ProductErrorCode.INVALID.name
assert errors[0]["attributes"] == [page_attribute_id]
UPDATE_PRODUCT_TYPE_SLUG_MUTATION = """
mutation($id: ID!, $slug: String) {
productTypeUpdate(
id: $id
input: {
slug: $slug
}
) {
productType{
name
slug
}
errors {
field
message
code
}
}
}
"""
@pytest.mark.parametrize(
"input_slug, expected_slug, error_message",
[
("test-slug", "test-slug", None),
("", "", "Slug value cannot be blank."),
(None, "", "Slug value cannot be blank."),
],
)
def test_update_product_type_slug(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
input_slug,
expected_slug,
error_message,
):
query = UPDATE_PRODUCT_TYPE_SLUG_MUTATION
old_slug = product_type.slug
assert old_slug != input_slug
node_id = graphene.Node.to_global_id("ProductType", product_type.id)
variables = {"slug": input_slug, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeUpdate"]
errors = data["errors"]
if not error_message:
assert not errors
assert data["productType"]["slug"] == expected_slug
else:
assert errors
assert errors[0]["field"] == "slug"
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
def test_update_product_type_slug_exists(
staff_api_client, product_type, permission_manage_product_types_and_attributes
):
query = UPDATE_PRODUCT_TYPE_SLUG_MUTATION
input_slug = "test-slug"
second_product_type = ProductType.objects.get(pk=product_type.pk)
second_product_type.pk = None
second_product_type.slug = input_slug
second_product_type.save()
assert input_slug != product_type.slug
node_id = graphene.Node.to_global_id("ProductType", product_type.id)
variables = {"slug": input_slug, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeUpdate"]
errors = data["errors"]
assert errors
assert errors[0]["field"] == "slug"
assert errors[0]["code"] == ProductErrorCode.UNIQUE.name
@pytest.mark.parametrize(
"input_slug, expected_slug, input_name, error_message, error_field",
[
("test-slug", "test-slug", "New name", None, None),
("", "", "New name", "Slug value cannot be blank.", "slug"),
(None, "", "New name", "Slug value cannot be blank.", "slug"),
("test-slug", "", None, "This field cannot be blank.", "name"),
("test-slug", "", "", "This field cannot be blank.", "name"),
(None, None, None, "Slug value cannot be blank.", "slug"),
],
)
def test_update_product_type_slug_and_name(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
input_slug,
expected_slug,
input_name,
error_message,
error_field,
):
query = """
mutation($id: ID!, $name: String, $slug: String) {
productTypeUpdate(
id: $id
input: {
name: $name
slug: $slug
}
) {
productType{
name
slug
}
errors {
field
message
code
}
}
}
"""
old_name = product_type.name
old_slug = product_type.slug
assert input_slug != old_slug
assert input_name != old_name
node_id = graphene.Node.to_global_id("ProductType", product_type.id)
variables = {"slug": input_slug, "name": input_name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
product_type.refresh_from_db()
data = content["data"]["productTypeUpdate"]
errors = data["errors"]
if not error_message:
assert data["productType"]["name"] == input_name == product_type.name
assert data["productType"]["slug"] == input_slug == product_type.slug
else:
assert errors
assert errors[0]["field"] == error_field
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
def test_update_product_type_with_negative_weight(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
):
query = """
mutation($id: ID!, $weight: WeightScalar) {
productTypeUpdate(
id: $id
input: {
weight: $weight
}
) {
productType{
name
}
errors {
field
message
code
}
}
}
"""
node_id = graphene.Node.to_global_id("ProductType", product_type.id)
variables = {"id": node_id, "weight": "-1"}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
product_type.refresh_from_db()
data = content["data"]["productTypeUpdate"]
error = data["errors"][0]
assert error["field"] == "weight"
assert error["code"] == ProductErrorCode.INVALID.name
def test_update_product_type_type(
staff_api_client,
product_type,
permission_manage_product_types_and_attributes,
):
query = """
mutation($id: ID!, $kind: ProductTypeKindEnum) {
productTypeUpdate(
id: $id
input: {
kind: $kind
}
) {
productType{
name
kind
}
errors {
field
message
code
}
}
}
"""
kind = ProductTypeKindEnum.GIFT_CARD.name
assert product_type.kind != kind
node_id = graphene.Node.to_global_id("ProductType", product_type.id)
variables = {"kind": kind, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeUpdate"]
errors = data["errors"]
assert not errors
assert data["productType"]["kind"] == kind
PRODUCT_TYPE_DELETE_MUTATION = """
mutation deleteProductType($id: ID!) {
productTypeDelete(id: $id) {
productType {
name
}
}
}
"""
def test_product_type_delete_mutation(
staff_api_client, product_type, permission_manage_product_types_and_attributes
):
query = PRODUCT_TYPE_DELETE_MUTATION
variables = {"id": graphene.Node.to_global_id("ProductType", product_type.id)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeDelete"]
assert data["productType"]["name"] == product_type.name
with pytest.raises(product_type._meta.model.DoesNotExist):
product_type.refresh_from_db()
@patch("saleor.product.signals.delete_versatile_image")
def test_product_type_delete_mutation_deletes_also_images(
delete_versatile_image_mock,
staff_api_client,
product_type,
product_with_image,
permission_manage_product_types_and_attributes,
):
query = PRODUCT_TYPE_DELETE_MUTATION
product_type.products.add(product_with_image)
media_obj = product_with_image.media.first()
variables = {"id": graphene.Node.to_global_id("ProductType", product_type.id)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeDelete"]
assert data["productType"]["name"] == product_type.name
with pytest.raises(product_type._meta.model.DoesNotExist):
product_type.refresh_from_db()
delete_versatile_image_mock.assert_called_once_with(media_obj.image.name)
@patch("saleor.attribute.signals.delete_from_storage_task.delay")
def test_product_type_delete_with_file_attributes(
delete_from_storage_task_mock,
staff_api_client,
product_with_variant_with_file_attribute,
file_attribute,
permission_manage_product_types_and_attributes,
):
query = PRODUCT_TYPE_DELETE_MUTATION
product_type = product_with_variant_with_file_attribute.product_type
product_type.product_attributes.add(file_attribute)
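    # Attach a file attribute value so deleting the product type also removes
    # the associated files from storage.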
associate_attribute_values_to_instance(
product_with_variant_with_file_attribute,
file_attribute,
file_attribute.values.last(),
)
values = list(file_attribute.values.all())
variables = {"id": graphene.Node.to_global_id("ProductType", product_type.id)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeDelete"]
assert data["productType"]["name"] == product_type.name
with pytest.raises(product_type._meta.model.DoesNotExist):
product_type.refresh_from_db()
for value in values:
with pytest.raises(value._meta.model.DoesNotExist):
value.refresh_from_db()
assert delete_from_storage_task_mock.call_count == len(values)
    assert set(
        call.args[0] for call in delete_from_storage_task_mock.call_args_list
    ) == {v.file_url for v in values}
def test_product_type_delete_mutation_variants_in_draft_order(
staff_api_client,
permission_manage_product_types_and_attributes,
product,
order_list,
channel_USD,
):
query = PRODUCT_TYPE_DELETE_MUTATION
product_type = product.product_type
variant = product.variants.first()
order_not_draft = order_list[-1]
draft_order = order_list[1]
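    # Only order lines that belong to draft orders should be deleted together
    # with the product type; lines of non-draft orders must stay intact.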
draft_order.status = OrderStatus.DRAFT
draft_order.save(update_fields=["status"])
variant_channel_listing = variant.channel_listings.get(channel=channel_USD)
net = variant.get_price(product, [], channel_USD, variant_channel_listing, None)
gross = Money(amount=net.amount, currency=net.currency)
quantity = 3
unit_price = TaxedMoney(net=net, gross=gross)
total_price = unit_price * quantity
order_line_not_in_draft = OrderLine.objects.create(
variant=variant,
order=order_not_draft,
product_name=str(variant.product),
variant_name=str(variant),
product_sku=variant.sku,
product_variant_id=variant.get_global_id(),
is_shipping_required=variant.is_shipping_required(),
is_gift_card=variant.is_gift_card(),
unit_price=TaxedMoney(net=net, gross=gross),
total_price=total_price,
quantity=3,
)
order_line_in_draft = OrderLine.objects.create(
variant=variant,
order=draft_order,
product_name=str(variant.product),
variant_name=str(variant),
product_sku=variant.sku,
product_variant_id=variant.get_global_id(),
is_shipping_required=variant.is_shipping_required(),
is_gift_card=variant.is_gift_card(),
unit_price=TaxedMoney(net=net, gross=gross),
total_price=total_price,
quantity=3,
)
variables = {"id": graphene.Node.to_global_id("ProductType", product_type.id)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
content = get_graphql_content(response)
data = content["data"]["productTypeDelete"]
assert data["productType"]["name"] == product_type.name
with pytest.raises(product_type._meta.model.DoesNotExist):
product_type.refresh_from_db()
with pytest.raises(order_line_in_draft._meta.model.DoesNotExist):
order_line_in_draft.refresh_from_db()
assert OrderLine.objects.filter(pk=order_line_not_in_draft.pk).exists()
PRODUCT_MEDIA_CREATE_QUERY = """
mutation createProductMedia(
$product: ID!,
$image: Upload,
$mediaUrl: String,
$alt: String
) {
productMediaCreate(input: {
product: $product,
mediaUrl: $mediaUrl,
alt: $alt,
image: $image
}) {
product {
media {
url
alt
type
oembedData
}
}
errors {
code
field
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_product_media_create_mutation(
product_updated_mock,
monkeypatch,
staff_api_client,
product,
permission_manage_products,
media_root,
):
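    # Patch the thumbnail creation task so no real image processing happens
    # during the test.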
mock_create_thumbnails = Mock(return_value=None)
monkeypatch.setattr(
(
"saleor.graphql.product.mutations.products."
"create_product_thumbnails.delay"
),
mock_create_thumbnails,
)
image_file, image_name = create_image()
variables = {
"product": graphene.Node.to_global_id("Product", product.id),
"alt": "",
"image": image_name,
}
body = get_multipart_request_body(
PRODUCT_MEDIA_CREATE_QUERY, variables, image_file, image_name
)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
get_graphql_content(response)
product.refresh_from_db()
product_image = product.media.last()
assert product_image.image.file
    img_name, img_format = os.path.splitext(image_file._name)
    file_name = product_image.image.name
    assert file_name != image_file._name
    assert file_name.startswith(f"products/{img_name}")
    assert file_name.endswith(img_format)
# The image creation should have triggered a warm-up
mock_create_thumbnails.assert_called_once_with(product_image.pk)
product_updated_mock.assert_called_once_with(product)
def test_product_media_create_mutation_without_file(
monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
variables = {
"product": graphene.Node.to_global_id("Product", product.id),
"image": "image name",
}
body = get_multipart_request_body(
PRODUCT_MEDIA_CREATE_QUERY, variables, file="", file_name="name"
)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["productMediaCreate"]["errors"]
assert errors[0]["field"] == "image"
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
@pytest.mark.vcr
def test_product_media_create_mutation_with_media_url(
monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
variables = {
"product": graphene.Node.to_global_id("Product", product.id),
"mediaUrl": "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
"alt": "",
}
body = get_multipart_request_body(
PRODUCT_MEDIA_CREATE_QUERY, variables, file="", file_name="name"
)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
media = content["data"]["productMediaCreate"]["product"]["media"]
assert len(media) == 1
assert media[0]["url"] == "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
assert media[0]["alt"] == "Rick Astley - Never Gonna Give You Up (Video)"
assert media[0]["type"] == ProductMediaTypes.VIDEO
oembed_data = json.loads(media[0]["oembedData"])
assert oembed_data["url"] == "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
assert oembed_data["type"] == "video"
assert oembed_data["html"] is not None
assert oembed_data["thumbnail_url"] == (
"https://i.ytimg.com/vi/dQw4w9WgXcQ/hqdefault.jpg"
)
def test_product_media_create_mutation_without_url_or_image(
monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
variables = {
"product": graphene.Node.to_global_id("Product", product.id),
"alt": "Test Alt Text",
}
body = get_multipart_request_body(
PRODUCT_MEDIA_CREATE_QUERY, variables, file="", file_name="name"
)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["productMediaCreate"]["errors"]
assert len(errors) == 1
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
assert errors[0]["field"] == "input"
def test_product_media_create_mutation_with_both_url_and_image(
monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
image_file, image_name = create_image()
variables = {
"product": graphene.Node.to_global_id("Product", product.id),
"mediaUrl": "https://www.youtube.com/watch?v=SomeVideoID&ab_channel=Test",
"image": image_name,
"alt": "Test Alt Text",
}
body = get_multipart_request_body(
PRODUCT_MEDIA_CREATE_QUERY, variables, image_file, image_name
)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["productMediaCreate"]["errors"]
assert len(errors) == 1
assert errors[0]["code"] == ProductErrorCode.DUPLICATED_INPUT_ITEM.name
assert errors[0]["field"] == "input"
def test_product_media_create_mutation_with_unknown_url(
monkeypatch, staff_api_client, product, permission_manage_products, media_root
):
variables = {
"product": graphene.Node.to_global_id("Product", product.id),
"mediaUrl": "https://www.videohosting.com/SomeVideoID",
"alt": "Test Alt Text",
}
body = get_multipart_request_body(
PRODUCT_MEDIA_CREATE_QUERY, variables, file="", file_name="name"
)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["productMediaCreate"]["errors"]
assert len(errors) == 1
assert errors[0]["code"] == ProductErrorCode.UNSUPPORTED_MEDIA_PROVIDER.name
assert errors[0]["field"] == "mediaUrl"
def test_invalid_product_media_create_mutation(
staff_api_client, product, permission_manage_products
):
query = """
mutation createProductMedia($image: Upload!, $product: ID!) {
productMediaCreate(input: {image: $image, product: $product}) {
media {
id
url
sortOrder
}
errors {
field
message
}
}
}
"""
image_file, image_name = create_pdf_file_with_image_ext()
variables = {
"product": graphene.Node.to_global_id("Product", product.id),
"image": image_name,
}
body = get_multipart_request_body(query, variables, image_file, image_name)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert content["data"]["productMediaCreate"]["errors"] == [
{"field": "image", "message": "Invalid file type."}
]
product.refresh_from_db()
assert product.media.count() == 0
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_product_image_update_mutation(
product_updated_mock,
monkeypatch,
staff_api_client,
product_with_image,
permission_manage_products,
):
query = """
mutation updateProductMedia($mediaId: ID!, $alt: String) {
productMediaUpdate(id: $mediaId, input: {alt: $alt}) {
media {
alt
}
}
}
"""
mock_create_thumbnails = Mock(return_value=None)
monkeypatch.setattr(
(
"saleor.graphql.product.mutations.products."
"create_product_thumbnails.delay"
),
mock_create_thumbnails,
)
media_obj = product_with_image.media.first()
alt = "damage alt"
variables = {
"alt": alt,
"mediaId": graphene.Node.to_global_id("ProductMedia", media_obj.id),
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert content["data"]["productMediaUpdate"]["media"]["alt"] == alt
# We did not update the image field,
# the image should not have triggered a warm-up
assert mock_create_thumbnails.call_count == 0
product_updated_mock.assert_called_once_with(product_with_image)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
@patch("saleor.product.signals.delete_versatile_image")
def test_product_media_delete(
delete_versatile_image_mock,
product_updated_mock,
staff_api_client,
product_with_image,
permission_manage_products,
):
product = product_with_image
query = """
mutation deleteProductMedia($id: ID!) {
productMediaDelete(id: $id) {
media {
id
url
}
}
}
"""
media_obj = product.media.first()
node_id = graphene.Node.to_global_id("ProductMedia", media_obj.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productMediaDelete"]
assert media_obj.image.url in data["media"]["url"]
with pytest.raises(media_obj._meta.model.DoesNotExist):
media_obj.refresh_from_db()
assert node_id == data["media"]["id"]
product_updated_mock.assert_called_once_with(product)
delete_versatile_image_mock.assert_called_once_with(media_obj.image.name)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_reorder_media(
product_updated_mock,
staff_api_client,
product_with_images,
permission_manage_products,
):
query = """
mutation reorderMedia($product_id: ID!, $media_ids: [ID]!) {
productMediaReorder(productId: $product_id, mediaIds: $media_ids) {
product {
id
}
}
}
"""
product = product_with_images
media = product.media.all()
media_0 = media[0]
media_1 = media[1]
media_0_id = graphene.Node.to_global_id("ProductMedia", media_0.id)
media_1_id = graphene.Node.to_global_id("ProductMedia", media_1.id)
product_id = graphene.Node.to_global_id("Product", product.id)
variables = {"product_id": product_id, "media_ids": [media_1_id, media_0_id]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
    # Check that the media order has been changed
product.refresh_from_db()
reordered_media = product.media.all()
reordered_media_0 = reordered_media[0]
reordered_media_1 = reordered_media[1]
assert media_0.id == reordered_media_1.id
assert media_1.id == reordered_media_0.id
product_updated_mock.assert_called_once_with(product)
ASSIGN_VARIANT_QUERY = """
mutation assignVariantMediaMutation($variantId: ID!, $mediaId: ID!) {
variantMediaAssign(variantId: $variantId, mediaId: $mediaId) {
errors {
field
message
}
productVariant {
id
}
}
}
"""
def test_assign_variant_media(
staff_api_client, user_api_client, product_with_image, permission_manage_products
):
query = ASSIGN_VARIANT_QUERY
variant = product_with_image.variants.first()
media_obj = product_with_image.media.first()
variables = {
"variantId": to_global_id("ProductVariant", variant.pk),
"mediaId": to_global_id("ProductMedia", media_obj.pk),
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
variant.refresh_from_db()
assert variant.media.first() == media_obj
def test_assign_variant_media_second_time(
staff_api_client, user_api_client, product_with_image, permission_manage_products
):
# given
query = ASSIGN_VARIANT_QUERY
variant = product_with_image.variants.first()
media_obj = product_with_image.media.first()
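    # Pre-assign the media to the variant so the second assignment hits the
    # unique constraint.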
media_obj.variant_media.create(variant=variant)
variables = {
"variantId": to_global_id("ProductVariant", variant.pk),
"mediaId": to_global_id("ProductMedia", media_obj.pk),
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
# then
content = get_graphql_content_from_response(response)
assert "errors" in content
assert (
"duplicate key value violates unique constraint"
in content["errors"][0]["message"]
)
def test_assign_variant_media_from_different_product(
staff_api_client, user_api_client, product_with_image, permission_manage_products
):
query = ASSIGN_VARIANT_QUERY
variant = product_with_image.variants.first()
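    # Clone the product so the new media belongs to a different product than
    # the variant being assigned to.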
product_with_image.pk = None
product_with_image.slug = "product-with-image"
product_with_image.save()
media_obj_2 = ProductMedia.objects.create(product=product_with_image)
variables = {
"variantId": to_global_id("ProductVariant", variant.pk),
"mediaId": to_global_id("ProductMedia", media_obj_2.pk),
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert content["data"]["variantMediaAssign"]["errors"][0]["field"] == "mediaId"
# check permissions
response = user_api_client.post_graphql(query, variables)
assert_no_permission(response)
UNASSIGN_VARIANT_IMAGE_QUERY = """
mutation unassignVariantMediaMutation($variantId: ID!, $mediaId: ID!) {
variantMediaUnassign(variantId: $variantId, mediaId: $mediaId) {
errors {
field
message
}
productVariant {
id
}
}
}
"""
def test_unassign_variant_media_image(
staff_api_client, product_with_image, permission_manage_products
):
query = UNASSIGN_VARIANT_IMAGE_QUERY
media = product_with_image.media.first()
variant = product_with_image.variants.first()
variant.variant_media.create(media=media)
variables = {
"variantId": to_global_id("ProductVariant", variant.pk),
"mediaId": to_global_id("ProductMedia", media.pk),
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
variant.refresh_from_db()
assert variant.media.count() == 0
def test_unassign_not_assigned_variant_media_image(
staff_api_client, product_with_image, permission_manage_products
):
query = UNASSIGN_VARIANT_IMAGE_QUERY
variant = product_with_image.variants.first()
media = ProductMedia.objects.create(product=product_with_image)
variables = {
"variantId": to_global_id("ProductVariant", variant.pk),
"mediaId": to_global_id("ProductMedia", media.pk),
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert content["data"]["variantMediaUnassign"]["errors"][0]["field"] == ("mediaId")
@patch("saleor.product.tasks.update_variants_names.delay")
def test_product_type_update_changes_variant_name(
mock_update_variants_names,
staff_api_client,
product_type,
product,
permission_manage_product_types_and_attributes,
):
query = """
mutation updateProductType(
$id: ID!,
$hasVariants: Boolean!,
$isShippingRequired: Boolean!,
$variantAttributes: [ID],
) {
productTypeUpdate(
id: $id,
input: {
hasVariants: $hasVariants,
isShippingRequired: $isShippingRequired,
variantAttributes: $variantAttributes}) {
productType {
id
}
}
}
"""
variant = product.variants.first()
variant.name = "test name"
variant.save()
has_variants = True
require_shipping = False
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
variant_attributes = product_type.variant_attributes.all()
variant_attributes_ids = [
graphene.Node.to_global_id("Attribute", att.id) for att in variant_attributes
]
variables = {
"id": product_type_id,
"hasVariants": has_variants,
"isShippingRequired": require_shipping,
"variantAttributes": variant_attributes_ids,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_product_types_and_attributes]
)
get_graphql_content(response)
variant_attributes = set(variant_attributes)
variant_attributes_ids = [attr.pk for attr in variant_attributes]
mock_update_variants_names.assert_called_once_with(
product_type.pk, variant_attributes_ids
)
@patch("saleor.product.tasks._update_variants_names")
def test_product_update_variants_names(mock__update_variants_names, product_type):
variant_attributes = [product_type.variant_attributes.first()]
variant_attr_ids = [attr.pk for attr in variant_attributes]
update_variants_names(product_type.pk, variant_attr_ids)
assert mock__update_variants_names.call_count == 1
QUERY_PRODUCT_VARAINT_BY_ID = """
query getProductVariant($id: ID!, $channel: String) {
productVariant(id: $id, channel: $channel) {
id
name
sku
}
}
"""
def test_product_variant_without_price_by_id_as_staff_with_permission(
staff_api_client, variant, channel_USD, permission_manage_products
):
query = QUERY_PRODUCT_VARAINT_BY_ID
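    # Recreate the channel listing without a price to simulate a variant that
    # has no price set.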
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"id": variant_id, "channel": channel_USD.slug}
response = staff_api_client.post_graphql(
query,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
data = content["data"]["productVariant"]
assert data["id"] == variant_id
def test_product_variant_without_price_by_id_as_staff_without_permission(
staff_api_client, variant, channel_USD
):
query = QUERY_PRODUCT_VARAINT_BY_ID
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"id": variant_id, "channel": channel_USD.slug}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert not content["data"]["productVariant"]
def test_product_variant_without_price_by_id_as_app_without_permission(
app_api_client, variant, channel_USD
):
query = QUERY_PRODUCT_VARAINT_BY_ID
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"id": variant_id, "channel": channel_USD.slug}
response = app_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert not content["data"]["productVariant"]
def test_product_variant_without_price_by_id_as_app_with_permission(
app_api_client, variant, channel_USD, permission_manage_products
):
query = QUERY_PRODUCT_VARAINT_BY_ID
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"id": variant_id, "channel": channel_USD.slug}
response = app_api_client.post_graphql(
query,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
data = content["data"]["productVariant"]
assert data["id"] == variant_id
def test_product_variant_without_price_by_id_as_user(
user_api_client, variant, channel_USD
):
query = QUERY_PRODUCT_VARAINT_BY_ID
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"id": variant_id, "channel": channel_USD.slug}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["productVariant"]
assert data is None
def test_variant_query_invalid_id(user_api_client, variant, channel_USD):
variant_id = "'"
variables = {
"id": variant_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_PRODUCT_VARAINT_BY_ID, variables)
content = get_graphql_content_from_response(response)
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == f"Couldn't resolve id: {variant_id}."
assert content["data"]["productVariant"] is None
def test_variant_query_object_with_given_id_does_not_exist(
user_api_client, variant, channel_USD
):
variant_id = graphene.Node.to_global_id("ProductVariant", -1)
variables = {
"id": variant_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_PRODUCT_VARAINT_BY_ID, variables)
content = get_graphql_content(response)
assert content["data"]["productVariant"] is None
def test_variant_query_with_invalid_object_type(user_api_client, variant, channel_USD):
variant_id = graphene.Node.to_global_id("Product", variant.pk)
variables = {
"id": variant_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_PRODUCT_VARAINT_BY_ID, variables)
content = get_graphql_content(response)
assert content["data"]["productVariant"] is None
def test_product_variant_without_price_by_sku_as_staff_with_permission(
staff_api_client, variant, channel_USD, permission_manage_products
):
query = """
query getProductVariant($sku: String!, $channel: String) {
productVariant(sku: $sku, channel: $channel) {
id
name
sku
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"sku": variant.sku, "channel": channel_USD.slug}
response = staff_api_client.post_graphql(
query,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
data = content["data"]["productVariant"]
assert data["id"] == variant_id
def test_product_variant_without_price_by_sku_as_staff_without_permission(
staff_api_client, variant, channel_USD
):
query = """
query getProductVariant($sku: String!, $channel: String) {
productVariant(sku: $sku, channel: $channel) {
id
name
sku
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variables = {"sku": variant.sku, "channel": channel_USD.slug}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert not content["data"]["productVariant"]
def test_product_variant_without_price_by_sku_as_app_with_permission(
app_api_client, variant, channel_USD, permission_manage_products
):
query = """
query getProductVariant($sku: String!, $channel: String) {
productVariant(sku: $sku, channel: $channel) {
id
name
sku
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"sku": variant.sku, "channel": channel_USD.slug}
response = app_api_client.post_graphql(
query,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
data = content["data"]["productVariant"]
assert data["id"] == variant_id
def test_product_variant_without_price_by_sku_as_app_without_permission(
app_api_client,
variant,
channel_USD,
):
query = """
query getProductVariant($sku: String!, $channel: String) {
productVariant(sku: $sku, channel: $channel) {
id
name
sku
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variables = {"sku": variant.sku, "channel": channel_USD.slug}
response = app_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert not content["data"]["productVariant"]
def test_product_variant_without_price_by_sku_as_user(
user_api_client, variant, channel_USD
):
query = """
query getProductVariant($sku: String!, $channel: String) {
productVariant(sku: $sku, channel: $channel) {
id
name
sku
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variables = {"sku": variant.sku, "channel": channel_USD.slug}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["productVariant"]
assert data is None
def test_product_variants_by_ids(staff_api_client, variant, channel_USD):
query = """
query getProductVariants($ids: [ID!], $channel: String) {
productVariants(ids: $ids, first: 1, channel: $channel) {
edges {
node {
id
name
sku
channelListings {
channel {
id
isActive
name
currencyCode
}
price {
amount
currency
}
}
}
}
}
}
"""
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"ids": [variant_id], "channel": channel_USD.slug}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["productVariants"]
assert data["edges"][0]["node"]["id"] == variant_id
assert len(data["edges"]) == 1
def test_product_variants_without_price_by_ids_as_staff_without_permission(
staff_api_client, variant, channel_USD
):
query = """
query getProductVariants($ids: [ID!], $channel: String) {
productVariants(ids: $ids, first: 1, channel: $channel) {
edges {
node {
id
name
sku
channelListings {
channel {
id
isActive
name
currencyCode
}
price {
amount
currency
}
}
}
}
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"ids": [variant_id], "channel": channel_USD.slug}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["productVariants"]
assert len(data["edges"]) == 0
def test_product_variants_without_price_by_ids_as_staff_with_permission(
staff_api_client, variant, channel_USD, permission_manage_products
):
query = """
query getProductVariants($ids: [ID!], $channel: String) {
productVariants(ids: $ids, first: 1, channel: $channel) {
edges {
node {
id
name
sku
channelListings {
channel {
id
isActive
name
currencyCode
}
price {
amount
currency
}
}
}
}
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"ids": [variant_id], "channel": channel_USD.slug}
response = staff_api_client.post_graphql(
query,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
data = content["data"]["productVariants"]
assert data["edges"][0]["node"]["id"] == variant_id
assert len(data["edges"]) == 1
def test_product_variants_without_price_by_ids_as_user(
user_api_client, variant, channel_USD
):
query = """
query getProductVariants($ids: [ID!], $channel: String) {
productVariants(ids: $ids, first: 1, channel: $channel) {
edges {
node {
id
name
sku
}
}
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"ids": [variant_id], "channel": channel_USD.slug}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["productVariants"]
assert len(data["edges"]) == 0
def test_product_variants_without_price_by_ids_as_app_without_permission(
app_api_client, variant, channel_USD
):
query = """
query getProductVariants($ids: [ID!], $channel: String) {
productVariants(ids: $ids, first: 1, channel: $channel) {
edges {
node {
id
name
sku
channelListings {
channel {
id
isActive
name
currencyCode
}
price {
amount
currency
}
}
}
}
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"ids": [variant_id], "channel": channel_USD.slug}
response = app_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert len(content["data"]["productVariants"]["edges"]) == 0
def test_product_variants_without_price_by_ids_as_app_with_permission(
app_api_client, variant, channel_USD, permission_manage_products
):
query = """
query getProductVariants($ids: [ID!], $channel: String) {
productVariants(ids: $ids, first: 1, channel: $channel) {
edges {
node {
id
name
sku
channelListings {
channel {
id
isActive
name
currencyCode
}
price {
amount
currency
}
}
}
}
}
}
"""
variant.channel_listings.all().delete()
variant.channel_listings.create(channel=channel_USD)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"ids": [variant_id], "channel": channel_USD.slug}
response = app_api_client.post_graphql(
query,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
data = content["data"]["productVariants"]
assert data["edges"][0]["node"]["id"] == variant_id
assert len(data["edges"]) == 1
def test_product_variants_by_customer(user_api_client, variant, channel_USD):
query = """
query getProductVariants($ids: [ID!], $channel: String) {
productVariants(ids: $ids, first: 1, channel: $channel) {
edges {
node {
id
name
sku
channelListings {
channel {
id
isActive
name
currencyCode
}
price {
amount
currency
}
}
}
}
}
}
"""
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {"ids": [variant_id], "channel": channel_USD.slug}
response = user_api_client.post_graphql(query, variables)
assert_no_permission(response)
def test_product_variants_no_ids_list(user_api_client, variant, channel_USD):
query = """
query getProductVariants($channel: String) {
productVariants(first: 10, channel: $channel) {
edges {
node {
id
}
}
}
}
"""
variables = {"channel": channel_USD.slug}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["productVariants"]
assert len(data["edges"]) == ProductVariant.objects.count()
QUERY_GET_PRODUCT_VARIANTS_PRICING = """
query getProductVariants($id: ID!, $channel: String, $address: AddressInput) {
product(id: $id, channel: $channel) {
variants {
id
pricingNoAddress: pricing {
priceUndiscounted {
gross {
amount
}
}
}
pricing(address: $address) {
priceUndiscounted {
gross {
amount
}
}
}
}
}
}
"""
@pytest.mark.parametrize(
"variant_price_amount, api_variant_price",
[(200, 200), (0, 0)],
)
def test_product_variant_price(
variant_price_amount,
api_variant_price,
user_api_client,
variant,
stock,
channel_USD,
):
product = variant.product
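    # Set the listed price for every variant of the product in the USD channel.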
ProductVariantChannelListing.objects.filter(
channel=channel_USD, variant__product_id=product.pk
).update(price_amount=variant_price_amount)
product_id = graphene.Node.to_global_id("Product", variant.product.id)
variables = {
"id": product_id,
"channel": channel_USD.slug,
"address": {"country": "US"},
}
response = user_api_client.post_graphql(
QUERY_GET_PRODUCT_VARIANTS_PRICING, variables
)
content = get_graphql_content(response)
data = content["data"]["product"]
variant_price = data["variants"][0]["pricing"]["priceUndiscounted"]["gross"]
assert variant_price["amount"] == api_variant_price
def test_product_variant_without_price_as_user(
user_api_client,
variant,
stock,
channel_USD,
):
variant.channel_listings.filter(channel=channel_USD).update(price_amount=None)
product_id = graphene.Node.to_global_id("Product", variant.product.id)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {
"id": product_id,
"channel": channel_USD.slug,
"address": {"country": "US"},
}
response = user_api_client.post_graphql(
QUERY_GET_PRODUCT_VARIANTS_PRICING, variables
)
content = get_graphql_content(response)
variants_data = content["data"]["product"]["variants"]
    assert variants_data[0]["id"] != variant_id
assert len(variants_data) == 1
def test_product_variant_without_price_as_staff_without_permission(
staff_api_client,
variant,
stock,
channel_USD,
):
variant_channel_listing = variant.channel_listings.first()
variant_channel_listing.price_amount = None
variant_channel_listing.save()
product_id = graphene.Node.to_global_id("Product", variant.product.id)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {
"id": product_id,
"channel": channel_USD.slug,
"address": {"country": "US"},
}
response = staff_api_client.post_graphql(
QUERY_GET_PRODUCT_VARIANTS_PRICING, variables
)
content = get_graphql_content(response)
variants_data = content["data"]["product"]["variants"]
assert len(variants_data) == 1
assert variants_data[0]["pricing"] is not None
assert variants_data[0]["id"] != variant_id
def test_product_variant_without_price_as_staff_with_permission(
staff_api_client, variant, stock, channel_USD, permission_manage_products
):
variant_channel_listing = variant.channel_listings.first()
variant_channel_listing.price_amount = None
variant_channel_listing.save()
product_id = graphene.Node.to_global_id("Product", variant.product.id)
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
variables = {
"id": product_id,
"channel": channel_USD.slug,
"address": {"country": "US"},
}
response = staff_api_client.post_graphql(
QUERY_GET_PRODUCT_VARIANTS_PRICING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
content = get_graphql_content(response)
variants_data = content["data"]["product"]["variants"]
assert len(variants_data) == 2
assert variants_data[0]["pricing"] is not None
assert variants_data[1]["id"] == variant_id
assert variants_data[1]["pricing"] is None
QUERY_GET_PRODUCT_VARIANTS_PRICING_NO_ADDRESS = """
query getProductVariants($id: ID!, $channel: String) {
product(id: $id, channel: $channel) {
variants {
id
pricing {
priceUndiscounted {
gross {
amount
}
}
}
}
}
}
"""
@mock.patch(
"saleor.graphql.product.types.products.get_variant_availability",
wraps=get_variant_availability,
)
def test_product_variant_price_no_address(
mock_get_variant_availability, user_api_client, variant, stock, channel_USD
):
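    # Without an address argument, pricing should fall back to the channel's
    # default country.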
channel_USD.default_country = "FR"
channel_USD.save()
product_id = graphene.Node.to_global_id("Product", variant.product.id)
variables = {"id": product_id, "channel": channel_USD.slug}
user_api_client.post_graphql(
QUERY_GET_PRODUCT_VARIANTS_PRICING_NO_ADDRESS, variables
)
assert (
mock_get_variant_availability.call_args[1]["country"]
== channel_USD.default_country
)
QUERY_REPORT_PRODUCT_SALES = """
query TopProducts($period: ReportingPeriod!, $channel: String!) {
reportProductSales(period: $period, first: 20, channel: $channel) {
edges {
node {
revenue(period: $period) {
gross {
amount
}
}
quantityOrdered
sku
}
}
}
}
"""
def test_report_product_sales(
staff_api_client,
order_with_lines,
order_with_lines_channel_PLN,
permission_manage_products,
permission_manage_orders,
channel_USD,
):
order = order_with_lines
variables = {"period": ReportingPeriod.TODAY.name, "channel": channel_USD.slug}
permissions = [permission_manage_orders, permission_manage_products]
response = staff_api_client.post_graphql(
QUERY_REPORT_PRODUCT_SALES, variables, permissions
)
content = get_graphql_content(response)
edges = content["data"]["reportProductSales"]["edges"]
node_a = edges[0]["node"]
line_a = order.lines.get(product_sku=node_a["sku"])
assert node_a["quantityOrdered"] == line_a.quantity
amount = str(node_a["revenue"]["gross"]["amount"])
assert Decimal(amount) == line_a.quantity * line_a.unit_price_gross_amount
node_b = edges[1]["node"]
line_b = order.lines.get(product_sku=node_b["sku"])
assert node_b["quantityOrdered"] == line_b.quantity
amount = str(node_b["revenue"]["gross"]["amount"])
assert Decimal(amount) == line_b.quantity * line_b.unit_price_gross_amount
def test_report_product_sales_channel_pln(
staff_api_client,
order_with_lines,
order_with_lines_channel_PLN,
permission_manage_products,
permission_manage_orders,
channel_PLN,
):
order = order_with_lines_channel_PLN
variables = {"period": ReportingPeriod.TODAY.name, "channel": channel_PLN.slug}
permissions = [permission_manage_orders, permission_manage_products]
response = staff_api_client.post_graphql(
QUERY_REPORT_PRODUCT_SALES, variables, permissions
)
content = get_graphql_content(response)
edges = content["data"]["reportProductSales"]["edges"]
node_a = edges[0]["node"]
line_a = order.lines.get(product_sku=node_a["sku"])
assert node_a["quantityOrdered"] == line_a.quantity
amount = str(node_a["revenue"]["gross"]["amount"])
assert Decimal(amount) == line_a.quantity * line_a.unit_price_gross_amount
node_b = edges[1]["node"]
line_b = order.lines.get(product_sku=node_b["sku"])
assert node_b["quantityOrdered"] == line_b.quantity
amount = str(node_b["revenue"]["gross"]["amount"])
assert Decimal(amount) == line_b.quantity * line_b.unit_price_gross_amount
def test_report_product_sales_not_existing_channel(
staff_api_client,
order_with_lines,
order_with_lines_channel_PLN,
permission_manage_products,
permission_manage_orders,
):
variables = {"period": ReportingPeriod.TODAY.name, "channel": "not-existing"}
permissions = [permission_manage_orders, permission_manage_products]
response = staff_api_client.post_graphql(
QUERY_REPORT_PRODUCT_SALES, variables, permissions
)
content = get_graphql_content(response)
assert not content["data"]["reportProductSales"]["edges"]
def test_product_restricted_fields_permissions(
staff_api_client,
permission_manage_products,
permission_manage_orders,
product,
channel_USD,
):
"""Ensure non-public (restricted) fields are correctly requiring
the 'manage_products' permission.
"""
query = """
query Product($id: ID!, $channel: String) {
product(id: $id, channel: $channel) {
privateMetadata { __typename}
}
}
"""
variables = {
"id": graphene.Node.to_global_id("Product", product.pk),
"channel": channel_USD.slug,
}
permissions = [permission_manage_orders, permission_manage_products]
response = staff_api_client.post_graphql(query, variables, permissions)
content = get_graphql_content(response)
assert "privateMetadata" in content["data"]["product"]
@pytest.mark.parametrize(
"field, is_nested",
(("digitalContent", True), ("quantityOrdered", False)),
)
def test_variant_restricted_fields_permissions(
staff_api_client,
permission_manage_products,
permission_manage_orders,
product,
field,
is_nested,
channel_USD,
):
"""Ensure non-public (restricted) fields are correctly requiring
the 'manage_products' permission.
"""
query = """
query ProductVariant($id: ID!, $channel: String) {
productVariant(id: $id, channel: $channel) {
%(field)s
}
}
""" % {
"field": field if not is_nested else "%s { __typename }" % field
}
variant = product.variants.first()
variables = {
"id": graphene.Node.to_global_id("ProductVariant", variant.pk),
"channel": channel_USD.slug,
}
permissions = [permission_manage_orders, permission_manage_products]
response = staff_api_client.post_graphql(query, variables, permissions)
content = get_graphql_content(response)
assert field in content["data"]["productVariant"]
def test_variant_digital_content(
staff_api_client, permission_manage_products, digital_content, channel_USD
):
query = """
query Margin($id: ID!, $channel: String) {
productVariant(id: $id, channel: $channel) {
digitalContent{
id
}
}
}
"""
variant = digital_content.product_variant
variables = {
"id": graphene.Node.to_global_id("ProductVariant", variant.pk),
"channel": channel_USD.slug,
}
permissions = [permission_manage_products]
response = staff_api_client.post_graphql(query, variables, permissions)
content = get_graphql_content(response)
assert "digitalContent" in content["data"]["productVariant"]
assert "id" in content["data"]["productVariant"]["digitalContent"]
@pytest.mark.parametrize(
"collection_filter, count",
[
({"published": "PUBLISHED"}, 2),
({"published": "HIDDEN"}, 1),
({"search": "-published1"}, 1),
({"search": "Collection3"}, 1),
({"ids": [to_global_id("Collection", 2), to_global_id("Collection", 3)]}, 2),
],
)
def test_collections_query_with_filter(
collection_filter,
count,
query_collections_with_filter,
channel_USD,
staff_api_client,
permission_manage_products,
):
collections = Collection.objects.bulk_create(
[
Collection(
id=1,
name="Collection1",
slug="collection-published1",
description=dummy_editorjs("Test description"),
),
Collection(
id=2,
name="Collection2",
slug="collection-published2",
description=dummy_editorjs("Test description"),
),
Collection(
id=3,
name="Collection3",
slug="collection-unpublished",
description=dummy_editorjs("Test description"),
),
]
)
published = (True, True, False)
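    # Index-aligned with the bulk-created collections above: Collection1 and
    # Collection2 are published in channel_USD, Collection3 stays hidden.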
CollectionChannelListing.objects.bulk_create(
[
CollectionChannelListing(
channel=channel_USD, collection=collection, is_published=published[num]
)
for num, collection in enumerate(collections)
]
)
variables = {
"filter": collection_filter,
"channel": channel_USD.slug,
}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_collections_with_filter, variables)
content = get_graphql_content(response)
collections = content["data"]["collections"]["edges"]
assert len(collections) == count
QUERY_COLLECTIONS_WITH_SORT = """
query ($sort_by: CollectionSortingInput!, $channel: String) {
collections(first:5, sortBy: $sort_by, channel: $channel) {
edges{
node{
name
}
}
}
}
"""
@pytest.mark.parametrize(
"collection_sort, result_order",
[
({"field": "NAME", "direction": "ASC"}, ["Coll1", "Coll2", "Coll3"]),
({"field": "NAME", "direction": "DESC"}, ["Coll3", "Coll2", "Coll1"]),
({"field": "AVAILABILITY", "direction": "ASC"}, ["Coll2", "Coll1", "Coll3"]),
({"field": "AVAILABILITY", "direction": "DESC"}, ["Coll3", "Coll1", "Coll2"]),
({"field": "PRODUCT_COUNT", "direction": "ASC"}, ["Coll1", "Coll3", "Coll2"]),
({"field": "PRODUCT_COUNT", "direction": "DESC"}, ["Coll2", "Coll3", "Coll1"]),
],
)
def test_collections_query_with_sort(
collection_sort,
result_order,
staff_api_client,
permission_manage_products,
product,
channel_USD,
):
collections = Collection.objects.bulk_create(
[
Collection(name="Coll1", slug="collection-published1"),
Collection(name="Coll2", slug="collection-unpublished2"),
Collection(name="Coll3", slug="collection-published"),
]
)
published = (True, False, True)
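    # Coll1 and Coll3 are published in channel_USD while Coll2 is hidden (and later
    # receives the only product), which drives the AVAILABILITY and PRODUCT_COUNT
    # orderings expected above.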
CollectionChannelListing.objects.bulk_create(
[
CollectionChannelListing(
channel=channel_USD, collection=collection, is_published=published[num]
)
for num, collection in enumerate(collections)
]
)
product.collections.add(Collection.objects.get(name="Coll2"))
variables = {"sort_by": collection_sort, "channel": channel_USD.slug}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(QUERY_COLLECTIONS_WITH_SORT, variables)
content = get_graphql_content(response)
collections = content["data"]["collections"]["edges"]
for order, collection_name in enumerate(result_order):
assert collections[order]["node"]["name"] == collection_name
@pytest.mark.parametrize(
"category_filter, count",
[
({"search": "slug_"}, 4),
({"search": "Category1"}, 1),
({"search": "cat1"}, 3),
({"search": "Description cat1."}, 2),
({"search": "Subcategory_description"}, 1),
({"ids": [to_global_id("Category", 2), to_global_id("Category", 3)]}, 2),
],
)
def test_categories_query_with_filter(
category_filter,
count,
query_categories_with_filter,
staff_api_client,
permission_manage_products,
):
Category.objects.create(
id=1,
name="Category1",
slug="slug_category1",
description=dummy_editorjs("Description cat1."),
description_plaintext="Description cat1.",
)
Category.objects.create(
id=2,
name="Category2",
slug="slug_category2",
description=dummy_editorjs("Description cat2."),
description_plaintext="Description cat2.",
)
Category.objects.create(
id=3,
name="SubCategory",
slug="slug_subcategory",
parent=Category.objects.get(name="Category1"),
description=dummy_editorjs("Subcategory_description of cat1."),
description_plaintext="Subcategory_description of cat1.",
)
Category.objects.create(
id=4,
name="DoubleSubCategory",
slug="slug_subcategory4",
description=dummy_editorjs("Super important Description cat1."),
description_plaintext="Super important Description cat1.",
)
variables = {"filter": category_filter}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query_categories_with_filter, variables)
content = get_graphql_content(response)
assert content["data"]["categories"]["totalCount"] == count
QUERY_CATEGORIES_WITH_SORT = """
query ($sort_by: CategorySortingInput!) {
categories(first:5, sortBy: $sort_by) {
edges{
node{
name
}
}
}
}
"""
@pytest.mark.parametrize(
"category_sort, result_order",
[
(
{"field": "NAME", "direction": "ASC"},
["Cat1", "Cat2", "SubCat", "SubSubCat"],
),
(
{"field": "NAME", "direction": "DESC"},
["SubSubCat", "SubCat", "Cat2", "Cat1"],
),
(
{"field": "SUBCATEGORY_COUNT", "direction": "ASC"},
["Cat2", "SubSubCat", "Cat1", "SubCat"],
),
(
{"field": "SUBCATEGORY_COUNT", "direction": "DESC"},
["SubCat", "Cat1", "SubSubCat", "Cat2"],
),
(
{"field": "PRODUCT_COUNT", "direction": "ASC"},
["Cat2", "SubCat", "SubSubCat", "Cat1"],
),
(
{"field": "PRODUCT_COUNT", "direction": "DESC"},
["Cat1", "SubSubCat", "SubCat", "Cat2"],
),
],
)
def test_categories_query_with_sort(
category_sort,
result_order,
staff_api_client,
permission_manage_products,
product_type,
):
cat1 = Category.objects.create(
name="Cat1",
slug="slug_category1",
description=dummy_editorjs("Description cat1."),
)
Product.objects.create(
name="Test",
slug="test",
product_type=product_type,
category=cat1,
)
Category.objects.create(
name="Cat2",
slug="slug_category2",
description=dummy_editorjs("Description cat2."),
)
Category.objects.create(
name="SubCat",
slug="slug_subcategory1",
parent=Category.objects.get(name="Cat1"),
description=dummy_editorjs("Subcategory_description of cat1."),
)
subsubcat = Category.objects.create(
name="SubSubCat",
slug="slug_subcategory2",
parent=Category.objects.get(name="SubCat"),
description=dummy_editorjs("Subcategory_description of cat1."),
)
Product.objects.create(
name="Test2",
slug="test2",
product_type=product_type,
category=subsubcat,
)
variables = {"sort_by": category_sort}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(QUERY_CATEGORIES_WITH_SORT, variables)
content = get_graphql_content(response)
categories = content["data"]["categories"]["edges"]
for order, category_name in enumerate(result_order):
assert categories[order]["node"]["name"] == category_name
@pytest.mark.parametrize(
"product_type_filter, count",
[
({"configurable": "CONFIGURABLE"}, 2), # has_variants
({"configurable": "SIMPLE"}, 1), # !has_variants
({"productType": "DIGITAL"}, 1),
({"productType": "SHIPPABLE"}, 2), # is_shipping_required
({"kind": "NORMAL"}, 2),
({"kind": "GIFT_CARD"}, 1),
],
)
def test_product_type_query_with_filter(
product_type_filter, count, staff_api_client, permission_manage_products
):
query = """
query ($filter: ProductTypeFilterInput!, ) {
productTypes(first:5, filter: $filter) {
edges{
node{
id
name
}
}
}
}
"""
ProductType.objects.bulk_create(
[
ProductType(
name="Digital Type",
slug="digital-type",
has_variants=True,
is_shipping_required=False,
is_digital=True,
kind=ProductTypeKind.NORMAL,
),
ProductType(
name="Tools",
slug="tools",
has_variants=True,
is_shipping_required=True,
is_digital=False,
kind=ProductTypeKind.NORMAL,
),
ProductType(
name="Books",
slug="books",
has_variants=False,
is_shipping_required=True,
is_digital=False,
kind=ProductTypeKind.GIFT_CARD,
),
]
)
variables = {"filter": product_type_filter}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
product_types = content["data"]["productTypes"]["edges"]
assert len(product_types) == count
QUERY_PRODUCT_TYPE_WITH_SORT = """
query ($sort_by: ProductTypeSortingInput!) {
productTypes(first:5, sortBy: $sort_by) {
edges{
node{
name
}
}
}
}
"""
@pytest.mark.parametrize(
"product_type_sort, result_order",
[
({"field": "NAME", "direction": "ASC"}, ["Digital", "Subscription", "Tools"]),
({"field": "NAME", "direction": "DESC"}, ["Tools", "Subscription", "Digital"]),
# is_digital
(
{"field": "DIGITAL", "direction": "ASC"},
["Subscription", "Tools", "Digital"],
),
(
{"field": "DIGITAL", "direction": "DESC"},
["Digital", "Tools", "Subscription"],
),
# is_shipping_required
(
{"field": "SHIPPING_REQUIRED", "direction": "ASC"},
["Digital", "Subscription", "Tools"],
),
(
{"field": "SHIPPING_REQUIRED", "direction": "DESC"},
["Tools", "Subscription", "Digital"],
),
],
)
def test_product_type_query_with_sort(
product_type_sort, result_order, staff_api_client, permission_manage_products
):
ProductType.objects.bulk_create(
[
ProductType(
name="Digital",
slug="digital",
has_variants=True,
is_shipping_required=False,
is_digital=True,
),
ProductType(
name="Tools",
slug="tools",
has_variants=True,
is_shipping_required=True,
is_digital=False,
),
ProductType(
name="Subscription",
slug="subscription",
has_variants=False,
is_shipping_required=False,
is_digital=False,
),
]
)
variables = {"sort_by": product_type_sort}
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(QUERY_PRODUCT_TYPE_WITH_SORT, variables)
content = get_graphql_content(response)
product_types = content["data"]["productTypes"]["edges"]
for order, product_type_name in enumerate(result_order):
assert product_types[order]["node"]["name"] == product_type_name
NOT_EXISTS_IDS_COLLECTIONS_QUERY = """
query ($filter: ProductTypeFilterInput!) {
productTypes(first: 5, filter: $filter) {
edges {
node {
id
name
}
}
}
}
"""
def test_product_types_query_ids_not_exists(user_api_client, category):
query = NOT_EXISTS_IDS_COLLECTIONS_QUERY
variables = {"filter": {"ids": ["fTEJRuFHU6fd2RU=", "2XwnQNNhwCdEjhP="]}}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response, ignore_errors=True)
message_error = '{"ids": [{"message": "Invalid ID specified.", "code": ""}]}'
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == message_error
assert content["data"]["productTypes"] is None
QUERY_AVAILABLE_ATTRIBUTES = """
query($productTypeId:ID!, $filters: AttributeFilterInput) {
productType(id: $productTypeId) {
availableAttributes(first: 10, filter: $filters) {
edges {
node {
id
slug
}
}
}
}
}
"""
def test_product_type_get_unassigned_product_type_attributes(
staff_api_client, permission_manage_products
):
query = QUERY_AVAILABLE_ATTRIBUTES
target_product_type, ignored_product_type = ProductType.objects.bulk_create(
[
ProductType(name="Type 1", slug="type-1"),
ProductType(name="Type 2", slug="type-2"),
]
)
unassigned_attributes = list(
Attribute.objects.bulk_create(
[
Attribute(slug="size", name="Size", type=AttributeType.PRODUCT_TYPE),
Attribute(
slug="weight", name="Weight", type=AttributeType.PRODUCT_TYPE
),
Attribute(
slug="thickness", name="Thickness", type=AttributeType.PRODUCT_TYPE
),
]
)
)
unassigned_page_attributes = list(
Attribute.objects.bulk_create(
[
Attribute(slug="length", name="Length", type=AttributeType.PAGE_TYPE),
Attribute(slug="width", name="Width", type=AttributeType.PAGE_TYPE),
]
)
)
assigned_attributes = list(
Attribute.objects.bulk_create(
[
Attribute(slug="color", name="Color", type=AttributeType.PRODUCT_TYPE),
Attribute(slug="type", name="Type", type=AttributeType.PRODUCT_TYPE),
]
)
)
# Ensure that assigning them to another product type
# doesn't return an invalid response
ignored_product_type.product_attributes.add(*unassigned_attributes)
ignored_product_type.product_attributes.add(*unassigned_page_attributes)
# Assign the other attributes to the target product type
target_product_type.product_attributes.add(*assigned_attributes)
gql_unassigned_attributes = get_graphql_content(
staff_api_client.post_graphql(
query,
{
"productTypeId": graphene.Node.to_global_id(
"ProductType", target_product_type.pk
)
},
permissions=[permission_manage_products],
)
)["data"]["productType"]["availableAttributes"]["edges"]
assert len(gql_unassigned_attributes) == len(
unassigned_attributes
), gql_unassigned_attributes
received_ids = sorted((attr["node"]["id"] for attr in gql_unassigned_attributes))
expected_ids = sorted(
(
graphene.Node.to_global_id("Attribute", attr.pk)
for attr in unassigned_attributes
)
)
assert received_ids == expected_ids
def test_product_type_filter_unassigned_attributes(
staff_api_client, permission_manage_products, product_type_attribute_list
):
expected_attribute = product_type_attribute_list[0]
query = QUERY_AVAILABLE_ATTRIBUTES
product_type = ProductType.objects.create(
name="Empty Type", kind=ProductTypeKind.NORMAL
)
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
filters = {"search": expected_attribute.name}
found_attributes = get_graphql_content(
staff_api_client.post_graphql(
query,
{"productTypeId": product_type_id, "filters": filters},
permissions=[permission_manage_products],
)
)["data"]["productType"]["availableAttributes"]["edges"]
assert len(found_attributes) == 1
_, attribute_id = graphene.Node.from_global_id(found_attributes[0]["node"]["id"])
assert attribute_id == str(expected_attribute.pk)
QUERY_FILTER_PRODUCT_TYPES = """
query($filters: ProductTypeFilterInput) {
productTypes(first: 10, filter: $filters) {
edges {
node {
name
}
}
}
}
"""
@pytest.mark.parametrize(
"search, expected_names",
(
("", ["The best juices", "The best beers", "The worst beers"]),
("best", ["The best juices", "The best beers"]),
("worst", ["The worst beers"]),
("average", []),
),
)
def test_filter_product_types_by_custom_search_value(
api_client, search, expected_names
):
query = QUERY_FILTER_PRODUCT_TYPES
ProductType.objects.bulk_create(
[
ProductType(name="The best juices", slug="best-juices"),
ProductType(name="The best beers", slug="best-beers"),
ProductType(name="The worst beers", slug="worst-beers"),
]
)
variables = {"filters": {"search": search}}
results = get_graphql_content(api_client.post_graphql(query, variables))["data"][
"productTypes"
]["edges"]
assert len(results) == len(expected_names)
matched_names = sorted([result["node"]["name"] for result in results])
assert matched_names == sorted(expected_names)
def test_product_filter_by_attribute_values(
user_api_client,
permission_manage_products,
color_attribute,
pink_attribute_value,
product_with_variant_with_two_attributes,
channel_USD,
):
query = """
query Products($filters: ProductFilterInput, $channel: String) {
products(first: 5, filter: $filters, channel: $channel) {
edges {
node {
id
name
attributes {
attribute {
name
slug
}
values {
name
slug
}
}
}
}
}
}
"""
variables = {
"attributes": [
{"slug": color_attribute.slug, "values": [pink_attribute_value.slug]}
],
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert not content["data"]["products"]["edges"] == [
{
"node": {
"attributes": [],
"name": product_with_variant_with_two_attributes.name,
}
}
]
MUTATION_CREATE_PRODUCT_WITH_STOCKS = """
mutation createProduct(
$productType: ID!,
$category: ID!
$name: String!,
$sku: String,
$stocks: [StockInput!],
$basePrice: PositiveDecimal!,
$trackInventory: Boolean,
$country: CountryCode
)
{
productCreate(
input: {
category: $category,
productType: $productType,
name: $name,
sku: $sku,
stocks: $stocks,
trackInventory: $trackInventory,
basePrice: $basePrice,
})
{
product {
id
name
variants{
id
sku
trackInventory
quantityAvailable(countryCode: $country)
}
}
errors {
message
field
code
}
}
}
"""
def test_create_stocks_failed(product_with_single_variant, warehouse):
variant = product_with_single_variant.variants.first()
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
stocks_data = [
{"quantity": 10, "warehouse": "123"},
{"quantity": 10, "warehouse": "321"},
]
warehouses = [warehouse, second_warehouse]
with pytest.raises(ValidationError):
create_stocks(variant, stocks_data, warehouses)
def test_create_stocks(variant, warehouse):
second_warehouse = Warehouse.objects.get(pk=warehouse.pk)
second_warehouse.slug = "second warehouse"
second_warehouse.pk = None
second_warehouse.save()
assert variant.stocks.count() == 0
stocks_data = [
{"quantity": 10, "warehouse": "123"},
{"quantity": 10, "warehouse": "321"},
]
warehouses = [warehouse, second_warehouse]
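    # The "warehouse" values in stocks_data look like placeholder ids; the warehouse
    # objects actually used come from the `warehouses` list passed alongside, as the
    # assertions below suggest.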
create_stocks(variant, stocks_data, warehouses)
assert variant.stocks.count() == len(stocks_data)
assert {stock.warehouse.pk for stock in variant.stocks.all()} == {
warehouse.pk for warehouse in warehouses
}
assert {stock.quantity for stock in variant.stocks.all()} == {
data["quantity"] for data in stocks_data
}
def test_update_or_create_variant_stocks(variant, warehouses):
Stock.objects.create(
product_variant=variant,
warehouse=warehouses[0],
quantity=5,
)
stocks_data = [
{"quantity": 10, "warehouse": "123"},
{"quantity": 10, "warehouse": "321"},
]
ProductVariantStocksUpdate.update_or_create_variant_stocks(
variant, stocks_data, warehouses, get_plugins_manager()
)
variant.refresh_from_db()
assert variant.stocks.count() == 2
assert {stock.warehouse.pk for stock in variant.stocks.all()} == {
warehouse.pk for warehouse in warehouses
}
assert {stock.quantity for stock in variant.stocks.all()} == {
data["quantity"] for data in stocks_data
}
@patch("saleor.plugins.manager.PluginsManager.product_variant_back_in_stock")
def test_update_or_create_variant_stocks_when_stock_out_of_quantity(
back_in_stock_webhook_trigger, variant, warehouses
):
stock = Stock.objects.create(
product_variant=variant,
warehouse=warehouses[0],
quantity=-5,
)
stocks_data = [{"quantity": 10, "warehouse": "321"}]
ProductVariantStocksUpdate.update_or_create_variant_stocks(
variant, stocks_data, warehouses, get_plugins_manager()
)
variant.refresh_from_db()
flush_post_commit_hooks()
assert variant.stocks.count() == 1
assert {stock.quantity for stock in variant.stocks.all()} == {
data["quantity"] for data in stocks_data
}
back_in_stock_webhook_trigger.assert_called_once_with(stock)
assert variant.stocks.all()[0].quantity == 10
def test_update_or_create_variant_stocks_empty_stocks_data(variant, warehouses):
Stock.objects.create(
product_variant=variant,
warehouse=warehouses[0],
quantity=5,
)
ProductVariantStocksUpdate.update_or_create_variant_stocks(
variant, [], warehouses, get_plugins_manager()
)
variant.refresh_from_db()
assert variant.stocks.count() == 1
stock = variant.stocks.first()
assert stock.warehouse == warehouses[0]
assert stock.quantity == 5
@patch("saleor.plugins.manager.PluginsManager.product_variant_back_in_stock")
@patch("saleor.plugins.manager.PluginsManager.product_variant_out_of_stock")
def test_update_or_create_variant_with_back_in_stock_webhooks_only_success(
product_variant_stock_out_of_stock_webhook,
product_variant_back_in_stock_webhook,
settings,
variant,
warehouses,
info,
):
Stock.objects.bulk_create(
[
Stock(product_variant=variant, warehouse=warehouse)
for warehouse in warehouses
]
)
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
info.context.plugins = get_plugins_manager()
stocks_data = [
{"quantity": 10, "warehouse": "123"},
]
assert variant.stocks.aggregate(Sum("quantity"))["quantity__sum"] == 0
ProductVariantStocksUpdate.update_or_create_variant_stocks(
variant, stocks_data, warehouses, info.context.plugins
)
assert variant.stocks.aggregate(Sum("quantity"))["quantity__sum"] == 10
flush_post_commit_hooks()
product_variant_back_in_stock_webhook.assert_called_once_with(
Stock.objects.all()[1]
)
product_variant_stock_out_of_stock_webhook.assert_not_called()
@patch("saleor.plugins.manager.PluginsManager.product_variant_back_in_stock")
@patch("saleor.plugins.manager.PluginsManager.product_variant_out_of_stock")
def test_update_or_create_variant_with_back_in_stock_webhooks_only_failed(
product_variant_stock_out_of_stock_webhook,
product_variant_back_in_stock_webhook,
settings,
variant,
warehouses,
info,
):
Stock.objects.bulk_create(
[
Stock(product_variant=variant, warehouse=warehouse)
for warehouse in warehouses
]
)
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
info.context.plugins = get_plugins_manager()
stocks_data = [
{"quantity": 0, "warehouse": "123"},
]
assert variant.stocks.aggregate(Sum("quantity"))["quantity__sum"] == 0
ProductVariantStocksUpdate.update_or_create_variant_stocks(
variant, stocks_data, warehouses, info.context.plugins
)
assert variant.stocks.aggregate(Sum("quantity"))["quantity__sum"] == 0
flush_post_commit_hooks()
product_variant_back_in_stock_webhook.assert_not_called()
product_variant_stock_out_of_stock_webhook.assert_called_once_with(
Stock.objects.all()[1]
)
@patch("saleor.plugins.manager.PluginsManager.product_variant_back_in_stock")
@patch("saleor.plugins.manager.PluginsManager.product_variant_out_of_stock")
def test_update_or_create_variant_stocks_with_out_of_stock_webhook_only(
product_variant_stock_out_of_stock_webhook,
product_variant_back_in_stock_webhook,
settings,
variant,
warehouses,
info,
):
Stock.objects.bulk_create(
[
Stock(product_variant=variant, warehouse=warehouse, quantity=5)
for warehouse in warehouses
]
)
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
info.context.plugins = get_plugins_manager()
stocks_data = [
{"quantity": 0, "warehouse": "123"},
{"quantity": 2, "warehouse": "321"},
]
assert variant.stocks.aggregate(Sum("quantity"))["quantity__sum"] == 10
ProductVariantStocksUpdate.update_or_create_variant_stocks(
variant, stocks_data, warehouses, info.context.plugins
)
assert variant.stocks.aggregate(Sum("quantity"))["quantity__sum"] == 2
flush_post_commit_hooks()
product_variant_stock_out_of_stock_webhook.assert_called_once_with(
Stock.objects.last()
)
product_variant_back_in_stock_webhook.assert_not_called()
# Because we use Scalars for Weight, this test query only covers the scenario where
# the weight value is passed via a variable.
MUTATION_CREATE_PRODUCT_WITH_WEIGHT_GQL_VARIABLE = """
mutation createProduct(
$productType: ID!,
$category: ID!
$name: String!,
$weight: WeightScalar)
{
productCreate(
input: {
category: $category,
productType: $productType,
name: $name,
weight: $weight
})
{
product {
id
weight{
value
unit
}
}
errors {
message
field
code
}
}
}
"""
@pytest.mark.parametrize(
"weight, expected_weight_value",
(
("0", 0),
(0, 0),
(11.11, 11.11),
(11, 11.0),
("11.11", 11.11),
({"value": 11.11, "unit": "kg"}, 11.11),
({"value": 11, "unit": "g"}, 0.011),
({"value": "1", "unit": "ounce"}, 0.028),
),
)
def test_create_product_with_weight_variable(
weight,
expected_weight_value,
staff_api_client,
category,
permission_manage_products,
product_type_without_variant,
site_settings,
):
category_id = graphene.Node.to_global_id("Category", category.pk)
product_type_id = graphene.Node.to_global_id(
"ProductType", product_type_without_variant.pk
)
variables = {
"category": category_id,
"productType": product_type_id,
"name": "Test",
"weight": weight,
}
response = staff_api_client.post_graphql(
MUTATION_CREATE_PRODUCT_WITH_WEIGHT_GQL_VARIABLE,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
result_weight = content["data"]["productCreate"]["product"]["weight"]
assert result_weight["value"] == expected_weight_value
assert result_weight["unit"] == site_settings.default_weight_unit.upper()
@pytest.mark.parametrize(
"weight, expected_weight_value",
(
("0", 0),
(0, 0),
("11.11", 11.11),
("11", 11.0),
('"11.11"', 11.11),
('{value: 11.11, unit: "kg"}', 11.11),
('{value: 11, unit: "g"}', 0.011),
('{value: "1", unit: "ounce"}', 0.028),
),
)
def test_create_product_with_weight_input(
weight,
expected_weight_value,
staff_api_client,
category,
permission_manage_products,
product_type_without_variant,
site_settings,
):
    # Because we use Scalars for Weight, this test query only covers the scenario
    # where the weight value is passed directly in the input.
query = f"""
mutation createProduct(
$productType: ID!,
$category: ID!,
$name: String!)
{{
productCreate(
input: {{
category: $category,
productType: $productType,
name: $name,
weight: {weight}
}})
{{
product {{
id
weight{{
value
unit
}}
}}
errors {{
message
field
code
}}
}}
}}
"""
category_id = graphene.Node.to_global_id("Category", category.pk)
product_type_id = graphene.Node.to_global_id(
"ProductType", product_type_without_variant.pk
)
variables = {
"category": category_id,
"productType": product_type_id,
"name": "Test",
}
response = staff_api_client.post_graphql(
query,
variables,
permissions=[permission_manage_products],
)
content = get_graphql_content(response)
result_weight = content["data"]["productCreate"]["product"]["weight"]
assert result_weight["value"] == expected_weight_value
assert result_weight["unit"] == site_settings.default_weight_unit.upper()
def test_hidden_product_access_with_proper_permissions(
staff_api_client,
product_list,
channel_USD,
permission_manage_products,
):
hidden_product = product_list[0]
hidden_product.channel_listings.all().update(is_published=False)
variables = {
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables=variables,
permissions=(permission_manage_products,),
check_no_permissions=False,
)
content = get_graphql_content(response)
total_count = content["data"]["products"]["totalCount"]
assert total_count == 3
def test_hidden_product_access_with_permission_manage_orders(
staff_api_client,
product_list,
channel_USD,
permission_manage_orders,
):
hidden_product = product_list[0]
hidden_product.channel_listings.all().update(is_published=False)
variables = {
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables=variables,
permissions=(permission_manage_orders,),
check_no_permissions=False,
)
content = get_graphql_content(response)
total_count = content["data"]["products"]["totalCount"]
assert total_count == 3
def test_hidden_product_access_with_permission_manage_discounts(
staff_api_client,
product_list,
channel_USD,
permission_manage_discounts,
):
hidden_product = product_list[0]
hidden_product.channel_listings.all().update(is_published=False)
variables = {
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables=variables,
permissions=(permission_manage_discounts,),
check_no_permissions=False,
)
content = get_graphql_content(response)
total_count = content["data"]["products"]["totalCount"]
assert total_count == 3
def test_hidden_product_access_with_permission_manage_channels(
staff_api_client,
product_list,
channel_USD,
permission_manage_channels,
):
hidden_product = product_list[0]
hidden_product.channel_listings.all().update(is_published=False)
variables = {
"channel": channel_USD.slug,
}
response = staff_api_client.post_graphql(
QUERY_FETCH_ALL_PRODUCTS,
variables=variables,
permissions=(permission_manage_channels,),
check_no_permissions=False,
)
content = get_graphql_content(response)
total_count = content["data"]["products"]["totalCount"]
assert total_count == 2
def test_query_product_for_federation(api_client, product, channel_USD):
product_id = graphene.Node.to_global_id("Product", product.pk)
variables = {
"representations": [
{
"__typename": "Product",
"id": product_id,
"channel": channel_USD.slug,
},
],
}
query = """
query GetProductInFederation($representations: [_Any]) {
_entities(representations: $representations) {
__typename
... on Product {
id
name
}
}
}
"""
response = api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["_entities"] == [
{
"__typename": "Product",
"id": product_id,
"name": product.name,
}
]
def test_query_product_media_for_federation(
api_client, product_with_image, channel_USD
):
media = product_with_image.media.first()
media_id = graphene.Node.to_global_id("ProductMedia", media.pk)
variables = {
"representations": [
{
"__typename": "ProductMedia",
"id": media_id,
},
],
}
query = """
query GetProductMediaInFederation($representations: [_Any]) {
_entities(representations: $representations) {
__typename
... on ProductMedia {
id
url
}
}
}
"""
response = api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["_entities"] == [
{
"__typename": "ProductMedia",
"id": media_id,
"url": "http://testserver/media/products/product.jpg",
}
]
def test_query_product_type_for_federation(api_client, product, channel_USD):
product_type = product.product_type
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
variables = {
"representations": [
{
"__typename": "ProductType",
"id": product_type_id,
},
],
}
query = """
query GetProductTypeInFederation($representations: [_Any]) {
_entities(representations: $representations) {
__typename
... on ProductType {
id
name
}
}
}
"""
response = api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["_entities"] == [
{
"__typename": "ProductType",
"id": product_type_id,
"name": product_type.name,
}
]
| 32.087897
| 88
| 0.644341
|
4a147a19184ad9bff3c8326f38718e7ba30d350c
| 6,653
|
py
|
Python
|
Scripts/Miscellaneous/Research_paper_latex_parser/get_details.py
|
ShivangiPatel102/Python_and_the_Web
|
6d3b55aef20feeda3cfff941d7bbdc26cbcc70d2
|
[
"MIT"
] | 437
|
2020-09-24T13:57:39.000Z
|
2022-03-30T12:45:56.000Z
|
Scripts/Miscellaneous/Research_paper_latex_parser/get_details.py
|
ShivangiPatel102/Python_and_the_Web
|
6d3b55aef20feeda3cfff941d7bbdc26cbcc70d2
|
[
"MIT"
] | 355
|
2020-09-24T13:53:16.000Z
|
2022-03-27T04:20:40.000Z
|
Scripts/Miscellaneous/Research_paper_latex_parser/get_details.py
|
ShivangiPatel102/Python_and_the_Web
|
6d3b55aef20feeda3cfff941d7bbdc26cbcc70d2
|
[
"MIT"
] | 315
|
2020-09-24T18:41:19.000Z
|
2022-03-07T05:53:01.000Z
|
from os.path import isfile, join
import re
import json
from os import listdir
import argparse
import os
from tqdm import tqdm
class essential_data:
"""
Extract essential data from the tex document.
Essential data includes - title, author, abstract, introduction, conclusions, results, acknowledgments.
"""
def __init__(self, tex_data):
self.tex_data = tex_data
def get_elements(self):
data = self.tex_data
data_dict = {}
"""
The next few lines extract data for the section tags specified in the latex.
        Regular expressions are used to separate the heading (h) from the content (c).
The heading and content are then added to a dictionary object.
"""
sections = re.findall(r"section{(.*?)\\", data, re.S)
for obj in sections:
h = re.findall(r"(.*?)}", obj, re.S)
c = obj.replace(h[0] + "}", " ")
data_dict["%s" % (h[0])] = "%s" % (c)
data = data.replace("section{" + obj, " ")
"""
The next few lines extract data for the begin tags specified in the latex.
        Regular expressions are used to separate the heading (h) from the content (c).
The heading and content are then added to a dictionary object.
"""
begins = re.findall(r"\\begin{(.*?)\\end", data, re.S)
for obj in begins:
h = re.findall(r"(.*?)}", obj, re.S)
if len(h) > 1:
continue
c = obj.replace(h[0] + "}", " ")
data_dict["%s" % (h[0])] = "%s" % (c)
data = data.replace("\\begin{" + obj + "\\end", " ")
return data_dict
def get_author(self):
"""
The Author tag is a specially mentioned tag in latex format.
Hence the Author name is extracted from this tag.
The user can choose to specify the tag as 'Author' or 'author'.
Hence the `[Aa]` is included in the regex.
"""
author = re.findall(r"[Aa]uthor(s?){(.*?)}", self.tex_data, re.S)
return author[0][1]
def get_title(self):
"""
The Title tag is a specially mentioned tag in latex format.
Hence the title is extracted from this tag.
The user can choose to specify the tag as 'Title' or 'title'.
Hence the `[Tt]` is included in the regex.
"""
title = re.findall(r"[Tt]itle{(.*?)}", self.tex_data, re.S)
return title[0]
def get_ack(self):
"""
        The Acknowledgments tag is a specially mentioned tag in latex format.
        Hence the acknowledgments are extracted from this tag.
        The user can choose to specify the tag as 'Acknowledgment' or 'acknowledgment'.
        Hence the `[Aa]` is included in the regex.
        The user can also append an 's' and write it as 'Acknowledgments'.
        Hence the s is made optional at the end by writing `(s?)` in the regex.
"""
acknowledgments = re.findall(
r"\\[Aa]cknowledgment(s?)(.*?)\\", self.tex_data, re.S
)
return acknowledgments[0][1]
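# Illustrative sketch (not part of the original script): a hypothetical, minimal
# LaTeX string run through essential_data, to show what the regexes above extract.
# The sample text and the helper name are assumptions for demonstration only.
def _demo_essential_data():
    sample = (
        "\\title{A Sample Paper} "
        "\\author{Jane Doe} "
        "\\section{Introduction} Intro text here. \\"
    )
    ed = essential_data(sample)
    title = ed.get_title()  # -> "A Sample Paper"
    author = ed.get_author()  # -> "Jane Doe"
    elements = ed.get_elements()  # heading -> content, e.g. {"Introduction": "  Intro text here. "}
    return title, author, elements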
class clean_data:
"""
Contains functions to purge all unwanted elements from the tex file.
"""
def __init__(self, tex_data):
self.tex_data = tex_data
def purge_images(self):
"""
        Purges images from the tex data using the tag '\begin{figure}'
"""
imgs = re.findall(r"begin{figure}(.*?)end{figure}", self.tex_data, re.S)
start = "\\begin{figure}"
end = "end{figure}"
imgs = [start + img + end for img in imgs]
for img in imgs:
self.tex_data = self.tex_data.replace(img, " ")
def purge_tables(self):
"""
        Purges tables from the tex data using the tag '\begin{table}'
"""
tables = re.findall(r"begin{table}(.*?)end{table}", self.tex_data, re.S)
start = "\\begin{table}"
end = "end{table}"
tables = [start + table + end for table in tables]
for table in tables:
self.tex_data = self.tex_data.replace(table, " ")
def purge_equations(self):
"""
        Purges equations from the tex data using the tag '\begin{equation}'
"""
equations = re.findall(
r"begin{equation}(.*?)end{equation}", self.tex_data, re.S
)
start = "\\begin{equation}"
end = "end{equation}"
equations = [start + equation + end for equation in equations]
for equation in equations:
self.tex_data = self.tex_data.replace(equation, " ")
# python get_details.py -p papers -o op_json.json
if __name__ == "__main__":
# Define description of the script.
parser = argparse.ArgumentParser(
description="extract title,author,abstract,introduction,results,conclusions and acknowledgments from given set of research papers."
)
# Define inputs required for the script to run.
parser.add_argument(
"-parent",
help="enter path to parent directory containing all research papers",
dest="parent_path",
type=str,
required=True,
)
parser.add_argument(
"-output", help="enter path of output file", dest="op", type=str, required=True
)
# Parse the arguments received from the command.
args = parser.parse_args()
directory_path = args.parent_path
op_file = args.op
all_data = []
# Store all files from the mentioned directory.
all_files = [f for f in listdir(directory_path) if isfile(join(directory_path, f))]
    # Read all the files and extract information from each file.
for tex_file in tqdm(all_files):
p = os.path.join(directory_path, tex_file)
with open(p, "r", encoding="latin-1") as f:
data_lst = f.readlines()
data = " ".join([str(elem) for elem in data_lst])
        # Use clean_data class methods to remove images, tables and equations.
cd = clean_data(data)
cd.purge_images()
cd.purge_tables()
cd.purge_equations()
        # Use essential_data class methods to extract the required data and store it in a JSON object.
ed = essential_data(cd.tex_data)
d = {}
d.update({"author": ed.get_author()})
d.update({"title": ed.get_title()})
d.update(ed.get_elements())
d.update({"acknowledgement": ed.get_ack()})
all_data.append(d)
# Dump the json output object to the output file.
with open(op_file, "w") as outfile:
json.dump(all_data, outfile, indent=4)
| 35.388298
| 139
| 0.589809
|
4a147b0f34248151691bd0310d1ec05c6b7a942c
| 2,011
|
py
|
Python
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_7481_glm_startvals.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | 6,098
|
2015-05-22T02:46:12.000Z
|
2022-03-31T16:54:51.000Z
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_7481_glm_startvals.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | 2,517
|
2015-05-23T02:10:54.000Z
|
2022-03-30T17:03:39.000Z
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_7481_glm_startvals.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | 2,199
|
2015-05-22T04:09:55.000Z
|
2022-03-28T22:20:45.000Z
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
import math
# test startval to set GLM coefficients
def set_glm_startvals():
    # read in the dataset and construct the training set
d = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
mL = glm(family='binomial')
mL.train(training_frame=d,x=[2,3,4,5,6,7,8],y=1)
mLcoeff = mL.coef()
r = glm.getGLMRegularizationPath(mL)
rcoeff = r["coefficients"][0]
responseMean = d[1].mean()
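    # The log-odds (logit) of the mean response is used as the starting intercept;
    # with the other coefficients at zero this mirrors GLM's own starting condition,
    # which is why startval1 is expected to reproduce mL's coefficients below.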
initIntercept = math.log(responseMean/(1.0-responseMean))
startval1 = [0,0,0,0,0,0,0,initIntercept]
startval2 = [rcoeff["AGE"], rcoeff["RACE"], rcoeff["DPROS"], rcoeff["DCAPS"], rcoeff["PSA"], rcoeff["VOL"],
rcoeff["GLEASON"], rcoeff["Intercept"]]
startvalBad = [0,0]
ml1 = glm(family="binomial", startval = startval1) # same starting condition as GLM
ml1.train(training_frame=d,x=[2,3,4,5,6,7,8],y=1)
ml1Coeff = ml1.coef()
pyunit_utils.assertEqualCoeffDicts(mLcoeff, ml1Coeff , tol = 1e-6) # coeffs should be the same
ml2 = glm(family="binomial", startval = startval2) # different starting condition from GLM
ml2.train(training_frame=d,x=[2,3,4,5,6,7,8],y=1)
ml2Coeff = ml2.coef()
try:
pyunit_utils.assertEqualCoeffDicts(mLcoeff, ml2Coeff , tol = 1e-6)
assert False, "Should have thrown an error as coefficients are different!"
except Exception as ex:
print(ex)
try:
mlbad = glm(family="binomial", startval = startvalBad)
mlbad.train(training_frame=d,x=[2,3,4,5,6,7,8],y=1)
assert False, "Should have thrown an error with bad GLM initial values!"
except Exception as ex:
print(ex)
print("Test completed! Success!")
if __name__ == "__main__":
pyunit_utils.standalone_test(set_glm_startvals)
else:
set_glm_startvals()
| 38.673077
| 112
| 0.662357
|
4a147b94ef9132a2a8f1e94132548ce4e533dede
| 942
|
py
|
Python
|
setup.py
|
recoded-co/django-notification
|
29a98f688cd76d690228397d572fc7931f377c80
|
[
"MIT"
] | 1
|
2015-08-03T12:01:26.000Z
|
2015-08-03T12:01:26.000Z
|
setup.py
|
recoded-co/django-notification
|
29a98f688cd76d690228397d572fc7931f377c80
|
[
"MIT"
] | null | null | null |
setup.py
|
recoded-co/django-notification
|
29a98f688cd76d690228397d572fc7931f377c80
|
[
"MIT"
] | 2
|
2016-03-13T01:24:23.000Z
|
2020-01-08T09:08:00.000Z
|
from setuptools import setup, find_packages
setup(
name="django-notification",
version=__import__("notification").__version__,
description="User notification management for the Django web framework",
long_description=open("docs/usage.rst").read(),
author="James Tauber",
author_email="jtauber@jtauber.com",
url="https://github.com/pinax/django-notification",
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Framework :: Django",
],
include_package_data=True,
test_suite='runtests',
install_requires=[
'django>=1.4',
],
zip_safe=False,
)
| 31.4
| 76
| 0.646497
|
4a147ce4cccae546bbc4575def74e6a6406c1c17
| 287
|
py
|
Python
|
test/telegram_bot.py
|
CaesiumY/movie-crawling-to-notify
|
d3167ed25db77e9aec3386b04a9ceaf0b46449b9
|
[
"MIT"
] | 2
|
2019-09-29T11:58:57.000Z
|
2019-10-14T13:48:28.000Z
|
test/telegram_bot.py
|
CaesiumY/movie-crawling-to-notify
|
d3167ed25db77e9aec3386b04a9ceaf0b46449b9
|
[
"MIT"
] | 2
|
2019-09-29T11:42:02.000Z
|
2021-06-02T00:27:38.000Z
|
test/telegram_bot.py
|
CaesiumY/movie-crawling-to-notify
|
d3167ed25db77e9aec3386b04a9ceaf0b46449b9
|
[
"MIT"
] | null | null | null |
import telegram
import json
with open('token.json') as f:
token_json = json.loads(f.read())
print(token_json["user_id"])
bot = telegram.Bot(token=token_json["token"])
for i in bot.getUpdates():
print(i.message)
bot.sendMessage(chat_id=token_json["user_id"], text="테스트 메세지")
| 19.133333
| 62
| 0.710801
|
4a147d11a520e873591eb9916de7ea51eea627dc
| 105
|
py
|
Python
|
openslides/saml/exceptions.py
|
swilde/OpenSlides
|
23ae32a75892005632784652d108836d1ba09da9
|
[
"MIT"
] | 3
|
2021-02-11T20:45:58.000Z
|
2022-02-09T21:59:42.000Z
|
openslides/saml/exceptions.py
|
swilde/OpenSlides
|
23ae32a75892005632784652d108836d1ba09da9
|
[
"MIT"
] | 2
|
2021-11-02T15:48:16.000Z
|
2022-03-02T08:38:19.000Z
|
openslides/saml/exceptions.py
|
swilde/OpenSlides
|
23ae32a75892005632784652d108836d1ba09da9
|
[
"MIT"
] | 3
|
2021-01-18T11:44:05.000Z
|
2022-01-19T16:00:23.000Z
|
from openslides.utils.exceptions import OpenSlidesError
class SamlException(OpenSlidesError):
pass
| 17.5
| 55
| 0.828571
|
4a147dbc421038407af47e0f38700e1c3fff7106
| 336
|
py
|
Python
|
blog_mocks/tests/test_function_pytest.py
|
ghjan/blog-projects
|
aa6925724e457bec276d98cf7b55b5cdaf2ab5f4
|
[
"MIT"
] | 66
|
2017-11-18T06:41:39.000Z
|
2021-09-02T15:47:08.000Z
|
blog_mocks/tests/test_function_pytest.py
|
ghjan/blog-projects
|
aa6925724e457bec276d98cf7b55b5cdaf2ab5f4
|
[
"MIT"
] | 2
|
2018-05-28T14:06:05.000Z
|
2020-03-21T14:05:07.000Z
|
blog_mocks/tests/test_function_pytest.py
|
ghjan/blog-projects
|
aa6925724e457bec276d98cf7b55b5cdaf2ab5f4
|
[
"MIT"
] | 35
|
2017-11-05T23:48:15.000Z
|
2021-09-15T12:15:39.000Z
|
from function import square, main
def test_function(monkeypatch):
monkeypatch.setattr("test_function_pytest.square", lambda x: 1)
assert square(5) == 1
def test_main_function(monkeypatch):
monkeypatch.setattr('function.square', lambda x: 1)
monkeypatch.setattr('function.cube', lambda x: 0)
assert main(5) == 1
| 25.846154
| 67
| 0.723214
|
4a147e21ca5d1e35bb15b30d8a78e7445094da31
| 19,630
|
py
|
Python
|
ironic/tests/unit/dhcp/test_neutron.py
|
armohamm/ironic
|
21093ca886ed736a7a25bf5e71e05d41e132fd2f
|
[
"Apache-2.0"
] | 2
|
2019-06-17T21:37:53.000Z
|
2020-07-11T03:58:39.000Z
|
ironic/tests/unit/dhcp/test_neutron.py
|
armohamm/ironic
|
21093ca886ed736a7a25bf5e71e05d41e132fd2f
|
[
"Apache-2.0"
] | 5
|
2018-03-28T07:52:38.000Z
|
2020-05-15T09:35:46.000Z
|
ironic/tests/unit/dhcp/test_neutron.py
|
armohamm/ironic
|
21093ca886ed736a7a25bf5e71e05d41e132fd2f
|
[
"Apache-2.0"
] | 6
|
2019-06-13T12:49:33.000Z
|
2021-04-17T16:33:19.000Z
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as neutron_client_exc
from oslo_utils import uuidutils
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import pxe_utils
from ironic.conductor import task_manager
from ironic.dhcp import neutron
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as object_utils
class TestNeutron(db_base.DbTestCase):
def setUp(self):
super(TestNeutron, self).setUp()
self.config(
cleaning_network='00000000-0000-0000-0000-000000000000',
group='neutron')
self.config(dhcp_provider='neutron',
group='dhcp')
self.node = object_utils.create_test_node(self.context)
self.ports = [
object_utils.create_test_port(
self.context, node_id=self.node.id, id=2,
uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c782',
address='52:54:00:cf:2d:32')]
# Very simple neutron port representation
self.neutron_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f00',
'mac_address': '52:54:00:cf:2d:32'}
dhcp_factory.DHCPFactory._dhcp_provider = None
@mock.patch('ironic.common.neutron.get_client', autospec=True)
def test_update_port_dhcp_opts(self, client_mock):
opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '1.1.1.1'},
{'opt_name': 'server-ip-address',
'opt_value': '1.1.1.1'}]
port_id = 'fake-port-id'
expected = {'port': {'extra_dhcp_opts': opts}}
api = dhcp_factory.DHCPFactory()
with task_manager.acquire(self.context, self.node.uuid) as task:
api.provider.update_port_dhcp_opts(port_id, opts,
context=task.context)
client_mock.return_value.update_port.assert_called_once_with(
port_id, expected)
@mock.patch('ironic.common.neutron.get_client', autospec=True)
def test_update_port_dhcp_opts_with_exception(self, client_mock):
opts = [{}]
port_id = 'fake-port-id'
client_mock.return_value.update_port.side_effect = (
neutron_client_exc.NeutronClientException())
api = dhcp_factory.DHCPFactory()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.FailedToUpdateDHCPOptOnPort,
api.provider.update_port_dhcp_opts,
port_id, opts, context=task.context)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids',
autospec=True)
def test_update_dhcp(self, mock_gnvi, mock_updo):
mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
'portgroups': {}}
with task_manager.acquire(self.context,
self.node.uuid) as task:
opts = pxe_utils.dhcp_options_for_instance(task)
api = dhcp_factory.DHCPFactory()
api.update_dhcp(task, opts)
mock_updo.assert_called_once_with(mock.ANY, 'vif-uuid', opts,
context=task.context)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids',
autospec=True)
def test_update_dhcp_no_vif_data(self, mock_gnvi, mock_updo):
mock_gnvi.return_value = {'portgroups': {}, 'ports': {}}
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory()
self.assertRaises(exception.FailedToUpdateDHCPOptOnPort,
api.update_dhcp, task, self.node)
self.assertFalse(mock_updo.called)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids',
autospec=True)
def test_update_dhcp_some_failures(self, mock_gnvi, mock_updo):
# confirm update is called twice, one fails, but no exception raised
mock_gnvi.return_value = {'ports': {'p1': 'v1', 'p2': 'v2'},
'portgroups': {}}
exc = exception.FailedToUpdateDHCPOptOnPort('fake exception')
mock_updo.side_effect = [None, exc]
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory()
api.update_dhcp(task, self.node)
mock_gnvi.assert_called_once_with(task)
self.assertEqual(2, mock_updo.call_count)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
def test_update_dhcp_fails(self, mock_gnvi, mock_updo):
# confirm update is called twice, both fail, and exception is raised
mock_gnvi.return_value = {'ports': {'p1': 'v1', 'p2': 'v2'},
'portgroups': {}}
exc = exception.FailedToUpdateDHCPOptOnPort('fake exception')
mock_updo.side_effect = [exc, exc]
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory()
self.assertRaises(exception.FailedToUpdateDHCPOptOnPort,
api.update_dhcp,
task, self.node)
mock_gnvi.assert_called_once_with(task)
self.assertEqual(2, mock_updo.call_count)
@mock.patch.object(neutron, 'LOG', autospec=True)
@mock.patch('time.sleep', autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
def test_update_dhcp_set_sleep_and_fake(self, mock_gnvi,
mock_ts, mock_log):
mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
'portgroups': {}}
self.config(port_setup_delay=30, group='neutron')
with task_manager.acquire(self.context,
self.node.uuid) as task:
opts = pxe_utils.dhcp_options_for_instance(task)
api = dhcp_factory.DHCPFactory()
with mock.patch.object(api.provider, 'update_port_dhcp_opts',
autospec=True) as mock_updo:
api.update_dhcp(task, opts)
mock_log.debug.assert_called_once_with(
"Waiting %d seconds for Neutron.", 30)
mock_ts.assert_called_with(30)
mock_updo.assert_called_once_with('vif-uuid', opts,
context=task.context)
@mock.patch.object(neutron, 'LOG', autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
def test_update_dhcp_unset_sleep_and_fake(self, mock_gnvi, mock_log):
mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
'portgroups': {}}
with task_manager.acquire(self.context,
self.node.uuid) as task:
opts = pxe_utils.dhcp_options_for_instance(task)
api = dhcp_factory.DHCPFactory()
with mock.patch.object(api.provider, 'update_port_dhcp_opts',
autospec=True) as mock_updo:
api.update_dhcp(task, opts)
mock_log.debug.assert_not_called()
mock_updo.assert_called_once_with('vif-uuid', opts,
context=task.context)
def test__get_fixed_ip_address(self):
port_id = 'fake-port-id'
expected = "192.168.1.3"
api = dhcp_factory.DHCPFactory().provider
port_data = {
"id": port_id,
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": "192.168.1.3",
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
}
fake_client = mock.Mock()
fake_client.show_port.return_value = {'port': port_data}
result = api._get_fixed_ip_address(port_id, fake_client)
self.assertEqual(expected, result)
fake_client.show_port.assert_called_once_with(port_id)
def test__get_fixed_ip_address_invalid_ip(self):
port_id = 'fake-port-id'
api = dhcp_factory.DHCPFactory().provider
port_data = {
"id": port_id,
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": "invalid.ip",
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
}
fake_client = mock.Mock()
fake_client.show_port.return_value = {'port': port_data}
self.assertRaises(exception.InvalidIPv4Address,
api._get_fixed_ip_address,
port_id, fake_client)
fake_client.show_port.assert_called_once_with(port_id)
def test__get_fixed_ip_address_with_exception(self):
port_id = 'fake-port-id'
api = dhcp_factory.DHCPFactory().provider
fake_client = mock.Mock()
fake_client.show_port.side_effect = (
neutron_client_exc.NeutronClientException())
self.assertRaises(exception.NetworkError,
api._get_fixed_ip_address, port_id, fake_client)
fake_client.show_port.assert_called_once_with(port_id)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address',
autospec=True)
def _test__get_port_ip_address(self, mock_gfia, network):
expected = "192.168.1.3"
fake_vif = 'test-vif-%s' % network
port = object_utils.create_test_port(
self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
internal_info={
'cleaning_vif_port_id': (fake_vif if network == 'cleaning'
else None),
'provisioning_vif_port_id': (fake_vif
if network == 'provisioning'
else None),
'tenant_vif_port_id': (fake_vif if network == 'tenant'
else None),
}
)
mock_gfia.return_value = expected
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api._get_port_ip_address(task, port,
mock.sentinel.client)
self.assertEqual(expected, result)
mock_gfia.assert_called_once_with(mock.ANY, fake_vif,
mock.sentinel.client)
def test__get_port_ip_address_tenant(self):
self._test__get_port_ip_address(network='tenant')
def test__get_port_ip_address_cleaning(self):
self._test__get_port_ip_address(network='cleaning')
def test__get_port_ip_address_provisioning(self):
self._test__get_port_ip_address(network='provisioning')
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address',
autospec=True)
def test__get_port_ip_address_for_portgroup(self, mock_gfia):
expected = "192.168.1.3"
pg = object_utils.create_test_portgroup(
self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
internal_info={'tenant_vif_port_id': 'test-vif-A'})
mock_gfia.return_value = expected
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api._get_port_ip_address(task, pg,
mock.sentinel.client)
self.assertEqual(expected, result)
mock_gfia.assert_called_once_with(mock.ANY, 'test-vif-A',
mock.sentinel.client)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address',
autospec=True)
def test__get_port_ip_address_with_exception(self, mock_gfia):
expected = "192.168.1.3"
port = object_utils.create_test_port(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid())
mock_gfia.return_value = expected
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
self.assertRaises(exception.FailedToGetIPAddressOnPort,
api._get_port_ip_address, task, port,
mock.sentinel.client)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address',
autospec=True)
def test__get_port_ip_address_for_portgroup_with_exception(
self, mock_gfia):
expected = "192.168.1.3"
pg = object_utils.create_test_portgroup(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid())
mock_gfia.return_value = expected
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
self.assertRaises(exception.FailedToGetIPAddressOnPort,
api._get_port_ip_address, task, pg,
mock.sentinel.client)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address',
autospec=True)
def _test__get_ip_addresses_ports(self, key, mock_gfia):
if key == "extra":
kwargs1 = {key: {'vif_port_id': 'test-vif-A'}}
else:
kwargs1 = {key: {'tenant_vif_port_id': 'test-vif-A'}}
ip_address = '10.10.0.1'
expected = [ip_address]
port = object_utils.create_test_port(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
**kwargs1)
mock_gfia.return_value = ip_address
with task_manager.acquire(self.context, self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api._get_ip_addresses(task, [port],
mock.sentinel.client)
self.assertEqual(expected, result)
def test__get_ip_addresses_ports_extra(self):
self._test__get_ip_addresses_ports('extra')
def test__get_ip_addresses_ports_int_info(self):
self._test__get_ip_addresses_ports('internal_info')
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address',
autospec=True)
def _test__get_ip_addresses_portgroup(self, key, mock_gfia):
if key == "extra":
kwargs1 = {key: {'vif_port_id': 'test-vif-A'}}
else:
kwargs1 = {key: {'tenant_vif_port_id': 'test-vif-A'}}
ip_address = '10.10.0.1'
expected = [ip_address]
pg = object_utils.create_test_portgroup(
self.context, node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(),
**kwargs1)
mock_gfia.return_value = ip_address
with task_manager.acquire(self.context, self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api._get_ip_addresses(task, [pg], mock.sentinel.client)
self.assertEqual(expected, result)
def test__get_ip_addresses_portgroup_extra(self):
self._test__get_ip_addresses_portgroup('extra')
def test__get_ip_addresses_portgroup_int_info(self):
self._test__get_ip_addresses_portgroup('internal_info')
@mock.patch('ironic.common.neutron.get_client', autospec=True)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_port_ip_address',
autospec=True)
def test_get_ip_addresses(self, get_ip_mock, client_mock):
ip_address = '10.10.0.1'
expected = [ip_address]
get_ip_mock.return_value = ip_address
with task_manager.acquire(self.context, self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api.get_ip_addresses(task)
get_ip_mock.assert_called_once_with(mock.ANY, task, task.ports[0],
client_mock.return_value)
self.assertEqual(expected, result)
@mock.patch('ironic.common.neutron.get_client', autospec=True)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_port_ip_address',
autospec=True)
def test_get_ip_addresses_for_port_and_portgroup(self, get_ip_mock,
client_mock):
object_utils.create_test_portgroup(
self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
internal_info={'tenant_vif_port_id': 'test-vif-A'})
with task_manager.acquire(self.context, self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
api.get_ip_addresses(task)
get_ip_mock.assert_has_calls(
[mock.call(mock.ANY, task, task.ports[0],
client_mock.return_value),
mock.call(mock.ANY, task, task.portgroups[0],
client_mock.return_value)]
)
| avg_line_length: 47.074341 | max_line_length: 79 | alphanum_fraction: 0.590728 |
| hexsha: 4a147e4c585afee5be812ee3f0eff97d35618dfe | size: 7,930 | ext: py | lang: Python |
| max_stars_repo_path: sdk/python/pulumi_azure_nextgen/cdn/v20200331/custom_domain.py | max_stars_repo_name: test-wiz-sec/pulumi-azure-nextgen | max_stars_repo_head_hexsha: 20a695af0d020b34b0f1c336e1b69702755174cc | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: sdk/python/pulumi_azure_nextgen/cdn/v20200331/custom_domain.py | max_issues_repo_name: test-wiz-sec/pulumi-azure-nextgen | max_issues_repo_head_hexsha: 20a695af0d020b34b0f1c336e1b69702755174cc | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: sdk/python/pulumi_azure_nextgen/cdn/v20200331/custom_domain.py | max_forks_repo_name: test-wiz-sec/pulumi-azure-nextgen | max_forks_repo_head_hexsha: 20a695af0d020b34b0f1c336e1b69702755174cc | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['CustomDomain']
class CustomDomain(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
custom_domain_name: Optional[pulumi.Input[str]] = None,
endpoint_name: Optional[pulumi.Input[str]] = None,
host_name: Optional[pulumi.Input[str]] = None,
profile_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Friendly domain name mapping to the endpoint hostname that the customer provides for branding purposes, e.g. www.contoso.com.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] custom_domain_name: Name of the custom domain within an endpoint.
:param pulumi.Input[str] endpoint_name: Name of the endpoint under the profile which is unique globally.
:param pulumi.Input[str] host_name: The host name of the custom domain. Must be a domain name.
:param pulumi.Input[str] profile_name: Name of the CDN profile which is unique within the resource group.
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if custom_domain_name is None:
raise TypeError("Missing required property 'custom_domain_name'")
__props__['custom_domain_name'] = custom_domain_name
if endpoint_name is None:
raise TypeError("Missing required property 'endpoint_name'")
__props__['endpoint_name'] = endpoint_name
if host_name is None:
raise TypeError("Missing required property 'host_name'")
__props__['host_name'] = host_name
if profile_name is None:
raise TypeError("Missing required property 'profile_name'")
__props__['profile_name'] = profile_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['custom_https_provisioning_state'] = None
__props__['custom_https_provisioning_substate'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['resource_state'] = None
__props__['type'] = None
__props__['validation_data'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:cdn/latest:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20150601:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20160402:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20161002:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20170402:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20171012:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20190415:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20190615:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20190615preview:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20191231:CustomDomain"), pulumi.Alias(type_="azure-nextgen:cdn/v20200415:CustomDomain")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(CustomDomain, __self__).__init__(
'azure-nextgen:cdn/v20200331:CustomDomain',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CustomDomain':
"""
Get an existing CustomDomain resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return CustomDomain(resource_name, opts=opts, __props__=__props__)
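# Usage sketch (all argument values below are hypothetical, chosen only to show the
# required constructor parameters documented above):
#   custom_domain = CustomDomain('my-domain',
#       custom_domain_name='www-contoso',
#       endpoint_name='endpoint1',
#       host_name='www.contoso.com',
#       profile_name='profile1',
#       resource_group_name='my-rg')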
@property
@pulumi.getter(name="customHttpsProvisioningState")
def custom_https_provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning status of Custom Https of the custom domain.
"""
return pulumi.get(self, "custom_https_provisioning_state")
@property
@pulumi.getter(name="customHttpsProvisioningSubstate")
def custom_https_provisioning_substate(self) -> pulumi.Output[str]:
"""
Provisioning substate shows the progress of custom HTTPS enabling/disabling process step by step.
"""
return pulumi.get(self, "custom_https_provisioning_substate")
@property
@pulumi.getter(name="hostName")
def host_name(self) -> pulumi.Output[str]:
"""
The host name of the custom domain. Must be a domain name.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning status of the custom domain.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> pulumi.Output[str]:
"""
Resource status of the custom domain.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="validationData")
def validation_data(self) -> pulumi.Output[Optional[str]]:
"""
Special validation or data may be required when delivering CDN to some regions due to local compliance reasons. E.g. ICP license number of a custom domain is required to deliver content in China.
"""
return pulumi.get(self, "validation_data")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| avg_line_length: 46.104651 | max_line_length: 761 | alphanum_fraction: 0.662169 |
| hexsha: 4a147e8d834499baa17912824fd12e7ded89187b | size: 48,970 | ext: py | lang: Python |
| max_stars_repo_path: lib/network.py | max_stars_repo_name: SirSevenG/electrum-komodo | max_stars_repo_head_hexsha: a38d01baf216aad9429ac8f3707a12818c30a4a2 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2019-06-21T10:22:07.000Z | max_stars_repo_stars_event_max_datetime: 2020-01-03T16:02:48.000Z |
| max_issues_repo_path: lib/network.py | max_issues_repo_name: SirSevenG/electrum-komodo | max_issues_repo_head_hexsha: a38d01baf216aad9429ac8f3707a12818c30a4a2 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 28 | max_issues_repo_issues_event_min_datetime: 2019-07-24T12:37:44.000Z | max_issues_repo_issues_event_max_datetime: 2020-10-12T11:21:28.000Z |
| max_forks_repo_path: lib/network.py | max_forks_repo_name: SirSevenG/electrum-komodo | max_forks_repo_head_hexsha: a38d01baf216aad9429ac8f3707a12818c30a4a2 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 9 | max_forks_repo_forks_event_min_datetime: 2019-09-13T08:04:44.000Z | max_forks_repo_forks_event_max_datetime: 2020-09-17T01:19:23.000Z |
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import stat
import errno
import random
import re
import select
from collections import defaultdict
import threading
import socket
import json
import socks
import urllib
from . import util
from . import bitcoin
from .bitcoin import *
from .blockchain import HDR_LEN, CHUNK_LEN
from . import constants
from .interface import Connection, Interface
from . import blockchain
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
from .i18n import _
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
""" parse servers list into dict format"""
from .version import PROTOCOL_VERSION
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match("[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = constants.net.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match("p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
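# Illustrative call (hypothetical entry; item[0] is typically the IP and is not used here):
#   parse_servers([['1.2.3.4', 'host.example.com', ['s50002', 't50001', 'v1.4', 'p10000']]])
#   -> {'host.example.com': {'s': '50002', 't': '50001', 'version': '1.4', 'pruning': '10000'}}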
def filter_version(servers):
def is_recent(version):
try:
return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_protocol(hostmap, protocol = 't'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
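# e.g. filter_protocol({'host.example.com': {'t': '50001', 's': '50002'}}, 't')
# -> ['host.example.com:50001:t'] (serialized by serialize_server, defined below)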
def pick_random_server(hostmap = None, protocol = 't', exclude_set = set()):
if hostmap is None:
hostmap = constants.net.DEFAULT_SERVERS
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
from .simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'), p.get('host'), p.get('port'),
p.get('user', ''), p.get('password', '')])
def deserialize_proxy(s):
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
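# Examples (hypothetical values):
#   deserialize_proxy('socks5:127.0.0.1:9050:user:pw')
#   -> {'mode': 'socks5', 'host': '127.0.0.1', 'port': '9050', 'user': 'user', 'password': 'pw'}
#   deserialize_proxy('http:proxy.local') -> {'mode': 'http', 'host': 'proxy.local', 'port': '8080'}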
def deserialize_server(server_str):
host, port, protocol = str(server_str).rsplit(':', 2)
if protocol not in 'st':
raise ValueError('invalid network protocol: {}'.format(protocol))
int(port) # Throw if cannot be converted to int
return host, port, protocol
def serialize_server(host, port, protocol):
return str(':'.join([host, port, protocol]))
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if isinstance(config, dict) else config
self.num_server = 10 if not self.config.get('oneserver') else 0
self.blockchains = blockchain.read_blockchains(self.config)
self.print_error("blockchains", self.blockchains.keys())
self.blockchain_index = config.get('blockchain_index', 0)
if self.blockchain_index not in self.blockchains.keys():
self.blockchain_index = 0
# Server for addresses and transactions
self.default_server = self.config.get('server', None)
# Sanitize default server
if self.default_server:
try:
deserialize_server(self.default_server)
except:
self.print_error('Warning: failed to parse server-string; falling back to random.')
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.lock = threading.Lock()
self.pending_sends = []
self.message_id = 0
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.banner = ''
self.donation_address = ''
self.relay_fee = None
# callbacks passed with subscriptions
self.subscriptions = defaultdict(list)
self.sub_cache = {}
# callbacks set by the GUI
self.callbacks = defaultdict(list)
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
os.chmod(dir_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# subscriptions and requests
self.subscribed_addresses = set()
self.h2addr = {}
# Requests from client we've not seen a response to
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None
self.interfaces = {}
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.requested_chunks = set()
self.socket_queue = queue.Queue()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
self.is_downloading_checkpoints = False
self.downloaded_checkpoints_perc = 0
self.restart_required = False
self.sync_stalled_restart_required = False
self.num_blocks = -1
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None or self.is_downloading_checkpoints
def is_connecting(self):
return self.connection_status == 'connecting'
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
# If you want to queue a request on any interface it must go
# through this function so message ids are properly tracked
if interface is None:
interface = self.interface
message_id = self.message_id
self.message_id += 1
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
return message_id
def send_subscriptions(self):
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
self.sub_cache.clear()
# Resend unanswered requests
requests = self.unanswered_requests.values()
self.unanswered_requests = {}
if self.interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, self.interface)
for request in requests:
message_id = self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self.queue_request('server.banner', [])
self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
# self.request_fee_estimates()
# self.queue_request('blockchain.relayfee', [])
for h in list(self.subscribed_addresses):
self.queue_request('blockchain.scripthash.subscribe', [h])
def request_fee_estimates(self):
from .simple_config import FEE_ETA_TARGETS
self.config.requested_fee_estimates()
self.queue_request('mempool.get_fee_histogram', [])
for i in FEE_ETA_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'fee_histogram':
value = self.config.mempool_fees
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self):
'''The interfaces that are in connected state'''
return list(self.interfaces.keys())
def get_servers(self):
out = constants.net.DEFAULT_SERVERS
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if (not server in self.interfaces and not server in self.connecting):
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_socketobject"):
socket._socketobject = socket.socket
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._getaddrinfo
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
self.print_error("stopping network")
for interface in list(self.interfaces.values()):
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy["mode"]) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
# abort if changes were not allowed by config
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server)
else:
self.switch_lagging_interface()
self.notify('updated')
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists nor
being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface.'''
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to", server)
# stop any current interface in order to terminate subscriptions
# fixme: we don't want to close headers sub
#self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'mempool.get_fee_histogram':
if error is None:
self.print_error('fee_histogram', result)
self.config.mempool_fees = result
self.notify('fee_histogram')
elif method == 'blockchain.estimatefee':
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.update_fee_estimates(i, fee)
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN) if result is not None else None
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.headers':
height, count = params
if count == 1:
self.on_get_header(interface, response, height)
elif count == CHUNK_LEN:
self.on_get_chunk(interface, response, height)
else:
self.print_error('Unknown chunk length: %s' % count)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
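# e.g. get_index('blockchain.scripthash.subscribe', ['ab12']) -> 'blockchain.scripthash.subscribe:ab12'
# get_index('server.banner', []) -> 'server.banner'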
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
if request:
method, params, message_id = request
if method != 'blockchain.block.headers':
self.print_error(response)
k = self.get_index(method, params)
# client requests go through self.send() with a
# callback, are only sent to the current interface,
# and are placed in the unanswered_requests dictionary
client_req = self.unanswered_requests.pop(message_id, None)
if client_req:
assert interface == self.interface
callbacks = [client_req[2]]
else:
# fixme: will only work for subscriptions
k = self.get_index(method, params)
callbacks = self.subscriptions.get(k, [])
# Copy the request method and params to the response
response['method'] = method
response['params'] = params
# Only once we've received a response to an addr subscription
# add it to the list; avoids double-sends on reconnection
if method == 'blockchain.scripthash.subscribe':
self.subscribed_addresses.add(params[0])
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
k = self.get_index(method, params)
if method == 'blockchain.headers.subscribe':
response['result'] = params[0]
response['params'] = []
elif method == 'blockchain.scripthash.subscribe':
response['params'] = [params[0]] # addr
response['result'] = params[1]
callbacks = self.subscriptions.get(k, [])
# update cache if it's a subscription
if method.endswith('.subscribe'):
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, response, callbacks)
def addr_to_scripthash(self, addr):
h = bitcoin.address_to_scripthash(addr)
if h not in self.h2addr:
self.h2addr[h] = addr
return h
def overload_cb(self, callback):
def cb2(x):
x2 = x.copy()
p = x2.pop('params')
addr = self.h2addr[p[0]]
x2['params'] = [addr]
callback(x2)
return cb2
def subscribe_to_addresses(self, addresses, callback):
hashes = [self.addr_to_scripthash(addr) for addr in addresses]
msgs = [('blockchain.scripthash.subscribe', [x]) for x in hashes]
self.send(msgs, self.overload_cb(callback))
def request_address_history(self, address, callback):
h = self.addr_to_scripthash(address)
self.send([('blockchain.scripthash.get_history', [h])], self.overload_cb(callback))
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
messages = list(messages)
with self.lock:
self.pending_sends.append((messages, callback))
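# Sketch of typical client usage: queue a batch of (method, params) tuples and receive
# each raw response dict via the callback:
#   network.send([('server.banner', [])], callback)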
def process_pending_sends(self):
# Requests need connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
return
with self.lock:
sends = self.pending_sends
self.pending_sends = []
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
self.print_error("cache hit", k)
callback(r)
else:
message_id = self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.notify('interfaces')
for b in self.blockchains.values():
if b.catch_up == server:
b.catch_up = None
def new_interface(self, server, socket):
# todo: get tip first, then decide which checkpoint to use.
self.add_recent_server(server)
interface = Interface(server, socket)
interface.blockchain = self.blockchains[0] or None
interface.tip_header = None
interface.tip = 0
interface.mode = 'default'
interface.request = None
self.interfaces[server] = interface
self.queue_request('blockchain.headers.subscribe', [], interface)
if server == self.default_server:
self.switch_to_interface(server)
self.notify('interfaces')
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.new_interface(server, socket)
else:
self.connection_down(server)
# Send pings and shut down stale interfaces
# must use copy of values
for interface in list(self.interfaces.values()):
if interface.has_timed_out():
self.print_error('connection timed out, keeping the interface alive for now')
# self.connection_down(interface.server)
elif interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
def request_chunk(self, interface, index):
if index in self.requested_chunks:
return
interface.print_error("requesting chunk %d" % index)
self.requested_chunks.add(index)
self.queue_request('blockchain.block.headers',
[CHUNK_LEN*index, CHUNK_LEN], interface)
def on_get_chunk(self, interface, response, height):
'''Handle receiving a chunk of block headers'''
error = response.get('error')
result = response.get('result')
blockchain = interface.blockchain
if result is None or error is not None:
if error == {'code': -101, 'message': 'excessive resource usage'}:
# a non-stop sync of roughly 240000 blocks in chunks tends to trigger the "excessive resource usage" error
# that's about 5+ months' worth of blocks
self.sync_stalled_restart_required = True
interface.print_error(error or 'bad response')
return
index = height // CHUNK_LEN
# Ignore unsolicited chunks
if index not in self.requested_chunks:
interface.print_error("received chunk %d (unsolicited)" % index)
return
else:
interface.print_error("received chunk %d" % index)
self.requested_chunks.remove(index)
hex_chunk = result.get('hex', None)
connect = blockchain.connect_chunk(index, hex_chunk)
if not connect:
self.connection_down(interface.server)
return
# If not finished, get the next chunk
if index >= len(blockchain.checkpoints) and blockchain.height() < interface.tip:
self.request_chunk(interface, index+1)
else:
interface.mode = 'default'
interface.print_error('catch up done', blockchain.height())
blockchain.catch_up = None
self.notify('updated')
def request_header(self, interface, height):
interface.print_error("requesting header %d" % height)
self.queue_request('blockchain.block.headers', [height, 1], interface)
interface.request = height
def on_get_header(self, interface, response, height):
'''Handle receiving a single block header'''
result = response.get('result', {})
hex_header = result.get('hex', None)
if interface.request != height:
interface.print_error("unsolicited header", interface.request, height)
# self.connection_down(interface.server)
# return
if not hex_header:
interface.print_error(response)
self.connection_down(interface.server)
return
if len(hex_header) != HDR_LEN*2:
interface.print_error('wrong header length', interface.request)
self.connection_down(interface.server)
return
header = blockchain.deserialize_header(bfh(hex_header), height)
chain = blockchain.check_header(header)
if interface.mode == 'backward':
can_connect = blockchain.can_connect(header)
if can_connect and can_connect.catch_up is None:
interface.mode = 'catch_up'
interface.blockchain = can_connect
interface.blockchain.save_header(header)
next_height = height + 1
interface.blockchain.catch_up = interface.server
elif chain:
interface.print_error("binary search")
interface.mode = 'binary'
interface.blockchain = chain
interface.good = height
next_height = (interface.bad + interface.good) // 2
assert next_height >= self.max_checkpoint(), (interface.bad, interface.good)
else:
if height == 0:
self.connection_down(interface.server)
next_height = None
else:
interface.bad = height
interface.bad_header = header
delta = interface.tip - height
next_height = max(self.max_checkpoint(), interface.tip - 2 * delta)
elif interface.mode == 'binary':
if chain:
interface.good = height
interface.blockchain = chain
else:
interface.bad = height
interface.bad_header = header
if interface.bad != interface.good + 1:
next_height = (interface.bad + interface.good) // 2
assert next_height >= self.max_checkpoint()
elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
self.connection_down(interface.server)
next_height = None
else:
branch = self.blockchains.get(interface.bad)
if branch is not None:
if branch.check_header(interface.bad_header):
interface.print_error('joining chain', interface.bad)
next_height = None
elif branch.parent().check_header(header):
interface.print_error('reorg', interface.bad, interface.tip)
interface.blockchain = branch.parent()
next_height = None
else:
interface.print_error('checkpoint conflicts with existing fork', branch.path())
branch.write('', 0)
branch.save_header(interface.bad_header)
interface.mode = 'catch_up'
interface.blockchain = branch
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
bh = interface.blockchain.height()
next_height = None
if bh > interface.good:
if not interface.blockchain.check_header(interface.bad_header):
b = interface.blockchain.fork(interface.bad_header)
self.blockchains[interface.bad] = b
interface.blockchain = b
interface.print_error("new chain", b.checkpoint)
interface.mode = 'catch_up'
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
assert bh == interface.good
if interface.blockchain.catch_up is None and bh < interface.tip:
interface.print_error("catching up from %d"% (bh + 1))
interface.mode = 'catch_up'
next_height = bh + 1
interface.blockchain.catch_up = interface.server
self.notify('updated')
elif interface.mode == 'catch_up':
can_connect = interface.blockchain.can_connect(header)
if can_connect:
interface.blockchain.save_header(header)
next_height = height + 1 if height < interface.tip else None
else:
# go back
interface.print_error("cannot connect", height)
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
next_height = height - 1
if next_height is None:
# exit catch_up state
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.switch_lagging_interface()
self.notify('updated')
else:
can_connect = interface.blockchain.can_connect(header)
if can_connect:
interface.blockchain.save_header(header)
next_height = height + 1 if height < interface.tip else None
else:
# go back
interface.print_error("cannot connect", height)
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
next_height = height - 1
if next_height is None:
# exit catch_up state
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.switch_lagging_interface()
self.notify('updated')
# If not finished, get the next header
interface.request = None
if next_height:
if interface.mode == 'catch_up' and interface.tip > next_height + 50:
self.request_chunk(interface, next_height // CHUNK_LEN)
else:
self.request_header(interface, next_height)
else:
interface.mode = 'default'
self.notify('updated')
# refresh network dialog
self.notify('interfaces')
def maintain_requests(self):
for interface in list(self.interfaces.values()):
if interface.request and time.time() - interface.request_time > 20:
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as e:
# TODO: py3, get code from e
code = None
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
def init_headers_file(self):
b = self.blockchains[0]
filename = b.path()
filenameCP = filename.replace('blockchain_headers', 'checkpoints.json')
if os.path.exists(filenameCP):
f = open(filenameCP, 'rb')
file_size = len(f.read())
self.print_error('local checkpoints.json size:', file_size)
if not os.path.exists(filenameCP) or file_size < constants.net.CHECKPOINTS_MIN_FSIZE:
site = urllib.request.urlopen(constants.net.CHECKPOINTS_URL)
meta = site.info()
self.print_error('remote checkpoints.json size ', meta['Content-Length'])
self.print_error('checkpoints.json doesn\'t exist')
self.print_error('filename')
self.print_error(filenameCP)
self.is_downloading_checkpoints = True
self.set_status('syncing') # downloading?
# pass the callable plus its argument; calling download_thread(...) inline here would
# run the download synchronously and hand Thread its return value (None)
t = threading.Thread(target=self.download_thread, args=(filenameCP,))
t.daemon = True
t.start()
else:
b = self.blockchains[0]
filename = b.path()
len_checkpoints = len(b.checkpoints)
length = HDR_LEN * len_checkpoints * CHUNK_LEN
if not os.path.exists(filename) or os.path.getsize(filename) < length:
with open(filename, 'wb') as f:
for i in range(len_checkpoints):
for height, header_data in b.checkpoints[i][2]:
f.seek(height*HDR_LEN)
bin_header = bfh(header_data)
f.write(bin_header)
with b.lock:
b.update_size()
def dl_thread_cb(self, blocks, block_size, total_size):
self.set_status('syncing')
self.notify('status')
self.print_error('blocks downloaded', blocks)
self.print_error('block size KB', block_size / 1024)
self.print_error('total remote size MB ', total_size / 1024 / 1024)
self.print_error('total downloaded size MB ', blocks * block_size / 1024 / 1024)
self.print_error('total % ', blocks * block_size * 100 / total_size)
self.downloaded_checkpoints_perc = blocks * block_size * 100 / total_size
if self.downloaded_checkpoints_perc >= 100:
self.restart_required = True
def download_thread(self, filename):
try:
import urllib, socket
socket.setdefaulttimeout(30)
self.print_error('downloading ', constants.net.CHECKPOINTS_URL)
urllib.request.urlretrieve(constants.net.CHECKPOINTS_URL, filename, self.dl_thread_cb)
except Exception:
import traceback
traceback.print_exc()
self.print_error('download failed. creating file', filename)
open(filename, 'wb+').close()
b = self.blockchains[0]
with b.lock:
b.update_size()
self.set_status('syncing')
def run(self):
self.init_headers_file()
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
self.maintain_requests()
self.run_jobs() # Synchronizer and Verifier
self.process_pending_sends()
self.stop_network()
self.on_stop()
def on_notify_header(self, interface, header):
height = header.get('height')
hex_header = header.get('hex')
if not height or not hex_header:
return
if len(hex_header) != HDR_LEN*2:
interface.print_error('wrong header length', interface.request)
self.connection_down(interface.server)
return
header = blockchain.deserialize_header(bfh(hex_header), height)
if height < self.max_checkpoint():
self.connection_down(interface.server)
return
interface.tip_header = header
interface.tip = height
if interface.mode != 'default':
return
b = blockchain.check_header(header)
if b:
interface.blockchain = b
self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
b = blockchain.can_connect(header)
if b:
interface.blockchain = b
b.save_header(header)
self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
tip = max([x.height() for x in self.blockchains.values()])
if tip >=0:
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
self.request_header(interface, min(tip +1, height - 1))
else:
chain = self.blockchains[0]
if chain.catch_up is None:
chain.catch_up = interface
interface.mode = 'catch_up'
interface.blockchain = chain
self.print_error("switching to catchup mode", tip, self.blockchains)
self.request_header(interface, 0)
else:
self.print_error("chain already catching up with", chain.catch_up.server)
def blockchain(self):
if self.interface and self.interface.blockchain is not None:
self.blockchain_index = self.interface.blockchain.checkpoint
return self.blockchains[self.blockchain_index]
def get_blockchains(self):
out = {}
for k, b in self.blockchains.items():
r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
if r:
out[k] = r
return out
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
for i in self.interfaces.values():
if i.blockchain == blockchain:
self.switch_to_interface(i.server)
break
else:
raise Exception('blockchain not found', index)
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
return self.blockchain().height()
def synchronous_get(self, request, timeout=30):
q = queue.Queue()
self.send([request], q.put)
try:
r = q.get(True, timeout)
except queue.Empty:
raise util.TimeoutException(_('Server did not answer'))
if r.get('error'):
raise Exception(r.get('error'))
return r.get('result')
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
def export_checkpoints(self, path):
# run manually from the console to generate checkpoints
cp = self.blockchain().get_checkpoints()
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(cp, indent=4))
def max_checkpoint(self):
return max(0, len(constants.net.CHECKPOINTS) * CHUNK_LEN - 1)
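# e.g. if constants.net.CHECKPOINTS held 100 entries and CHUNK_LEN were 2016,
# max_checkpoint() would return 100 * 2016 - 1 == 201599 (values are illustrative only)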
| avg_line_length: 40.639004 | max_line_length: 138 | alphanum_fraction: 0.589483 |
| hexsha: 4a147ea590c84a2f6744dcd5ce30d49eae009770 | size: 4,099 | ext: py | lang: Python |
| max_stars_repo_path: run_inference2.py | max_stars_repo_name: xdr940/cc | max_stars_repo_head_hexsha: a98fe9b6c33c332a4c399f968032a90989c55672 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: run_inference2.py | max_issues_repo_name: xdr940/cc | max_issues_repo_head_hexsha: a98fe9b6c33c332a4c399f968032a90989c55672 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2019-08-16T07:09:22.000Z | max_issues_repo_issues_event_max_datetime: 2019-09-04T04:59:51.000Z |
| max_forks_repo_path: run_inference2.py | max_forks_repo_name: xdr940/cc | max_forks_repo_head_hexsha: a98fe9b6c33c332a4c399f968032a90989c55672 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-01-13T04:51:22.000Z | max_forks_repo_forks_event_max_datetime: 2020-01-13T04:51:22.000Z |
import torch
from scipy.misc import imread, imsave, imresize
import matplotlib.pyplot as plt
import numpy as np
from path import Path
import argparse
from tqdm import tqdm
from models import DispResNet6
from utils import tensor2array
parser = argparse.ArgumentParser(description='Inference script for DispNet learned with \
Structure from Motion Learner inference on KITTI and CityScapes Dataset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#parser.add_argument("--pretrained", type=str, help="pretrained DispNet path",default='/home/roit/models/cc/official/dispnet_k.pth.tar')
parser.add_argument("--pretrained", type=str, help="pretrained DispNet path",default='/home/roit/models/cc/300epc.all/dispnet_model_best.pth.tar')
parser.add_argument("--img-height", default=256, type=int, help="Image height")
parser.add_argument("--img-width", default=512, type=int, help="Image width")
parser.add_argument("--no-resize", action='store_true', help="no resizing is done")
parser.add_argument("--dataset-list", default=None, type=str, help="Dataset list file")
parser.add_argument("--dataset-dir",
#default='/home/roit/datasets/kitti_small/data', type=str,help="Dataset directory")
default='/home/roit/datasets/MC_256512/2019_09_26_10_58/imgs', type=str,help="Dataset directory")
parser.add_argument("--output-dir", default='output', type=str, help="Output directory")
parser.add_argument("--output-disp", action='store_true', help="save disparity img",default=True)
parser.add_argument("--output-depth", action='store_true', help="save depth img",default=True)
parser.add_argument("--img-exts", default=['png', 'jpg', 'bmp'], nargs='*', type=str, help="images extensions to glob")
def main():
args = parser.parse_args()
if not(args.output_disp or args.output_depth):
print('You must at least output one value !')
return
disp_net = DispResNet6().cuda()
weights = torch.load(args.pretrained)
disp_net.load_state_dict(weights['state_dict'])
disp_net.eval()
dataset_dir = Path(args.dataset_dir)
output_dir = Path(args.output_dir)
output_dir.makedirs_p()
disp_dir = output_dir/dataset_dir.stem+'disp'
depth_dir = output_dir/dataset_dir.stem+'depth'
disp_dir.makedirs_p()
depth_dir.makedirs_p()
if args.dataset_list is not None:
with open(args.dataset_list, 'r') as f:
test_files = [dataset_dir/file for file in f.read().splitlines()]
else:
test_files = sum([dataset_dir.files('*.{}'.format(ext)) for ext in args.img_exts], [])
print('{} files to test'.format(len(test_files)))
for file in tqdm(test_files):
img = imread(file).astype(np.float32)
h,w,_ = img.shape
if (not args.no_resize) and (h != args.img_height or w != args.img_width):
img = imresize(img, (args.img_height, args.img_width)).astype(np.float32)
img = np.transpose(img, (2, 0, 1))
tensor_img = torch.from_numpy(img).unsqueeze(0)
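# normalize: (x/255 - 0.5)/0.2 maps [0, 255] pixels to zero-centred values
# (mean 0.5, std 0.2 are presumably the constants this model was trained with)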
tensor_img = ((tensor_img/255 - 0.5)/0.2).cuda()
disp = disp_net(tensor_img)  # output is a single-channel disparity map
depth = 1/disp  # first batch
'''
if args.output_disp:
disp = disp.cpu().data.numpy()
disp=disp[0][0]*255
plt.imsave(disp_dir/'{}.{}'.format(file.stem,'png'), disp,cmap='bone')
if args.output_depth:
depth=depth.cpu().data.numpy()
depth=depth[0][0]*255
plt.imsave(depth_dir/'{}.{}'.format(file.stem,'png'), depth,cmap='bone')
'''
if args.output_disp:
disp=tensor2array(disp[0],colormap='bone')
disp=np.transpose(disp,[1,2,0])
plt.imsave(disp_dir/'{}.{}'.format(file.stem,'png'), disp,cmap='bone')
if args.output_depth:
depth=tensor2array(depth[0],colormap='bone')
depth=np.transpose(depth,[1,2,0])
plt.imsave(depth_dir/'{}.{}'.format(file.stem,'png'), depth,cmap='bone')
if __name__ == '__main__':
main()
| avg_line_length: 39.413462 | max_line_length: 147 | alphanum_fraction: 0.655038 |
| hexsha: 4a147f653cdef05e2844bfeee5e3dadf8e21f95d | size: 8,312 | ext: py | lang: Python |
| max_stars_repo_path: examples/microjson/mutants/CRP_Num_mutant_1486201315.py | max_stars_repo_name: Anirban166/tstl | max_stars_repo_head_hexsha: 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 90 | max_stars_repo_stars_event_min_datetime: 2015-04-07T10:26:53.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-07T15:14:57.000Z |
| max_issues_repo_path: examples/microjson/mutants/CRP_Num_mutant_1486201315.py | max_issues_repo_name: Anirban166/tstl | max_issues_repo_head_hexsha: 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 14 | max_issues_repo_issues_event_min_datetime: 2015-10-13T16:25:59.000Z | max_issues_repo_issues_event_max_datetime: 2021-01-21T18:31:03.000Z |
| max_forks_repo_path: examples/microjson/mutants/CRP_Num_mutant_1486201315.py | max_forks_repo_name: Anirban166/tstl | max_forks_repo_head_hexsha: 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 32 | max_forks_repo_forks_event_min_datetime: 2015-04-07T10:41:29.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-26T05:17:28.000Z |
import math
import StringIO
import types
__pychecker__ = 'no-returnvalues'
WS = set([' ', '\t', '\r', '\n', '\x08', '\x0c'])
DIGITS = set([str(i) for i in range(0, 10)])
NUMSTART = DIGITS.union(['.', '-', '+'])
NUMCHARS = NUMSTART.union(['e', 'E'])
ESC_MAP = {'n': '\n', 't': '\t', 'r': '\r', 'b': '\x08', 'f': '\x0c'}
REV_ESC_MAP = dict([(_v, _k) for (_k, _v) in ESC_MAP.items()] + [('"', '"')])
E_BYTES = 'input string must be type str containing ASCII or UTF-8 bytes'
E_MALF = 'malformed JSON data'
E_TRUNC = 'truncated JSON data'
E_BOOL = 'expected boolean'
E_NULL = 'expected null'
E_LITEM = 'expected list item'
E_DKEY = 'expected key'
E_COLON = 'missing colon after key'
E_EMPTY = 'found empty string, not valid JSON data'
E_BADESC = 'bad escape character found'
E_UNSUPP = 'unsupported type "%s" cannot be JSON-encoded'
E_BADFLOAT = 'cannot emit floating point value "%s"'
NEG_INF = float('-inf')
POS_INF = float('inf')
class JSONError(Exception):
def __init__(self, msg, stm=None, pos=0):
if stm:
msg += ' at position %d, "%s"' % (pos, repr(stm.substr(pos, 32)))
Exception.__init__(self, msg)
class JSONStream(object):
def __init__(self, data):
self._stm = StringIO.StringIO(data)
@property
def pos(self):
return self._stm.pos
@property
def len(self):
return self._stm.len
def getvalue(self):
return self._stm.getvalue()
def skipspaces(self):
'post-cond: read pointer will be over first non-WS char'
self._skip(lambda c: (c not in WS))
def _skip(self, stopcond):
while True:
c = self.peek()
if (stopcond(c) or (c == '')):
break
self.next()
def next(self, size=1):
return self._stm.read(size)
def next_ord(self):
return ord(self.next())
def peek(self):
if (self.pos == self.len):
return ''
return self.getvalue()[self.pos]
def substr(self, pos, length):
return self.getvalue()[pos:pos + length]
def _decode_utf8(c0, stm):
c0 = ord(c0)
r = 65533
nc = stm.next_ord
if (c0 & 224 == 192):
r = c0 & 31 << 6 + nc() & 63
elif (c0 & 240 == 224):
r = c0 & 15 << 12 + nc() & 63 << 6 + nc() & 63
elif (c0 & 248 == 240):
r = c0 & 7 << 18 + nc() & -1 << 12 + nc() & 63 << 6 + nc() & 63
return unichr(r)
def decode_escape(c, stm):
v = ESC_MAP.get(c, None)
if (v is not None):
return v
elif (c != 'u'):
return c
sv = 12
r = 0
for _ in range(0, 4):
r |= int(stm.next(), 16) << sv
sv -= 4
return unichr(r)
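# For example, decode_escape('n', stm) -> '\n'; for 'u' the next four hex digits are
# consumed from the stream, so the escape sequence \u0041 decodes to u'A'.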
def _from_json_string(stm):
stm.next()
r = []
while True:
c = stm.next()
if (c == ''):
raise JSONError(E_TRUNC, stm, stm.pos - 1)
elif (c == '\\'):
c = stm.next()
r.append(decode_escape(c, stm))
elif (c == '"'):
return ''.join(r)
elif (c > '\x7f'):
r.append(_decode_utf8(c, stm))
else:
r.append(c)
def _from_json_fixed(stm, expected, value, errmsg):
off = len(expected)
pos = stm.pos
if (stm.substr(pos, off) == expected):
stm.next(off)
return value
raise JSONError(errmsg, stm, pos)
def _from_json_number(stm):
is_float = 0
saw_exp = 0
pos = stm.pos
while True:
c = stm.peek()
if (c not in NUMCHARS):
break
elif ((c == '-') and (not saw_exp)):
pass
elif (c in ('.', 'e', 'E')):
is_float = 1
if (c in ('e', 'E')):
saw_exp = 1
stm.next()
s = stm.substr(pos, stm.pos - pos)
if is_float:
return float(s)
return long(s)
def _from_json_list(stm):
stm.next()
result = []
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
raise JSONError(E_TRUNC, stm, pos)
elif (c == ']'):
stm.next()
return result
elif (c == ','):
stm.next()
result.append(_from_json_raw(stm))
continue
elif (not result):
result.append(_from_json_raw(stm))
continue
else:
raise JSONError(E_MALF, stm, stm.pos)
def _from_json_dict(stm):
stm.next()
result = {}
expect_key = 0
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
raise JSONError(E_TRUNC, stm, pos)
if (c in ('}', ',')):
stm.next()
if expect_key:
raise JSONError(E_DKEY, stm, stm.pos)
if (c == '}'):
return result
expect_key = 1
continue
elif (c == '"'):
key = _from_json_string(stm)
stm.skipspaces()
c = stm.next()
if (c != ':'):
raise JSONError(E_COLON, stm, stm.pos)
stm.skipspaces()
val = _from_json_raw(stm)
result[key] = val
expect_key = 0
continue
raise JSONError(E_MALF, stm, stm.pos)
def _from_json_raw(stm):
while True:
stm.skipspaces()
c = stm.peek()
if (c == '"'):
return _from_json_string(stm)
elif (c == '{'):
return _from_json_dict(stm)
elif (c == '['):
return _from_json_list(stm)
elif (c == 't'):
return _from_json_fixed(stm, 'true', True, E_BOOL)
elif (c == 'f'):
return _from_json_fixed(stm, 'false', False, E_BOOL)
elif (c == 'n'):
return _from_json_fixed(stm, 'null', None, E_NULL)
elif (c in NUMSTART):
return _from_json_number(stm)
raise JSONError(E_MALF, stm, stm.pos)
def from_json(data):
"\n Converts 'data' which is UTF-8 (or the 7-bit pure ASCII subset) into\n a Python representation. You must pass bytes to this in a str type,\n not unicode.\n "
if (not isinstance(data, str)):
raise JSONError(E_BYTES)
if (not data):
return None
stm = JSONStream(data)
return _from_json_raw(stm)
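# Round-trip sketch (Python 2, which the StringIO/unichr usage above implies):
#   from_json('{"a": [1, 2.5, true, null]}') -> {'a': [1L, 2.5, True, None]}
#   to_json (defined below) emits the inverse form: '{"a":[1,2.5,true,null]}'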
def _to_json_list(stm, lst):
seen = 0
stm.write('[')
for elem in lst:
if seen:
stm.write(',')
seen = 1
_to_json_object(stm, elem)
stm.write(']')
def _to_json_string(stm, buf):
stm.write('"')
for c in buf:
nc = REV_ESC_MAP.get(c, None)
if nc:
stm.write('\\' + nc)
elif (ord(c) <= 127):
stm.write(str(c))
else:
stm.write('\\u%04x' % ord(c))
stm.write('"')
def _to_json_dict(stm, dct):
seen = 0
stm.write('{')
for key in dct.keys():
if seen:
stm.write(',')
seen = 1
val = dct[key]
if (not (type(key) in (types.StringType, types.UnicodeType))):
key = str(key)
_to_json_string(stm, key)
stm.write(':')
_to_json_object(stm, val)
stm.write('}')
def _to_json_object(stm, obj):
if isinstance(obj, (types.ListType, types.TupleType)):
_to_json_list(stm, obj)
elif isinstance(obj, types.BooleanType):
if obj:
stm.write('true')
else:
stm.write('false')
elif isinstance(obj, types.FloatType):
if (not (NEG_INF < obj < POS_INF)):
raise JSONError(E_BADFLOAT % obj)
stm.write('%s' % obj)
elif isinstance(obj, (types.IntType, types.LongType)):
stm.write('%d' % obj)
elif isinstance(obj, types.NoneType):
stm.write('null')
elif isinstance(obj, (types.StringType, types.UnicodeType)):
_to_json_string(stm, obj)
elif (hasattr(obj, 'keys') and hasattr(obj, '__getitem__')):
_to_json_dict(stm, obj)
elif hasattr(obj, '__unicode__'):
_to_json_string(stm, obj.__unicode__())
elif hasattr(obj, '__str__'):
_to_json_string(stm, obj.__str__())
else:
raise JSONError(E_UNSUPP % type(obj))
def to_json(obj):
"\n Converts 'obj' to an ASCII JSON string representation.\n "
stm = StringIO.StringIO('')
_to_json_object(stm, obj)
return stm.getvalue()
decode = from_json
encode = to_json
| 27.892617
| 178
| 0.526347
|
4a14804bf28530d98c3ac1af5cb19040d02eacb1
| 13,481
|
py
|
Python
|
SpatialRelationCNN/model/weight_decay_optimizers.py
|
ICRA-2018/generalize_spatial_relations
|
6a87e987848426da757e0add595e3ec035956f01
|
[
"Apache-2.0"
] | 17
|
2018-04-19T14:38:19.000Z
|
2021-12-27T10:33:20.000Z
|
SpatialRelationCNN/model/weight_decay_optimizers.py
|
ICRA-2018/generalize_spatial_relations
|
6a87e987848426da757e0add595e3ec035956f01
|
[
"Apache-2.0"
] | null | null | null |
SpatialRelationCNN/model/weight_decay_optimizers.py
|
ICRA-2018/generalize_spatial_relations
|
6a87e987848426da757e0add595e3ec035956f01
|
[
"Apache-2.0"
] | 7
|
2018-06-03T15:00:10.000Z
|
2020-05-17T07:06:29.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class to make optimizers weight decay ready."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import adam, momentum
from tensorflow.python.util.tf_export import tf_export
def extend_with_decoupled_weight_decay(base_optimizer):
"""Factory function that creates an optimizer class with weight decay.
Returns an optimizer class. An instance of the returned class computes the
update step of `base_optimizer` and additionally decays the weights.
E.g., the class returned by
`extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
`tf.contrib.opt.AdamWOptimizer`.
The API of the new optimizer class slightly differs from the API of the
base optimizer:
- The first argument to the constructor is the weight decay rate.
- `minimize` and `apply_gradients` accept the optional keyword argument
`decay_var_list`, which specifies the variables that should be decayed.
If `None`, all variables that are optimized are decayed.
Usage example:
```python
# MyAdamW is a new class
MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
# Create a MyAdamW object
optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
```
Args:
base_optimizer: An optimizer class that inherits from tf.train.Optimizer.
Returns:
A new optimizer class that inherits from DecoupledWeightDecayExtension
and base_optimizer.
"""
class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
base_optimizer):
"""Base_optimizer with decoupled weight decay.
This class computes the update step of `base_optimizer` and
additionally decays the variable with the weight decay being decoupled from
    the optimization steps w.r.t. the loss function, as described by
Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
For SGD variants, this simplifies hyperparameter search since
it decouples the settings of weight decay and learning rate.
For adaptive gradient algorithms, it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield
better training loss and generalization error in the paper above.
"""
def __init__(self, weight_decay, *args, **kwargs):
super(OptimizerWithDecoupledWeightDecay, self).__init__(
weight_decay, *args, **kwargs)
return OptimizerWithDecoupledWeightDecay
class DecoupledWeightDecayExtension(object):
"""This class allows to extend optimizers with decoupled weight decay.
It implements the decoupled weight decay described by Loshchilov & Hutter
(https://arxiv.org/pdf/1711.05101.pdf), in which the weight decay is
  decoupled from the optimization steps w.r.t. the loss function.
For SGD variants, this simplifies hyperparameter search since it decouples
the settings of weight decay and learning rate.
For adaptive gradient algorithms, it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield better
training loss and generalization error in the paper above.
This class alone is not an optimizer but rather extends existing
optimizers with decoupled weight decay. We explicitly define the two examples
used in the above paper (SGDW and AdamW), but in general this can extend
  any OptimizerX by using
  `extend_with_decoupled_weight_decay(OptimizerX)` and passing the weight
  decay rate to the resulting class's constructor.
In order for it to work, it must be the first class the Optimizer with
weight decay inherits from, e.g.
```python
class AdamWOptimizer(DecoupledWeightDecayExtension, adam.AdamOptimizer):
def __init__(self, weight_decay, *args, **kwargs):
      super(AdamWOptimizer, self).__init__(weight_decay, *args, **kwargs)
```
"""
def __init__(self, weight_decay, **kwargs):
"""Construct the extension class that adds weight decay to an optimizer.
    Args:
      weight_decay: A `Tensor` or a floating point value, the factor by which
        a variable is decayed in the update step.
      **kwargs: Keyword arguments passed on to the base optimizer's
        constructor. (The variables to decay are chosen per call via the
        `decay_var_list` argument of `minimize` and `apply_gradients`.)
    """
self._decay_var_list = None # is set in minimize or apply_gradients
self._weight_decay = weight_decay
# The tensors are initialized in call to _prepare
self._weight_decay_tensor = None
super(DecoupledWeightDecayExtension, self).__init__(**kwargs)
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None, colocate_gradients_with_ops=False,
name=None, grad_loss=None, decay_var_list=None):
"""Add operations to minimize `loss` by updating `var_list` with decay.
    This function is the same as Optimizer.minimize except that it allows
    specifying the variables that should be decayed using decay_var_list.
    If decay_var_list is None, all variables in var_list are decayed.
For more information see the documentation of Optimizer.minimize.
"""
self._decay_var_list = set(decay_var_list) if decay_var_list else False
return super(DecoupledWeightDecayExtension, self).minimize(
loss, global_step=global_step, var_list=var_list,
gate_gradients=gate_gradients, aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
grad_loss=grad_loss)
def apply_gradients(self, grads_and_vars, global_step=None, name=None,
decay_var_list=None):
"""Apply gradients to variables and decay the variables.
    This function is the same as Optimizer.apply_gradients except that it
    allows specifying the variables that should be decayed using
    decay_var_list. If decay_var_list is None, all variables in var_list
    are decayed.
For more information see the documentation of Optimizer.apply_gradients.
"""
self._decay_var_list = set(decay_var_list) if decay_var_list else False
return super(DecoupledWeightDecayExtension, self).apply_gradients(
grads_and_vars, global_step=global_step, name=name)
def _prepare(self):
weight_decay = self._weight_decay
if callable(weight_decay):
weight_decay = weight_decay()
self._weight_decay_tensor = ops.convert_to_tensor(
weight_decay, name="weight_decay")
# Call the optimizers _prepare function.
super(DecoupledWeightDecayExtension, self)._prepare()
def _decay_weights(self, var):
    if not self._decay_var_list or var in self._decay_var_list:
return var.assign_sub(self._weight_decay * var, self._use_locking)
return control_flow_ops.no_op()
# Overwrite the apply functions the base optimizer calls. super().apply_x
# resolves to the apply_x function of the child's BaseOptimizer.
def _apply_dense(self, grad, var):
with ops.control_dependencies([self._decay_weights(var)]):
return super(DecoupledWeightDecayExtension, self)._apply_dense(grad, var)
def _resource_apply_dense(self, grad, var):
with ops.control_dependencies([self._decay_weights(var)]):
return super(DecoupledWeightDecayExtension, self)._resource_apply_dense(
grad, var)
def _apply_sparse(self, grad, var):
with ops.control_dependencies([self._decay_weights(var)]):
return super(DecoupledWeightDecayExtension, self)._apply_sparse(
grad, var)
def _resource_apply_sparse(self, grad, var, indices):
with ops.control_dependencies([self._decay_weights(var)]):
return super(DecoupledWeightDecayExtension, self)._resource_apply_sparse(
grad, var, indices)
@tf_export("contrib.opt.MomentumWOptimizer")
class MomentumWOptimizer(DecoupledWeightDecayExtension,
momentum.MomentumOptimizer):
"""Optimizer that implements the Momentum algorithm with weight_decay.
This is an implementation of the SGDW optimizer described in "Fixing
Weight Decay Regularization in Adam" by Loshchilov & Hutter
(https://arxiv.org/abs/1711.05101)
([pdf])(https://arxiv.org/pdf/1711.05101.pdf).
It computes the update step of `train.MomentumOptimizer` and additionally
decays the variable. Note that this is different from adding
L2 regularization on the variables to the loss. Decoupling the weight decay
from other hyperparameters (in particular the learning rate) simplifies
hyperparameter search.
For further information see the documentation of the Momentum Optimizer.
Note that this optimizer can also be instantiated as
```python
  extend_with_decoupled_weight_decay(tf.train.MomentumOptimizer)(
      weight_decay=weight_decay, learning_rate=learning_rate,
      momentum=momentum)
```
"""
def __init__(self, weight_decay, learning_rate, momentum,
use_locking=False, name="MomentumW", use_nesterov=False):
"""Construct a new MomentumW optimizer.
For further information see the documentation of the Momentum Optimizer.
Args:
weight_decay: A `Tensor` or a floating point value. The weight decay.
learning_rate: A `Tensor` or a floating point value. The learning rate.
momentum: A `Tensor` or a floating point value. The momentum.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Momentum".
use_nesterov: If `True` use Nesterov Momentum.
See [Sutskever et al., 2013](
http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
This implementation always computes gradients at the value of the
variable(s) passed to the optimizer. Using Nesterov Momentum makes the
variable(s) track the values called `theta_t + mu*v_t` in the paper.
@compatibility(eager)
When eager execution is enabled, learning_rate, weight_decay and momentum
can each be a callable that takes no arguments and returns the actual value
to use. This can be useful for changing these values across different
invocations of optimizer functions.
@end_compatibility
"""
super(MomentumWOptimizer, self).__init__(
weight_decay, learning_rate=learning_rate, momentum=momentum,
use_locking=use_locking, name=name, use_nesterov=use_nesterov)
@tf_export("contrib.opt.AdamWOptimizer")
class AdamWOptimizer(DecoupledWeightDecayExtension, adam.AdamOptimizer):
"""Optimizer that implements the Adam algorithm with weight decay.
This is an implementation of the AdamW optimizer described in "Fixing
Weight Decay Regularization in Adam" by Loshchilov & Hutter
(https://arxiv.org/abs/1711.05101)
([pdf])(https://arxiv.org/pdf/1711.05101.pdf).
It computes the update step of `train.AdamOptimizer` and additionally decays
the variable. Note that this is different from adding L2 regularization on
the variables to the loss: it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield better
training loss and generalization error in the paper above.
For further information see the documentation of the Adam Optimizer.
Note that this optimizer can also be instantiated as
```python
  extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)(
      weight_decay=weight_decay, learning_rate=learning_rate)
```
"""
def __init__(self, weight_decay, learning_rate=0.001, beta1=0.9, beta2=0.999,
epsilon=1e-8, use_locking=False, name="AdamW"):
"""Construct a new AdamW optimizer.
For further information see the documentation of the Adam Optimizer.
Args:
weight_decay: A `Tensor` or a floating point value. The weight decay.
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam".
"""
super(AdamWOptimizer, self).__init__(
weight_decay, learning_rate=learning_rate, beta1=beta1, beta2=beta2,
epsilon=epsilon, use_locking=use_locking, name=name)
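# A minimal, guarded usage sketch (not part of the original module): it
# assumes a TF1-style graph/session environment, and the variable, loss and
# hyperparameter values below are illustrative only.
if __name__ == "__main__":
  import tensorflow as tf

  w = tf.Variable([1.0, 2.0], name="w")
  loss = tf.reduce_sum(tf.square(w))
  opt = AdamWOptimizer(weight_decay=1e-4, learning_rate=0.1)
  train_op = opt.minimize(loss, decay_var_list=[w])
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
      sess.run(train_op)
    print(sess.run(w))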
| 45.390572
| 80
| 0.744158
|
4a14817d75ffa275c9efdb4609cffe3542e7e01a
| 845
|
py
|
Python
|
integration_tests/verify_marketupdate.py
|
PeregrineTradersDevTeam/md-data-reader-euronext
|
ecef156b50defb24e64cc3bbabf6b26071ed05d7
|
[
"MIT"
] | 3
|
2020-01-08T09:44:52.000Z
|
2022-03-02T02:16:24.000Z
|
integration_tests/verify_marketupdate.py
|
PeregrineTradersDevTeam/md-data-reader-euronext
|
ecef156b50defb24e64cc3bbabf6b26071ed05d7
|
[
"MIT"
] | null | null | null |
integration_tests/verify_marketupdate.py
|
PeregrineTradersDevTeam/md-data-reader-euronext
|
ecef156b50defb24e64cc3bbabf6b26071ed05d7
|
[
"MIT"
] | 1
|
2020-05-09T06:31:38.000Z
|
2020-05-09T06:31:38.000Z
|
from test_util import build_parquet_data, verify_datasets
import sys
official_application = sys.argv[1]
official_files_location = sys.argv[2]
cmdline_params = ["-p",
"MarketUpdate-for-test",
"-o",
"1001",
official_files_location + "/20190104/084500/224.0.67.193_21193.pcap.gz"]
print("Building MarketUpdate")
build_parquet_data(official_application, cmdline_params)
print("Checking MarketUpdate")
verify_datasets(official_files_location + "/Official-MarketUpdate", "MarketUpdate-for-test-market-update.parquet", {
"MDSeqNum": "mkt_data_seq_num",
"RebroadcastIndicator":"rebroadcast_indicator",
"EMM": "emm",
"EventTime": "event_time",
"UpdateType": "mkt_data_update_type",
"SymbolIndex": "symbol_index",
"NumberOfOrders": "number_of_orders",
"Price": "price",
"Quantity": "quantity"})
| 30.178571
| 116
| 0.726627
|
4a1481c26e3db15603cd41568b827d824b5eeaa8
| 4,644
|
py
|
Python
|
mscreen/autodocktools_prepare_py3k/mglutil/util/recentFiles.py
|
e-mayo/mscreen
|
a50f0b2f7104007c730baa51b4ec65c891008c47
|
[
"MIT"
] | 9
|
2021-03-06T04:24:28.000Z
|
2022-01-03T09:53:07.000Z
|
mglutil/util/recentFiles.py
|
e-mayo/autodocktools-prepare-py3k
|
2dd2316837bcb7c19384294443b2855e5ccd3e01
|
[
"BSD-3-Clause"
] | 3
|
2021-03-07T05:37:16.000Z
|
2021-09-19T15:06:54.000Z
|
mglutil/util/recentFiles.py
|
e-mayo/autodocktools-prepare-py3k
|
2dd2316837bcb7c19384294443b2855e5ccd3e01
|
[
"BSD-3-Clause"
] | 4
|
2019-08-28T23:11:39.000Z
|
2021-11-27T08:43:36.000Z
|
"""Impements Recent Files menues"""
# $Header: /opt/cvs/python/packages/share1.5/mglutil/util/recentFiles.py,v 1.16 2010/10/05 17:25:40 sargis Exp $
#
# $Id: recentFiles.py,v 1.16 2010/10/05 17:25:40 sargis Exp $
import os, pickle
from mglutil.util.packageFilePath import getResourceFolderWithVersion
import tkinter
class RecentFiles:
"""Class to store Recent Files"""
def __init__(self, masterApp, masterMenu, filePath=None,
menuLabel="Open recent", underline=5, index=0):
"""Construct recent files categories. If filePath is not provided
mglutil/recent.pkl is used to store and load the data"""
if not filePath: #use "mglutil/recent.pkl" to store the data
filePath = getResourceFolderWithVersion()
if filePath is None:
return
filePath += os.sep + "mglutil" + os.sep + "recent.pkl"
if os.path.exists(filePath):
try:
                self.categories = pickle.load(open(filePath, 'rb'))
except Exception as inst:
#print inst
#print "Couldn't Load Recent Files."
self.categories = {}
else:
self.categories = {}
self.resourceFilePath = filePath
self.checkCategories()
        if masterMenu is not None:
self.gui(masterMenu, menuLabel, underline=underline, index=index)
else:
self.mainMenu = None
self.masterApp = masterApp
def checkCategories(self):
"""Loops through self.categories to check if recent file still exists.
If not, removes the file from the list"""
for category in list(self.categories.keys()):
newList = [x for x in self.categories[category] if os.path.exists(x[0])]
if len(newList):
self.categories[category] = newList
else:
self.categories.pop(category)
def add(self, filePath, cmdStr, category="Documents"):
"""Add file to self.categories[category] list.
First element in this list is the file.
Second is the command string - cmdStr."""
if not filePath:
return
        if not hasattr(self, 'categories'):
return
if category not in self.categories:
self.categories[category] = []
#self.menuList[category] = Tkinter.Menu(self.mainMenu)
#self.mainMenu.add_cascade(label=category,
# menu=self.menuList[category])
if os.path.exists(filePath):
filePath = os.path.abspath(filePath)
if [filePath,cmdStr] in self.categories[category]:
index = self.categories[category].index([filePath,cmdStr])
self.categories[category].pop(index)
            if self.mainMenu is not None:
                self.mainMenu.delete(index + 1)
if len(self.categories[category]) > 20:
self.categories[category].pop()
#self.menuList[category].delete(10,Tkinter.END)
self.categories[category].insert(0, [filePath,cmdStr])
        if self.mainMenu is not None:
            self.mainMenu.insert(0, 'command', label=filePath,
                                 command=self.callback([filePath, cmdStr]))
self.dumpCategories()
def dumpCategories(self, filePath=None):
"""Calls pickle.dump(self.categories, open(filePath,'w'))"""
if not filePath:
filePath = self.resourceFilePath
try:
            pickle.dump(self.categories, open(filePath, 'wb'))
except Exception as inst:
print("Failed to save recent files")
print(inst)
def gui(self, masterMenu, menuLabel, underline=None, index=0):
self.mainMenu = tkinter.Menu(masterMenu)
masterMenu.insert_cascade(index, label=menuLabel,
menu=self.mainMenu, underline=underline)
self.menuList = {}
for category in self.categories:
# self.menuList[category] = Tkinter.Menu(self.mainMenu)
# self.mainMenu.add_cascade(label=category,
# menu=self.menuList[category])
for listItem in self.categories[category]:
self.mainMenu.add_command(label=listItem[0],
command=self.callback(listItem))
def callback(self, listItem):
def call(listItem=listItem):
masterApp = self.masterApp
eval("masterApp." + listItem[1] + '(' + repr(listItem[0]) +')')
return call
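# A minimal, guarded usage sketch (not part of the original module): it
# assumes a Tk display is available; the demo app, command name and pickle
# path below are illustrative only.
if __name__ == "__main__":
    class DemoApp:
        def openFile(self, path):
            print("opening", path)

    root = tkinter.Tk()
    menubar = tkinter.Menu(root)
    root.config(menu=menubar)
    recent = RecentFiles(DemoApp(), menubar, filePath="recent_demo.pkl")
    recent.add(__file__, "openFile")
    root.mainloop()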
| 45.087379
| 112
| 0.580319
|
4a1482e6f824f3d8e3d71ed2216846161e475ba1
| 11,500
|
py
|
Python
|
p-jonczyk/process_control.py
|
p-jonczyk/q-seleniumbase
|
f7c86df9c04ed4dd9e0c2e37708a40cf5ac08831
|
[
"MIT"
] | null | null | null |
p-jonczyk/process_control.py
|
p-jonczyk/q-seleniumbase
|
f7c86df9c04ed4dd9e0c2e37708a40cf5ac08831
|
[
"MIT"
] | null | null | null |
p-jonczyk/process_control.py
|
p-jonczyk/q-seleniumbase
|
f7c86df9c04ed4dd9e0c2e37708a40cf5ac08831
|
[
"MIT"
] | null | null | null |
'''This test suite contains 10 tests.'''
import pytest
from seleniumbase import BaseCase
from basic_methods import BasicMethods as bm
import config
import constants as const
import time
class TestProcessControl(BaseCase):
def set_process_control_starting_point(self, single_control=False, multiple_items=False, parameters=False, control_action=False, controls=False):
        '''Sets starting point for testing Process control configurations
        SET ONLY ONE VARIABLE TO True
        Parameters:
            single_control (boolean): If True opens Signal Control list
            multiple_items (boolean): If True opens Multiple items controls assignment list
            parameters (boolean): If True opens Parameters list
            control_action (boolean): If True opens Control actions list
            controls (boolean): If True opens Controls list
        '''
self.open(config.url)
bm.log_in(self, config.valid_username, config.valid_password)
self.wait_for_element(const.toast_msg_success_selector)
self.go_to(config.url_process_control)
setting = [(single_control, const.process_config_signal_control_selector),
(multiple_items, const.process_config_multiple_items_selector),
(parameters, const.process_config_parameters_selector),
(control_action, const.process_config_control_actions_selector),
(controls, const.process_config_controls_selector)]
        for flag, selector in setting:
            if flag:
                self.click(selector)
def add_parameter_to_table(self, parameter_name, parameter_type=None, save=True):
        '''Opens Add parameter window and fills Parameter name
        Parameters:
            parameter_name (str): name of new parameter
            parameter_type (str): selector of parameter type from available list
                if None - default parameter type
            save (boolean): If True - clicks Save button'''
self.click(const.parameters_add_btn)
self.type(const.parameters_param_name_selector, parameter_name)
if parameter_type is not None:
self.click_chain([const.parameters_param_type_list_selector,
parameter_type])
        if save:
self.click(const.parameters_param_save_btn)
def delete_element_from_table(self, element_name):
        ''' Delete selected element from table defined by its name
        Parameter:
            element_name (str): Name of element to be deleted'''
bm.choose_element_from_table(self, element_name)
self.click_chain([const.parameters_delete_btn,
const.dialog_accept_btn_selector])
time.sleep(1)
def add_control_action(self, control_action_name, parameter_name=None, save=True):
        ''' Adds new control action
        Parameters:
            control_action_name (str): Name of control action to be added
            parameter_name (str): Name of parameter to be added'''
self.click(const.control_actions_add_btn)
self.type(const.control_actions_name_selector, control_action_name)
# additional option slider
self.click('#mat-slide-toggle-5')
if parameter_name is not None:
self.click_chain([const.control_actions_add_param_btn,
const.control_actions_add_param_name_selector])
self.type(const.control_actions_add_param_name_selector,
parameter_name)
self.click_chain(
['#mat-slide-toggle-53', const.control_actions_param_list_confirm])
        if save:
self.click(const.control_actions_add_save_btn)
def add_control(self, control_name, control_action_name=None, save=True):
        ''' Adds new control in Controls configuration
        Parameters:
            control_name (str): Name of control to be added
            control_action_name (str): Name of control action to be added'''
        self.click(const.controls_add_btn)
        self.type(const.controls_name_selector, control_name)
if control_action_name is not None:
self.click_chain([const.controls_add_control_action,
const.controls_add_control_action_name_selector])
self.type(const.controls_add_control_action_name_selector,
control_action_name)
self.click_chain(
['#mat-slide-toggle-20', const.controls_add_control_action_confirm_btn])
        if save:
self.click(const.controls_add_save_btn)
def test_OpenParametersAddParam_ParamAppearsOnList(self):
        '''Checks if when user opens Parameter list and adds new parameter
        then parameter appears on the list'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(
config.parameter_name, parameter_type=const.parameters_param_type_integer)
self.assert_element(f'td:contains({config.parameter_name})')
self.delete_element_from_table(config.parameter_name)
def test_OpenParametersChooseAndDelParam_ParamShouldNotBeOnList(self):
        '''Checks if when user opens Parameter list, selects parameter and deletes it
        then parameter should not be on the list'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(config.parameter_name)
self.delete_element_from_table(config.parameter_name)
self.assert_element_absent(f'td:contains({config.parameter_name})')
    def test_OpenParametersChooseAndDuplicateParam_AddParamWindowAppears(self):
        ''' Checks if when user selects parameter and clicks duplicate then Add parameter
        window appears with filled Parameter name the same as the chosen parameter to be duplicated'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(config.parameter_name)
self.click(const.parameters_duplicate_btn)
self.assert_element('span:contains("Add parameter")')
self.assert_element('mat-error:contains("Name must be unique")')
self.click(const.parameters_param_cancel_btn)
self.delete_element_from_table(config.parameter_name)
    def test_AddParameterChooseParamTypeList_ListViewAppears(self):
        ''' Checks if when user chooses parameter type List,
        then list view appears'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(
config.parameter_name, parameter_type=const.parameters_param_type_list_type, save=False)
self.assert_element('.selection-list-details-container')
self.assert_element('input[id="mat-input-1"]')
    def test_AddParameterTypeListAddElement_ElementAppearsInConfig(self):
        ''' Checks if when user chooses parameter type List and adds a list element,
        then the added list element appears in the proper field in Parameter configuration'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(
config.parameter_name, parameter_type=const.parameters_param_type_list_type, save=False)
self.click('.selection-list-details-container .ic_mw_addbutton')
self.type('.edit-text-value-modal-container textarea',
config.parameter_config_list_elem)
self.click('[id="mat-dialog-1"] .ic_mw_savebutton')
self.assert_element('input[id="mat-input-1"]')
def test_ParameterConfigChooseParamTypeInt_FieldsAppears(self):
        ''' Checks if when user chooses parameter type Integer value
        then additional fields appear'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(
config.parameter_name, parameter_type=const.parameters_param_type_integer, save=False)
self.assert_element('input[name="parameterMin"]')
self.assert_element('input[name="parameterMax"]')
def test_ParameterConfigChooseParamTypeFloat_FieldsAppers(self):
        ''' Checks if when user chooses parameter type Float value
        then additional fields appear'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(
config.parameter_name, parameter_type=const.parameters_param_type_float, save=False)
self.assert_element('input[name="parameterMin"]')
self.assert_element('input[name="parameterMax"]')
def test_ParameterConfigBothCommentSlidersOn_FieldsAppear(self):
        ''' Checks if when user checks on both comment available and
        comment required sliders then additional fields appear'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(config.parameter_name, save=False)
self.click_chain([const.parameters_comment_available_slider,
const.parameters_comment_required_slider])
self.assert_element('[name="optionalParameterCommentType"]')
self.assert_element('[name="requiredParameterCommentForNOKType"]')
def test_ParameterConfigParamRequiredOn_ShowsYesOnList(self):
        ''' Checks if when user checks on slider of parameter required
        then it shows "Yes" in parameter required column in table'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(config.parameter_name, save=False)
self.click(const.parameters_param_required_slider)
self.click(const.parameters_param_save_btn)
self.assertEqual(self.get_text(
bm.choose_element_from_table(self, config.parameter_name, get_value=3)), "Yes",
"Parameter required shows: No")
self.delete_element_from_table(config.parameter_name)
def test_AddParamAssingInControlActionsAssignInControls_ShouldBeVisibleInActionsAndThenInControls(self):
        ''' Checks if when user adds new parameter, assigns it in Control Actions
        to new Control Action and assigns that Control Action in Controls
        then all those steps should be done flawlessly and be visible
        in each mentioned configuration of process control.'''
self.set_process_control_starting_point(parameters=True)
self.add_parameter_to_table(config.parameter_name)
self.go_to(config.url_process_control_control_actions)
self.add_control_action(
config.control_action_name, config.parameter_name)
bm.choose_element_from_table(self, config.control_action_name)
self.assert_element(f'td:contains({config.control_action_name})')
self.go_to(config.url_process_control_controls)
self.add_control(config.control_name, config.control_action_name)
bm.choose_element_from_table(self, config.control_name)
self.assert_element(f'td:contains({config.control_name})')
self.delete_element_from_table(config.control_name)
self.go_to(config.url_process_control_control_actions)
self.delete_element_from_table(config.control_action_name)
self.go_to(config.url_process_control_parameters)
self.delete_element_from_table(config.parameter_name)
| 47.916667
| 150
| 0.701217
|
4a148443a69236617744aec763239d0055451099
| 10,559
|
py
|
Python
|
linear_algebra/matrix.py
|
Vincentiuzs/linear_algebra
|
4e2ec58a0fca01122bc7c845b12202e154b7bcdd
|
[
"MIT"
] | null | null | null |
linear_algebra/matrix.py
|
Vincentiuzs/linear_algebra
|
4e2ec58a0fca01122bc7c845b12202e154b7bcdd
|
[
"MIT"
] | null | null | null |
linear_algebra/matrix.py
|
Vincentiuzs/linear_algebra
|
4e2ec58a0fca01122bc7c845b12202e154b7bcdd
|
[
"MIT"
] | null | null | null |
from collections import Counter
class Matrix:
"""A (p \u00D7 q) matrix."""
def __init__(self, array):
# Check if the array is a valid matrix and
# raise errors/exceptions if invalid
self._isValid(array)
self.matrix = array
# size of the matrix: p x q
self.p, self.q = self._size()
self.size = self._size()
def _isValid(self, array):
"""
Args:
array:
Returns:
"""
assert(len(array) > 0)
# check if array is of type list
if isinstance(array, list):
# variable to check if array is 2d list
array_is_2d = False
# check if elements of array are all lists
if sum(list(map(lambda x: isinstance(x, list), array))) == len(array):
# check if elements of array (lists) have same length
lengths = Counter(list(map(len, array)))
if lengths.most_common(1)[0][1] != len(array):
raise Exception('rows of the matrix have different lengths')
else:
array_is_2d = True
if not array_is_2d:
raise Exception('matrix can only be created from a 2D list')
# lambda function to generate all matrix entries in single list
extract_elements = lambda x, y=False: [i for i in x] if not y \
else [i for row in x for i in row]
# check if all elements are real or complex numbers
array_all_elements = extract_elements(array, array_is_2d)
is_valid_type = lambda x: isinstance(x, int) or isinstance(x, complex) or isinstance(x, float)
# function to check if all entries are valid types
if sum(list(map(is_valid_type, array_all_elements))) == len(array_all_elements):
return True
else:
raise TypeError('elements must be of type: int, complex or float')
# raise a type error since the class only uses a list
else:
raise TypeError(f'the matrix must be a list not {type(array)}')
def _size(self):
"""Returns size of the matrix"""
array = self.matrix
if isinstance(array, list):
# check if elements of array are all lists
if sum(list(map(lambda x: isinstance(x, list), array))) == len(array):
return len(array), len(array[0])
# check if elements of array are numbers i.e., row vector
elif sum(list(map(lambda x: isinstance(x, int), array))) == len(array) or \
sum(list(map(lambda x: isinstance(x, float), array))) == len(array):
return 1, len(array)
def __repr__(self) -> 'str':
'''Return string representation for a particular Matrix'''
        # number of spaces to reserve for row name
rsv_spaces_index = len(str(self.p)) + 3
# function to calculate the longest number in each column
longest_number = lambda x: [[len(str(x[i][j])) for i in range(self.p)] \
for j in range(self.q)]
# number of spaces to reserve in each column
rsv_spaces_cols1 = list(map(max, longest_number(self.matrix)))
rsv_spaces_cols2 = [len(str(j)) + 3 for j in range(self.q)]
# find the maximums between rsv_spaces_cols# lists
rsv_spaces_cols = [max(i, j) for i, j in zip(rsv_spaces_cols1, rsv_spaces_cols2)]
# row representing colnames
colname = lambda x: f'[,{x}]'
mat_str_lst = [f"{' ':<{rsv_spaces_index}}"] + [f' {colname(j):>{rsv_spaces_cols[j]}}' for j in range(self.q)]
mat_str = " ".join(mat_str_lst)
        # build one display row per matrix row
for i in range(self.p):
index = f'[{i},]'
mat_str += " ".join([f'\n{index:>{rsv_spaces_index}}'] +[f' {self.matrix[i][j]:{rsv_spaces_cols[j]}}'\
for j in range(self.q)])
return str(mat_str)
    def __eq__(self, other):
        if isinstance(other, Matrix):
            if self.size != other.size:
                return False
            # check if row elements of the two matrices are equal
            rows_equal = lambda x, y: [i == j for i, j in zip(x, y)]
            # list of bools indicating which rows are equal
            equals = rows_equal(self.matrix, other.matrix)
            # all rows must compare equal
            return sum(equals) == len(equals)
        return False
def __add__(self, other):
'''Return self+other'''
return self.add(other)
def __mul__(self, other):
'''Return self*other'''
return self.mul(other)
def __rmul__(self, other):
'''return other*self'''
if not isinstance(other, Matrix):
return self * other
def __sub__(self, other):
'''Return self-other'''
return self.sub(other)
def __getitem__(self, key):
if isinstance(key, tuple):
rowkey = key[0]
colkey = key[1]
if isinstance(rowkey, int) and isinstance(colkey, int):
# chooses a specific element
return self.getrows()[rowkey][colkey]
elif isinstance(rowkey, int) and isinstance(colkey, slice):
                # choose a row's elements
return Matrix([self.getrows()[rowkey][colkey]])
elif isinstance(rowkey, slice) and isinstance(colkey, int):
# choose a column elements
return Matrix([self.getcols()[colkey][rowkey]]).transpose()
elif isinstance(rowkey, slice) and isinstance(colkey, slice):
# choose a submatrix
submatrix = []
def new_slice(aslice, isrowkey=True):
start = aslice.start
stop = aslice.stop
step = aslice.step
if aslice.start is None:
start = 0
if aslice.stop is None:
if isrowkey:
stop = self.p
else:
stop = self.q
if aslice.step is None:
step = 1
return slice(start, stop, step)
rowkey = new_slice(rowkey)
colkey = new_slice(colkey, isrowkey=False)
j = 0
for i in range(rowkey.start, rowkey.stop, rowkey.step):
submatrix.append([])
for k in range(colkey.start, colkey.stop, colkey.step):
submatrix[j].append(self.matrix[i][k])
j += 1
return Matrix(submatrix)
def __setitem__(self, key, value):
pass
def getrows(self):
return self.matrix
def getcols(self):
return self.transpose().matrix
def add(self, other):
"""
Args:
other:
Returns:
"""
if isinstance(other, Matrix):
# check if matrices have same size so that they can be added
if self.size == other.size:
# adds corresponding elements given two rows each from different matrix
add_row_elements = lambda x, y: [i + j for i, j in zip(x,y)]
# add corresponding elements given two matrices
add_all_elements = lambda x, y: [add_row_elements(i, j) for i, j in zip(x,y)]
return Matrix(add_all_elements(self.matrix, other.matrix))
else:
raise Exception('matrices have different sizes')
else:
raise TypeError(f'Matrix object can only be added to other Matrix, not {type(other)}')
def sub(self, other):
"""
Args:
other:
Returns:
"""
return self + other * -1
def mul(self, other):
"""
Args:
other:
Returns:
"""
# checks if is other is a number (either real or complex) for scalar
is_number = lambda x: isinstance(x, float) or isinstance(x, complex) or isinstance(x, int)
if is_number(other):
prod = []
for i in range(self.p):
prod.append([])
for j in range(self.q):
# multiply each entry by other
prod[i].append(self.matrix[i][j] * other)
return Matrix(prod)
elif isinstance(other, Matrix):
# check if number of column of this matrix equals number of rows in other matrix
if self.q == other.p:
a = self.matrix; b = other.matrix
c = []
for i in range(self.p):
c.append([])
for j in range(other.q):
c[i].append(0)
for k in range(self.q):
c[i][j] += a[i][k] * b[k][j]
return Matrix(c)
else:
raise Exception('matrices are non-comformable')
else:
            raise TypeError(f'Matrix can only be multiplied by Matrix, int, complex or float not {type(other)}')
def transpose(self):
""" """
# make rows to be columns, and columns be rows
transpose = [[row[j] for row in self.matrix] for j in range(self.q)]
return Matrix(transpose)
def is_square(self):
return self.p == self.q
def is_symmetric(self):
        return self.is_square() and self == self.transpose()
def matrix(*elements, nrows, ncols):
"""Returns a Matrix object"""
elements = list(elements)
    start = 0
    end = ncols
    matrix = []
for i in range(nrows):
matrix.append(elements[start: end])
start += ncols
end += ncols
return Matrix(matrix)
def vector(*elements, row=True):
if row:
return matrix(*elements, nrows=1, ncols=len(elements))
return matrix(*elements, nrows=len(elements), ncols=1)
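# A minimal, guarded usage sketch (not part of the original module);
# the numbers below are illustrative only.
if __name__ == "__main__":
    a = matrix(1, 2, 3, 4, nrows=2, ncols=2)
    b = matrix(5, 6, 7, 8, nrows=2, ncols=2)
    print(a + b)            # element-wise sum
    print(a * b)            # matrix product
    print(a.transpose())    # rows become columns
    print(vector(1, 2, 3))  # 1 x 3 row vector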
| 32.192073
| 118
| 0.509044
|
4a1485110bb37756ff6f472f2c719255774609a7
| 669
|
py
|
Python
|
techlibs/ice40/brams_init.py
|
kallisti5/yosys
|
0b9bb852c66ec2a6e9b4b510b3e2e32b8c6a6b16
|
[
"ISC"
] | 1,718
|
2018-01-06T15:16:30.000Z
|
2022-03-30T17:44:22.000Z
|
techlibs/ice40/brams_init.py
|
kallisti5/yosys
|
0b9bb852c66ec2a6e9b4b510b3e2e32b8c6a6b16
|
[
"ISC"
] | 1,669
|
2018-01-06T22:57:24.000Z
|
2022-03-31T06:51:49.000Z
|
techlibs/ice40/brams_init.py
|
kallisti5/yosys
|
0b9bb852c66ec2a6e9b4b510b3e2e32b8c6a6b16
|
[
"ISC"
] | 569
|
2018-01-19T01:51:14.000Z
|
2022-03-31T23:03:09.000Z
|
#!/usr/bin/env python3
def write_init_vh(filename, initbits):
with open(filename, "w") as f:
for i in range(16):
print("localparam [255:0] INIT_%X = {" % i, file=f)
for k in range(32):
print(" %s%s" % (", ".join(["INIT[%4d]" % initbits[i*256 + 255 - k*8 - l] for l in range(8)]), "," if k != 31 else ""), file=f)
print("};", file=f);
write_init_vh("techlibs/ice40/brams_init1.vh", [i//2 + 2048*(i%2) for i in range(4096)])
write_init_vh("techlibs/ice40/brams_init2.vh", [i//4 + 1024*(i%4) for i in range(4096)])
write_init_vh("techlibs/ice40/brams_init3.vh", [i//8 + 512*(i%8) for i in range(4096)])
| 44.6
| 144
| 0.569507
|
4a148562a7fb5bddc0570ac147c49fd034558af2
| 4,843
|
py
|
Python
|
wireshark-2.0.13/tools/dftestlib/integer.py
|
mahrukhfida/mi
|
7187765aa225e71983969ef5285771ac77c8309a
|
[
"Apache-2.0"
] | null | null | null |
wireshark-2.0.13/tools/dftestlib/integer.py
|
mahrukhfida/mi
|
7187765aa225e71983969ef5285771ac77c8309a
|
[
"Apache-2.0"
] | null | null | null |
wireshark-2.0.13/tools/dftestlib/integer.py
|
mahrukhfida/mi
|
7187765aa225e71983969ef5285771ac77c8309a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from dftestlib import dftest
class testInteger(dftest.DFTest):
trace_file = "ntp.pcap"
def test_eq_1(self):
dfilter = "ip.version == 4"
self.assertDFilterCount(dfilter, 1)
def test_eq_2(self):
dfilter = "ip.version == 6"
self.assertDFilterCount(dfilter, 0)
def test_eq_3(self):
# Invalid filter (only one equals sign)
dfilter = "ip.version = 4"
self.assertDFilterFail(dfilter)
def test_eq_4(self):
# Invalid filter
dfilter = "ip.version == the quick brown fox jumps over the lazy dog"
self.assertDFilterFail(dfilter)
def test_eq_5(self):
# Invalid filter
dfilter = "ip.version == 4 the quick brown fox jumps over the lazy dog"
self.assertDFilterFail(dfilter)
def test_ne_1(self):
dfilter = "ip.version != 0"
self.assertDFilterCount(dfilter, 1)
def test_ne_2(self):
dfilter = "ip.version != 4"
self.assertDFilterCount(dfilter, 0)
def test_u_gt_1(self):
dfilter = "ip.version > 3"
self.assertDFilterCount(dfilter, 1)
def test_u_gt_2(self):
dfilter = "ip.version > 4"
self.assertDFilterCount(dfilter, 0)
def test_u_gt_3(self):
dfilter = "ip.version > 5"
self.assertDFilterCount(dfilter, 0)
def test_u_ge_1(self):
dfilter = "ip.version >= 3"
self.assertDFilterCount(dfilter, 1)
def test_u_ge_2(self):
dfilter = "ip.version >= 4"
self.assertDFilterCount(dfilter, 1)
def test_u_ge_3(self):
dfilter = "ip.version >= 5"
self.assertDFilterCount(dfilter, 0)
def test_u_lt_1(self):
dfilter = "ip.version < 3"
self.assertDFilterCount(dfilter, 0)
def test_u_lt_2(self):
dfilter = "ip.version < 4"
self.assertDFilterCount(dfilter, 0)
def test_u_lt_3(self):
dfilter = "ip.version < 5"
self.assertDFilterCount(dfilter, 1)
def test_u_le_1(self):
dfilter = "ip.version <= 3"
self.assertDFilterCount(dfilter, 0)
def test_u_le_2(self):
dfilter = "ip.version <= 4"
self.assertDFilterCount(dfilter, 1)
def test_u_le_3(self):
dfilter = "ip.version <= 5"
self.assertDFilterCount(dfilter, 1)
def test_s_gt_1(self):
dfilter = "ntp.precision > -12"
self.assertDFilterCount(dfilter, 1)
def test_s_gt_2(self):
dfilter = "ntp.precision > -11"
self.assertDFilterCount(dfilter, 0)
def test_s_gt_3(self):
dfilter = "ntp.precision > -10"
self.assertDFilterCount(dfilter, 0)
def test_s_ge_1(self):
dfilter = "ntp.precision >= -12"
self.assertDFilterCount(dfilter, 1)
def test_s_ge_2(self):
dfilter = "ntp.precision >= -11"
self.assertDFilterCount(dfilter, 1)
def test_s_ge_3(self):
dfilter = "ntp.precision >= -10"
self.assertDFilterCount(dfilter, 0)
def test_s_lt_1(self):
dfilter = "ntp.precision < -12"
self.assertDFilterCount(dfilter, 0)
def test_s_lt_2(self):
dfilter = "ntp.precision < -11"
self.assertDFilterCount(dfilter, 0)
def test_s_lt_3(self):
dfilter = "ntp.precision < -10"
self.assertDFilterCount(dfilter, 1)
def test_s_le_1(self):
dfilter = "ntp.precision <= -12"
self.assertDFilterCount(dfilter, 0)
def test_s_le_2(self):
dfilter = "ntp.precision <= -11"
self.assertDFilterCount(dfilter, 1)
def test_s_le_3(self):
dfilter = "ntp.precision <= -10"
self.assertDFilterCount(dfilter, 1)
def test_bool_eq_1(self):
dfilter = "ip.flags.df == 0"
self.assertDFilterCount(dfilter, 1)
def test_bool_eq_2(self):
dfilter = "ip.flags.df == 1"
self.assertDFilterCount(dfilter, 0)
def test_bool_ne_1(self):
dfilter = "ip.flags.df != 1"
self.assertDFilterCount(dfilter, 1)
def test_bool_ne_2(self):
dfilter = "ip.flags.df != 0"
self.assertDFilterCount(dfilter, 0)
| 29.351515
| 79
| 0.640099
|
4a14859346ef76ac19a52d81f3b3ec08a46bc032
| 1,403
|
py
|
Python
|
pcat2py/class/21ba0f12-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/21ba0f12-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/21ba0f12-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
################################################################################
# 21ba0f12-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "21ba0f12-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKCU:\Software\Policies\Microsoft\WindowsMediaPlayer', 'PreventCodecDownload')
# Output Lines
self.output = [r'HKCU:\Software\Policies\Microsoft\WindowsMediaPlayer', ('PreventCodecDownload=' + str(dword))]
if dword == 1:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\WindowsMediaPlayer'")
cli.powershell(r"Set-ItemProperty -path 'HKCU:\Software\Policies\Microsoft\WindowsMediaPlayer' -name 'PreventCodecDownload' -value 1 -Type DWord")
| 36.921053
| 154
| 0.602994
|
4a1485f7be7bd93d0d754df6a082d1122c3063bc
| 19,105
|
py
|
Python
|
acrv_modified_clones/image_pipeline/camera_calibration/nodes/cameracalibrator.py
|
myalfred03/team_acrv_2017
|
9d3a28b4069262291fd28990676b69e90b06ac3a
|
[
"BSD-3-Clause"
] | 25
|
2017-11-24T19:04:51.000Z
|
2020-08-27T06:25:49.000Z
|
acrv_modified_clones/image_pipeline/camera_calibration/nodes/cameracalibrator.py
|
myalfred03/team_acrv_2017
|
9d3a28b4069262291fd28990676b69e90b06ac3a
|
[
"BSD-3-Clause"
] | 3
|
2018-10-03T06:44:06.000Z
|
2018-11-07T14:53:58.000Z
|
acrv_modified_clones/image_pipeline/camera_calibration/nodes/cameracalibrator.py
|
myalfred03/team_acrv_2017
|
9d3a28b4069262291fd28990676b69e90b06ac3a
|
[
"BSD-3-Clause"
] | 16
|
2017-12-24T14:19:08.000Z
|
2021-07-14T13:34:51.000Z
|
#!/usr/bin/python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import sensor_msgs.msg
import sensor_msgs.srv
import message_filters
from message_filters import ApproximateTimeSynchronizer
import os
from collections import deque
import threading
import functools
import time
import cv2
import numpy
from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, ChessboardInfo, Patterns
from std_msgs.msg import String
from std_srvs.srv import Empty
class DisplayThread(threading.Thread):
"""
Thread that displays the current images
It is its own thread so that all display can be done
in one thread to overcome imshow limitations and
https://github.com/ros-perception/image_pipeline/issues/85
"""
def __init__(self, queue, opencv_calibration_node):
threading.Thread.__init__(self)
self.queue = queue
self.opencv_calibration_node = opencv_calibration_node
def run(self):
cv2.namedWindow("display", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)
while True:
# wait for an image (could happen at the very beginning when the queue is still empty)
while len(self.queue) == 0:
time.sleep(0.1)
im = self.queue[0]
cv2.imshow("display", im)
k = cv2.waitKey(6) & 0xFF
if k in [27, ord('q')]:
rospy.signal_shutdown('Quit')
elif k == ord('s'):
self.opencv_calibration_node.screendump(im)
class ConsumerThread(threading.Thread):
def __init__(self, queue, function):
threading.Thread.__init__(self)
self.queue = queue
self.function = function
def run(self):
while True:
# wait for an image (could happen at the very beginning when the queue is still empty)
while len(self.queue) == 0:
time.sleep(0.1)
self.function(self.queue[0])
class CalibrationNode:
def __init__(self, boards, service_check = True, synchronizer = message_filters.TimeSynchronizer, flags = 0,
pattern=Patterns.Chessboard, camera_name='', checkerboard_flags = 0):
if service_check:
# assume any non-default service names have been set. Wait for the service to become ready
for svcname in ["camera", "left_camera", "right_camera"]:
remapped = rospy.remap_name(svcname)
if remapped != svcname:
fullservicename = "%s/set_camera_info" % remapped
print("Waiting for service", fullservicename, "...")
try:
rospy.wait_for_service(fullservicename, 5)
print("OK")
except rospy.ROSException:
print("Service not found")
rospy.signal_shutdown('Quit')
self._boards = boards
self._calib_flags = flags
self._checkerboard_flags = checkerboard_flags
self._pattern = pattern
self._camera_name = camera_name
lsub = message_filters.Subscriber('left', sensor_msgs.msg.Image)
rsub = message_filters.Subscriber('right', sensor_msgs.msg.Image)
ts = synchronizer([lsub, rsub], 4)
ts.registerCallback(self.queue_stereo)
msub = message_filters.Subscriber('image', sensor_msgs.msg.Image)
msub.registerCallback(self.queue_monocular)
self.set_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("camera"),
sensor_msgs.srv.SetCameraInfo)
self.set_left_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("left_camera"),
sensor_msgs.srv.SetCameraInfo)
self.set_right_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("right_camera"),
sensor_msgs.srv.SetCameraInfo)
self.q_mono = deque([], 1)
self.q_stereo = deque([], 1)
self.c = None
mth = ConsumerThread(self.q_mono, self.handle_monocular)
mth.setDaemon(True)
mth.start()
sth = ConsumerThread(self.q_stereo, self.handle_stereo)
sth.setDaemon(True)
sth.start()
def redraw_stereo(self, *args):
pass
def redraw_monocular(self, *args):
pass
def queue_monocular(self, msg):
self.q_mono.append(msg)
def queue_stereo(self, lmsg, rmsg):
self.q_stereo.append((lmsg, rmsg))
def handle_monocular(self, msg):
        if self.c is None:
if self._camera_name:
self.c = MonoCalibrator(self._boards, self._calib_flags, self._pattern, name=self._camera_name,
checkerboard_flags=self._checkerboard_flags)
else:
self.c = MonoCalibrator(self._boards, self._calib_flags, self._pattern,
                                        checkerboard_flags=self._checkerboard_flags)
# This should just call the MonoCalibrator
drawable = self.c.handle_msg(msg)
self.displaywidth = drawable.scrib.shape[1]
self.redraw_monocular(drawable)
def handle_stereo(self, msg):
        if self.c is None:
if self._camera_name:
self.c = StereoCalibrator(self._boards, self._calib_flags, self._pattern, name=self._camera_name,
checkerboard_flags=self._checkerboard_flags)
else:
self.c = StereoCalibrator(self._boards, self._calib_flags, self._pattern,
checkerboard_flags=self._checkerboard_flags)
drawable = self.c.handle_msg(msg)
self.displaywidth = drawable.lscrib.shape[1] + drawable.rscrib.shape[1]
self.redraw_stereo(drawable)
def check_set_camera_info(self, response):
if response.success:
return True
for i in range(10):
print("!" * 80)
print()
print("Attempt to set camera info failed: " + response.status_message)
print()
for i in range(10):
print("!" * 80)
print()
rospy.logerr('Unable to set camera info for calibration. Failure message: %s' % response.status_message)
return False
def do_upload(self):
self.c.report()
print(self.c.ost())
info = self.c.as_message()
rv = True
if self.c.is_mono:
response = self.set_camera_info_service(info)
rv = self.check_set_camera_info(response)
else:
response = self.set_left_camera_info_service(info[0])
rv = rv and self.check_set_camera_info(response)
response = self.set_right_camera_info_service(info[1])
rv = rv and self.check_set_camera_info(response)
return rv
class OpenCVCalibrationNode(CalibrationNode):
""" Calibration node with an OpenCV Gui """
FONT_FACE = cv2.FONT_HERSHEY_SIMPLEX
FONT_SCALE = 0.6
FONT_THICKNESS = 2
def __init__(self, *args, **kwargs):
CalibrationNode.__init__(self, *args, **kwargs)
self.queue_display = deque([], 1)
self.display_thread = DisplayThread(self.queue_display, self)
self.display_thread.setDaemon(True)
self.display_thread.start()
@classmethod
def putText(cls, img, text, org, color = (0,0,0)):
cv2.putText(img, text, org, cls.FONT_FACE, cls.FONT_SCALE, color, thickness = cls.FONT_THICKNESS)
@classmethod
def getTextSize(cls, text):
return cv2.getTextSize(text, cls.FONT_FACE, cls.FONT_SCALE, cls.FONT_THICKNESS)[0]
def on_mouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN and self.displaywidth < x:
if self.c.goodenough:
if 180 <= y < 280:
self.c.do_calibration()
if self.c.calibrated:
if 280 <= y < 380:
self.c.do_save()
elif 380 <= y < 480:
# Only shut down if we set camera info correctly, #3993
if self.do_upload():
rospy.signal_shutdown('Quit')
def on_scale(self, scalevalue):
if self.c.calibrated:
self.c.set_alpha(scalevalue / 100.0)
def button(self, dst, label, enable):
dst.fill(255)
size = (dst.shape[1], dst.shape[0])
if enable:
color = (155, 155, 80)
else:
color = (224, 224, 224)
cv2.circle(dst, (size[0] // 2, size[1] // 2), min(size) // 2, color, -1)
(w, h) = self.getTextSize(label)
self.putText(dst, label, ((size[0] - w) // 2, (size[1] + h) // 2), (255,255,255))
def buttons(self, display):
x = self.displaywidth
self.button(display[180:280,x:x+100], "CALIBRATE", self.c.goodenough)
self.button(display[280:380,x:x+100], "SAVE", self.c.calibrated)
self.button(display[380:480,x:x+100], "COMMIT", self.c.calibrated)
def y(self, i):
"""Set up right-size images"""
return 30 + 40 * i
def screendump(self, im):
i = 0
while os.access("/tmp/dump%d.png" % i, os.R_OK):
i += 1
cv2.imwrite("/tmp/dump%d.png" % i, im)
def redraw_monocular(self, drawable):
height = drawable.scrib.shape[0]
width = drawable.scrib.shape[1]
display = numpy.zeros((max(480, height), width + 100, 3), dtype=numpy.uint8)
display[0:height, 0:width,:] = drawable.scrib
display[0:height, width:width+100,:].fill(255)
self.buttons(display)
if not self.c.calibrated:
if drawable.params:
for i, (label, lo, hi, progress) in enumerate(drawable.params):
(w,_) = self.getTextSize(label)
self.putText(display, label, (width + (100 - w) // 2, self.y(i)))
color = (0,255,0)
if progress < 1.0:
color = (0, int(progress*255.), 255)
cv2.line(display,
(int(width + lo * 100), self.y(i) + 20),
(int(width + hi * 100), self.y(i) + 20),
color, 4)
else:
self.putText(display, "lin.", (width, self.y(0)))
linerror = drawable.linear_error
if linerror < 0:
msg = "?"
else:
msg = "%.2f" % linerror
#print "linear", linerror
self.putText(display, msg, (width, self.y(1)))
self.queue_display.append(display)
def redraw_stereo(self, drawable):
height = drawable.lscrib.shape[0]
width = drawable.lscrib.shape[1]
display = numpy.zeros((max(480, height), 2 * width + 100, 3), dtype=numpy.uint8)
display[0:height, 0:width,:] = drawable.lscrib
display[0:height, width:2*width,:] = drawable.rscrib
display[0:height, 2*width:2*width+100,:].fill(255)
self.buttons(display)
if not self.c.calibrated:
if drawable.params:
for i, (label, lo, hi, progress) in enumerate(drawable.params):
(w,_) = self.getTextSize(label)
self.putText(display, label, (2 * width + (100 - w) // 2, self.y(i)))
color = (0,255,0)
if progress < 1.0:
color = (0, int(progress*255.), 255)
cv2.line(display,
(int(2 * width + lo * 100), self.y(i) + 20),
(int(2 * width + hi * 100), self.y(i) + 20),
color, 4)
else:
self.putText(display, "epi.", (2 * width, self.y(0)))
if drawable.epierror == -1:
msg = "?"
else:
msg = "%.2f" % drawable.epierror
self.putText(display, msg, (2 * width, self.y(1)))
# TODO dim is never set anywhere. Supposed to be observed chessboard size?
if drawable.dim != -1:
self.putText(display, "dim", (2 * width, self.y(2)))
self.putText(display, "%.3f" % drawable.dim, (2 * width, self.y(3)))
self.queue_display.append(display)
def main():
from optparse import OptionParser, OptionGroup
parser = OptionParser("%prog --size SIZE1 --square SQUARE1 [ --size SIZE2 --square SQUARE2 ]",
description=None)
parser.add_option("-c", "--camera_name",
type="string", default='narrow_stereo',
help="name of the camera to appear in the calibration file")
group = OptionGroup(parser, "Chessboard Options",
"You must specify one or more chessboards as pairs of --size and --square options.")
group.add_option("-p", "--pattern",
type="string", default="chessboard",
help="calibration pattern to detect - 'chessboard', 'circles', 'acircles'")
group.add_option("-s", "--size",
action="append", default=[],
help="chessboard size as NxM, counting interior corners (e.g. a standard chessboard is 7x7)")
group.add_option("-q", "--square",
action="append", default=[],
help="chessboard square size in meters")
parser.add_option_group(group)
group = OptionGroup(parser, "ROS Communication Options")
group.add_option("--approximate",
type="float", default=0.0,
help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras")
group.add_option("--no-service-check",
action="store_false", dest="service_check", default=True,
help="disable check for set_camera_info services at startup")
parser.add_option_group(group)
group = OptionGroup(parser, "Calibration Optimizer Options")
group.add_option("--fix-principal-point",
action="store_true", default=False,
help="fix the principal point at the image center")
group.add_option("--fix-aspect-ratio",
action="store_true", default=False,
help="enforce focal lengths (fx, fy) are equal")
group.add_option("--zero-tangent-dist",
action="store_true", default=False,
help="set tangential distortion coefficients (p1, p2) to zero")
group.add_option("-k", "--k-coefficients",
type="int", default=2, metavar="NUM_COEFFS",
help="number of radial distortion coefficients to use (up to 6, default %default)")
group.add_option("--disable_calib_cb_fast_check", action='store_true', default=False,
help="uses the CALIB_CB_FAST_CHECK flag for findChessboardCorners")
parser.add_option_group(group)
options, args = parser.parse_args()
if len(options.size) != len(options.square):
parser.error("Number of size and square inputs must be the same!")
if not options.square:
options.square.append("0.108")
options.size.append("8x6")
boards = []
for (sz, sq) in zip(options.size, options.square):
size = tuple([int(c) for c in sz.split('x')])
boards.append(ChessboardInfo(size[0], size[1], float(sq)))
if options.approximate == 0.0:
sync = message_filters.TimeSynchronizer
else:
sync = functools.partial(ApproximateTimeSynchronizer, slop=options.approximate)
num_ks = options.k_coefficients
calib_flags = 0
if options.fix_principal_point:
calib_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
if options.fix_aspect_ratio:
calib_flags |= cv2.CALIB_FIX_ASPECT_RATIO
if options.zero_tangent_dist:
calib_flags |= cv2.CALIB_ZERO_TANGENT_DIST
if (num_ks > 3):
calib_flags |= cv2.CALIB_RATIONAL_MODEL
if (num_ks < 6):
calib_flags |= cv2.CALIB_FIX_K6
if (num_ks < 5):
calib_flags |= cv2.CALIB_FIX_K5
if (num_ks < 4):
calib_flags |= cv2.CALIB_FIX_K4
if (num_ks < 3):
calib_flags |= cv2.CALIB_FIX_K3
if (num_ks < 2):
calib_flags |= cv2.CALIB_FIX_K2
if (num_ks < 1):
calib_flags |= cv2.CALIB_FIX_K1
pattern = Patterns.Chessboard
if options.pattern == 'circles':
pattern = Patterns.Circles
elif options.pattern == 'acircles':
pattern = Patterns.ACircles
elif options.pattern != 'chessboard':
print('Unrecognized pattern %s, defaulting to chessboard' % options.pattern)
if options.disable_calib_cb_fast_check:
checkerboard_flags = 0
else:
checkerboard_flags = cv2.CALIB_CB_FAST_CHECK
rospy.init_node('cameracalibrator')
node = OpenCVCalibrationNode(boards, options.service_check, sync, calib_flags, pattern, options.camera_name,
checkerboard_flags=checkerboard_flags)
rospy.spin()
if __name__ == "__main__":
try:
main()
except Exception as e:
import traceback
traceback.print_exc()
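# --- Illustrative sketch (not part of the original node) ---
# The --k-coefficients handling in main() above builds an OpenCV flag mask: every radial
# coefficient beyond num_ks is pinned to zero via CALIB_FIX_K*, and asking for more than
# three coefficients switches calibration to the rational distortion model. The helper
# below (distortion_flags is a hypothetical name) reproduces that cascade compactly.
import cv2
def distortion_flags(num_ks):
    flags = 0
    if num_ks > 3:
        flags |= cv2.CALIB_RATIONAL_MODEL
    fix_flags = [cv2.CALIB_FIX_K1, cv2.CALIB_FIX_K2, cv2.CALIB_FIX_K3,
                 cv2.CALIB_FIX_K4, cv2.CALIB_FIX_K5, cv2.CALIB_FIX_K6]
    for i, fix_flag in enumerate(fix_flags):
        if num_ks < i + 1:
            flags |= fix_flag
    return flags
# e.g. the default of two coefficients fixes k3..k6 and keeps the polynomial model
assert distortion_flags(2) == (cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 |
                               cv2.CALIB_FIX_K5 | cv2.CALIB_FIX_K6)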
| 40.82265
| 120
| 0.599948
|
4a148677ac93716db5f97f879b5ba4d836cf06fc
| 1,029
|
py
|
Python
|
src/pyrin/database/migration/alembic/handlers/stamp.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyrin/database/migration/alembic/handlers/stamp.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyrin/database/migration/alembic/handlers/stamp.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
alembic handlers stamp module.
"""
from pyrin.database.migration.alembic.enumerations import AlembicCLIHandlersEnum
from pyrin.database.migration.alembic.interface import AlembicCLIHandlerBase
from pyrin.database.migration.alembic.decorators import alembic_cli_handler
from pyrin.database.migration.alembic.handlers.params import SQLParam, TagParam, \
PurgeParam, RevisionsParam
@alembic_cli_handler()
class StampCLIHandler(AlembicCLIHandlerBase):
"""
stamp cli handler class.
"""
def __init__(self):
"""
initializes an instance of StampCLIHandler.
"""
super().__init__(AlembicCLIHandlersEnum.STAMP)
def _inject_params(self, params):
"""
injects all the params of current handler into given list.
:param list[CLIParamBase] params: list of all params.
"""
params.extend([SQLParam(), TagParam(),
PurgeParam(), RevisionsParam()])
return super()._inject_params(params)
| 27.810811
| 82
| 0.691934
|
4a14868037d7354f128b26e63200ae65de25c9c5
| 934
|
py
|
Python
|
examples/make_al_like_dislike_folders.py
|
Vikash-Kothary/tindetheus
|
6505c4cccf4edc9eec3e3e2b8ff833f25c3f19f0
|
[
"MIT"
] | 86
|
2018-03-13T04:43:53.000Z
|
2022-02-16T07:03:23.000Z
|
examples/make_al_like_dislike_folders.py
|
own2pwn/tindetheus
|
065eab368ae2ab0a29a467a33c13e0133044a344
|
[
"MIT"
] | 50
|
2018-03-14T22:28:47.000Z
|
2021-09-03T17:22:56.000Z
|
examples/make_al_like_dislike_folders.py
|
own2pwn/tindetheus
|
065eab368ae2ab0a29a467a33c13e0133044a344
|
[
"MIT"
] | 27
|
2018-01-30T07:13:44.000Z
|
2021-12-10T18:40:48.000Z
|
import shutil
import os
import numpy as np
# This script is what tindetheus like_folder does
def al_copy_images(image_list, userID, didILike, database_str='al/'):
    # copy images from the temp folder into the database folder
if didILike == 'Like':
fname = 'like/'
else:
fname = 'dislike/'
    count = 0
    database_loc = []
    for image in image_list:
        new_fname = database_str + fname + userID + '.' + str(count) + '.jpg'
        shutil.copyfile(image, new_fname)
        database_loc.append(new_fname)
        count += 1
    return database_loc
# make folders
if not os.path.exists('al'):
os.makedirs('al')
if not os.path.exists('al/like'):
os.makedirs('al/like')
if not os.path.exists('al/dislike'):
os.makedirs('al/dislike')
# load the auto like database
al_data = np.load('al_database.npy', allow_pickle=True)
# copy profile images to either al/like or al/dislike
for user in al_data:
al_copy_images(user[8], user[0], user[-1])
| 23.35
| 69
| 0.66167
|
4a1486df1d893446ca5ee910fd2070d74dfa3b43
| 211
|
py
|
Python
|
test/simple_source/stmts/10_if_break_finally.py
|
gauravssnl/python-uncompyle6
|
136f42a610c0701e0770c1c278efd1107b1c6ed1
|
[
"MIT"
] | 1
|
2021-03-24T11:54:03.000Z
|
2021-03-24T11:54:03.000Z
|
test/simple_source/stmts/10_if_break_finally.py
|
gauravssnl/python-uncompyle6
|
136f42a610c0701e0770c1c278efd1107b1c6ed1
|
[
"MIT"
] | null | null | null |
test/simple_source/stmts/10_if_break_finally.py
|
gauravssnl/python-uncompyle6
|
136f42a610c0701e0770c1c278efd1107b1c6ed1
|
[
"MIT"
] | null | null | null |
# Tests
# while1stmt ::= SETUP_LOOP l_stmts JUMP_BACK POP_BLOCK COME_FROM
# tryfinallystmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
try:
while 1:
if __file__:
break
finally:
pass
| 19.181818
| 65
| 0.687204
|
4a1487903a2ceec92d9a5c7b1397d50e94eef0c6
| 211
|
py
|
Python
|
server/app/__init__.py
|
abhishekg785/hiro
|
950224d07797740b8840316bb412c1827eab46f0
|
[
"MIT"
] | 7
|
2017-04-12T10:58:42.000Z
|
2021-10-03T18:07:29.000Z
|
server/app/__init__.py
|
abhishekg785/hiro
|
950224d07797740b8840316bb412c1827eab46f0
|
[
"MIT"
] | null | null | null |
server/app/__init__.py
|
abhishekg785/hiro
|
950224d07797740b8840316bb412c1827eab46f0
|
[
"MIT"
] | 5
|
2017-07-06T12:28:10.000Z
|
2020-01-07T19:45:25.000Z
|
# creating the app package
from flask import Flask
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)
# imported at the bottom because views.py needs the app object
from app import views
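# A hypothetical sketch of the companion app/views.py that this import expects: it pulls
# in the already-created app object and registers routes on it, which is why the import
# has to happen after `app` exists (otherwise the two modules would import each other
# before either is fully initialized).
#
#     from app import app
#
#     @app.route('/')
#     def index():
#         return 'ok'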
| 21.1
| 61
| 0.78673
|
4a14886234d7fb18a876e55e74d910c4760f26d4
| 8,944
|
py
|
Python
|
where/estimation/estimators/cpwl.py
|
ingridfausk/where
|
b65398911075b7ddef3a3a1146efa428eae498fe
|
[
"MIT"
] | 16
|
2018-08-31T10:31:11.000Z
|
2022-03-15T16:07:24.000Z
|
where/estimation/estimators/cpwl.py
|
ingridfausk/where
|
b65398911075b7ddef3a3a1146efa428eae498fe
|
[
"MIT"
] | 5
|
2018-07-13T14:04:24.000Z
|
2021-06-17T02:14:44.000Z
|
where/estimation/estimators/cpwl.py
|
ingridfausk/where
|
b65398911075b7ddef3a3a1146efa428eae498fe
|
[
"MIT"
] | 15
|
2018-06-07T05:45:24.000Z
|
2022-03-15T16:07:27.000Z
|
"""Continuous PieceWise Linear estimator
Description:
------------
"""
# External library imports
import numpy as np
import scipy.sparse
# Midgard imports
from midgard.dev import plugins
from midgard.math.unit import Unit
# Where imports
from where import apriori
from where.estimation.estimators._kalman import KalmanFilter
from where.lib import config
from where.lib import log
@plugins.register_named("partial_config_keys")
def partial_config_keys():
"""List the types of partials needed by the estimator
The CPWL estimator uses both constant and stochastic parameters.
Returns:
Tuple: Strings with names of config keys listing which partial models to run.
"""
return ("estimate_constant", "estimate_stochastic")
@plugins.register
def estimate_cpwl(dset, partial_vectors, obs_noise):
"""Estimate with continuous piecewise linear functions
TODO: Describe phi and Q
Args:
dset (Dataset): Model run data.
partial_vectors (Dict): Names and values of the partial derivatives for each partial config key.
obs_noise (Array): Observation noise, numpy array with one float value for each observation.
"""
# Organize partial derivatives (state vectors) into a matrix
n_constant = len(partial_vectors["estimate_constant"])
n_stochastic = len(partial_vectors["estimate_stochastic"])
n = n_constant + 2 * n_stochastic
num_unknowns = n
num_obs = dset.num_obs
h = np.zeros((num_obs, n, 1))
param_names = list()
# Constant parameters are simply copied from the partial fields
for idx, name in enumerate(partial_vectors["estimate_constant"]):
h[:, idx, 0] = dset["partial." + name][:]
param_names.append(name)
# Stochastic parameters are estimated as CPWL functions by adding a rate parameter
for idx, name in enumerate(partial_vectors["estimate_stochastic"]):
h[:, n_constant + idx * 2, 0] = dset["partial." + name][:]
param_names.extend([name, name + "_rate_"]) # Trailing underscore in rate_ means field is not added to dset
# Read information about parameters from config files
ref_time = np.ones(n) * dset.time.utc[0].mjd
knot_interval = np.ones(n) * np.inf
process_noise = np.zeros(n)
apriori_stdev = np.empty(n)
constant_params = {c.split("-")[0] for c in partial_vectors["estimate_constant"]}
for param in constant_params:
idx = np.array([c.startswith(param + "-") for c in param_names])
apriori_stdev[idx] = config.tech[param].apriori_stdev.float
stochastic_params = {c.split("-")[0] for c in partial_vectors["estimate_stochastic"]}
for param in stochastic_params:
# Set default knot_interval
intervals = config.tech[param].knot_interval.list
const_idx = np.array([c.startswith(param + "-") for c in param_names])
rate_idx = np.array([c.startswith(param + "-") and c.endswith("rate_") for c in param_names])
knot_interval[rate_idx] = float(intervals.pop(0)) * Unit.seconds2day
for interval in intervals:
# (Potentially) overwrite with station specific knot_interval
sta, _, seconds = interval.partition(":")
rate_idx_sta = np.array(
[c.startswith(param + "-") and c.endswith("rate_") and sta in c for c in param_names]
)
knot_interval[rate_idx_sta] = float(seconds) * Unit.seconds2day
process_noise[rate_idx] = config.tech[param].process_noise.float
apriori_stdev[const_idx] = config.tech[param].apriori_stdev.float
apriori_stdev[rate_idx] = config.tech[param].apriori_rate_stdev.float # Rate parameters
# Initialize variables
z = dset.obs - dset.calc
# phi = np.repeat(np.eye(n)[None, :, :], num_obs, axis=0)
phi = list()
delta_phi = np.eye(n, k=1)
delta_phi[:, :n_constant] = 0
delta_phi[:, n_constant::2] = 0
Q = dict()
for epoch in range(num_obs - 1):
# TODO: Check that 24 is correct here (and use unit instead)
delta_t = (dset.time.utc[epoch + 1].mjd - dset.time.utc[epoch].mjd) * 24
# phi[epoch] += delta_phi * delta_t
phi.append(scipy.sparse.csr_matrix(np.eye(n) + delta_phi * delta_t))
idx = np.logical_and(process_noise, dset.time.utc[epoch + 1].mjd > ref_time + knot_interval)
        indices = np.where(idx)[0]
        Q[epoch] = {(i, i): process_noise[i] ** 2 for i in indices}
ref_time[idx] += knot_interval[idx] * ((dset.time.utc[epoch + 1].mjd - ref_time[idx]) // knot_interval[idx])
num_unknowns += int(sum(idx))
phi.append(scipy.sparse.csr_matrix(np.eye(n)))
# Add pseudo-observations
constraints = config.tech.get(key="estimate_constraint", default="").as_list(split_re=", *")
if constraints:
trf_constraints = [c for c in constraints if "crf" not in c]
reference_frame = config.tech.reference_frames.list[0]
trf = apriori.get("trf", time=dset.time.utc.mean, reference_frames=reference_frame)
d = np.zeros((n, 6))
stations = set()
for idx, column in enumerate(param_names):
if "_site_pos-" not in column:
continue
station = column.split("-", maxsplit=1)[-1].rsplit("_", maxsplit=1)[0]
key = dset.meta[station]["site_id"]
if key in trf:
x0, y0, z0 = trf[key].pos.trs # TODO: Take units into account
if column.endswith("_x"):
d[idx, :] = np.array([1, 0, 0, 0, z0, -y0])
if column.endswith("_y"):
d[idx, :] = np.array([0, 1, 0, -z0, 0, x0])
if column.endswith("_z"):
d[idx, :] = np.array([0, 0, 1, y0, -x0, 0])
stations.add(station)
# TODO deal with slr_site_pos etc
log.info(
f"Applying {'/'.join(trf_constraints).upper()} with {', '.join(stations)} from {reference_frame.upper()}"
)
if "nnt" in constraints and "nnr" in constraints and "vlbi_site_pos" in constant_params:
obs_noise = np.hstack((obs_noise, np.array([0.0001 ** 2] * 3 + [(1.5e-11) ** 2] * 3))).T
elif "nnt" in constraints and "nnr" not in constraints and "vlbi_site_pos" in constant_params:
d = d[:, 0:3]
obs_noise = np.hstack((obs_noise, np.array([0.0001 ** 2] * 3))).T
elif "nnt" not in constraints and "nnr" in constraints and "vlbi_site_pos" in constant_params:
d = d[:, 3:6]
obs_noise = np.hstack((obs_noise, np.array([(1.5e-11) ** 2] * 3))).T
elif "nnt" not in constraints and "nnr" not in constraints and "vlbi_site_pos" in constant_params:
d = np.zeros((n, 0))
log.warn(f"Unknown constraints {'/'.join(constraints).upper()}. Not applying.")
num_constraints = d.shape[1]
try:
h = np.vstack((h, (np.linalg.inv(d.T @ d) @ d.T)[:, :, None]))
except np.linalg.linalg.LinAlgError:
pass
if "nnr_crf" in constraints and "vlbi_src_dir" in constant_params:
celestial_reference_frame = config.tech.celestial_reference_frames.list[0]
crf = apriori.get("crf", time=dset.time, celestial_reference_frames=celestial_reference_frame)
# NNR to CRF
log.info(f"Applying NNR constraint to {celestial_reference_frame.upper()}")
H2 = np.zeros((3, n))
for idx, column in enumerate(param_names):
if "_src_dir-" not in column:
continue
source = column.split("-", maxsplit=1)[-1].split("_")[0]
if source in crf:
ra = crf[source].pos.right_ascension
dec = crf[source].pos.declination
if column.endswith("_ra"):
H2[0, idx] = -np.cos(ra) * np.sin(dec) * np.cos(dec)
H2[1, idx] = -np.sin(ra) * np.sin(dec) * np.cos(dec)
H2[2, idx] = np.cos(dec) ** 2
if column.endswith("_dec"):
H2[0, idx] = np.sin(ra)
H2[1, idx] = -np.cos(ra)
obs_noise = np.hstack((obs_noise, np.array([(1e-6) ** 2] * 3)))
num_constraints += 3
h = np.vstack((h, H2[:, :, None]))
z = np.hstack((z, np.zeros(num_constraints))).T
# phi = np.vstack((phi, np.repeat(np.eye(n)[None, :, :], num_constraints, axis=0)))
phi = phi + [scipy.sparse.csr_matrix(np.eye(n))] * num_constraints
# Initialize and run the Kalman filter
kalman = KalmanFilter(h, z=z, apriori_stdev=apriori_stdev, phi=phi, r=obs_noise, Q=Q, param_names=param_names)
kalman.filter()
# Update the dataset with results from the filter
kalman.update_dataset(dset, param_names=param_names, normal_idx=slice(0, n_constant), num_unknowns=num_unknowns)
kalman.cleanup()
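# --- Illustrative sketch (not from the original module) ---
# The transition matrices built above encode the continuous piecewise linear model:
# constant parameters keep an identity row, while each stochastic parameter is paired
# with a rate column so that value_{k+1} = value_k + rate_k * delta_t. The toy layout
# below assumes one constant parameter followed by one (value, rate) pair.
import numpy as np
n_constant, n_stochastic = 1, 1
n = n_constant + 2 * n_stochastic
delta_phi = np.eye(n, k=1)
delta_phi[:, :n_constant] = 0    # constant parameters have no rate coupling
delta_phi[:, n_constant::2] = 0  # only the rate column feeds its value row
delta_t = 0.5                    # hours, matching the mjd-difference * 24 above
phi = np.eye(n) + delta_phi * delta_t
print(phi)
# [[1.  0.  0. ]
#  [0.  1.  0.5]
#  [0.  0.  1. ]]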
| 43.843137
| 117
| 0.614155
|
4a1488fa9c36c681e14ed807b0df8cde371d62fa
| 293
|
py
|
Python
|
jaraco/windows/__init__.py
|
jaraco/jaraco.windows
|
e858172b4d5ee91233a8cc5319de99f17848f090
|
[
"MIT"
] | 21
|
2016-01-31T00:58:59.000Z
|
2021-05-06T22:30:56.000Z
|
jaraco/windows/__init__.py
|
jaraco/jaraco.windows
|
e858172b4d5ee91233a8cc5319de99f17848f090
|
[
"MIT"
] | 14
|
2016-07-21T12:02:08.000Z
|
2021-08-06T03:07:54.000Z
|
jaraco/windows/__init__.py
|
jaraco/jaraco.windows
|
e858172b4d5ee91233a8cc5319de99f17848f090
|
[
"MIT"
] | 5
|
2016-06-14T04:57:04.000Z
|
2021-05-06T22:30:57.000Z
|
#!/usr/bin/env python
# $Id$
"""
jaraco.windows
A lightweight wrapper to provide a pythonic API to the Windows Platform.
This package attempts to provide interfaces similar or compatible
with Mark Hammond's pywin32 library, but avoids the use of extension
modules by utilizing ctypes.
"""
| 20.928571
| 72
| 0.774744
|
4a148a7bd1b4751a65f4e6dbe88632417b90f963
| 802
|
gyp
|
Python
|
binding.gyp
|
hxfdarling/Clipboard
|
439ace0787246dfb747b6c8aefba62c95ed87629
|
[
"MIT"
] | 1
|
2018-06-08T23:39:07.000Z
|
2018-06-08T23:39:07.000Z
|
binding.gyp
|
hxfdarling/clipboard
|
439ace0787246dfb747b6c8aefba62c95ed87629
|
[
"MIT"
] | null | null | null |
binding.gyp
|
hxfdarling/clipboard
|
439ace0787246dfb747b6c8aefba62c95ed87629
|
[
"MIT"
] | null | null | null |
{
"targets": [{
"target_name": "binding",
"sources": [
"src/main.cc"
],
"conditions": [
['OS=="mac"',
{
"sources":[
"src/clip_osx.h",
"src/clip_osx.mm"
],
'defines': [
'__MACOSX_CORE__',
'__MAC__'
],
'link_settings': {
'libraries': [
'-framework Cocoa',
'-framework CoreFoundation',
]
},
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'OTHER_CFLAGS': [
'-ObjC++',
'-std=c++11'
]
}
}
],
['OS=="win"',
{
"sources":[
"src/clip_win.h",
"src/clip_win.cc"
],
'defines': [
'__WIN32__'
]
}
],
['OS=="linux"',
{}
]
]
}]
}
| 16.367347
| 41
| 0.374065
|
4a148a9d1f3713cc1ecf4f78622f6541e8b63dac
| 1,481
|
py
|
Python
|
setup.py
|
d0m1987/ping_pong_paradox
|
e6e37a6e123a817b8a59a9c53c3a84eaca73d28f
|
[
"MIT"
] | null | null | null |
setup.py
|
d0m1987/ping_pong_paradox
|
e6e37a6e123a817b8a59a9c53c3a84eaca73d28f
|
[
"MIT"
] | null | null | null |
setup.py
|
d0m1987/ping_pong_paradox
|
e6e37a6e123a817b8a59a9c53c3a84eaca73d28f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click==8.0.4', 'pygame==2.1.2']
test_requirements = ['pytest>=3', ]
setup(
author="Dominic Buehler",
author_email='dominic.buehler@gmx.de',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Ping Pong in a different style than you know it ;-)",
entry_points={
'console_scripts': [
'ping_pong_paradox=ping_pong_paradox.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='ping_pong_paradox',
name='ping_pong_paradox',
packages=find_packages(include=['ping_pong_paradox', 'ping_pong_paradox.*']),
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/d0m1987/ping_pong_paradox',
version='0.1.0',
zip_safe=False,
)
| 29.62
| 81
| 0.643484
|
4a148b103fbb08e4c1bbac073bff65e46892aed0
| 1,163
|
py
|
Python
|
data/sampler.py
|
zhaofang0627/HPBTT
|
98cec9ff4ef95a01393718b024e9645e77fb70ee
|
[
"MIT"
] | 35
|
2020-11-11T11:43:57.000Z
|
2021-10-15T05:00:29.000Z
|
data/sampler.py
|
zhaofang0627/HPBTT
|
98cec9ff4ef95a01393718b024e9645e77fb70ee
|
[
"MIT"
] | 4
|
2020-12-05T14:33:42.000Z
|
2021-07-21T21:24:29.000Z
|
data/sampler.py
|
zhaofang0627/HPBTT
|
98cec9ff4ef95a01393718b024e9645e77fb70ee
|
[
"MIT"
] | 6
|
2020-11-12T07:01:03.000Z
|
2022-03-27T19:25:05.000Z
|
from __future__ import absolute_import
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data.sampler import (
Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler,
WeightedRandomSampler)
class RandomIdentitySampler(Sampler):
def __init__(self, data_source, num_instances=1):
self.data_source = data_source
self.num_instances = num_instances
self.index_dic = defaultdict(list)
for d in data_source:
self.index_dic[d[1]].append(d[0])
self.pids = list(self.index_dic.keys())
self.num_samples = len(self.pids)
def __len__(self):
return self.num_samples * self.num_instances
def __iter__(self):
indices = torch.randperm(self.num_samples)
ret = []
for i in indices:
pid = self.pids[i]
t = self.index_dic[pid]
if len(t) >= self.num_instances:
t = np.random.choice(t, size=self.num_instances, replace=False)
else:
t = np.random.choice(t, size=self.num_instances, replace=True)
ret.extend(t)
return iter(ret)
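# A small usage sketch (the (index, person_id) tuples below are made up): the sampler
# yields exactly num_instances indices per identity, resampling with replacement when an
# identity has fewer images than requested.
if __name__ == "__main__":
    data = [(0, 'a'), (1, 'a'), (2, 'b'), (3, 'b'), (4, 'c')]
    sampler = RandomIdentitySampler(data, num_instances=2)
    print(len(sampler), list(sampler))  # 6 indices in total, two per identity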
| 32.305556
| 79
| 0.642304
|
4a148ba2ddd8437920661cac9409a0cf1384ce4b
| 1,213
|
py
|
Python
|
version.py
|
abdullahcheema63/zulip
|
12474a3deb5f898009fccba3d43299ca1660015b
|
[
"Apache-2.0"
] | null | null | null |
version.py
|
abdullahcheema63/zulip
|
12474a3deb5f898009fccba3d43299ca1660015b
|
[
"Apache-2.0"
] | null | null | null |
version.py
|
abdullahcheema63/zulip
|
12474a3deb5f898009fccba3d43299ca1660015b
|
[
"Apache-2.0"
] | null | null | null |
import os
ZULIP_VERSION = "2.2.dev+git"
# Add information on number of commits and commit hash to version, if available
zulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')
if os.path.exists(zulip_git_version_file):
with open(zulip_git_version_file) as f:
version = f.read().strip()
if version:
ZULIP_VERSION = version
LATEST_MAJOR_VERSION = "2.1"
LATEST_RELEASE_VERSION = "2.1.2"
LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.org/2019/12/13/zulip-2-1-released/"
LATEST_DESKTOP_VERSION = "4.0.3"
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
# the major version to indicate that folks should provision in both
# directions.
# Typically,
# * adding a dependency only requires a minor version bump;
# * removing a dependency requires a major version bump;
# * upgrading a dependency requires a major version bump, unless the
# upgraded dependency is backwards compatible with all of our
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
PROVISION_VERSION = '75.5'
| 39.129032
| 102
| 0.755153
|
4a148ba43651d62f2d0593642c738e4024e3b059
| 3,581
|
py
|
Python
|
EXAMPLES/12d_flex_mani/run_apo_flex_mani_ekdmd.py
|
Jimmy-INL/SKDMD
|
9f84cd2b7af6fdc6eb7740a12370a9bf30fb5bbb
|
[
"MIT"
] | 1
|
2020-11-27T17:10:24.000Z
|
2020-11-27T17:10:24.000Z
|
EXAMPLES/12d_flex_mani/run_apo_flex_mani_ekdmd.py
|
Jimmy-INL/SKDMD
|
9f84cd2b7af6fdc6eb7740a12370a9bf30fb5bbb
|
[
"MIT"
] | null | null | null |
EXAMPLES/12d_flex_mani/run_apo_flex_mani_ekdmd.py
|
Jimmy-INL/SKDMD
|
9f84cd2b7af6fdc6eb7740a12370a9bf30fb5bbb
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys
sys.dont_write_bytecode = True
sys.path.append('../../EVAL_SRC')
from main_apo import ClassModelEDMD
from main_apo import ClassApoEval
# Options
selected_modes_flag = True
truncation_threshold = 1e-3
def main(case, noise_level, model_list, num_cut_of_plt, normalize_phi_tilde, max_iter, alpha_range):
# setup case name
case_name = case + '_noise_level_' + str(noise_level)
## prepare data
# validation trajectory for mode selection
valid_data = np.load('./12d_flex_mani_noise_level_0/501_validData.npz')
valid_true_trajectory = valid_data['Xtrain']
valid_tspan = valid_data['tspan']
# test trajectory for true prediction
test_data = np.load('./12d_flex_mani_noise_level_0/501_testData.npz')
test_true_trajectory = test_data['Xtrain']
test_tspan = test_data['tspan']
for model_name in model_list:
print('=========================')
print('')
print('current model = ', model_name)
print('')
model_path_edmd = './' + case_name + '/' + model_name + '/edmd.model'
model_edmd = ClassModelEDMD(model_path=model_path_edmd)
eval_model = ClassApoEval(model_edmd, model_name, case_name,
normalize_phi_tilde=normalize_phi_tilde,
max_iter=max_iter,
alpha_range=alpha_range,
selected_modes_flag=selected_modes_flag,
truncation_threshold=truncation_threshold)
if selected_modes_flag:
# get best k Koopman modes
best_k_accurate_mode_index, \
best_k_aposteriori_eigen = eval_model.order_modes_with_accuracy_and_aposterior_eigentj(true_tsnap=valid_tspan,
true_tj=valid_true_trajectory,
num_user_defined=num_cut_of_plt)
# multi-task elastic net sweep for sparse reconstruction
eval_model.sweep_sparse_reconstruction_for_modes_selection(true_tj=valid_true_trajectory,
top_k_selected_eigenTj=best_k_aposteriori_eigen,
top_k_index=best_k_accurate_mode_index)
# sweep prediction for different modes.
eval_model.sweep_sparse_prediction_comparison(true_tsnap=test_tspan,
true_trajectory=test_true_trajectory)
else:
eval_model.model_pred_save_trajectory_given_true_trajectory(test_tspan, test_true_trajectory,
selected_modes_flag, None, None, eval_model.save_dir)
# save the a posteriori model
eval_model.save_model()
if __name__ == '__main__':
case = '12d_flex_mani'
noise_level = 0
## EDMD case
type = 'EDMD'
    model_list = ['d-edmd-rff-490-gaussian_sigma-110-rank-490']  # 490 random Fourier features, per the model name
normalize_phi_tilde = True # for KDMD normalizing is performing good, but not for EDMD...
num_cut_of_plt = 140 ## 10: just for testing
alpha_range = np.logspace(-16, 0, 50) # new
# alpha_range = np.logspace(-6, 2, 50) # new
main(case, noise_level, model_list, num_cut_of_plt, normalize_phi_tilde,
max_iter=1e2, alpha_range=alpha_range)
| 41.639535
| 131
| 0.597319
|
4a148bd3d2b740a30f5b42d6d72b7958b3617c60
| 42
|
py
|
Python
|
pyble/const/characteristic/blood_pressure_feature.py
|
bgromov/PyBLEWrapper
|
8a5d016e65b3c259391ddc97c371ab4b1b5c61b5
|
[
"MIT"
] | 14
|
2015-03-30T23:11:36.000Z
|
2020-04-07T00:57:12.000Z
|
pyble/const/characteristic/blood_pressure_feature.py
|
bgromov/PyBLEWrapper
|
8a5d016e65b3c259391ddc97c371ab4b1b5c61b5
|
[
"MIT"
] | 3
|
2016-05-17T06:11:07.000Z
|
2017-05-15T16:43:09.000Z
|
pyble/const/characteristic/blood_pressure_feature.py
|
bgromov/PyBLEWrapper
|
8a5d016e65b3c259391ddc97c371ab4b1b5c61b5
|
[
"MIT"
] | 11
|
2016-03-11T08:53:03.000Z
|
2019-03-11T21:32:13.000Z
|
NAME="Blood Pressure Feature"
UUID=0x2A49
| 14
| 29
| 0.809524
|
4a148be8db936cde6cf8503da547834e7849752e
| 29,506
|
py
|
Python
|
tlslite/handshakesettings.py
|
rajdroid/tlslite-ng
|
80d5c2feab434058882a4b46b7cb41f3d718a4a4
|
[
"Unlicense"
] | 9
|
2020-10-16T15:24:58.000Z
|
2021-11-16T23:56:18.000Z
|
tlslite/handshakesettings.py
|
rajdroid/tlslite-ng
|
80d5c2feab434058882a4b46b7cb41f3d718a4a4
|
[
"Unlicense"
] | null | null | null |
tlslite/handshakesettings.py
|
rajdroid/tlslite-ng
|
80d5c2feab434058882a4b46b7cb41f3d718a4a4
|
[
"Unlicense"
] | 4
|
2020-10-22T09:32:07.000Z
|
2021-02-18T05:54:17.000Z
|
# Authors:
# Trevor Perrin
# Dave Baggett (Arcode Corporation) - cleanup handling of constants
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""Class for setting handshake parameters."""
from .constants import CertificateType
from .utils import cryptomath
from .utils import cipherfactory
from .utils.compat import ecdsaAllCurves, int_types
CIPHER_NAMES = ["chacha20-poly1305",
"aes256gcm", "aes128gcm",
"aes256ccm", "aes128ccm",
"aes256", "aes128",
"3des"]
ALL_CIPHER_NAMES = CIPHER_NAMES + ["chacha20-poly1305_draft00",
"aes128ccm_8", "aes256ccm_8",
"rc4", "null"]
# Don't allow "md5" by default
MAC_NAMES = ["sha", "sha256", "sha384", "aead"]
ALL_MAC_NAMES = MAC_NAMES + ["md5"]
KEY_EXCHANGE_NAMES = ["ecdhe_ecdsa", "rsa", "dhe_rsa", "ecdhe_rsa", "srp_sha",
"srp_sha_rsa", "ecdh_anon", "dh_anon"]
CIPHER_IMPLEMENTATIONS = ["openssl", "pycrypto", "python"]
CERTIFICATE_TYPES = ["x509"]
RSA_SIGNATURE_HASHES = ["sha512", "sha384", "sha256", "sha224", "sha1"]
ECDSA_SIGNATURE_HASHES = ["sha512", "sha384", "sha256", "sha224", "sha1"]
ALL_RSA_SIGNATURE_HASHES = RSA_SIGNATURE_HASHES + ["md5"]
RSA_SCHEMES = ["pss", "pkcs1"]
# while secp521r1 is the most secure, it's also much slower than the others
# so place it as the last one
CURVE_NAMES = ["x25519", "x448", "secp384r1", "secp256r1",
"secp521r1"]
ALL_CURVE_NAMES = CURVE_NAMES + ["secp256k1"]
if ecdsaAllCurves:
ALL_CURVE_NAMES += ["secp224r1", "secp192r1"]
ALL_DH_GROUP_NAMES = ["ffdhe2048", "ffdhe3072", "ffdhe4096", "ffdhe6144",
"ffdhe8192"]
CURVE_ALIASES = {"secp256r1": ('NIST256p', 'prime256v1', 'P-256'),
"secp384r1": ('NIST384p', 'P-384'),
"secp521r1": ('NIST521p', 'P-521'),
"secp256k1": ('SECP256k1',),
"secp192r1": ('NIST192p', 'P-192'),
"secp224r1": ('NIST224p', 'P-224')}
KNOWN_VERSIONS = ((3, 0), (3, 1), (3, 2), (3, 3), (3, 4))
TICKET_CIPHERS = ["chacha20-poly1305", "aes256gcm", "aes128gcm", "aes128ccm",
"aes128ccm_8", "aes256ccm", "aes256ccm_8"]
PSK_MODES = ["psk_dhe_ke", "psk_ke"]
class Keypair(object):
"""
Key, certificate and related data.
Stores also certificate associate data like OCSPs and transparency info.
TODO: add the above
First certificate in certificates needs to match key, remaining should
build a trust path to a root CA.
:vartype key: RSAKey or ECDSAKey
:ivar key: private key
:vartype certificates: list(X509)
    :ivar certificates: the certificates to send to the peer if the key is selected
for use. The first one MUST include the public key of the ``key``
"""
def __init__(self, key=None, certificates=tuple()):
self.key = key
self.certificates = certificates
def validate(self):
"""Sanity check the keypair."""
if not self.key or not self.certificates:
raise ValueError("Key or certificate missing in Keypair")
class VirtualHost(object):
"""
    Configuration of keys and certs for a single virtual server.
This class encapsulates keys and certificates for hosts specified by
server_name (SNI) and ALPN extensions.
TODO: support SRP as alternative to certificates
TODO: support PSK as alternative to certificates
:vartype keys: list(Keypair)
:ivar keys: List of certificates and keys to be used in this
        virtual host. The first keypair able to serve the ClientHello will be used.
:vartype hostnames: set(bytes)
:ivar hostnames: all the hostnames that server supports
please use :py:meth:`matches_hostname` to verify if the VirtualHost
can serve a request to a given hostname as that allows wildcard hosts
that always reply True.
:vartype trust_anchors: list(X509)
:ivar trust_anchors: list of CA certificates supported for client
certificate authentication, sent in CertificateRequest
:ivar list(bytes) app_protocols: all the application protocols that the
server supports (for ALPN)
"""
def __init__(self):
"""Set up default configuration."""
self.keys = []
self.hostnames = set()
self.trust_anchors = []
self.app_protocols = []
def matches_hostname(self, hostname):
"""Checks if the virtual host can serve hostname"""
return hostname in self.hostnames
def validate(self):
"""Sanity check the settings"""
if not self.keys:
raise ValueError("Virtual host missing keys")
for i in self.keys:
i.validate()
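# A hypothetical sketch (placeholder objects stand in for a parsed private key and X509
# certificate) showing how a virtual host is assembled and sanity checked.
_example_vhost = VirtualHost()
_example_vhost.hostnames = {b"example.com", b"www.example.com"}
_example_vhost.keys = [Keypair(key=object(), certificates=(object(),))]
_example_vhost.validate()  # passes: the keypair has both a key and a certificate chain
assert _example_vhost.matches_hostname(b"example.com")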
class HandshakeSettings(object):
"""
This class encapsulates various parameters that can be used with
a TLS handshake.
:vartype minKeySize: int
:ivar minKeySize: The minimum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters smaller than this length, an alert will be
signalled. The default is 1023.
:vartype maxKeySize: int
:ivar maxKeySize: The maximum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters larger than this length, an alert will be signalled.
The default is 8193.
:vartype cipherNames: list(str)
:ivar cipherNames: The allowed ciphers.
        The allowed values in this list are 'chacha20-poly1305', 'aes256gcm',
        'aes128gcm', 'aes256ccm', 'aes128ccm', 'aes256ccm_8', 'aes128ccm_8',
        'aes256', 'aes128', '3des', 'chacha20-poly1305_draft00', 'null' and
        'rc4'. If these settings are used with a client handshake, they
determine the order of the ciphersuites offered in the ClientHello
message.
If these settings are used with a server handshake, the server will
choose whichever ciphersuite matches the earliest entry in this
list.
        The default value is a list that excludes 'rc4', 'null' and
'chacha20-poly1305_draft00'.
:vartype macNames: list(str)
:ivar macNames: The allowed MAC algorithms.
The allowed values in this list are 'sha384', 'sha256', 'aead', 'sha'
and 'md5'.
        The default value is a list that excludes 'md5'.
:vartype certificateTypes: list(str)
:ivar certificateTypes: The allowed certificate types.
        The only allowed certificate type is 'x509'. This list is only used
        with a client handshake. The client will advertise to the server
        which certificate types are supported, and will check that the
        server uses one of the appropriate types.
:vartype minVersion: tuple
:ivar minVersion: The minimum allowed SSL/TLS version.
        This variable can be set to (3, 0) for SSL 3.0, (3, 1) for TLS 1.0,
        (3, 2) for TLS 1.1, or (3, 3) for TLS 1.2. If the other party wishes
        to use a lower version, a protocol_version alert will be signalled.
        The default is (3, 1).
:vartype maxVersion: tuple
:ivar maxVersion: The maximum allowed SSL/TLS version.
        This variable can be set to (3, 0) for SSL 3.0, (3, 1) for TLS 1.0,
        (3, 2) for TLS 1.1, or (3, 3) for TLS 1.2. If the other party wishes
        to use a higher version, a protocol_version alert will be signalled.
        The default is (3, 3).
    .. warning:: Some servers may (improperly) reject clients which offer
        support for TLS 1.1 or higher. In this case, try lowering maxVersion
        to (3, 1).
:vartype useExperimentalTackExtension: bool
:ivar useExperimentalTackExtension: Whether to enabled TACK support.
Note that TACK support is not standardized by IETF and uses a temporary
TLS Extension number, so should NOT be used in production software.
:vartype sendFallbackSCSV: bool
:ivar sendFallbackSCSV: Whether to, as a client, send FALLBACK_SCSV.
:vartype rsaSigHashes: list(str)
:ivar rsaSigHashes: List of hashes supported (and advertised as such) for
TLS 1.2 signatures over Server Key Exchange or Certificate Verify with
RSA signature algorithm.
The list is sorted from most wanted to least wanted algorithm.
The allowed hashes are: "md5", "sha1", "sha224", "sha256",
"sha384" and "sha512". The default list does not include md5.
:vartype ecdsaSigHashes: list(str)
:ivar ecdsaSigHashes: List of hashes supported (and advertised as such) for
TLS 1.2 signatures over Server Key Exchange or Certificate Verify with
ECDSA signature algorithm.
The list is sorted from most wanted to least wanted algorithm.
The allowed hashes are: "sha1", "sha224", "sha256",
"sha384" and "sha512".
:vartype eccCurves: list(str)
:ivar eccCurves: List of named curves that are to be supported
:vartype useEncryptThenMAC: bool
:ivar useEncryptThenMAC: whether to support the encrypt then MAC extension
from RFC 7366. True by default.
:vartype useExtendedMasterSecret: bool
:ivar useExtendedMasterSecret: whether to support the extended master
secret calculation from RFC 7627. True by default.
:vartype requireExtendedMasterSecret: bool
:ivar requireExtendedMasterSecret: whether to require negotiation of
extended master secret calculation for successful connection. Requires
useExtendedMasterSecret to be set to true. False by default.
:vartype defaultCurve: str
:ivar defaultCurve: curve that will be used by server in case the client
did not advertise support for any curves. It does not have to be the
first curve for eccCurves and may be distinct from curves from that
list.
:vartype keyShares: list(str)
:ivar keyShares: list of TLS 1.3 key shares to include in Client Hello
:vartype padding_cb: func
:ivar padding_cb: Callback to function computing number of padding bytes
for TLS 1.3. Signature is cb_func(msg_size, content_type, max_size).
:vartype pskConfigs: list(tuple(bytearray, bytearray, bytearray))
:ivar pskConfigs: list of tuples, first element of the tuple is the
human readable, UTF-8 encoded, "identity" of the associated secret
(bytearray, can be empty for TLS 1.2 and earlier), second element is
the binary secret (bytearray), third is an optional parameter
specifying the PRF hash to be used in TLS 1.3 (``sha256`` or
``sha384``)
:vartype ticketKeys: list(bytearray)
:ivar ticketKeys: keys to be used for encrypting and decrypting session
tickets. First entry is the encryption key for new tickets and the
default decryption key, subsequent entries are the fallback keys
allowing for key rollover. The keys need to be of size appropriate
for a selected cipher in ticketCipher, 32 bytes for 'aes256gcm' and
        'chacha20-poly1305', 16 bytes for 'aes128gcm'.
New keys should be generated regularly and replace old ones. Key use
time should generally not be longer than 24h and key life-time should
not be longer than 48h.
Leave empty to disable session ticket support on server side.
:vartype ticketCipher: str
:ivar ticketCipher: name of the cipher used for encrypting the session
tickets. 'aes256gcm' by default, 'aes128gcm' or 'chacha20-poly1305'
alternatively.
:vartype ticketLifetime: int
:ivar ticketLifetime: maximum allowed lifetime of ticket encryption key,
in seconds. 1 day by default
:vartype psk_modes: list(str)
:ivar psk_modes: acceptable modes for the PSK key exchange in TLS 1.3
:ivar int max_early_data: maximum number of bytes acceptable for 0-RTT
early_data processing. In other words, how many bytes will the server
try to process, but ignore, in case the Client Hello includes
early_data extension.
:vartype use_heartbeat_extension: bool
:ivar use_heartbeat_extension: whether to support heartbeat extension from
RFC 6520. True by default.
:vartype heartbeat_response_callback: func
:ivar heartbeat_response_callback: Callback to function when Heartbeat
response is received.
:vartype ~.record_size_limit: int
:ivar ~.record_size_limit: maximum size of records we are willing to process
(value advertised to the other side). It must not be larger than
2**14+1 (the maximum for TLS 1.3) and will be reduced to 2**14 if TLS
1.2 or lower is the highest enabled version. Must not be set to values
smaller than 64. Set to None to disable support for the extension.
See also: RFC 8449.
:vartype keyExchangeNames: list
:ivar keyExchangeNames: Enabled key exchange types for the connection,
influences selected cipher suites.
"""
def _init_key_settings(self):
"""Create default variables for key-related settings."""
self.minKeySize = 1023
self.maxKeySize = 8193
self.rsaSigHashes = list(RSA_SIGNATURE_HASHES)
self.rsaSchemes = list(RSA_SCHEMES)
self.virtual_hosts = []
# DH key settings
self.eccCurves = list(CURVE_NAMES)
self.dhParams = None
self.dhGroups = list(ALL_DH_GROUP_NAMES)
self.defaultCurve = "secp256r1"
self.keyShares = ["secp256r1", "x25519"]
self.padding_cb = None
self.use_heartbeat_extension = True
self.heartbeat_response_callback = None
def _init_misc_extensions(self):
"""Default variables for assorted extensions."""
self.certificateTypes = list(CERTIFICATE_TYPES)
self.useExperimentalTackExtension = False
self.sendFallbackSCSV = False
self.useEncryptThenMAC = True
self.ecdsaSigHashes = list(ECDSA_SIGNATURE_HASHES)
self.usePaddingExtension = True
self.useExtendedMasterSecret = True
self.requireExtendedMasterSecret = False
# PSKs
self.pskConfigs = []
self.psk_modes = list(PSK_MODES)
# session tickets
self.ticketKeys = []
self.ticketCipher = "aes256gcm"
self.ticketLifetime = 24 * 60 * 60
self.max_early_data = 2 ** 14 + 16 # full record + tag
# send two tickets so that client can quickly ramp up number of
        # resumed connections (as tickets are single-use in TLS 1.3)
self.ticket_count = 2
self.record_size_limit = 2**14 + 1 # TLS 1.3 includes content type
def __init__(self):
"""Initialise default values for settings."""
self._init_key_settings()
self._init_misc_extensions()
self.minVersion = (3, 1)
self.maxVersion = (3, 4)
self.versions = [(3, 4), (3, 3), (3, 2), (3, 1)]
self.cipherNames = list(CIPHER_NAMES)
self.macNames = list(MAC_NAMES)
self.keyExchangeNames = list(KEY_EXCHANGE_NAMES)
self.cipherImplementations = list(CIPHER_IMPLEMENTATIONS)
@staticmethod
def _sanityCheckKeySizes(other):
"""Check if key size limits are sane"""
if other.minKeySize < 512:
raise ValueError("minKeySize too small")
if other.minKeySize > 16384:
raise ValueError("minKeySize too large")
if other.maxKeySize < 512:
raise ValueError("maxKeySize too small")
if other.maxKeySize > 16384:
raise ValueError("maxKeySize too large")
if other.maxKeySize < other.minKeySize:
raise ValueError("maxKeySize smaller than minKeySize")
# check also keys of virtual hosts
for i in other.virtual_hosts:
i.validate()
@staticmethod
def _not_matching(values, sieve):
"""Return list of items from values that are not in sieve."""
return [val for val in values if val not in sieve]
@staticmethod
def _sanityCheckCipherSettings(other):
"""Check if specified cipher settings are known."""
not_matching = HandshakeSettings._not_matching
unknownCiph = not_matching(other.cipherNames, ALL_CIPHER_NAMES)
if unknownCiph:
raise ValueError("Unknown cipher name: {0}".format(unknownCiph))
unknownMac = not_matching(other.macNames, ALL_MAC_NAMES)
if unknownMac:
raise ValueError("Unknown MAC name: {0}".format(unknownMac))
unknownKex = not_matching(other.keyExchangeNames, KEY_EXCHANGE_NAMES)
if unknownKex:
raise ValueError("Unknown key exchange name: {0}"
.format(unknownKex))
unknownImpl = not_matching(other.cipherImplementations,
CIPHER_IMPLEMENTATIONS)
if unknownImpl:
raise ValueError("Unknown cipher implementation: {0}"
.format(unknownImpl))
@staticmethod
def _sanityCheckECDHSettings(other):
"""Check ECDHE settings if they are sane."""
not_matching = HandshakeSettings._not_matching
unknownCurve = not_matching(other.eccCurves, ALL_CURVE_NAMES)
if unknownCurve:
raise ValueError("Unknown ECC Curve name: {0}"
.format(unknownCurve))
if other.defaultCurve not in ALL_CURVE_NAMES:
raise ValueError("Unknown default ECC Curve name: {0}"
.format(other.defaultCurve))
nonAdvertisedGroup = [val for val in other.keyShares
if val not in other.eccCurves and
val not in other.dhGroups]
if nonAdvertisedGroup:
raise ValueError("Key shares for not enabled groups specified: {0}"
.format(nonAdvertisedGroup))
unknownSigHash = not_matching(other.ecdsaSigHashes,
ECDSA_SIGNATURE_HASHES)
if unknownSigHash:
raise ValueError("Unknown ECDSA signature hash: '{0}'".\
format(unknownSigHash))
unknownDHGroup = not_matching(other.dhGroups, ALL_DH_GROUP_NAMES)
if unknownDHGroup:
raise ValueError("Unknown FFDHE group name: '{0}'"
.format(unknownDHGroup))
@staticmethod
def _sanityCheckDHSettings(other):
"""Check if (EC)DHE settings are sane."""
not_matching = HandshakeSettings._not_matching
HandshakeSettings._sanityCheckECDHSettings(other)
unknownKeyShare = [val for val in other.keyShares
if val not in ALL_DH_GROUP_NAMES and
val not in ALL_CURVE_NAMES]
if unknownKeyShare:
raise ValueError("Unknown key share: '{0}'"
.format(unknownKeyShare))
if other.dhParams and (len(other.dhParams) != 2 or
not isinstance(other.dhParams[0], int_types) or
not isinstance(other.dhParams[1], int_types)):
raise ValueError("DH parameters need to be a tuple of integers")
@staticmethod
def _sanityCheckPrimitivesNames(other):
"""Check if specified cryptographic primitive names are known"""
HandshakeSettings._sanityCheckCipherSettings(other)
HandshakeSettings._sanityCheckDHSettings(other)
not_matching = HandshakeSettings._not_matching
unknownType = not_matching(other.certificateTypes, CERTIFICATE_TYPES)
if unknownType:
raise ValueError("Unknown certificate type: {0}"
.format(unknownType))
unknownSigHash = not_matching(other.rsaSigHashes,
ALL_RSA_SIGNATURE_HASHES)
if unknownSigHash:
raise ValueError("Unknown RSA signature hash: '{0}'"
.format(unknownSigHash))
unknownRSAPad = not_matching(other.rsaSchemes, RSA_SCHEMES)
if unknownRSAPad:
raise ValueError("Unknown RSA padding mode: '{0}'"
.format(unknownRSAPad))
if not other.rsaSigHashes and not other.ecdsaSigHashes and \
other.maxVersion >= (3, 3):
raise ValueError("TLS 1.2 requires signature algorithms to be set")
@staticmethod
def _sanityCheckProtocolVersions(other):
"""Check if set protocol version are sane"""
if other.minVersion > other.maxVersion:
raise ValueError("Versions set incorrectly")
if other.minVersion not in KNOWN_VERSIONS:
raise ValueError("minVersion set incorrectly")
if other.maxVersion not in KNOWN_VERSIONS:
raise ValueError("maxVersion set incorrectly")
if other.maxVersion < (3, 4):
other.versions = [i for i in other.versions if i < (3, 4)]
@staticmethod
def _sanityCheckEMSExtension(other):
"""Check if settings for EMS are sane."""
if other.useExtendedMasterSecret not in (True, False):
raise ValueError("useExtendedMasterSecret must be True or False")
if other.requireExtendedMasterSecret not in (True, False):
raise ValueError("requireExtendedMasterSecret must be True "
"or False")
if other.requireExtendedMasterSecret and \
not other.useExtendedMasterSecret:
raise ValueError("requireExtendedMasterSecret requires "
"useExtendedMasterSecret")
@staticmethod
def _sanityCheckExtensions(other):
"""Check if set extension settings are sane"""
if other.useEncryptThenMAC not in (True, False):
raise ValueError("useEncryptThenMAC can only be True or False")
if other.usePaddingExtension not in (True, False):
raise ValueError("usePaddingExtension must be True or False")
if other.use_heartbeat_extension not in (True, False):
raise ValueError("use_heartbeat_extension must be True or False")
if other.heartbeat_response_callback and not other.use_heartbeat_extension:
raise ValueError("heartbeat_response_callback requires "
"use_heartbeat_extension")
if other.record_size_limit is not None and \
not 64 <= other.record_size_limit <= 2**14 + 1:
raise ValueError("record_size_limit cannot exceed 2**14+1 bytes")
HandshakeSettings._sanityCheckEMSExtension(other)
@staticmethod
def _not_allowed_len(values, sieve):
"""Return True if length of any item in values is not in sieve."""
sieve = set(sieve)
return any(len(i) not in sieve for i in values)
@staticmethod
def _sanityCheckPsks(other):
"""Check if the set PSKs are sane."""
if HandshakeSettings._not_allowed_len(other.pskConfigs, [2, 3]):
raise ValueError("pskConfigs items must be a 2 or 3-element"
"tuples")
badHashes = [i[2] for i in other.pskConfigs if
len(i) == 3 and i[2] not in set(['sha256', 'sha384'])]
if badHashes:
raise ValueError("pskConfigs include invalid hash specifications: "
"{0}".format(badHashes))
bad_psk_modes = [i for i in other.psk_modes if
i not in PSK_MODES]
if bad_psk_modes:
raise ValueError("psk_modes includes invalid key exchange modes: "
"{0}".format(bad_psk_modes))
@staticmethod
def _sanityCheckTicketSettings(other):
"""Check if the session ticket settings are sane."""
if other.ticketCipher not in TICKET_CIPHERS:
raise ValueError("Invalid cipher for session ticket encryption: "
"{0}".format(other.ticketCipher))
if HandshakeSettings._not_allowed_len(other.ticketKeys, [16, 32]):
raise ValueError("Session ticket encryption keys must be 16 or 32"
"bytes long")
if not 0 < other.ticketLifetime <= 7 * 24 * 60 * 60:
raise ValueError("Ticket lifetime must be a positive integer "
"smaller or equal 604800 (7 days)")
# while not ticket setting per-se, it is related to session tickets
if not 0 < other.max_early_data <= 2**64:
raise ValueError("max_early_data must be between 0 and 2GiB")
if not 0 <= other.ticket_count < 2**16:
raise ValueError("Incorrect amount for number of new session "
"tickets to send")
def _copy_cipher_settings(self, other):
"""Copy values related to cipher selection."""
other.cipherNames = self.cipherNames
other.macNames = self.macNames
other.keyExchangeNames = self.keyExchangeNames
other.cipherImplementations = self.cipherImplementations
other.minVersion = self.minVersion
other.maxVersion = self.maxVersion
other.versions = self.versions
def _copy_extension_settings(self, other):
"""Copy values of settings related to extensions."""
other.useExtendedMasterSecret = self.useExtendedMasterSecret
other.requireExtendedMasterSecret = self.requireExtendedMasterSecret
other.useExperimentalTackExtension = self.useExperimentalTackExtension
other.sendFallbackSCSV = self.sendFallbackSCSV
other.useEncryptThenMAC = self.useEncryptThenMAC
other.usePaddingExtension = self.usePaddingExtension
# session tickets
other.padding_cb = self.padding_cb
other.ticketKeys = self.ticketKeys
other.ticketCipher = self.ticketCipher
other.ticketLifetime = self.ticketLifetime
other.max_early_data = self.max_early_data
other.ticket_count = self.ticket_count
other.record_size_limit = self.record_size_limit
@staticmethod
def _remove_all_matches(values, needle):
"""Remove all instances of needle from values."""
values[:] = (i for i in values if i != needle)
def _sanity_check_ciphers(self, other):
"""Remove unsupported ciphers in current configuration."""
if not cipherfactory.tripleDESPresent:
other.cipherNames = other.cipherNames[:]
self._remove_all_matches(other.cipherNames, "3des")
if not other.cipherNames:
raise ValueError("No supported ciphers")
def _sanity_check_implementations(self, other):
"""Remove all backends that are not loaded."""
if not cryptomath.m2cryptoLoaded:
self._remove_all_matches(other.cipherImplementations, "openssl")
if not cryptomath.pycryptoLoaded:
self._remove_all_matches(other.cipherImplementations, "pycrypto")
if not other.cipherImplementations:
raise ValueError("No supported cipher implementations")
def _copy_key_settings(self, other):
"""Copy key-related settings."""
other.minKeySize = self.minKeySize
other.maxKeySize = self.maxKeySize
other.certificateTypes = self.certificateTypes
other.rsaSigHashes = self.rsaSigHashes
other.rsaSchemes = self.rsaSchemes
other.ecdsaSigHashes = self.ecdsaSigHashes
other.virtual_hosts = self.virtual_hosts
# DH key params
other.eccCurves = self.eccCurves
other.dhParams = self.dhParams
other.dhGroups = self.dhGroups
other.defaultCurve = self.defaultCurve
other.keyShares = self.keyShares
other.use_heartbeat_extension = self.use_heartbeat_extension
other.heartbeat_response_callback = self.heartbeat_response_callback
def validate(self):
"""
Validate the settings, filter out unsupported ciphersuites and return
a copy of object. Does not modify the original object.
:rtype: HandshakeSettings
:returns: a self-consistent copy of settings
:raises ValueError: when settings are invalid, insecure or unsupported.
"""
other = HandshakeSettings()
self._copy_cipher_settings(other)
self._copy_extension_settings(other)
self._copy_key_settings(other)
other.pskConfigs = self.pskConfigs
other.psk_modes = self.psk_modes
if not other.certificateTypes:
raise ValueError("No supported certificate types")
self._sanityCheckKeySizes(other)
self._sanityCheckPrimitivesNames(other)
self._sanityCheckProtocolVersions(other)
self._sanityCheckExtensions(other)
if other.maxVersion < (3, 3):
# No sha-2 and AEAD pre TLS 1.2
other.macNames = [e for e in self.macNames if
e == "sha" or e == "md5"]
self._sanityCheckPsks(other)
self._sanityCheckTicketSettings(other)
self._sanity_check_implementations(other)
self._sanity_check_ciphers(other)
return other
def getCertificateTypes(self):
"""Get list of certificate types as IDs"""
ret = []
for ct in self.certificateTypes:
if ct == "x509":
ret.append(CertificateType.x509)
else:
raise AssertionError()
return ret
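# A minimal usage sketch (not part of the library): narrow the negotiable versions and
# ciphers, optionally add an external PSK, then let validate() hand back a
# sanity-checked copy; it raises ValueError on inconsistent settings.
_example_settings = HandshakeSettings()
_example_settings.minVersion = (3, 3)  # TLS 1.2 as the floor
_example_settings.cipherNames = ["aes256gcm", "aes128gcm", "chacha20-poly1305"]
_example_settings.keyExchangeNames = ["ecdhe_ecdsa", "ecdhe_rsa"]
_example_settings.pskConfigs = [(bytearray(b"client-1"), bytearray(32), "sha256")]
_checked = _example_settings.validate()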
| 41.094708
| 83
| 0.650681
|
4a148cb0e5199b0472abc4bbe903832a446ceb4c
| 823
|
py
|
Python
|
monitor/main.py
|
polytechnique-ease/blockcompass
|
1aa8cb52b43e6e860a141df276dc92a69b2e1315
|
[
"MIT"
] | null | null | null |
monitor/main.py
|
polytechnique-ease/blockcompass
|
1aa8cb52b43e6e860a141df276dc92a69b2e1315
|
[
"MIT"
] | null | null | null |
monitor/main.py
|
polytechnique-ease/blockcompass
|
1aa8cb52b43e6e860a141df276dc92a69b2e1315
|
[
"MIT"
] | 1
|
2022-03-21T05:09:00.000Z
|
2022-03-21T05:09:00.000Z
|
from dockerMonitoring import DockerMonitoring
from mongoDB import MongoDB
from settings import Settings
import time
def main():
settings = Settings("../configuration/blockchain.yaml")
settings.import_setting(10)
database = MongoDB(settings.connection_string)
docker_monitor = DockerMonitoring(database, settings)
# write target containers names to file
docker_monitor.writeNamesToFile()
while True:
start_time = time.time()
print("INFO:\tStart checking ...")
docker_monitor.get_measurements()
end_time = time.time()
        sleep_time = settings.delay - (end_time - start_time)
        if sleep_time > 0:
            print("INFO:\tFinish checking. Sleeping %.2f seconds ...\n" % sleep_time)
            time.sleep(sleep_time)
        else:
            print("INFO:\tFinish checking. Behind schedule, starting next check immediately.\n")
if __name__ == "__main__":
main()
| 31.653846
| 81
| 0.688943
|
4a148cfb94c811bddfa90f351e43a1dceb4d08ac
| 369
|
py
|
Python
|
onnx_tf/handlers/backend/softsign.py
|
malisit/onnx-tensorflow
|
3eb41dc923f350ca533f1024f602a842dd55de45
|
[
"Apache-2.0"
] | 1,110
|
2017-11-13T19:34:24.000Z
|
2022-03-29T09:14:56.000Z
|
onnx_tf/handlers/backend/softsign.py
|
malisit/onnx-tensorflow
|
3eb41dc923f350ca533f1024f602a842dd55de45
|
[
"Apache-2.0"
] | 768
|
2017-11-17T00:06:27.000Z
|
2022-03-31T20:20:09.000Z
|
onnx_tf/handlers/backend/softsign.py
|
malisit/onnx-tensorflow
|
3eb41dc923f350ca533f1024f602a842dd55de45
|
[
"Apache-2.0"
] | 281
|
2017-11-16T19:56:17.000Z
|
2022-03-28T06:25:33.000Z
|
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("Softsign")
@tf_func(tf.nn.softsign)
class Softsign(BackendHandler):
@classmethod
def version_1(cls, node, **kwargs):
return [cls.make_tensor_from_onnx_node(node, **kwargs)]
| 24.6
| 59
| 0.796748
|
4a148d052e73866fc4dac9ca6905efd1b7b61058
| 28,738
|
py
|
Python
|
python_modules/dagster/dagster/check/__init__.py
|
ericct/dagster
|
dd2c9f05751e1bae212a30dbc54381167a14f6c5
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/check/__init__.py
|
ericct/dagster
|
dd2c9f05751e1bae212a30dbc54381167a14f6c5
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/check/__init__.py
|
ericct/dagster
|
dd2c9f05751e1bae212a30dbc54381167a14f6c5
|
[
"Apache-2.0"
] | null | null | null |
import inspect
import sys
from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Union
from future.utils import raise_with_traceback
Type = Union[type, Tuple[type, ...]]
Numeric = Union[int, float]
class CheckError(Exception):
pass
class ParameterCheckError(CheckError):
pass
class ElementCheckError(CheckError):
pass
class NotImplementedCheckError(CheckError):
pass
def _param_type_mismatch_exception(
obj: Any, ttype: Type, param_name: str, additional_message: str = None
) -> ParameterCheckError:
if isinstance(ttype, tuple):
type_names = sorted([t.__name__ for t in ttype])
return ParameterCheckError(
'Param "{name}" is not one of {type_names}. Got {obj} which is type {obj_type}.'
"{additional_message}".format(
name=param_name,
obj=repr(obj),
type_names=type_names,
obj_type=type(obj),
additional_message=" " + additional_message if additional_message else "",
)
)
else:
return ParameterCheckError(
'Param "{name}" is not a {type}. Got {obj} which is type {obj_type}.'
"{additional_message}".format(
name=param_name,
obj=repr(obj),
type=ttype.__name__,
obj_type=type(obj),
additional_message=" " + additional_message if additional_message else "",
)
)
def _not_type_param_subclass_mismatch_exception(obj: Any, param_name: str) -> ParameterCheckError:
return ParameterCheckError(
'Param "{name}" was supposed to be a type. Got {obj} of type {obj_type}'.format(
name=param_name, obj=repr(obj), obj_type=type(obj)
)
)
def _param_subclass_mismatch_exception(
obj: Any, superclass: Type, param_name: str
) -> ParameterCheckError:
return ParameterCheckError(
'Param "{name}" is a type but not a subclass of {superclass}. Got {obj} instead'.format(
name=param_name, superclass=superclass, obj=obj
)
)
def _type_mismatch_error(obj: Any, ttype: Type, desc: str = None) -> CheckError:
type_message = (
f"not one of {sorted([t.__name__ for t in ttype])}"
if isinstance(ttype, tuple)
else f"not a {ttype.__name__}"
)
repr_obj = repr(obj)
desc_str = f" Desc: {desc}" if desc else ""
return CheckError(
f"Object {repr_obj} is {type_message}. Got {repr_obj} with type {type(obj)}.{desc_str}"
)
def _not_callable_exception(obj: Any, param_name: str) -> ParameterCheckError:
return ParameterCheckError(
'Param "{name}" is not callable. Got {obj} with type {obj_type}.'.format(
name=param_name, obj=repr(obj), obj_type=type(obj)
)
)
def _param_invariant_exception(param_name: str, desc: str = None) -> ParameterCheckError:
return ParameterCheckError(
"Invariant violation for parameter {param_name}. Description: {desc}".format(
param_name=param_name, desc=desc
)
)
def failed(desc: str):
if not isinstance(desc, str):
raise_with_traceback(CheckError("desc argument must be a string"))
raise_with_traceback(CheckError("Failure condition: {desc}".format(desc=desc)))
def not_implemented(desc: str):
if not isinstance(desc, str):
raise_with_traceback(CheckError("desc argument must be a string"))
raise_with_traceback(NotImplementedCheckError("Not implemented: {desc}".format(desc=desc)))
def inst(obj: Any, ttype: Type, desc: str = None) -> Any:
if not isinstance(obj, ttype):
raise_with_traceback(_type_mismatch_error(obj, ttype, desc))
return obj
def subclass(obj: Any, superclass: Type, desc: str = None) -> Any:
if not issubclass(obj, superclass):
raise_with_traceback(_type_mismatch_error(obj, superclass, desc))
return obj
def is_callable(obj: Any, desc: str = None) -> Callable:
if not callable(obj):
if desc:
raise_with_traceback(
CheckError(
"Must be callable. Got {obj}. Description: {desc}".format(
obj=repr(obj), desc=desc
)
)
)
        else:
            raise_with_traceback(
                CheckError(
                    "Must be callable. Got {obj}.".format(obj=repr(obj))
                )
            )
return obj
def not_none_param(obj: Any, param_name: str) -> Any:
if obj is None:
raise_with_traceback(
_param_invariant_exception(
param_name, "Param {param_name} cannot be none".format(param_name=param_name)
)
)
return obj
def invariant(condition: Any, desc: str = None) -> bool:
if not condition:
if desc:
raise_with_traceback(
CheckError("Invariant failed. Description: {desc}".format(desc=desc))
)
else:
raise_with_traceback(CheckError("Invariant failed."))
return True
def param_invariant(condition: Any, param_name: str, desc: str = None):
if not condition:
raise_with_traceback(_param_invariant_exception(param_name, desc))
def inst_param(obj: Any, param_name: str, ttype: Type, additional_message: str = None) -> Any:
if not isinstance(obj, ttype):
raise_with_traceback(
_param_type_mismatch_exception(
obj, ttype, param_name, additional_message=additional_message
)
)
return obj
def opt_inst_param(obj: Any, param_name: str, ttype: Type, default: Any = None) -> Any:
if obj is not None and not isinstance(obj, ttype):
raise_with_traceback(_param_type_mismatch_exception(obj, ttype, param_name))
return default if obj is None else obj
def callable_param(obj: Any, param_name: str) -> Callable:
if not callable(obj):
raise_with_traceback(_not_callable_exception(obj, param_name))
return obj
def opt_callable_param(obj: Any, param_name: str, default: Callable = None) -> Optional[Callable]:
if obj is not None and not callable(obj):
raise_with_traceback(_not_callable_exception(obj, param_name))
return default if obj is None else obj
def int_param(obj: Any, param_name: str) -> int:
if not isinstance(obj, int):
raise_with_traceback(_param_type_mismatch_exception(obj, int, param_name))
return obj
def int_value_param(obj: Any, value: int, param_name: str) -> int:
if not isinstance(obj, int):
raise_with_traceback(_param_type_mismatch_exception(obj, int, param_name))
if obj != value:
raise_with_traceback(
_param_invariant_exception(param_name, "Should be equal to {value}".format(value=value))
)
return obj
def opt_int_param(obj: Any, param_name: str, default: int = None) -> Optional[int]:
if obj is not None and not isinstance(obj, int):
raise_with_traceback(_param_type_mismatch_exception(obj, int, param_name))
return default if obj is None else obj
def float_param(obj: Any, param_name: str) -> float:
if not isinstance(obj, float):
raise_with_traceback(_param_type_mismatch_exception(obj, float, param_name))
return obj
def opt_numeric_param(obj: Any, param_name: str, default: Numeric = None) -> Optional[Numeric]:
if obj is not None and not isinstance(obj, (int, float)):
raise_with_traceback(_param_type_mismatch_exception(obj, (int, float), param_name))
return default if obj is None else obj
def numeric_param(obj: Any, param_name: str) -> Numeric:
if not isinstance(obj, (int, float)):
raise_with_traceback(_param_type_mismatch_exception(obj, (int, float), param_name))
return obj
def opt_float_param(obj: Any, param_name: str, default: float = None) -> Optional[float]:
if obj is not None and not isinstance(obj, float):
raise_with_traceback(_param_type_mismatch_exception(obj, float, param_name))
return default if obj is None else obj
def str_param(obj: Any, param_name: str) -> str:
if not isinstance(obj, str):
raise_with_traceback(_param_type_mismatch_exception(obj, str, param_name))
return obj
def opt_str_param(obj: Any, param_name: str, default: str = None) -> Optional[str]:
if obj is not None and not isinstance(obj, str):
raise_with_traceback(_param_type_mismatch_exception(obj, str, param_name))
return default if obj is None else obj
def opt_nonempty_str_param(obj: Any, param_name: str, default: str = None) -> Optional[str]:
if obj is not None and not isinstance(obj, str):
raise_with_traceback(_param_type_mismatch_exception(obj, str, param_name))
return default if obj is None or obj == "" else obj
def bool_param(obj: Any, param_name: str) -> bool:
if not isinstance(obj, bool):
raise_with_traceback(_param_type_mismatch_exception(obj, bool, param_name))
return obj
def opt_bool_param(obj: Any, param_name: str, default: bool = None) -> Optional[bool]:
if obj is not None and not isinstance(obj, bool):
raise_with_traceback(_param_type_mismatch_exception(obj, bool, param_name))
return default if obj is None else obj
def is_list(obj_list: Any, of_type: Type = None, desc: str = None) -> List:
if not isinstance(obj_list, list):
raise_with_traceback(_type_mismatch_error(obj_list, list, desc))
if not of_type:
return obj_list
return _check_list_items(obj_list, of_type)
def is_tuple(obj_tuple: Any, of_type: Type = None, desc: str = None) -> Tuple:
if not isinstance(obj_tuple, tuple):
raise_with_traceback(_type_mismatch_error(obj_tuple, tuple, desc))
if not of_type:
return obj_tuple
return _check_tuple_items(obj_tuple, of_type)
def list_param(obj_list: Any, param_name: str, of_type: Type = None) -> List:
from dagster.utils import frozenlist
if not isinstance(obj_list, (frozenlist, list)):
raise_with_traceback(
_param_type_mismatch_exception(obj_list, (frozenlist, list), param_name)
)
if not of_type:
return obj_list
return _check_list_items(obj_list, of_type)
def set_param(obj_set: Any, param_name: str, of_type: Type = None) -> Set:
if not isinstance(obj_set, (frozenset, set)):
raise_with_traceback(_param_type_mismatch_exception(obj_set, (frozenset, set), param_name))
if not of_type:
return obj_set
return _check_set_items(obj_set, of_type)
def tuple_param(obj: Any, param_name: str, of_type: Type = None) -> Tuple:
if not isinstance(obj, tuple):
raise_with_traceback(_param_type_mismatch_exception(obj, tuple, param_name))
if of_type is None:
return obj
return _check_tuple_items(obj, of_type)
def matrix_param(matrix: Any, param_name: str, of_type: Type = None) -> List[List]:
matrix = list_param(matrix, param_name, of_type=list)
if not matrix:
raise_with_traceback(CheckError("You must pass a list of lists. Received an empty list."))
for sublist in matrix:
sublist = list_param(sublist, "sublist_{}".format(param_name), of_type=of_type)
if len(sublist) != len(matrix[0]):
raise_with_traceback(CheckError("All sublists in matrix must have the same length"))
return matrix
def opt_tuple_param(
obj: Any, param_name: str, default: Tuple = None, of_type: Type = None
) -> Optional[Tuple]:
if obj is not None and not isinstance(obj, tuple):
raise_with_traceback(_param_type_mismatch_exception(obj, tuple, param_name))
if obj is None:
return default
if of_type is None:
return obj
return _check_tuple_items(obj, of_type)
def _check_list_items(obj_list: Any, of_type: Type) -> List:
for obj in obj_list:
if not isinstance(obj, of_type):
if isinstance(obj, type):
additional_message = (
" Did you pass a class where you were expecting an instance of the class?"
)
else:
additional_message = ""
raise_with_traceback(
CheckError(
"Member of list mismatches type. Expected {of_type}. Got {obj_repr} of type "
"{obj_type}.{additional_message}".format(
of_type=of_type,
obj_repr=repr(obj),
obj_type=type(obj),
additional_message=additional_message,
)
)
)
return obj_list
def _check_set_items(obj_set: Any, of_type: Type) -> Set:
for obj in obj_set:
if not isinstance(obj, of_type):
if isinstance(obj, type):
additional_message = (
" Did you pass a class where you were expecting an instance of the class?"
)
else:
additional_message = ""
raise_with_traceback(
CheckError(
"Member of set mismatches type. Expected {of_type}. Got {obj_repr} of type "
"{obj_type}.{additional_message}".format(
of_type=of_type,
obj_repr=repr(obj),
obj_type=type(obj),
additional_message=additional_message,
)
)
)
return obj_set
def _check_tuple_items(obj_tuple: Any, of_type: Type) -> Tuple:
if isinstance(of_type, tuple):
len_tuple = len(obj_tuple)
len_type = len(of_type)
if not len_tuple == len_type:
raise_with_traceback(
CheckError(
"Tuple mismatches type: tuple had {len_tuple} members but type had "
"{len_type}".format(len_tuple=len_tuple, len_type=len_type)
)
)
for (i, obj) in enumerate(obj_tuple):
of_type_i = of_type[i]
if not isinstance(obj, of_type_i):
if isinstance(obj, type):
additional_message = (
" Did you pass a class where you were expecting an instance of the class?"
)
else:
additional_message = ""
raise_with_traceback(
CheckError(
"Member of tuple mismatches type at index {index}. Expected {of_type}. Got "
"{obj_repr} of type {obj_type}.{additional_message}".format(
index=i,
of_type=of_type_i,
obj_repr=repr(obj),
obj_type=type(obj),
additional_message=additional_message,
)
)
)
else:
for (i, obj) in enumerate(obj_tuple):
if not isinstance(obj, of_type):
if isinstance(obj, type):
additional_message = (
" Did you pass a class where you were expecting an instance of the class?"
)
else:
additional_message = ""
raise_with_traceback(
CheckError(
"Member of tuple mismatches type at index {index}. Expected {of_type}. Got "
"{obj_repr} of type {obj_type}.{additional_message}".format(
index=i,
of_type=of_type,
obj_repr=repr(obj),
obj_type=type(obj),
additional_message=additional_message,
)
)
)
return obj_tuple
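# Example sketch (editor's addition): when `of_type` is itself a tuple, the check is
# positional, so the tuple lengths must match and each member is checked against the
# type at the same index; a single type checks every member. The calls below are
# illustrative only.
#
#   tuple_param(("id", 3), "pair", of_type=(str, int))     # ok, returns ("id", 3)
#   tuple_param(("id", 3, 4), "pair", of_type=(str, int))  # raises CheckError (length mismatch)
#   tuple_param((1, 2, 3), "ints", of_type=int)            # ok, every member checked against int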
def opt_list_param(obj_list: Any, param_name: str, of_type: Type = None) -> List:
"""Ensures argument obj_list is a list or None; in the latter case, instantiates an empty list
and returns it.
If the of_type argument is provided, also ensures that list items conform to the type specified
by of_type.
"""
from dagster.utils import frozenlist
if obj_list is not None and not isinstance(obj_list, (frozenlist, list)):
raise_with_traceback(
_param_type_mismatch_exception(obj_list, (frozenlist, list), param_name)
)
if not obj_list:
return []
if not of_type:
return obj_list
return _check_list_items(obj_list, of_type)
def opt_set_param(obj_set: Any, param_name: str, of_type: Type = None) -> Set:
"""Ensures argument obj_set is a set or None; in the latter case, instantiates an empty set
and returns it.
If the of_type argument is provided, also ensures that list items conform to the type specified
by of_type.
"""
if obj_set is not None and not isinstance(obj_set, (frozenset, set)):
raise_with_traceback(_param_type_mismatch_exception(obj_set, (frozenset, set), param_name))
if not obj_set:
return set()
if not of_type:
return obj_set
return _check_set_items(obj_set, of_type)
def opt_nullable_list_param(obj_list: Any, param_name: str, of_type: Type = None) -> Optional[List]:
"""Ensures argument obj_list is a list or None. Returns None if input is None.
If the of_type argument is provided, also ensures that list items conform to the type specified
by of_type.
"""
from dagster.utils import frozenlist
if obj_list is not None and not isinstance(obj_list, (frozenlist, list)):
raise_with_traceback(
_param_type_mismatch_exception(obj_list, (frozenlist, list), param_name)
)
if not obj_list:
return None if obj_list is None else []
if not of_type:
return obj_list
return _check_list_items(obj_list, of_type)
def _check_key_value_types(
obj: Any,
key_type: Type = None,
value_type: Type = None,
key_check: Callable = isinstance,
value_check: Callable = isinstance,
) -> Dict:
"""Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types
specified by key_type, value_type.
"""
if not isinstance(obj, dict):
raise_with_traceback(_type_mismatch_error(obj, dict))
for key, value in obj.items():
if key_type and not key_check(key, key_type):
raise_with_traceback(
CheckError(
"Key in dictionary mismatches type. Expected {key_type}. Got {obj_repr}".format(
key_type=repr(key_type), obj_repr=repr(key)
)
)
)
if value_type and not value_check(value, value_type):
raise_with_traceback(
CheckError(
"Value in dictionary mismatches expected type for key {key}. Expected value "
"of type {vtype}. Got value {value} of type {obj_type}.".format(
vtype=repr(value_type), obj_type=type(value), key=key, value=value
)
)
)
return obj
def dict_param(
obj: Any,
param_name: str,
key_type: Type = None,
value_type: Type = None,
additional_message: str = None,
) -> Dict:
"""Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise
returns obj.
"""
from dagster.utils import frozendict
if not isinstance(obj, (frozendict, dict)):
raise_with_traceback(
_param_type_mismatch_exception(
obj, (frozendict, dict), param_name, additional_message=additional_message
)
)
if not (key_type or value_type):
return obj
return _check_key_value_types(obj, key_type, value_type)
def opt_dict_param(
obj: Any,
param_name: str,
key_type: Type = None,
value_type: Type = None,
value_class: Type = None,
) -> Dict:
"""Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty
dictionary.
"""
from dagster.utils import frozendict
if obj is not None and not isinstance(obj, (frozendict, dict)):
raise_with_traceback(_param_type_mismatch_exception(obj, (frozendict, dict), param_name))
if not obj:
return {}
if value_class:
return _check_key_value_types(obj, key_type, value_type=value_class, value_check=issubclass)
return _check_key_value_types(obj, key_type, value_type)
def opt_nullable_dict_param(
obj: Any,
param_name: str,
key_type: Type = None,
value_type: Type = None,
value_class: Type = None,
) -> Optional[Dict]:
"""Ensures argument obj is either a dictionary or None;
"""
from dagster.utils import frozendict
if obj is not None and not isinstance(obj, (frozendict, dict)):
raise_with_traceback(_param_type_mismatch_exception(obj, (frozendict, dict), param_name))
if not obj:
return None if obj is None else {}
if value_class:
return _check_key_value_types(obj, key_type, value_type=value_class, value_check=issubclass)
return _check_key_value_types(obj, key_type, value_type)
def _check_two_dim_key_value_types(
obj: Any, key_type: Type = None, _param_name: str = None, value_type: Type = None
) -> Dict:
_check_key_value_types(obj, key_type, dict) # check level one
for level_two_dict in obj.values():
_check_key_value_types(level_two_dict, key_type, value_type) # check level two
return obj
def two_dim_dict_param(
obj: Any, param_name: str, key_type: Type = str, value_type: Type = None
) -> Dict:
if not isinstance(obj, dict):
raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name))
return _check_two_dim_key_value_types(obj, key_type, param_name, value_type)
def opt_two_dim_dict_param(
obj: Any, param_name: str, key_type: Type = str, value_type: Type = None
) -> Dict:
if obj is not None and not isinstance(obj, dict):
raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name))
if not obj:
return {}
return _check_two_dim_key_value_types(obj, key_type, param_name, value_type)
def type_param(obj: Any, param_name: str) -> type:
if not isinstance(obj, type):
raise_with_traceback(_not_type_param_subclass_mismatch_exception(obj, param_name))
return obj
def opt_type_param(obj: Any, param_name: str, default: type = None) -> Optional[type]:
if obj is not None and not isinstance(obj, type):
raise_with_traceback(_not_type_param_subclass_mismatch_exception(obj, param_name))
return obj if obj is not None else default
def subclass_param(obj: Any, param_name: str, superclass: type) -> type:
type_param(obj, param_name)
if not issubclass(obj, superclass):
raise_with_traceback(_param_subclass_mismatch_exception(obj, superclass, param_name))
return obj
def opt_subclass_param(obj: Any, param_name: str, superclass: type) -> Optional[type]:
opt_type_param(obj, param_name)
if obj is not None and not issubclass(obj, superclass):
raise_with_traceback(_param_subclass_mismatch_exception(obj, superclass, param_name))
return obj
def _element_check_error(key: Any, value: Any, ddict: Dict, ttype: Type) -> ElementCheckError:
return ElementCheckError(
"Value {value} from key {key} is not a {ttype}. Dict: {ddict}".format(
key=key, value=repr(value), ddict=repr(ddict), ttype=repr(ttype)
)
)
def generator(obj: Any) -> Generator:
if not inspect.isgenerator(obj):
raise ParameterCheckError(
"Not a generator (return value of function that yields) Got {obj} instead".format(
obj=obj
)
)
return obj
def opt_generator(obj: Any) -> Optional[Generator]:
if obj is not None and not inspect.isgenerator(obj):
raise ParameterCheckError(
"Not a generator (return value of function that yields) Got {obj} instead".format(
obj=obj
)
)
return obj
def generator_param(obj: Any, param_name: str) -> Generator:
if not inspect.isgenerator(obj):
raise ParameterCheckError(
(
'Param "{name}" is not a generator (return value of function that yields) Got '
"{obj} instead"
).format(name=param_name, obj=obj)
)
return obj
def opt_generator_param(obj: Any, param_name: str) -> Optional[Generator]:
if obj is not None and not inspect.isgenerator(obj):
raise ParameterCheckError(
(
'Param "{name}" is not a generator (return value of function that yields) Got '
"{obj} instead"
).format(name=param_name, obj=obj)
)
return obj
def list_elem(ddict: Dict, key: str) -> List: # type: ignore[return]
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict.get(key)
if isinstance(value, list):
return value
raise_with_traceback(_element_check_error(key, value, ddict, list))
def opt_list_elem(ddict: Dict, key: str) -> List:
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict.get(key)
if value is None:
return []
if not isinstance(value, list):
raise_with_traceback(_element_check_error(key, value, ddict, list))
return value
def dict_elem(ddict: Dict, key: str) -> Dict:
from dagster.utils import frozendict
dict_param(ddict, "ddict")
str_param(key, "key")
if key not in ddict:
raise_with_traceback(
CheckError("{key} not present in dictionary {ddict}".format(key=key, ddict=ddict))
)
value = ddict[key]
if not isinstance(value, (frozendict, dict)):
raise_with_traceback(_element_check_error(key, value, ddict, (frozendict, dict)))
return value
def opt_dict_elem(ddict: Dict, key: str) -> Dict:
from dagster.utils import frozendict
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict.get(key)
if value is None:
return {}
if not isinstance(value, (frozendict, dict)):
        raise_with_traceback(_element_check_error(key, value, ddict, (frozendict, dict)))
return value
def bool_elem(ddict: Dict, key: str) -> bool:
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict[key]
if not isinstance(value, bool):
raise_with_traceback(_element_check_error(key, value, ddict, bool))
return value
def opt_float_elem(ddict: Dict, key: str) -> Optional[float]:
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict.get(key)
if value is None:
return None
if not isinstance(value, float):
raise_with_traceback(_element_check_error(key, value, ddict, float))
return value
def float_elem(ddict: Dict, key: str) -> float:
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict[key]
if not isinstance(value, float):
raise_with_traceback(_element_check_error(key, value, ddict, float))
return value
def opt_int_elem(ddict: Dict, key: str) -> Optional[int]:
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict.get(key)
if value is None:
return None
if not isinstance(value, int):
raise_with_traceback(_element_check_error(key, value, ddict, int))
return value
def int_elem(ddict: Dict, key: str) -> int:
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict[key]
if not isinstance(value, int):
raise_with_traceback(_element_check_error(key, value, ddict, int))
return value
def opt_str_elem(ddict: Dict, key: str) -> Optional[str]:
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict.get(key)
if value is None:
return None
if not isinstance(value, str):
raise_with_traceback(_element_check_error(key, value, ddict, str))
return value
def str_elem(ddict: Dict, key: str) -> str:
dict_param(ddict, "ddict")
str_param(key, "key")
value = ddict[key]
if not isinstance(value, str):
raise_with_traceback(_element_check_error(key, value, ddict, str))
return value
def class_param(obj: Any, param_name: str) -> type:
    if not inspect.isclass(obj):
        raise_with_traceback(
            ParameterCheckError(
                'Param "{name}" is not a class. Got {obj} which is type {obj_type}.'.format(
                    name=param_name, obj=repr(obj), obj_type=type(obj)
                )
            )
        )
return obj
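# Usage sketch (editor's addition): these helpers are typically called at the top of a
# function to validate its arguments. `build_config` and its parameters are hypothetical
# names used only for illustration.
#
# def build_config(name, tags=None, retries=0, hooks=None):
#     name = str_param(name, "name")
#     tags = opt_dict_param(tags, "tags", key_type=str, value_type=str)
#     retries = int_param(retries, "retries")
#     hooks = opt_list_param(hooks, "hooks", of_type=str)
#     invariant(retries >= 0, "retries must be non-negative")
#     return {"name": name, "tags": tags, "retries": retries, "hooks": hooks}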
| 32.656818
| 100
| 0.63637
|
4a148d2ad40bb10c8688d21dbeeb44494c9db3d4
| 6,045
|
py
|
Python
|
generator.py
|
Chen-Yifan/DEM_building_segmentation
|
1e9a41e87ec0ab1777a65146c5b31d88938480b7
|
[
"MIT"
] | null | null | null |
generator.py
|
Chen-Yifan/DEM_building_segmentation
|
1e9a41e87ec0ab1777a65146c5b31d88938480b7
|
[
"MIT"
] | null | null | null |
generator.py
|
Chen-Yifan/DEM_building_segmentation
|
1e9a41e87ec0ab1777a65146c5b31d88938480b7
|
[
"MIT"
] | null | null | null |
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import cv2
#from scipy.misc import imresize
import os
import random
# def trainvalGenerator(train_frame_path, train_mask_path, val_frame_path, val_mask_path, BATCH_SIZE):
# train_datagen = ImageDataGenerator(
# rescale=1./300,
# shear_range=0.2,
# zoom_range=0.2,
# horizontal_flip=True)
# val_datagen = ImageDataGenerator(rescale=1./300)
# train_image_generator = train_datagen.flow_from_directory(
# directory = train_frame_path,
# class_mode=None,
# color_mode= 'rgba',
# batch_size = BATCH_SIZE)
# train_mask_generator = train_datagen.flow_from_directory(
# directory = train_mask_path,
# class_mode="categorical",
# color_mode= 'grayscale',
# batch_size = BATCH_SIZE)
# val_image_generator = val_datagen.flow_from_directory(
# directory = val_frame_path,
# class_mode=None,
# color_mode= 'rgba',
# batch_size = BATCH_SIZE)
# val_mask_generator = val_datagen.flow_from_directory(
# directory = val_mask_path,
# class_mode = "categorical",
# color_mode= 'grayscale',
# batch_size = BATCH_SIZE)
# print('val_mask_generator',val_mask_generator)
# train_generator = zip(train_image_generator, train_mask_generator)
# val_generator = zip(val_image_generator, val_mask_generator)
# return train_generator, val_generator
def train_gen_aug(img_folder, mask_folder, batch_size):
# datagen = ImageDataGenerator(
# featurewise_center=True,
# # featurewise_std_normalization=True,
# rotation_range=20,
# width_shift_range=0.2,
# height_shift_range=0.2,
# horizontal_flip=True)
data_gen_args = dict(horizontal_flip = True,
vertical_flip = True,
width_shift_range = 0.1,
height_shift_range = 0.1,
zoom_range = 0.1,
rotation_range = 10
)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
n = os.listdir(img_folder) #List of training images
random.shuffle(n)
img = np.zeros((len(n), 256, 256, 5)).astype(np.float32)
mask = np.zeros((len(n), 256, 256, 2), dtype=np.float32)
for i in range(len(n)): #initially from 0 to 16, c = 0.
train_img_0 = np.load(img_folder+'/'+n[i]) #normalization:the range is about -100 to 360
#train_img = cv2.resize(train_img, (256, 256))# Read an image from folder and resize
train_img = np.zeros((256,256,5))
#resize
for a in range(5):
train_img[:,:,a] = cv2.resize(train_img_0[:,:,a], (256, 256))
img[i] = train_img #add to array - img[0], img[1], and so on.
#train_mask
train_mask_0 = np.load(mask_folder+'/'+n[i]) # 1.0 or 2.0
train_mask = np.where(train_mask_0==2.0, 0.0, train_mask_0)
#train_mask = imresize(train_mask[:,:,a], (256, 256), interp='nearest').astype('float32')
train_mask = cv2.resize(train_mask,(256,256),interpolation=cv2.INTER_NEAREST).astype(np.int64)
mask[i,:,:,0] = np.squeeze(train_mask)
mask[i,:,:,1] = np.squeeze(1-train_mask)
#print(mask.shape)
# mask = mask.reshape(len(n),256*256, 2)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
seed = 2018
# image_datagen.fit(img, augment=True, seed=seed)
# mask_datagen.fit(mask, augment=True, seed=seed) # mask and image separate
img_gen = image_datagen.flow(img, seed = seed, batch_size=batch_size, shuffle=True)
mask_gen = mask_datagen.flow(mask, seed = seed, batch_size=batch_size, shuffle=True)
train_gen = zip(img_gen, mask_gen)
# fits the model on batches with real-time data augmentation:
# model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
# steps_per_epoch=len(x_train) / 32, epochs=epochs)
return train_gen
def val_gen_aug(img_folder, mask_folder, batch_size):
img_datagen = ImageDataGenerator()
mask_datagen = ImageDataGenerator()
    n = os.listdir(img_folder)  # List of validation images
random.shuffle(n)
img = np.zeros((len(n), 256, 256, 5)).astype(np.float32)
mask = np.zeros((len(n), 256, 256, 2), dtype=np.float32)
for i in range(len(n)): #initially from 0 to 16, c = 0.
val_img_0 = np.load(img_folder+'/'+n[i]) #normalization:the range is about -100 to 360
val_img = np.zeros((256,256,5))
#resize
for a in range(5):
val_img[:,:,a] = cv2.resize(val_img_0[:,:,a], (256, 256))
img[i] = val_img #add to array - img[0], img[1], and so on.
#val_mask
val_mask_0 = np.load(mask_folder+'/'+n[i]) # 1.0 or 2.0
val_mask = np.where(val_mask_0==2.0, 0.0, val_mask_0)
#train_mask = imresize(train_mask[:,:,a], (256, 256), interp='nearest').astype('float32')
val_mask = cv2.resize(val_mask,(256,256),interpolation=cv2.INTER_NEAREST).astype(np.int64)
mask[i,:,:,0] = np.squeeze(val_mask)
mask[i,:,:,1] = np.squeeze(1-val_mask)
#print(mask.shape)
# mask = mask.reshape(len(n),256*256, 2)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
# img_datagen.fit(img)
# mask_datagen.fit(mask)
    # use a shared seed so images and masks are shuffled in the same order
    seed = 2018
    img_gen = img_datagen.flow(img, seed=seed, batch_size=batch_size, shuffle=True)
    mask_gen = mask_datagen.flow(mask, seed=seed, batch_size=batch_size, shuffle=True)
val_gen = zip(img_gen, mask_gen)
# fits the model on batches with real-time data augmentation:
# model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
# steps_per_epoch=len(x_train) / 32, epochs=epochs)
return val_gen
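# Usage sketch (editor's addition): both generators yield (image_batch, mask_batch)
# pairs and can be fed to Keras' fit_generator. The folder paths, batch size and model
# below are hypothetical placeholders, not part of this module.
#
# train_gen = train_gen_aug('data/train_frames', 'data/train_masks', batch_size=8)
# val_gen = val_gen_aug('data/val_frames', 'data/val_masks', batch_size=8)
# model.fit_generator(train_gen,
#                     steps_per_epoch=n_train_tiles // 8,
#                     validation_data=val_gen,
#                     validation_steps=n_val_tiles // 8,
#                     epochs=50)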
| 38.503185
| 102
| 0.640199
|
4a148d9bac903fafda0ce1ac2dfa6b8b3be3a9b4
| 1,592
|
py
|
Python
|
CUT_MoNCE/models/sinkhorn.py
|
fnzhan/MoNCE
|
c534307800664660d32813395c9577276ef196ee
|
[
"MIT"
] | 4
|
2021-11-13T17:48:04.000Z
|
2022-03-23T06:41:06.000Z
|
CUT_MoNCE/models/sinkhorn.py
|
fnzhan/MoNCE
|
c534307800664660d32813395c9577276ef196ee
|
[
"MIT"
] | null | null | null |
CUT_MoNCE/models/sinkhorn.py
|
fnzhan/MoNCE
|
c534307800664660d32813395c9577276ef196ee
|
[
"MIT"
] | 2
|
2022-03-20T04:55:28.000Z
|
2022-03-20T13:44:55.000Z
|
# -*- coding: utf-8 -*-
from packaging import version
import torch
def sinkhorn(dot, max_iter=100):
"""
dot: n x in_size x out_size
mask: n x in_size
output: n x in_size x out_size
"""
n, in_size, out_size = dot.shape
K = dot
# K: n x in_size x out_size
u = K.new_ones((n, in_size))
v = K.new_ones((n, out_size))
a = float(out_size / in_size)
for _ in range(max_iter):
u = a / torch.bmm(K, v.view(n, out_size, 1)).view(n, in_size)
v = 1. / torch.bmm(u.view(n, 1, in_size), K).view(n, out_size)
K = u.view(n, in_size, 1) * (K * v.view(n, 1, out_size))
return K
def OT(q, k, eps=1.0, max_iter=100, cost_type=None):
"""Compute the weights using Sinkhorn OT
q: n x in_size x in_dim
k: m x out_size x in_dim (m: number of heads/ref)
    output: (n*m) x out_size x in_size
"""
n, in_size, in_dim = q.shape
m, out_size = k.shape[:-1]
C = torch.einsum('bid,bod->bio', q, k)
if cost_type == 'easy':
K = 1 - C.clone()
elif cost_type == 'hard':
K = C.clone()
npatches = q.size(1)
mask_dtype = torch.uint8 if version.parse(torch.__version__) < version.parse('1.2.0') else torch.bool
diagonal = torch.eye(npatches, device=q.device, dtype=mask_dtype)[None, :, :]
K.masked_fill_(diagonal, -10)
# K: n x m x in_size x out_size
K = K.reshape(-1, in_size, out_size)
# K: nm x in_size x out_size
K = torch.exp(K / eps)
K = sinkhorn(K, max_iter=max_iter)
# K: nm x in_size x out_size
K = K.permute(0, 2, 1).contiguous()
return K
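# Usage sketch (editor's addition): with query/key features of shape
# (batch, patches, dim), OT returns a transport plan of shape (batch, patches, patches)
# that can be used to re-weight patch-wise contrastive pairs. The shapes below are
# illustrative only.
#
# q = torch.randn(4, 256, 64)
# k = torch.randn(4, 256, 64)
# plan = OT(q, k, eps=1.0, max_iter=50, cost_type='easy')
# plan.shape  # torch.Size([4, 256, 256])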
| 27.929825
| 105
| 0.593593
|
4a148eb152a97d9cd997207086bfd133fe7ede98
| 12,152
|
py
|
Python
|
thermostat_nw/columns.py
|
hshaban/epathermostat_nw
|
6fec9402484e1ef7e4e59e2c679d9a8efee99ad6
|
[
"MIT"
] | null | null | null |
thermostat_nw/columns.py
|
hshaban/epathermostat_nw
|
6fec9402484e1ef7e4e59e2c679d9a8efee99ad6
|
[
"MIT"
] | null | null | null |
thermostat_nw/columns.py
|
hshaban/epathermostat_nw
|
6fec9402484e1ef7e4e59e2c679d9a8efee99ad6
|
[
"MIT"
] | null | null | null |
RHU_COLUMNS = [
"rhu1_00F_to_05F",
"rhu1_05F_to_10F",
"rhu1_10F_to_15F",
"rhu1_15F_to_20F",
"rhu1_20F_to_25F",
"rhu1_25F_to_30F",
"rhu1_30F_to_35F",
"rhu1_35F_to_40F",
"rhu1_40F_to_45F",
"rhu1_45F_to_50F",
"rhu1_50F_to_55F",
"rhu1_55F_to_60F",
"rhu1_30F_to_45F",
"rhu2_00F_to_05F",
"rhu2_05F_to_10F",
"rhu2_10F_to_15F",
"rhu2_15F_to_20F",
"rhu2_20F_to_25F",
"rhu2_25F_to_30F",
"rhu2_30F_to_35F",
"rhu2_35F_to_40F",
"rhu2_40F_to_45F",
"rhu2_45F_to_50F",
"rhu2_50F_to_55F",
"rhu2_55F_to_60F",
"rhu2_30F_to_45F",
"dnru_daily",
"dnru_reduction_daily",
"mu_estimate_daily",
"sigma_estimate_daily",
"sigmoid_model_error_daily",
"sigmoid_integral_daily",
"dnru_hourly",
"dnru_reduction_hourly",
"mu_estimate_hourly",
"sigma_estimate_hourly",
"sigmoid_model_error_hourly",
"sigmoid_integral_hourly",
]
RHU2_IQFLT_COLUMNS = [
"rhu2IQFLT_00F_to_05F",
"rhu2IQFLT_05F_to_10F",
"rhu2IQFLT_10F_to_15F",
"rhu2IQFLT_15F_to_20F",
"rhu2IQFLT_20F_to_25F",
"rhu2IQFLT_25F_to_30F",
"rhu2IQFLT_30F_to_35F",
"rhu2IQFLT_35F_to_40F",
"rhu2IQFLT_40F_to_45F",
"rhu2IQFLT_45F_to_50F",
"rhu2IQFLT_50F_to_55F",
"rhu2IQFLT_55F_to_60F",
"rhu2IQFLT_30F_to_45F",
]
REAL_OR_INTEGER_VALUED_COLUMNS_HEATING = [
"n_days_in_inputfile_date_range",
"n_days_both_heating_and_cooling",
"n_days_insufficient_data",
"n_core_heating_days",
"baseline_percentile_core_heating_comfort_temperature",
"regional_average_baseline_heating_comfort_temperature",
"percent_savings_baseline_percentile",
"avoided_daily_mean_core_day_runtime_baseline_percentile",
"avoided_total_core_day_runtime_baseline_percentile",
"baseline_daily_mean_core_day_runtime_baseline_percentile",
"baseline_total_core_day_runtime_baseline_percentile",
"_daily_mean_core_day_demand_baseline_baseline_percentile",
"percent_savings_baseline_regional",
"avoided_daily_mean_core_day_runtime_baseline_regional",
"avoided_total_core_day_runtime_baseline_regional",
"baseline_daily_mean_core_day_runtime_baseline_regional",
"baseline_total_core_day_runtime_baseline_regional",
"_daily_mean_core_day_demand_baseline_baseline_regional",
"percent_savings_baseline_hourly_regional",
"avoided_daily_mean_core_day_runtime_baseline_hourly_regional",
"avoided_total_core_day_runtime_baseline_hourly_regional",
"baseline_daily_mean_core_day_runtime_baseline_hourly_regional",
"baseline_total_core_day_runtime_baseline_hourly_regional",
"_daily_mean_core_day_demand_baseline_baseline_hourly_regional",
"mean_demand",
"alpha",
"tau",
"mean_sq_err",
"root_mean_sq_err",
"cv_root_mean_sq_err",
"mean_abs_err",
"mean_abs_pct_err",
"nfev",
"total_core_heating_runtime",
"total_auxiliary_heating_core_day_runtime",
"total_emergency_heating_core_day_runtime",
"daily_mean_core_heating_runtime",
"core_heating_days_mean_indoor_temperature",
"core_heating_days_mean_outdoor_temperature",
"core_mean_indoor_temperature",
"core_mean_outdoor_temperature",
"heat_gain_constant",
"heat_loss_constant",
"hvac_constant",
"overall_temperature_variance",
"weekly_temperature_variance",
"avg_daily_cooling_runtime",
"avg_daily_heating_runtime",
"avg_daily_auxiliary_runtime",
"avg_daily_emergency_runtime",
"lm_intercept",
"lm_intercept_se",
"lm_main_slope",
"lm_main_slope_se",
"lm_secondary_slope",
"lm_secondary_slope_se",
"lm_cvrmse",
"lm_rsquared",
] + RHU_COLUMNS
REAL_OR_INTEGER_VALUED_COLUMNS_COOLING = [
"n_days_in_inputfile_date_range",
"n_days_both_heating_and_cooling",
"n_days_insufficient_data",
"n_core_cooling_days",
"baseline_percentile_core_cooling_comfort_temperature",
"regional_average_baseline_cooling_comfort_temperature",
"percent_savings_baseline_percentile",
"avoided_daily_mean_core_day_runtime_baseline_percentile",
"avoided_total_core_day_runtime_baseline_percentile",
"baseline_daily_mean_core_day_runtime_baseline_percentile",
"baseline_total_core_day_runtime_baseline_percentile",
"_daily_mean_core_day_demand_baseline_baseline_percentile",
"percent_savings_baseline_regional",
"avoided_daily_mean_core_day_runtime_baseline_regional",
"avoided_total_core_day_runtime_baseline_regional",
"baseline_daily_mean_core_day_runtime_baseline_regional",
"baseline_total_core_day_runtime_baseline_regional",
"_daily_mean_core_day_demand_baseline_baseline_regional",
"percent_savings_baseline_hourly_regional",
"avoided_daily_mean_core_day_runtime_baseline_hourly_regional",
"avoided_total_core_day_runtime_baseline_hourly_regional",
"baseline_daily_mean_core_day_runtime_baseline_hourly_regional",
"baseline_total_core_day_runtime_baseline_hourly_regional",
"_daily_mean_core_day_demand_baseline_baseline_hourly_regional",
"mean_demand",
"alpha",
"tau",
"mean_sq_err",
"root_mean_sq_err",
"cv_root_mean_sq_err",
"mean_abs_err",
"mean_abs_pct_err",
"nfev",
"total_core_cooling_runtime",
"daily_mean_core_cooling_runtime",
"core_cooling_days_mean_indoor_temperature",
"core_cooling_days_mean_outdoor_temperature",
"core_mean_indoor_temperature",
"core_mean_outdoor_temperature",
"heat_gain_constant",
"heat_loss_constant",
"hvac_constant",
"overall_temperature_variance",
"weekly_temperature_variance",
"avg_daily_cooling_runtime",
"avg_daily_heating_runtime",
"avg_daily_auxiliary_runtime",
"avg_daily_emergency_runtime",
"lm_intercept",
"lm_intercept_se",
"lm_main_slope",
"lm_main_slope_se",
"lm_secondary_slope",
"lm_secondary_slope_se",
"lm_cvrmse",
"lm_rsquared",
"excess_resistance_score_1hr",
"excess_resistance_score_2hr",
"excess_resistance_score_3hr",
]
REAL_OR_INTEGER_VALUED_COLUMNS_ALL = (
[
"n_days_in_inputfile_date_range",
"n_days_both_heating_and_cooling",
"n_days_insufficient_data",
"n_core_cooling_days",
"n_core_heating_days",
"baseline_percentile_core_cooling_comfort_temperature",
"baseline_percentile_core_heating_comfort_temperature",
"regional_average_baseline_cooling_comfort_temperature",
"regional_average_baseline_heating_comfort_temperature",
"percent_savings_baseline_percentile",
"avoided_daily_mean_core_day_runtime_baseline_percentile",
"avoided_total_core_day_runtime_baseline_percentile",
"baseline_daily_mean_core_day_runtime_baseline_percentile",
"baseline_total_core_day_runtime_baseline_percentile",
"_daily_mean_core_day_demand_baseline_baseline_percentile",
"percent_savings_baseline_regional",
"avoided_daily_mean_core_day_runtime_baseline_regional",
"avoided_total_core_day_runtime_baseline_regional",
"baseline_daily_mean_core_day_runtime_baseline_regional",
"baseline_total_core_day_runtime_baseline_regional",
"_daily_mean_core_day_demand_baseline_baseline_regional",
"percent_savings_baseline_hourly_regional",
"avoided_daily_mean_core_day_runtime_baseline_hourly_regional",
"avoided_total_core_day_runtime_baseline_hourly_regional",
"baseline_daily_mean_core_day_runtime_baseline_hourly_regional",
"baseline_total_core_day_runtime_baseline_hourly_regional",
"_daily_mean_core_day_demand_baseline_baseline_hourly_regional",
"mean_demand",
"alpha",
"tau",
"mean_sq_err",
"root_mean_sq_err",
"cv_root_mean_sq_err",
"mean_abs_err",
"mean_abs_pct_err",
"nfev",
"total_core_cooling_runtime",
"total_core_heating_runtime",
"total_auxiliary_heating_core_day_runtime",
"total_emergency_heating_core_day_runtime",
"daily_mean_core_cooling_runtime",
"daily_mean_core_heating_runtime",
"core_mean_indoor_temperature",
"core_mean_outdoor_temperature",
"heat_gain_constant",
"heat_loss_constant",
"hvac_constant",
"overall_temperature_variance",
"weekly_temperature_variance",
"avg_daily_cooling_runtime",
"avg_daily_heating_runtime",
"avg_daily_auxiliary_runtime",
"avg_daily_emergency_runtime",
"lm_intercept",
"lm_intercept_se",
"lm_main_slope",
"lm_main_slope_se",
"lm_secondary_slope",
"lm_secondary_slope_se",
"lm_cvrmse",
"lm_rsquared",
"excess_resistance_score_1hr",
"excess_resistance_score_2hr",
"excess_resistance_score_3hr",
]
+ RHU_COLUMNS
+ RHU2_IQFLT_COLUMNS
)
EXPORT_COLUMNS = [
"sw_version",
"ct_identifier",
"heat_type",
"heat_stage",
"cool_type",
"cool_stage",
"heating_or_cooling",
"zipcode",
"station",
"climate_zone",
"start_date",
"end_date",
"n_days_in_inputfile_date_range",
"n_days_both_heating_and_cooling",
"n_days_insufficient_data",
"n_core_cooling_days",
"n_core_heating_days",
"baseline_percentile_core_cooling_comfort_temperature",
"baseline_percentile_core_heating_comfort_temperature",
"regional_average_baseline_cooling_comfort_temperature",
"regional_average_baseline_heating_comfort_temperature",
"percent_savings_baseline_percentile",
"avoided_daily_mean_core_day_runtime_baseline_percentile",
"avoided_total_core_day_runtime_baseline_percentile",
"baseline_daily_mean_core_day_runtime_baseline_percentile",
"baseline_total_core_day_runtime_baseline_percentile",
"_daily_mean_core_day_demand_baseline_baseline_percentile",
"percent_savings_baseline_regional",
"avoided_daily_mean_core_day_runtime_baseline_regional",
"avoided_total_core_day_runtime_baseline_regional",
"baseline_daily_mean_core_day_runtime_baseline_regional",
"baseline_total_core_day_runtime_baseline_regional",
"_daily_mean_core_day_demand_baseline_baseline_regional",
"percent_savings_baseline_hourly_regional",
"avoided_daily_mean_core_day_runtime_baseline_hourly_regional",
"avoided_total_core_day_runtime_baseline_hourly_regional",
"baseline_daily_mean_core_day_runtime_baseline_hourly_regional",
"baseline_total_core_day_runtime_baseline_hourly_regional",
"_daily_mean_core_day_demand_baseline_baseline_hourly_regional",
"mean_demand",
"alpha",
"tau",
"mean_sq_err",
"root_mean_sq_err",
"cv_root_mean_sq_err",
"mean_abs_err",
"mean_abs_pct_err",
"cov_x",
"nfev",
"mesg",
"total_core_cooling_runtime",
"total_core_heating_runtime",
"total_auxiliary_heating_core_day_runtime",
"total_emergency_heating_core_day_runtime",
"daily_mean_core_cooling_runtime",
"daily_mean_core_heating_runtime",
"core_cooling_days_mean_indoor_temperature",
"core_cooling_days_mean_outdoor_temperature",
"core_heating_days_mean_indoor_temperature",
"core_heating_days_mean_outdoor_temperature",
"core_mean_indoor_temperature",
"core_mean_outdoor_temperature",
"heat_gain_constant",
"heat_loss_constant",
"hvac_constant",
"overall_temperature_variance",
"weekly_temperature_variance",
"aux_exceeds_heat_runtime_daily",
"aux_exceeds_heat_runtime_hourly",
"avg_daily_cooling_runtime",
"avg_daily_heating_runtime",
"avg_daily_auxiliary_runtime",
"avg_daily_emergency_runtime",
"lm_intercept",
"lm_intercept_se",
"lm_main_slope",
"lm_main_slope_se",
"lm_secondary_slope",
"lm_secondary_slope_se",
"lm_cvrmse",
"lm_rsquared",
"excess_resistance_score_1hr",
"excess_resistance_score_2hr",
"excess_resistance_score_3hr",
] + RHU_COLUMNS
CERTIFICATION_HEADERS = [
"product_id",
"sw_version",
"metric",
"filter",
"region",
"statistic",
"season",
"value",
]
| 34.819484
| 72
| 0.758476
|
4a148ecefdf30bed99e90b8773c92bb221d2a96e
| 1,825
|
py
|
Python
|
venv/Lib/site-packages/cryptography/hazmat/primitives/cmac.py
|
gilbertekalea/booking.com_crawler
|
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
|
[
"MIT"
] | 7
|
2022-03-10T07:03:14.000Z
|
2022-03-24T09:42:46.000Z
|
venv/Lib/site-packages/cryptography/hazmat/primitives/cmac.py
|
gilbertekalea/booking.com_crawler
|
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
|
[
"MIT"
] | 66
|
2020-12-14T21:01:55.000Z
|
2022-03-30T22:09:34.000Z
|
venv/Lib/site-packages/cryptography/hazmat/primitives/cmac.py
|
gilbertekalea/booking.com_crawler
|
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
|
[
"MIT"
] | 1
|
2022-03-10T07:15:54.000Z
|
2022-03-10T07:15:54.000Z
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import typing
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized,
)
from cryptography.hazmat.primitives import ciphers
class CMAC(object):
def __init__(
self,
algorithm: ciphers.BlockCipherAlgorithm,
backend: typing.Any = None,
ctx=None,
):
if not isinstance(algorithm, ciphers.BlockCipherAlgorithm):
raise TypeError("Expected instance of BlockCipherAlgorithm.")
self._algorithm = algorithm
if ctx is None:
from cryptography.hazmat.backends.openssl.backend import (
backend as ossl,
)
self._ctx = ossl.create_cmac_ctx(self._algorithm)
else:
self._ctx = ctx
def update(self, data: bytes) -> None:
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
utils._check_bytes("data", data)
self._ctx.update(data)
def finalize(self) -> bytes:
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
digest = self._ctx.finalize()
self._ctx = None
return digest
def verify(self, signature: bytes) -> None:
utils._check_bytes("signature", signature)
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
ctx, self._ctx = self._ctx, None
ctx.verify(signature)
def copy(self) -> "CMAC":
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
return CMAC(self._algorithm, ctx=self._ctx.copy())
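# Usage sketch (editor's addition): computing and verifying an AES-CMAC tag. The key is
# a dummy 16-byte value for illustration only.
#
# from cryptography.hazmat.primitives import cmac
# from cryptography.hazmat.primitives.ciphers import algorithms
#
# key = b"\x00" * 16
# c = cmac.CMAC(algorithms.AES(key))
# c.update(b"message to authenticate")
# tag = c.finalize()
#
# verifier = cmac.CMAC(algorithms.AES(key))
# verifier.update(b"message to authenticate")
# verifier.verify(tag)  # raises InvalidSignature if the tag does not match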
| 29.918033
| 79
| 0.639452
|
4a148f93a76ef9276011cf3e165a4a22de16d62a
| 2,340
|
py
|
Python
|
bedrock/engine.py
|
openmednlp/bedrock
|
fdacbad92d59f27650eab1753fa694f90863b43e
|
[
"MIT"
] | 2
|
2019-04-28T23:14:31.000Z
|
2019-05-19T08:00:28.000Z
|
bedrock/engine.py
|
openmednlp/bedrock
|
fdacbad92d59f27650eab1753fa694f90863b43e
|
[
"MIT"
] | 5
|
2019-02-11T14:50:59.000Z
|
2021-09-29T17:28:23.000Z
|
bedrock/engine.py
|
openmednlp/bedrock
|
fdacbad92d59f27650eab1753fa694f90863b43e
|
[
"MIT"
] | 1
|
2020-04-18T08:48:32.000Z
|
2020-04-18T08:48:32.000Z
|
from bedrock.doc.doc import Doc
from bedrock.doc.relation import Relation
from typing import List
from bedrock.tagger.tagger import Tagger
from bedrock.annotator.annotator import Annotator
class ProcessingEngine:
def __init__(self, tagger: Tagger = None, annotators: List[Annotator] = None,
post_labeling_annotators: List[Annotator] = None):
self.tagger = tagger
self.annotators = annotators
self._post_labeling_annotators = post_labeling_annotators
def __set_tags(self, docs: List[Doc]):
if self.tagger is not None:
for doc in docs:
tokens, annotations, relations = self.tagger.get_tags(doc)
doc.set_tokens(tokens)
doc.set_annotations(annotations)
doc.set_relations(relations)
def __set_annotations(self, docs: List[Doc]):
if self.annotators is not None:
for doc in docs:
for annotator in self.annotators:
annotations, relations = annotator.get_annotations(doc)
next_index = doc.get_next_start_index() # returns 0 if doc.__annotations empty
if not annotations.empty:
annotations.index += next_index
max_annotations_index = annotations.index[-1]
doc.append_annotations(annotations, False)
if not relations.empty: # relations cannot exist without annotations
relations[[Relation.GOV_ID, Relation.DEP_ID]] += next_index
relations.index += max_annotations_index+1
doc.append_relations(relations, False)
def __run_post_labeling(self, docs: List[Doc]):
if self._post_labeling_annotators is not None:
for doc in docs:
for post_annotator in self._post_labeling_annotators:
annotations, relations = post_annotator.get_annotations(doc)
doc.set_annotations(annotations)
if relations is not None:
doc.set_relations(relations)
def process(self, docs: List[Doc]) -> List[Doc]:
self.__set_tags(docs)
self.__set_annotations(docs)
self.__run_post_labeling(docs)
return docs
| 44.150943
| 99
| 0.613675
|
4a148fb0222f48f6856007e9054317bcfd5df581
| 986
|
py
|
Python
|
pyravendb/tests/jvm_migrated_tests/test_what_changed.py
|
poissoncorp/ravendb-python-client
|
5003127e6aa6897ab957d73f494b9cbcf9362bf9
|
[
"MIT"
] | null | null | null |
pyravendb/tests/jvm_migrated_tests/test_what_changed.py
|
poissoncorp/ravendb-python-client
|
5003127e6aa6897ab957d73f494b9cbcf9362bf9
|
[
"MIT"
] | null | null | null |
pyravendb/tests/jvm_migrated_tests/test_what_changed.py
|
poissoncorp/ravendb-python-client
|
5003127e6aa6897ab957d73f494b9cbcf9362bf9
|
[
"MIT"
] | null | null | null |
from pyravendb.tests.test_base import TestBase, User
class TestWhatChanged(TestBase):
def setUp(self):
super(TestWhatChanged, self).setUp()
def test_has_changes(self):
with self.store.open_session() as session:
user1 = User("user1", None)
user2 = User("user2", 1)
session.store(user1, "users/1")
session.store(user2, "users/2")
session.save_changes()
with self.store.open_session() as session:
self.assertFalse(session.advanced.has_changes())
user1, user2 = session.load(["users/1", "users/2"], User)
self.assertFalse(session.advanced.has_changed(user1))
self.assertFalse(session.advanced.has_changed(user2))
user1.name = "newName"
self.assertTrue(session.advanced.has_changed(user1))
self.assertFalse(session.advanced.has_changed(user2))
self.assertTrue(session.advanced.has_changes())
| 30.8125
| 69
| 0.628803
|
4a1491c5bed72ce32dbc7e41aaa11c775c1cf54a
| 52
|
py
|
Python
|
tests/components/trafikverket_ferry/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/trafikverket_ferry/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/trafikverket_ferry/__init__.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for the Trafikverket Ferry integration."""
| 26
| 51
| 0.75
|
4a1492362aa501682e511ab7dff399491748bd22
| 471
|
py
|
Python
|
py_scr/direct_1_test.py
|
maanjum95/VHDL_System_Design_Lab
|
4b452d37c4f6eba9fd31e2cf80bd4202fc5f4940
|
[
"MIT"
] | 1
|
2021-11-19T19:58:32.000Z
|
2021-11-19T19:58:32.000Z
|
py_scr/direct_1_test.py
|
maanjum95/VHDL_System_Design_Lab
|
4b452d37c4f6eba9fd31e2cf80bd4202fc5f4940
|
[
"MIT"
] | null | null | null |
py_scr/direct_1_test.py
|
maanjum95/VHDL_System_Design_Lab
|
4b452d37c4f6eba9fd31e2cf80bd4202fc5f4940
|
[
"MIT"
] | 1
|
2021-11-26T23:41:12.000Z
|
2021-11-26T23:41:12.000Z
|
def low_high(a, b):
if a == 0:
a = 2**16
if b == 0:
b = 2**16
ab = a * b
mod_to = 2**16 + 1
print("Actual result", ab, "to", ab % mod_to)
ab_mod = ab % 2**16
ab_div = ab // 2**16
result = ab_mod - ab_div
if ab_mod < ab_div:
result += 2**16 + 1
print("Result", result, "{0:16b}".format(result))
print("MOD", ab_mod, "{0:16b}".format(ab_mod).zfill(7))
print("DIV", ab_div, "{0:16b}".format(ab_div).zfill(7))
low_high(65530, 65530)
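# Verification sketch (editor's addition): low_high implements multiplication modulo
# 2**16 + 1 with 0 standing in for 2**16 (as in the IDEA cipher). Because 2**16 is
# congruent to -1 modulo 2**16 + 1, the low 16 bits minus the high 16 bits of a*b is
# congruent to a*b modulo 2**16 + 1, which a direct check confirms:
#
# def mul_mod(a, b):
#     a = a or 2**16
#     b = b or 2**16
#     return (a * b) % (2**16 + 1)
#
# for a, b in [(1, 1), (0, 0), (65530, 65530), (12345, 54321)]:
#     ab = (a or 2**16) * (b or 2**16)
#     assert (ab % 2**16 - ab // 2**16) % (2**16 + 1) == mul_mod(a, b)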
| 15.193548
| 56
| 0.549894
|
4a1492ca70f206690e8a5025d5c7c1943fa5c446
| 30,907
|
py
|
Python
|
methodologist/methodologist.py
|
atkindel/methodologist
|
f79c9c6bedf27643db41b8de8081c464c893411e
|
[
"MIT"
] | null | null | null |
methodologist/methodologist.py
|
atkindel/methodologist
|
f79c9c6bedf27643db41b8de8081c464c893411e
|
[
"MIT"
] | null | null | null |
methodologist/methodologist.py
|
atkindel/methodologist
|
f79c9c6bedf27643db41b8de8081c464c893411e
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
#
# methodologist.py
# Functions for static lexical analysis over economists' code.
# Uses Pygments, which miraculously has lexers for everything (except SPSS)!
#
# Author: Alex Kindel
# Last edited: 11 March 2018
import sys
from collections import defaultdict, Counter
from rpy2 import robjects
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import StrVector
from pygments import lex
from pygments.lexers import StataLexer, OctaveLexer, SASLexer, SLexer, NumPyLexer, FortranLexer
from pygments.token import Operator, Keyword, Name, Text
from pygments.lexer import words, include
# Improvements to Pygments lexers
# The SASLexer class was kinda busted
# Perhaps I'm the only one who's ever used it??
class SuperSASLexer(SASLexer):
# Add missing function tokens
builtins_functions = SASLexer.builtins_functions + ("rxchange", "rxparse")
# Respecify regexes for operators, builtins
tokens = SASLexer.tokens
tokens['operators'] = [(r'(-|=|<=|>=|<|>|<>|&|!=|\||\*|\+|\^|/|!|~|~=)', Operator)]
tokens['keywords'] = [(words(SASLexer.builtins_statements,
prefix=r'\b',
suffix=r'\b'),
Keyword),
(words(SASLexer.builtins_sql,
prefix=r'\b',
suffix=r'\b'),
Keyword),
(words(SASLexer.builtins_conditionals,
prefix=r'\b',
suffix=r'\b'),
Keyword),
(words(SASLexer.builtins_macros,
prefix=r'\%',
suffix=r'\b'),
Name.Builtin),
(words(builtins_functions,
prefix=r'\b'),
Name.Builtin),
]
tokens['general'] = [include('keywords'),
include('vars-strings'),
include('special'),
include('numbers'),
include('operators')
]
# Enable more of the StataLexer class
# In general this could be improved to take better account of e.g. r() objects
class SuperStataLexer(StataLexer):
tokens = StataLexer.tokens
tokens["root"] = [include('comments'),
include('vars-strings'),
include('numbers'),
include('keywords'),
include('operators'),
(r'.', Text)
]
# NumPyLexer doesn't have Pandas or Scipy names in it
# We also add names from pandas.DataFrame
# TODO: Bootstrap this the way DynamicRLexer works
class SuperNumPyLexer(NumPyLexer):
pandas_names = ['Categorical', 'CategoricalIndex', 'DataFrame', 'DateOffset', 'DatetimeIndex', 'ExcelFile', 'ExcelWriter', 'Expr', 'Float64Index', 'Grouper', 'HDFStore', 'Index', 'IndexSlice', 'Int64Index', 'Interval', 'IntervalIndex', 'MultiIndex', 'NaT', 'Panel', 'Panel4D', 'Period', 'PeriodIndex', 'RangeIndex', 'Series', 'SparseArray', 'SparseDataFrame', 'SparseList', 'SparseSeries', 'Term', 'TimeGrouper', 'Timedelta', 'TimedeltaIndex', 'Timestamp', 'UInt64Index', 'WidePanel', '_DeprecatedModule', '__builtins__', '__cached__', '__doc__', '__docformat__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', '__version__', '_hashtable', '_lib', '_libs', '_np_version_under1p10', '_np_version_under1p11', '_np_version_under1p12', '_np_version_under1p13', '_np_version_under1p14', '_np_version_under1p15', '_tslib', '_version', 'api', 'bdate_range', 'compat', 'concat', 'core', 'crosstab', 'cut', 'date_range', 'datetime', 'datetools', 'describe_option', 'errors', 'eval', 'ewma', 'ewmcorr', 'ewmcov', 'ewmstd', 'ewmvar', 'ewmvol', 'expanding_apply', 'expanding_corr', 'expanding_count', 'expanding_cov', 'expanding_kurt', 'expanding_max', 'expanding_mean', 'expanding_median', 'expanding_min', 'expanding_quantile', 'expanding_skew', 'expanding_std', 'expanding_sum', 'expanding_var', 'factorize', 'get_dummies', 'get_option', 'get_store', 'groupby', 'infer_freq', 'interval_range', 'io', 'isna', 'isnull', 'json', 'lib', 'lreshape', 'match', 'melt', 'merge', 'merge_asof', 'merge_ordered', 'notna', 'notnull', 'np', 'offsets', 'option_context', 'options', 'ordered_merge', 'pandas', 'parser', 'period_range', 'pivot', 'pivot_table', 'plot_params', 'plotting', 'pnow', 'qcut', 'read_clipboard', 'read_csv', 'read_excel', 'read_feather', 'read_fwf', 'read_gbq', 'read_hdf', 'read_html', 'read_json', 'read_msgpack', 'read_parquet', 'read_pickle', 'read_sas', 'read_sql', 'read_sql_query', 'read_sql_table', 'read_stata', 'read_table', 'reset_option', 'rolling_apply', 'rolling_corr', 'rolling_count', 'rolling_cov', 'rolling_kurt', 'rolling_max', 'rolling_mean', 'rolling_median', 'rolling_min', 'rolling_quantile', 'rolling_skew', 'rolling_std', 'rolling_sum', 'rolling_var', 'rolling_window', 'scatter_matrix', 'set_eng_float_format', 'set_option', 'show_versions', 'stats', 'test', 'testing', 'timedelta_range', 'to_datetime', 'to_msgpack', 'to_numeric', 'to_pickle', 'to_timedelta', 'tools', 'tseries', 'tslib', 'unique', 'util', 'value_counts', 'wide_to_long']
pandas_df_names = ['T', '_AXIS_ALIASES', '_AXIS_IALIASES', '_AXIS_LEN', '_AXIS_NAMES', '_AXIS_NUMBERS', '_AXIS_ORDERS', '_AXIS_REVERSED', '_AXIS_SLICEMAP', '__abs__', '__add__', '__and__', '__array__', '__array_wrap__', '__bool__', '__bytes__', '__class__', '__contains__', '__copy__', '__deepcopy__', '__delattr__', '__delitem__', '__dict__', '__dir__', '__div__', '__doc__', '__eq__', '__finalize__', '__floordiv__', '__format__', '__ge__', '__getattr__', '__getattribute__', '__getitem__', '__getstate__', '__gt__', '__hash__', '__iadd__', '__iand__', '__ifloordiv__', '__imod__', '__imul__', '__init__', '__init_subclass__', '__invert__', '__ior__', '__ipow__', '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__', '__lt__', '__mod__', '__module__', '__mul__', '__ne__', '__neg__', '__new__', '__nonzero__', '__or__', '__pow__', '__radd__', '__rand__', '__rdiv__', '__reduce__', '__reduce_ex__', '__repr__', '__rfloordiv__', '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', '__rsub__', '__rtruediv__', '__rxor__', '__setattr__', '__setitem__', '__setstate__', '__sizeof__', '__str__', '__sub__', '__subclasshook__', '__truediv__', '__unicode__', '__weakref__', '__xor__', '_accessors', '_add_numeric_operations', '_add_series_only_operations', '_add_series_or_dataframe_operations', '_agg_by_level', '_agg_doc', '_aggregate', '_aggregate_multiple_funcs', '_align_frame', '_align_series', '_apply_broadcast', '_apply_empty_result', '_apply_raw', '_apply_standard', '_at', '_box_col_values', '_box_item_values', '_builtin_table', '_check_inplace_setting', '_check_is_chained_assignment_possible', '_check_percentile', '_check_setitem_copy', '_clear_item_cache', '_clip_with_one_bound', '_clip_with_scalar', '_combine_const', '_combine_frame', '_combine_match_columns', '_combine_match_index', '_combine_series', '_combine_series_infer', '_compare_frame', '_compare_frame_evaluate', '_consolidate', '_consolidate_inplace', '_construct_axes_dict', '_construct_axes_dict_for_slice', '_construct_axes_dict_from', '_construct_axes_from_arguments', '_constructor', '_constructor_expanddim', '_constructor_sliced', '_convert', '_count_level', '_create_indexer', '_cython_table', '_deprecations', '_dir_additions', '_dir_deletions', '_drop_axis', '_ensure_valid_index', '_expand_axes', '_flex_compare_frame', '_from_arrays', '_from_axes', '_get_agg_axis', '_get_axis', '_get_axis_name', '_get_axis_number', '_get_axis_resolvers', '_get_block_manager_axis', '_get_bool_data', '_get_cacher', '_get_index_resolvers', '_get_item_cache', '_get_numeric_data', '_get_valid_indices', '_get_value', '_get_values', '_getitem_array', '_getitem_column', '_getitem_frame', '_getitem_multilevel', '_getitem_slice', '_gotitem', '_iat', '_iget_item_cache', '_iloc', '_indexed_same', '_info_axis', '_info_axis_name', '_info_axis_number', '_info_repr', '_init_dict', '_init_mgr', '_init_ndarray', '_internal_names', '_internal_names_set', '_is_builtin_func', '_is_cached', '_is_cython_func', '_is_datelike_mixed_type', '_is_mixed_type', '_is_numeric_mixed_type', '_is_view', '_ix', '_ixs', '_join_compat', '_loc', '_maybe_cache_changed', '_maybe_update_cacher', '_metadata', '_needs_reindex_multi', '_obj_with_exclusions', '_protect_consolidate', '_reduce', '_reindex_axes', '_reindex_axis', '_reindex_columns', '_reindex_index', '_reindex_multi', '_reindex_with_indexers', '_repr_data_resource_', '_repr_fits_horizontal_', '_repr_fits_vertical_', '_repr_html_', '_repr_latex_', '_reset_cache', '_reset_cacher', '_sanitize_column', 
'_selected_obj', '_selection', '_selection_list', '_selection_name', '_series', '_set_as_cached', '_set_axis', '_set_axis_name', '_set_is_copy', '_set_item', '_set_value', '_setitem_array', '_setitem_frame', '_setitem_slice', '_setup_axes', '_shallow_copy', '_slice', '_stat_axis', '_stat_axis_name', '_stat_axis_number', '_take', '_to_dict_of_blocks', '_try_aggregate_string_function', '_typ', '_unpickle_frame_compat', '_unpickle_matrix_compat', '_update_inplace', '_validate_dtype', '_values', '_where', '_xs', 'abs', 'add', 'add_prefix', 'add_suffix', 'agg', 'aggregate', 'align', 'all', 'any', 'append', 'apply', 'applymap', 'as_blocks', 'as_matrix', 'asfreq', 'asof', 'assign', 'astype', 'at', 'at_time', 'axes', 'between_time', 'bfill', 'blocks', 'bool', 'boxplot', 'clip', 'clip_lower', 'clip_upper', 'columns', 'combine', 'combine_first', 'compound', 'consolidate', 'convert_objects', 'copy', 'corr', 'corrwith', 'count', 'cov', 'cummax', 'cummin', 'cumprod', 'cumsum', 'describe', 'diff', 'div', 'divide', 'dot', 'drop', 'drop_duplicates', 'dropna', 'dtypes', 'duplicated', 'empty', 'eq', 'equals', 'eval', 'ewm', 'expanding', 'ffill', 'fillna', 'filter', 'first', 'first_valid_index', 'floordiv', 'from_csv', 'from_dict', 'from_items', 'from_records', 'ftypes', 'ge', 'get', 'get_dtype_counts', 'get_ftype_counts', 'get_value', 'get_values', 'groupby', 'gt', 'head', 'hist', 'iat', 'idxmax', 'idxmin', 'iloc', 'infer_objects', 'info', 'insert', 'interpolate', 'is_copy', 'isin', 'isna', 'isnull', 'items', 'iteritems', 'iterrows', 'itertuples', 'ix', 'join', 'keys', 'kurt', 'kurtosis', 'last', 'last_valid_index', 'le', 'loc', 'lookup', 'lt', 'mad', 'mask', 'max', 'mean', 'median', 'melt', 'memory_usage', 'merge', 'min', 'mod', 'mode', 'mul', 'multiply', 'ndim', 'ne', 'nlargest', 'notna', 'notnull', 'nsmallest', 'nunique', 'pct_change', 'pipe', 'pivot', 'pivot_table', 'plot', 'pop', 'pow', 'prod', 'product', 'quantile', 'query', 'radd', 'rank', 'rdiv', 'reindex', 'reindex_axis', 'reindex_like', 'rename', 'rename_axis', 'reorder_levels', 'replace', 'resample', 'reset_index', 'rfloordiv', 'rmod', 'rmul', 'rolling', 'round', 'rpow', 'rsub', 'rtruediv', 'sample', 'select', 'select_dtypes', 'sem', 'set_axis', 'set_index', 'set_value', 'shape', 'shift', 'size', 'skew', 'slice_shift', 'sort_index', 'sort_values', 'sortlevel', 'squeeze', 'stack', 'std', 'style', 'sub', 'subtract', 'sum', 'swapaxes', 'swaplevel', 'tail', 'take', 'to_clipboard', 'to_csv', 'to_dense', 'to_dict', 'to_excel', 'to_feather', 'to_gbq', 'to_hdf', 'to_html', 'to_json', 'to_latex', 'to_msgpack', 'to_panel', 'to_parquet', 'to_period', 'to_pickle', 'to_records', 'to_sparse', 'to_sql', 'to_stata', 'to_string', 'to_timestamp', 'to_xarray', 'transform', 'transpose', 'truediv', 'truncate', 'tshift', 'tz_convert', 'tz_localize', 'unstack', 'update', 'values', 'var', 'where', 'xs']
# Removed 'index' (kwarg)
scipy_names = ['ALLOW_THREADS', 'AxisError', 'BUFSIZE', 'CLIP', 'ComplexWarning', 'DataSource', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG', 'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT', 'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'False_', 'Inf', 'Infinity', 'LowLevelCallable', 'MAXDIMS', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'MachAr', 'ModuleDeprecationWarning', 'NAN', 'NINF', 'NZERO', 'NaN', 'PINF', 'PZERO', 'PackageLoader', 'RAISE', 'RankWarning', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID', 'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'ScalarType', 'TooHardError', 'True_', 'UFUNC_BUFSIZE_DEFAULT', 'UFUNC_PYVALS_NAME', 'VisibleDeprecationWarning', 'WRAP', '__SCIPY_SETUP__', '__all__', '__builtins__', '__cached__', '__config__', '__doc__', '__file__', '__loader__', '__name__', '__numpy_version__', '__package__', '__path__', '__spec__', '__version__', '_distributor_init', '_lib', 'absolute', 'absolute_import', 'add', 'add_docstring', 'add_newdoc', 'add_newdoc_ufunc', 'add_newdocs', 'alen', 'all', 'allclose', 'alltrue', 'amax', 'amin', 'angle', 'any', 'append', 'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', 'argpartition', 'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal', 'array_equiv', 'array_repr', 'array_split', 'array_str', 'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray', 'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett', 'base_repr', 'binary_repr', 'bincount', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman', 'block', 'bmat', 'bool8', 'bool_', 'broadcast', 'broadcast_arrays', 'broadcast_to', 'busday_count', 'busday_offset', 'busdaycalendar', 'byte', 'byte_bounds', 'bytes0', 'bytes_', 'c_', 'can_cast', 'cast', 'cbrt', 'cdouble', 'ceil', 'cfloat', 'char', 'character', 'chararray', 'choose', 'clip', 'clongdouble', 'clongfloat', 'column_stack', 'common_type', 'compare_chararrays', 'complex128', 'complex256', 'complex64', 'complex_', 'complexfloating', 'compress', 'concatenate', 'conj', 'conjugate', 'convolve', 'copy', 'copysign', 'copyto', 'corrcoef', 'correlate', 'cos', 'cosh', 'count_nonzero', 'cov', 'cross', 'csingle', 'ctypeslib', 'cumprod', 'cumproduct', 'cumsum', 'datetime64', 'datetime_as_string', 'datetime_data', 'deg2rad', 'degrees', 'delete', 'deprecate', 'deprecate_with_doc', 'diag', 'diag_indices', 'diag_indices_from', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide', 'division', 'divmod', 'dot', 'double', 'dsplit', 'dstack', 'dtype', 'e', 'ediff1d', 'einsum', 'einsum_path', 'emath', 'empty', 'empty_like', 'equal', 'errstate', 'euler_gamma', 'exp', 'exp2', 'expand_dims', 'expm1', 'extract', 'eye', 'fabs', 'fastCopyAndTranspose', 'fft', 'fill_diagonal', 'find_common_type', 'finfo', 'fix', 'flatiter', 'flatnonzero', 'flexible', 'flip', 'fliplr', 'flipud', 'float128', 'float16', 'float32', 'float64', 'float_', 'float_power', 'floating', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod', 'format_float_positional', 'format_float_scientific', 'format_parser', 'frexp', 'frombuffer', 'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromregex', 'fromstring', 'full', 'full_like', 'fv', 'generic', 'genfromtxt', 'geomspace', 'get_array_wrap', 'get_include', 'get_printoptions', 'getbufsize', 'geterr', 'geterrcall', 'geterrobj', 'gradient', 'greater', 'greater_equal', 'half', 'hamming', 'hanning', 'heaviside', 
'histogram', 'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0', 'identity', 'ifft', 'iinfo', 'imag', 'in1d', 'index_exp', 'indices', 'inexact', 'inf', 'info', 'infty', 'inner', 'insert', 'int0', 'int16', 'int32', 'int64', 'int8', 'int_', 'int_asbuffer', 'intc', 'integer', 'interp', 'intersect1d', 'intp', 'invert', 'ipmt', 'irr', 'is_busday', 'isclose', 'iscomplex', 'iscomplexobj', 'isfinite', 'isfortran', 'isin', 'isinf', 'isnan', 'isnat', 'isneginf', 'isposinf', 'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_', 'issubdtype', 'issubsctype', 'iterable', 'ix_', 'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort', 'linspace', 'little_endian', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logn', 'logspace', 'long', 'longcomplex', 'longdouble', 'longfloat', 'longlong', 'lookfor', 'ma', 'mafromtxt', 'mask_indices', 'mat', 'math', 'matmul', 'matrix', 'maximum', 'maximum_sctype', 'may_share_memory', 'mean', 'median', 'memmap', 'meshgrid', 'mgrid', 'min_scalar_type', 'minimum', 'mintypecode', 'mirr', 'mod', 'modf', 'moveaxis', 'msort', 'multiply', 'nan', 'nan_to_num', 'nanargmax', 'nanargmin', 'nancumprod', 'nancumsum', 'nanmax', 'nanmean', 'nanmedian', 'nanmin', 'nanpercentile', 'nanprod', 'nanstd', 'nansum', 'nanvar', 'nbytes', 'ndarray', 'ndenumerate', 'ndfromtxt', 'ndim', 'ndindex', 'nditer', 'negative', 'nested_iters', 'newaxis', 'nextafter', 'nonzero', 'not_equal', 'nper', 'npv', 'number', 'obj2sctype', 'object0', 'object_', 'ogrid', 'ones', 'ones_like', 'outer', 'packbits', 'pad', 'partition', 'percentile', 'pi', 'piecewise', 'pkgload', 'place', 'pmt', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv', 'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'positive', 'power', 'ppmt', 'print_function', 'prod', 'product', 'promote_types', 'ptp', 'put', 'putmask', 'pv', 'r_', 'rad2deg', 'radians', 'rand', 'randn', 'random', 'rank', 'rate', 'ravel', 'ravel_multi_index', 'real', 'real_if_close', 'rec', 'recarray', 'recfromcsv', 'recfromtxt', 'reciprocal', 'record', 'remainder', 'repeat', 'require', 'reshape', 'resize', 'result_type', 'right_shift', 'rint', 'roll', 'rollaxis', 'roots', 'rot90', 'round_', 'row_stack', 's_', 'safe_eval', 'save', 'savetxt', 'savez', 'savez_compressed', 'sctype2char', 'sctypeDict', 'sctypeNA', 'sctypes', 'searchsorted', 'select', 'set_numeric_ops', 'set_printoptions', 'set_string_function', 'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj', 'setxor1d', 'shape', 'shares_memory', 'short', 'show_config', 'show_numpy_config', 'sign', 'signbit', 'signedinteger', 'sin', 'sinc', 'single', 'singlecomplex', 'sinh', 'size', 'sometrue', 'sort', 'sort_complex', 'source', 'spacing', 'split', 'sqrt', 'square', 'squeeze', 'stack', 'std', 'str0', 'str_', 'string_', 'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot', 'test', 'tile', 'timedelta64', 'trace', 'tracemalloc_domain', 'transpose', 'trapz', 'tri', 'tril', 'tril_indices', 'tril_indices_from', 'trim_zeros', 'triu', 'triu_indices', 'triu_indices_from', 'true_divide', 'trunc', 'typeDict', 'typeNA', 'typecodes', 'typename', 'ubyte', 'ufunc', 'uint', 'uint0', 'uint16', 'uint32', 'uint64', 'uint8', 'uintc', 'uintp', 'ulonglong', 'unicode', 'unicode_', 'union1d', 'unique', 'unpackbits', 'unravel_index', 'unsignedinteger', 'unwrap', 'ushort', 'vander', 'var', 'vdot', 'vectorize', 'version', 'void', 'void0', 'vsplit', 'vstack', 'where', 'who', 'zeros', 
'zeros_like']
EXTRA_KEYWORDS = set(list(NumPyLexer.EXTRA_KEYWORDS) + pandas_names + pandas_df_names + scipy_names)
# Minor change to get the operator lexer working
class SuperFortranFixedLexer(FortranLexer):
tokens = FortranLexer.tokens
tokens['root'].insert(0, (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator))
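    # Hedged illustration, not in the original: with the operator rule above,
    # a line such as
    #   if (a .gt. b .and. c .ne. d) go to 10
    # now lexes ".gt.", ".and." and ".ne." as Operator tokens rather than plain text.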
# R lexer with namespace bootstrapping from file imports
class DynamicRLexer(SLexer):
# Set base keyword tokenizer
tokens = SLexer.tokens
tokens['keywords'] = [(words(SLexer.builtins_base, suffix=r'(?![\w. =])'), Keyword.Pseudo),
(r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
r'(?![\w.])',
Keyword.Reserved),
(r'(array|category|character|complex|double|function|integer|list|'
r'logical|matrix|numeric|vector|data.frame|c)'
r'(?![\w.])',
Keyword.Type),
(r'(?<=(library|require)\()\w*(?=\))',
Keyword.Namespace)]
builtins_base = SLexer.builtins_base
EXTRA_KEYWORDS = set(list(builtins_base))
def get_tokens_unprocessed(self, text):
for index, token, value in SLexer.get_tokens_unprocessed(self, text):
if token is Text and value in self.EXTRA_KEYWORDS:
# Rescue misclassified tokens from imports
yield index, Keyword.Pseudo, value
else:
yield index, token, value
def expand_namespace(self, pkg):
# Set up to install R packages as needed
base = importr('base')
utils = importr('utils')
devtools = importr('devtools')
utils.chooseCRANmirror(ind=1)
# Load or install as necessary
pkg_ix = None
try:
pkg_ix = importr(pkg, on_conflict="warn")
except Exception:
utils.install_packages(StrVector([pkg]))
try:
pkg_ix = importr(pkg)
except Exception:
if pkg in ["randomForestCI", "causalForest"]:
try:
robjects.r('install_github("swager/{}")'.format(pkg))
pkg_ix = importr(pkg)
except Exception:
pass # Give up
# List names in this package
ls = []
try:
ls = list(base.ls("package:{}".format(pkg)))
except Exception:
print("Could not get names for R package: {}.".format(pkg))
utility = list(base.ls("package:{}".format('utils')))
# Add names to keyword list
self.EXTRA_KEYWORDS = set(list(self.EXTRA_KEYWORDS) + ls + utility)
# Encoding methods
def encode_stata(filename, noisy):
stata_methods = ["Token.Keyword", "Token.Name.Function", "Token.Operator"]
# Read file
stata_code = None
with open(filename, 'r', encoding="utf8", errors="ignore") as f:
stata_code = f.read()
# Lex code
tokens = lex(stata_code, SuperStataLexer())
# Classify tokens by type
tks = defaultdict(list)
script_order = []
for tktype, value in tokens:
val = value.replace("\t", "").replace("\n", "").replace(' ', "") # Strip whitespace
# We generally want to know what a user looked at in the response object
if str(tktype) == "Token.Name.Function":
if val == "r(":
nv = None
while nv != ")":
try:
tp, nv = next(tokens)
val += nv
                    except StopIteration:
                        # Unterminated "r(" at end of file: report it and stop scanning
                        print(filename)
                        print(val)
                        break
else:
val = val.rstrip("(")
# Group consecutive text-type tokens
# TODO: I'm not 100% satisfied that I'm not losing information here -- come back to this later
# if str(tktype) == "Token.Text":
# tp = str(tktype)
# while str(tp) == "Token.Text":
# tp, nv =
# val += nv
# Store this token by type and by order, if interesting
tks[str(tktype)].append(val)
if str(tktype) in stata_methods:
script_order.append(val)
# Print token counts and script order
if noisy:
print("Token typology")
print(tks.keys())
print()
        for tp in stata_methods:
print(tp)
print(Counter(tks[tp]))
print()
print('Reduced script:')
print(script_order)
print()
# Return reduced-form script length
return script_order
def encode_matlab(filename, noisy):
matlab_methods = ['Token.Operator', 'Token.Name.Builtin', 'Token.Keyword']
# Read file
matlab_code = None
with open(filename, "r", encoding="utf8", errors="ignore") as f:
matlab_code = f.read()
# Lex code
# OctaveLexer seems to have better performance than MatlabLexer?
tokens = lex(matlab_code, OctaveLexer())
# Classify tokens by type
tks = defaultdict(list)
script_order = []
for tktype, value in tokens:
# TODO: I'd like to get a better sense for how user-defined operations are being used
# i.e. if a user-defined function is reused, maybe I should count the operators twice?
# Store token by type and order
tks[str(tktype)].append(value)
if str(tktype) in matlab_methods:
script_order.append(value)
# Print token counts and script order
if noisy:
print("Token typology")
print(tks.keys())
print()
        for tp in matlab_methods:
print(tp)
print(Counter(tks[tp]))
print()
print('Reduced script:')
print(script_order)
print()
# Return reduced-form script length
return script_order
def encode_sas(filename, noisy):
sas_methods = ['Token.Keyword', "Token.Keyword.Reserved", "Token.Operator", "Token.Name.Function", "Token.Name.Builtin"]
# Read file
sas_code = None
with open(filename, "r", encoding="utf8", errors="ignore") as f:
sas_code = f.read()
# Lex code
tokens = lex(sas_code, SuperSASLexer())
# Classify tokens by type
tks = defaultdict(list)
script_order = []
for tktype, value in tokens:
val = value.replace("\t", "").replace("\n", "").replace(' ', "").replace(';', "") # Strip whitespace
# Store token by type and order
tks[str(tktype)].append(val)
if str(tktype) in sas_methods:
script_order.append(val)
# Print token counts and script order
if noisy:
print("Token typology")
print(tks.keys())
print()
        for tp in sas_methods:
print(tp)
print(Counter(tks[tp]))
print()
print('Reduced script:')
print(script_order)
print()
# Return reduced-form script length
return script_order
def encode_r(filename, noisy):
r_methods = ['Token.Keyword.Pseudo', 'Token.Operator', 'Token.Keyword.Namespace']
# Read file
r_code = None
with open(filename, "r", encoding="utf8", errors="ignore") as f:
r_code = f.read()
# Lex code
lexer = DynamicRLexer()
tks = lex(r_code, lexer)
# Bootstrap lexer namespace
packages = []
for tktype, pkg in tks:
if str(tktype) == 'Token.Keyword.Namespace':
lexer.expand_namespace(pkg)
if noisy:
print("Added {} namespace to keywords.".format(pkg))
# Re-lex
tokens = lex(r_code, lexer)
# Classify tokens by type
tks = defaultdict(list)
script_order = []
for tktype, value in tokens:
val = value.replace("\t", "").replace("\n", "").replace(' ', "").replace(';', "") # Strip whitespace
# print("{}: {}".format(str(tktype), val))
if val in [':', '<-', '=', '$']:
tktype = "Token.Text" # Ignore range, class and assignment operators
# Store token by type and order
tks[str(tktype)].append(val)
if str(tktype) in r_methods:
script_order.append(val)
# Print token counts and script order
if noisy:
print("Token typology")
print(tks.keys())
print()
for tp in r_methods:
print(tp)
print(Counter(tks[tp]))
print()
print('Reduced script:')
print(script_order)
print()
# Return reduced-form script length
return script_order
def encode_python(filename, noisy):
python_methods = ['Token.Name.Namespace', 'Token.Operator', 'Token.Keyword.Pseudo']
# Read file
python_code = None
with open(filename, "r", encoding="utf8", errors="ignore") as f:
python_code = f.read()
# Lex code
tokens = lex(python_code, SuperNumPyLexer())
# Classify tokens by type
tks = defaultdict(list)
script_order = []
for tktype, value in tokens:
val = value.replace("\t", "").replace("\n", "").replace(' ', "").replace(';', "") # Strip whitespace
if val in ['.', '=']:
tktype = "Token.Text" # Ignore dot and assignment operators
# Store token by type and order
tks[str(tktype)].append(val)
if str(tktype) in python_methods:
script_order.append(val)
# Print token counts and script order
if noisy:
print("Token typology")
print(tks.keys())
print()
        for tp in python_methods:
print(tp)
print(Counter(tks[tp]))
print()
print('Reduced script:')
print(script_order)
print()
# Return reduced-form script length
return script_order
def encode_fortran(filename, noisy):
fortran_methods = ['Token.Keyword', 'Token.Name.Builtin', 'Token.Keyword.Type', 'Token.Operator']
# Read file
fortran_code = None
with open(filename, "r", encoding="utf8", errors="ignore") as f:
fortran_code = f.read()
# Lex code
tokens = lex(fortran_code, SuperFortranFixedLexer())
# Classify tokens by type
tks = defaultdict(list)
script_order = []
for tktype, value in tokens:
val = value.replace("\t", "").replace("\n", "").replace(' ', "").replace(';', "") # Strip whitespace
if val in ['=']:
tktype = "Token.Text" # Ignore dot and assignment operators
# Store token by type and order
tks[str(tktype)].append(val)
if str(tktype) in fortran_methods:
script_order.append(val)
# Print token counts and script order
if noisy:
print("Token typology")
print(tks.keys())
print()
        for tp in fortran_methods:
print(tp)
print(Counter(tks[tp]))
print()
print('Reduced script:')
print(script_order)
print()
# Return reduced-form script length
return script_order
# Dispatch method to language-specific lexing
# These are pretty repetitive but idiosyncratic enough that I want to split them up now
def lexify(filename, noisy=False):
ext = filename.rsplit(".")[-1]
if ext in ["do", "ado"]:
return encode_stata(filename, noisy)
elif ext == "m":
return encode_matlab(filename, noisy)
elif ext == "sas":
return encode_sas(filename, noisy)
elif ext == "py":
return encode_python(filename, noisy)
elif ext in ["r", "R"]:
return encode_r(filename, noisy)
elif ext in ["f", "f90"]:
return encode_fortran(filename, noisy)
elif ext == "sps":
raise NotImplementedError("SPSS files not currently handled.")
else:
raise NotImplementedError("Methodologist doesn't know this file type: *.{}".format(ext))
if __name__ == "__main__":
script_len = len(lexify(sys.argv[1], noisy=True))
print("Read a script with {} functions.".format(str(script_len)))
| 67.043384
| 7,111
| 0.629469
|
4a1494b160ab316493ffe8add452445f9570ff25
| 1,304
|
py
|
Python
|
037. Sudoku Solver.py
|
youhusky/Facebook_Prepare
|
4045bcb652537711b3680b2aa17204ae73c6bde8
|
[
"MIT"
] | 6
|
2017-10-30T05:35:46.000Z
|
2020-12-15T06:51:52.000Z
|
037. Sudoku Solver.py
|
youhusky/Facebook_Prepare
|
4045bcb652537711b3680b2aa17204ae73c6bde8
|
[
"MIT"
] | 1
|
2017-10-30T04:11:31.000Z
|
2017-10-30T05:46:24.000Z
|
037. Sudoku Solver.py
|
youhusky/Facebook_Prepare
|
4045bcb652537711b3680b2aa17204ae73c6bde8
|
[
"MIT"
] | 2
|
2020-09-03T07:14:02.000Z
|
2021-05-21T19:19:57.000Z
|
class Solution:
    # @param board, a 9x9 2D array
    # Solve the Sudoku by modifying the input board in-place.
    # Do not return any value.
    def solveSudoku(self, board):
        def isValid(board, x, y):
            # The candidate digit must not repeat in its row or column
            for i in range(9):
                if i != x and board[i][y] == board[x][y]:
                    return False
            for j in range(9):
                if j != y and board[x][j] == board[x][y]:
                    return False
            # ...nor in its 3x3 box
            i = 3 * (x // 3)
            while i < 3 * (x // 3 + 1):
                j = 3 * (y // 3)
                while j < 3 * (y // 3 + 1):
                    if (i != x or j != y) and board[i][j] == board[x][y]:
                        return False
                    j += 1
                i += 1
            return True
        def solver(board):
            for i in range(len(board)):
                for j in range(len(board[0])):
                    if board[i][j] == '.':
                        for k in range(9):
                            board[i][j] = chr(ord('1') + k)
                            if isValid(board, i, j) and solver(board):
                                return True
                            board[i][j] = '.'
                        return False
            return True
        solver(board)
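if __name__ == "__main__":
    # Hedged usage sketch, not part of the original file: '.' marks an empty
    # cell; the solver fills the board in place.
    puzzle = [list(row) for row in [
        "53..7....", "6..195...", ".98....6.",
        "8...6...3", "4..8.3..1", "7...2...6",
        ".6....28.", "...419..5", "....8..79",
    ]]
    Solution().solveSudoku(puzzle)
    print("\n".join("".join(row) for row in puzzle))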
| 37.257143
| 73
| 0.365798
|
4a1494eef4053a1522d82d07a4591a38a2b1717b
| 1,962
|
py
|
Python
|
Bio/EUtils/setup.py
|
eoc21/biopython
|
c0f8db8f55a506837c320459957a0ce99b0618b6
|
[
"PostgreSQL"
] | 3
|
2017-10-23T21:53:57.000Z
|
2019-09-23T05:14:12.000Z
|
Bio/EUtils/setup.py
|
eoc21/biopython
|
c0f8db8f55a506837c320459957a0ce99b0618b6
|
[
"PostgreSQL"
] | null | null | null |
Bio/EUtils/setup.py
|
eoc21/biopython
|
c0f8db8f55a506837c320459957a0ce99b0618b6
|
[
"PostgreSQL"
] | 1
|
2019-08-19T22:05:14.000Z
|
2019-08-19T22:05:14.000Z
|
#!/usr/bin/env python
import sys
from distutils.core import setup
try:
import EUtils
except ImportError:
import __init__ as EUtils
def _dict(**kwargs):
return kwargs
d = _dict(
name = "EUtils",
version = EUtils.__version__,
description = "Client interface to NCBI's EUtils/Entrez server",
author = "Andrew Dalke",
author_email = "dalke@dalkescientific.com",
maintainer = "Dalke Scientific Software, LLC",
maintainer_email = "dalke@dalkescientific.com",
url = "http://www.dalkescientific.com/EUtils/",
long_description = """\
EUtils is a client library for the Entrez databases at NCBI.
NCBI provides the EUtils web service so that software can query Entrez
directly, rather than going through the web interface and dealing with
the hassles of web scraping. For more information see
http://www.ncbi.nlm.nih.gov/entrez/query/static/eutils_help.html
This package provides two levels of interface. The lowest level offers a
programmatic interface for constructing the query URL and making the
request. The higher-level interfaces support history tracking and parsing
of query results, which greatly simplify working with the EUtils
server.
""",
package_dir = {"": ".."},
packages = ["EUtils", "EUtils.DTDs"],
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: Freely Distributable",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics", # a '-'? !
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Internet",
],
)
if sys.version_info < (2,2,4):
del d["classifiers"]
if __name__ == "__main__":
setup(**d)
| 31.142857
| 72
| 0.672273
|
4a14961344ff0b1d20e4e902cb86d959ce11b505
| 13,601
|
py
|
Python
|
tests/integration/standard/test_prepared_statements.py
|
fatelei/python-driver
|
3bddef6185f2691e1713dfe51d1fa26d1555724c
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/standard/test_prepared_statements.py
|
fatelei/python-driver
|
3bddef6185f2691e1713dfe51d1fa26d1555724c
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/standard/test_prepared_statements.py
|
fatelei/python-driver
|
3bddef6185f2691e1713dfe51d1fa26d1555724c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.integration import use_singledc, PROTOCOL_VERSION
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra import InvalidRequest
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import PreparedStatement, UNSET_VALUE
from tests.integration import get_server_versions
def setup_module():
use_singledc()
class PreparedStatementTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cass_version = get_server_versions()
def setUp(self):
self.cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
def tearDown(self):
self.cluster.shutdown()
def test_basic(self):
"""
Test basic PreparedStatement usage
"""
self.session.execute(
"""
CREATE KEYSPACE preparedtests
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""")
self.session.set_keyspace("preparedtests")
self.session.execute(
"""
CREATE TABLE cf0 (
a text,
b text,
c text,
PRIMARY KEY (a, b)
)
""")
prepared = self.session.prepare(
"""
INSERT INTO cf0 (a, b, c) VALUES (?, ?, ?)
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind(('a', 'b', 'c'))
self.session.execute(bound)
prepared = self.session.prepare(
"""
SELECT * FROM cf0 WHERE a=?
""")
self.assertIsInstance(prepared, PreparedStatement)
        bound = prepared.bind(('a',))
results = self.session.execute(bound)
self.assertEqual(results, [('a', 'b', 'c')])
# test with new dict binding
prepared = self.session.prepare(
"""
INSERT INTO cf0 (a, b, c) VALUES (?, ?, ?)
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind({
'a': 'x',
'b': 'y',
'c': 'z'
})
self.session.execute(bound)
prepared = self.session.prepare(
"""
SELECT * FROM cf0 WHERE a=?
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind({'a': 'x'})
results = self.session.execute(bound)
self.assertEqual(results, [('x', 'y', 'z')])
def test_missing_primary_key(self):
"""
Ensure an InvalidRequest is thrown
when prepared statements are missing the primary key
"""
self._run_missing_primary_key(self.session)
def _run_missing_primary_key(self, session):
statement_to_prepare = """INSERT INTO test3rf.test (v) VALUES (?)"""
        # logic needed to work with changes in CASSANDRA-6237
if self.cass_version[0] >= (3, 0, 0):
self.assertRaises(InvalidRequest, session.prepare, statement_to_prepare)
else:
prepared = session.prepare(statement_to_prepare)
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind((1,))
self.assertRaises(InvalidRequest, session.execute, bound)
def test_missing_primary_key_dicts(self):
"""
Ensure an InvalidRequest is thrown
when prepared statements are missing the primary key
with dict bindings
"""
self._run_missing_primary_key_dicts(self.session)
def _run_missing_primary_key_dicts(self, session):
statement_to_prepare = """ INSERT INTO test3rf.test (v) VALUES (?)"""
        # logic needed to work with changes in CASSANDRA-6237
if self.cass_version[0] >= (3, 0, 0):
self.assertRaises(InvalidRequest, session.prepare, statement_to_prepare)
else:
prepared = session.prepare(statement_to_prepare)
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind({'v': 1})
self.assertRaises(InvalidRequest, session.execute, bound)
def test_too_many_bind_values(self):
"""
Ensure a ValueError is thrown when attempting to bind too many variables
"""
self._run_too_many_bind_values(self.session)
def _run_too_many_bind_values(self, session):
statement_to_prepare = """ INSERT INTO test3rf.test (v) VALUES (?)"""
        # logic needed to work with changes in CASSANDRA-6237
if self.cass_version[0] >= (3, 0, 0):
self.assertRaises(InvalidRequest, session.prepare, statement_to_prepare)
else:
prepared = session.prepare(statement_to_prepare)
self.assertIsInstance(prepared, PreparedStatement)
self.assertRaises(ValueError, prepared.bind, (1, 2))
def test_imprecise_bind_values_dicts(self):
"""
Ensure an error is thrown when attempting to bind the wrong values
with dict bindings
"""
prepared = self.session.prepare(
"""
INSERT INTO test3rf.test (k, v) VALUES (?, ?)
""")
self.assertIsInstance(prepared, PreparedStatement)
# too many values is ok - others are ignored
prepared.bind({'k': 1, 'v': 2, 'v2': 3})
# right number, but one does not belong
if PROTOCOL_VERSION < 4:
# pre v4, the driver bails with key error when 'v' is found missing
self.assertRaises(KeyError, prepared.bind, {'k': 1, 'v2': 3})
else:
# post v4, the driver uses UNSET_VALUE for 'v' and 'v2' is ignored
prepared.bind({'k': 1, 'v2': 3})
# also catch too few variables with dicts
self.assertIsInstance(prepared, PreparedStatement)
if PROTOCOL_VERSION < 4:
self.assertRaises(KeyError, prepared.bind, {})
else:
# post v4, the driver attempts to use UNSET_VALUE for unspecified keys
self.assertRaises(ValueError, prepared.bind, {})
def test_none_values(self):
"""
Ensure binding None is handled correctly
"""
prepared = self.session.prepare(
"""
INSERT INTO test3rf.test (k, v) VALUES (?, ?)
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind((1, None))
self.session.execute(bound)
prepared = self.session.prepare(
"""
SELECT * FROM test3rf.test WHERE k=?
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind((1,))
results = self.session.execute(bound)
self.assertEqual(results[0].v, None)
def test_unset_values(self):
"""
Test to validate that UNSET_VALUEs are bound, and have the expected effect
Prepare a statement and insert all values. Then follow with execute excluding
parameters. Verify that the original values are unaffected.
@since 2.6.0
@jira_ticket PYTHON-317
        @expected_result UNSET_VALUE is implicitly added to bind parameters, and properly encoded, leaving unset values unaffected.
@test_category prepared_statements:binding
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Binding UNSET values is not supported in protocol version < 4")
# table with at least two values so one can be used as a marker
self.session.execute("CREATE TABLE IF NOT EXISTS test1rf.test_unset_values (k int PRIMARY KEY, v0 int, v1 int)")
insert = self.session.prepare("INSERT INTO test1rf.test_unset_values (k, v0, v1) VALUES (?, ?, ?)")
select = self.session.prepare("SELECT * FROM test1rf.test_unset_values WHERE k=?")
bind_expected = [
# initial condition
((0, 0, 0), (0, 0, 0)),
# unset implicit
((0, 1,), (0, 1, 0)),
({'k': 0, 'v0': 2}, (0, 2, 0)),
({'k': 0, 'v1': 1}, (0, 2, 1)),
# unset explicit
((0, 3, UNSET_VALUE), (0, 3, 1)),
((0, UNSET_VALUE, 2), (0, 3, 2)),
({'k': 0, 'v0': 4, 'v1': UNSET_VALUE}, (0, 4, 2)),
({'k': 0, 'v0': UNSET_VALUE, 'v1': 3}, (0, 4, 3)),
# nulls still work
((0, None, None), (0, None, None)),
]
for params, expected in bind_expected:
self.session.execute(insert, params)
results = self.session.execute(select, (0,))
self.assertEqual(results[0], expected)
self.assertRaises(ValueError, self.session.execute, select, (UNSET_VALUE, 0, 0))
def test_no_meta(self):
prepared = self.session.prepare(
"""
INSERT INTO test3rf.test (k, v) VALUES (0, 0)
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind(None)
bound.consistency_level = ConsistencyLevel.ALL
self.session.execute(bound)
prepared = self.session.prepare(
"""
SELECT * FROM test3rf.test WHERE k=0
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind(None)
bound.consistency_level = ConsistencyLevel.ALL
results = self.session.execute(bound)
self.assertEqual(results[0].v, 0)
def test_none_values_dicts(self):
"""
Ensure binding None is handled correctly with dict bindings
"""
# test with new dict binding
prepared = self.session.prepare(
"""
INSERT INTO test3rf.test (k, v) VALUES (?, ?)
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind({'k': 1, 'v': None})
self.session.execute(bound)
prepared = self.session.prepare(
"""
SELECT * FROM test3rf.test WHERE k=?
""")
self.assertIsInstance(prepared, PreparedStatement)
bound = prepared.bind({'k': 1})
results = self.session.execute(bound)
self.assertEqual(results[0].v, None)
def test_async_binding(self):
"""
Ensure None binding over async queries
"""
prepared = self.session.prepare(
"""
INSERT INTO test3rf.test (k, v) VALUES (?, ?)
""")
self.assertIsInstance(prepared, PreparedStatement)
future = self.session.execute_async(prepared, (873, None))
future.result()
prepared = self.session.prepare(
"""
SELECT * FROM test3rf.test WHERE k=?
""")
self.assertIsInstance(prepared, PreparedStatement)
future = self.session.execute_async(prepared, (873,))
results = future.result()
self.assertEqual(results[0].v, None)
def test_async_binding_dicts(self):
"""
Ensure None binding over async queries with dict bindings
"""
prepared = self.session.prepare(
"""
INSERT INTO test3rf.test (k, v) VALUES (?, ?)
""")
self.assertIsInstance(prepared, PreparedStatement)
future = self.session.execute_async(prepared, {'k': 873, 'v': None})
future.result()
prepared = self.session.prepare(
"""
SELECT * FROM test3rf.test WHERE k=?
""")
self.assertIsInstance(prepared, PreparedStatement)
future = self.session.execute_async(prepared, {'k': 873})
results = future.result()
self.assertEqual(results[0].v, None)
def test_raise_error_on_prepared_statement_execution_dropped_table(self):
"""
test for error in executing prepared statement on a dropped table
test_raise_error_on_execute_prepared_statement_dropped_table tests that an InvalidRequest is raised when a
        prepared statement is executed after its corresponding table is dropped. This happens because, when a prepared
        statement is invalidated, the driver automatically tries to re-prepare it against the now non-existent table.
@expected_errors InvalidRequest If a prepared statement is executed on a dropped table
@since 2.6.0
@jira_ticket PYTHON-207
@expected_result InvalidRequest error should be raised upon prepared statement execution.
@test_category prepared_statements
"""
self.session.execute("CREATE TABLE test3rf.error_test (k int PRIMARY KEY, v int)")
prepared = self.session.prepare("SELECT * FROM test3rf.error_test WHERE k=?")
self.session.execute("DROP TABLE test3rf.error_test")
with self.assertRaises(InvalidRequest):
self.session.execute(prepared, [0])
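# Hedged standalone sketch, not part of the test suite: the bare
# prepare/bind/execute flow these tests exercise, assuming a node reachable
# with the same PROTOCOL_VERSION and the test3rf keyspace used above.
def _example_prepared_flow():
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect("test3rf")
    insert = session.prepare("INSERT INTO test (k, v) VALUES (?, ?)")
    session.execute(insert, (1, 42))           # positional binding
    session.execute(insert, {"k": 2, "v": 7})  # dict binding
    cluster.shutdown()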
| 35.054124
| 130
| 0.598265
|
4a1496708b88ab78882594ac265fccde1e9e6caa
| 8,322
|
py
|
Python
|
api/app.py
|
kallyas/employees-attendance-management-system
|
6e81a67ff840d8dc7d897a1692104754b8ecb36a
|
[
"MIT"
] | null | null | null |
api/app.py
|
kallyas/employees-attendance-management-system
|
6e81a67ff840d8dc7d897a1692104754b8ecb36a
|
[
"MIT"
] | null | null | null |
api/app.py
|
kallyas/employees-attendance-management-system
|
6e81a67ff840d8dc7d897a1692104754b8ecb36a
|
[
"MIT"
] | null | null | null |
# * ---------- IMPORTS --------- *
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import os
import psycopg2
import cv2
import numpy as np
import re
# Get the absolute path of this file's directory (we will use it later)
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
# * ---------- Create App --------- *
app = Flask(__name__)
CORS(app, support_credentials=True)
# * ---------- DATABASE CONFIG --------- *
DATABASE_USER = os.environ['DATABASE_USER']
DATABASE_PASSWORD = os.environ['DATABASE_PASSWORD']
DATABASE_HOST = os.environ['DATABASE_HOST']
DATABASE_PORT = os.environ['DATABASE_PORT']
DATABASE_NAME = os.environ['DATABASE_NAME']
def DATABASE_CONNECTION():
return psycopg2.connect(user=DATABASE_USER,
password=DATABASE_PASSWORD,
host=DATABASE_HOST,
port=DATABASE_PORT,
database=DATABASE_NAME)
# * -------------------- ROUTES ------------------- *
# * ---------- Get data from the face recognition ---------- *
@app.route('/receive_data', methods=['POST'])
def get_receive_data():
if request.method == 'POST':
json_data = request.get_json()
# Check if the user is already in the DB
try:
# Connect to the DB
connection = DATABASE_CONNECTION()
cursor = connection.cursor()
            # Query to check if the user has been seen by the camera today
user_saw_today_sql_query =\
f"SELECT * FROM users WHERE date = '{json_data['date']}' AND name = '{json_data['name']}'"
cursor.execute(user_saw_today_sql_query)
result = cursor.fetchall()
connection.commit()
# If use is already in the DB for today:
if result:
print('user IN')
image_path = f"{FILE_PATH}/assets/img/{json_data['date']}/{json_data['name']}/departure.jpg"
# Save image
os.makedirs(f"{FILE_PATH}/assets/img/{json_data['date']}/{json_data['name']}", exist_ok=True)
cv2.imwrite(image_path, np.array(json_data['picture_array']))
json_data['picture_path'] = image_path
# Update user in the DB
update_user_querry = f"UPDATE users SET departure_time = '{json_data['hour']}', departure_picture = '{json_data['picture_path']}' WHERE name = '{json_data['name']}' AND date = '{json_data['date']}'"
cursor.execute(update_user_querry)
else:
print("user OUT")
# Save image
image_path = f"{FILE_PATH}/assets/img/history/{json_data['date']}/{json_data['name']}/arrival.jpg"
os.makedirs(f"{FILE_PATH}/assets/img/history/{json_data['date']}/{json_data['name']}", exist_ok=True)
cv2.imwrite(image_path, np.array(json_data['picture_array']))
json_data['picture_path'] = image_path
# Create a new row for the user today:
insert_user_querry = f"INSERT INTO users (name, date, arrival_time, arrival_picture) VALUES ('{json_data['name']}', '{json_data['date']}', '{json_data['hour']}', '{json_data['picture_path']}')"
cursor.execute(insert_user_querry)
except (Exception, psycopg2.DatabaseError) as error:
print("ERROR DB: ", error)
finally:
connection.commit()
# closing database connection.
if connection:
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
# Return user's data to the front
return jsonify(json_data)
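# Hedged hardening sketch, not part of the original app: the routes here
# interpolate request data directly into SQL strings, which is open to SQL
# injection. psycopg2 supports server-side parameter binding instead:
def _query_user_rows_safe(date, name):
    connection = DATABASE_CONNECTION()
    try:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT * FROM users WHERE date = %s AND name = %s",
            (date, name),
        )
        return cursor.fetchall()
    finally:
        connection.close()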
# * ---------- Get all the data of an employee ---------- *
@app.route('/get_employee/<string:name>', methods=['GET'])
def get_employee(name):
answer_to_send = {}
# Check if the user is already in the DB
try:
# Connect to DB
connection = DATABASE_CONNECTION()
cursor = connection.cursor()
# Query the DB to get all the data of a user:
user_information_sql_query = f"SELECT * FROM users WHERE name = '{name}'"
cursor.execute(user_information_sql_query)
result = cursor.fetchall()
connection.commit()
# if the user exist in the db:
if result:
print('RESULT: ',result)
# Structure the data and put the dates in string for the front
for k,v in enumerate(result):
answer_to_send[k] = {}
for ko,vo in enumerate(result[k]):
answer_to_send[k][ko] = str(vo)
print('answer_to_send: ', answer_to_send)
else:
answer_to_send = {'error': 'User not found...'}
except (Exception, psycopg2.DatabaseError) as error:
print("ERROR DB: ", error)
finally:
# closing database connection:
if (connection):
cursor.close()
connection.close()
# Return the user's data to the front
return jsonify(answer_to_send)
# * --------- Get the 5 last users seen by the camera --------- *
@app.route('/get_5_last_entries', methods=['GET'])
def get_5_last_entries():
answer_to_send = {}
# Check if the user is already in the DB
try:
# Connect to DB
connection = DATABASE_CONNECTION()
cursor = connection.cursor()
# Query the DB to get all the data of a user:
lasts_entries_sql_query = f"SELECT * FROM users ORDER BY id DESC LIMIT 5;"
cursor.execute(lasts_entries_sql_query)
result = cursor.fetchall()
connection.commit()
# if DB is not empty:
if result:
# Structure the data and put the dates in string for the front
for k, v in enumerate(result):
answer_to_send[k] = {}
for ko, vo in enumerate(result[k]):
answer_to_send[k][ko] = str(vo)
else:
answer_to_send = {'error': 'error detect'}
except (Exception, psycopg2.DatabaseError) as error:
print("ERROR DB: ", error)
finally:
# closing database connection:
if (connection):
cursor.close()
connection.close()
# Return the user's data to the front
return jsonify(answer_to_send)
# * ---------- Add new employee ---------- *
@app.route('/add_employee', methods=['POST'])
@cross_origin(supports_credentials=True)
def add_employee():
try:
# Get the picture from the request
image_file = request.files['image']
print(request.form['nameOfEmployee'])
# Store it in the folder of the know faces:
file_path = os.path.join(f"assets/img/users/{request.form['nameOfEmployee']}.jpg")
image_file.save(file_path)
        answer = 'New employee successfully added'
    except Exception:
        answer = 'Error while adding new employee. Please try later...'
return jsonify(answer)
# * ---------- Get employee list ---------- *
@app.route('/get_employee_list', methods=['GET'])
def get_employee_list():
employee_list = {}
# Walk in the user folder to get the user list
walk_count = 0
for file_name in os.listdir(f"{FILE_PATH}/assets/img/users/"):
# Capture the employee's name with the file's name
name = re.findall("(.*)\.jpg", file_name)
if name:
employee_list[walk_count] = name[0]
walk_count += 1
return jsonify(employee_list)
# * ---------- Delete employee ---------- *
@app.route('/delete_employee/<string:name>', methods=['GET'])
def delete_employee(name):
try:
# Remove the picture of the employee from the user's folder:
print('name: ', name)
file_path = os.path.join(f'assets/img/users/{name}.jpg')
os.remove(file_path)
        answer = 'Employee successfully removed'
    except Exception:
        answer = 'Error while deleting the employee. Please try later'
return jsonify(answer)
# * -------------------- RUN SERVER -------------------- *
if __name__ == '__main__':
# * --- DEBUG MODE: --- *
app.run(host='127.0.0.1', port=5000, debug=True)
# * --- DOCKER PRODUCTION MODE: --- *
# app.run(host='0.0.0.0', port=os.environ['PORT']) -> DOCKER
| 35.716738
| 213
| 0.58099
|
4a1496c1d253abdb9dd2c6647d2b981d6bdd2b9d
| 1,803
|
py
|
Python
|
src/reliapy/distributions/continuous/_rdist.py
|
reliapy/reliapy
|
3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d
|
[
"BSD-3-Clause"
] | null | null | null |
src/reliapy/distributions/continuous/_rdist.py
|
reliapy/reliapy
|
3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d
|
[
"BSD-3-Clause"
] | null | null | null |
src/reliapy/distributions/continuous/_rdist.py
|
reliapy/reliapy
|
3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d
|
[
"BSD-3-Clause"
] | null | null | null |
from reliapy.distributions.continuous import _Continuous
from scipy.stats import rdist as prob
class RDist(_Continuous):
def __init__(self, c=None, loc=None, scale=None, random_state=None):
self.c = c
self.loc = loc
self.scale = scale
self.stats = prob.stats(c=self.c, loc=self.loc, scale=self.scale, moments='mv')
self.random_state = random_state
super().__init__()
def pdf(self, X=None):
"""
PDF.
**Input:**
* **X** (`float`)
Argument.
**Output**
PDF of X.
"""
return prob.pdf(X, c=self.c, loc=self.loc, scale=self.scale)
def cdf(self, X=None):
"""
CDF.
**Input:**
* **X** (`float`)
Argument.
**Output**
CDF of X.
"""
return prob.cdf(X, c=self.c, loc=self.loc, scale=self.scale)
def icdf(self, y=None):
"""
Inverse CDF.
**Input:**
        * **y** (`float`)
        Argument (a probability in [0, 1]).
        **Output**
        Inverse CDF evaluated at y.
"""
return prob.ppf(y, c=self.c, loc=self.loc, scale=self.scale)
def moment(self, n=1):
"""
Get the non-central moments of order n.
**Input:**
        * **n** (`int`)
Order of the moment.
**Output**
non central moment.
"""
return prob.moment(n, c=self.c, loc=self.loc, scale=self.scale)
def rvs(self, n_sim=1):
"""
Get `n_sim` random samples.
**Input:**
        * **n_sim** (`int`)
Number of random samples.
**Output**
Samples.
"""
return prob.rvs(c=self.c, loc=self.loc, scale=self.scale, size=n_sim, random_state=self.random_state)
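if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: RDist wraps
    # scipy.stats.rdist, so c is its shape parameter; the parameter values
    # below are arbitrary.
    dist = RDist(c=1.6, loc=0.0, scale=1.0, random_state=42)
    print(dist.pdf(0.2), dist.cdf(0.2), dist.icdf(0.5), dist.rvs(n_sim=3))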
| 22.5375
| 109
| 0.490849
|
4a14976f308f2656cb761c7597cbb94846a05fd4
| 2,294
|
py
|
Python
|
extract_data/extract_ventures.py
|
Neraud/FFXIV_RetainerInventoryHelper
|
398d3ebe37a3debf4b6e1a7b83dfe660404b10a1
|
[
"MIT"
] | 6
|
2019-09-07T11:53:21.000Z
|
2021-07-25T21:26:49.000Z
|
extract_data/extract_ventures.py
|
Neraud/FFXIV_RetainerInventoryHelper
|
398d3ebe37a3debf4b6e1a7b83dfe660404b10a1
|
[
"MIT"
] | 1
|
2021-12-18T14:41:08.000Z
|
2021-12-18T16:39:26.000Z
|
extract_data/extract_ventures.py
|
Neraud/FFXIV_RetainerInventoryHelper
|
398d3ebe37a3debf4b6e1a7b83dfe660404b10a1
|
[
"MIT"
] | 1
|
2021-08-12T09:45:18.000Z
|
2021-08-12T09:45:18.000Z
|
#!/usr/bin/env python3
import urllib.request, json
import codecs
quantity = {}
ventures = {}
req = urllib.request.Request("https://xivapi.com/RetainerTaskNormal?columns=ID,ItemTargetID,Quantity2&max_items=1000", headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as url:
response = json.loads(url.read().decode())
for result in response["Results"]:
        if result['ItemTargetID'] is not None and result['Quantity2'] is not None and int(result['Quantity2']) > 0:
quantity[result['ID']] = { 'itemId': result['ItemTargetID'], 'qty': result['Quantity2'] }
req = urllib.request.Request("https://xivapi.com/RetainerTask?columns=ID,IsRandom,Task,ClassJobCategoryTargetID,RetainerLevel&max_items=1000", headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as url:
response = json.loads(url.read().decode())
for result in response["Results"]:
ventureId = result['ID']
isRandom = result['IsRandom']
task = result['Task']
        if isRandom is not None and int(isRandom) == 0 and task is not None:
taskId = task['ID']
if result['ClassJobCategoryTargetID']:
classJobCategoryId = int(result['ClassJobCategoryTargetID'])
if classJobCategoryId == 17: classJobCategory = "MIN"
elif classJobCategoryId == 18: classJobCategory = "BTN"
elif classJobCategoryId == 19: classJobCategory = "FSH"
elif classJobCategoryId == 34: classJobCategory = "HUNT"
else: classJobCategory = "UKN"
else:
classJobCategory = "UKN"
retainerLevel = result['RetainerLevel']
if taskId in quantity:
itemId = quantity[taskId]['itemId']
qty = quantity[taskId]['qty']
ventures[taskId] = { 'taskId': taskId, 'ventureId': ventureId, 'classJobCategory': classJobCategory, 'retainerLevel': retainerLevel, 'itemId': itemId, 'qty': qty }
print("taskId", "retainerJob", "retainerLevel", "itemId", "qty", sep = "\t")
for taskId, venture in ventures.items():
print(venture["taskId"], venture["classJobCategory"], venture["retainerLevel"], venture["itemId"], venture["qty"], sep = "\t")
| 47.791667
| 181
| 0.621186
|
4a149a586dca57f2b779ac76856b80f472345603
| 2,305
|
py
|
Python
|
curent/stream/redis.py
|
bdemirtas/stream
|
862ee2f8877eead7294fe16968ee486508bd19ce
|
[
"BSD-3-Clause"
] | null | null | null |
curent/stream/redis.py
|
bdemirtas/stream
|
862ee2f8877eead7294fe16968ee486508bd19ce
|
[
"BSD-3-Clause"
] | null | null | null |
curent/stream/redis.py
|
bdemirtas/stream
|
862ee2f8877eead7294fe16968ee486508bd19ce
|
[
"BSD-3-Clause"
] | 1
|
2019-12-19T21:35:11.000Z
|
2019-12-19T21:35:11.000Z
|
"""Redis stream.
Uses Redis Python client.
"""
import logging
import attr
import redis
from curent.stream import (
Stream,
StreamError,
StreamMessage,
StreamPublisher,
StreamSubscriber,
)
from redis.exceptions import (
ConnectionError,
TimeoutError,
)
from curent.stream.helper import asynchronize
@attr.s(frozen=True, slots=True)
class RedisPublisher(StreamPublisher):
"""Redis publisher."""
_producer = attr.ib()
def send(self, msg):
"""Produce a message on the Redis producer.
"""
logging.debug(
"Producing message on Redis producer to %(topic)s", {"topic": msg.topic,}
)
try:
self._producer.publish(msg.topic, msg.payload)
except (ConnectionError, TimeoutError) as error:
raise StreamError("Failed to produce {!r}".format(msg.payload)) from error
@attr.s(frozen=True, slots=True)
class RedisSubscriber(StreamSubscriber):
"""Redis subscriber."""
_consumer = attr.ib()
def receive(self, timeout=2):
"""Poll for the next available message on the Redis consumer."""
while True:
logging.debug(
"Polling Redis consumer for %(timeout)d ms", {"timeout": timeout,}
)
response = self._consumer.get_message(timeout=timeout)
if response:
return StreamMessage(
topic=response["channel"], payload=response["data"]
)
def close(self):
"""Close the Redis consumer."""
self._consumer.unsubscribe()
@attr.s(frozen=True, slots=True)
class RedisStream(Stream):
"""Redis stream."""
def publish(self):
"""Publish to a Redis producer."""
logging.info("Producing to Redis at %(target)s", {"target": self.target,})
return RedisPublisher(self.loop, redis.StrictRedis(self.target))
def subscribe(self, topics):
"""Subscribe to topics on a Redis consumer."""
logging.info("Consuming from Redis at %(target)s", {"target": self.target,})
logging.info("Subscribing Redis consumer to %(topics)s", {"topics": topics,})
pubsub = redis.StrictRedis(self.target).pubsub()
pubsub.subscribe(topics)
return RedisSubscriber(self.loop, topics, pubsub)
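# Hedged usage sketch, not part of the original module. The attrs fields
# suggest Stream takes (loop, target); both that constructor signature and
# the "localhost" target below are assumptions.
#   stream = RedisStream(loop=None, target="localhost")
#   publisher = stream.publish()
#   subscriber = stream.subscribe(["events"])
#   publisher.send(StreamMessage(topic="events", payload=b"hello"))
#   print(subscriber.receive())
#   subscriber.close()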
| 27.117647
| 86
| 0.625163
|
4a149a7424f0e599e54413e3576d3dddc796c4f9
| 4,869
|
py
|
Python
|
tensorflow_datasets/image_classification/oxford_iiit_pet.py
|
stwind/datasets
|
118d3d2472a3bf2703d1374e25c2223dc7942c13
|
[
"Apache-2.0"
] | 1
|
2020-10-11T19:15:49.000Z
|
2020-10-11T19:15:49.000Z
|
tensorflow_datasets/image_classification/oxford_iiit_pet.py
|
cbaront/datasets
|
b097e0985eaaadc6b0c1f4dfa3b3cf88d116c607
|
[
"Apache-2.0"
] | 1
|
2021-02-23T20:16:05.000Z
|
2021-02-23T20:16:05.000Z
|
tensorflow_datasets/image_classification/oxford_iiit_pet.py
|
cbaront/datasets
|
b097e0985eaaadc6b0c1f4dfa3b3cf88d116c607
|
[
"Apache-2.0"
] | 1
|
2022-03-14T16:17:53.000Z
|
2022-03-14T16:17:53.000Z
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oxford-IIIT pet dataset."""
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The Oxford-IIIT pet dataset is a 37 category pet image dataset with roughly 200
images for each class. The images have large variations in scale, pose and
lighting. All images have an associated ground truth annotation of breed.
"""
_CITATION = """\
@InProceedings{parkhi12a,
author = "Parkhi, O. M. and Vedaldi, A. and Zisserman, A. and Jawahar, C.~V.",
title = "Cats and Dogs",
booktitle = "IEEE Conference on Computer Vision and Pattern Recognition",
year = "2012",
}
"""
_BASE_URL = "http://www.robots.ox.ac.uk/~vgg/data/pets/data"
_LABEL_CLASSES = [
"Abyssinian", "american_bulldog", "american_pit_bull_terrier",
"basset_hound", "beagle", "Bengal", "Birman", "Bombay", "boxer",
"British_Shorthair", "chihuahua", "Egyptian_Mau", "english_cocker_spaniel",
"english_setter", "german_shorthaired", "great_pyrenees", "havanese",
"japanese_chin", "keeshond", "leonberger", "Maine_Coon",
"miniature_pinscher", "newfoundland", "Persian", "pomeranian", "pug",
"Ragdoll", "Russian_Blue", "saint_bernard", "samoyed", "scottish_terrier",
"shiba_inu", "Siamese", "Sphynx", "staffordshire_bull_terrier",
"wheaten_terrier", "yorkshire_terrier"
]
_SPECIES_CLASSES = ["Cat", "Dog"]
class OxfordIIITPet(tfds.core.GeneratorBasedBuilder):
"""Oxford-IIIT pet dataset."""
VERSION = tfds.core.Version("3.2.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"image": tfds.features.Image(),
"label": tfds.features.ClassLabel(names=_LABEL_CLASSES),
"species": tfds.features.ClassLabel(names=_SPECIES_CLASSES),
"file_name": tfds.features.Text(),
"segmentation_mask": tfds.features.Image(shape=(None, None, 1))
}),
supervised_keys=("image", "label"),
homepage="http://www.robots.ox.ac.uk/~vgg/data/pets/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns splits."""
# Download images and annotations that come in separate archives.
    # Note that the extension of the archives is .tar.gz even though the
    # actual archive format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": _BASE_URL + "/images.tar.gz",
"annotations": _BASE_URL + "/annotations.tar.gz",
})
images_path_dir = os.path.join(dl_paths["images"], "images")
annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")
# Setup train and test splits
train_split = tfds.core.SplitGenerator(
name="train",
gen_kwargs={
"images_dir_path": images_path_dir,
"annotations_dir_path": annotations_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"trainval.txt"),
},
)
test_split = tfds.core.SplitGenerator(
name="test",
gen_kwargs={
"images_dir_path": images_path_dir,
"annotations_dir_path": annotations_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"test.txt")
},
)
return [train_split, test_split]
def _generate_examples(self, images_dir_path, annotations_dir_path,
images_list_file):
with tf.io.gfile.GFile(images_list_file, "r") as images_list:
for line in images_list:
image_name, label, species, _ = line.strip().split(" ")
trimaps_dir_path = os.path.join(annotations_dir_path, "trimaps")
trimap_name = image_name + ".png"
image_name += ".jpg"
label = int(label) - 1
species = int(species) - 1
record = {
"image": os.path.join(images_dir_path, image_name),
"label": int(label),
"species": species,
"file_name": image_name,
"segmentation_mask": os.path.join(trimaps_dir_path, trimap_name)
}
yield image_name, record
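# Hedged usage sketch, not part of this module: once the builder is
# registered, the dataset loads through the standard TFDS entry point.
# Note that calling this downloads the Oxford-IIIT Pet archives.
def _example_load():
  ds = tfds.load("oxford_iiit_pet", split="train", as_supervised=True)
  for image, label in ds.take(1):
    print(image.shape, label.numpy())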
| 36.335821
| 86
| 0.650852
|
4a149b042357afef8a8b8ca52096718f27b19a3c
| 627
|
py
|
Python
|
speechbrain/__init__.py
|
JasonSWFu/speechbrain
|
cb78ba2b33fceba273b055dc471535344c3053f0
|
[
"Apache-2.0"
] | 3,913
|
2021-03-14T13:54:52.000Z
|
2022-03-30T05:09:55.000Z
|
speechbrain/__init__.py
|
JasonSWFu/speechbrain
|
cb78ba2b33fceba273b055dc471535344c3053f0
|
[
"Apache-2.0"
] | 667
|
2021-03-14T20:11:17.000Z
|
2022-03-31T04:07:17.000Z
|
speechbrain/__init__.py
|
JasonSWFu/speechbrain
|
cb78ba2b33fceba273b055dc471535344c3053f0
|
[
"Apache-2.0"
] | 785
|
2021-03-14T13:20:57.000Z
|
2022-03-31T03:26:03.000Z
|
""" Comprehensive speech processing toolkit
"""
import os
from .core import Stage, Brain, create_experiment_directory, parse_arguments
from . import alignment # noqa
from . import dataio # noqa
from . import decoders # noqa
from . import lobes # noqa
from . import lm # noqa
from . import nnet # noqa
from . import processing # noqa
from . import tokenizers # noqa
from . import utils # noqa
with open(os.path.join(os.path.dirname(__file__), "version.txt")) as f:
version = f.read().strip()
__all__ = [
"Stage",
"Brain",
"create_experiment_directory",
"parse_arguments",
]
__version__ = version
| 24.115385
| 76
| 0.703349
|
4a149d4d5e4802211e5452ac84d5c4df1639ddd0
| 1,465
|
py
|
Python
|
app/core/models.py
|
starscream2290/reciper-app-api
|
9655456ff1dadb34f44dcdb88d8727ec5bd5f90f
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
starscream2290/reciper-app-api
|
9655456ff1dadb34f44dcdb88d8727ec5bd5f90f
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
starscream2290/reciper-app-api
|
9655456ff1dadb34f44dcdb88d8727ec5bd5f90f
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
    def create_user(self, email, password=None, **extra_fields):
        "Creates and saves a new user"
        if not email:
            raise ValueError("Users must have a valid email address")
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self.db)
        return user
    def create_superuser(self, email, password):
        "Creates and saves a new superuser"
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self.db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    "Custom user model that supports email instead of username"
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    objects = UserManager()
    USERNAME_FIELD = "email"
class Tag(models.Model):
"Tag to be used for a recipe"
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
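# Hedged usage sketch, not part of the original module: creating accounts
# through the custom manager, e.g. from a Django shell or a test. The
# credentials below are illustrative only.
#   user = User.objects.create_user(email="a@example.com", password="secret")
#   admin = User.objects.create_superuser("admin@example.com", "secret")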
| 29.3
| 74
| 0.702389
|
4a149e52cdf05f628d957a9298bf525c37ed6a38
| 4,669
|
py
|
Python
|
test_autofit/graphical/test_unification.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 39
|
2019-01-24T10:45:23.000Z
|
2022-03-18T09:37:59.000Z
|
test_autofit/graphical/test_unification.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 260
|
2018-11-27T12:56:33.000Z
|
2022-03-31T16:08:59.000Z
|
test_autofit/graphical/test_unification.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 13
|
2018-11-30T16:49:05.000Z
|
2022-01-21T17:39:29.000Z
|
from random import random
import dill
import numpy as np
import pytest
import autofit as af
from autofit import graphical as g
from autofit.messages.normal import UniformNormalMessage
from autofit.messages.transform import log_10_transform
@pytest.fixture(
name="prior"
)
def make_prior():
return af.GaussianPrior(
mean=1,
sigma=2
)
def test():
mean_field = g.MeanField({
})
mean_field.instance_for_arguments({})
def test_retain_id(
prior
):
new_message = prior * prior
assert new_message.id == prior.id
@pytest.fixture(
name="x"
)
def make_x():
return np.linspace(
0, 1, 100
)
def test_projected_model():
model = af.Model(
af.Gaussian,
centre=af.UniformPrior()
)
samples = af.Samples(
model,
[
af.Sample(
-1.0, -1.0,
weight=random(),
kwargs={
("centre",): random(),
("normalization",): random(),
("sigma",): random(),
}
)
for _ in range(100)
]
)
result = af.Result(
samples=samples,
model=model
)
projected_model = result.projected_model
assert projected_model.prior_count == 3
assert projected_model.centre is not model.centre
assert projected_model.centre.id == model.centre.id
assert isinstance(
projected_model.centre,
af.UniformPrior
)
def test_uniform_normal(x):
message = UniformNormalMessage.shifted(
shift=1,
scale=2.1
)(
mean=0.0,
sigma=1.0
)
assert message.pdf(0.9) == 0
assert message.pdf(3.2) == 0
assert message.pdf(1.5) > 0
@pytest.mark.parametrize(
"lower_limit, upper_limit, unit_value, physical_value",
[
(0.0, 1.0, 0.5, 0.5),
(0.0, 1.0, 1.0, 1.0),
(0.0, 1.0, 0.0, 0.0),
(1.0, 2.0, 0.5, 1.5),
(1.0, 2.0, 1.0, 2.0),
(1.0, 2.0, 0.0, 1.0),
(0.0, 2.0, 0.5, 1.0),
(0.0, 2.0, 1.0, 2.0),
(0.0, 2.0, 0.0, 0.0),
]
)
def test_uniform_prior(
lower_limit,
upper_limit,
unit_value,
physical_value
):
assert af.UniformPrior(
lower_limit=lower_limit,
upper_limit=upper_limit,
).value_for(
unit_value
) == pytest.approx(
physical_value
)
def test_uniform_odd_result():
prior = af.UniformPrior(90.0, 100.0)
assert prior.value_for(
0.0
) == pytest.approx(90.0)
@pytest.mark.parametrize(
"lower_limit",
[
1, 90
]
)
@pytest.mark.parametrize(
"upper_limit",
[
110, 200
]
)
@pytest.mark.parametrize(
"unit",
[
0.00001, 0.5, 0.9
]
)
def test_log10(
lower_limit,
upper_limit,
unit
):
prior = af.LogUniformPrior(
lower_limit=lower_limit,
upper_limit=upper_limit
)
assert 10.0 ** (
np.log10(lower_limit)
+ unit * (np.log10(upper_limit) - np.log10(lower_limit))
) == pytest.approx(
prior.value_for(
unit
),
abs=0.001
)
@pytest.fixture(
name="uniform_prior"
)
def make_uniform_prior():
return af.UniformPrior(
lower_limit=10,
upper_limit=20,
id_=1
)
def test_prior_arithmetic(
uniform_prior
):
multiplied = uniform_prior * uniform_prior
divided = multiplied / uniform_prior
multiplied_value = multiplied.value_for(0.3)
divided_value = divided.value_for(0.3)
uniform_prior_value = uniform_prior.value_for(0.3)
assert multiplied_value != divided_value
assert divided_value == uniform_prior_value
def test_pickle_uniform_prior(
uniform_prior
):
pickled_prior = dill.loads(
dill.dumps(uniform_prior)
)
assert pickled_prior == uniform_prior
assert pickled_prior.id == uniform_prior.id
def test_pickle_log_uniform_prior():
log_uniform_prior = af.LogUniformPrior()
pickled_prior = dill.loads(
dill.dumps(log_uniform_prior)
)
assert pickled_prior == log_uniform_prior
@pytest.fixture(
name="LogMessage"
)
def make_log_message():
return UniformNormalMessage.shifted(
shift=1,
scale=2,
).transformed(
log_10_transform
)
def test_pickle_transformed(
LogMessage
):
dill.loads(
dill.dumps(LogMessage)
)
def test_pickle_transformed_instantiated(
LogMessage
):
instance = LogMessage(
mean=1,
sigma=2
)
dill.loads(
dill.dumps(instance)
)
| 19.135246
| 68
| 0.578711
|
4a14a0c88e83ba0dc01f9699a57511c30ff0594a
| 2,793
|
py
|
Python
|
packages/web_stack/setup.py
|
lesperry/Metagenomics
|
a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6
|
[
"CC-BY-3.0"
] | null | null | null |
packages/web_stack/setup.py
|
lesperry/Metagenomics
|
a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6
|
[
"CC-BY-3.0"
] | 2
|
2020-08-19T18:14:59.000Z
|
2020-08-20T01:19:12.000Z
|
packages/web_stack/setup.py
|
lesperry/Metagenomics
|
a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import os
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
SOURCE_DIR = "galaxy"
_version_re = re.compile(r'__version__\s+=\s+(.*)')
project_short_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
with open('%s/project_galaxy_%s.py' % (SOURCE_DIR, project_short_name), 'rb') as f:
init_contents = f.read().decode('utf-8')
def get_var(var_name):
pattern = re.compile(r'%s\s+=\s+(.*)' % var_name)
match = pattern.search(init_contents).group(1)
return str(ast.literal_eval(match))
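# Editor's illustration (hypothetical value, not part of the original setup.py): with a
# line such as `__version__ = "1.0.1"` in the project file, the pattern above captures
# '"1.0.1"' and ast.literal_eval strips the quotes, so get_var("__version__") returns
# the plain string "1.0.1".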
version = get_var("__version__")
PROJECT_NAME = get_var("PROJECT_NAME")
PROJECT_URL = get_var("PROJECT_URL")
PROJECT_AUTHOR = get_var("PROJECT_AUTHOR")
PROJECT_EMAIL = get_var("PROJECT_EMAIL")
PROJECT_DESCRIPTION = get_var("PROJECT_DESCRIPTION")
TEST_DIR = 'tests'
PACKAGES = [
'galaxy',
'galaxy.web_stack',
]
ENTRY_POINTS = '''
[console_scripts]
'''
PACKAGE_DATA = {
# Be sure to update MANIFEST.in for source dist.
'galaxy': [
],
}
PACKAGE_DIR = {
SOURCE_DIR: SOURCE_DIR,
}
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
if os.path.exists("requirements.txt"):
requirements = open("requirements.txt").read().split("\n")
else:
# In tox, it will cover them anyway.
requirements = []
test_requirements = open("test-requirements.txt").read().split("\n")
setup(
name=PROJECT_NAME,
version=version,
description=PROJECT_DESCRIPTION,
long_description=readme + '\n\n' + history,
long_description_content_type='text/x-rst',
author=PROJECT_AUTHOR,
author_email=PROJECT_EMAIL,
url=PROJECT_URL,
packages=PACKAGES,
entry_points=ENTRY_POINTS,
package_data=PACKAGE_DATA,
package_dir=PACKAGE_DIR,
include_package_data=True,
install_requires=requirements,
license="AFL",
zip_safe=False,
keywords='galaxy',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: Academic Free License (AFL)',
'Operating System :: POSIX',
'Topic :: Software Development',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Testing',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
test_suite=TEST_DIR,
tests_require=test_requirements
)
| 28.212121
| 83
| 0.655926
|
4a14a0da716e0930fd303660f9777485f97cce54
| 1,017
|
py
|
Python
|
Easy/118.PascalsTriangle.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 39
|
2020-07-04T11:15:13.000Z
|
2022-02-04T22:33:42.000Z
|
Easy/118.PascalsTriangle.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 1
|
2020-07-15T11:53:37.000Z
|
2020-07-15T11:53:37.000Z
|
Easy/118.PascalsTriangle.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 20
|
2020-07-14T19:12:53.000Z
|
2022-03-02T06:28:17.000Z
|
"""
Given a non-negative integer numRows, generate the first numRows of
Pascal's triangle.
In Pascal's triangle, each number is the sum of the two numbers directly
above it.
Example:
Input: 5
Output:
[
[1],
[1,1],
[1,2,1],
[1,3,3,1],
[1,4,6,4,1]
]
"""
#Difficulty: Easy
#15 / 15 test cases passed.
#Runtime: 28 ms
#Memory Usage: 13.6 MB
#Runtime: 28 ms, faster than 72.51% of Python3 online submissions for Pascal's Triangle.
#Memory Usage: 13.6 MB, less than 7.14% of Python3 online submissions for Pascal's Triangle.
from typing import List  # needed for the List[List[int]] annotation on generate() below
class Solution:
def generate(self, numRows: int) -> List[List[int]]:
if not numRows: return None
triangle = [[1]]
for n in range(1, numRows):
t = triangle[n-1]
l = [t[0], t[-1]]
while len(l) < n+1:
for i in range(1, len(t)):
l.insert(len(l) - 1, t[i-1] + t[i])
triangle.extend([l])
return triangle
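# Editor's sketch: a minimal usage check, assuming the typing import added above.
if __name__ == "__main__":
    # Each row is built from the previous one; row n has n+1 entries and starts/ends with 1.
    print(Solution().generate(5))
    # -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]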
| 26.763158
| 92
| 0.548673
|
4a14a15562d2be8892280ab5a44793708ee54f38
| 15,497
|
py
|
Python
|
tensorflow/python/debug/wrappers/dumping_wrapper_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/debug/wrappers/dumping_wrapper_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/debug/wrappers/dumping_wrapper_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit Tests for classes in dumping_wrapper.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import tempfile
import threading
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
@test_util.run_v1_only("b/120545219")
class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.session_root = tempfile.mkdtemp()
self.v = variables.VariableV1(10.0, dtype=dtypes.float32, name="v")
self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")
self.ph = array_ops.placeholder(dtypes.float32, shape=(), name="ph")
self.inc_w_ph = state_ops.assign_add(self.v, self.ph, name="inc_w_ph")
self.sess = session.Session()
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self.session_root):
file_io.delete_recursively(self.session_root)
def _assert_correct_run_subdir_naming(self, run_subdir):
self.assertStartsWith(run_subdir, "run_")
self.assertEqual(2, run_subdir.count("_"))
self.assertGreater(int(run_subdir.split("_")[1]), 0)
def testConstructWrapperWithExistingNonEmptyRootDirRaisesException(self):
dir_path = os.path.join(self.session_root, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "session_root path points to a non-empty directory"):
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=self.session_root, log_usage=False)
def testConstructWrapperWithExistingFileDumpRootRaisesException(self):
file_path = os.path.join(self.session_root, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(gfile.Exists(file_path))
self.assertFalse(gfile.IsDirectory(file_path))
with self.assertRaisesRegexp(ValueError,
"session_root path points to a file"):
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=file_path, log_usage=False)
def testConstructWrapperWithNonexistentSessionRootCreatesDirectory(self):
new_dir_path = os.path.join(tempfile.mkdtemp(), "new_dir")
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=new_dir_path, log_usage=False)
self.assertTrue(gfile.IsDirectory(new_dir_path))
# Cleanup.
gfile.DeleteRecursively(new_dir_path)
def testDumpingOnASingleRunWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingOnASingleRunWorksWithRelativePathForDebugDumpDir(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
cwd = os.getcwd()
try:
os.chdir(self.session_root)
dump = debug_data.DebugDumpDir(
os.path.relpath(dump_dirs[0], self.session_root))
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
finally:
os.chdir(cwd)
def testDumpingOnASingleRunWithFeedDictWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
feed_dict = {self.ph: 3.2}
sess.run(self.inc_w_ph, feed_dict=feed_dict)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_w_ph), dump.run_fetches_info)
self.assertEqual(repr(feed_dict.keys()), dump.run_feed_keys_info)
def testDumpingOnMultipleRunsWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
for _ in range(3):
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(3, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testUsingNonCallableAsWatchFnRaisesTypeError(self):
bad_watch_fn = "bad_watch_fn"
with self.assertRaisesRegexp(TypeError, "watch_fn is not callable"):
dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=bad_watch_fn,
log_usage=False)
def testDumpingWithLegacyWatchFnOnFetchesWorks(self):
"""Use a watch_fn that returns different whitelists for different runs."""
def watch_fn(fetches, feeds):
del feeds
# A watch_fn that picks fetch name.
if fetches.name == "inc_v:0":
# If inc_v, watch everything.
return "DebugIdentity", r".*", r".*"
else:
# If dec_v, watch nothing.
return "DebugIdentity", r"$^", r"$^"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
for _ in range(3):
sess.run(self.inc_v)
sess.run(self.dec_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(6, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertGreater(dump.size, 0)
self.assertAllClose([10.0 - 0.4 * (i / 2)],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.dec_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingWithLegacyWatchFnWithNonDefaultDebugOpsWorks(self):
"""Use a watch_fn that specifies non-default debug ops."""
def watch_fn(fetches, feeds):
del fetches, feeds
return ["DebugIdentity", "DebugNumericSummary"], r".*", r".*"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(14,
len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
def testDumpingWithWatchFnWithNonDefaultDebugOpsWorks(self):
"""Use a watch_fn that specifies non-default debug ops."""
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity", "DebugNumericSummary"],
node_name_regex_whitelist=r"^v.*",
op_type_regex_whitelist=r".*",
tensor_dtype_regex_whitelist=".*_ref")
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(14,
len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
dumped_nodes = [dump.node_name for dump in dump.dumped_tensor_data]
self.assertNotIn("inc_v", dumped_nodes)
self.assertNotIn("delta", dumped_nodes)
def testDumpingDebugHookWithoutWatchFnWorks(self):
dumping_hook = hooks.DumpingDebugHook(self.session_root, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingDebugHookWithStatefulWatchFnWorks(self):
watch_fn_state = {"run_counter": 0}
def counting_watch_fn(fetches, feed_dict):
del fetches, feed_dict
watch_fn_state["run_counter"] += 1
if watch_fn_state["run_counter"] % 2 == 1:
# If odd-index run (1-based), watch every ref-type tensor.
return framework.WatchOptions(
debug_ops="DebugIdentity",
tensor_dtype_regex_whitelist=".*_ref")
else:
# If even-index run, watch nothing.
return framework.WatchOptions(
debug_ops="DebugIdentity",
node_name_regex_whitelist=r"^$",
op_type_regex_whitelist=r"^$")
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=counting_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
for _ in range(4):
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(4, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertNotIn("delta",
[datum.node_name for datum in dump.dumped_tensor_data])
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingDebugHookWithStatefulLegacyWatchFnWorks(self):
watch_fn_state = {"run_counter": 0}
def counting_watch_fn(fetches, feed_dict):
del fetches, feed_dict
watch_fn_state["run_counter"] += 1
if watch_fn_state["run_counter"] % 2 == 1:
# If odd-index run (1-based), watch everything.
return "DebugIdentity", r".*", r".*"
else:
# If even-index run, watch nothing.
return "DebugIdentity", r"$^", r"$^"
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=counting_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
for _ in range(4):
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(4, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingFromMultipleThreadsObeysThreadNameFilter(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False,
thread_name_filter=r"MainThread$")
self.assertAllClose(1.0, sess.run(self.delta))
child_thread_result = []
def child_thread_job():
child_thread_result.append(sess.run(self.eta))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
thread.join()
self.assertAllClose([-1.4], child_thread_result)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertEqual(1, dump.size)
self.assertEqual("delta", dump.dumped_tensor_data[0].node_name)
def testDumpingWrapperWithEmptyFetchWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run([])
if __name__ == "__main__":
googletest.main()
| 39.735897
| 80
| 0.703491
|
4a14a2a2489dcd237627f0197f30244e4ab3c12e
| 3,102
|
py
|
Python
|
tests/test_nbqa_diff.py
|
tcbegley/nbQA
|
75bbb8aade6f482a14d12b1db967005c5f7a70e4
|
[
"MIT"
] | null | null | null |
tests/test_nbqa_diff.py
|
tcbegley/nbQA
|
75bbb8aade6f482a14d12b1db967005c5f7a70e4
|
[
"MIT"
] | null | null | null |
tests/test_nbqa_diff.py
|
tcbegley/nbQA
|
75bbb8aade6f482a14d12b1db967005c5f7a70e4
|
[
"MIT"
] | null | null | null |
"""Check --nbqa-diff flag."""
import os
import re
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING
import pytest
from nbqa.__main__ import main
if TYPE_CHECKING:
from _pytest.capture import CaptureFixture
SPARKLES = "\N{sparkles}"
SHORTCAKE = "\N{shortcake}"
COLLISION = "\N{collision symbol}"
BROKEN_HEART = "\N{broken heart}"
TESTS_DIR = Path("tests")
TEST_DATA_DIR = TESTS_DIR / "data"
DIRTY_NOTEBOOK = TEST_DATA_DIR / "notebook_for_testing.ipynb"
CLEAN_NOTEBOOK = TEST_DATA_DIR / "clean_notebook.ipynb"
def test_diff_present(capsys: "CaptureFixture") -> None:
"""Test the results on --nbqa-diff on a dirty notebook."""
main(["black", str(DIRTY_NOTEBOOK), "--nbqa-diff"])
out, err = capsys.readouterr()
err = err.encode("ascii", "backslashreplace").decode()
expected_out = (
"\x1b[1mCell 2\x1b[0m\n"
"------\n"
f"--- {str(DIRTY_NOTEBOOK)}\n"
f"+++ {str(DIRTY_NOTEBOOK)}\n"
"@@ -12,8 +12,8 @@\n"
" 'hello goodbye'\n"
' """\n'
" \n"
"\x1b[31m- return 'hello {}'.format(name)\n"
'\x1b[0m\x1b[32m+ return "hello {}".format(name)\n'
"\x1b[0m \n"
" \n"
" !ls\n"
"\x1b[31m-hello(3) \n"
"\x1b[0m\x1b[32m+hello(3)\n"
"\x1b[0m\n"
"To apply these changes use `--nbqa-mutate` instead of `--nbqa-diff`\n"
)
assert out == expected_out
expected_err = (
dedent(
f"""\
reformatted {str(DIRTY_NOTEBOOK)}
All done! {SPARKLES} {SHORTCAKE} {SPARKLES}
1 file reformatted.
"""
)
.encode("ascii", "backslashreplace")
.decode()
)
assert err == expected_err
def test_diff_and_mutate() -> None:
"""
Check a ValueError is raised if we use both --nbqa-mutate and --nbqa-diff.
"""
msg = re.escape(
"""\
Cannot use both `--nbqa-diff` and `--nbqa-mutate` flags together!
Use `--nbqa-diff` to preview changes, and `--nbqa-mutate` to apply them.\
"""
)
with pytest.raises(ValueError, match=msg):
main(["black", str(DIRTY_NOTEBOOK), "--nbqa-mutate", "--nbqa-diff"])
def test_invalid_syntax_with_nbqa_diff(capsys: "CaptureFixture") -> None:
"""
Check that using nbqa-diff when there's invalid syntax doesn't have empty output.
Parameters
----------
capsys
Pytest fixture to capture stdout and stderr.
"""
path = os.path.join("tests", "invalid_data", "assignment_to_literal.ipynb")
main(["black", os.path.abspath(path), "--nbqa-diff", "--nbqa-dont-skip-bad-cells"])
out, err = capsys.readouterr()
expected_out = ""
expected_err = (
(f"{COLLISION} {BROKEN_HEART} {COLLISION}\n1 file failed to reformat.\n")
.encode("ascii", "backslashreplace")
.decode()
)
    # This is required because Linux supports emoji natively, so both strings
    # need the backslash-escaped form before they are compared.
err = err.encode("ascii", "backslashreplace").decode()
assert expected_out == out
assert expected_err in err
| 28.722222
| 87
| 0.60187
|
4a14a2ff2d814d02e082581cdd718f5e1da8044f
| 3,470
|
py
|
Python
|
horovod/spark/keras/optimizer.py
|
hcyang99/horovod
|
825cc197468548da47dcd38872d5b4ba6e6a125b
|
[
"Apache-2.0"
] | 1
|
2020-05-07T08:26:36.000Z
|
2020-05-07T08:26:36.000Z
|
horovod/spark/keras/optimizer.py
|
kyocen/horovod
|
e9b1e228ff92eb7f65d9aea2d36f23b327df28bd
|
[
"Apache-2.0"
] | null | null | null |
horovod/spark/keras/optimizer.py
|
kyocen/horovod
|
e9b1e228ff92eb7f65d9aea2d36f23b327df28bd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import io
import six
import h5py
from horovod.run.common.util import codec
def serialize_bare_keras_optimizer(x):
import keras
from horovod.spark.keras.bare import save_bare_keras_optimizer
return _serialize_keras_optimizer(x,
optimizer_class=keras.optimizers.Optimizer,
save_optimizer_fn=save_bare_keras_optimizer)
def deserialize_bare_keras_optimizer(x):
from horovod.spark.keras.bare import load_bare_keras_optimizer
return _deserialize_keras_optimizer(x,
load_keras_optimizer_fn=load_bare_keras_optimizer)
def serialize_tf_keras_optimizer(x):
import tensorflow as tf
from horovod.spark.keras.tensorflow import save_tf_keras_optimizer
return _serialize_keras_optimizer(x,
optimizer_class=tf.keras.optimizers.Optimizer,
save_optimizer_fn=save_tf_keras_optimizer)
def deserialize_tf_keras_optimizer(x):
from horovod.spark.keras.tensorflow import load_tf_keras_optimizer
return _deserialize_keras_optimizer(x,
load_keras_optimizer_fn=load_tf_keras_optimizer)
def _serialize_keras_optimizer(opt, optimizer_class, save_optimizer_fn):
if isinstance(opt, str):
return opt
elif isinstance(opt, optimizer_class):
bio = io.BytesIO()
with h5py.File(bio, 'w') as f:
save_optimizer_fn(opt, f)
return codec.dumps_base64(bio.getvalue())
else:
raise \
ValueError('Keras optimizer has to be an instance of str or keras.optimizers.Optimizer')
def is_string(obj):
if six.PY3:
return isinstance(obj, str)
if six.PY2:
if not isinstance(obj, str):
return False
# Now we need to check if it is not byte array. Bytearrays in python 2 are essentially an
# instance of string. There is not a good way to distinguish between the two types other than
# trying to decode the object
# https://stackoverflow.com/questions/34869889/what-is-the-proper-way-to-determine-if-an-object-is-a-bytes-like-object-in-pytho
obj_copy = copy.copy(obj)
try:
obj_copy.decode('ascii')
return True
except (UnicodeDecodeError, AttributeError):
return False
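# Editor's illustration: under Python 3 this means is_string("adam") is True while
# is_string(b"adam") is False, so only plain optimizer-name strings (e.g. "adam")
# bypass the h5py round-trip in the serialize/deserialize helpers above and below.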
def _deserialize_keras_optimizer(serialized_opt, load_keras_optimizer_fn):
if is_string(serialized_opt):
return serialized_opt
bio = io.BytesIO(serialized_opt)
with h5py.File(bio, 'r') as f:
return load_keras_optimizer_fn(f)
| 35.773196
| 135
| 0.678963
|
4a14a30042a081350670d31d559217bc57eddda4
| 148
|
py
|
Python
|
Algoritimos/ex4.9.py
|
mathspin/Algoritimos-py
|
3a814dd924d9ee4c15ee4734170ed82f70e95479
|
[
"MIT"
] | null | null | null |
Algoritimos/ex4.9.py
|
mathspin/Algoritimos-py
|
3a814dd924d9ee4c15ee4734170ed82f70e95479
|
[
"MIT"
] | null | null | null |
Algoritimos/ex4.9.py
|
mathspin/Algoritimos-py
|
3a814dd924d9ee4c15ee4734170ed82f70e95479
|
[
"MIT"
] | null | null | null |
i = 0
maior = 0
soma = 0
while i < 15:
    n = int(input("enter the code: "))
if n > maior:
maior = n
soma += n
i += 1
print(maior)
print(soma/i)
| 11.384615
| 36
| 0.567568
|
4a14a300a28723168ef92286f457b5a334b1dc70
| 934
|
py
|
Python
|
pipeline/models/pca.py
|
DPBayes/dp-representation-transfer
|
0c8389cc36659a7606bceac2531eaef7663ac49c
|
[
"MIT"
] | 1
|
2021-12-09T03:45:18.000Z
|
2021-12-09T03:45:18.000Z
|
pipeline/models/pca.py
|
DPBayes/dp-representation-transfer
|
0c8389cc36659a7606bceac2531eaef7663ac49c
|
[
"MIT"
] | 1
|
2019-10-03T19:32:29.000Z
|
2019-10-04T14:09:28.000Z
|
pipeline/models/pca.py
|
DPBayes/dp-representation-transfer
|
0c8389cc36659a7606bceac2531eaef7663ac49c
|
[
"MIT"
] | 1
|
2019-10-15T07:16:42.000Z
|
2019-10-15T07:16:42.000Z
|
# PCA
import numpy as np
from sklearn.decomposition import PCA as sk_PCA
import pickle
class PCA:
def init(self, input_dim, output_dim):
self.pca = sk_PCA(n_components=output_dim)
return self
def learn(self, x,
validation_split=0.0, # unused
validation_data=None, # unused
log_file_prefix=None, # unused
per_epoch_callback_funs=[],
callbacks=[]): # unused
# validation_split not (yet?) supported
assert validation_split == 0.0
self.pca.fit(x)
for callback in per_epoch_callback_funs:
callback()
def encode(self, x):
return self.pca.transform(x)
def decode(self, x):
return self.pca.inverse_transform(x)
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.pca, f)
    def load(self, filename):
with open(filename, 'rb') as f:
self.pca = pickle.load(f)
return self
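# Editor's sketch of the intended call sequence (hypothetical data, not part of the
# original module):
#
#   model = PCA().init(input_dim=x_train.shape[1], output_dim=10)
#   model.learn(x_train)                  # fits sklearn PCA on the training matrix
#   codes = model.encode(x_train)         # (n_samples, 10) projection
#   reconstruction = model.decode(codes)  # back to the original dimensionality
#   model.save("pca.pkl"); model = PCA().load("pca.pkl")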
| 23.35
| 47
| 0.639186
|
4a14a3a696e59564fefc1a7b2dc4f133fc3e7f68
| 667
|
py
|
Python
|
online_shop/manage.py
|
SwagatoMondal/Online-Shopping
|
7ca6755692caa57dd6ece2183b066438f4fcafab
|
[
"Apache-2.0"
] | 2
|
2021-03-24T10:55:54.000Z
|
2021-04-05T12:36:56.000Z
|
online_shop/manage.py
|
SwagatoMondal/Online-Shopping
|
7ca6755692caa57dd6ece2183b066438f4fcafab
|
[
"Apache-2.0"
] | null | null | null |
online_shop/manage.py
|
SwagatoMondal/Online-Shopping
|
7ca6755692caa57dd6ece2183b066438f4fcafab
|
[
"Apache-2.0"
] | 1
|
2022-02-23T15:18:17.000Z
|
2022-02-23T15:18:17.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'online_shop.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29
| 75
| 0.68066
|
4a14a4425902475bbee570f4d3493e5caf515032
| 3,146
|
py
|
Python
|
saleor/endevel_settings.py
|
EndevelCZ/saleor
|
009af798b4e9d2348de703bdcbe8d4f1a2a60e4f
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/endevel_settings.py
|
EndevelCZ/saleor
|
009af798b4e9d2348de703bdcbe8d4f1a2a60e4f
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/endevel_settings.py
|
EndevelCZ/saleor
|
009af798b4e9d2348de703bdcbe8d4f1a2a60e4f
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from decouple import AutoConfig
import raven
config = AutoConfig(os.environ.get('DJANGO_CONFIG_ENV_DIR'))
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = config('PROJECT_SECRET_KEY', default='')
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default='', cast=lambda v: [s.strip() for s in v.split(',')])
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('POSTGRESQL_DATABASE', default=''),
'USER': config('POSTGRESQL_USER', default=''),
'HOST': config('POSTGRESQL_HOST', default=''),
'PORT': config('POSTGRESQL_PORT', default=''),
'PASSWORD': config('POSTGRESQL_PASSWORD', default=''),
'ATOMIC_REQUESTS': True,
'CONN_MAX_AGE': 600,
}
}
EMAIL_HOST = config('EMAIL_HOST', default='localhost')
EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL', default='')
DEFAULT_REPLY_EMAIL = config('DEFAULT_REPLY_EMAIL', default='')
EMAIL_SUBJECT_PREFIX = ''
SERVER_EMAIL = 'info@endevel.cz' # The email address that error messages come from
SEND_BROKEN_LINK_EMAILS = False
UPLOADED_ATTACHMENTS_DIR = u'/tmp/'
PATH_TO_LOGDIR = os.path.join(config('PROJECT_HOME_DIR', ''), 'log/')
os.makedirs(PATH_TO_LOGDIR, exist_ok=True)
DEBUG_LOG_FILE = os.path.join(PATH_TO_LOGDIR, 'debug.log')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d'
'%(thread)d <%(name)s|%(filename)s:%(lineno)s> %(message)s'
},
'plain': {
'format': '%(asctime)s %(levelname)s <%(name)s> %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'filename': DEBUG_LOG_FILE,
'formatter': 'verbose',
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'plain',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'main': {
'handlers': ['file', 'console'],
'level': 'DEBUG'
},
'': {
'handlers': ['console', 'file'], #
'level': 'ERROR',
'propagate': True,
},
},
}
RAVEN_CONFIG = {
'dsn': config('SENTRY_DSN', default=''),
'release': raven.fetch_git_sha(PROJECT_DIR),
'environment': config('ENVIRONMENT', default=''),
}
| 31.46
| 101
| 0.581373
|
4a14a46d904669512978dad6d555da244921dad2
| 5,121
|
py
|
Python
|
slack_bolt/request/internals.py
|
shaydewael/bolt-python
|
42fa6a916850251e7fe90d681902d9d111b0a397
|
[
"MIT"
] | null | null | null |
slack_bolt/request/internals.py
|
shaydewael/bolt-python
|
42fa6a916850251e7fe90d681902d9d111b0a397
|
[
"MIT"
] | null | null | null |
slack_bolt/request/internals.py
|
shaydewael/bolt-python
|
42fa6a916850251e7fe90d681902d9d111b0a397
|
[
"MIT"
] | null | null | null |
import json
from typing import Optional, Dict, Union, List, Any
from urllib.parse import parse_qsl, parse_qs
from slack_bolt.context import BoltContext
def parse_query(
query: Optional[Union[str, Dict[str, str], Dict[str, List[str]]]]
) -> Dict[str, List[str]]:
if query is None:
return {}
elif isinstance(query, str):
return parse_qs(query)
elif isinstance(query, dict) or hasattr(query, "items"):
result: Dict[str, List[str]] = {}
for name, value in query.items():
if isinstance(value, list):
result[name] = value
elif isinstance(value, str):
result[name] = [value]
else:
raise ValueError(
f"Unsupported type ({type(value)}) of element in headers ({query})"
)
return result # type: ignore
else:
raise ValueError(f"Unsupported type of query detected ({type(query)})")
def parse_body(body: str, content_type: Optional[str]) -> Dict[str, Any]:
if not body:
return {}
if (
content_type is not None and content_type == "application/json"
) or body.startswith("{"):
return json.loads(body)
else:
if "payload" in body:
params = dict(parse_qsl(body))
if "payload" in params:
return json.loads(params.get("payload"))
else:
return {}
else:
return dict(parse_qsl(body))
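# Editor's illustration (hypothetical payloads): parse_body('{"type": "event_callback"}',
# "application/json") returns {"type": "event_callback"}, while a form-encoded slash
# command body such as "command=%2Fdeploy&text=prod" (content type
# "application/x-www-form-urlencoded") returns {"command": "/deploy", "text": "prod"}.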
def extract_enterprise_id(payload: Dict[str, Any]) -> Optional[str]:
if "enterprise" in payload:
org = payload.get("enterprise")
if isinstance(org, str):
return org
elif "id" in org:
return org.get("id") # type: ignore
if "enterprise_id" in payload:
return payload.get("enterprise_id")
if "team" in payload and "enterprise_id" in payload["team"]:
# In the case where the type is view_submission
return payload["team"].get("enterprise_id")
if "event" in payload:
return extract_enterprise_id(payload["event"])
return None
def extract_team_id(payload: Dict[str, Any]) -> Optional[str]:
if "team" in payload:
team = payload.get("team")
if isinstance(team, str):
return team
elif team and "id" in team:
return team.get("id")
if "team_id" in payload:
return payload.get("team_id")
if "event" in payload:
return extract_team_id(payload["event"])
if "user" in payload:
return payload.get("user")["team_id"]
return None
def extract_user_id(payload: Dict[str, Any]) -> Optional[str]:
if "user" in payload:
user = payload.get("user")
if isinstance(user, str):
return user
elif "id" in user:
return user.get("id") # type: ignore
if "user_id" in payload:
return payload.get("user_id")
if "event" in payload:
return extract_user_id(payload["event"])
return None
def extract_channel_id(payload: Dict[str, Any]) -> Optional[str]:
if "channel" in payload:
channel = payload.get("channel")
if isinstance(channel, str):
return channel
elif "id" in channel:
return channel.get("id") # type: ignore
if "channel_id" in payload:
return payload.get("channel_id")
if "event" in payload:
return extract_channel_id(payload["event"])
if "item" in payload:
# reaction_added: body["event"]["item"]
return extract_channel_id(payload["item"])
return None
def build_context(context: BoltContext, payload: Dict[str, Any],) -> BoltContext:
enterprise_id = extract_enterprise_id(payload)
if enterprise_id:
context["enterprise_id"] = enterprise_id
team_id = extract_team_id(payload)
if team_id:
context["team_id"] = team_id
user_id = extract_user_id(payload)
if user_id:
context["user_id"] = user_id
channel_id = extract_channel_id(payload)
if channel_id:
context["channel_id"] = channel_id
if "response_url" in payload:
context["response_url"] = payload["response_url"]
return context
def extract_content_type(headers: Dict[str, List[str]]) -> Optional[str]:
content_type: Optional[str] = headers.get("content-type", [None])[0]
if content_type:
return content_type.split(";")[0]
return None
def build_normalized_headers(
headers: Optional[Dict[str, Union[str, List[str]]]]
) -> Dict[str, List[str]]:
normalized_headers: Dict[str, List[str]] = {}
if headers is not None:
for key, value in headers.items():
normalized_name = key.lower()
if isinstance(value, list):
normalized_headers[normalized_name] = value
elif isinstance(value, str):
normalized_headers[normalized_name] = [value]
else:
raise ValueError(
f"Unsupported type ({type(value)}) of element in headers ({headers})"
)
return normalized_headers # type: ignore
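# Editor's illustration: build_normalized_headers({"Content-Type": "application/json"})
# yields {"content-type": ["application/json"]}, and extract_content_type on that result
# returns "application/json" (any parameters after ";" such as charset are dropped).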
| 33.253247
| 89
| 0.603593
|
4a14a475cb3e7bb30558b0e7f10bbf0d344fbe54
| 80
|
py
|
Python
|
torchsweeper/__init__.py
|
crnbaker/torchsweeper
|
1c0b696e9cf880d6ac799afb6174d60fbd148b6a
|
[
"MIT"
] | null | null | null |
torchsweeper/__init__.py
|
crnbaker/torchsweeper
|
1c0b696e9cf880d6ac799afb6174d60fbd148b6a
|
[
"MIT"
] | null | null | null |
torchsweeper/__init__.py
|
crnbaker/torchsweeper
|
1c0b696e9cf880d6ac799afb6174d60fbd148b6a
|
[
"MIT"
] | null | null | null |
from .decorators import Timer, ParameterSweeper
__all__ = ['Timer', 'ParameterSweeper']
| 20
| 47
| 0.75
|
4a14a4d88b5310e169e5b3e071f020427f8c3395
| 12,597
|
py
|
Python
|
test/functional/talan_dgp_block_size_sync.py
|
talanproject/talan
|
83bd4549703efc1cdb68511f3432520cf07b5d52
|
[
"MIT"
] | null | null | null |
test/functional/talan_dgp_block_size_sync.py
|
talanproject/talan
|
83bd4549703efc1cdb68511f3432520cf07b5d52
|
[
"MIT"
] | null | null | null |
test/functional/talan_dgp_block_size_sync.py
|
talanproject/talan
|
83bd4549703efc1cdb68511f3432520cf07b5d52
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.talan import *
from test_framework.address import *
from test_framework.blocktools import *
import io
"""
Note: these tests do not exercise the DGP template contract itself; for tests of the DGP template, see talan-dgp.py.
"""
class TalanDGPBlockSizeSyncTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 8
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def create_block_of_approx_max_size(self, size_in_bytes):
tip = self.node.getblock(self.node.getbestblockhash())
block = create_block(int(self.node.getbestblockhash(), 16), create_coinbase(self.node.getblockcount()+1), tip['time'])
block.hashUTXORoot = int(tip['hashUTXORoot'], 16)
block.hashStateRoot = int(tip['hashStateRoot'], 16)
unspents = self.node.listunspent()
while len(block.serialize()) < size_in_bytes:
unspent = unspents.pop(0)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)]
for i in range(50):
tx.vout.append(CTxOut(int(unspent['amount']*COIN/100 - 11000), scriptPubKey=CScript([OP_TRUE]*10000)))
tx_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
block.vtx.append(CTransaction())
block.vtx[-1].deserialize(f)
while len(block.serialize()) > size_in_bytes:
block.vtx[-1].vout.pop(-1)
if not block.vtx[-1].vout:
block.vtx.pop(-1)
tx_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(block.vtx[-1].serialize()))['hex']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
block.vtx[-1] = CTransaction()
block.vtx[-1].deserialize(f)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
print("block size", len(block.serialize()))
return block
def create_proposal_contract(self, block_size=2000000):
"""
pragma solidity ^0.4.11;
contract blockSize {
uint32[1] _blockSize=[
8000000 //block size in bytes
];
function getBlockSize() constant returns(uint32[1] _size){
return _blockSize;
}
}
"""
# The contracts below only differ in the _blockSize variable
if block_size == 32000000:
contract_data = self.node.createcontract("60606040526020604051908101604052806301e8480063ffffffff16815250600090600161002e92919061003f565b50341561003a57600080fd5b610115565b8260016007016008900481019282156100d15791602002820160005b8382111561009f57835183826101000a81548163ffffffff021916908363ffffffff160217905550926020019260040160208160030104928301926001030261005b565b80156100cf5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009f565b505b5090506100de91906100e2565b5090565b61011291905b8082111561010e57600081816101000a81549063ffffffff0219169055506001016100e8565b5090565b90565b610162806101246000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a72305820322c4456cb00ecc4c7f2878fe22cc7ff6addbf199842e68a4b23e98d51446b080029", 10000000)
elif block_size == 8000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280627a120062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a723058209bab110523b5fdedfb12512d3aedc1ba1add53dff85edb77aeec48ebdc01c35c0029", 10000000)
elif block_size == 4000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280623d090062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a72305820c5f02b85c3d9d7b93140775449355f53a7cb98dcafc56f07cdb09e9f2dc240550029", 10000000)
elif block_size == 2000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280621e848062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a723058201f747ceade404003185ab16248ecd30e8c1a63a811e55d7961ce3a47ddd01b160029", 10000000)
elif block_size == 1000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280620f424062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a7230582034c00d84f338629f594676d9bc32d5b9d7b92f3b438e9cc82a3efd92805f14730029", 10000000)
self.proposal_address = contract_data['address']
def assert_block_accepted(self, block, with_witness=True):
current_block_count = self.node.getblockcount()
assert_equal(self.node.submitblock(bytes_to_hex_str(block.serialize(with_witness))), None)
assert_equal(self.node.getblockcount(), current_block_count+1)
t = time.time()
while time.time() < t+5:
if self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash():
break
else:
assert(False)
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
def assert_block_limits(self, max_accepted_block_size, possible_block_sizes):
accepted_block_sizes = possible_block_sizes[0:possible_block_sizes.index(max_accepted_block_size)+1]
for block_size in accepted_block_sizes:
block = self.create_block_of_approx_max_size(block_size)
self.assert_block_accepted(block)
t = time.time()
while time.time() < t+5:
if self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash():
break
else:
assert(False)
# Make sure that both nodes now have the same tip
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
def run_test(self):
# stop 6 nodes that will be used later for IBD
for i in range(2, 8):
self.stop_node(i)
# Generate some blocks to make sure we have enough spendable outputs
self.node = self.nodes[0]
self.node.generate(1000 + COINBASE_MATURITY)
self.BLOCK_SIZE_DGP = DGPState(self.node, "0000000000000000000000000000000000000081")
self.is_network_split = False
connect_nodes_bi(self.nodes, 0, 1)
# Start off by setting ourself as admin
admin_address = self.node.getnewaddress()
# Set ourself up as admin
self.BLOCK_SIZE_DGP.send_set_initial_admin(admin_address)
self.node.generate(1)
possible_block_sizes = [1000000, 2000000, 4000000, 8000000]
ascending_block_sizes = sorted(possible_block_sizes)
for max_block_size in possible_block_sizes:
self.create_proposal_contract(max_block_size)
self.BLOCK_SIZE_DGP.send_add_address_proposal(self.proposal_address, 2, admin_address)
self.node.generate(2) # We need to generate 2 blocks now for it to activate
self.assert_block_limits(max_block_size, ascending_block_sizes)
# Bring the last nodes online and make sure that they sync with node 0 and 1 (A and B)
for i in range(2, 8):
self.start_node(i)
connect_nodes_bi(self.nodes, 0, i)
connect_nodes_bi(self.nodes, 1, i)
self.sync_all()
if __name__ == '__main__':
TalanDGPBlockSizeSyncTest().main()
| 83.98
| 1,358
| 0.830118
|
4a14a67557217e7a0414a2bc012c367ec9b51193
| 4,194
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/peer_express_route_circuit_connection_py3.py
|
xiafu-msft/azure-sdk-for-python
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/peer_express_route_circuit_connection_py3.py
|
xiafu-msft/azure-sdk-for-python
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/peer_express_route_circuit_connection_py3.py
|
xiafu-msft/azure-sdk-for-python
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class PeerExpressRouteCircuitConnection(SubResource):
"""Peer Express Route Circuit Connection in an ExpressRouteCircuitPeering
resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param express_route_circuit_peering: Reference to Express Route Circuit
Private Peering Resource of the circuit.
:type express_route_circuit_peering:
~azure.mgmt.network.v2019_02_01.models.SubResource
:param peer_express_route_circuit_peering: Reference to Express Route
Circuit Private Peering Resource of the peered circuit.
:type peer_express_route_circuit_peering:
~azure.mgmt.network.v2019_02_01.models.SubResource
:param address_prefix: /29 IP address space to carve out Customer
addresses for tunnels.
:type address_prefix: str
:param circuit_connection_status: Express Route Circuit connection state.
Possible values include: 'Connected', 'Connecting', 'Disconnected'
:type circuit_connection_status: str or
~azure.mgmt.network.v2019_02_01.models.CircuitConnectionStatus
:param connection_name: The name of the express route circuit connection
resource.
:type connection_name: str
:param auth_resource_guid: The resource guid of the authorization used for
the express route circuit connection.
:type auth_resource_guid: str
:ivar provisioning_state: Provisioning state of the peer express route
circuit connection resource. Possible values are: 'Succeeded', 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'express_route_circuit_peering': {'key': 'properties.expressRouteCircuitPeering', 'type': 'SubResource'},
'peer_express_route_circuit_peering': {'key': 'properties.peerExpressRouteCircuitPeering', 'type': 'SubResource'},
'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'},
'circuit_connection_status': {'key': 'properties.circuitConnectionStatus', 'type': 'str'},
'connection_name': {'key': 'properties.connectionName', 'type': 'str'},
'auth_resource_guid': {'key': 'properties.authResourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, express_route_circuit_peering=None, peer_express_route_circuit_peering=None, address_prefix: str=None, circuit_connection_status=None, connection_name: str=None, auth_resource_guid: str=None, name: str=None, **kwargs) -> None:
super(PeerExpressRouteCircuitConnection, self).__init__(id=id, **kwargs)
self.express_route_circuit_peering = express_route_circuit_peering
self.peer_express_route_circuit_peering = peer_express_route_circuit_peering
self.address_prefix = address_prefix
self.circuit_connection_status = circuit_connection_status
self.connection_name = connection_name
self.auth_resource_guid = auth_resource_guid
self.provisioning_state = None
self.name = name
self.etag = None
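# Editor's sketch (hypothetical values, not from the SDK docs): the generated model is
# constructed with keyword-only arguments, e.g.
#
#   conn = PeerExpressRouteCircuitConnection(
#       address_prefix="10.0.0.0/29",
#       connection_name="peer-circuit-conn",
#   )
#
# Read-only fields such as provisioning_state and etag are populated by the service and
# are always initialized to None here.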
| 48.767442
| 266
| 0.695517
|
4a14a7a4de1be0a8b054c9413f8fce6ae18c4045
| 2,527
|
py
|
Python
|
message.py
|
parkr/steve
|
48df9b17a46b933616dc97833d68ae514a9343ad
|
[
"MIT"
] | 5
|
2015-11-08T17:46:17.000Z
|
2021-01-09T15:49:47.000Z
|
message.py
|
parkr/steve
|
48df9b17a46b933616dc97833d68ae514a9343ad
|
[
"MIT"
] | 2
|
2015-06-16T00:19:21.000Z
|
2015-06-16T00:19:32.000Z
|
message.py
|
parkr/steve
|
48df9b17a46b933616dc97833d68ae514a9343ad
|
[
"MIT"
] | 2
|
2016-07-16T15:13:04.000Z
|
2018-03-05T22:08:24.000Z
|
import engine
import re
from sqlalchemy.sql import select
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import MetaData, Table
from sqlalchemy import Column, Integer, String, Text
Base = declarative_base()
class Message(Base):
__tablename__ = 'messages'
DASHES = '-'
id = Column(Integer, primary_key=True)
recipient = Column(String(255))
sender = Column(String(255))
who_from = Column(String(255))
subject = Column(String(255))
body_plain = Column(Text)
stripped_text = Column(Text)
timestamp = Column(Integer)
signature = Column(String(255))
message_headers = Column(Text)
def __init__(self, attributes):
underscorize = re.compile(self.DASHES, re.MULTILINE)
for key in attributes.keys():
setattr(self, underscorize.sub('_', key), attributes[key])
def __repr__(self):
return "<Message('%s','%s', '%s')>" % (self.id, self.who_from, self.timestamp)
def as_json(self):
return {
"id": self.id,
"recipient": self.recipient,
"sender": self.sender,
'who_from': self.who_from,
'subject': self.subject,
'body_plain': self.body_plain,
'stripped_text': self.stripped_text,
'timestamp': self.timestamp,
'signature': self.signature,
'message_headers': self.message_headers
}
def latest():
conn = engine.build_engine().connect()
results = conn.execute(select([" * FROM messages"])).fetchall()
return [build_message(r) for r in results]
def build_message(result):
return Message({
"id": int(result[0]),
"recipient": result[1],
"sender": result[2],
'who_from': result[3],
'subject': result[4],
'body_plain': result[5],
'stripped_text': result[6],
'timestamp': result[7],
'signature': result[8],
'message_headers': result[9]
})
if __name__ == "__main__":
e = engine.build_engine()
metadata = MetaData(bind=e)
messages_table = Table('messages', metadata,
Column('id', Integer, primary_key=True),
Column('recipient', String(255)),
Column('sender', String(255)),
Column('who_from', String(255)),
Column('subject', String(255)),
Column('body_plain', Text),
Column('stripped_text', Text),
Column('timestamp', Integer),
Column('signature', String(255)),
Column('message_headers', Text),
)
metadata.create_all()
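# A minimal usage sketch (the attribute dict below is hypothetical, in
# Mailgun's dashed-key style; __init__ rewrites dashes to underscores
# before assigning attributes):
#
#   msg = Message({"body-plain": "hi", "who-from": "sender@example.com"})
#   msg.body_plain   # -> "hi"
#   msg.who_from     # -> "sender@example.com"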
| 29.383721
| 82
| 0.616541
|
4a14a7c9dcf07898e101b5bc8484e50c276e4964
| 4,265
|
py
|
Python
|
rasa/nlu/extractors/entity_synonyms.py
|
RielKIM/rasa
|
a3e329efd16032c900900181dcf57b01884e1a97
|
[
"Apache-2.0"
] | null | null | null |
rasa/nlu/extractors/entity_synonyms.py
|
RielKIM/rasa
|
a3e329efd16032c900900181dcf57b01884e1a97
|
[
"Apache-2.0"
] | null | null | null |
rasa/nlu/extractors/entity_synonyms.py
|
RielKIM/rasa
|
a3e329efd16032c900900181dcf57b01884e1a97
|
[
"Apache-2.0"
] | 1
|
2020-03-12T16:09:06.000Z
|
2020-03-12T16:09:06.000Z
|
import os
import warnings
from typing import Any, Dict, Optional, Text
from rasa.constants import DOCS_URL_TRAINING_DATA_NLU
from rasa.nlu.constants import ENTITIES_ATTRIBUTE
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.extractors import EntityExtractor
from rasa.nlu.model import Metadata
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.utils import write_json_to_file
import rasa.utils.io
from rasa.utils.common import raise_warning
class EntitySynonymMapper(EntityExtractor):
provides = [ENTITIES_ATTRIBUTE]
def __init__(
self,
component_config: Optional[Dict[Text, Any]] = None,
synonyms: Optional[Dict[Text, Any]] = None,
) -> None:
super().__init__(component_config)
self.synonyms = synonyms if synonyms else {}
def train(
self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any
) -> None:
for key, value in list(training_data.entity_synonyms.items()):
self.add_entities_if_synonyms(key, value)
for example in training_data.entity_examples:
for entity in example.get(ENTITIES_ATTRIBUTE, []):
entity_val = example.text[entity["start"] : entity["end"]]
self.add_entities_if_synonyms(entity_val, str(entity.get("value")))
def process(self, message: Message, **kwargs: Any) -> None:
updated_entities = message.get(ENTITIES_ATTRIBUTE, [])[:]
self.replace_synonyms(updated_entities)
message.set(ENTITIES_ATTRIBUTE, updated_entities, add_to_output=True)
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
if self.synonyms:
file_name = file_name + ".json"
entity_synonyms_file = os.path.join(model_dir, file_name)
write_json_to_file(
entity_synonyms_file, self.synonyms, separators=(",", ": ")
)
return {"file": file_name}
else:
return {"file": None}
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional[Metadata] = None,
cached_component: Optional["EntitySynonymMapper"] = None,
**kwargs: Any,
) -> "EntitySynonymMapper":
file_name = meta.get("file")
if not file_name:
synonyms = None
return cls(meta, synonyms)
entity_synonyms_file = os.path.join(model_dir, file_name)
if os.path.isfile(entity_synonyms_file):
synonyms = rasa.utils.io.read_json_file(entity_synonyms_file)
else:
synonyms = None
raise_warning(
f"Failed to load synonyms file from '{entity_synonyms_file}'.",
docs=DOCS_URL_TRAINING_DATA_NLU + "#entity-synonyms",
)
return cls(meta, synonyms)
def replace_synonyms(self, entities) -> None:
for entity in entities:
# need to wrap in `str` to handle e.g. entity values of type int
entity_value = str(entity["value"])
if entity_value.lower() in self.synonyms:
entity["value"] = self.synonyms[entity_value.lower()]
self.add_processor_name(entity)
def add_entities_if_synonyms(self, entity_a, entity_b) -> None:
if entity_b is not None:
original = str(entity_a)
replacement = str(entity_b)
if original != replacement:
original = original.lower()
if original in self.synonyms and self.synonyms[original] != replacement:
raise_warning(
f"Found conflicting synonym definitions "
f"for {repr(original)}. Overwriting target "
f"{repr(self.synonyms[original])} with "
f"{repr(replacement)}. "
f"Check your training data and remove "
f"conflicting synonym definitions to "
f"prevent this from happening.",
docs=DOCS_URL_TRAINING_DATA_NLU + "#entity-synonyms",
)
self.synonyms[original] = replacement
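# A minimal sketch of the replacement step in isolation (the mapper normally
# runs inside a Rasa NLU pipeline; the entity dict below is hypothetical):
#
#   mapper = EntitySynonymMapper(synonyms={"nyc": "New York City"})
#   entities = [{"value": "NYC", "entity": "city", "start": 0, "end": 3}]
#   mapper.replace_synonyms(entities)
#   entities[0]["value"]  # -> "New York City" (the lookup is lowercased, and
#   # add_processor_name() records this component under "processors")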
| 37.412281
| 88
| 0.615006
|
4a14a83d1aa258e6f779fae29d7e18f440f2592d
| 3,234
|
py
|
Python
|
middleware/legato/templates/legato_gfx_pda_7000b/Support_BSP_PIC32MZ_DA_Curiosity.py
|
automaate/gfx3.8
|
55bf94302f00c8d513c84d910185cef2ca6b5be2
|
[
"0BSD"
] | null | null | null |
middleware/legato/templates/legato_gfx_pda_7000b/Support_BSP_PIC32MZ_DA_Curiosity.py
|
automaate/gfx3.8
|
55bf94302f00c8d513c84d910185cef2ca6b5be2
|
[
"0BSD"
] | null | null | null |
middleware/legato/templates/legato_gfx_pda_7000b/Support_BSP_PIC32MZ_DA_Curiosity.py
|
automaate/gfx3.8
|
55bf94302f00c8d513c84d910185cef2ca6b5be2
|
[
"0BSD"
] | null | null | null |
# coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
############ GLCD + TOUCH I2C CONFIG ######################################################
mzda_cu_activateList = ["le_gfx_driver_glcd", "gfx_hal_le", "i2c2", "drv_i2c", "drv_i2c0", "core_timer", "sys_time", "ddr", "le_gfx_driver_2dgpu"]
mzda_cu_connectList = [["gfx_legato", "gfx_driver", "le_gfx_driver_glcd", "gfx_driver_glcd"],
["le_gfx_driver_glcd", "Graphics Display", "gfx_disp_pdatm7000b_800x480", "gfx_display"],
["drv_i2c_0", "drv_i2c_I2C_dependency", "i2c2", "I2C2_I2C"],
["gfx_maxtouch_controller", "i2c", "drv_i2c_0", "drv_i2c"],
["sys_time", "sys_time_TMR_dependency", "core_timer", "CORE_TIMER_TMR"],
["gfx_legato", "gpu_driver", "le_gfx_driver_2dgpu", "gfx_driver_2dgpu"]]
mzda_cu_pinConfig = [{"pin": 42, "name": "BSP_MAXTOUCH_CHG", "type": "GPIO", "direction": "In", "latch": "", "abcd": ""}, #RH14
{"pin": 79, "name": "TM7000_BACKLIGHT", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}, #RD0
{"pin": 129, "name": "SCL2", "type": "SCL2", "direction": "", "latch": "", "abcd": ""}, #RA2
{"pin": 128, "name": "SDA2", "type": "SDA2", "direction": "", "latch": "", "abcd": ""}] #RA3
##################################################################################
def mzda_cu_eventHandler(event):
global pinConfigureFxn
if (event == "configure"):
        # Override the default pin configure function with a PIC32M-specific one
pinConfigureFxn = configurePinsPIC32M
try:
Database.setSymbolValue("le_gfx_driver_glcd", "PixelClockDivider", 10, 1)
        except Exception:
return
mzda_cu_interfaceList = ["GLCD"]
pic32mz_da_curiosity = bspSupportObj(mzda_cu_pinConfig, mzda_cu_activateList, None, mzda_cu_connectList, mzda_cu_eventHandler)
addDisplayIntfSupport("BSP_PIC32MZ_DA_Curiosity", mzda_cu_interfaceList)
addBSPSupport("BSP_PIC32MZ_DA_Curiosity", "GLCD", pic32mz_da_curiosity)
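# A minimal sketch of registering another board through the same helpers
# (bspSupportObj, addDisplayIntfSupport and addBSPSupport are provided by the
# surrounding MPLAB Harmony template environment; the board name and lists
# below are hypothetical):
#
#   another_board = bspSupportObj(pinConfig, activateList, None,
#                                 connectList, eventHandler)
#   addDisplayIntfSupport("BSP_Hypothetical_Board", ["GLCD"])
#   addBSPSupport("BSP_Hypothetical_Board", "GLCD", another_board)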
| 56.736842
| 147
| 0.650588
|
4a14a8572eb3731935589594a7f31a7eea794576
| 9,238
|
py
|
Python
|
python/libs/quickjson.py
|
rmrector/script.artwork.dump
|
02141823e9d920de7e377ae606a1fe680cad4417
|
[
"MIT"
] | 7
|
2021-01-23T06:05:33.000Z
|
2021-12-06T11:25:21.000Z
|
python/libs/quickjson.py
|
rmrector/script.artwork.dump
|
02141823e9d920de7e377ae606a1fe680cad4417
|
[
"MIT"
] | 10
|
2020-12-20T08:20:35.000Z
|
2022-03-18T22:22:41.000Z
|
python/libs/quickjson.py
|
rmrector/script.artwork.dump
|
02141823e9d920de7e377ae606a1fe680cad4417
|
[
"MIT"
] | null | null | null |
import json
from itertools import chain
from libs import mediatypes, pykodi
from libs.pykodi import log
# [0] method part, [1] list: properties, [2] dict: extra params
typemap = {mediatypes.MOVIE: ('Movie', ['art', 'imdbnumber', 'file', 'premiered', 'uniqueid', 'setid'], None),
mediatypes.MOVIESET: ('MovieSet', ['art'], {'movies': {'properties': ['art', 'file']}}),
mediatypes.TVSHOW: ('TVShow', ['art', 'imdbnumber', 'season', 'file', 'premiered', 'uniqueid'], None),
mediatypes.EPISODE: ('Episode', ['art', 'uniqueid', 'tvshowid', 'season', 'episode', 'file', 'showtitle', 'seasonid'], None),
mediatypes.SEASON: ('Season', ['season', 'art', 'tvshowid', 'showtitle'], None),
mediatypes.MUSICVIDEO: ('MusicVideo', ['art', 'file', 'title', 'artist'], None),
mediatypes.ARTIST: ('Artist', ['art', 'musicbrainzartistid'], None),
mediatypes.ALBUM: ('Album', ['art', 'musicbrainzalbumid', 'musicbrainzreleasegroupid',
'musicbrainzalbumartistid', 'artist', 'artistid', 'title'], None),
mediatypes.SONG: ('Song', ['art', 'musicbrainztrackid', 'musicbrainzalbumartistid', 'album',
'albumartist', 'albumartistid', 'albumid', 'file', 'disc', 'artist', 'title'], None)}
def get_item_details(dbid, mediatype):
assert mediatype in typemap
mapped = typemap[mediatype]
basestr = 'VideoLibrary.Get{0}Details' if mediatype not in mediatypes.audiotypes else 'AudioLibrary.Get{0}Details'
json_request = get_base_json_request(basestr.format(mapped[0]))
json_request['params'][mediatype + 'id'] = dbid
json_request['params']['properties'] = mapped[1]
if mapped[2]:
json_request['params'].update(mapped[2])
json_result = pykodi.execute_jsonrpc(json_request)
result_key = mediatype + 'details'
if check_json_result(json_result, result_key, json_request):
result = json_result['result'][result_key]
return result
def get_item_list(mediatype, extraparams=None, overrideprops=None):
json_request, json_result = _inner_get_item_list(mediatype, extraparams, overrideprops)
result_key = mediatype + 's'
if not check_json_result(json_result, result_key, json_request):
return []
return _extract_result_list(json_result, mediatype)
def _inner_get_item_list(mediatype, extraparams=None, overrideprops=None):
assert mediatype in typemap
mapped = typemap[mediatype]
basestr = 'VideoLibrary.Get{0}s' if mediatype not in mediatypes.audiotypes else 'AudioLibrary.Get{0}s'
json_request = get_base_json_request(basestr.format(mapped[0]))
json_request['params']['sort'] = {'method': _determine_sort_method(mediatype), 'order': 'ascending'}
json_request['params']['properties'] = mapped[1] if overrideprops is None else overrideprops
if extraparams:
json_request['params'].update(extraparams)
json_result = pykodi.execute_jsonrpc(json_request)
return json_request, json_result
def _determine_sort_method(mediatype):
if mediatype in (mediatypes.EPISODE, mediatypes.SEASON):
return 'tvshowtitle'
return 'sorttitle'
def _extract_result_list(json_result, mediatype):
result = json_result['result'][mediatype + 's']
return result
def iter_item_list(mediatype):
first_and_count = _get_first_item_and_count(mediatype)
if not first_and_count[0]:
return (), 0
first_item, totalcount = first_and_count
return _get_iter_with_first(mediatype, first_item), totalcount
def _get_first_item_and_count(mediatype):
extraparams = {'limits': {'start': 0, 'end': 1}}
json_request, json_result = _inner_get_item_list(mediatype, extraparams)
if not check_json_result(json_result, mediatype + 's', json_request):
return None, 0
total = json_result['result']['limits']['total']
itemlist = _extract_result_list(json_result, mediatype)
if not itemlist:
return None, 0
return itemlist[0], total
def _get_iter_with_first(mediatype, first_item):
yield first_item
for item in _get_iter(mediatype):
yield item
def _get_iter(mediatype):
chunksize = 4000 if mediatype == mediatypes.EPISODE else 1000
source_exhausted = False
lastend = 1
while not source_exhausted:
extraparams = {'limits': {'start': lastend, 'end': lastend + chunksize}}
json_request, json_result = _inner_get_item_list(mediatype, extraparams)
if not check_json_result(json_result, mediatype + 's', json_request):
break
total = json_result['result']['limits']['total']
if lastend + chunksize >= total:
source_exhausted = True
lastend = json_result['result']['limits']['end']
for item in _extract_result_list(json_result, mediatype):
yield item
def get_albums(artistname=None, dbid=None):
if artistname is None or dbid is None:
return get_item_list(mediatypes.ALBUM)
# filter artistid is slow for artists with many albums, much faster to filter based on
# artist name and then filter the result for proper artistID. songs are good, though
allalbums = get_item_list(mediatypes.ALBUM, {'filter':
{'field': 'artist', 'operator': 'is', 'value': artistname}})
return [album for album in allalbums if album['artistid'] and album['artistid'][0] == dbid]
def get_artists_byname(artistname):
return get_item_list(mediatypes.ARTIST,
{'filter': {"field": "artist", "operator": "is", "value": artistname}}, [])
def get_songs(mediatype=None, dbid=None, songfilter=None):
if songfilter is None and (mediatype is None or dbid is None):
return get_item_list(mediatypes.SONG)
if not songfilter:
songfilter = {mediatype + 'id': dbid}
return get_item_list(mediatypes.SONG, {'filter': songfilter})
def set_item_details(dbid, mediatype, **details):
assert mediatype in typemap
mapped = typemap[mediatype]
basestr = 'VideoLibrary.Set{0}Details' if mediatype not in mediatypes.audiotypes else 'AudioLibrary.Set{0}Details'
json_request = get_base_json_request(basestr.format(mapped[0]))
json_request['params'] = details
json_request['params'][mediatype + 'id'] = dbid
json_result = pykodi.execute_jsonrpc(json_request)
if not check_json_result(json_result, 'OK', json_request):
log(json_result)
def get_textures(url=None):
json_request = get_base_json_request('Textures.GetTextures')
json_request['params']['properties'] = ['url']
if url is not None:
json_request['params']['filter'] = {'field': 'url', 'operator': 'is', 'value': url}
json_result = pykodi.execute_jsonrpc(json_request)
if check_json_result(json_result, 'textures', json_request):
return json_result['result']['textures']
else:
return []
def remove_texture(textureid):
json_request = get_base_json_request('Textures.RemoveTexture')
json_request['params']['textureid'] = textureid
json_result = pykodi.execute_jsonrpc(json_request)
if not check_json_result(json_result, 'OK', json_request):
log(json_result)
def remove_texture_byurl(url):
textures = get_textures(url)
for texture in textures:
remove_texture(texture['textureid'])
def get_available_art(dbid, mediatype, arttype=None):
lb = 'VideoLibrary' if mediatype not in mediatypes.audiotypes else 'AudioLibrary'
json_request = get_base_json_request(lb + '.GetAvailableArt')
json_request['params']['item'] = {mediatype + 'id': dbid}
if arttype is not None:
json_request['params']['arttype'] = arttype
json_result = pykodi.execute_jsonrpc(json_request)
if check_json_result(json_result, 'availableart', json_request):
return json_result['result']['availableart']
else:
return []
def get_base_json_request(method):
return {'jsonrpc': '2.0', 'method': method, 'params': {}, 'id': 1}
def get_application_properties(properties):
json_request = get_base_json_request('Application.GetProperties')
json_request['params']['properties'] = properties
json_result = pykodi.execute_jsonrpc(json_request)
if check_json_result(json_result, None, json_request):
return json_result['result']
def get_settingvalue(setting):
json_request = get_base_json_request('Settings.GetSettingValue')
json_request['params']['setting'] = setting
json_result = pykodi.execute_jsonrpc(json_request)
if check_json_result(json_result, None, json_request):
return json_result['result']['value']
def check_json_result(json_result, result_key, json_request):
if 'error' in json_result:
raise JSONException(json_request, json_result)
return 'result' in json_result and (not result_key or result_key in json_result['result'])
class JSONException(Exception):
def __init__(self, json_request, json_result):
self.json_request = json_request
self.json_result = json_result
message = "There was an error with a JSON-RPC request.\nRequest: "
message += json.dumps(json_request, cls=pykodi.PrettyJSONEncoder)
message += "\nResult: "
message += json.dumps(json_result, cls=pykodi.PrettyJSONEncoder)
self.message = message
super(JSONException, self).__init__(message)
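# A minimal sketch of issuing a request with these helpers (assumes a running
# Kodi instance reachable through pykodi.execute_jsonrpc; 'JSONRPC.Ping' is a
# standard Kodi JSON-RPC method):
#
#   request = get_base_json_request('JSONRPC.Ping')
#   result = pykodi.execute_jsonrpc(request)
#   if check_json_result(result, None, request):
#       log(result['result'])  # -> 'pong'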
| 42.571429
| 129
| 0.704481
|
4a14a915bdb33f1ac6e8fc1839b32bc81fa8de05
| 28,656
|
py
|
Python
|
tensorflow/python/data/kernel_tests/iterator_ops_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 13
|
2018-07-23T18:53:35.000Z
|
2021-11-18T19:56:45.000Z
|
tensorflow/python/data/kernel_tests/iterator_ops_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 1
|
2018-02-23T03:17:44.000Z
|
2018-02-23T03:17:44.000Z
|
tensorflow/python/data/kernel_tests/iterator_ops_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 13
|
2018-09-07T13:28:38.000Z
|
2020-07-17T15:06:24.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
class IteratorTest(test.TestCase):
def testAttemptingGradientsRaiseExceptions(self):
component = constant_op.constant([1])
side = constant_op.constant(0)
add = lambda x: x + side
dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
value = dataset.make_one_shot_iterator().get_next()
with self.assertRaisesRegexp(LookupError, "No gradient defined"):
gradients_impl.gradients(value, component)
with self.assertRaisesRegexp(LookupError, "No gradient defined"):
gradients_impl.gradients(value, side)
with self.assertRaisesRegexp(LookupError, "No gradient defined"):
gradients_impl.gradients(value, [component, side])
def testCapturingStateInOneShotRaisesException(self):
var = variables.Variable(37.0, name="myvar")
dataset = (
dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
.map(lambda x: x + var))
with self.assertRaisesRegexp(
ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
"datasets that capture stateful objects.+myvar"):
dataset.make_one_shot_iterator()
def testOneShotIterator(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(14).make_one_shot_iterator())
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorCaptureByValue(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
tensor_components = tuple([ops.convert_to_tensor(c) for c in components])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (
dataset_ops.Dataset.from_tensor_slices(tensor_components)
.map(_map_fn).repeat(14).make_one_shot_iterator())
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorInsideContainer(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def within_container():
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn).repeat(14).make_one_shot_iterator())
return iterator.get_next()
server = server_lib.Server.create_local_server()
# Create two iterators within unique containers, and run them to
# make sure that the resources aren't shared.
#
# The test below would fail if cname were the same across both
# sessions.
for i in range(2):
with session.Session(server.target) as sess:
cname = "iteration%d" % i
with ops.container(cname):
get_next = within_container()
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorNonBlocking(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Create a session with a single thread to ensure that the
# one-shot iterator initializer does not deadlock.
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, use_per_session_threads=True)
with session.Session(config=config) as sess:
self.assertAllEqual([1, 4, 9], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
# Test with multiple threads invoking the one-shot iterator concurrently.
with session.Session(config=config) as sess:
results = []
def consumer_thread():
try:
results.append(sess.run(next_element))
except errors.OutOfRangeError:
results.append(None)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(num_threads, len(results))
self.assertEqual(num_threads - 1,
len([None for r in results if r is None]))
self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
def testOneShotIteratorInitializerFails(self):
# Define a dataset whose initialization will always fail.
dataset = dataset_ops.Dataset.from_tensors(
array_ops.check_numerics(
constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
# Test that subsequent attempts to use the iterator also fail.
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
with self.test_session() as sess:
def consumer_thread():
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
def testSimpleSharedResource(self):
components = (np.array(1, dtype=np.int64),
np.array([1, 2, 3], dtype=np.int64),
np.array(37.0, dtype=np.float64))
server = server_lib.Server.create_local_server()
# Create two non-overlapping sessions that share the same iterator
# resource on the same server, and verify that an action of the
# first session (initializing the iterator) is visible in the
# second session.
with ops.Graph().as_default():
iterator = (
dataset_ops.Dataset.from_tensors(components)
.map(lambda x, y, z: (x, y, z)).make_initializable_iterator(
shared_name="shared_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(server.target) as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Re-initialize the iterator in the first session.
sess.run(init_op)
with ops.Graph().as_default():
# Re-define the iterator manually, without defining any of the
# functions in this graph, to ensure that we are not
# accidentally redefining functions with the same names in the
# new graph.
iterator = iterator_ops.Iterator.from_structure(
shared_name="shared_iterator",
output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
output_shapes=([], [3], []))
get_next = iterator.get_next()
with session.Session(server.target) as sess:
# Use the iterator without re-initializing in the second session.
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testNotInitializedError(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = (
dataset_ops.Dataset.from_tensors(components)
.make_initializable_iterator())
get_next = iterator.get_next()
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"iterator has not been initialized"):
sess.run(get_next)
def testReinitializableIterator(self):
dataset_3 = dataset_ops.Dataset.from_tensors(
constant_op.constant([1, 2, 3]))
dataset_4 = dataset_ops.Dataset.from_tensors(
constant_op.constant([4, 5, 6, 7]))
iterator = iterator_ops.Iterator.from_structure(dataset_3.output_types,
[None])
dataset_3_init_op = iterator.make_initializer(dataset_3)
dataset_4_init_op = iterator.make_initializer(dataset_4)
get_next = iterator.get_next()
self.assertEqual(dataset_3.output_types, iterator.output_types)
self.assertEqual(dataset_4.output_types, iterator.output_types)
self.assertEqual([None], iterator.output_shapes.as_list())
with self.test_session() as sess:
# The iterator is initially uninitialized.
with self.assertRaises(errors.FailedPreconditionError):
sess.run(get_next)
# Initialize with one dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Initialize with a different dataset.
sess.run(dataset_4_init_op)
self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Reinitialize with the first dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReinitializableIteratorStaticErrors(self):
# Non-matching structure for types and shapes.
with self.assertRaises(TypeError):
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), [None])
# Test validation of dataset argument.
iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64))
# Incompatible structure.
with self.assertRaises(ValueError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64),))))
# Incompatible types.
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int32),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32))))
# Incompatible shapes.
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), ([None], []))
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int64),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64))))
def testIteratorStringHandle(self):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_4 = dataset_4.make_one_shot_iterator()
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_3.output_types, dataset_3.output_shapes)
next_element = feedable_iterator.get_next()
self.assertEqual(dataset_3.output_types, feedable_iterator.output_types)
self.assertEqual(dataset_4.output_types, feedable_iterator.output_types)
self.assertEqual([], feedable_iterator.output_shapes)
with self.test_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
def testIteratorStringHandleReuseTensorObject(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
one_shot_iterator = dataset.make_one_shot_iterator()
initializable_iterator = dataset.make_initializable_iterator()
structure_iterator = iterator_ops.Iterator.from_structure(
dataset.output_types)
created_ops = len(ops.get_default_graph().get_operations())
self.assertIs(one_shot_iterator.string_handle(),
one_shot_iterator.string_handle())
self.assertIs(initializable_iterator.string_handle(),
initializable_iterator.string_handle())
self.assertIs(structure_iterator.string_handle(),
structure_iterator.string_handle())
# Assert that getting the (default) string handle creates no ops.
self.assertEqual(created_ops, len(ops.get_default_graph().get_operations()))
# Specifying an explicit name will create a new op.
handle_with_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo", handle_with_name.op.name)
self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name)
handle_with_same_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo_1", handle_with_same_name.op.name)
self.assertIsNot(handle_with_name, handle_with_same_name)
def testIteratorStringHandleError(self):
dataset_int_scalar = (
dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat())
dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [])
feedable_int_vector = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [None])
feedable_int_any = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32)
with self.test_session() as sess:
handle_int_scalar = sess.run(
dataset_int_scalar.make_one_shot_iterator().string_handle())
handle_float_vector = sess.run(
dataset_float_vector.make_one_shot_iterator().string_handle())
self.assertEqual(1,
sess.run(
feedable_int_scalar.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
self.assertEqual(2,
sess.run(
feedable_int_any.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_float_vector}))
def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 3
with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_3_handle = iterator_3.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_3.output_types, dataset_3.output_shapes)
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
remote_op = functional_ops.remote_call(
args=[iterator_3_handle],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.test_session(config=worker_config) as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [1])
# Fails when target is cpu:2 where the resource is not located.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
})
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self):
s1 = server_lib.Server.create_local_server()
s2 = server_lib.Server.create_local_server()
s3 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
workers = cluster_def.job.add()
workers.name = "worker"
workers.tasks[0] = s1.target[len("grpc://"):]
workers.tasks[1] = s2.target[len("grpc://"):]
client = cluster_def.job.add()
client.name = "client"
client.tasks[0] = s3.target[len("grpc://"):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
worker_devices = [
"/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2)
]
itr_handles = []
for device in worker_devices:
with ops.device(device):
src = dataset_ops.Dataset.from_tensor_slices([device])
itr = src.make_one_shot_iterator()
itr_handles.append(itr.string_handle())
targets = dataset_ops.Dataset.from_tensor_slices(worker_devices)
handles = dataset_ops.Dataset.from_tensor_slices(itr_handles)
@function.Defun(dtypes.string)
def loading_func(h):
remote_itr = iterator_ops.Iterator.from_string_handle(
h, itr.output_types, itr.output_shapes)
return remote_itr.get_next()
def map_fn(target, handle):
return functional_ops.remote_call(
args=[handle], Tout=[dtypes.string], f=loading_func, target=target)
with ops.device("/job:client"):
client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn)
itr = client_dataset.make_initializable_iterator()
n = itr.get_next()
with session.Session(s3.target, config=config) as sess:
sess.run(itr.initializer)
expected_values = worker_devices
for expected in expected_values:
self.assertEqual((compat.as_bytes(expected),), sess.run(n))
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_3_handle = iterator_3.string_handle()
def _encode_raw(byte_array):
return bytes(bytearray(byte_array))
@function.Defun(dtypes.uint8)
def _remote_fn(h):
handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
remote_iterator = iterator_ops.Iterator.from_string_handle(
handle, dataset_3.output_types, dataset_3.output_shapes)
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
iterator_3_handle_uint8 = parsing_ops.decode_raw(
bytes=iterator_3_handle, out_type=dtypes.uint8)
remote_op = functional_ops.remote_call(
args=[iterator_3_handle_uint8],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.test_session() as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [1])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
def testIncorrectIteratorRestore(self):
def _path():
return os.path.join(self.get_temp_dir(), "iterator")
def _save_op(iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
_path(), parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(_path()), dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
def _build_range_dataset_graph():
start = 1
stop = 10
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = _save_op(iterator._iterator_resource)
restore_op = _restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
def _build_reader_dataset_graph():
filenames = ["test"] # Does not exist but we don't care in this test.
iterator = readers.FixedLengthRecordDataset(
filenames, 1, 0, 0).make_initializable_iterator()
init_op = iterator.initializer
get_next_op = iterator.get_next()
save_op = _save_op(iterator._iterator_resource)
restore_op = _restore_op(iterator._iterator_resource)
return init_op, get_next_op, save_op, restore_op
# Saving iterator for RangeDataset graph.
with ops.Graph().as_default() as g:
init_op, _, save_op, _ = _build_range_dataset_graph()
with self.test_session(graph=g) as sess:
sess.run(init_op)
sess.run(save_op)
# Attempt to restore the saved iterator into an IteratorResource of
# incompatible type. An iterator of RangeDataset has output type int64,
# while an iterator of FixedLengthRecordDataset has output type string.
# So an InvalidArgumentError should be raised by
# IteratorResource::set_iterator.
with ops.Graph().as_default() as g:
_, _, _, restore_op = _build_reader_dataset_graph()
with self.test_session(graph=g) as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(restore_op)
def testRepeatedGetNextWarning(self):
iterator = dataset_ops.Dataset.range(10).make_one_shot_iterator()
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
for _ in range(100):
iterator.get_next()
self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w))
for warning in w:
self.assertTrue(
iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE in str(warning.message))
if __name__ == "__main__":
test.main()
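# A minimal sketch of the graph-mode pattern these tests exercise (TF 1.x
# APIs, matching the imports above):
#
#   dataset = dataset_ops.Dataset.range(3)
#   next_element = dataset.make_one_shot_iterator().get_next()
#   with session.Session() as sess:
#     while True:
#       try:
#         print(sess.run(next_element))  # 0, then 1, then 2
#       except errors.OutOfRangeError:
#         break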
| 39.634855
| 80
| 0.668307
|
4a14aa751d80d05387010bd9a68da53ef3528722
| 42
|
py
|
Python
|
cat/__init__.py
|
csengor/toraman_webapp
|
35c10531ce83e2fc744dd8df0e01624e109aaec8
|
[
"MIT"
] | 2
|
2020-02-01T08:21:09.000Z
|
2020-10-08T17:36:35.000Z
|
cat/__init__.py
|
toramanPRO/toraman_webapp
|
35c10531ce83e2fc744dd8df0e01624e109aaec8
|
[
"MIT"
] | 1
|
2019-12-15T07:39:24.000Z
|
2019-12-15T07:39:24.000Z
|
cat/__init__.py
|
toramanPRO/toraman_webapp
|
35c10531ce83e2fc744dd8df0e01624e109aaec8
|
[
"MIT"
] | null | null | null |
default_app_config = 'cat.apps.CatConfig'
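# default_app_config points Django (pre-3.2) at the app's AppConfig class;
# on Django 3.2+ the setting is deprecated and the config is auto-detected.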
| 21
| 41
| 0.809524
|
4a14aabb18185c6e9db3109a71503f4bb9ddab00
| 266
|
py
|
Python
|
tests/artificial/transf_None/trend_PolyTrend/cycle_12/ar_/test_artificial_1024_None_PolyTrend_12__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/artificial/transf_None/trend_PolyTrend/cycle_12/ar_/test_artificial_1024_None_PolyTrend_12__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/artificial/transf_None/trend_PolyTrend/cycle_12/ar_/test_artificial_1024_None_PolyTrend_12__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0);
| 38
| 161
| 0.729323
|
4a14ab71231651cb45ad32d8e548d7f58af34598
| 4,080
|
py
|
Python
|
tensorboard/plugins/memoryutil/summary.py
|
venky-intel/tensorboard
|
2c375b892e883240deadf3358414c283e52210be
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/plugins/memoryutil/summary.py
|
venky-intel/tensorboard
|
2c375b892e883240deadf3358414c283e52210be
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/plugins/memoryutil/summary.py
|
venky-intel/tensorboard
|
2c375b892e883240deadf3358414c283e52210be
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text summaries and TensorFlow operations to create them.
A text summary stores a single string value.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorboard.plugins.text import metadata
def op(name,
data,
display_name=None,
description=None,
collections=None):
"""Create a text summary op.
Text data summarized via this plugin will be visible in the Text Dashboard
in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
in the strings, and will automatically organize 1D and 2D tensors into tables.
If a tensor with more than 2 dimensions is provided, a 2D subarray will be
displayed along with a warning message. (Note that this behavior is not
intrinsic to the text summary API, but rather to the default TensorBoard text
plugin.)
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
data: A string-type Tensor to summarize. The text must be encoded in UTF-8.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of ops.GraphKeys. The collections to which to add
      the summary. Defaults to [GraphKeys.SUMMARIES].
Returns:
A TensorSummary op that is configured so that TensorBoard will recognize
that it contains textual data. The TensorSummary is a scalar `Tensor` of
type `string` which contains `Summary` protobufs.
Raises:
ValueError: If tensor has the wrong type.
"""
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
with tf.control_dependencies([tf.assert_type(data, tf.string)]):
return tf.summary.tensor_summary(name='memory_summary',
tensor=data,
collections=collections,
summary_metadata=summary_metadata)
def pb(name, data, display_name=None, description=None):
"""Create a text summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
data: A Python bytestring (of type bytes), or Unicode string. Or a numpy
data array of those types.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Raises:
ValueError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
try:
tensor = tf.make_tensor_proto(data, dtype=tf.string)
except TypeError as e:
raise ValueError(e)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
summary = tf.Summary()
summary.value.add(tag='%s/memory_summary' % name,
metadata=summary_metadata,
tensor=tensor)
return summary
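# A minimal usage sketch (TF 1.x; the log directory below is hypothetical):
#
#   summary_proto = pb('memory/usage', 'rss=123MB')
#   writer = tf.summary.FileWriter('/tmp/memoryutil_demo')
#   writer.add_summary(summary_proto, global_step=0)
#   writer.close()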
| 38.130841
| 80
| 0.701716
|
4a14ac56c16bcbd33c99a25e8d0322743313dc9a
| 157,251
|
py
|
Python
|
hail/python/hail/matrixtable.py
|
jkgoodrich/hail
|
95ce1d792b553a5e97b390d349237a7ed86fbf98
|
[
"MIT"
] | null | null | null |
hail/python/hail/matrixtable.py
|
jkgoodrich/hail
|
95ce1d792b553a5e97b390d349237a7ed86fbf98
|
[
"MIT"
] | null | null | null |
hail/python/hail/matrixtable.py
|
jkgoodrich/hail
|
95ce1d792b553a5e97b390d349237a7ed86fbf98
|
[
"MIT"
] | null | null | null |
import itertools
from typing import Optional, Dict, Tuple, Any, List
from collections import Counter
import hail as hl
from hail.expr.expressions import Expression, StructExpression, \
expr_struct, expr_any, expr_bool, analyze, Indices, \
construct_reference, construct_expr, extract_refs_by_indices, \
ExpressionException, TupleExpression, unify_all
from hail.expr.types import types_match, tarray, tset
from hail.expr.matrix_type import tmatrix
import hail.ir as ir
from hail.table import Table, ExprContainer, TableIndexKeyError
from hail.typecheck import typecheck, typecheck_method, dictof, anytype, \
anyfunc, nullable, sequenceof, oneof, numeric, lazy, enumeration
from hail.utils import storage_level, default_handler
from hail.utils.java import warning, Env
from hail.utils.misc import wrap_to_tuple, \
get_key_by_exprs, \
get_select_exprs, check_annotate_exprs, process_joins
class GroupedMatrixTable(ExprContainer):
"""Matrix table grouped by row or column that can be aggregated into a new matrix table."""
def __init__(self,
parent: 'MatrixTable',
row_keys=None,
computed_row_key=None,
col_keys=None,
computed_col_key=None,
entry_fields=None,
row_fields=None,
col_fields=None,
partitions=None):
super(GroupedMatrixTable, self).__init__()
self._parent = parent
self._copy_fields_from(parent)
self._row_keys = row_keys
self._computed_row_key = computed_row_key
self._col_keys = col_keys
self._computed_col_key = computed_col_key
self._entry_fields = entry_fields
self._row_fields = row_fields
self._col_fields = col_fields
self._partitions = partitions
def _copy(self, *,
row_keys=None,
computed_row_key=None,
col_keys=None,
computed_col_key=None,
entry_fields=None,
row_fields=None,
col_fields=None,
partitions=None):
return GroupedMatrixTable(
parent=self._parent,
row_keys=row_keys if row_keys is not None else self._row_keys,
computed_row_key=computed_row_key if computed_row_key is not None else self._computed_row_key,
col_keys=col_keys if col_keys is not None else self._col_keys,
computed_col_key=computed_col_key if computed_col_key is not None else self._computed_col_key,
entry_fields=entry_fields if entry_fields is not None else self._entry_fields,
row_fields=row_fields if row_fields is not None else self._row_fields,
col_fields=col_fields if col_fields is not None else self._col_fields,
partitions=partitions if partitions is not None else self._partitions
)
def _fixed_indices(self):
if self._row_keys is None and self._col_keys is None:
return self._parent._entry_indices
if self._row_keys is not None and self._col_keys is None:
return self._parent._col_indices
if self._row_keys is None and self._col_keys is not None:
return self._parent._row_indices
return self._parent._global_indices
@typecheck_method(item=str)
def __getitem__(self, item):
return self._get_field(item)
def describe(self, handler=print):
"""Print information about grouped matrix table."""
if self._row_keys is None:
rowstr = ""
else:
rowstr = "\nRows: \n" + "\n ".join(["{}: {}".format(k, v._type) for k, v in self._row_keys.items()])
if self._col_keys is None:
colstr = ""
else:
colstr = "\nColumns: \n" + "\n ".join(["{}: {}".format(k, v) for k, v in self._col_keys.items()])
s = (f'----------------------------------------\n'
f'GroupedMatrixTable grouped by {rowstr}{colstr}\n'
f'----------------------------------------\n'
f'Parent MatrixTable:\n')
handler(s)
self._parent.describe(handler)
@typecheck_method(exprs=oneof(str, Expression),
named_exprs=expr_any)
def group_rows_by(self, *exprs, **named_exprs) -> 'GroupedMatrixTable':
"""Group rows.
Examples
--------
Aggregate to a matrix with genes as row keys, computing the number of
non-reference calls as an entry field:
>>> dataset_result = (dataset.group_rows_by(dataset.gene)
... .aggregate(n_non_ref = hl.agg.count_where(dataset.GT.is_non_ref())))
Notes
-----
All complex expressions must be passed as named expressions.
Parameters
----------
exprs : args of :class:`str` or :class:`.Expression`
Row fields to group by.
named_exprs : keyword args of :class:`.Expression`
Row-indexed expressions to group by.
Returns
-------
:class:`.GroupedMatrixTable`
Grouped matrix. Can be used to call :meth:`.GroupedMatrixTable.aggregate`.
"""
if self._row_keys is not None:
raise NotImplementedError("GroupedMatrixTable is already grouped by rows.")
if self._col_keys is not None:
raise NotImplementedError("GroupedMatrixTable is already grouped by cols; cannot also group by rows.")
caller = 'group_rows_by'
row_key, computed_key = get_key_by_exprs(caller,
exprs,
named_exprs,
self._parent._row_indices,
override_protected_indices={self._parent._global_indices,
self._parent._col_indices})
self._check_bindings(caller, computed_key, self._parent._row_indices)
return self._copy(row_keys=row_key, computed_row_key=computed_key)
@typecheck_method(exprs=oneof(str, Expression),
named_exprs=expr_any)
def group_cols_by(self, *exprs, **named_exprs) -> 'GroupedMatrixTable':
"""Group columns.
Examples
--------
Aggregate to a matrix with cohort as column keys, computing the call rate
as an entry field:
>>> dataset_result = (dataset.group_cols_by(dataset.cohort)
... .aggregate(call_rate = hl.agg.fraction(hl.is_defined(dataset.GT))))
Notes
-----
All complex expressions must be passed as named expressions.
Parameters
----------
exprs : args of :class:`str` or :class:`.Expression`
Column fields to group by.
named_exprs : keyword args of :class:`.Expression`
Column-indexed expressions to group by.
Returns
-------
:class:`.GroupedMatrixTable`
            Grouped matrix; can be used to call :meth:`.GroupedMatrixTable.aggregate`.
"""
if self._row_keys is not None:
raise NotImplementedError("GroupedMatrixTable is already grouped by rows; cannot also group by cols.")
if self._col_keys is not None:
raise NotImplementedError("GroupedMatrixTable is already grouped by cols.")
caller = 'group_cols_by'
col_key, computed_key = get_key_by_exprs(caller,
exprs,
named_exprs,
self._parent._col_indices,
override_protected_indices={self._parent._global_indices,
self._parent._row_indices})
self._check_bindings(caller, computed_key, self._parent._col_indices)
return self._copy(col_keys=col_key, computed_col_key=computed_key)
def _check_bindings(self, caller, new_bindings, indices):
empty = []
def iter_option(o):
return o if o is not None else empty
if indices == self._parent._row_indices:
fixed_fields = [*self._parent.globals, *self._parent.col]
else:
assert indices == self._parent._col_indices
fixed_fields = [*self._parent.globals, *self._parent.row]
bound_fields = set(itertools.chain(
iter_option(self._row_keys),
iter_option(self._col_keys),
iter_option(self._col_fields),
iter_option(self._row_fields),
iter_option(self._entry_fields),
fixed_fields))
for k in new_bindings:
if k in bound_fields:
raise ExpressionException(f"{caller!r} cannot assign duplicate field {k!r}")
def partition_hint(self, n: int) -> 'GroupedMatrixTable':
"""Set the target number of partitions for aggregation.
Examples
--------
Use `partition_hint` in a :meth:`.MatrixTable.group_rows_by` /
:meth:`.GroupedMatrixTable.aggregate` pipeline:
>>> dataset_result = (dataset.group_rows_by(dataset.gene)
... .partition_hint(5)
... .aggregate(n_non_ref = hl.agg.count_where(dataset.GT.is_non_ref())))
Notes
-----
Until Hail's query optimizer is intelligent enough to sample records at all
stages of a pipeline, it can be necessary in some places to provide some
explicit hints.
The default number of partitions for :meth:`.GroupedMatrixTable.aggregate` is
the number of partitions in the upstream dataset. If the aggregation greatly
reduces the size of the dataset, providing a hint for the target number of
partitions can accelerate downstream operations.
Parameters
----------
n : int
Number of partitions.
Returns
-------
:class:`.GroupedMatrixTable`
Same grouped matrix table with a partition hint.
"""
self._partitions = n
return self
@typecheck_method(named_exprs=expr_any)
def aggregate_cols(self, **named_exprs) -> 'GroupedMatrixTable':
"""Aggregate cols by group.
Examples
--------
Aggregate to a matrix with cohort as column keys, computing the mean height
per cohort as a new column field:
>>> dataset_result = (dataset.group_cols_by(dataset.cohort)
... .aggregate_cols(mean_height = hl.agg.mean(dataset.pheno.height))
... .result())
Notes
-----
The aggregation scope includes all column fields and global fields.
See Also
--------
:meth:`.result`
Parameters
----------
named_exprs : varargs of :class:`.Expression`
Aggregation expressions.
Returns
-------
:class:`.GroupedMatrixTable`
"""
if self._row_keys is not None:
raise NotImplementedError("GroupedMatrixTable is already grouped by rows. Cannot aggregate over cols.")
assert self._col_keys is not None
base = self._col_fields if self._col_fields is not None else hl.struct()
for k, e in named_exprs.items():
analyze('GroupedMatrixTable.aggregate_cols', e, self._parent._global_indices, {self._parent._col_axis})
self._check_bindings('aggregate_cols', named_exprs, self._parent._col_indices)
return self._copy(col_fields=base.annotate(**named_exprs))
@typecheck_method(named_exprs=expr_any)
def aggregate_rows(self, **named_exprs) -> 'GroupedMatrixTable':
"""Aggregate rows by group.
Examples
--------
Aggregate to a matrix with genes as row keys, collecting the functional
consequences per gene as a set as a new row field:
>>> dataset_result = (dataset.group_rows_by(dataset.gene)
... .aggregate_rows(consequences = hl.agg.collect_as_set(dataset.consequence))
... .result())
Notes
-----
The aggregation scope includes all row fields and global fields.
See Also
--------
:meth:`.result`
Parameters
----------
named_exprs : varargs of :class:`.Expression`
Aggregation expressions.
Returns
-------
:class:`.GroupedMatrixTable`
"""
if self._col_keys is not None:
raise NotImplementedError("GroupedMatrixTable is already grouped by cols. Cannot aggregate over rows.")
assert self._row_keys is not None
base = self._row_fields if self._row_fields is not None else hl.struct()
for k, e in named_exprs.items():
analyze('GroupedMatrixTable.aggregate_rows', e, self._parent._global_indices, {self._parent._row_axis})
self._check_bindings('aggregate_rows', named_exprs, self._parent._row_indices)
return self._copy(row_fields=base.annotate(**named_exprs))
@typecheck_method(named_exprs=expr_any)
def aggregate_entries(self, **named_exprs) -> 'GroupedMatrixTable':
"""Aggregate entries by group.
Examples
--------
Aggregate to a matrix with genes as row keys, computing the number of
non-reference calls as an entry field:
>>> dataset_result = (dataset.group_rows_by(dataset.gene)
... .aggregate_entries(n_non_ref = hl.agg.count_where(dataset.GT.is_non_ref()))
... .result())
See Also
--------
:meth:`.aggregate`, :meth:`.result`
Parameters
----------
named_exprs : varargs of :class:`.Expression`
Aggregation expressions.
Returns
-------
:class:`.GroupedMatrixTable`
"""
assert self._row_keys is not None or self._col_keys is not None
base = self._entry_fields if self._entry_fields is not None else hl.struct()
for k, e in named_exprs.items():
analyze('GroupedMatrixTable.aggregate_entries', e, self._fixed_indices(), {self._parent._row_axis, self._parent._col_axis})
self._check_bindings('aggregate_entries', named_exprs,
self._parent._col_indices if self._col_keys is not None else self._parent._row_indices)
return self._copy(entry_fields=base.annotate(**named_exprs))
def result(self) -> 'MatrixTable':
"""Return the result of aggregating by group.
Examples
--------
Aggregate to a matrix with genes as row keys, collecting the functional
consequences per gene as a row field and computing the number of
non-reference calls as an entry field:
>>> dataset_result = (dataset.group_rows_by(dataset.gene)
... .aggregate_rows(consequences = hl.agg.collect_as_set(dataset.consequence))
... .aggregate_entries(n_non_ref = hl.agg.count_where(dataset.GT.is_non_ref()))
... .result())
Aggregate to a matrix with cohort as column keys, computing the mean height
per cohort as a column field and computing the number of non-reference calls
as an entry field:
>>> dataset_result = (dataset.group_cols_by(dataset.cohort)
... .aggregate_cols(mean_height = hl.agg.stats(dataset.pheno.height).mean)
... .aggregate_entries(n_non_ref = hl.agg.count_where(dataset.GT.is_non_ref()))
... .result())
See Also
--------
:meth:`.aggregate`
Returns
-------
:class:`.MatrixTable`
Aggregated matrix table.
"""
assert self._row_keys is not None or self._col_keys is not None
defined_exprs = []
for e in [self._row_fields, self._col_fields, self._entry_fields]:
if e is not None:
defined_exprs.append(e)
for e in [self._computed_row_key, self._computed_col_key]:
if e is not None:
defined_exprs.extend(e.values())
def promote_none(e):
return hl.struct() if e is None else e
entry_exprs = promote_none(self._entry_fields)
if len(entry_exprs) == 0:
warning("'GroupedMatrixTable.result': No entry fields were defined.")
base, cleanup = self._parent._process_joins(*defined_exprs)
if self._col_keys is not None:
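# Computed group keys are annotated under fresh uids so they cannot
# collide with existing fields; after aggregating they are renamed back.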
cck = self._computed_col_key or {}
computed_key_uids = {k: Env.get_uid() for k in cck}
modified_keys = [computed_key_uids.get(k, k) for k in self._col_keys]
mt = MatrixTable(ir.MatrixAggregateColsByKey(
ir.MatrixMapCols(
base._mir,
self._parent.col.annotate(**{computed_key_uids[k]: v for k, v in cck.items()})._ir,
modified_keys),
entry_exprs._ir,
promote_none(self._col_fields)._ir))
if cck:
mt = mt.rename({v: k for k, v in computed_key_uids.items()})
else:
cck = self._computed_row_key or {}
computed_key_uids = {k: Env.get_uid() for k in cck}
modified_keys = [computed_key_uids.get(k, k) for k in self._row_keys]
mt = MatrixTable(ir.MatrixAggregateRowsByKey(
ir.MatrixKeyRowsBy(
ir.MatrixMapRows(
ir.MatrixKeyRowsBy(base._mir, []),
self._parent._rvrow.annotate(**{computed_key_uids[k]: v for k, v in cck.items()})._ir),
modified_keys),
entry_exprs._ir,
promote_none(self._row_fields)._ir))
if cck:
mt = mt.rename({v: k for k, v in computed_key_uids.items()})
return cleanup(mt)
@typecheck_method(named_exprs=expr_any)
def aggregate(self, **named_exprs) -> 'MatrixTable':
"""Aggregate entries by group, used after :meth:`.MatrixTable.group_rows_by`
or :meth:`.MatrixTable.group_cols_by`.
Examples
--------
Aggregate to a matrix with genes as row keys, computing the number of
non-reference calls as an entry field:
>>> dataset_result = (dataset.group_rows_by(dataset.gene)
... .aggregate(n_non_ref = hl.agg.count_where(dataset.GT.is_non_ref())))
Notes
-----
Alias for :meth:`aggregate_entries`, :meth:`result`.
See Also
--------
:meth:`aggregate_entries`, :meth:`result`
Parameters
----------
named_exprs : varargs of :class:`.Expression`
Aggregation expressions.
Returns
-------
:class:`.MatrixTable`
Aggregated matrix table.
"""
return self.aggregate_entries(**named_exprs).result()
matrix_table_type = lazy()
class MatrixTable(ExprContainer):
"""Hail's distributed implementation of a structured matrix.
Use :func:`.read_matrix_table` to read a matrix table that was written with
:meth:`.MatrixTable.write`.
Examples
--------
Add annotations:
>>> dataset = dataset.annotate_globals(pli = {'SCN1A': 0.999, 'SONIC': 0.014},
... populations = ['AFR', 'EAS', 'EUR', 'SAS', 'AMR', 'HIS'])
>>> dataset = dataset.annotate_cols(pop = dataset.populations[hl.int(hl.rand_unif(0, 6))],
... sample_gq = hl.agg.mean(dataset.GQ),
... sample_dp = hl.agg.mean(dataset.DP))
>>> dataset = dataset.annotate_rows(variant_gq = hl.agg.mean(dataset.GQ),
... variant_dp = hl.agg.mean(dataset.DP),
... sas_hets = hl.agg.count_where(dataset.GT.is_het()))
>>> dataset = dataset.annotate_entries(gq_by_dp = dataset.GQ / dataset.DP)
Filter:
>>> dataset = dataset.filter_cols(dataset.pop != 'EUR')
>>> dataset = dataset.filter_rows((dataset.variant_gq > 10) & (dataset.variant_dp > 5))
>>> dataset = dataset.filter_entries(dataset.gq_by_dp > 1)
Query:
>>> col_stats = dataset.aggregate_cols(hl.struct(pop_counts=hl.agg.counter(dataset.pop),
... high_quality=hl.agg.fraction((dataset.sample_gq > 10) & (dataset.sample_dp > 5))))
>>> print(col_stats.pop_counts)
>>> print(col_stats.high_quality)
>>> het_dist = dataset.aggregate_rows(hl.agg.stats(dataset.sas_hets))
>>> print(het_dist)
>>> entry_stats = dataset.aggregate_entries(hl.struct(call_rate=hl.agg.fraction(hl.is_defined(dataset.GT)),
... global_gq_mean=hl.agg.mean(dataset.GQ)))
>>> print(entry_stats.call_rate)
>>> print(entry_stats.global_gq_mean)
"""
@staticmethod
def _from_java(jmir):
return MatrixTable(ir.JavaMatrix(jmir))
def __init__(self, mir):
super(MatrixTable, self).__init__()
self._mir = mir
self._globals = None
self._col_values = None
self._row_axis = 'row'
self._col_axis = 'column'
self._global_indices = Indices(self, set())
self._row_indices = Indices(self, {self._row_axis})
self._col_indices = Indices(self, {self._col_axis})
self._entry_indices = Indices(self, {self._row_axis, self._col_axis})
self._type = self._mir.typ
self._global_type = self._type.global_type
self._col_type = self._type.col_type
self._row_type = self._type.row_type
self._entry_type = self._type.entry_type
self._globals = construct_reference('global', self._global_type,
indices=self._global_indices)
self._rvrow = construct_reference('va',
self._type.row_type,
indices=self._row_indices)
self._row = hl.struct(**{k: self._rvrow[k] for k in self._row_type.keys()})
self._col = construct_reference('sa', self._col_type,
indices=self._col_indices)
self._entry = construct_reference('g', self._entry_type,
indices=self._entry_indices)
self._indices_from_ref = {'global': self._global_indices,
'va': self._row_indices,
'sa': self._col_indices,
'g': self._entry_indices}
self._row_key = hl.struct(
**{k: self._row[k] for k in self._type.row_key})
self._partition_key = self._row_key
self._col_key = hl.struct(
**{k: self._col[k] for k in self._type.col_key})
self._num_samples = None
for k, v in itertools.chain(self._globals.items(),
self._row.items(),
self._col.items(),
self._entry.items()):
self._set_field(k, v)
@property
def _schema(self) -> tmatrix:
return tmatrix(
self._global_type,
self._col_type, list(self._col_key),
self._row_type, list(self._row_key),
self._entry_type)
def __getitem__(self, item):
invalid_usage = TypeError("MatrixTable.__getitem__: invalid index argument(s)\n"
" Usage 1: field selection: mt['field']\n"
" Usage 2: Entry joining: mt[mt2.row_key, mt2.col_key]\n\n"
" To join row or column fields, use one of the following:\n"
" rows:\n"
" mt.index_rows(mt2.row_key)\n"
" mt.rows().index(mt2.row_key)\n"
" mt.rows()[mt2.row_key]\n"
" cols:\n"
" mt.index_cols(mt2.col_key)\n"
" mt.cols().index(mt2.col_key)\n"
" mt.cols()[mt2.col_key]")
if isinstance(item, str):
return self._get_field(item)
if isinstance(item, tuple) and len(item) == 2:
# this is the join path
exprs = item
row_key = wrap_to_tuple(exprs[0])
col_key = wrap_to_tuple(exprs[1])
try:
return self.index_entries(row_key, col_key)
except TypeError as e:
raise invalid_usage from e
raise invalid_usage
@property
def _col_key_types(self):
return [v.dtype for _, v in self.col_key.items()]
@property
def _row_key_types(self):
return [v.dtype for _, v in self.row_key.items()]
@property
def col_key(self) -> 'StructExpression':
"""Column key struct.
Examples
--------
Get the column key field names:
>>> list(dataset.col_key)
['s']
Returns
-------
:class:`.StructExpression`
"""
return self._col_key
@property
def row_key(self) -> 'StructExpression':
"""Row key struct.
Examples
--------
Get the row key field names:
>>> list(dataset.row_key)
['locus', 'alleles']
Returns
-------
:class:`.StructExpression`
"""
return self._row_key
@property
def globals(self) -> 'StructExpression':
"""Returns a struct expression including all global fields.
Returns
-------
:class:`.StructExpression`
"""
return self._globals
@property
def row(self) -> 'StructExpression':
"""Returns a struct expression of all row-indexed fields, including keys.
Examples
--------
Get the first five row field names:
>>> list(dataset.row)[:5]
['locus', 'alleles', 'rsid', 'qual', 'filters']
Returns
-------
:class:`.StructExpression`
Struct of all row fields.
"""
return self._row
@property
def row_value(self) -> 'StructExpression':
"""Returns a struct expression including all non-key row-indexed fields.
Examples
--------
Get the first five non-key row field names:
>>> list(dataset.row_value)[:5]
['rsid', 'qual', 'filters', 'info', 'use_as_marker']
Returns
-------
:class:`.StructExpression`
Struct of all row fields, minus keys.
"""
return self._row.drop(*self.row_key)
@property
def col(self) -> 'StructExpression':
"""Returns a struct expression of all column-indexed fields, including keys.
Examples
--------
Get all column field names:
>>> list(dataset.col) # doctest: +SKIP_OUTPUT_CHECK
['s', 'sample_qc', 'is_case', 'pheno', 'cov', 'cov1', 'cov2', 'cohorts', 'pop']
Returns
-------
:class:`.StructExpression`
Struct of all column fields.
"""
return self._col
@property
def col_value(self) -> 'StructExpression':
"""Returns a struct expression including all non-key column-indexed fields.
Examples
--------
Get all non-key column field names:
>>> list(dataset.col_value) # doctest: +SKIP_OUTPUT_CHECK
['sample_qc', 'is_case', 'pheno', 'cov', 'cov1', 'cov2', 'cohorts', 'pop']
Returns
-------
:class:`.StructExpression`
Struct of all column fields, minus keys.
"""
return self._col.drop(*self.col_key)
@property
def entry(self) -> 'StructExpression':
"""Returns a struct expression including all row-and-column-indexed fields.
Examples
--------
Get all entry field names:
>>> list(dataset.entry)
['GT', 'AD', 'DP', 'GQ', 'PL']
Returns
-------
:class:`.StructExpression`
Struct of all entry fields.
"""
return self._entry
@typecheck_method(keys=oneof(str, Expression),
named_keys=expr_any)
def key_cols_by(self, *keys, **named_keys) -> 'MatrixTable':
"""Key columns by a new set of fields.
See :meth:`.Table.key_by` for more information on defining a key.
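Examples
--------
Key columns by the existing sample ID field `s`, by name or by expression
(mirroring :meth:`.key_rows_by`):
>>> dataset_result = dataset.key_cols_by('s')
>>> dataset_result = dataset.key_cols_by(dataset.s)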
Parameters
----------
keys : varargs of :class:`str` or :class:`.Expression`.
Column fields to key by.
named_keys : keyword args of :class:`.Expression`.
Column fields to key by.
Returns
-------
:class:`.MatrixTable`
"""
key_fields, computed_keys = get_key_by_exprs("MatrixTable.key_cols_by", keys, named_keys, self._col_indices)
if not computed_keys:
return MatrixTable(ir.MatrixMapCols(self._mir, self._col._ir, key_fields))
else:
new_col = self.col.annotate(**computed_keys)
base, cleanup = self._process_joins(new_col)
return cleanup(MatrixTable(
ir.MatrixMapCols(
base._mir,
new_col._ir,
key_fields
)))
@typecheck_method(new_key=str)
def _key_rows_by_assert_sorted(self, *new_key):
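# Unsafe re-key: assumes the data is already sorted by the new key, and
# requires the new key to share at least one leading field with the
# current row key.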
rk_names = list(self.row_key)
i = 0
while (i < min(len(new_key), len(rk_names))):
if new_key[i] != rk_names[i]:
break
i += 1
if i < 1:
raise ValueError(
f'cannot implement an unsafe sort with no shared key:\n new key: {new_key}\n old key: {rk_names}')
return MatrixTable(ir.MatrixKeyRowsBy(self._mir, list(new_key), is_sorted=True))
@typecheck_method(keys=oneof(str, Expression),
named_keys=expr_any)
def key_rows_by(self, *keys, **named_keys) -> 'MatrixTable':
"""Key rows by a new set of fields.
Examples
--------
>>> dataset_result = dataset.key_rows_by('locus')
>>> dataset_result = dataset.key_rows_by(dataset['locus'])
>>> dataset_result = dataset.key_rows_by(**dataset.row_key.drop('alleles'))
All of these expressions key the dataset by the 'locus' field, dropping
the 'alleles' field from the row key.
>>> dataset_result = dataset.key_rows_by(contig=dataset['locus'].contig,
... position=dataset['locus'].position,
... alleles=dataset['alleles'])
This keys the dataset by the newly defined fields, 'contig' and 'position',
and the 'alleles' field. The old row key field, 'locus', is preserved as
a non-key field.
Notes
-----
See :meth:`.Table.key_by` for more information on defining a key.
Parameters
----------
keys : varargs of :class:`str` or :class:`.Expression`.
Row fields to key by.
named_keys : keyword args of :class:`.Expression`.
Row fields to key by.
Returns
-------
:class:`.MatrixTable`
"""
key_fields, computed_keys = get_key_by_exprs("MatrixTable.key_rows_by", keys, named_keys, self._row_indices)
if not computed_keys:
return MatrixTable(ir.MatrixKeyRowsBy(self._mir, key_fields))
else:
new_row = self._rvrow.annotate(**computed_keys)
base, cleanup = self._process_joins(new_row)
return cleanup(MatrixTable(
ir.MatrixKeyRowsBy(
ir.MatrixMapRows(
ir.MatrixKeyRowsBy(base._mir, []),
new_row._ir),
list(key_fields))))
@typecheck_method(named_exprs=expr_any)
def annotate_globals(self, **named_exprs) -> 'MatrixTable':
"""Create new global fields by name.
Examples
--------
Add two global fields:
>>> pops_1kg = {'EUR', 'AFR', 'EAS', 'SAS', 'AMR'}
>>> dataset_result = dataset.annotate_globals(pops_in_1kg = pops_1kg,
... gene_list = ['SHH', 'SCN1A', 'SPTA1', 'DISC1'])
Add global fields from another table and matrix table:
>>> dataset_result = dataset.annotate_globals(thing1 = dataset2.index_globals().global_field,
... thing2 = v_metadata.index_globals().global_field)
Note
----
This method does not support aggregation.
Notes
-----
This method creates new global fields, but can also overwrite existing fields. Only
same-scope fields can be overwritten: for example, it is not possible to annotate a
row field `foo` and later create a global field `foo`. However, it would be possible
to create a global field `foo` and later create another global field `foo`, overwriting
the first.
The arguments to the method should either be :class:`.Expression`
objects, or should be implicitly interpretable as expressions.
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.MatrixTable`
Matrix table with new global field(s).
"""
caller = "MatrixTable.annotate_globals"
check_annotate_exprs(caller, named_exprs, self._global_indices, set())
return self._select_globals(caller, self.globals.annotate(**named_exprs))
@typecheck_method(named_exprs=expr_any)
def annotate_rows(self, **named_exprs) -> 'MatrixTable':
"""Create new row-indexed fields by name.
Examples
--------
Compute call statistics for high quality samples per variant:
>>> high_quality_calls = hl.agg.filter(dataset.sample_qc.gq_stats.mean > 20,
... hl.agg.call_stats(dataset.GT, dataset.alleles))
>>> dataset_result = dataset.annotate_rows(call_stats = high_quality_calls)
Add functional annotations from a :class:`.Table`, `v_metadata`, and a
:class:`.MatrixTable`, `dataset2`, both keyed by locus and alleles.
>>> dataset_result = dataset.annotate_rows(consequence = v_metadata[dataset.locus, dataset.alleles].consequence,
... dataset2_AF = dataset2.index_rows(dataset.row_key).info.AF)
Note
----
This method supports aggregation over columns. For instance, the usage:
>>> dataset_result = dataset.annotate_rows(mean_GQ = hl.agg.mean(dataset.GQ))
will compute the mean per row.
Notes
-----
This method creates new row fields, but can also overwrite existing fields. Only
non-key, same-scope fields can be overwritten: for example, it is not possible
to annotate a global field `foo` and later create a row field `foo`. However,
it would be possible to create a row field `foo` and later create another row
field `foo`, overwriting the first, as long as `foo` is not a row key.
The arguments to the method should either be :class:`.Expression`
objects, or should be implicitly interpretable as expressions.
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.MatrixTable`
Matrix table with new row-indexed field(s).
"""
caller = "MatrixTable.annotate_rows"
check_annotate_exprs(caller, named_exprs, self._row_indices, {self._col_axis})
return self._select_rows(caller, self._rvrow.annotate(**named_exprs))
@typecheck_method(named_exprs=expr_any)
def annotate_cols(self, **named_exprs) -> 'MatrixTable':
"""Create new column-indexed fields by name.
Examples
--------
Compute statistics about the GQ distribution per sample:
>>> dataset_result = dataset.annotate_cols(sample_gq_stats = hl.agg.stats(dataset.GQ))
Add sample metadata from a :class:`.hail.Table`.
>>> dataset_result = dataset.annotate_cols(population = s_metadata[dataset.s].pop)
Note
----
This method supports aggregation over rows. For instance, the usage:
>>> dataset_result = dataset.annotate_cols(mean_GQ = hl.agg.mean(dataset.GQ))
will compute the mean per column.
Notes
-----
This method creates new column fields, but can also overwrite existing fields. Only
same-scope fields can be overwritten: for example, it is not possible to annotate a
global field `foo` and later create a column field `foo`. However, it would be possible
to create a column field `foo` and later create another column field `foo`, overwriting
the first.
The arguments to the method should either be :class:`.Expression`
objects, or should be implicitly interpretable as expressions.
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.MatrixTable`
Matrix table with new column-indexed field(s).
"""
caller = "MatrixTable.annotate_cols"
check_annotate_exprs(caller, named_exprs, self._col_indices, {self._row_axis})
return self._select_cols(caller, self.col.annotate(**named_exprs))
@typecheck_method(named_exprs=expr_any)
def annotate_entries(self, **named_exprs) -> 'MatrixTable':
"""Create new row-and-column-indexed fields by name.
Examples
--------
Compute the allele dosage using the PL field:
>>> def get_dosage(pl):
... # convert to linear scale
... linear_scaled = pl.map(lambda x: 10 ** - (x / 10))
...
... # normalize to sum to 1
... ls_sum = hl.sum(linear_scaled)
... linear_scaled = linear_scaled.map(lambda x: x / ls_sum)
...
... # multiply by [0, 1, 2] and sum
... return hl.sum(linear_scaled * [0, 1, 2])
>>>
>>> dataset_result = dataset.annotate_entries(dosage = get_dosage(dataset.PL))
Note
----
This method does not support aggregation.
Notes
-----
This method creates new entry fields, but can also overwrite existing fields. Only
same-scope fields can be overwritten: for example, it is not possible to annotate a
global field `foo` and later create an entry field `foo`. However, it would be possible
to create an entry field `foo` and later create another entry field `foo`, overwriting
the first.
The arguments to the method should either be :class:`.Expression`
objects, or should be implicitly interpretable as expressions.
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.MatrixTable`
Matrix table with new row-and-column-indexed field(s).
"""
caller = "MatrixTable.annotate_entries"
check_annotate_exprs(caller, named_exprs, self._entry_indices, set())
return self._select_entries(caller, s=self.entry.annotate(**named_exprs))
def select_globals(self, *exprs, **named_exprs) -> 'MatrixTable':
"""Select existing global fields or create new fields by name, dropping the rest.
Examples
--------
Select one existing field and compute a new one:
>>> dataset_result = dataset.select_globals(dataset.global_field_1,
... another_global=['AFR', 'EUR', 'EAS', 'AMR', 'SAS'])
Notes
-----
This method creates new global fields. If a created field shares its name
with a differently-indexed field of the table, the method will fail.
Note
----
See :meth:`.Table.select` for more information about using ``select`` methods.
Note
----
This method does not support aggregation.
Parameters
----------
exprs : variable-length args of :class:`str` or :class:`.Expression`
Arguments that specify field names or nested field reference expressions.
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.MatrixTable`
MatrixTable with specified global fields.
"""
caller = 'MatrixTable.select_globals'
new_global = get_select_exprs(caller,
exprs,
named_exprs,
self._global_indices,
self._globals)
return self._select_globals(caller, new_global)
def select_rows(self, *exprs, **named_exprs) -> 'MatrixTable':
"""Select existing row fields or create new fields by name, dropping all
other non-key fields.
Examples
--------
Select existing fields and compute a new one:
>>> dataset_result = dataset.select_rows(
... dataset.variant_qc.gq_stats.mean,
... high_quality_cases = hl.agg.count_where((dataset.GQ > 20) &
... dataset.is_case))
Notes
-----
This method creates new row fields. If a created field shares its name
with a differently-indexed field of the table, or with a row key, the
method will fail.
Row keys are preserved. To drop or change a row key field, use
:meth:`MatrixTable.key_rows_by`.
Note
----
See :meth:`.Table.select` for more information about using ``select`` methods.
Note
----
This method supports aggregation over columns. For instance, the usage:
>>> dataset_result = dataset.select_rows(mean_GQ = hl.agg.mean(dataset.GQ))
will compute the mean per row.
Parameters
----------
exprs : variable-length args of :class:`str` or :class:`.Expression`
Arguments that specify field names or nested field reference expressions.
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.MatrixTable`
MatrixTable with specified row fields.
"""
caller = 'MatrixTable.select_rows'
new_row = get_select_exprs(caller,
exprs,
named_exprs,
self._row_indices,
self._rvrow)
return self._select_rows(caller, new_row)
def select_cols(self, *exprs, **named_exprs) -> 'MatrixTable':
"""Select existing column fields or create new fields by name, dropping the rest.
Examples
--------
Select existing fields and compute a new one:
>>> dataset_result = dataset.select_cols(
... dataset.sample_qc,
... dataset.pheno.age,
... isCohort1 = dataset.pheno.cohort_name == 'Cohort1')
Notes
-----
This method creates new column fields. If a created field shares its name
with a differently-indexed field of the table, the method will fail.
Note
----
See :meth:`.Table.select` for more information about using ``select`` methods.
Note
----
This method supports aggregation over rows. For instance, the usage:
>>> dataset_result = dataset.select_cols(mean_GQ = hl.agg.mean(dataset.GQ))
will compute the mean per column.
Parameters
----------
exprs : variable-length args of :class:`str` or :class:`.Expression`
Arguments that specify field names or nested field reference expressions.
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.MatrixTable`
MatrixTable with specified column fields.
"""
caller = 'MatrixTable.select_cols'
new_col = get_select_exprs(caller,
exprs,
named_exprs,
self._col_indices,
self._col)
return self._select_cols(caller, new_col)
def select_entries(self, *exprs, **named_exprs) -> 'MatrixTable':
"""Select existing entry fields or create new fields by name, dropping the rest.
Examples
--------
Drop all entry fields aside from `GT`:
>>> dataset_result = dataset.select_entries(dataset.GT)
Notes
-----
This method creates new entry fields. If a created field shares its name
with a differently-indexed field of the table, the method will fail.
Note
----
See :meth:`.Table.select` for more information about using ``select`` methods.
Note
----
This method does not support aggregation.
Parameters
----------
exprs : variable-length args of :class:`str` or :class:`.Expression`
Arguments that specify field names or nested field reference expressions.
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.MatrixTable`
MatrixTable with specified entry fields.
"""
caller = 'MatrixTable.select_entries'
new_entry = get_select_exprs(caller,
exprs,
named_exprs,
self._entry_indices,
self._entry)
return self._select_entries(caller, new_entry)
@typecheck_method(exprs=oneof(str, Expression))
def drop(self, *exprs) -> 'MatrixTable':
"""Drop fields.
Examples
--------
Drop fields `PL` (an entry field), `info` (a row field), and `pheno` (a column
field) using strings:
>>> dataset_result = dataset.drop('PL', 'info', 'pheno')
Drop fields `PL` (an entry field), `info` (a row field), and `pheno` (a column
field) using field references:
>>> dataset_result = dataset.drop(dataset.PL, dataset.info, dataset.pheno)
Drop a list of fields:
>>> fields_to_drop = ['PL', 'info', 'pheno']
>>> dataset_result = dataset.drop(*fields_to_drop)
Notes
-----
This method can be used to drop global, row-indexed, column-indexed, or
row-and-column-indexed (entry) fields. The arguments can be either strings
(``'field'``), or top-level field references (``table.field`` or
``table['field']``).
Key fields (belonging to either the row key or the column key) cannot be
dropped using this method. In order to drop a key field, use :meth:`.key_rows_by`
or :meth:`.key_cols_by` to remove the field from the key before dropping.
While many operations exist independently for rows, columns, entries, and
globals, only one is needed for dropping due to the lack of any necessary
contextual information.
Parameters
----------
exprs : varargs of :class:`str` or :class:`.Expression`
Names of fields to drop or field reference expressions.
Returns
-------
:class:`.MatrixTable`
Matrix table without specified fields.
"""
def check_key(name, keys):
if name in keys:
raise ValueError("MatrixTable.drop: cannot drop key field '{}'".format(name))
return name
all_field_exprs = {e: k for k, e in self._fields.items()}
fields_to_drop = set()
for e in exprs:
if isinstance(e, Expression):
if e in all_field_exprs:
fields_to_drop.add(all_field_exprs[e])
else:
raise ExpressionException("Method 'drop' expects string field names or top-level field expressions"
" (e.g. 'foo', matrix.foo, or matrix['foo'])")
else:
assert isinstance(e, str)
if e not in self._fields:
raise IndexError("MatrixTable has no field '{}'".format(e))
fields_to_drop.add(e)
m = self
global_fields = [field for field in fields_to_drop if self._fields[field]._indices == self._global_indices]
if global_fields:
m = m._select_globals("MatrixTable.drop", m.globals.drop(*global_fields))
row_fields = [check_key(field, list(self.row_key)) for field in fields_to_drop if self._fields[field]._indices == self._row_indices]
if row_fields:
m = m._select_rows("MatrixTable.drop", row=m.row.drop(*row_fields))
col_fields = [check_key(field, list(self.col_key)) for field in fields_to_drop if self._fields[field]._indices == self._col_indices]
if col_fields:
m = m._select_cols("MatrixTable.drop", m.col.drop(*col_fields))
entry_fields = [field for field in fields_to_drop if self._fields[field]._indices == self._entry_indices]
if entry_fields:
m = m._select_entries("MatrixTable.drop", m.entry.drop(*entry_fields))
return m
@typecheck_method(other=Table)
def semi_join_rows(self, other: 'Table') -> 'MatrixTable':
"""Filters the matrix table to rows whose key appears in `other`.
Parameters
----------
other : :class:`.Table`
Table with compatible key field(s).
Returns
-------
:class:`.MatrixTable`
Notes
-----
The row key type of the matrix table must match the key type of `other`.
This method does not change the schema of the matrix table; it is
filtering the matrix table to row keys present in another table.
To discard rows whose key is present in `other`, use
:meth:`.anti_join_rows`.
Examples
--------
>>> ds_result = ds.semi_join_rows(rows_to_keep)
It may be expensive to key the matrix table by the right-side key.
In this case, it is possible to implement a semi-join using a non-key
field as follows:
>>> ds_result = ds.filter_rows(hl.is_defined(rows_to_keep.index(ds['locus'], ds['alleles'])))
See Also
--------
:meth:`.anti_join_rows`, :meth:`.filter_rows`, :meth:`.semi_join_cols`
"""
return self.filter_rows(hl.is_defined(other.index(self.row_key)))
@typecheck_method(other=Table)
def anti_join_rows(self, other: 'Table') -> 'MatrixTable':
"""Filters the table to rows whose key does not appear in `other`.
Parameters
----------
other : :class:`.Table`
Table with compatible key field(s).
Returns
-------
:class:`.MatrixTable`
Notes
-----
The row key type of the matrix table must match the key type of `other`.
This method does not change the schema of the table; it is a method of
filtering the matrix table to row keys not present in another table.
To restrict to rows whose key is present in `other`, use
:meth:`.semi_join_rows`.
Examples
--------
>>> ds_result = ds.anti_join_rows(rows_to_remove)
It may be expensive to key the matrix table by the right-side key.
In this case, it is possible to implement an anti-join using a non-key
field as follows:
>>> ds_result = ds.filter_rows(hl.is_missing(rows_to_remove.index(ds['locus'], ds['alleles'])))
See Also
--------
:meth:`.semi_join_rows`, :meth:`.filter_rows`, :meth:`.anti_join_cols`
"""
return self.filter_rows(hl.is_missing(other.index(self.row_key)))
@typecheck_method(other=Table)
def semi_join_cols(self, other: 'Table') -> 'MatrixTable':
"""Filters the matrix table to columns whose key appears in `other`.
Parameters
----------
other : :class:`.Table`
Table with compatible key field(s).
Returns
-------
:class:`.MatrixTable`
Notes
-----
The column key type of the matrix table must match the key type of `other`.
This method does not change the schema of the matrix table; it is
filtering the matrix table to column keys present in another table.
To discard columns whose key is present in `other`, use
:meth:`.anti_join_cols`.
Examples
--------
>>> ds_result = ds.semi_join_cols(cols_to_keep)
It may be inconvenient to key the matrix table by the right-side key.
In this case, it is possible to implement a semi-join using a non-key
field as follows:
>>> ds_result = ds.filter_cols(hl.is_defined(cols_to_keep.index(ds['s'])))
See Also
--------
:meth:`.anti_join_cols`, :meth:`.filter_cols`, :meth:`.semi_join_rows`
"""
return self.filter_cols(hl.is_defined(other.index(self.col_key)))
@typecheck_method(other=Table)
def anti_join_cols(self, other: 'Table') -> 'MatrixTable':
"""Filters the table to columns whose key does not appear in `other`.
Parameters
----------
other : :class:`.Table`
Table with compatible key field(s).
Returns
-------
:class:`.MatrixTable`
Notes
-----
The column key type of the matrix table must match the key type of `other`.
This method does not change the schema of the table; it is a method of
filtering the matrix table to column keys not present in another table.
To restrict to columns whose key is present in `other`, use
:meth:`.semi_join_cols`.
Examples
--------
>>> ds_result = ds.anti_join_cols(cols_to_remove)
It may be inconvenient to key the matrix table by the right-side key.
In this case, it is possible to implement an anti-join using a non-key
field as follows:
>>> ds_result = ds.filter_cols(hl.is_missing(cols_to_remove.index(ds['s'])))
See Also
--------
:meth:`.semi_join_cols`, :meth:`.filter_cols`, :meth:`.anti_join_rows`
"""
return self.filter_cols(hl.is_missing(other.index(self.col_key)))
@typecheck_method(expr=expr_bool, keep=bool)
def filter_rows(self, expr, keep: bool = True) -> 'MatrixTable':
"""Filter rows of the matrix.
Examples
--------
Keep rows where `variant_qc.AF` is below 1%:
>>> dataset_result = dataset.filter_rows(dataset.variant_qc.AF[1] < 0.01, keep=True)
Remove rows where `filters` is non-empty:
>>> dataset_result = dataset.filter_rows(dataset.filters.size() > 0, keep=False)
Notes
-----
The expression `expr` will be evaluated for every row of the table. If
`keep` is ``True``, then rows where `expr` evaluates to ``True`` will be
kept (the filter removes the rows where the predicate evaluates to
``False``). If `keep` is ``False``, then rows where `expr` evaluates to
``True`` will be removed (the filter keeps the rows where the predicate
evaluates to ``False``).
Warning
-------
When `expr` evaluates to missing, the row will be removed regardless of `keep`.
Note
----
This method supports aggregation over columns. For instance,
>>> dataset_result = dataset.filter_rows(hl.agg.mean(dataset.GQ) > 20.0)
will remove rows where the mean GQ of all entries in the row is smaller than
20.
Parameters
----------
expr : bool or :class:`.BooleanExpression`
Filter expression.
keep : bool
Keep rows where `expr` is true.
Returns
-------
:class:`.MatrixTable`
Filtered matrix table.
"""
caller = 'MatrixTable.filter_rows'
analyze(caller, expr, self._row_indices, {self._col_axis})
if expr._aggregations:
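# An aggregation predicate cannot be filtered on directly: materialize it
# as a temporary row field, filter on that field, then drop it.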
bool_uid = Env.get_uid()
mt = self._select_rows(caller, self.row.annotate(**{bool_uid: expr}))
return mt.filter_rows(mt[bool_uid], keep).drop(bool_uid)
base, cleanup = self._process_joins(expr)
mt = MatrixTable(ir.MatrixFilterRows(base._mir, ir.filter_predicate_with_keep(expr._ir, keep)))
return cleanup(mt)
@typecheck_method(expr=expr_bool, keep=bool)
def filter_cols(self, expr, keep: bool = True) -> 'MatrixTable':
"""Filter columns of the matrix.
Examples
--------
Keep columns where `pheno.is_case` is ``True`` and `pheno.age` is larger
than 50:
>>> dataset_result = dataset.filter_cols(dataset.pheno.is_case &
... (dataset.pheno.age > 50),
... keep=True)
Remove columns where `sample_qc.gq_stats.mean` is less than 20:
>>> dataset_result = dataset.filter_cols(dataset.sample_qc.gq_stats.mean < 20,
... keep=False)
Remove columns where `s` is found in a Python set:
>>> samples_to_remove = {'NA12878', 'NA12891', 'NA12892'}
>>> set_to_remove = hl.literal(samples_to_remove)
>>> dataset_result = dataset.filter_cols(~set_to_remove.contains(dataset['s']))
Notes
-----
The expression `expr` will be evaluated for every column of the table.
If `keep` is ``True``, then columns where `expr` evaluates to ``True``
will be kept (the filter removes the columns where the predicate
evaluates to ``False``). If `keep` is ``False``, then columns where
`expr` evaluates to ``True`` will be removed (the filter keeps the
columns where the predicate evaluates to ``False``).
Warning
-------
When `expr` evaluates to missing, the column will be removed regardless of
`keep`.
Note
----
This method supports aggregation over rows. For instance,
>>> dataset_result = dataset.filter_cols(hl.agg.mean(dataset.GQ) > 20.0)
will remove columns where the mean GQ of all entries in the column is smaller
than 20.
Parameters
----------
expr : bool or :class:`.BooleanExpression`
Filter expression.
keep : bool
Keep columns where `expr` is true.
Returns
-------
:class:`.MatrixTable`
Filtered matrix table.
"""
caller = 'MatrixTable.filter_cols'
analyze(caller, expr, self._col_indices, {self._row_axis})
if expr._aggregations:
bool_uid = Env.get_uid()
mt = self._select_cols(caller, self.col.annotate(**{bool_uid: expr}))
return mt.filter_cols(mt[bool_uid], keep).drop(bool_uid)
base, cleanup = self._process_joins(expr)
mt = MatrixTable(ir.MatrixFilterCols(base._mir, ir.filter_predicate_with_keep(expr._ir, keep)))
return cleanup(mt)
@typecheck_method(expr=expr_bool, keep=bool)
def filter_entries(self, expr, keep: bool = True) -> 'MatrixTable':
"""Filter entries of the matrix.
Parameters
----------
expr : bool or :class:`.BooleanExpression`
Filter expression.
keep : bool
Keep entries where `expr` is true.
Returns
-------
:class:`.MatrixTable`
Filtered matrix table.
Examples
--------
Keep entries where the sum of `AD` is greater than 10 and `GQ` is greater than 20:
>>> dataset_result = dataset.filter_entries((hl.sum(dataset.AD) > 10) & (dataset.GQ > 20))
Warning
-------
When `expr` evaluates to missing, the entry will be removed regardless of
`keep`.
Note
----
This method does not support aggregation.
Notes
-----
The expression `expr` will be evaluated for every entry of the table.
If `keep` is ``True``, then entries where `expr` evaluates to ``True``
will be kept (the filter removes the entries where the predicate
evaluates to ``False``). If `keep` is ``False``, then entries where
`expr` evaluates to ``True`` will be removed (the filter keeps the
entries where the predicate evaluates to ``False``).
Filtered entries are removed entirely from downstream operations. This
means that the resulting matrix table has sparsity -- that is, that the
number of entries is **smaller** than the product of :meth:`count_rows`
and :meth:`count_cols`. To re-densify a filtered matrix table, use the
:meth:`unfilter_entries` method to restore filtered entries, populating
all fields with missing values. Below are some properties of an
entry-filtered matrix table.
1. Filtered entries are not included in the :meth:`entries` table.
>>> mt_range = hl.utils.range_matrix_table(10, 10)
>>> mt_range = mt_range.annotate_entries(x = mt_range.row_idx + mt_range.col_idx)
>>> mt_range.count()
(10, 10)
>>> mt_range.entries().count()
100
>>> mt_filt = mt_range.filter_entries(mt_range.x % 2 == 0)
>>> mt_filt.count()
(10, 10)
>>> mt_filt.count_rows() * mt_filt.count_cols()
100
>>> mt_filt.entries().count()
50
2. Filtered entries are not included in aggregation.
>>> mt_filt.aggregate_entries(hl.agg.count())
50
>>> mt_filt = mt_filt.annotate_cols(col_n = hl.agg.count())
>>> mt_filt.col_n.take(5)
[5, 5, 5, 5, 5]
>>> mt_filt = mt_filt.annotate_rows(row_n = hl.agg.count())
>>> mt_filt.row_n.take(5)
[5, 5, 5, 5, 5]
3. Annotating a new entry field will not annotate filtered entries.
>>> mt_filt = mt_filt.annotate_entries(y = 1)
>>> mt_filt.aggregate_entries(hl.agg.sum(mt_filt.y))
50
4. If all the entries in a row or column of a matrix table are
filtered, the row or column remains.
>>> mt_filt.filter_entries(False).count()
(10, 10)
See Also
--------
:meth:`unfilter_entries`, :meth:`compute_entry_filter_stats`
"""
base, cleanup = self._process_joins(expr)
analyze('MatrixTable.filter_entries', expr, self._entry_indices)
m = MatrixTable(ir.MatrixFilterEntries(base._mir, ir.filter_predicate_with_keep(expr._ir, keep)))
return cleanup(m)
def unfilter_entries(self):
"""Unfilters filtered entries, populating fields with missing values.
Returns
-------
:class:`MatrixTable`
Notes
-----
This method is used in the case that a pipeline downstream of :meth:`filter_entries`
requires a fully dense (no filtered entries) matrix table.
Generally, if this method is required in a pipeline, the upstream pipeline can
be rewritten to use annotation instead of entry filtering.
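Examples
--------
A minimal sketch of the round trip on a synthetic matrix table:
>>> mt = hl.utils.range_matrix_table(2, 2)
>>> mt = mt.annotate_entries(x = mt.row_idx + mt.col_idx)
>>> mt = mt.filter_entries(mt.x > 0)
>>> mt.entries().count()
3
>>> mt.unfilter_entries().entries().count()
4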
See Also
--------
:meth:`filter_entries`, :meth:`compute_entry_filter_stats`
"""
entry_ir = hl.if_else(
hl.is_defined(self.entry),
self.entry,
hl.literal(hl.Struct(**{k: hl.missing(v.dtype) for k, v in self.entry.items()})))._ir
return MatrixTable(ir.MatrixMapEntries(self._mir, entry_ir))
@typecheck_method(row_field=str, col_field=str)
def compute_entry_filter_stats(self, row_field='entry_stats_row', col_field='entry_stats_col') -> 'MatrixTable':
"""Compute statistics about the number and fraction of filtered entries.
.. include:: _templates/experimental.rst
Parameters
----------
row_field : :class:`str`
Name for computed row field (default: ``entry_stats_row``).
col_field : :class:`str`
Name for computed column field (default: ``entry_stats_col``).
Returns
-------
:class:`.MatrixTable`
Notes
-----
Adds a new row field, `row_field`, and a new column field, `col_field`,
each of which are structs with the following fields:
- *n_filtered* (:data:`.tint64`) - Number of filtered entries per row
or column.
- *n_remaining* (:data:`.tint64`) - Number of entries not filtered per
row or column.
- *fraction_filtered* (:data:`.tfloat32`) - Number of filtered entries
divided by the total number of filtered and remaining entries.
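Examples
--------
A sketch on a synthetic matrix table where half the entries are filtered:
>>> mt = hl.utils.range_matrix_table(4, 4)
>>> mt = mt.filter_entries((mt.row_idx + mt.col_idx) % 2 == 0)
>>> mt = mt.compute_entry_filter_stats()
>>> mt.entry_stats_row.n_filtered.take(1)  # doctest: +SKIP_OUTPUT_CHECK
[2]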
See Also
--------
:meth:`filter_entries`, :meth:`unfilter_entries`
"""
def result(count):
return hl.rbind(count,
hl.agg.count(),
lambda n_tot, n_def: hl.struct(n_filtered=n_tot - n_def,
n_remaining=n_def,
fraction_filtered=(n_tot - n_def) / n_tot))
mt = self
mt = mt.annotate_cols(**{col_field: result(mt.count_rows(_localize=False))})
mt = mt.annotate_rows(**{row_field: result(mt.count_cols(_localize=False))})
return mt
@typecheck_method(named_exprs=expr_any)
def transmute_globals(self, **named_exprs) -> 'MatrixTable':
"""Similar to :meth:`.MatrixTable.annotate_globals`, but drops referenced fields.
Notes
-----
This method adds new global fields according to `named_exprs`, and
drops all global fields referenced in those expressions. See
:meth:`.Table.transmute` for full documentation on how transmute
methods work.
See Also
--------
:meth:`.Table.transmute`, :meth:`.MatrixTable.select_globals`,
:meth:`.MatrixTable.annotate_globals`
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Annotation expressions.
Returns
-------
:class:`.MatrixTable`
"""
caller = 'MatrixTable.transmute_globals'
check_annotate_exprs(caller, named_exprs, self._global_indices, set())
fields_referenced = extract_refs_by_indices(named_exprs.values(), self._global_indices) - set(named_exprs.keys())
return self._select_globals(caller,
self.globals.annotate(**named_exprs).drop(*fields_referenced))
@typecheck_method(named_exprs=expr_any)
def transmute_rows(self, **named_exprs) -> 'MatrixTable':
"""Similar to :meth:`.MatrixTable.annotate_rows`, but drops referenced fields.
Notes
-----
This method adds new row fields according to `named_exprs`, and drops
all row fields referenced in those expressions. See
:meth:`.Table.transmute` for full documentation on how transmute
methods work.
Note
----
:meth:`transmute_rows` will not drop key fields.
Note
----
This method supports aggregation over columns.
See Also
--------
:meth:`.Table.transmute`, :meth:`.MatrixTable.select_rows`,
:meth:`.MatrixTable.annotate_rows`
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Annotation expressions.
Returns
-------
:class:`.MatrixTable`
"""
caller = 'MatrixTable.transmute_rows'
check_annotate_exprs(caller, named_exprs, self._row_indices, {self._col_axis})
fields_referenced = extract_refs_by_indices(named_exprs.values(), self._row_indices) - set(named_exprs.keys())
fields_referenced -= set(self.row_key)
return self._select_rows(caller, self.row.annotate(**named_exprs).drop(*fields_referenced))
@typecheck_method(named_exprs=expr_any)
def transmute_cols(self, **named_exprs) -> 'MatrixTable':
"""Similar to :meth:`.MatrixTable.annotate_cols`, but drops referenced fields.
Notes
-----
This method adds new column fields according to `named_exprs`, and
drops all column fields referenced in those expressions. See
:meth:`.Table.transmute` for full documentation on how transmute
methods work.
Note
----
:meth:`transmute_cols` will not drop key fields.
Note
----
This method supports aggregation over rows.
See Also
--------
:meth:`.Table.transmute`, :meth:`.MatrixTable.select_cols`,
:meth:`.MatrixTable.annotate_cols`
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Annotation expressions.
Returns
-------
:class:`.MatrixTable`
"""
caller = 'MatrixTable.transmute_cols'
check_annotate_exprs(caller, named_exprs, self._col_indices, {self._row_axis})
fields_referenced = extract_refs_by_indices(named_exprs.values(), self._col_indices) - set(named_exprs.keys())
fields_referenced -= set(self.col_key)
return self._select_cols(caller,
self.col.annotate(**named_exprs).drop(*fields_referenced))
@typecheck_method(named_exprs=expr_any)
def transmute_entries(self, **named_exprs) -> 'MatrixTable':
"""Similar to :meth:`.MatrixTable.annotate_entries`, but drops referenced fields.
Notes
-----
This method adds new entry fields according to `named_exprs`, and
drops all entry fields referenced in those expressions. See
:meth:`.Table.transmute` for full documentation on how transmute
methods work.
See Also
--------
:meth:`.Table.transmute`, :meth:`.MatrixTable.select_entries`,
:meth:`.MatrixTable.annotate_entries`
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Annotation expressions.
Returns
-------
:class:`.MatrixTable`
"""
caller = 'MatrixTable.transmute_entries'
check_annotate_exprs(caller, named_exprs, self._entry_indices, set())
fields_referenced = extract_refs_by_indices(named_exprs.values(), self._entry_indices) - set(named_exprs.keys())
return self._select_entries(caller,
self.entry.annotate(**named_exprs).drop(*fields_referenced))
@typecheck_method(expr=expr_any, _localize=bool)
def aggregate_rows(self, expr, _localize=True) -> Any:
"""Aggregate over rows to a local value.
Examples
--------
Aggregate over rows:
>>> dataset.aggregate_rows(hl.struct(n_high_quality=hl.agg.count_where(dataset.qual > 40),
... mean_qual=hl.agg.mean(dataset.qual)))
Struct(n_high_quality=13, mean_qual=544323.8915384616)
Notes
-----
Unlike most :class:`.MatrixTable` methods, this method does not support
meaningful references to fields that are not global or indexed by row.
This method should be thought of as a more convenient alternative to
the following:
>>> rows_table = dataset.rows()
>>> rows_table.aggregate(hl.struct(n_high_quality=hl.agg.count_where(rows_table.qual > 40),
... mean_qual=hl.agg.mean(rows_table.qual)))
Note
----
This method supports (and expects!) aggregation over rows.
Parameters
----------
expr : :class:`.Expression`
Aggregation expression.
Returns
-------
any
Aggregated value dependent on `expr`.
"""
base, _ = self._process_joins(expr)
analyze('MatrixTable.aggregate_rows', expr, self._global_indices, {self._row_axis})
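# Aggregation runs over the rows table, so the matrix row binding 'va' is
# rebound to the table row binding 'row' before building the TableAggregate.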
subst_query = ir.subst(expr._ir, {}, {'va': ir.Ref('row')})
agg_ir = ir.TableAggregate(ir.MatrixRowsTable(base._mir), subst_query)
if _localize:
return Env.backend().execute(ir.MakeTuple([agg_ir]))[0]
else:
return construct_expr(ir.LiftMeOut(agg_ir), expr.dtype)
@typecheck_method(expr=expr_any, _localize=bool)
def aggregate_cols(self, expr, _localize=True) -> Any:
"""Aggregate over columns to a local value.
Examples
--------
Aggregate over columns:
>>> dataset.aggregate_cols(
... hl.struct(fraction_female=hl.agg.fraction(dataset.pheno.is_female),
... case_ratio=hl.agg.count_where(dataset.is_case) / hl.agg.count()))
Struct(fraction_female=0.48, case_ratio=1.0)
Notes
-----
Unlike most :class:`.MatrixTable` methods, this method does not support
meaningful references to fields that are not global or indexed by column.
This method should be thought of as a more convenient alternative to
the following:
>>> cols_table = dataset.cols()
>>> cols_table.aggregate(
... hl.struct(fraction_female=hl.agg.fraction(cols_table.pheno.is_female),
... case_ratio=hl.agg.count_where(cols_table.is_case) / hl.agg.count()))
Note
----
This method supports (and expects!) aggregation over columns.
Parameters
----------
expr : :class:`.Expression`
Aggregation expression.
Returns
-------
any
Aggregated value dependent on `expr`.
"""
base, _ = self._process_joins(expr)
analyze('MatrixTable.aggregate_cols', expr, self._global_indices, {self._col_axis})
subst_query = ir.subst(expr._ir, {}, {'sa': ir.Ref('row')})
agg_ir = ir.TableAggregate(ir.MatrixColsTable(base._mir), subst_query)
if _localize:
return Env.backend().execute(ir.MakeTuple([agg_ir]))[0]
else:
return construct_expr(ir.LiftMeOut(agg_ir), expr.dtype)
@typecheck_method(expr=expr_any, _localize=bool)
def aggregate_entries(self, expr, _localize=True):
"""Aggregate over entries to a local value.
Examples
--------
Aggregate over entries:
>>> dataset.aggregate_entries(hl.struct(global_gq_mean=hl.agg.mean(dataset.GQ),
... call_rate=hl.agg.fraction(hl.is_defined(dataset.GT))))
Struct(global_gq_mean=64.01841473178543, call_rate=0.9607692307692308)
Notes
-----
This method should be thought of as a more convenient alternative to
the following:
>>> entries_table = dataset.entries()
>>> entries_table.aggregate(hl.struct(global_gq_mean=hl.agg.mean(entries_table.GQ),
... call_rate=hl.agg.fraction(hl.is_defined(entries_table.GT))))
Note
----
This method supports (and expects!) aggregation over entries.
Parameters
----------
expr : :class:`.Expression`
Aggregation expressions.
Returns
-------
any
Aggregated value dependent on `expr`.
"""
base, _ = self._process_joins(expr)
analyze('MatrixTable.aggregate_entries', expr, self._global_indices, {self._row_axis, self._col_axis})
agg_ir = ir.MatrixAggregate(base._mir, expr._ir)
if _localize:
return Env.backend().execute(agg_ir)
else:
return construct_expr(ir.LiftMeOut(agg_ir), expr.dtype)
@typecheck_method(field_expr=oneof(str, Expression))
def explode_rows(self, field_expr) -> 'MatrixTable':
"""Explodes a row field of type array or set, copying the entire row for each element.
Examples
--------
Explode rows by annotated genes:
>>> dataset_result = dataset.explode_rows(dataset.gene)
Notes
-----
The new matrix table will have `N` copies of each row, where `N` is the number
of elements that row contains for the field denoted by `field_expr`. The field
referenced in `field_expr` is replaced in the sequence of duplicated rows by the
sequence of elements in the array or set. All other fields remain the same,
including entry fields.
If the field referenced with `field_expr` is missing or empty, the row is
removed entirely.
Parameters
----------
field_expr : str or :class:`.Expression`
Field name or (possibly nested) field reference expression.
Returns
-------
:class:`.MatrixTable`
Matrix table exploded row-wise for each element of `field_expr`.
"""
if isinstance(field_expr, str):
if field_expr not in self._fields:
raise KeyError("MatrixTable has no field '{}'".format(field_expr))
elif self._fields[field_expr]._indices != self._row_indices:
raise ExpressionException("Method 'explode_rows' expects a field indexed by row, found axes '{}'"
.format(self._fields[field_expr]._indices.axes))
root = [field_expr]
field_expr = self._fields[field_expr]
else:
analyze('MatrixTable.explode_rows', field_expr, self._row_indices, set(self._fields.keys()))
if not field_expr._ir.is_nested_field:
raise ExpressionException(
"method 'explode_rows' requires a field or subfield, not a complex expression")
nested = field_expr._ir
root = []
while isinstance(nested, ir.GetField):
root.append(nested.name)
nested = nested.o
root = root[::-1]
if not isinstance(field_expr.dtype, (tarray, tset)):
raise ValueError(f"method 'explode_rows' expects array or set, found: {field_expr.dtype}")
if self.row_key is not None:
for k in self.row_key.values():
if k is field_expr:
raise ValueError("method 'explode_rows' cannot explode a key field")
return MatrixTable(ir.MatrixExplodeRows(self._mir, root))
@typecheck_method(field_expr=oneof(str, Expression))
def explode_cols(self, field_expr) -> 'MatrixTable':
"""Explodes a column field of type array or set, copying the entire column for each element.
Examples
--------
Explode columns by annotated cohorts:
>>> dataset_result = dataset.explode_cols(dataset.cohorts)
Notes
-----
The new matrix table will have `N` copies of each column, where `N` is the
number of elements that column contains for the field denoted by `field_expr`.
The field referenced in `field_expr` is replaced in the sequence of duplicated
columns by the sequence of elements in the array or set. All other fields remain
the same, including entry fields.
If the field referenced with `field_expr` is missing or empty, the column is
removed entirely.
Parameters
----------
field_expr : str or :class:`.Expression`
Field name or (possibly nested) field reference expression.
Returns
-------
:class:`.MatrixTable`
Matrix table exploded column-wise for each element of `field_expr`.
"""
if isinstance(field_expr, str):
if field_expr not in self._fields:
raise KeyError("MatrixTable has no field '{}'".format(field_expr))
elif self._fields[field_expr]._indices != self._col_indices:
raise ExpressionException("Method 'explode_cols' expects a field indexed by col, found axes '{}'"
.format(self._fields[field_expr]._indices.axes))
root = [field_expr]
field_expr = self._fields[field_expr]
else:
analyze('MatrixTable.explode_cols', field_expr, self._col_indices)
if not field_expr._ir.is_nested_field:
raise ExpressionException(
"method 'explode_cols' requires a field or subfield, not a complex expression")
root = []
nested = field_expr._ir
while isinstance(nested, ir.GetField):
root.append(nested.name)
nested = nested.o
root = root[::-1]
if not isinstance(field_expr.dtype, (tarray, tset)):
raise ValueError(f"method 'explode_cols' expects array or set, found: {field_expr.dtype}")
if self.col_key is not None:
for k in self.col_key.values():
if k is field_expr:
raise ValueError("method 'explode_cols' cannot explode a key field")
return MatrixTable(ir.MatrixExplodeCols(self._mir, root))
@typecheck_method(exprs=oneof(str, Expression), named_exprs=expr_any)
def group_rows_by(self, *exprs, **named_exprs) -> 'GroupedMatrixTable':
"""Group rows, used with :meth:`.GroupedMatrixTable.aggregate`.
Examples
--------
Aggregate to a matrix with genes as row keys, computing the number of
non-reference calls as an entry field:
>>> dataset_result = (dataset.group_rows_by(dataset.gene)
... .aggregate(n_non_ref = hl.agg.count_where(dataset.GT.is_non_ref())))
Notes
-----
All complex expressions must be passed as named expressions.
Parameters
----------
exprs : args of :class:`str` or :class:`.Expression`
Row fields to group by.
named_exprs : keyword args of :class:`.Expression`
Row-indexed expressions to group by.
Returns
-------
:class:`.GroupedMatrixTable`
Grouped matrix. Can be used to call :meth:`.GroupedMatrixTable.aggregate`.
"""
return GroupedMatrixTable(self).group_rows_by(*exprs, **named_exprs)
@typecheck_method(exprs=oneof(str, Expression), named_exprs=expr_any)
def group_cols_by(self, *exprs, **named_exprs) -> 'GroupedMatrixTable':
"""Group columns, used with :meth:`.GroupedMatrixTable.aggregate`.
Examples
--------
Aggregate to a matrix with cohort as column keys, computing the call rate
as an entry field:
>>> dataset_result = (dataset.group_cols_by(dataset.cohort)
... .aggregate(call_rate = hl.agg.fraction(hl.is_defined(dataset.GT))))
Notes
-----
All complex expressions must be passed as named expressions.
Parameters
----------
exprs : args of :class:`str` or :class:`.Expression`
Column fields to group by.
named_exprs : keyword args of :class:`.Expression`
Column-indexed expressions to group by.
Returns
-------
:class:`.GroupedMatrixTable`
Grouped matrix; can be used to call :meth:`.GroupedMatrixTable.aggregate`.
"""
return GroupedMatrixTable(self).group_cols_by(*exprs, **named_exprs)
def collect_cols_by_key(self) -> 'MatrixTable':
"""Collect values for each unique column key into arrays.
Examples
--------
>>> mt = hl.utils.range_matrix_table(3, 3)
>>> col_dict = hl.literal({0: [1], 1: [2, 3], 2: [4, 5, 6]})
>>> mt = (mt.annotate_cols(foo = col_dict.get(mt.col_idx))
... .explode_cols('foo'))
>>> mt = mt.annotate_entries(bar = mt.row_idx * mt.foo)
>>> mt.cols().show() # doctest: +SKIP_OUTPUT_CHECK
+---------+-------+
| col_idx | foo |
+---------+-------+
| int32 | int32 |
+---------+-------+
| 0 | 1 |
| 1 | 2 |
| 1 | 3 |
| 2 | 4 |
| 2 | 5 |
| 2 | 6 |
+---------+-------+
>>> mt.entries().show() # doctest: +SKIP_OUTPUT_CHECK
+---------+---------+-------+-------+
| row_idx | col_idx | foo | bar |
+---------+---------+-------+-------+
| int32 | int32 | int32 | int32 |
+---------+---------+-------+-------+
| 0 | 0 | 1 | 0 |
| 0 | 1 | 2 | 0 |
| 0 | 1 | 3 | 0 |
| 0 | 2 | 4 | 0 |
| 0 | 2 | 5 | 0 |
| 0 | 2 | 6 | 0 |
| 1 | 0 | 1 | 1 |
| 1 | 1 | 2 | 2 |
| 1 | 1 | 3 | 3 |
| 1 | 2 | 4 | 4 |
+---------+---------+-------+-------+
showing top 10 rows
>>> mt = mt.collect_cols_by_key()
>>> mt.cols().show()
+---------+--------------+
| col_idx | foo |
+---------+--------------+
| int32 | array<int32> |
+---------+--------------+
| 0 | [1] |
| 1 | [2,3] |
| 2 | [4,5,6] |
+---------+--------------+
>>> mt.entries().show() # doctest: +SKIP_OUTPUT_CHECK
+---------+---------+--------------+--------------+
| row_idx | col_idx | foo | bar |
+---------+---------+--------------+--------------+
| int32 | int32 | array<int32> | array<int32> |
+---------+---------+--------------+--------------+
| 0 | 0 | [1] | [0] |
| 0 | 1 | [2,3] | [0,0] |
| 0 | 2 | [4,5,6] | [0,0,0] |
| 1 | 0 | [1] | [1] |
| 1 | 1 | [2,3] | [2,3] |
| 1 | 2 | [4,5,6] | [4,5,6] |
| 2 | 0 | [1] | [2] |
| 2 | 1 | [2,3] | [4,6] |
| 2 | 2 | [4,5,6] | [8,10,12] |
+---------+---------+--------------+--------------+
Notes
-----
Each entry field and each non-key column field of type t is replaced by
a field of type array<t>. The value of each such field is an array
containing all values of that field sharing the corresponding column
key. In each column, the newly collected arrays all have the same
length, and the values of each pre-collection column are guaranteed to
be located at the same index in their corresponding arrays.
Note
-----
The order of the columns is not guaranteed.
Returns
-------
:class:`.MatrixTable`
"""
return MatrixTable(ir.MatrixCollectColsByKey(self._mir))
@typecheck_method(_localize=bool)
def count_rows(self, _localize=True) -> int:
"""Count the number of rows in the matrix.
Examples
--------
Count the number of rows:
>>> n_rows = dataset.count_rows()
Returns
-------
:obj:`int`
Number of rows in the matrix.
"""
count_ir = ir.TableCount(ir.MatrixRowsTable(self._mir))
if _localize:
return Env.backend().execute(count_ir)
else:
return construct_expr(ir.LiftMeOut(count_ir), hl.tint64)
def _force_count_rows(self):
return Env.backend().execute(ir.MatrixToValueApply(self._mir, {'name': 'ForceCountMatrixTable'}))
def _force_count_cols(self):
return self.cols()._force_count()
@typecheck_method(_localize=bool)
def count_cols(self, _localize=True) -> int:
"""Count the number of columns in the matrix.
Examples
--------
Count the number of columns:
>>> n_cols = dataset.count_cols()
Returns
-------
:obj:`int`
Number of columns in the matrix.
"""
count_ir = ir.TableCount(ir.MatrixColsTable(self._mir))
if _localize:
return Env.backend().execute(count_ir)
else:
return construct_expr(ir.LiftMeOut(count_ir), hl.tint64)
def count(self) -> Tuple[int, int]:
"""Count the number of rows and columns in the matrix.
Examples
--------
>>> dataset.count()
Returns
-------
:obj:`int`, :obj:`int`
Number of rows, number of cols.
"""
count_ir = ir.MatrixCount(self._mir)
return Env.backend().execute(count_ir)
@typecheck_method(output=str,
overwrite=bool,
stage_locally=bool,
_codec_spec=nullable(str),
_read_if_exists=bool,
_intervals=nullable(sequenceof(anytype)),
_filter_intervals=bool,
_drop_cols=bool,
_drop_rows=bool)
def checkpoint(self, output: str, overwrite: bool = False, stage_locally: bool = False,
_codec_spec: Optional[str] = None, _read_if_exists: bool = False,
_intervals=None, _filter_intervals=False, _drop_cols=False, _drop_rows=False) -> 'MatrixTable':
"""Checkpoint the matrix table to disk by writing and reading using a fast, but less space-efficient codec.
Parameters
----------
output : str
Path at which to write.
stage_locally: bool
If ``True``, major output will be written to temporary local storage
before being copied to ``output``
overwrite : bool
If ``True``, overwrite an existing file at the destination.
Returns
-------
:class:`MatrixTable`
.. include:: _templates/write_warning.rst
Notes
-----
An alias for :meth:`write` followed by :func:`.read_matrix_table`. It is
possible to read the file at this path later with
:func:`.read_matrix_table`. A faster, but less efficient, codec is used
for writing the data, so the file will be larger than if one used
:meth:`write`.
Examples
--------
>>> dataset = dataset.checkpoint('output/dataset_checkpoint.mt')
"""
if _codec_spec is None:
_codec_spec = """{
"name": "LEB128BufferSpec",
"child": {
"name": "BlockingBufferSpec",
"blockSize": 32768,
"child": {
"name": "LZ4FastBlockBufferSpec",
"blockSize": 32768,
"child": {
"name": "StreamBlockBufferSpec"
}
}
}
}"""
if not _read_if_exists or not hl.hadoop_exists(f'{output}/_SUCCESS'):
self.write(output=output, overwrite=overwrite, stage_locally=stage_locally, _codec_spec=_codec_spec)
_assert_type = self._type
_load_refs = False
else:
_assert_type = None
_load_refs = True
return hl.read_matrix_table(
output,
_intervals=_intervals,
_filter_intervals=_filter_intervals,
_drop_cols=_drop_cols,
_drop_rows=_drop_rows,
_assert_type=_assert_type,
_load_refs=_load_refs
)
@typecheck_method(output=str,
overwrite=bool,
stage_locally=bool,
_codec_spec=nullable(str),
_partitions=nullable(expr_any),
_checkpoint_file=nullable(str))
def write(self, output: str, overwrite: bool = False, stage_locally: bool = False,
_codec_spec: Optional[str] = None, _partitions=None, _checkpoint_file=None):
"""Write to disk.
Examples
--------
>>> dataset.write('output/dataset.mt')
.. include:: _templates/write_warning.rst
See Also
--------
:func:`.read_matrix_table`
Parameters
----------
output : str
Path at which to write.
stage_locally: bool
If ``True``, major output will be written to temporary local storage
before being copied to ``output``
overwrite : bool
If ``True``, overwrite an existing file at the destination.
"""
if _partitions is not None:
_partitions, _partitions_type = hl.utils._dumps_partitions(_partitions, self.row_key.dtype)
else:
_partitions_type = None
writer = ir.MatrixNativeWriter(output, overwrite, stage_locally, _codec_spec, _partitions, _partitions_type, _checkpoint_file)
Env.backend().execute(ir.MatrixWrite(self._mir, writer))
class _Show:
def __init__(self, table, n_rows, actual_n_cols, displayed_n_cols, width, truncate, types):
self.table_show = table._show(n_rows, width, truncate, types)
self.actual_n_cols = actual_n_cols
self.displayed_n_cols = displayed_n_cols
def __str__(self):
s = self.table_show.__str__()
if self.displayed_n_cols != self.actual_n_cols:
s += f"showing the first { self.displayed_n_cols } of { self.actual_n_cols } columns"
return s
def __repr__(self):
return self.__str__()
def _repr_html_(self):
s = self.table_show._repr_html_()
if self.displayed_n_cols != self.actual_n_cols:
s += '<p style="background: #fdd; padding: 0.4em;">'
s += f"showing the first { self.displayed_n_cols } of { self.actual_n_cols } columns"
s += '</p>\n'
return s
@typecheck_method(n_rows=nullable(int),
n_cols=nullable(int),
include_row_fields=bool,
width=nullable(int),
truncate=nullable(int),
types=bool,
handler=nullable(anyfunc))
def show(self,
n_rows=None,
n_cols=None,
include_row_fields=False,
width=None,
truncate=None,
types=True,
handler=None):
"""Print the first few rows of the matrix table to the console.
.. include:: _templates/experimental.rst
Notes
-----
The output can be piped to another output source using the `handler` argument:
>>> mt.show(handler=lambda x: logging.info(x)) # doctest: +SKIP
Parameters
----------
n_rows : :obj:`int`
Maximum number of rows to show.
n_cols : :obj:`int`
Maximum number of columns to show.
width : :obj:`int`
Horizontal width at which to break fields.
truncate : :obj:`int`, optional
Truncate each field to the given number of characters. If
``None``, truncate fields to the given `width`.
types : :obj:`bool`
Print an extra header line with the type of each field.
handler : Callable[[str], Any]
Handler function for data string.
"""
def estimate_size(struct_expression):
return sum(max(len(f), len(str(x.dtype))) + 3
for f, x in struct_expression.flatten().items())
if n_cols is None:
import shutil
(characters, _) = shutil.get_terminal_size((80, 10))
characters -= 6 # borders
key_characters = estimate_size(self.row_key)
characters -= key_characters
if include_row_fields:
characters -= estimate_size(self.row_value)
characters = max(characters, 0)
n_cols = characters // (estimate_size(self.entry) + 4) # 4 for the column index
actual_n_cols = self.count_cols()
displayed_n_cols = min(actual_n_cols, n_cols)
t = self.localize_entries('entries', 'cols')
if len(t.key) > 0:
t = t.order_by(*t.key)
col_key_type = self.col_key.dtype
if len(col_key_type) == 1 and col_key_type[0] in (hl.tstr, hl.tint32, hl.tint64):
cols = self.col_key[0].take(displayed_n_cols)
entries = {repr(cols[i]): t.entries[i]
for i in range(0, displayed_n_cols)}
else:
entries = {f'<col {i}>': t.entries[i] for i in range(0, displayed_n_cols)}
t = t.select(
**{f: t[f] for f in self.row_key},
**{f: t[f] for f in self.row_value if include_row_fields},
**entries)
if handler is None:
handler = default_handler()
return handler(MatrixTable._Show(t, n_rows, actual_n_cols, displayed_n_cols, width, truncate, types))
def globals_table(self) -> Table:
"""Returns a table with a single row with the globals of the matrix table.
Examples
--------
Extract the globals table:
>>> globals_table = dataset.globals_table()
Returns
-------
:class:`.Table`
Table with the globals from the matrix, with a single row.
"""
return Table.parallelize(
[hl.eval(self.globals)], self._global_type)
def rows(self) -> Table:
"""Returns a table with all row fields in the matrix.
Examples
--------
Extract the row table:
>>> rows_table = dataset.rows()
Returns
-------
:class:`.Table`
Table with all row fields from the matrix, with one row per row of the matrix.
"""
return Table(ir.MatrixRowsTable(self._mir))
def cols(self) -> Table:
"""Returns a table with all column fields in the matrix.
Examples
--------
Extract the column table:
>>> cols_table = dataset.cols()
Warning
-------
Matrix table columns are typically sorted by the order at import, and
not necessarily by column key. Since tables are always sorted by key,
the table which results from this command will have its rows sorted by
the column key (which becomes the table key). To preserve the original
column order as the table row order, first unkey the columns using
:meth:`key_cols_by` with no arguments.
Returns
-------
:class:`.Table`
Table with all column fields from the matrix, with one row per column of the matrix.
"""
if len(self.col_key) != 0 and Env.hc()._warn_cols_order:
warning("cols(): Resulting column table is sorted by 'col_key'."
"\n To preserve matrix table column order, "
"first unkey columns with 'key_cols_by()'")
Env.hc()._warn_cols_order = False
return Table(ir.MatrixColsTable(self._mir))
def entries(self) -> Table:
"""Returns a matrix in coordinate table form.
Examples
--------
Extract the entry table:
>>> entries_table = dataset.entries()
Notes
-----
The coordinate table representation of the source matrix table contains
one row for each **non-filtered** entry of the matrix -- if a matrix table
has no filtered entries and contains N rows and M columns, the table will contain
``M * N`` rows, which can be **a very large number**.
This representation can be useful for aggregating over both axes of a matrix table
at the same time -- it is not possible to aggregate over a matrix table using
:meth:`group_rows_by` and :meth:`group_cols_by` at the same time (aggregating
by population and chromosome from a variant-by-sample genetics representation,
for instance). After moving to the coordinate representation with :meth:`entries`,
it is possible to group and aggregate the resulting table much more flexibly,
albeit with potentially poorer computational performance.
Warning
-------
The table returned by this method should be used for aggregation or queries,
but never exported or written to disk without extensive filtering and field
selection -- the disk footprint of an entries_table could be 100x (or more!)
larger than its parent matrix. This means that if you try to export the entries
table of a 10 terabyte matrix, you could write a petabyte of data!
Warning
-------
Matrix table columns are typically sorted by the order at import, and
not necessarily by column key. Since tables are always sorted by key,
the table which results from this command will have its rows sorted by
the compound (row key, column key) which becomes the table key.
To preserve the original row-major entry order as the table row order,
first unkey the columns using :meth:`key_cols_by` with no arguments.
Warning
-------
If the matrix table has no row key, but has a column key, this operation
may require a full shuffle to sort by the column key, depending on the
pipeline.
Returns
-------
:class:`.Table`
Table with all non-global fields from the matrix, with **one row per entry of the matrix**.
"""
if Env.hc()._warn_entries_order and len(self.col_key) > 0:
warning("entries(): Resulting entries table is sorted by '(row_key, col_key)'."
"\n To preserve row-major matrix table order, "
"first unkey columns with 'key_cols_by()'")
Env.hc()._warn_entries_order = False
return Table(ir.MatrixEntriesTable(self._mir))
def index_globals(self) -> Expression:
"""Return this matrix table's global variables for use in another
expression context.
Examples
--------
>>> dataset1 = dataset.annotate_globals(pli={'SCN1A': 0.999, 'SONIC': 0.014})
>>> pli_dict = dataset1.index_globals().pli
>>> dataset_result = dataset2.annotate_rows(gene_pli = dataset2.gene.map(lambda x: pli_dict.get(x)))
Returns
-------
:class:`.StructExpression`
"""
return construct_expr(ir.TableGetGlobals(ir.MatrixRowsTable(self._mir)), self.globals.dtype)
def index_rows(self, *exprs, all_matches=False) -> 'Expression':
"""Expose the row values as if looked up in a dictionary, indexing
with `exprs`.
Examples
--------
>>> dataset_result = dataset.annotate_rows(qual = dataset2.index_rows(dataset.locus, dataset.alleles).qual)
Or equivalently:
>>> dataset_result = dataset.annotate_rows(qual = dataset2.index_rows(dataset.row_key).qual)
Parameters
----------
exprs : variable-length args of :class:`.Expression`
Index expressions.
all_matches : bool
Experimental. If ``True``, value of expression is array of all matches.
Notes
-----
``index_rows(exprs)`` is equivalent to ``rows().index(exprs)``
or ``rows()[exprs]``.
The type of the resulting struct is the same as the type of
:meth:`.row_value`.
Returns
-------
:class:`.Expression`
"""
try:
return self.rows()._index(*exprs, all_matches=all_matches)
except TableIndexKeyError as err:
raise ExpressionException(
f"Key type mismatch: cannot index matrix table with given expressions:\n"
f" MatrixTable row key: {', '.join(str(t) for t in err.key_type.values()) or '<<<empty key>>>'}\n"
f" Index expressions: {', '.join(str(e.dtype) for e in err.index_expressions)}")
def index_cols(self, *exprs, all_matches=False) -> 'Expression':
"""Expose the column values as if looked up in a dictionary, indexing
with `exprs`.
Examples
--------
>>> dataset_result = dataset.annotate_cols(pheno = dataset2.index_cols(dataset.s).pheno)
Or equivalently:
>>> dataset_result = dataset.annotate_cols(pheno = dataset2.index_cols(dataset.col_key).pheno)
Parameters
----------
exprs : variable-length args of :class:`.Expression`
Index expressions.
all_matches : bool
Experimental. If ``True``, value of expression is array of all matches.
Notes
-----
``index_cols(exprs)`` is equivalent to ``cols().index(exprs)``
or ``cols()[exprs]``.
The type of the resulting struct is the same as the type of
:meth:`.col_value`.
Returns
-------
:class:`.Expression`
"""
try:
return self.cols()._index(*exprs, all_matches=all_matches)
except TableIndexKeyError as err:
raise ExpressionException(
f"Key type mismatch: cannot index matrix table with given expressions:\n"
f" MatrixTable col key: {', '.join(str(t) for t in err.key_type.values()) or '<<<empty key>>>'}\n"
f" Index expressions: {', '.join(str(e.dtype) for e in err.index_expressions)}")
def index_entries(self, row_exprs, col_exprs):
"""Expose the entries as if looked up in a dictionary, indexing
with `exprs`.
Examples
--------
>>> dataset_result = dataset.annotate_entries(GQ2 = dataset2.index_entries(dataset.row_key, dataset.col_key).GQ)
Or equivalently:
>>> dataset_result = dataset.annotate_entries(GQ2 = dataset2[dataset.row_key, dataset.col_key].GQ)
Parameters
----------
row_exprs : tuple of :class:`.Expression`
Row index expressions.
col_exprs : tuple of :class:`.Expression`
Column index expressions.
Notes
-----
The type of the resulting struct is the same as the type of
:meth:`.entry`.
Note
----
There is a shorthand syntax for :meth:`.MatrixTable.index_entries` using
square brackets (the Python ``__getitem__`` syntax). This syntax is
preferred.
>>> dataset_result = dataset.annotate_entries(GQ2 = dataset2[dataset.row_key, dataset.col_key].GQ)
Returns
-------
:class:`.StructExpression`
"""
row_exprs = wrap_to_tuple(row_exprs)
col_exprs = wrap_to_tuple(col_exprs)
if len(row_exprs) == 0 or len(col_exprs) == 0:
raise ValueError("'MatrixTable.index_entries:' 'row_exprs' and 'col_exprs' must not be empty")
row_non_exprs = list(filter(lambda e: not isinstance(e, Expression), row_exprs))
if row_non_exprs:
raise TypeError(f"'MatrixTable.index_entries': row_exprs expects expressions, found {row_non_exprs}")
col_non_exprs = list(filter(lambda e: not isinstance(e, Expression), col_exprs))
if col_non_exprs:
raise TypeError(f"'MatrixTable.index_entries': col_exprs expects expressions, found {col_non_exprs}")
if not types_match(self.row_key.values(), row_exprs):
if (len(row_exprs) == 1
and isinstance(row_exprs[0], TupleExpression)
and types_match(self.row_key.values(), row_exprs[0])):
return self.index_entries(tuple(row_exprs[0]), col_exprs)
elif (len(row_exprs) == 1
and isinstance(row_exprs[0], StructExpression)
and types_match(self.row_key.values(), row_exprs[0].values())):
return self.index_entries(tuple(row_exprs[0].values()), col_exprs)
elif len(row_exprs) != len(self.row_key):
raise ExpressionException(f'Key mismatch: matrix table has {len(self.row_key)} row key fields, '
f'found {len(row_exprs)} index expressions')
else:
raise ExpressionException(
f"Key type mismatch: Cannot index matrix table with given expressions\n"
f" MatrixTable row key: {', '.join(str(t) for t in self.row_key.dtype.values())}\n"
f" Row index expressions: {', '.join(str(e.dtype) for e in row_exprs)}")
if not types_match(self.col_key.values(), col_exprs):
if (len(col_exprs) == 1
and isinstance(col_exprs[0], TupleExpression)
and types_match(self.col_key.values(), col_exprs[0])):
return self.index_entries(row_exprs, tuple(col_exprs[0]))
elif (len(col_exprs) == 1
and isinstance(col_exprs[0], StructExpression)
and types_match(self.col_key.values(), col_exprs[0].values())):
return self.index_entries(row_exprs, tuple(col_exprs[0].values()))
elif len(col_exprs) != len(self.col_key):
raise ExpressionException(f'Key mismatch: matrix table has {len(self.col_key)} col key fields, '
f'found {len(col_exprs)} index expressions.')
else:
raise ExpressionException(
f"Key type mismatch: cannot index matrix table with given expressions:\n"
f" MatrixTable col key: {', '.join(str(t) for t in self.col_key.dtype.values())}\n"
f" Col index expressions: {', '.join(str(e.dtype) for e in col_exprs)}")
indices, aggregations = unify_all(*(row_exprs + col_exprs))
src = indices.source
if aggregations:
raise ExpressionException('Cannot join using an aggregated field')
uid = Env.get_uid()
uids = [uid]
if isinstance(src, Table):
# join table with matrix.entries_table()
return self.entries().index(*(row_exprs + col_exprs))
else:
assert isinstance(src, MatrixTable)
row_uid = Env.get_uid()
uids.append(row_uid)
col_uid = Env.get_uid()
uids.append(col_uid)
def joiner(left: MatrixTable):
localized = self._localize_entries(row_uid, col_uid)
src_cols_indexed = self.add_col_index(col_uid).cols()
src_cols_indexed = src_cols_indexed.annotate(**{col_uid: hl.int32(src_cols_indexed[col_uid])})
left = left._annotate_all(row_exprs={row_uid: localized.index(*row_exprs)[row_uid]},
col_exprs={col_uid: src_cols_indexed.index(*col_exprs)[col_uid]})
return left.annotate_entries(**{uid: left[row_uid][left[col_uid]]})
join_ir = ir.Join(ir.GetField(ir.TopLevelReference('g'), uid),
uids,
[*row_exprs, *col_exprs],
joiner)
return construct_expr(join_ir, self.entry.dtype, indices, aggregations)
@typecheck_method(entries_field_name=str, cols_field_name=str)
def _localize_entries(self, entries_field_name, cols_field_name) -> 'Table':
return Table(ir.CastMatrixToTable(
self._mir, entries_field_name, cols_field_name))
@typecheck_method(entries_array_field_name=nullable(str),
columns_array_field_name=nullable(str))
def localize_entries(self,
entries_array_field_name=None,
columns_array_field_name=None) -> 'Table':
"""Convert the matrix table to a table with entries localized as an array of structs.
Examples
--------
Build a numpy ndarray from a small :class:`.MatrixTable`:
>>> mt = hl.utils.range_matrix_table(3,3)
>>> mt = mt.select_entries(x = mt.row_idx * mt.col_idx)
>>> mt.show()
+---------+-------+-------+-------+
| row_idx | 0.x | 1.x | 2.x |
+---------+-------+-------+-------+
| int32 | int32 | int32 | int32 |
+---------+-------+-------+-------+
| 0 | 0 | 0 | 0 |
| 1 | 0 | 1 | 2 |
| 2 | 0 | 2 | 4 |
+---------+-------+-------+-------+
>>> t = mt.localize_entries('entry_structs', 'columns')
>>> t.describe()
----------------------------------------
Global fields:
'columns': array<struct {
col_idx: int32
}>
----------------------------------------
Row fields:
'row_idx': int32
'entry_structs': array<struct {
x: int32
}>
----------------------------------------
Key: ['row_idx']
----------------------------------------
>>> t = t.select(entries = t.entry_structs.map(lambda entry: entry.x))
>>> import numpy as np
>>> np.array(t.entries.collect())
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Notes
-----
Both of the added fields are arrays of length equal to
``mt.count_cols()``. Missing entries are represented as missing structs
in the entries array.
Parameters
----------
entries_array_field_name : :class:`str`
The name of the table field containing the array of entry structs
for the given row.
columns_array_field_name : :class:`str`
The name of the global field containing the array of column
structs.
Returns
-------
:class:`.Table`
A table whose fields are the row fields of this matrix table plus
one field named ``entries_array_field_name``. The global fields of
this table are the global fields of this matrix table plus one field
named ``columns_array_field_name``.
"""
entries = entries_array_field_name or Env.get_uid()
cols = columns_array_field_name or Env.get_uid()
t = self._localize_entries(entries, cols)
if entries_array_field_name is None:
t = t.drop(entries)
if columns_array_field_name is None:
t = t.drop(cols)
return t
@typecheck_method(row_exprs=dictof(str, expr_any),
col_exprs=dictof(str, expr_any),
entry_exprs=dictof(str, expr_any),
global_exprs=dictof(str, expr_any))
def _annotate_all(self,
row_exprs={},
col_exprs={},
entry_exprs={},
global_exprs={},
) -> 'MatrixTable':
all_exprs = list(itertools.chain(row_exprs.values(),
col_exprs.values(),
entry_exprs.values(),
global_exprs.values()))
for field_name in list(itertools.chain(row_exprs.keys(),
col_exprs.keys(),
entry_exprs.keys(),
global_exprs.keys())):
if field_name in self._fields:
raise RuntimeError(f'field {repr(field_name)} already in matrix table, cannot use _annotate_all')
base, cleanup = self._process_joins(*all_exprs)
mir = base._mir
if row_exprs:
row_struct = ir.InsertFields.construct_with_deduplication(
base.row._ir, [(n, e._ir) for (n, e) in row_exprs.items()], None)
mir = ir.MatrixMapRows(mir, row_struct)
if col_exprs:
col_struct = ir.InsertFields.construct_with_deduplication(
base.col._ir, [(n, e._ir) for (n, e) in col_exprs.items()], None)
mir = ir.MatrixMapCols(mir, col_struct, None)
if entry_exprs:
entry_struct = ir.InsertFields.construct_with_deduplication(
base.entry._ir, [(n, e._ir) for (n, e) in entry_exprs.items()], None)
mir = ir.MatrixMapEntries(mir, entry_struct)
if global_exprs:
globals_struct = ir.InsertFields.construct_with_deduplication(
base.globals._ir, [(n, e._ir) for (n, e) in global_exprs.items()], None)
mir = ir.MatrixMapGlobals(mir, globals_struct)
return cleanup(MatrixTable(mir))
@typecheck_method(row_exprs=dictof(str, expr_any),
row_key=nullable(sequenceof(str)),
col_exprs=dictof(str, expr_any),
col_key=nullable(sequenceof(str)),
entry_exprs=dictof(str, expr_any),
global_exprs=dictof(str, expr_any))
def _select_all(self,
row_exprs={},
row_key=None,
col_exprs={},
col_key=None,
entry_exprs={},
global_exprs={},
) -> 'MatrixTable':
all_names = list(itertools.chain(row_exprs.keys(),
col_exprs.keys(),
entry_exprs.keys(),
global_exprs.keys()))
uids = {k: Env.get_uid() for k in all_names}
mt = self._annotate_all({uids[k]: v for k, v in row_exprs.items()},
{uids[k]: v for k, v in col_exprs.items()},
{uids[k]: v for k, v in entry_exprs.items()},
{uids[k]: v for k, v in global_exprs.items()})
keep = set()
if row_key is not None:
old_key = list(mt.row_key)
mt = mt.key_rows_by(*(uids[k] for k in row_key)).drop(*old_key)
else:
keep = keep.union(set(mt.row_key))
if col_key is not None:
old_key = list(mt.col_key)
mt = mt.key_cols_by(*(uids[k] for k in col_key)).drop(*old_key)
else:
keep = keep.union(set(mt.col_key))
keep = keep.union(uids.values())
return (mt.drop(*(f for f in mt._fields if f not in keep))
.rename({uid: original for original, uid in uids.items()}))
def _process_joins(self, *exprs) -> 'MatrixTable':
return process_joins(self, exprs)
def describe(self, handler=print, *, widget=False):
"""Print information about the fields in the matrix table.
Note
----
The `widget` argument is **experimental**.
Parameters
----------
handler : Callable[[str], None]
Handler function for returned string.
widget : bool
Create an interactive IPython widget.
"""
if widget:
from hail.experimental.interact import interact
return interact(self)
def format_type(typ):
return typ.pretty(indent=4).lstrip()
if len(self.globals) == 0:
global_fields = '\n None'
else:
global_fields = ''.join("\n '{name}': {type}".format(
name=f, type=format_type(t)) for f, t in self.globals.dtype.items())
if len(self.row) == 0:
row_fields = '\n None'
else:
row_fields = ''.join("\n '{name}': {type}".format(
name=f, type=format_type(t)) for f, t in self.row.dtype.items())
row_key = '[' + ', '.join("'{name}'".format(name=f) for f in self.row_key) + ']' \
if self.row_key else None
if len(self.col) == 0:
col_fields = '\n None'
else:
col_fields = ''.join("\n '{name}': {type}".format(
name=f, type=format_type(t)) for f, t in self.col.dtype.items())
col_key = '[' + ', '.join("'{name}'".format(name=f) for f in self.col_key) + ']' \
if self.col_key else None
if len(self.entry) == 0:
entry_fields = '\n None'
else:
entry_fields = ''.join("\n '{name}': {type}".format(
name=f, type=format_type(t)) for f, t in self.entry.dtype.items())
s = '----------------------------------------\n' \
'Global fields:{g}\n' \
'----------------------------------------\n' \
'Column fields:{c}\n' \
'----------------------------------------\n' \
'Row fields:{r}\n' \
'----------------------------------------\n' \
'Entry fields:{e}\n' \
'----------------------------------------\n' \
'Column key: {ck}\n' \
'Row key: {rk}\n' \
'----------------------------------------'.format(g=global_fields,
rk=row_key,
r=row_fields,
ck=col_key,
c=col_fields,
e=entry_fields)
handler(s)
@typecheck_method(indices=sequenceof(int))
def choose_cols(self, indices: List[int]) -> 'MatrixTable':
"""Choose a new set of columns from a list of old column indices.
Examples
--------
Randomly shuffle column order:
>>> import random
>>> indices = list(range(dataset.count_cols()))
>>> random.shuffle(indices)
>>> dataset_reordered = dataset.choose_cols(indices)
Take the first ten columns:
>>> dataset_result = dataset.choose_cols(list(range(10)))
Parameters
----------
indices : :obj:`list` of :obj:`int`
List of old column indices.
Returns
-------
:class:`.MatrixTable`
"""
n_cols = self.count_cols()
for i in indices:
if not 0 <= i < n_cols:
raise ValueError(f"'choose_cols': expect indices between 0 and {n_cols}, found {i}")
return MatrixTable(ir.MatrixChooseCols(self._mir, indices))
def n_partitions(self) -> int:
"""Number of partitions.
Notes
-----
The data in a dataset is divided into chunks called partitions, which
may be stored together or across a network, so that each partition may
be read and processed in parallel by available cores. Partitions are a
core concept of distributed computation in Spark, see `here
<http://spark.apache.org/docs/latest/programming-guide.html#resilient-distributed-datasets-rdds>`__
for details.
Returns
-------
int
Number of partitions.
"""
return Env.backend().execute(ir.MatrixToValueApply(self._mir, {'name': 'NPartitionsMatrixTable'}))
@typecheck_method(n_partitions=int,
shuffle=bool)
def repartition(self, n_partitions: int, shuffle: bool = True) -> 'MatrixTable':
"""Change the number of partitions.
Examples
--------
Repartition to 500 partitions:
>>> dataset_result = dataset.repartition(500)
Notes
-----
Check the current number of partitions with :meth:`.n_partitions`.
The data in a dataset is divided into chunks called partitions, which
may be stored together or across a network, so that each partition may
be read and processed in parallel by available cores. When a matrix with
:math:`M` rows is first imported, each of the :math:`k` partitions will
contain about :math:`M/k` of the rows. Since each partition has some
computational overhead, decreasing the number of partitions can improve
performance after significant filtering. Since it's recommended to have
at least 2 - 4 partitions per core, increasing the number of partitions
can allow one to take advantage of more cores. Partitions are a core
concept of distributed computation in Spark, see `their documentation
<http://spark.apache.org/docs/latest/programming-guide.html#resilient-distributed-datasets-rdds>`__
for details.
When ``shuffle=True``, Hail does a full shuffle of the data
and creates equal sized partitions. When ``shuffle=False``,
Hail combines existing partitions to avoid a full
shuffle. These algorithms correspond to the `repartition` and
`coalesce` commands in Spark, respectively. In particular,
when ``shuffle=False``, ``n_partitions`` cannot exceed current
number of partitions.
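For example, to coalesce to 8 partitions without a shuffle (a minimal sketch; assumes the
dataset currently has more than 8 partitions):
>>> dataset_result = dataset.repartition(8, shuffle=False)  # doctest: +SKIP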
Parameters
----------
n_partitions : int
Desired number of partitions.
shuffle : bool
If ``True``, use full shuffle to repartition.
Returns
-------
:class:`.MatrixTable`
Repartitioned dataset.
"""
if hl.current_backend().requires_lowering:
tmp = hl.utils.new_temp_file()
if len(self.row_key) == 0:
uid = Env.get_uid()
tmp2 = hl.utils.new_temp_file()
self.checkpoint(tmp2)
ht = hl.read_matrix_table(tmp2).add_row_index(uid).key_rows_by(uid)
ht.checkpoint(tmp)
return hl.read_matrix_table(tmp, _n_partitions=n_partitions).drop(uid)
else:
# checkpoint rather than write to use fast codec
self.checkpoint(tmp)
return hl.read_matrix_table(tmp, _n_partitions=n_partitions)
return MatrixTable(ir.MatrixRepartition(
self._mir, n_partitions,
ir.RepartitionStrategy.SHUFFLE if shuffle else ir.RepartitionStrategy.COALESCE))
@typecheck_method(max_partitions=int)
def naive_coalesce(self, max_partitions: int) -> 'MatrixTable':
"""Naively decrease the number of partitions.
Example
-------
Naively repartition to 10 partitions:
>>> dataset_result = dataset.naive_coalesce(10)
Warning
-------
:meth:`.naive_coalesce` simply combines adjacent partitions to achieve
the desired number. It does not attempt to rebalance, unlike
:meth:`.repartition`, so it can produce a heavily unbalanced dataset. An
unbalanced dataset can be inefficient to operate on because the work is
not evenly distributed across partitions.
Parameters
----------
max_partitions : int
Desired number of partitions. If the current number of partitions is
less than or equal to `max_partitions`, do nothing.
Returns
-------
:class:`.MatrixTable`
Matrix table with at most `max_partitions` partitions.
"""
if hl.current_backend().requires_lowering:
return self.repartition(max_partitions)
return MatrixTable(ir.MatrixRepartition(
self._mir, max_partitions, ir.RepartitionStrategy.NAIVE_COALESCE))
def cache(self) -> 'MatrixTable':
"""Persist the dataset in memory.
Examples
--------
Persist the dataset in memory:
>>> dataset = dataset.cache() # doctest: +SKIP
Notes
-----
This method is an alias for :func:`persist("MEMORY_ONLY") <hail.MatrixTable.persist>`.
Returns
-------
:class:`.MatrixTable`
Cached dataset.
"""
return self.persist('MEMORY_ONLY')
@typecheck_method(storage_level=storage_level)
def persist(self, storage_level: str = 'MEMORY_AND_DISK') -> 'MatrixTable':
"""Persist this table in memory or on disk.
Examples
--------
Persist the dataset to both memory and disk:
>>> dataset = dataset.persist() # doctest: +SKIP
Notes
-----
The :meth:`.MatrixTable.persist` and :meth:`.MatrixTable.cache`
methods store the current dataset on disk or in memory temporarily to
avoid redundant computation and improve the performance of Hail
pipelines. This method is not a substitution for :meth:`.MatrixTable.write`,
which stores a permanent file.
Most users should use the "MEMORY_AND_DISK" storage level. See the `Spark
documentation
<http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence>`__
for a more in-depth discussion of persisting data.
Parameters
----------
storage_level : str
Storage level. One of: NONE, DISK_ONLY,
DISK_ONLY_2, MEMORY_ONLY, MEMORY_ONLY_2, MEMORY_ONLY_SER,
MEMORY_ONLY_SER_2, MEMORY_AND_DISK, MEMORY_AND_DISK_2,
MEMORY_AND_DISK_SER, MEMORY_AND_DISK_SER_2, OFF_HEAP
Returns
-------
:class:`.MatrixTable`
Persisted dataset.
"""
return Env.backend().persist_matrix_table(self, storage_level)
def unpersist(self) -> 'MatrixTable':
"""
Unpersists this dataset from memory/disk.
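Examples
--------
A minimal sketch, releasing a previously persisted dataset:
>>> dataset = dataset.persist() # doctest: +SKIP
>>> dataset = dataset.unpersist() # doctest: +SKIP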
Notes
-----
This function will have no effect on a dataset that was not previously
persisted.
Returns
-------
:class:`.MatrixTable`
Unpersisted dataset.
"""
return Env.backend().unpersist_matrix_table(self)
@typecheck_method(name=str)
def add_row_index(self, name: str = 'row_idx') -> 'MatrixTable':
"""Add the integer index of each row as a new row field.
Examples
--------
>>> dataset_result = dataset.add_row_index()
Notes
-----
The field added is type :py:data:`.tint64`.
The row index is 0-indexed; the values are found in the range
``[0, N)``, where ``N`` is the total number of rows.
Parameters
----------
name : :class:`str`
Name for row index field.
Returns
-------
:class:`.MatrixTable`
Dataset with new field.
"""
return self.annotate_rows(**{name: hl.scan.count()})
@typecheck_method(name=str)
def add_col_index(self, name: str = 'col_idx') -> 'MatrixTable':
"""Add the integer index of each column as a new column field.
Examples
--------
>>> dataset_result = dataset.add_col_index()
Notes
-----
The field added is type :py:data:`.tint32`.
The column index is 0-indexed; the values are found in the range
``[0, N)``, where ``N`` is the total number of columns.
Parameters
----------
name: :class:`str`
Name for column index field.
Returns
-------
:class:`.MatrixTable`
Dataset with new field.
"""
return self.annotate_cols(**{name: hl.scan.count()})
@typecheck_method(other=matrix_table_type,
tolerance=numeric,
absolute=bool)
def _same(self, other, tolerance=1e-6, absolute=False) -> bool:
entries_name = Env.get_uid()
cols_name = Env.get_uid()
if list(self.col_key) != list(other.col_key):
print(f'different col keys:\n {list(self.col_key)}\n {list(other.col_key)}')
return False
return self._localize_entries(entries_name, cols_name)._same(
other._localize_entries(entries_name, cols_name), tolerance, absolute)
@typecheck_method(caller=str, s=expr_struct())
def _select_entries(self, caller, s) -> 'MatrixTable':
base, cleanup = self._process_joins(s)
analyze(caller, s, self._entry_indices)
return cleanup(MatrixTable(ir.MatrixMapEntries(base._mir, s._ir)))
@typecheck_method(caller=str,
row=expr_struct())
def _select_rows(self, caller, row) -> 'MatrixTable':
analyze(caller, row, self._row_indices, {self._col_axis})
base, cleanup = self._process_joins(row)
return cleanup(MatrixTable(ir.MatrixMapRows(base._mir, row._ir)))
@typecheck_method(caller=str,
col=expr_struct(),
new_key=nullable(sequenceof(str)))
def _select_cols(self, caller, col, new_key=None) -> 'MatrixTable':
analyze(caller, col, self._col_indices, {self._row_axis})
base, cleanup = self._process_joins(col)
return cleanup(MatrixTable(ir.MatrixMapCols(base._mir, col._ir, new_key)))
@typecheck_method(caller=str, s=expr_struct())
def _select_globals(self, caller, s) -> 'MatrixTable':
base, cleanup = self._process_joins(s)
analyze(caller, s, self._global_indices)
return cleanup(MatrixTable(ir.MatrixMapGlobals(base._mir, s._ir)))
@typecheck(datasets=matrix_table_type, _check_cols=bool)
def union_rows(*datasets: 'MatrixTable', _check_cols=True) -> 'MatrixTable':
"""Take the union of dataset rows.
Examples
--------
.. testsetup::
dataset_to_union_1 = dataset
dataset_to_union_2 = dataset
Union the rows of two datasets:
>>> dataset_result = dataset_to_union_1.union_rows(dataset_to_union_2)
Given a list of datasets, take the union of all rows:
>>> all_datasets = [dataset_to_union_1, dataset_to_union_2]
The following three syntaxes are equivalent:
>>> dataset_result = dataset_to_union_1.union_rows(dataset_to_union_2)
>>> dataset_result = all_datasets[0].union_rows(*all_datasets[1:])
>>> dataset_result = hl.MatrixTable.union_rows(*all_datasets)
Notes
-----
In order to combine two datasets, three requirements must be met:
- The column keys must be identical, both in type, value, and ordering.
- The row key schemas and row schemas must match.
- The entry schemas must match.
The column fields in the resulting dataset are the column fields from
the first dataset; the column schemas do not need to match.
This method does not deduplicate; if a row exists identically in two
datasets, then it will be duplicated in the result.
Warning
-------
This method can trigger a shuffle, if partitions from two datasets
overlap.
Parameters
----------
datasets : varargs of :class:`.MatrixTable`
Datasets to combine.
Returns
-------
:class:`.MatrixTable`
Dataset with rows from each member of `datasets`.
"""
if len(datasets) == 0:
raise ValueError('Expected at least one argument')
elif len(datasets) == 1:
return datasets[0]
else:
error_msg = "'MatrixTable.union_rows' expects {} for all datasets to be the same. Found: \ndataset {}: {} \ndataset {}: {}"
first = datasets[0]
for i, next in enumerate(datasets[1:]):
if first.row_key.keys() != next.row_key.keys():
raise ValueError(error_msg.format(
"row keys", 0, first.row_key.keys(), i + 1, next.row_key.keys()
))
if first.row.dtype != next.row.dtype:
raise ValueError(error_msg.format(
"row types", 0, first.row.dtype, i + 1, next.row.dtype
))
if first.entry.dtype != next.entry.dtype:
raise ValueError(error_msg.format(
"entry field types", 0, first.entry.dtype, i + 1, next.entry.dtype
))
if first.col_key.dtype != next.col_key.dtype:
raise ValueError(error_msg.format(
"col key types", 0, first.col_key.dtype, i + 1, next.col_key.dtype
))
if _check_cols:
wrong_keys = hl.eval(hl.rbind(first.col_key.collect(_localize=False), lambda first_keys: (
hl.enumerate([mt.col_key.collect(_localize=False) for mt in datasets[1:]])
.find(lambda x: ~(x[1] == first_keys))[0])))
if wrong_keys is not None:
raise ValueError(f"'MatrixTable.union_rows' expects all datasets to have the same columns. "
f"Datasets 0 and {wrong_keys + 1} have different columns (or possibly different order).")
return MatrixTable(ir.MatrixUnionRows(*[d._mir for d in datasets]))
@typecheck_method(other=matrix_table_type,
row_join_type=enumeration('inner', 'outer'))
def union_cols(self, other: 'MatrixTable', row_join_type='inner') -> 'MatrixTable':
"""Take the union of dataset columns.
Examples
--------
Union the columns of two datasets:
>>> dataset_result = dataset_to_union_1.union_cols(dataset_to_union_2)
Notes
-----
In order to combine two datasets, three requirements must be met:
- The row keys must match.
- The column key schemas and column schemas must match.
- The entry schemas must match.
The row fields in the resulting dataset are the row fields from the
first dataset; the row schemas do not need to match.
This method creates a :class:`.MatrixTable` which contains all columns
from both input datasets. The set of rows included in the result is
determined by the `row_join_type` parameter.
- With the default value of ``'inner'``, an inner join is performed
on rows, so that only rows whose row key exists in both input datasets
are included. In this case, the entries for each row are the
concatenation of all entries of the corresponding rows in the input
datasets.
- With `row_join_type` set to ``'outer'``, an outer join is performed on
rows, so that row keys which exist in only one input dataset are also
included. For those rows, the entry fields for the columns coming
from the other dataset will be missing.
Only distinct row keys from each dataset are included (equivalent to
calling :meth:`.distinct_by_row` on each dataset first).
This method does not deduplicate; if a column key exists identically in
two datasets, then it will be duplicated in the result.
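For example, a minimal sketch of an outer join on rows:
>>> dataset_result = dataset_to_union_1.union_cols(dataset_to_union_2, row_join_type='outer')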
Parameters
----------
other : :class:`.MatrixTable`
Dataset to concatenate.
row_join_type : :class:`str`
If ``'inner'`` (default), perform an inner join on rows; if ``'outer'``,
perform an outer join on rows.
Returns
-------
:class:`.MatrixTable`
Dataset with columns from both datasets.
"""
if self.entry.dtype != other.entry.dtype:
raise ValueError(f'entry types differ:\n'
f' left: {self.entry.dtype}\n'
f' right: {other.entry.dtype}')
if self.col.dtype != other.col.dtype:
raise ValueError(f'column types differ:\n'
f' left: {self.col.dtype}\n'
f' right: {other.col.dtype}')
if self.col_key.keys() != other.col_key.keys():
raise ValueError(f'column key fields differ:\n'
f' left: {", ".join(self.col_key.keys())}\n'
f' right: {", ".join(other.col_key.keys())}')
if list(self.row_key.dtype.values()) != list(other.row_key.dtype.values()):
raise ValueError(f'row key types differ:\n'
f'  left: {", ".join(str(t) for t in self.row_key.dtype.values())}\n'
f'  right: {", ".join(str(t) for t in other.row_key.dtype.values())}')
return MatrixTable(ir.MatrixUnionCols(self._mir, other._mir, row_join_type))
@typecheck_method(n_rows=nullable(int), n_cols=nullable(int), n=nullable(int))
def head(self, n_rows: Optional[int], n_cols: Optional[int] = None, *, n: Optional[int] = None) -> 'MatrixTable':
"""Subset matrix to first `n_rows` rows and `n_cols` cols.
Examples
--------
>>> mt_range = hl.utils.range_matrix_table(100, 100)
Passing only one argument will take the first `n_rows` rows:
>>> mt_range.head(10).count()
(10, 100)
Passing two arguments refers to rows and columns, respectively:
>>> mt_range.head(10, 20).count()
(10, 20)
Either argument may be ``None`` to indicate no filter.
First 10 rows, all columns:
>>> mt_range.head(10, None).count()
(10, 100)
All rows, first 10 columns:
>>> mt_range.head(None, 10).count()
(100, 10)
Notes
-----
The number of partitions in the new matrix is equal to the number of
partitions containing the first `n_rows` rows.
Parameters
----------
n_rows : :obj:`int`
Number of rows to include (all rows included if ``None``).
n_cols : :obj:`int`, optional
Number of cols to include (all cols included if ``None``).
n : :obj:`int`
Deprecated in favor of n_rows.
Returns
-------
:class:`.MatrixTable`
Matrix including the first `n_rows` rows and first `n_cols` cols.
"""
if n_rows is not None and n is not None:
raise ValueError('Both n and n_rows specified. Only one may be specified.')
if n_rows is not None:
n_rows_name = 'n_rows'
else:
n_rows = n
n_rows_name = 'n'
mt = self
if n_rows is not None:
if n_rows < 0:
raise ValueError(f"MatrixTable.head: expect '{n_rows_name}' to be non-negative or None, found '{n_rows}'")
mt = MatrixTable(ir.MatrixRowsHead(mt._mir, n_rows))
if n_cols is not None:
if n_cols < 0:
raise ValueError(f"MatrixTable.head: expect 'n_cols' to be non-negative or None, found '{n_cols}'")
mt = MatrixTable(ir.MatrixColsHead(mt._mir, n_cols))
return mt
@typecheck_method(n=nullable(int), n_cols=nullable(int))
def tail(self, n: Optional[int], n_cols: Optional[int] = None) -> 'MatrixTable':
"""Subset matrix to last `n` rows.
Examples
--------
>>> mt_range = hl.utils.range_matrix_table(100, 100)
Passing only one argument will take the last `n` rows:
>>> mt_range.tail(10).count()
(10, 100)
Passing two arguments refers to rows and columns, respectively:
>>> mt_range.tail(10, 20).count()
(10, 20)
Either argument may be ``None`` to indicate no filter.
Last 10 rows, all columns:
>>> mt_range.tail(10, None).count()
(10, 100)
All rows, last 10 columns:
>>> mt_range.tail(None, 10).count()
(100, 10)
Notes
-----
For backwards compatibility, the `n` parameter is not named `n_rows`,
but the parameter refers to the number of rows to keep.
The number of partitions in the new matrix is equal to the number of
partitions containing the last `n` rows.
Parameters
----------
n : :obj:`int`
Number of rows to include (all rows included if ``None``).
n_cols : :obj:`int`, optional
Number of cols to include (all cols included if ``None``).
Returns
-------
:class:`.MatrixTable`
Matrix including the last `n` rows and last `n_cols` cols.
"""
mt = self
if n is not None:
if n < 0:
raise ValueError(f"MatrixTable.tail: expect 'n' to be non-negative or None, found '{n}'")
mt = MatrixTable(ir.MatrixRowsTail(mt._mir, n))
if n_cols is not None:
if n_cols < 0:
raise ValueError(f"MatrixTable.tail: expect 'n_cols' to be non-negative or None, found '{n_cols}'")
mt = MatrixTable(ir.MatrixColsTail(mt._mir, n_cols))
return mt
@typecheck_method(parts=sequenceof(int), keep=bool)
def _filter_partitions(self, parts, keep=True) -> 'MatrixTable':
return MatrixTable(ir.MatrixToMatrixApply(self._mir, {'name': 'MatrixFilterPartitions', 'parts': parts, 'keep': keep}))
@classmethod
@typecheck_method(table=Table)
def from_rows_table(cls, table: Table) -> 'MatrixTable':
"""Construct matrix table with no columns from a table.
.. include:: _templates/experimental.rst
Examples
--------
Import a text table and construct a rows-only matrix table:
>>> table = hl.import_table('data/variant-lof.tsv')
>>> table = table.transmute(**hl.parse_variant(table['v'])).key_by('locus', 'alleles')
>>> sites_mt = hl.MatrixTable.from_rows_table(table)
Notes
-----
All fields in the table become row-indexed fields in the
result.
Parameters
----------
table : :class:`.Table`
The table to be converted.
Returns
-------
:class:`.MatrixTable`
"""
col_values_uid = Env.get_uid()
entries_uid = Env.get_uid()
return (table.annotate_globals(**{col_values_uid: hl.empty_array(hl.tstruct())})
.annotate(**{entries_uid: hl.empty_array(hl.tstruct())})
._unlocalize_entries(entries_uid, col_values_uid, []))
@typecheck_method(p=numeric,
seed=nullable(int))
def sample_rows(self, p: float, seed=None) -> 'MatrixTable':
"""Downsample the matrix table by keeping each row with probability ``p``.
Examples
--------
Downsample the dataset to approximately 1% of its rows.
>>> small_dataset = dataset.sample_rows(0.01)
Notes
-----
Although the :class:`MatrixTable` returned by this method may be
small, it requires a full pass over the rows of the sampled object.
Parameters
----------
p : :obj:`float`
Probability of keeping each row.
seed : :obj:`int`
Random seed.
Returns
-------
:class:`.MatrixTable`
Matrix table with approximately ``p * n_rows`` rows.
"""
if not 0 <= p <= 1:
raise ValueError("Requires 'p' in [0,1]. Found p={}".format(p))
return self.filter_rows(hl.rand_bool(p, seed))
@typecheck_method(p=numeric,
seed=nullable(int))
def sample_cols(self, p: float, seed=None) -> 'MatrixTable':
"""Downsample the matrix table by keeping each column with probability ``p``.
Examples
--------
Downsample the dataset to approximately 1% of its columns.
>>> small_dataset = dataset.sample_cols(0.01)
Parameters
----------
p : :obj:`float`
Probability of keeping each column.
seed : :obj:`int`
Random seed.
Returns
-------
:class:`.MatrixTable`
Matrix table with approximately ``p * n_cols`` column.
"""
if not 0 <= p <= 1:
raise ValueError("Requires 'p' in [0,1]. Found p={}".format(p))
return self.filter_cols(hl.rand_bool(p, seed))
@typecheck_method(fields=dictof(str, str))
def rename(self, fields: Dict[str, str]) -> 'MatrixTable':
"""Rename fields of a matrix table.
Examples
--------
Rename column key `s` to `SampleID`, still keying by `SampleID`.
>>> dataset_result = dataset.rename({'s': 'SampleID'})
You can rename a field to a field name that already exists, as long as
that field also gets renamed (no name collisions). Here, we rename the
column key `s` to `info`, and the row field `info` to `vcf_info`:
>>> dataset_result = dataset.rename({'s': 'info', 'info': 'vcf_info'})
Parameters
----------
fields : :obj:`dict` from :class:`str` to :obj:`str`
Mapping from old field names to new field names.
Returns
-------
:class:`.MatrixTable`
Matrix table with renamed fields.
"""
seen = {}
row_map = {}
col_map = {}
entry_map = {}
global_map = {}
for k, v in fields.items():
if v in seen:
raise ValueError(
"Cannot rename two fields to the same name: attempted to rename {} and {} both to {}".format(
repr(seen[v]), repr(k), repr(v)))
if v in self._fields and v not in fields:
raise ValueError("Cannot rename {} to {}: field already exists.".format(repr(k), repr(v)))
seen[v] = k
if self[k]._indices == self._row_indices:
row_map[k] = v
elif self[k]._indices == self._col_indices:
col_map[k] = v
elif self[k]._indices == self._entry_indices:
entry_map[k] = v
elif self[k]._indices == self._global_indices:
global_map[k] = v
return MatrixTable(ir.MatrixRename(self._mir, global_map, col_map, row_map, entry_map))
def distinct_by_row(self) -> 'MatrixTable':
"""Remove rows with a duplicate row key, keeping exactly one row for each unique key.
Returns
-------
:class:`.MatrixTable`
"""
return MatrixTable(ir.MatrixDistinctByRow(self._mir))
def distinct_by_col(self) -> 'MatrixTable':
"""Remove columns with a duplicate row key, keeping exactly one column for each unique key.
Returns
-------
:class:`.MatrixTable`
"""
index_uid = Env.get_uid()
col_key_fields = list(self.col_key)
t = self.key_cols_by().cols()
t = t.add_index(index_uid)
unique_cols = t.aggregate(
hl.agg.group_by(
hl.struct(**{f: t[f] for f in col_key_fields}), hl.agg.take(t[index_uid], 1)))
unique_cols = sorted([v[0] for _, v in unique_cols.items()])
return self.choose_cols(unique_cols)
@typecheck_method(separator=str)
def make_table(self, separator='.') -> Table:
"""Make a table from a matrix table with one field per sample.
Examples
--------
Consider a matrix table with the following schema:
.. code-block:: text
Global fields:
'batch': str
Column fields:
's': str
Row fields:
'locus': locus<GRCh37>
'alleles': array<str>
Entry fields:
'GT': call
'GQ': int32
Column key:
's': str
Row key:
'locus': locus<GRCh37>
'alleles': array<str>
and three sample IDs: `A`, `B` and `C`. Then the result of
:meth:`.make_table`:
>>> ht = mt.make_table() # doctest: +SKIP
has the original row fields along with 6 additional fields,
one for each sample and entry field:
.. code-block:: text
Global fields:
'batch': str
Row fields:
'locus': locus<GRCh37>
'alleles': array<str>
'A.GT': call
'A.GQ': int32
'B.GT': call
'B.GQ': int32
'C.GT': call
'C.GQ': int32
Key:
'locus': locus<GRCh37>
'alleles': array<str>
Notes
-----
The table has one row for each row of the input matrix. The
per sample and entry fields are formed by concatenating the
sample ID with the entry field name using `separator`. If the
entry field name is empty, the separator is omitted.
The table inherits the globals from the matrix table.
Parameters
----------
separator : :class:`str`
Separator between sample IDs and entry field names.
Returns
-------
:class:`.Table`
"""
if not (len(self.col_key) == 1 and self.col_key[0].dtype == hl.tstr):
raise ValueError("column key must be a single field of type str")
col_keys = self.col_key[0].collect()
counts = Counter(col_keys)
if counts[None] > 0:
raise ValueError("'make_table' encountered a missing column key; ensure all identifiers are defined.\n"
" To fill in key index, run:\n"
" mt = mt.key_cols_by(ck = hl.coalesce(mt.COL_KEY_NAME, 'missing_' + hl.str(hl.scan.count())))")
duplicates = [k for k, count in counts.items() if count > 1]
if duplicates:
raise ValueError(f"column keys must be unique, found duplicates: {', '.join(duplicates)}")
entries_uid = Env.get_uid()
cols_uid = Env.get_uid()
t = self
t = t._localize_entries(entries_uid, cols_uid)
def fmt(f, col_key):
if f:
return col_key + separator + f
else:
return col_key
t = t.annotate(**{
fmt(f, col_keys[i]): t[entries_uid][i][j]
for i in range(len(col_keys))
for j, f in enumerate(self.entry)
})
t = t.drop(cols_uid, entries_uid)
return t
@typecheck_method(rows=bool, cols=bool, entries=bool, handler=nullable(anyfunc))
def summarize(self, *, rows=True, cols=True, entries=True, handler=None):
"""Compute and print summary information about the fields in the matrix table.
.. include:: _templates/experimental.rst
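Examples
--------
Summarize only the row and entry fields (a minimal sketch):
>>> dataset.summarize(cols=False) # doctest: +SKIP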
Parameters
----------
rows : :obj:`bool`
Compute summary for the row fields.
cols : :obj:`bool`
Compute summary for the column fields.
entries : :obj:`bool`
Compute summary for the entry fields.
"""
if handler is None:
handler = default_handler()
if cols:
handler(self.col._summarize(header='Columns', top=True))
if rows:
handler(self.row._summarize(header='Rows', top=True))
if entries:
handler(self.entry._summarize(header='Entries', top=True))
def _write_block_matrix(self, path, overwrite, entry_field, block_size):
mt = self
mt = mt._select_all(entry_exprs={entry_field: mt[entry_field]})
writer = ir.MatrixBlockMatrixWriter(path, overwrite, entry_field, block_size)
Env.backend().execute(ir.MatrixWrite(self._mir, writer))
def _calculate_new_partitions(self, n_partitions):
"""returns a set of range bounds that can be passed to write"""
mt = self.rows()
mt = mt.select()
return Env.backend().execute(ir.TableToValueApply(
mt._tir,
{'name': 'TableCalculateNewPartitions',
'nPartitions': n_partitions}))
matrix_table_type.set(MatrixTable)
| 37.449631
| 141
| 0.567475
|
4a14adcbec00ab8342339ffef4c773e7ec06cf78
| 8,279
|
py
|
Python
|
nanopq/pq.py
|
de9uch1/nanopq
|
4c1d724494a71f9736b15928a8c03b0ba13ffd19
|
[
"MIT"
] | 217
|
2018-07-25T23:33:56.000Z
|
2022-03-19T13:44:07.000Z
|
nanopq/pq.py
|
de9uch1/nanopq
|
4c1d724494a71f9736b15928a8c03b0ba13ffd19
|
[
"MIT"
] | 11
|
2019-01-23T10:56:18.000Z
|
2022-03-31T14:58:40.000Z
|
nanopq/pq.py
|
de9uch1/nanopq
|
4c1d724494a71f9736b15928a8c03b0ba13ffd19
|
[
"MIT"
] | 35
|
2018-07-30T12:53:12.000Z
|
2022-02-08T04:11:38.000Z
|
import numpy as np
from scipy.cluster.vq import kmeans2, vq
class PQ(object):
"""Pure python implementation of Product Quantization (PQ) [Jegou11]_.
For the indexing phase of database vectors,
a `D`-dim input vector is divided into `M` `D`/`M`-dim sub-vectors.
Each sub-vector is quantized into a small integer via `Ks` codewords.
For the querying phase, given a new `D`-dim query vector, the distances between the query
and the database PQ-codes are efficiently approximated via Asymmetric Distance.
All vectors must be np.ndarray with np.float32
.. [Jegou11] H. Jegou et al., "Product Quantization for Nearest Neighbor Search", IEEE TPAMI 2011
Args:
M (int): The number of sub-space
Ks (int): The number of codewords for each subspace
(typically 256, so that each sub-vector is quantized
into 8 bits = 1 byte = uint8)
verbose (bool): Verbose flag
Attributes:
M (int): The number of sub-space
Ks (int): The number of codewords for each subspace
verbose (bool): Verbose flag
code_dtype (object): dtype of PQ-code. Either np.uint{8, 16, 32}
codewords (np.ndarray): shape=(M, Ks, Ds) with dtype=np.float32.
codewords[m][ks] means ks-th codeword (Ds-dim) for m-th subspace
Ds (int): The dim of each sub-vector, i.e., Ds=D/M
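Example:
A minimal usage sketch (the random training data and shapes below are illustrative only):
>>> import numpy as np
>>> X = np.random.random((10000, 128)).astype(np.float32)  # 10,000 128-dim vectors
>>> pq = PQ(M=8, Ks=256, verbose=False)
>>> pq = pq.fit(X)               # learn codewords per subspace (fit returns self)
>>> codes = pq.encode(X)         # shape=(10000, 8), dtype=uint8
>>> X_approx = pq.decode(codes)  # approximate reconstruction, shape=(10000, 128)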
"""
def __init__(self, M, Ks=256, verbose=True):
assert 0 < Ks <= 2 ** 32
self.M, self.Ks, self.verbose = M, Ks, verbose
self.code_dtype = (
np.uint8 if Ks <= 2 ** 8 else (np.uint16 if Ks <= 2 ** 16 else np.uint32)
)
self.codewords = None
self.Ds = None
if verbose:
print("M: {}, Ks: {}, code_dtype: {}".format(M, Ks, self.code_dtype))
def __eq__(self, other):
if isinstance(other, PQ):
return (self.M, self.Ks, self.verbose, self.code_dtype, self.Ds) == (
other.M,
other.Ks,
other.verbose,
other.code_dtype,
other.Ds,
) and np.array_equal(self.codewords, other.codewords)
else:
return False
def fit(self, vecs, iter=20, seed=123):
"""Given training vectors, run k-means for each sub-space and create
codewords for each sub-space.
This function should be run once first of all.
Args:
vecs (np.ndarray): Training vectors with shape=(N, D) and dtype=np.float32.
iter (int): The number of iteration for k-means
seed (int): The seed for random process
Returns:
object: self
"""
assert vecs.dtype == np.float32
assert vecs.ndim == 2
N, D = vecs.shape
        assert self.Ks < N, "the number of training vectors should be more than Ks"
        assert D % self.M == 0, "input dimension must be divisible by M"
self.Ds = int(D / self.M)
np.random.seed(seed)
if self.verbose:
print("iter: {}, seed: {}".format(iter, seed))
# [m][ks][ds]: m-th subspace, ks-the codeword, ds-th dim
self.codewords = np.zeros((self.M, self.Ks, self.Ds), dtype=np.float32)
for m in range(self.M):
if self.verbose:
print("Training the subspace: {} / {}".format(m, self.M))
vecs_sub = vecs[:, m * self.Ds : (m + 1) * self.Ds]
self.codewords[m], _ = kmeans2(vecs_sub, self.Ks, iter=iter, minit="points")
return self
def encode(self, vecs):
"""Encode input vectors into PQ-codes.
Args:
vecs (np.ndarray): Input vectors with shape=(N, D) and dtype=np.float32.
Returns:
np.ndarray: PQ codes with shape=(N, M) and dtype=self.code_dtype
"""
assert vecs.dtype == np.float32
assert vecs.ndim == 2
N, D = vecs.shape
assert D == self.Ds * self.M, "input dimension must be Ds * M"
# codes[n][m] : code of n-th vec, m-th subspace
codes = np.empty((N, self.M), dtype=self.code_dtype)
for m in range(self.M):
if self.verbose:
print("Encoding the subspace: {} / {}".format(m, self.M))
vecs_sub = vecs[:, m * self.Ds : (m + 1) * self.Ds]
codes[:, m], _ = vq(vecs_sub, self.codewords[m])
return codes
def decode(self, codes):
"""Given PQ-codes, reconstruct original D-dimensional vectors
approximately by fetching the codewords.
Args:
            codes (np.ndarray): PQ-codes with shape=(N, M) and dtype=self.code_dtype.
Each row is a PQ-code
Returns:
np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32
"""
assert codes.ndim == 2
N, M = codes.shape
assert M == self.M
assert codes.dtype == self.code_dtype
vecs = np.empty((N, self.Ds * self.M), dtype=np.float32)
for m in range(self.M):
vecs[:, m * self.Ds : (m + 1) * self.Ds] = self.codewords[m][codes[:, m], :]
return vecs
def dtable(self, query):
"""Compute a distance table for a query vector.
The distances are computed by comparing each sub-vector of the query
        to the codewords for each sub-space.
`dtable[m][ks]` contains the squared Euclidean distance between
the `m`-th sub-vector of the query and the `ks`-th codeword
for the `m`-th sub-space (`self.codewords[m][ks]`).
Args:
query (np.ndarray): Input vector with shape=(D, ) and dtype=np.float32
Returns:
nanopq.DistanceTable:
Distance table. which contains
dtable with shape=(M, Ks) and dtype=np.float32
"""
assert query.dtype == np.float32
assert query.ndim == 1, "input must be a single vector"
(D,) = query.shape
assert D == self.Ds * self.M, "input dimension must be Ds * M"
# dtable[m] : distance between m-th subvec and m-th codewords (m-th subspace)
# dtable[m][ks] : distance between m-th subvec and ks-th codeword of m-th codewords
dtable = np.empty((self.M, self.Ks), dtype=np.float32)
for m in range(self.M):
query_sub = query[m * self.Ds : (m + 1) * self.Ds]
dtable[m, :] = np.linalg.norm(self.codewords[m] - query_sub, axis=1) ** 2
return DistanceTable(dtable)
class DistanceTable(object):
"""Distance table from query to codeworkds.
Given a query vector, a PQ/OPQ instance compute this DistanceTable class
using :func:`PQ.dtable` or :func:`OPQ.dtable`.
The Asymmetric Distance from query to each database codes can be computed
by :func:`DistanceTable.adist`.
Args:
dtable (np.ndarray): Distance table with shape=(M, Ks) and dtype=np.float32
computed by :func:`PQ.dtable` or :func:`OPQ.dtable`
Attributes:
dtable (np.ndarray): Distance table with shape=(M, Ks) and dtype=np.float32.
Note that dtable[m][ks] contains the squared Euclidean distance between
(1) m-th sub-vector of query and (2) ks-th codeword for m-th subspace.
"""
def __init__(self, dtable):
assert dtable.ndim == 2
assert dtable.dtype == np.float32
self.dtable = dtable
def adist(self, codes):
"""Given PQ-codes, compute Asymmetric Distances between the query (self.dtable)
and the PQ-codes.
Args:
codes (np.ndarray): PQ codes with shape=(N, M) and
dtype=pq.code_dtype where pq is a pq instance that creates the codes
Returns:
np.ndarray: Asymmetric Distances with shape=(N, ) and dtype=np.float32
"""
assert codes.ndim == 2
N, M = codes.shape
assert M == self.dtable.shape[0]
        # Fetch distance values using the codes.
        dists = np.sum(self.dtable[range(M), codes], axis=1)
        # The above line is equivalent to the following:
# dists = np.zeros((N, )).astype(np.float32)
# for n in range(N):
# for m in range(M):
# dists[n] += self.dtable[m][codes[n][m]]
return dists
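# --- A minimal usage sketch of the PQ -> encode -> dtable -> adist pipeline
# --- documented above. The sizes below (N=1000, D=64, M=8, Ks=32) are
# --- illustrative assumptions, not values required by the library.
if __name__ == "__main__":
    np.random.seed(123)
    N, D = 1000, 64
    X = np.random.random((N, D)).astype(np.float32)    # database vectors
    query = np.random.random((D,)).astype(np.float32)  # a single query vector
    pq = PQ(M=8, Ks=32, verbose=False)
    pq.fit(X)                                 # learn codewords per sub-space
    X_code = pq.encode(X)                     # (N, M) PQ-codes, uint8 here
    dists = pq.dtable(query).adist(X_code)    # asymmetric distances, shape=(N,)
    print("nearest id:", int(np.argmin(dists)), "approx dist:", float(dists.min()))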
| 36.632743
| 101
| 0.584612
|
4a14adfd695311d72533b62fe81cc3fa4e161a02
| 475
|
py
|
Python
|
custodian/vasp/tests/conftest.py
|
jmmshn/custodian
|
9f42fbd0fe449cb5a167de946fa0bfdd7fa60ed6
|
[
"MIT"
] | null | null | null |
custodian/vasp/tests/conftest.py
|
jmmshn/custodian
|
9f42fbd0fe449cb5a167de946fa0bfdd7fa60ed6
|
[
"MIT"
] | null | null | null |
custodian/vasp/tests/conftest.py
|
jmmshn/custodian
|
9f42fbd0fe449cb5a167de946fa0bfdd7fa60ed6
|
[
"MIT"
] | null | null | null |
"""
This module mocks functions needed for pytest.
"""
import multiprocessing
import pytest
def mock_cpu_count(*args, **kwargs):
"""
Instead of running multiprocessing.cpu_count(), we return a fixed
value during tests
"""
return 64
@pytest.fixture(autouse=True)
def patch_get_potential_energy(monkeypatch):
"""
Monkeypatch the multiprocessing.cpu_count() function
"""
monkeypatch.setattr(multiprocessing, "cpu_count", mock_cpu_count)
| 20.652174
| 69
| 0.722105
|
4a14b0214f1c6be30ff6772c3be4ad578a2a2771
| 3,546
|
py
|
Python
|
tests/test_branflake.py
|
curt/branflake
|
a266bdb5c3641478363cc153045a64a7d9f7aed6
|
[
"MIT"
] | null | null | null |
tests/test_branflake.py
|
curt/branflake
|
a266bdb5c3641478363cc153045a64a7d9f7aed6
|
[
"MIT"
] | null | null | null |
tests/test_branflake.py
|
curt/branflake
|
a266bdb5c3641478363cc153045a64a7d9f7aed6
|
[
"MIT"
] | null | null | null |
""" test_branflake.py
"""
from time import sleep, struct_time
from uuid import UUID
import pytest
from branflake import Branflake
# easier to read than a pile of zeros
ONE_MILLION = 1000000
# define low, one, and high bytearrays
LOW_BYTEARRAY = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ONE_BYTEARRAY = b'\x00\x00\x00\x00\x00\x00\x00\x01'
HIGH_BYTEARRAY = b'\xff\xff\xff\xff\xff\xff\xff\xff'
# define lowest and highest integers
ONE_INT64 = int.from_bytes(ONE_BYTEARRAY, 'big', signed=False)
HIGH_INT64 = int.from_bytes(HIGH_BYTEARRAY, 'big', signed=False)
ZERO_INT128 = int.from_bytes(LOW_BYTEARRAY + LOW_BYTEARRAY, 'big', signed=False)
LOW_INT128 = int.from_bytes(ONE_BYTEARRAY + LOW_BYTEARRAY, 'big', signed=False)
HIGH_INT128 = int.from_bytes(HIGH_BYTEARRAY + HIGH_BYTEARRAY, 'big', signed=False)
# create earliest and latest possible branflakes
EARLIEST_BRANFLAKE = Branflake(ONE_INT64, LOW_BYTEARRAY)
LATEST_BRANFLAKE = Branflake(HIGH_INT64, HIGH_BYTEARRAY)
# create two branflakes at least 5 microseconds apart
FIRST_BRANFLAKE = Branflake()
sleep(5 / ONE_MILLION)
SECOND_BRANFLAKE = Branflake()
def test_earliest():
""" Checks the integrity of the earliest branflake
"""
assert EARLIEST_BRANFLAKE
assert EARLIEST_BRANFLAKE.to_bytes() == (ONE_BYTEARRAY + LOW_BYTEARRAY)
def test_latest():
""" Checks the integrity of the latest branflake
"""
assert LATEST_BRANFLAKE
assert LATEST_BRANFLAKE.to_bytes() == (HIGH_BYTEARRAY + HIGH_BYTEARRAY)
def test_first():
""" Tests first branflake for types and lengths
"""
assert FIRST_BRANFLAKE
assert len(FIRST_BRANFLAKE.get_random_bytes()) == 8
assert len(FIRST_BRANFLAKE.get_time_bytes()) == 8
assert len(FIRST_BRANFLAKE.to_bytes()) == 16
assert len(FIRST_BRANFLAKE.to_hex_bytes()) == 32
assert len(FIRST_BRANFLAKE.to_hex_string()) == 32
assert len(FIRST_BRANFLAKE.to_base64_string()) == 22
assert isinstance(FIRST_BRANFLAKE.to_gmtime(), struct_time)
assert isinstance(FIRST_BRANFLAKE.to_uuid(), UUID)
assert isinstance(FIRST_BRANFLAKE.to_microseconds(), int)
assert isinstance(FIRST_BRANFLAKE.to_seconds(), float)
def test_compare():
""" Compares second and first branflakes with inequality operator.
"""
assert SECOND_BRANFLAKE
assert SECOND_BRANFLAKE.to_int() > FIRST_BRANFLAKE.to_int()
assert SECOND_BRANFLAKE.to_hex_string() > FIRST_BRANFLAKE.to_hex_string()
def test_reconstitute():
""" Reconsitutes branflakes using 'from' methods.
"""
assert ((Branflake.from_int(FIRST_BRANFLAKE.to_int())).to_int()
== FIRST_BRANFLAKE.to_int())
assert ((Branflake.from_hex_string(FIRST_BRANFLAKE.to_hex_string())).to_int()
== FIRST_BRANFLAKE.to_int())
assert ((Branflake.from_bytes(FIRST_BRANFLAKE.to_bytes())).to_int()
== FIRST_BRANFLAKE.to_int())
assert ((Branflake.from_base64_string(FIRST_BRANFLAKE.to_base64_string())).to_int()
== FIRST_BRANFLAKE.to_int())
def test_out_of_bounds():
""" Tests creating branflakes with out-of-bounds times.
"""
with pytest.raises(ValueError):
Branflake(HIGH_INT64 + 1, HIGH_BYTEARRAY)
with pytest.raises(ValueError):
Branflake(-1, HIGH_BYTEARRAY)
with pytest.raises(ValueError):
Branflake.from_int(-1)
def test_wrong_length():
""" Tests creating branflakes with inputs of wrong length.
"""
with pytest.raises(ValueError):
Branflake.from_bytes(b'\x00')
with pytest.raises(ValueError):
Branflake.from_hex_string('0')
| 37.326316
| 87
| 0.73181
|
4a14b222546a139b4d9f436480a31598c0029511
| 2,280
|
py
|
Python
|
src/multiple_trees/phylo_tree.py
|
npakudin/embrionic_tree
|
fe4c6379cf9b63ec116678549bd284abff638655
|
[
"MIT"
] | null | null | null |
src/multiple_trees/phylo_tree.py
|
npakudin/embrionic_tree
|
fe4c6379cf9b63ec116678549bd284abff638655
|
[
"MIT"
] | 5
|
2020-08-01T03:01:38.000Z
|
2022-03-12T00:45:17.000Z
|
src/multiple_trees/phylo_tree.py
|
npakudin/embrionic_tree
|
fe4c6379cf9b63ec116678549bd284abff638655
|
[
"MIT"
] | null | null | null |
import networkx as nx
def cut(distance_matrix, names):
G = nx.Graph()
for row_index, row in enumerate(distance_matrix):
for col_index, item in enumerate(row):
if (col_index < row_index):
# G.add_edge(names[row_index], names[col_index], weight=item)
G.add_edge(names[row_index], names[col_index], weight=item)
return nx.stoer_wagner(G)
def sub_matrix(distance_matrix, names, part):
sub_matr = []
sub_names = []
# print(f"len(distance_matrix):{len(distance_matrix)}, names: {names}, part: {part}")
for row_index, row in enumerate(part):
sub_matr.append([])
sub_names.append(names[part[row_index]])
for col_index, item in enumerate(part):
sub_matr[row_index].append(distance_matrix[part[row_index]][part[col_index]])
return (sub_matr, sub_names)
class PhyloTree:
def __init__(self, distance_matrix, names):
assert len(distance_matrix) > 0
if len(distance_matrix) == 1:
self.index = distance_matrix[0][0]
self.name = names[0]
self.l = None
self.r = None
else:
self.name = '*'
self.cut_value, (part1, part2) = cut(distance_matrix, list(range(0, len(distance_matrix))))
print(f"cut_value/count: {self.cut_value / len(distance_matrix)}, part1: {part1}, part2: {part2}")
sub_matr1, sub_names1 = sub_matrix(distance_matrix, names, part1)
self.l = PhyloTree(sub_matr1, sub_names1)
sub_matr2, sub_names2 = sub_matrix(distance_matrix, names, part2)
self.r = PhyloTree(sub_matr2, sub_names2)
def to_str(self, level):
res = ""
pad = "\n" + (" " * level)
res += pad + "name: " + str(self.name)
        if self.l is not None:
            res += pad + "l: " + self.l.to_str(level + 1)
        if self.r is not None:
            res += pad + "r: " + self.r.to_str(level + 1)
return res
def __str__(self):
return self.to_str(0)
# phyloTree = PhyloTree(distance_matrix, list(range(0, len(distance_matrix))))
# cut_value, partition = cut(distance_matrix, list(range(0, len(distance_matrix))))
# print(f"cut_value: {cut_value}, partition: {partition}")
| 36.774194
| 110
| 0.607895
|
4a14b2ef832d23747b215586208d233ebf37068e
| 7,250
|
py
|
Python
|
gff_to_bed_maker/gff_to_bed_maker.py
|
tyrmi/PGU
|
bd04b62599e13124426f0830fe53e4324aca8439
|
[
"BSD-3-Clause"
] | 3
|
2017-07-17T07:45:37.000Z
|
2019-04-12T21:03:20.000Z
|
gff_to_bed_maker/gff_to_bed_maker.py
|
tyrmi/PGU
|
bd04b62599e13124426f0830fe53e4324aca8439
|
[
"BSD-3-Clause"
] | null | null | null |
gff_to_bed_maker/gff_to_bed_maker.py
|
tyrmi/PGU
|
bd04b62599e13124426f0830fe53e4324aca8439
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Copyright © 2017 Jaakko Tyrmi. All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import optparse
from collections import defaultdict
import sys
NAME = 'gff_to_bed_maker'
VERSION = '17.04.20'
AUTHOR = 'Jaakko Tyrmi'
descr = """This program converts areas defined in an input gff3 files to bed
file format. The -f parameter can be used to limit the output features to a
certain type (e.g. exon, CDS...). The -I parameter takes a path to a file
containing a list of ID names to include in the output. The ID is parsed from
the 9th column of the gff file. NOTICE that the ID field is defined as the string
between '=' and ':' or ';' characters. This makes it possible to list IDs
without detailing possible exon numbers etc. that may be included after ':'
character.
"""
print '\n\nRunning {0} v.{1}, by {2}'.format(NAME, VERSION, AUTHOR)
parser = optparse.OptionParser(description=descr)
parser.add_option('-i', '--input_path', help='path to a gff file')
parser.add_option('-o', '--output_path', help='path to output bed file')
parser.add_option('-f', '--feature_types', help='feature to extract (default '
'all), comma delimited list '
'may be used to include '
'several types.')
parser.add_option('-I', '--id_list_path', help='list of id:s to include in '
'the output')
parser.add_option('-g', '--include_gene_name_in_bed', help='creates an addional '
'column to bed file '
'(making it non-standard) '
'containing the gene name'
'(compatible with pimatic.py '
'script)')
args = parser.parse_args()[0]
def gff_to_bed_maker(input_path, output_path, feature_types, id_list_path,
include_gene_name_in_bed):
if feature_types is None:
feature_types = []
    elif feature_types.lower() == 'all':
feature_types = []
else:
feature_types = feature_types.lower()
feature_types = feature_types.split(',')
feature_types = set(feature_types)
    if include_gene_name_in_bed is None:
        include_gene_name_in_bed = False
    elif include_gene_name_in_bed.lower() in ('true', 't'):
        include_gene_name_in_bed = True
    elif include_gene_name_in_bed.lower() in ('false', 'f'):
        include_gene_name_in_bed = False
if include_gene_name_in_bed and id_list_path is None:
print 'Error! -g parameter can be set true only when id list is available (-I parameter)!'
sys.exit(0)
allowed_ids = set([])
if id_list_path is not None:
print 'Reading ID list...'
in_handle = open(id_list_path)
for line in in_handle:
line = line.strip()
if not line: continue
allowed_ids.add(line)
print '{0} IDs found.\n'.format(len(allowed_ids))
in_handle.close()
in_handle = open(input_path)
out_handle = open(output_path, 'w')
if include_gene_name_in_bed:
out_handle.write('#CHR\tSTART\tEND\tGENE_NAME\n')
else:
out_handle.write('#CHR\tSTART\tEND\n')
features_found = 0
features_written = 0
feature_types_included = defaultdict(int)
feature_length_included = defaultdict(int)
print 'Processing gff file...'
for line in in_handle:
if line.startswith('#'): continue
line = line.strip()
if not line: continue
line = line.split('\t')
if len(line) != 9: continue
features_found += 1
#Skip if incorrect ID
if id_list_path is not None:
attributes = line[8]
attributes = attributes.replace(':', ';')
attributes = attributes.split(';')
ID_found = False
for a in attributes:
if a.startswith('ID='):
if a[3:] in allowed_ids:
ID_found = True
ID_string = a[3:]
break
if not ID_found: continue
#Skip if incorrect feature
line[2] = line[2].lower()
if line[2] in feature_types or not feature_types:
features_written += 1
feature_types_included[line[2]] += 1
feature_length_included[line[2]] += int(line[4])-int(line[3])
corrected_start_pos = int(line[3])
corrected_start_pos = (corrected_start_pos-1)
if include_gene_name_in_bed:
out_handle.write('{0}\t{1}\t{2}\t{3}\n'.format(line[0],
corrected_start_pos,
line[4],
ID_string))
else:
out_handle.write('{0}\t{1}\t{2}\n'.format(line[0],
corrected_start_pos,
line[4]))
in_handle.close()
out_handle.close()
print '\nDone.'
print '{0}/{1} features kept.'.format(features_written, features_found)
print '\nfeature\tnumber_included\tlength_of_sequence_included'
for feature in feature_types_included:
print '{0}\t{1}\t{2}'.format(feature,
feature_types_included[feature],
feature_length_included[feature])
gff_to_bed_maker(args.input_path, args.output_path, args.feature_types,
args.id_list_path, args.include_gene_name_in_bed)
| 45.886076
| 99
| 0.584966
|
4a14b300828b066424646d5c743326c46ee829f4
| 1,431
|
py
|
Python
|
classifier_util.py
|
alexrudnick/nlp-doodles
|
ae29e87177b40cbdc330f5dc177fe43fd71a1519
|
[
"CC0-1.0"
] | 3
|
2020-10-06T04:36:37.000Z
|
2020-10-20T12:29:27.000Z
|
classifier_util.py
|
alexrudnick/nlp-doodles
|
ae29e87177b40cbdc330f5dc177fe43fd71a1519
|
[
"CC0-1.0"
] | null | null | null |
classifier_util.py
|
alexrudnick/nlp-doodles
|
ae29e87177b40cbdc330f5dc177fe43fd71a1519
|
[
"CC0-1.0"
] | 1
|
2020-10-20T04:32:05.000Z
|
2020-10-20T04:32:05.000Z
|
"""Little utility functions used in several classifier sketches.
"""
import sklearn.datasets
import numpy as np
def split_training_test(document_pairs):
"""
Given a list of things, split them into training and test.
Returns a pair of lists: training, test.
Simplest split: every 10th thing is in the test set.
"""
training = []
test = []
for i, pair in enumerate(document_pairs):
if i % 10 == 9:
test.append(pair)
else:
training.append(pair)
return training, test
def load_binary_toy_documents():
out = [
("pos", "a".split()),
("pos", "a".split()),
("pos", "a".split()),
("neg", "b".split()),
("neg", "b".split()),
("neg", "b".split()),
("neg", "c".split()),
("neg", "c".split()),
("neg", "c".split()),
("pos", "a".split()),
]
return out
def load_multinomial_toy_documents():
out = []
for i in range(30):
out.append(("A", "b c".split()))
for i in range(30):
out.append(("B", "a c".split()))
for i in range(30):
out.append(("C", "a b".split()))
return out
def load_iris_example_pairs():
out = []
iris = sklearn.datasets.load_iris()
for example, label in zip(iris.data, iris.target):
# tack a 1 on the end here as a bias term.
out.append((label, np.append(example, [1])))
return out
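# --- Hedged example of the helpers above: it reuses the module's own toy corpus
# --- and shows the every-10th-item split rule of split_training_test().
if __name__ == "__main__":
    docs = load_multinomial_toy_documents()      # 90 labeled toy documents
    training, test = split_training_test(docs)   # every 10th item goes to test
    print(len(training), "training /", len(test), "test")  # -> 81 training / 9 test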
| 26.018182
| 64
| 0.542278
|
4a14b3dfde321fcd7ebdef70037863e598d6b8b0
| 18,118
|
py
|
Python
|
tests/test_environment.py
|
startupgrind/python-percy-client
|
8c6ce83c74b9dbe51beadac5923f5d9f1d999f58
|
[
"MIT"
] | null | null | null |
tests/test_environment.py
|
startupgrind/python-percy-client
|
8c6ce83c74b9dbe51beadac5923f5d9f1d999f58
|
[
"MIT"
] | 2
|
2019-07-24T20:43:35.000Z
|
2019-07-24T23:03:29.000Z
|
tests/test_environment.py
|
startupgrind/python-percy-client
|
8c6ce83c74b9dbe51beadac5923f5d9f1d999f58
|
[
"MIT"
] | null | null | null |
import os
import percy
import pytest
import sys
class BaseTestPercyEnvironment(object):
def setup_method(self, method):
self.original_env = {}
self.original_env['TRAVIS_BUILD_ID'] = os.getenv('TRAVIS_BUILD_ID', None)
self.original_env['TRAVIS_BUILD_NUMBER'] = os.getenv('TRAVIS_BUILD_NUMBER', None)
self.original_env['TRAVIS_COMMIT'] = os.getenv('TRAVIS_COMMIT', None)
self.original_env['TRAVIS_BRANCH'] = os.getenv('TRAVIS_BRANCH', None)
self.original_env['TRAVIS_PULL_REQUEST'] = os.getenv('TRAVIS_PULL_REQUEST', None)
self.original_env['TRAVIS_PULL_REQUEST_BRANCH'] = os.getenv('TRAVIS_PULL_REQUEST_BRANCH', None)
def teardown_method(self, method):
self.clear_env_vars()
# Restore the original environment variables.
for key, value in self.original_env.items():
if value:
os.environ[key] = value
def clear_env_vars(self):
all_possible_env_vars = [
# Unset Percy vars.
'PERCY_COMMIT',
'PERCY_BRANCH',
'PERCY_TARGET_BRANCH',
'PERCY_TARGET_COMMIT',
'PERCY_PULL_REQUEST',
'PERCY_PARALLEL_NONCE',
'PERCY_PARALLEL_TOTAL',
# Unset Travis vars.
'TRAVIS_BUILD_ID',
'TRAVIS_BUILD_NUMBER',
'TRAVIS_COMMIT',
'TRAVIS_BRANCH',
'TRAVIS_PULL_REQUEST',
'TRAVIS_PULL_REQUEST_BRANCH',
'CI_NODE_TOTAL',
# Unset Jenkins vars.
'JENKINS_URL',
'BUILD_NUMBER',
'ghprbPullId',
'ghprbActualCommit',
'ghprbSourceBranch',
'GIT_COMMIT',
# Unset Circle CI vars.
'CIRCLECI',
'CIRCLE_SHA1',
'CIRCLE_BRANCH',
'CIRCLE_BUILD_NUM',
'CIRCLE_WORKFLOW_WORKSPACE_ID',
'CI_PULL_REQUESTS',
'CIRCLE_NODE_TOTAL',
# Unset Codeship vars.
'CI_NAME',
'CI_BRANCH',
'CI_PULL_REQUEST',
'CI_COMMIT_ID',
'CI_BUILD_NUMBER',
'CI_BUILD_ID',
'CI_NODE_TOTAL',
# Unset Drone vars.
'CI',
'DRONE',
'DRONE_COMMIT',
'DRONE_BRANCH',
'CI_PULL_REQUEST',
# Unset Semaphore CI vars
'CI',
'SEMAPHORE',
'REVISION',
'BRANCH_NAME',
'SEMAPHORE_BRANCH_ID',
'SEMAPHORE_BUILD_NUMBER',
'SEMAPHORE_CURRENT_THREAD',
'SEMAPHORE_THREAD_COUNT',
'PULL_REQUEST_NUMBER',
# Unset Buildkite vars
'BUILDKITE',
'BUILDKITE_COMMIT',
'BUILDKITE_BRANCH',
'BUILDKITE_PULL_REQUEST',
'BUILDKITE_BUILD_ID',
'BUILDKITE_PARALLEL_JOB_COUNT',
# Unset GitLab vars
'GITLAB_CI',
'CI_COMMIT_SHA',
'CI_COMMIT_REF_NAME',
'CI_JOB_ID',
'CI_JOB_STAGE',
]
for env_var in all_possible_env_vars:
if os.getenv(env_var):
del os.environ[env_var]
class TestNoEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestNoEnvironment, self).setup_method(self)
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == None
def test_target_branch(self):
assert self.environment.target_branch == None
# Can be overridden with PERCY_TARGET_BRANCH.
os.environ['PERCY_TARGET_BRANCH'] = 'staging'
assert self.environment.target_branch == 'staging'
def test_target_commit_sha(self):
assert self.environment.target_commit_sha == None
# Can be overridden with PERCY_TARGET_COMMIT.
os.environ['PERCY_TARGET_COMMIT'] = 'test-target-commit'
assert self.environment.target_commit_sha == 'test-target-commit'
def test_pull_request_number(self):
assert self.environment.pull_request_number == None
# Can be overridden with PERCY_PULL_REQUEST.
os.environ['PERCY_PULL_REQUEST'] = '1234'
assert self.environment.pull_request_number == '1234'
def test_commit_live(self, monkeypatch):
def isstr(s):
if sys.version_info >= (3,0):
return isinstance(s, str)
else:
return isinstance(s, basestring)
# Call commit using the real _raw_commit_data, which calls git underneath, so allow full
# commit object with attributes containing any string. (Real data changes with each commit)
commit_data = self.environment.commit_data
assert isstr(commit_data['branch'])
assert isstr(commit_data['sha'])
assert isstr(commit_data['message'])
assert isstr(commit_data['committed_at'])
assert isstr(commit_data['author_name'])
assert isstr(commit_data['author_email'])
assert isstr(commit_data['committer_name'])
assert isstr(commit_data['committer_email'])
@pytest.fixture()
def test_commit_with_failed_raw_commit(self, monkeypatch):
        # Call commit while faking a _raw_commit_data failure.
        # If the git command fails, only data from environment variables is used.
os.environ['PERCY_COMMIT'] = 'testcommitsha'
os.environ['PERCY_BRANCH'] = 'testbranch'
monkeypatch.setattr(self.environment, '_raw_commit_output', lambda x: '')
assert self.environment.commit_data == {
'branch': 'testbranch',
'author_email': None,
'author_name': None,
'committer_email': None,
'committer_name': None,
'sha': 'testcommitsha',
}
self.clear_env_vars()
@pytest.fixture()
def test_commit_with_mocked_raw_commit(self, monkeypatch):
# Call commit with _raw_commit_data returning mock data, so we can confirm it
# gets formatted correctly
os.environ['PERCY_BRANCH'] = 'the-coolest-branch'
def fake_raw_commit(commit_sha):
return """COMMIT_SHA:2fcd1b107aa25e62a06de7782d0c17544c669d139
AUTHOR_NAME:Tim Haines
AUTHOR_EMAIL:timhaines@example.com
COMMITTER_NAME:Other Tim Haines
COMMITTER_EMAIL:othertimhaines@example.com
COMMITTED_DATE:2018-03-10 14:41:02 -0800
COMMIT_MESSAGE:This is a great commit"""
monkeypatch.setattr(self.environment, '_raw_commit_output', fake_raw_commit)
assert self.environment.commit_data == {
'branch': 'the-coolest-branch',
'sha': '2fcd1b107aa25e62a06de7782d0c17544c669d139',
'committed_at': '2018-03-10 14:41:02 -0800',
'message': 'This is a great commit',
'author_name': 'Tim Haines',
'author_email': 'timhaines@example.com',
'committer_name': 'Other Tim Haines',
'committer_email': 'othertimhaines@example.com'
}
@pytest.fixture()
def test_branch(self, monkeypatch):
        # By default this calls _raw_branch_output, which calls git underneath, so allow any non-empty string.
assert len(self.environment.branch) > 0
# If git command fails, falls back to None and prints warning.
monkeypatch.setattr(self.environment, '_raw_branch_output', lambda: '')
assert self.environment.branch == None
# Can be overridden with PERCY_BRANCH.
os.environ['PERCY_BRANCH'] = 'foo'
assert self.environment.branch == 'foo'
def test_commit_sha(self):
assert not self.environment.commit_sha
# Can be overridden with PERCY_COMMIT.
os.environ['PERCY_COMMIT'] = 'commit-sha'
assert self.environment.commit_sha == 'commit-sha'
def test_parallel_nonce(self):
os.environ['PERCY_PARALLEL_NONCE'] = 'foo'
assert self.environment.parallel_nonce == 'foo'
def test_parallel_total(self):
os.environ['PERCY_PARALLEL_TOTAL'] = '2'
assert self.environment.parallel_total_shards == 2
class TestTravisEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestTravisEnvironment, self).setup_method(self)
os.environ['TRAVIS_BUILD_ID'] = '1234'
os.environ['TRAVIS_BUILD_NUMBER'] = 'travis-build-number'
os.environ['TRAVIS_PULL_REQUEST'] = 'false'
os.environ['TRAVIS_PULL_REQUEST_BRANCH'] = 'false'
os.environ['TRAVIS_COMMIT'] = 'travis-commit-sha'
os.environ['TRAVIS_BRANCH'] = 'travis-branch'
os.environ['CI_NODE_TOTAL'] = '3'
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == 'travis'
def test_pull_request_number(self):
assert self.environment.pull_request_number == None
os.environ['TRAVIS_PULL_REQUEST'] = '256'
assert self.environment.pull_request_number == '256'
        # PERCY env vars should take precedence over CI. Checked here once, assume other envs work.
os.environ['PERCY_PULL_REQUEST'] = '1234'
assert self.environment.pull_request_number == '1234'
def test_branch(self):
assert self.environment.branch == 'travis-branch'
# Triggers special path if PR build in Travis.
os.environ['TRAVIS_PULL_REQUEST'] = '256'
os.environ['TRAVIS_PULL_REQUEST_BRANCH'] = 'travis-pr-branch'
assert self.environment.branch == 'travis-pr-branch'
os.environ['PERCY_BRANCH'] = 'foo'
assert self.environment.branch == 'foo'
def test_commit_sha(self):
assert self.environment.commit_sha == 'travis-commit-sha'
os.environ['PERCY_COMMIT'] = 'commit-sha'
assert self.environment.commit_sha == 'commit-sha'
def test_parallel_nonce(self):
assert self.environment.parallel_nonce == 'travis-build-number'
os.environ['PERCY_PARALLEL_NONCE'] = 'nonce'
assert self.environment.parallel_nonce == 'nonce'
def test_parallel_total(self):
assert self.environment.parallel_total_shards == 3
os.environ['CI_NODE_TOTAL'] = ''
assert self.environment.parallel_total_shards == None
os.environ['PERCY_PARALLEL_TOTAL'] = '1'
assert self.environment.parallel_total_shards == 1
class TestJenkinsEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestJenkinsEnvironment, self).setup_method(self)
os.environ['JENKINS_URL'] = 'http://localhost:8080/'
os.environ['BUILD_NUMBER'] = 'jenkins-build-number'
os.environ['ghprbSourceBranch'] = 'jenkins-source-branch'
os.environ['ghprbActualCommit'] = 'jenkins-commit-sha'
os.environ['GIT_COMMIT'] = 'jenkins-commit-sha-from-git-plugin'
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == 'jenkins'
def test_pull_request_number(self):
assert self.environment.pull_request_number == None
os.environ['ghprbPullId'] = '256'
assert self.environment.pull_request_number == '256'
def test_branch(self):
assert self.environment.branch == 'jenkins-source-branch'
def test_commit_sha(self):
assert self.environment.commit_sha == 'jenkins-commit-sha'
del os.environ['ghprbActualCommit']
assert self.environment.commit_sha == 'jenkins-commit-sha-from-git-plugin'
def test_parallel_nonce(self):
assert self.environment.parallel_nonce == 'jenkins-build-number'
def test_parallel_total(self):
assert self.environment.parallel_total_shards is None
class TestCircleEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestCircleEnvironment, self).setup_method(self)
os.environ['CIRCLECI'] = 'true'
os.environ['CIRCLE_BRANCH'] = 'circle-branch'
os.environ['CIRCLE_SHA1'] = 'circle-commit-sha'
os.environ['CIRCLE_BUILD_NUM'] = 'circle-build-number'
os.environ['CIRCLE_WORKFLOW_WORKSPACE_ID'] = 'circle-workflow-workspace-id'
os.environ['CIRCLE_NODE_TOTAL'] = '3'
os.environ['CI_PULL_REQUESTS'] = 'https://github.com/owner/repo-name/pull/123'
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == 'circle'
def test_branch(self):
assert self.environment.branch == 'circle-branch'
def test_commit_sha(self):
assert self.environment.commit_sha == 'circle-commit-sha'
def test_parallel_nonce(self):
assert self.environment.parallel_nonce == 'circle-build-number'
def test_parallel_total(self):
assert self.environment.parallel_total_shards == 3
class TestCodeshipEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestCodeshipEnvironment, self).setup_method(self)
os.environ['CI_NAME'] = 'codeship'
os.environ['CI_BRANCH'] = 'codeship-branch'
os.environ['CI_BUILD_NUMBER'] = 'codeship-build-number'
os.environ['CI_BUILD_ID'] = 'codeship-build-id'
os.environ['CI_PULL_REQUEST'] = 'false' # This is always false on Codeship, unfortunately.
os.environ['CI_COMMIT_ID'] = 'codeship-commit-sha'
os.environ['CI_NODE_TOTAL'] = '3'
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == 'codeship'
def test_branch(self):
assert self.environment.branch == 'codeship-branch'
def test_commit_sha(self):
assert self.environment.commit_sha == 'codeship-commit-sha'
def test_parallel_nonce(self):
assert self.environment.parallel_nonce == 'codeship-build-number'
del os.environ['CI_BUILD_NUMBER']
assert self.environment.parallel_nonce == 'codeship-build-id'
def test_parallel_total(self):
assert self.environment.parallel_total_shards == 3
class TestDroneEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestDroneEnvironment, self).setup_method(self)
os.environ['DRONE'] = 'true'
os.environ['DRONE_COMMIT'] = 'drone-commit-sha'
os.environ['DRONE_BRANCH'] = 'drone-branch'
os.environ['CI_PULL_REQUEST'] = '123'
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == 'drone'
def test_branch(self):
assert self.environment.branch == 'drone-branch'
def test_commit_sha(self):
assert self.environment.commit_sha == 'drone-commit-sha'
def test_parallel_nonce(self):
assert self.environment.parallel_nonce is None
def test_parallel_total(self):
assert self.environment.parallel_total_shards is None
class TestSemaphoreEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestSemaphoreEnvironment, self).setup_method(self)
os.environ['SEMAPHORE'] = 'true'
os.environ['BRANCH_NAME'] = 'semaphore-branch'
os.environ['REVISION'] = 'semaphore-commit-sha'
os.environ['SEMAPHORE_BRANCH_ID'] = 'semaphore-branch-id'
os.environ['SEMAPHORE_BUILD_NUMBER'] = 'semaphore-build-number'
os.environ['SEMAPHORE_THREAD_COUNT'] = '2'
os.environ['PULL_REQUEST_NUMBER'] = '123'
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == 'semaphore'
def test_branch(self):
assert self.environment.branch == 'semaphore-branch'
def test_commit_sha(self):
assert self.environment.commit_sha == 'semaphore-commit-sha'
def test_parallel_nonce(self):
expected_nonce = 'semaphore-branch-id/semaphore-build-number'
assert self.environment.parallel_nonce == expected_nonce
def test_parallel_total(self):
assert self.environment.parallel_total_shards == 2
class TestBuildkiteEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestBuildkiteEnvironment, self).setup_method(self)
os.environ['BUILDKITE'] = 'true'
os.environ['BUILDKITE_COMMIT'] = 'buildkite-commit-sha'
os.environ['BUILDKITE_BRANCH'] = 'buildkite-branch'
os.environ['BUILDKITE_PULL_REQUEST'] = 'false'
os.environ['BUILDKITE_BUILD_ID'] = 'buildkite-build-id'
os.environ['BUILDKITE_PARALLEL_JOB_COUNT'] = '2'
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == 'buildkite'
def test_branch(self):
assert self.environment.branch == 'buildkite-branch'
def test_commit_sha(self):
assert self.environment.commit_sha == 'buildkite-commit-sha'
os.environ['BUILDKITE_COMMIT'] = 'HEAD'
assert self.environment.commit_sha is None
def test_parallel_nonce(self):
assert self.environment.parallel_nonce == 'buildkite-build-id'
def test_parallel_total(self):
assert self.environment.parallel_total_shards == 2
class TestGitlabEnvironment(BaseTestPercyEnvironment):
def setup_method(self, method):
super(TestGitlabEnvironment, self).setup_method(self)
os.environ['GITLAB_CI'] = 'true'
os.environ['CI_COMMIT_SHA'] = 'gitlab-commit-sha'
os.environ['CI_COMMIT_REF_NAME'] = 'gitlab-branch'
os.environ['CI_JOB_ID'] = 'gitlab-job-id'
os.environ['CI_JOB_STAGE'] = 'test'
self.environment = percy.Environment()
def test_current_ci(self):
assert self.environment.current_ci == 'gitlab'
def test_branch(self):
assert self.environment.branch == 'gitlab-branch'
def test_commit_sha(self):
assert self.environment.commit_sha == 'gitlab-commit-sha'
def test_parallel_nonce(self):
assert self.environment.parallel_nonce == 'gitlab-branch/gitlab-job-id'
| 37.745833
| 103
| 0.652004
|
4a14b492b4400287fd617e21b06586a79c44874b
| 1,991
|
py
|
Python
|
drillbit/sdk_tests/iphone/build_project.py
|
ludolphus/titanium_mobile
|
cdc9f49deea8a9d7dd7382115f98144a16b7a5b3
|
[
"Apache-2.0"
] | 2
|
2015-05-30T20:28:13.000Z
|
2021-01-08T17:02:41.000Z
|
drillbit/sdk_tests/iphone/build_project.py
|
arnaudsj/titanium_mobile
|
4ed83dd6b355947a88f52efbf4ac82d86a2eeffd
|
[
"Apache-2.0"
] | 6
|
2015-04-27T22:12:58.000Z
|
2020-05-23T01:14:06.000Z
|
drillbit/sdk_tests/iphone/build_project.py
|
arnaudsj/titanium_mobile
|
4ed83dd6b355947a88f52efbf4ac82d86a2eeffd
|
[
"Apache-2.0"
] | 1
|
2019-03-15T04:55:17.000Z
|
2019-03-15T04:55:17.000Z
|
#!/usr/bin/env python
import os, sys, mobilesdk
class BuildProject(mobilesdk.MobileSDKTest):
def testLogIndexSearch(self):
supportDir = os.path.join(self.mobileDir, "support")
iphoneSupportDir = os.path.join(supportDir, "iphone")
sys.path.insert(0, supportDir)
sys.path.insert(0, iphoneSupportDir)
import tiapp
from builder import is_indexing_enabled
testResourcesDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "testResources")
tiappEnableMdfind = tiapp.TiAppXML(os.path.join(testResourcesDir, "tiapp_enablemdfind.xml"))
tiappDisableMdfind = tiapp.TiAppXML(os.path.join(testResourcesDir, "tiapp_disablemdfind.xml"))
simulatorDir = os.path.expanduser('~/Library/Application Support/iPhone Simulator/%s' % self.iosVersion)
nonRootDisabled = "/:\n\tIndexing enabled.\n/Volumes/Dummy:\n\tIndexing disabled."
rootDisabledUsersEnabled = "/:\n\tIndexing disabled.\n/Users:\n\tIndexing enabled."
onlyRootDisabled = "/:\n\tIndexing disabled."
onlyRootEnabled = "/:\n\tIndexing enabled."
# should be disabled in leopard, regardless of enablemdfind property
self.assertFalse(is_indexing_enabled(tiappEnableMdfind, simulatorDir,
platform_release="9.0.0"))
# enablemdfind = false should disable
self.assertFalse(is_indexing_enabled(tiappDisableMdfind, simulatorDir))
# indexing disabled on non-root volume should still be enabled
self.assertTrue(is_indexing_enabled(tiappEnableMdfind, simulatorDir,
indexer_status=nonRootDisabled))
# indexing enabled when / is disabled, but /Users is enabled
self.assertTrue(is_indexing_enabled(tiappEnableMdfind, simulatorDir,
indexer_status=rootDisabledUsersEnabled))
# indexing disabled when / is disabled by itself
self.assertFalse(is_indexing_enabled(tiappEnableMdfind, simulatorDir,
indexer_status=onlyRootDisabled))
# indexing enabled when / is enabled by itself
self.assertTrue(is_indexing_enabled(tiappEnableMdfind, simulatorDir,
indexer_status=onlyRootEnabled))
| 39.82
| 106
| 0.787042
|
4a14b59db7668ff4153ea630f39b505c02f77b8e
| 1,842
|
py
|
Python
|
setup.py
|
Luke050/instabot
|
148efeda4919e1ab727c5378bdf7a502e7187353
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Luke050/instabot
|
148efeda4919e1ab727c5378bdf7a502e7187353
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Luke050/instabot
|
148efeda4919e1ab727c5378bdf7a502e7187353
|
[
"Apache-2.0"
] | null | null | null |
from os import path
from codecs import open
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='instabot',
version='0.36.0',
description='Instagram bot scripts for promotion and API python wrapper.',
long_description=long_description,
author='Daniil Okhlopkov, Evgeny Kemerov',
author_email='danokhlopkov@gmail.com, eskemerov@gmail.com',
license='Apache Software License 2.0',
url='https://github.com/instagrambot/instabot',
keywords=['instagram', 'bot', 'api', 'wrapper'],
install_requires=[
'tqdm>=4.30.0',
'requests>=2.21.0',
'requests-toolbelt>=0.8.0',
'itsdangerous>=0.24',
'click>=5.1',
'schedule>=0.6.0',
'pysocks>=1.6.8',
'responses>=0.10.5',
'future>=0.17.1',
'six>=1.12.0',
'huepy>=0.9.8.1',
],
classifiers=[
# How mature is this project? Common values are
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Information Technology',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
packages=find_packages(),
)
| 32.315789
| 78
| 0.624321
|
4a14b95cbf864ea2ea478b8f6a541a4ad16f49dc
| 3,243
|
py
|
Python
|
AnalysisData.py
|
StuPeter/AutoHome_spider
|
ec2359bed96a70b908afc95d7098cca2174f0f47
|
[
"MIT"
] | 53
|
2019-05-13T08:44:49.000Z
|
2022-01-11T16:09:45.000Z
|
AnalysisData.py
|
qqccmm/AutoHome_spider
|
ec2359bed96a70b908afc95d7098cca2174f0f47
|
[
"MIT"
] | 2
|
2020-08-24T04:54:23.000Z
|
2020-11-13T06:46:45.000Z
|
AnalysisData.py
|
qqccmm/AutoHome_spider
|
ec2359bed96a70b908afc95d7098cca2174f0f47
|
[
"MIT"
] | 21
|
2019-05-13T08:43:48.000Z
|
2022-02-10T06:25:03.000Z
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
#
# @Version : 1.0
# @Time : 2019/7/8
# @Author : 圈圈烃
# @File : AnalysisData
# @Description: Autohome data analysis
#
#
from pyecharts import options as opts
from pyecharts.charts import Map
from pyecharts.charts import Bar
import csv
provinceList = [
'北京', '广东', '山东', '江苏', '河南', '上海', '河北', '浙江', '香港特别行政区', '陕西', '湖南', '重庆', '福建', '天津', '云南', '四川', '广西壮族自治区',
'安徽', '海南', '江西', '湖北', '山西', '辽宁', '台湾', '黑龙江', '内蒙古自治区', '澳门特别行政区', '贵州', '甘肃', '青海', '新疆维吾尔自治区', '西藏自治区', '吉林',
'宁夏回族自治区']
def map_visualmap(dataP, dataN) -> Map:
c = (
Map()
.add("雷克萨斯IS好评分布", dataP, "china")
.add("雷克萨斯IS差评分布", dataN, "china")
.set_global_opts(
# title_opts=opts.TitleOpts(title="雷克萨斯IS好评分布图"),
# title_opts=opts.TitleOpts(title="雷克萨斯IS差评分布图"),
# title_opts=opts.TitleOpts(title="雷克萨斯IS车友分布图"),
visualmap_opts=opts.VisualMapOpts(max_=200),
)
)
return c
def bar_base(data) -> Bar:
c = (
Bar()
.add_xaxis(['好评指数', '差评指数'])
.add_yaxis("雷克萨斯ES", data)
.set_global_opts(title_opts=opts.TitleOpts(title="雷克萨斯IS情感分析", subtitle="好差评指数"))
)
return c
def main():
userLocList = list()
locList = list()
scoreList = list()
emotionalList = list()
csvData = csv.reader(open('雷克萨斯IS.csv', encoding='utf_8_sig'))
for row in csvData:
userLoc = row[0] + "|" + row[5][:2]
if row[8] != 'None':
score = round(float(row[8]), 5)
scoreList.append(score)
if score >= 0.5:
emotionalList.append(1)
                # userLocList.append(userLoc)  # positive reviews
else:
emotionalList.append(0)
                userLocList.append(userLoc)  # negative reviews
            # userLocList.append(userLoc)  # all reviews (distribution)
    # Deduplicate based on the user-location list
userLocList = list(set(userLocList))
    # Tally geographic locations
for loc in userLocList:
loc = loc[-2:]
if loc == "内蒙":
loc = "内蒙古"
locList.append(loc)
locDict = {}
locFreqList = list()
for i in set(locList):
locDict[i] = locList.count(i)
locFreq = (i, locList.count(i))
locFreqList.append(locFreq)
print(locFreqList)
P = [('湖北', 15), ('新疆', 1), ('河南', 5), ('广西', 19), ('青海', 2), ('山西', 2), ('陕西', 5), ('吉林', 1), ('辽宁', 11), ('广东', 176), ('上海', 38), ('贵州', 10), ('澳门', 3), ('湖南', 7), ('浙江', 82), ('天津', 7), ('河北', 11), ('海外', 3), ('重庆', 7), ('四川', 35), ('安徽', 13), ('江苏', 73), ('北京', 55), ('内蒙古', 3), ('黑龙', 4), ('云南', 11), ('其它', 2), ('福建', 21), ('西藏', 3), ('江西', 7), ('山东', 24), ('海南', 4), ('甘肃', 1)]
N = [('广东', 131), ('江苏', 67), ('福建', 10), ('海外', 2), ('安徽', 5), ('上海', 36), ('黑龙', 3), ('西藏', 1), ('新疆', 2), ('河南', 6), ('湖北', 10), ('吉林', 1), ('重庆', 7), ('云南', 12), ('湖南', 8), ('贵州', 6), ('甘肃', 2), ('天津', 5), ('广西', 14), ('山西', 3), ('江西', 5), ('河北', 9), ('陕西', 10), ('北京', 46), ('澳门', 3), ('辽宁', 8), ('香港', 4), ('四川', 32), ('山东', 16), ('浙江', 42), ('内蒙古', 1), ('海南', 1)]
    # Draw the distribution map
c = map_visualmap(P, N)
c.render()
    # Draw the bar chart
# c2 = bar_base([emotionalList.count(1), emotionalList.count(0)])
# c2.render()
if __name__ == '__main__':
main()
| 35.25
| 388
| 0.483811
|
4a14b9b656c01de3a93b7fba0575c791fc39a515
| 4,608
|
py
|
Python
|
cli.py
|
Gilbert189/Nihonium
|
f2f9b0064bf624a2b6e462e9c819ddd8679dab59
|
[
"MIT"
] | null | null | null |
cli.py
|
Gilbert189/Nihonium
|
f2f9b0064bf624a2b6e462e9c819ddd8679dab59
|
[
"MIT"
] | null | null | null |
cli.py
|
Gilbert189/Nihonium
|
f2f9b0064bf624a2b6e462e9c819ddd8679dab59
|
[
"MIT"
] | null | null | null |
"""
A (not human-friendly) CLI for Flerovium.
Does double duty as a bridge to Java.
"""
import os, sys, json, warnings, commands, versions, re, datetime, getopt
# Initialize variables
version = versions.Version(2, 0, 0) # Specifies this CLI version.
with open("config.json", "r") as f:
config = json.loads(f.read())
for k, v in config.items(): globals()[k] = v
uptime = datetime.datetime.fromisoformat(uptime)
pattern = bot_info["prefix"]+"(.+)"
# Copied from Nickel
def logEntry(entry: str, timestamp=None, printToScreen: bool=verbose, severity=0):
if timestamp is None: timestamp = datetime.datetime.now()
if not readOnly:
if not os.path.isdir('logs'): os.mkdir('logs')
with open("logs/" + timestamp.strftime("%Y%m%d") + ".log", "a+", encoding="utf-8") as logfile:
logfile.write(f"[{timestamp.strftime('%I:%M:%S.%f %p')} {['INFO','WARNING','ERROR'][severity]}] " + entry + "\n")
logfile.seek(0)
if printToScreen:
if severity==0: print(entry,file=sys.stderr)
elif severity==1: warnings.warn(entry)
        elif severity==2: raise RuntimeError(entry)
def assemble_botdata():
return {
"uptime": uptime,
"data": stats,
"thread_ids": None,
"post_ids": None,
"cookies": None,
"session": None,
"headers": None,
"version": version,
"bot_info": bot_info
}
def assemble_userdata(user):
return {
"name": f"{user}#{discriminator}",
"uID": uid
}
def getFunction(match):
match[0] = match[0].lower()
if "ex_commands" in dir(commands):
if "flerovium" in commands.ex_commands:
if match[0] in commands.ex_commands["flerovium"]:
funct = commands.ex_commands["flerovium"][match[0]]
if "Command" in type(funct).__name__: funct = funct.command
return funct
if match[0] in commands.commands:
funct = commands.commands[match[0]]
if "Command" in type(funct).__name__: funct = funct.command
if funct.__name__ in inc_commands:
return lambda *a: ":no_entry_sign: The command you issued is incompatible with Flerovium."
else:
return funct
# Get attributes
def getAttr():
#logEntry("Updating attribs.json...")
#with open("attribs.json", "wb") as f: f.write(requests.get("https://raw.githubusercontent.com/Gilbert189/Flerovium/main/attribs.json").content)
logEntry("attribs.json updated.")
allowed = ["inc_commands", "replace", "attrs_version", "fancy", "readOnly", "verbose", "legacy", "pattern"]
important = ["bot_info"]
with open("attribs.json", "r+") as f:
for x, i in json.loads(f.read()).items():
if x in allowed: globals()[x] = i
# Check compatibility with Flerovium
if bot_info["id"] not in commands.alt_minvers:
logEntry("The copy of commands.py doesn't have alt_minvers argument set.", severity=1)
#ask("The copy of commands.py doesn't have Flerovium on alt_minvers.\nWe don't know if this copy supports Flerovium.\nContinue anyway?",{"Yes":lambda:0,"No":exit})()
elif commands.alt_minvers[bot_info["id"]] > version:
raise ImportError(
"The copy of commands.py is incompatible with this version of Flerovium."+
"\nFlerovium is in version {}, while commands.py requires at least {}".format(str(version),str(commands.flerovium_minver))
)
def formatToDiscord(text):
"""A function to convert TBG formatting tags to Discord formatting tags."""
    replace = {
        "b": "**",
        "i": "_",
        "u": "__",
        "s": "~~",
        "code": "\n```\n",
        "quote": "\n```\n",
        # The original "url" entry referenced an undefined name `a`; using the
        # regex match object keeps the intent of showing the link target.
        "url": lambda m: "({})".format(m.group(1)[1:]) if m.group(1) else "",
    }
    for t, d in replace.items():
        text = re.sub(r"\[{}(=.+?)?\]|\[/{}\]".format(t, t), d, text)
return text
logEntry(f"Received {sys.argv}")
user, discriminator, uid = sys.argv[1:4]
discriminator = int(discriminator)
args = sys.argv[4:]
output = None
func = getFunction(args)
if not output:
if len(args) > 1: output=func(assemble_botdata(),{},assemble_userdata(user),*args[1:])
elif func: output=func(assemble_botdata(),assemble_userdata(user),{})
else: output=":no_entry_sign: Flerovium cannot process your command. (yet)"
if type(output)==str:
# replace the text
for x, rep in replace.items():
if "__name__" in dir(func):
if x == func.__name__:
for fr, to in rep.items(): output = re.sub(fr, to, output)
output = formatToDiscord(output)
output = {"type": "text", "data": output}
print(json.dumps(output))
| 36.283465
| 169
| 0.621094
|
4a14bc5dcf3a01d18050a1728d18ac1553ed8804
| 8,830
|
py
|
Python
|
homeassistant/components/group/light.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
homeassistant/components/group/light.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/group/light.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""This platform allows several lights to be grouped into one light."""
from __future__ import annotations
from collections import Counter
import itertools
import logging
from typing import Any, Set, cast
import voluptuous as vol
from homeassistant.components import light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_MODE,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_EFFECT_LIST,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_SUPPORTED_COLOR_MODES,
ATTR_TRANSITION,
ATTR_WHITE,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_ONOFF,
PLATFORM_SCHEMA,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_ENTITIES,
CONF_NAME,
CONF_UNIQUE_ID,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import CoreState, Event, HomeAssistant, State
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType
from . import GroupEntity
from .util import find_state_attributes, mean_tuple, reduce_attribute
DEFAULT_NAME = "Light Group"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Required(CONF_ENTITIES): cv.entities_domain(light.DOMAIN),
}
)
SUPPORT_GROUP_LIGHT = (
SUPPORT_EFFECT | SUPPORT_FLASH | SUPPORT_TRANSITION | SUPPORT_WHITE_VALUE
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: dict[str, Any] | None = None,
) -> None:
"""Initialize light.group platform."""
async_add_entities(
[
LightGroup(
config.get(CONF_UNIQUE_ID), config[CONF_NAME], config[CONF_ENTITIES]
)
]
)
FORWARDED_ATTRIBUTES = frozenset(
{
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_TRANSITION,
ATTR_WHITE,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
}
)
class LightGroup(GroupEntity, light.LightEntity):
"""Representation of a light group."""
_attr_available = False
_attr_icon = "mdi:lightbulb-group"
_attr_is_on = False
_attr_max_mireds = 500
_attr_min_mireds = 154
_attr_should_poll = False
def __init__(self, unique_id: str | None, name: str, entity_ids: list[str]) -> None:
"""Initialize a light group."""
self._entity_ids = entity_ids
self._white_value: int | None = None
self._attr_name = name
self._attr_extra_state_attributes = {ATTR_ENTITY_ID: entity_ids}
self._attr_unique_id = unique_id
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
async def async_state_changed_listener(event: Event) -> None:
"""Handle child updates."""
self.async_set_context(event.context)
await self.async_defer_or_update_ha_state()
self.async_on_remove(
async_track_state_change_event(
self.hass, self._entity_ids, async_state_changed_listener
)
)
if self.hass.state == CoreState.running:
await self.async_update()
return
await super().async_added_to_hass()
@property
def white_value(self) -> int | None:
"""Return the white value of this light group between 0..255."""
return self._white_value
async def async_turn_on(self, **kwargs: Any) -> None:
"""Forward the turn_on command to all lights in the light group."""
data = {
key: value for key, value in kwargs.items() if key in FORWARDED_ATTRIBUTES
}
data[ATTR_ENTITY_ID] = self._entity_ids
_LOGGER.debug("Forwarded turn_on command: %s", data)
await self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
data,
blocking=True,
context=self._context,
)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Forward the turn_off command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
await self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_OFF,
data,
blocking=True,
context=self._context,
)
async def async_update(self) -> None:
"""Query all members and determine the light group state."""
all_states = [self.hass.states.get(x) for x in self._entity_ids]
states: list[State] = list(filter(None, all_states))
on_states = [state for state in states if state.state == STATE_ON]
self._attr_is_on = len(on_states) > 0
self._attr_available = any(state.state != STATE_UNAVAILABLE for state in states)
self._attr_brightness = reduce_attribute(on_states, ATTR_BRIGHTNESS)
self._attr_hs_color = reduce_attribute(
on_states, ATTR_HS_COLOR, reduce=mean_tuple
)
self._attr_rgb_color = reduce_attribute(
on_states, ATTR_RGB_COLOR, reduce=mean_tuple
)
self._attr_rgbw_color = reduce_attribute(
on_states, ATTR_RGBW_COLOR, reduce=mean_tuple
)
self._attr_rgbww_color = reduce_attribute(
on_states, ATTR_RGBWW_COLOR, reduce=mean_tuple
)
self._attr_xy_color = reduce_attribute(
on_states, ATTR_XY_COLOR, reduce=mean_tuple
)
self._white_value = reduce_attribute(on_states, ATTR_WHITE_VALUE)
self._attr_color_temp = reduce_attribute(on_states, ATTR_COLOR_TEMP)
self._attr_min_mireds = reduce_attribute(
states, ATTR_MIN_MIREDS, default=154, reduce=min
)
self._attr_max_mireds = reduce_attribute(
states, ATTR_MAX_MIREDS, default=500, reduce=max
)
self._attr_effect_list = None
all_effect_lists = list(find_state_attributes(states, ATTR_EFFECT_LIST))
if all_effect_lists:
# Merge all effects from all effect_lists with a union merge.
self._attr_effect_list = list(set().union(*all_effect_lists))
self._attr_effect_list.sort()
if "None" in self._attr_effect_list:
self._attr_effect_list.remove("None")
self._attr_effect_list.insert(0, "None")
self._attr_effect = None
all_effects = list(find_state_attributes(on_states, ATTR_EFFECT))
if all_effects:
# Report the most common effect.
effects_count = Counter(itertools.chain(all_effects))
self._attr_effect = effects_count.most_common(1)[0][0]
self._attr_color_mode = None
all_color_modes = list(find_state_attributes(on_states, ATTR_COLOR_MODE))
if all_color_modes:
# Report the most common color mode, select brightness and onoff last
color_mode_count = Counter(itertools.chain(all_color_modes))
if COLOR_MODE_ONOFF in color_mode_count:
color_mode_count[COLOR_MODE_ONOFF] = -1
if COLOR_MODE_BRIGHTNESS in color_mode_count:
color_mode_count[COLOR_MODE_BRIGHTNESS] = 0
self._attr_color_mode = color_mode_count.most_common(1)[0][0]
self._attr_supported_color_modes = None
all_supported_color_modes = list(
find_state_attributes(states, ATTR_SUPPORTED_COLOR_MODES)
)
if all_supported_color_modes:
# Merge all color modes.
self._attr_supported_color_modes = cast(
Set[str], set().union(*all_supported_color_modes)
)
self._attr_supported_features = 0
for support in find_state_attributes(states, ATTR_SUPPORTED_FEATURES):
# Merge supported features by emulating support for every feature
# we find.
self._attr_supported_features |= support
# Bitwise-and the supported features with the GroupedLight's features
# so that we don't break in the future when a new feature is added.
self._attr_supported_features &= SUPPORT_GROUP_LIGHT
| 33.320755
| 88
| 0.667271
|
4a14bc75baf9a25843b73b7dbf07d924203c8ed4
| 6,125
|
py
|
Python
|
finorch/sessions/cit/client.py
|
ADACS-Australia/SS2021B-DBrown
|
67b93b316e6f9ab09e3bd5105edbbc71108e0723
|
[
"MIT"
] | null | null | null |
finorch/sessions/cit/client.py
|
ADACS-Australia/SS2021B-DBrown
|
67b93b316e6f9ab09e3bd5105edbbc71108e0723
|
[
"MIT"
] | null | null | null |
finorch/sessions/cit/client.py
|
ADACS-Australia/SS2021B-DBrown
|
67b93b316e6f9ab09e3bd5105edbbc71108e0723
|
[
"MIT"
] | null | null | null |
import logging
import os
import sys
import uuid
import warnings
from pathlib import Path
from finorch.sessions.abstract_client import AbstractClient
from finorch.transport.exceptions import TransportStartJobException
from finorch.utils.cd import cd
from finorch.utils.job_status import JobStatus
SUBMIT_SCRIPT = """#!/bin/bash
. .env
{python} -m finorch.wrapper.wrapper cit
"""
class CITClient(AbstractClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _write_environment(self, environment_file):
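        # Write the current process environment to a file, skipping exported shell functions (keys containing "()").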
with open(environment_file, "w") as f:
for k, v in os.environ.items():
if "()" in k:
continue
else:
f.write(f'{k}="{v}"\n')
def _submit_condor_job(self, job_identifier, katscript):
# Create the working directory for the job
exec_dir = self._exec_path / job_identifier
os.makedirs(exec_dir, exist_ok=True)
with cd(exec_dir):
# Write the katscript
with open(exec_dir / 'script.k', 'w') as f:
f.write(katscript)
# Write the environment file
self._write_environment(exec_dir / '.env')
submit_script_path = exec_dir / 'submit.sh'
with open(submit_script_path, 'w') as f:
script = SUBMIT_SCRIPT.format(python=sys.executable)
f.write(script)
logging.info(f"Trying to submit from {exec_dir}")
        # Create the submit object and submit it.
        # Retry up to 5 times before failing; condor submit is quite flaky at times.
for attempt in range(1, 6):
try:
with cd(exec_dir):
warnings.filterwarnings("ignore")
import htcondor
submit = htcondor.Submit({
"universe": "scheduler",
"executable": "/bin/bash",
"arguments": "submit.sh",
"log": "log",
"output": "out",
"error": "error",
"request_cpus": "1",
"request_memory": "16G"
})
result = htcondor.Schedd().submit(submit, count=1)
                    # Log the successful submission and the resulting ClusterId
                    logging.info(f"Success: condor submit succeeded, got ClusterId={result.cluster()}")
# Return the condor ClusterId
return result.cluster()
except Exception as e:
                # Log the error that occurred and retry
logging.error(f"Error: condor submit failed, trying again {attempt}/5")
logging.error(e)
raise TransportStartJobException("Unable to submit condor job. Condor submit failed 5 times in a row, "
"assuming something is wrong.")
def _cancel_condor_job(self, job_id):
logging.info("Trying to terminate job {}...".format(job_id))
warnings.filterwarnings("ignore")
import htcondor
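        # Put the job's cluster on hold via the local schedd (procs 0 and 1 only).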
htcondor.Schedd().act(htcondor.JobAction.Hold, f"ClusterId == {job_id} && ProcID <= 1")
def start_job(self, katscript):
job_identifier = str(uuid.uuid4())
logging.info("Starting job with the following script")
logging.info(katscript)
logging.info(job_identifier)
condor_id = self._submit_condor_job(job_identifier, katscript)
self.db.add_job(job_identifier, condor_id)
return job_identifier
def terminate(self):
return super().terminate()
def get_jobs(self):
jobs = self.db.get_jobs()
return jobs
def get_job_status(self, job_identifier):
status = self.db.get_job_status(job_identifier)
# If the job status is less than or equal to RUNNING, then we need to derive the current job status and update
# the job status accordingly.
new_status = status
if status <= JobStatus.RUNNING:
# Check if the job is completed, or started, or queued
p = Path(self._exec_path)
if (p / job_identifier / 'finished').exists():
new_status = JobStatus.COMPLETED
elif (p / job_identifier / 'started').exists():
new_status = JobStatus.RUNNING
else:
new_status = JobStatus.QUEUED
# Update the job if the status has changed
if new_status != status:
self.db.update_job_status(job_identifier, new_status)
return new_status
def get_job_file(self, job_identifier, file_path):
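        # Returns the raw file contents on success, or a (None, message) tuple on failure.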
full_file_path = Path(self._exec_path / job_identifier / file_path)
if full_file_path.exists():
try:
with open(full_file_path, 'rb') as f:
return f.read()
except Exception:
return None, f"Unable to retrieve file {full_file_path} as the file could not be read."
return None, f"Unable to retrieve file {full_file_path} as the file does not exist."
def get_job_file_list(self, job_identifier):
full_path = Path(self._exec_path / job_identifier)
if full_path.exists():
# list the files
file_list = list()
for p in full_path.rglob('*.*'):
file_list.append([p.name, str(p), p.stat().st_size])
return file_list
return None, f"Unable to retrieve file list for the job identifier {job_identifier}"
def stop_job(self, job_identifier):
# If the current job status is less than or equal to running, then cancel the job
if self.get_job_status(job_identifier) <= JobStatus.RUNNING:
# Tell condor to cancel the job
self._cancel_condor_job(self.db.get_job_batch_id(job_identifier))
# Mark the job as cancelled
self.db.update_job_status(job_identifier, JobStatus.CANCELLED)
| 37.121212
| 118
| 0.580408
|
4a14bc77e117aa87e638d0c757d0b4edfc19b7f8
| 1,476
|
py
|
Python
|
oneVSmulti_demo.py
|
NTUMitoLab/Mitochondrial-Network-Model
|
9804e0a80ee83ece6d88c9592f9c8bab0c4b9894
|
[
"MIT"
] | null | null | null |
oneVSmulti_demo.py
|
NTUMitoLab/Mitochondrial-Network-Model
|
9804e0a80ee83ece6d88c9592f9c8bab0c4b9894
|
[
"MIT"
] | 1
|
2022-03-06T07:08:17.000Z
|
2022-03-10T09:16:43.000Z
|
oneVSmulti_demo.py
|
NTUMitoLab/Mitochondrial-Network-Model
|
9804e0a80ee83ece6d88c9592f9c8bab0c4b9894
|
[
"MIT"
] | null | null | null |
import networkmodel
import myGA
import KLDivergence
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import multiprocessing
import time
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import LeaveOneOut
from scipy.stats import entropy
from geneticalgorithm import geneticalgorithm as ga
def get_ans(param):
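    # Run one network simulation and return columns 5 and 7 of the final row of the resulting table.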
netinfo = networkmodel.Iterate(param, 200, 200)
return [netinfo.iloc[-1, 5], netinfo.iloc[-1, 7]]
def get_answers(params):
with multiprocessing.Pool() as pool:
pool_out = pool.map(get_ans, params)
return pool_out
if __name__ == "__main__":
params = [[0.03, 0.0015]] * 100
start_time = time.time()
ANS = get_answers(params)
print(ANS)
duration = time.time() - start_time
print(f"Duration {duration} seconds")
# true_param = [0.03, 0.0015]
# num_of_sample = 100
# TRUE_ANS = []
# start_time = time.time()
# for i in range(num_of_sample):
# netinfo_ans = networkmodel.Iterate(true_param, 200, 200)
# TRUE_ANS.append([netinfo_ans.iloc[-1,5], netinfo_ans.iloc[-1,7]])
# print(TRUE_ANS)
# duration = time.time() - start_time
# print(f"Duratioin {duration} seconds")
# x_ans = np.array([ans[0] for ans in TRUE_ANS])
# y_ans = np.array([ans[1] for ans in TRUE_ANS])
# plt.scatter(x_ans, y_ans, c='black', s=20, edgecolor='white')
# plt.show()
| 26.836364
| 72
| 0.686992
|
4a14be4fecae578fde861b4a71c2c7555517d364
| 636
|
py
|
Python
|
setup.py
|
bdraco/tesla_powerwall
|
dbf4493796c13e08fef2e8ddda547ad9ef1e2469
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
bdraco/tesla_powerwall
|
dbf4493796c13e08fef2e8ddda547ad9ef1e2469
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
bdraco/tesla_powerwall
|
dbf4493796c13e08fef2e8ddda547ad9ef1e2469
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="tesla_powerwall",
author="Jrester",
author_email="jrester379@gmail.com",
version='0.3.14',
description="API for Tesla Powerwall",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jrester/tesla_powerwall",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
],
install_requires=["requests>=2.22.0", "packaging>=20.5"],
)
| 28.909091
| 61
| 0.671384
|
4a14be7441110d461eb3993fb4d378c7ac3691ad
| 2,380
|
py
|
Python
|
Modell/eingabeAuswerten.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
Modell/eingabeAuswerten.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
Modell/eingabeAuswerten.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
import sounddevice as sd
from PIL import Image
import time
import numpy as np
import tensorflow.keras.models
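# "schlange" (German for queue): a fixed-size FIFO buffer that drops its oldest element once full.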
class schlange:
def __init__(self,max_groesse):
self.__max_groesse = max_groesse
self.__voll = False
self.__liste = []
def anhaengen(self,ding):
self.__liste.append(ding)
if len(self.__liste) > self.__max_groesse:
self.__voll=True
self.__liste.pop(0)
def get_liste(self):
return np.array(self.__liste)
def nullen(self):
for i in range(len(self.__liste)):self.__liste[i]=0
def ist_voll(self):
return self.__voll
def fft_auf_slice(daten,slicegroesse):
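    # Magnitude FFT of one audio slice, clipped to the 16-bit range and scaled down to 8-bit values.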
slice_FFT = abs(np.fft.fft(daten,slicegroesse)[0:int(slicegroesse/2)])
slice_FFT = np.uint8(np.clip(slice_FFT,0,65535)/256)
return slice_FFT
# Load the Keras model
model = tensorflow.keras.models.load_model("model.h5")
Schluesselwortdauer = 2  # seconds
samplerate = 16000
channelzahl = 1
blockgroesse = 512
slicedauer = blockgroesse/samplerate  # seconds
sliceanzahl = np.floor((Schluesselwortdauer * samplerate)/blockgroesse).astype(np.int16)  # without overlap
dtype = np.int16
datenListe = []
while(1):
input("Press Enter to continue...")
spektrogramm = []
aufnahme = sd.rec(samplerate= samplerate,channels=channelzahl,dtype=dtype,frames=samplerate*2)
sd.wait()
sd.play(aufnahme,samplerate=16000)
aufnahme = aufnahme.flatten()
fenster = np.hamming(blockgroesse)
start = 0
ende = blockgroesse
while(ende<len(aufnahme)):
teilaufnahme = aufnahme[start:ende]
teilaufnahme = teilaufnahme*fenster
teilaufnahme_FFT = abs(np.fft.fft(teilaufnahme,blockgroesse)[0:int(blockgroesse/2)])
spektrogramm.append(teilaufnahme_FFT)
start += blockgroesse
ende += blockgroesse
spektrogramm = np.array(spektrogramm)
hoehe,breite = spektrogramm.shape
for i in range(hoehe):
for j in range(breite):
if(spektrogramm[i,j]>65535):
spektrogramm[i,j]=65535
spektrogramm[i,j]=np.uint8(spektrogramm[i,j]/256)
#bild = Image.fromarray(spektrogramm)
#bild.show()
spektrogramm = np.expand_dims(spektrogramm.reshape(62, 256, 1),axis = 0)
spektrogramm = spektrogramm.astype(np.float32)
ergebnis = model.predict(spektrogramm)
    print((ergebnis*100).astype(int))
| 34
| 108
| 0.686975
|
4a14beeaaf5a646a113208941da52dbcb9cdfe9a
| 3,357
|
py
|
Python
|
cell_classifier/mlp/train_classifier.py
|
abhinav-kumar-thakur/table-understanding
|
5448e02dec87ea3974bfc118ebeace86e8918285
|
[
"MIT"
] | null | null | null |
cell_classifier/mlp/train_classifier.py
|
abhinav-kumar-thakur/table-understanding
|
5448e02dec87ea3974bfc118ebeace86e8918285
|
[
"MIT"
] | null | null | null |
cell_classifier/mlp/train_classifier.py
|
abhinav-kumar-thakur/table-understanding
|
5448e02dec87ea3974bfc118ebeace86e8918285
|
[
"MIT"
] | null | null | null |
from joblib import dump, load
from sklearn.ensemble import RandomForestClassifier
import pickle
import numpy as np
from type.cell.semantic_cell_type import SemanticCellType
from type.cell.cell_type_pmf import CellTypePMF
from sklearn.metrics import f1_score
from imblearn.ensemble import BalancedRandomForestClassifier
from sklearn.model_selection import GridSearchCV
import itertools
from cell_classifier.mlp.model import MLP, MLPloader
from torch.utils.data import DataLoader
import torch
class MLPClassifierTrainer:
def __init__(self, model_file):
self.model_file = model_file
print(self.model_file)
def prepare_data(self, sheets, tags):
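        # Flatten per-cell embeddings and their best-type labels across all sheets into flat feature/label arrays.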
feats = [sheet.meta['embeddings'][i][j] for sheet in sheets
for i in range(sheet.values.shape[0])
for j in range(sheet.values.shape[1])]
y = [tag[i][j].get_best_type().id() for tag in tags
for i in range(len(tag))
for j in range(len(tag[i]))]
feats = np.array(feats)
y = np.array(y)
return feats, y
def predict_wrapper(self, res, r, c):
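        # Reshape the flat prediction vector into an (r, c) grid of single-class CellTypePMF objects.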
pred = np.empty((r, c), dtype=CellTypePMF)
idx = 0
for i in range(r):
for j in range(c):
t_id = res[idx]
t_l = SemanticCellType.id2str[t_id]
cell_class_dict = {
SemanticCellType.inverse_dict[t_l]: 1.0
}
pred[i][j] = CellTypePMF(cell_class_dict)
idx += 1
return pred
def fit(self, sheets, tags, eval_sheets, eval_tags):
X_train, y_train = self.prepare_data(sheets, tags)
X_eval, y_eval = self.prepare_data(eval_sheets, eval_tags)
print(X_eval.shape, y_eval.shape, y_eval[0])
#print("train", np.bincount(y_train))
#print("eval", np.bincount(y_eval))
best_model = None
best_loss = 100000
for lr in [0.01, 0.001, 0.0001]:
loader = DataLoader(dataset=MLPloader(X_train, y_train), batch_size=32,
shuffle=True)
model = MLP(X_train.shape[1], len(SemanticCellType.id2str)).to(torch.device("cuda:0"))
loss_fn = torch.nn.CrossEntropyLoss()
            optimizer = torch.optim.SGD(model.parameters(), lr=lr)  # use the learning rate from the sweep
            for epoch in range(100):
                train_loss = 0  # reset the running loss at the start of each epoch
for batch_idx, (data,label) in enumerate(loader):
data = data.to(torch.device("cuda:0")).float()
label = label.to(torch.device("cuda:0"))
optimizer.zero_grad()
out = model(data)
loss = loss_fn(out, label)
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % 50 == 0:
print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(loader.dataset)))
if train_loss / len(loader.dataset) < best_loss:
best_loss = train_loss / len(loader.dataset)
best_model = model
self.model = best_model
self.save_model()
def save_model(self):
torch.save(self.model.state_dict(), self.model_file)
#dump(self.model, self.model_file)
| 33.237624
| 113
| 0.577599
|