hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fea14b02a1a8d2b002bb7dfd2a72defa7e5ba2 | 435 | py | Python | tests/integration/cli/test_misc.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | null | null | null | tests/integration/cli/test_misc.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | 1 | 2022-02-12T01:31:16.000Z | 2022-02-22T00:26:49.000Z | tests/integration/cli/test_misc.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | null | null | null | import pytest
# NOTE: test all the things without a direct test elsewhere
@pytest.mark.parametrize(
    "args",
    (
        [],
        ["--version"],
        ["--config"],
        ["--help"],
        ["accounts"],
        ["networks"],
        ["networks", "list"],
        ["plugins"],
    ),
)
def test_invocation(ape_cli, runner, args):
    """Smoke-test that each top-level CLI invocation exits with status 0."""
    result = runner.invoke(ape_cli, args)
    # Include the captured output in the failure message for easier debugging.
    assert result.exit_code == 0, result.output
| 20.714286 | 59 | 0.537931 | import pytest
@pytest.mark.parametrize(
"args",
(
[],
["--version"],
["--config"],
["--help"],
["accounts"],
["networks"],
["networks", "list"],
["plugins"],
),
)
def test_invocation(ape_cli, runner, args):
result = runner.invoke(ape_cli, args)
assert result.exit_code == 0, result.output
| true | true |
f7fea14b9dbf1257815be16f64f7d7f300f8a3ce | 3,542 | py | Python | ss_baselines/savi/ppo/slurm_utils.py | tynguyen/sound-spaces | b196f3a36b4076752400cbf186e9cf2e160cc3c2 | [
"CC-BY-4.0"
] | 171 | 2020-08-21T06:45:19.000Z | 2022-03-30T03:52:15.000Z | ss_baselines/savi/ppo/slurm_utils.py | tynguyen/sound-spaces | b196f3a36b4076752400cbf186e9cf2e160cc3c2 | [
"CC-BY-4.0"
] | 69 | 2020-09-19T02:44:10.000Z | 2022-03-17T03:57:55.000Z | ss_baselines/savi/ppo/slurm_utils.py | tynguyen/sound-spaces | b196f3a36b4076752400cbf186e9cf2e160cc3c2 | [
"CC-BY-4.0"
] | 41 | 2020-08-25T06:58:24.000Z | 2022-03-23T05:04:47.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import shlex
import signal
import subprocess
import threading
from os import path as osp
from typing import Any, Optional, Tuple
import ifcfg
import torch
from habitat import logger
# Process-wide flags flipped by the signal handlers below:
# EXIT signals the training loop to stop; REQUEUE additionally requests
# that the SLURM job be resubmitted before exiting.
EXIT = threading.Event()
EXIT.clear()
REQUEUE = threading.Event()
REQUEUE.clear()
# PID of the process that imported this module; used so only the main
# process performs the requeue.
MAIN_PID = os.getpid()
# None when not running under SLURM.
SLURM_JOBID = os.environ.get("SLURM_JOB_ID", None)
# Default location for checkpointing interrupted-job state.
INTERRUPTED_STATE_FILE = osp.join(
    os.environ["HOME"], ".interrupted_states", f"{SLURM_JOBID}.pth"
)
def _clean_exit_handler(signum, frame):
    """Signal handler: ask the process to exit cleanly (sets ``EXIT``)."""
    EXIT.set()
    print("Exiting cleanly", flush=True)
def _requeue_handler(signal, frame):
    """Signal handler: exit cleanly AND request a SLURM requeue.

    NOTE: the first parameter shadows the ``signal`` module, but the module
    is not used inside this handler so behavior is unaffected.
    """
    print("Got signal to requeue", flush=True)
    EXIT.set()
    REQUEUE.set()
def add_signal_handlers():
    """Install the clean-exit and requeue signal handlers for this process.

    SIGINT/SIGTERM/SIGUSR2 trigger a clean exit; SIGUSR1 triggers a requeue.
    """
    signal.signal(signal.SIGINT, _clean_exit_handler)
    signal.signal(signal.SIGTERM, _clean_exit_handler)
    # SIGUSR2 can be sent to all processes to have them cleanup
    # and exit nicely.  This is nice to use with SLURM as scancel <job_id>
    # sets a 30 second timer for the job to exit, and it can take more than
    # 30 seconds for the job to cleanup and exit nicely.  When using NCCL,
    # forcing the job to exit without cleaning up can be bad.
    # scancel --signal SIGUSR2 <job_id> will set no such timer and will give
    # the job ample time to cleanup and exit.
    signal.signal(signal.SIGUSR2, _clean_exit_handler)
    signal.signal(signal.SIGUSR1, _requeue_handler)
def save_interrupted_state(state: Any, filename: str = None, model_dir: str = None):
    r"""Persist job state so a preempted/requeued job can resume.

    Does nothing when SLURM is not in use and no explicit filename was given.

    :param state: Arbitrary (torch-serializable) state to checkpoint.
    :param filename: Destination path.  Defaults to
        ``${HOME}/.interrupted_states/${SLURM_JOBID}.pth``.
    :param model_dir: If given (and ``filename`` is not), the state is written
        to ``<model_dir>/interrupted_state.pth`` instead of the default.
    """
    if filename is None and SLURM_JOBID is None:
        logger.warn("SLURM_JOBID is none, not saving interrupted state")
        return

    if filename is None:
        # Prefer a model-directory checkpoint over the HOME-based default.
        filename = (
            os.path.join(model_dir, 'interrupted_state.pth')
            if model_dir is not None
            else INTERRUPTED_STATE_FILE
        )

    torch.save(state, filename)
def load_interrupted_state(filename: str = None, model_dir: str = None) -> Optional[Any]:
    r"""Load state previously written by :func:`save_interrupted_state`.

    :param filename: Path of the saved state.  Defaults to
        ``${HOME}/.interrupted_states/${SLURM_JOBID}.pth``.
    :param model_dir: If given (and ``filename`` is not), looks for
        ``<model_dir>/interrupted_state.pth`` instead of the default.
    :return: The saved state if the file exists, otherwise ``None``.
    """
    if filename is None and SLURM_JOBID is None:
        return None

    if filename is None:
        # Mirror the resolution order used when saving.
        filename = (
            os.path.join(model_dir, 'interrupted_state.pth')
            if model_dir is not None
            else INTERRUPTED_STATE_FILE
        )

    if not osp.exists(filename):
        return None

    # Load onto CPU so resuming does not require the original device layout.
    return torch.load(filename, map_location="cpu")
def requeue_job():
    r"""Requeue the current job via ``scontrol requeue ${SLURM_JOBID}``.

    No-op outside SLURM; only the rank-0 main process issues the command so
    the job is requeued exactly once.
    """
    if SLURM_JOBID is None:
        return

    is_rank_zero = os.environ['SLURM_PROCID'] == '0'
    if is_rank_zero and os.getpid() == MAIN_PID:
        logger.info(f"Requeueing job {SLURM_JOBID}")
        subprocess.check_call(shlex.split(f"scontrol requeue {SLURM_JOBID}"))
def get_ifname():
    """Return the device name of the default network interface (via ifcfg)."""
    return ifcfg.default_interface()["device"]
import os
import shlex
import signal
import subprocess
import threading
from os import path as osp
from typing import Any, Optional, Tuple
import ifcfg
import torch
from habitat import logger
EXIT = threading.Event()
EXIT.clear()
REQUEUE = threading.Event()
REQUEUE.clear()
MAIN_PID = os.getpid()
SLURM_JOBID = os.environ.get("SLURM_JOB_ID", None)
INTERRUPTED_STATE_FILE = osp.join(
os.environ["HOME"], ".interrupted_states", f"{SLURM_JOBID}.pth"
)
def _clean_exit_handler(signum, frame):
EXIT.set()
print("Exiting cleanly", flush=True)
def _requeue_handler(signal, frame):
print("Got signal to requeue", flush=True)
EXIT.set()
REQUEUE.set()
def add_signal_handlers():
signal.signal(signal.SIGINT, _clean_exit_handler)
signal.signal(signal.SIGTERM, _clean_exit_handler)
signal.signal(signal.SIGUSR2, _clean_exit_handler)
signal.signal(signal.SIGUSR1, _requeue_handler)
def save_interrupted_state(state: Any, filename: str = None, model_dir: str = None):
if SLURM_JOBID is None and filename is None:
logger.warn("SLURM_JOBID is none, not saving interrupted state")
return
if filename is None:
if model_dir is not None:
filename = os.path.join(model_dir, 'interrupted_state.pth')
else:
filename = INTERRUPTED_STATE_FILE
torch.save(state, filename)
def load_interrupted_state(filename: str = None, model_dir: str = None) -> Optional[Any]:
if SLURM_JOBID is None and filename is None:
return None
if filename is None:
if model_dir is not None:
filename = os.path.join(model_dir, 'interrupted_state.pth')
else:
filename = INTERRUPTED_STATE_FILE
if not osp.exists(filename):
return None
return torch.load(filename, map_location="cpu")
def requeue_job():
if SLURM_JOBID is None:
return
if os.environ['SLURM_PROCID'] == '0' and os.getpid() == MAIN_PID:
logger.info(f"Requeueing job {SLURM_JOBID}")
subprocess.check_call(shlex.split(f"scontrol requeue {SLURM_JOBID}"))
def get_ifname():
return ifcfg.default_interface()["device"] | true | true |
f7fea1617c298d6aef3b24ec994ae3293db8c25b | 4,360 | py | Python | app/recipe/tests/test_ingredient_api.py | batorgil-it/recipe-app-api | 9bd69bccc44242433343161e090855aa0a8c2843 | [
"MIT"
] | null | null | null | app/recipe/tests/test_ingredient_api.py | batorgil-it/recipe-app-api | 9bd69bccc44242433343161e090855aa0a8c2843 | [
"MIT"
] | null | null | null | app/recipe/tests/test_ingredient_api.py | batorgil-it/recipe-app-api | 9bd69bccc44242433343161e090855aa0a8c2843 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientApiTests(TestCase):
    """Tests for unauthenticated access to the ingredients API."""
    def setUp(self):
        self.client = APIClient()
    def test_login_required(self):
        """Test that login is required to access the ingredients endpoint."""
        res = self.client.get(INGREDIENTS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientApiTests(TestCase):
    """Tests for the ingredients API as an authenticated user."""
    def setUp(self):
        # Every test runs as this freshly created, authenticated user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@test.com',
            'testpass'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_ingredient_list(self):
        """Test retrieving the list of ingredients, ordered by name descending."""
        Ingredient.objects.create(user=self.user, name="Kale")
        Ingredient.objects.create(user=self.user, name="Salt")
        res = self.client.get(INGREDIENTS_URL)
        ingredients = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_ingredients_limited_to_user(self):
        """Test that only the authenticated user's ingredients are returned."""
        user2 = get_user_model().objects.create_user(
            'other@fibo.cloud',
            'testpass'
        )
        # This ingredient belongs to another user and must not appear.
        Ingredient.objects.create(user=user2, name='Vinegar')
        ingredient = Ingredient.objects.create(user=self.user, name='Tumefic')
        res = self.client.get(INGREDIENTS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], ingredient.name)
    def test_create_ingredients_successful(self):
        """Test creating a new ingredient through the API."""
        payload = {'name': 'Cabbage'}
        self.client.post(INGREDIENTS_URL, payload)
        exists = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
    def test_create_ingredient_invalid(self):
        """Test that creating an ingredient with an empty name fails."""
        payload = {'name': ''}
        res = self.client.post(INGREDIENTS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_retrieve_ingredient_assigned_to_recipes(self):
        """Test filtering ingredients down to those assigned to recipes."""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        ingredient2 = Ingredient.objects.create(user=self.user, name='Turkey')
        recipe = Recipe.objects.create(
            title='Coriander eggs on toast',
            time_minutes=10,
            price=5.00,
            user=self.user,
        )
        # Only ingredient1 is attached to a recipe, so only it should match.
        recipe.ingredients.add(ingredient1)
        res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
        serializer1 = IngredientSerializer(ingredient1)
        serializer2 = IngredientSerializer(ingredient2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
    def test_retrieve_ingredient_assigned_unique(self):
        """Test that assigned-only filtering returns each ingredient once."""
        ingredient = Ingredient.objects.create(
            user=self.user, name='Eggs')
        Ingredient.objects.create(user=self.user, name='Cheese')
        recipe1 = Recipe.objects.create(
            title='Pancakes',
            time_minutes=5,
            price=3.00,
            user=self.user,
        )
        recipe1.ingredients.add(ingredient)
        recipe2 = Recipe.objects.create(
            title='Porridge',
            time_minutes=3,
            price=2.00,
            user=self.user,
        )
        # The same ingredient appears in two recipes but must be listed once.
        recipe2.ingredients.add(ingredient)
        res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
| 33.538462 | 78 | 0.655963 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientApiTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
Ingredient.objects.create(user=self.user, name="Kale")
Ingredient.objects.create(user=self.user, name="Salt")
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'other@fibo.cloud',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumefic')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredients_successful(self):
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredient_assigned_to_recipes(self):
ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
ingredient2 = Ingredient.objects.create(user=self.user, name='Turkey')
recipe = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=10,
price=5.00,
user=self.user,
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredient_assigned_unique(self):
ingredient = Ingredient.objects.create(
user=self.user, name='Eggs')
Ingredient.objects.create(user=self.user, name='Cheese')
recipe1 = Recipe.objects.create(
title='Pancakes',
time_minutes=5,
price=3.00,
user=self.user,
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='Porridge',
time_minutes=3,
price=2.00,
user=self.user,
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
| true | true |
f7fea197c5b830dace3c2856d8b12e8ec7f4599d | 124 | py | Python | blanc_basic_news/apps.py | src-r-r/blanc-basic-news | d381207ca19779a870ae8002eeca89c4120c4604 | [
"BSD-3-Clause"
] | null | null | null | blanc_basic_news/apps.py | src-r-r/blanc-basic-news | d381207ca19779a870ae8002eeca89c4120c4604 | [
"BSD-3-Clause"
] | null | null | null | blanc_basic_news/apps.py | src-r-r/blanc-basic-news | d381207ca19779a870ae8002eeca89c4120c4604 | [
"BSD-3-Clause"
] | 1 | 2020-10-17T00:34:25.000Z | 2020-10-17T00:34:25.000Z | from django.apps import AppConfig
class BlancBasicNewsConfig(AppConfig):
    """Django app configuration for the blanc_basic_news app."""
    name = 'blanc_basic_news'
    # Short app label used in model references and the admin.
    label = 'news'
| 17.714286 | 38 | 0.741935 | from django.apps import AppConfig
class BlancBasicNewsConfig(AppConfig):
name = 'blanc_basic_news'
label = 'news'
| true | true |
f7fea1aee2403c3c2398f649b31634e0eb2c5b93 | 90,631 | py | Python | contentcuration/contentcuration/models.py | ozer550/studio | b69c625d223350b1b4a6909904ec4b44f80b211d | [
"MIT"
] | 6 | 2016-02-02T12:49:54.000Z | 2017-12-05T21:00:01.000Z | contentcuration/contentcuration/models.py | ozer550/studio | b69c625d223350b1b4a6909904ec4b44f80b211d | [
"MIT"
] | 195 | 2015-05-19T20:37:51.000Z | 2018-02-27T19:06:54.000Z | contentcuration/contentcuration/models.py | ozer550/studio | b69c625d223350b1b4a6909904ec4b44f80b211d | [
"MIT"
] | 33 | 2015-05-06T19:19:09.000Z | 2017-11-16T12:50:54.000Z | import functools
import hashlib
import json
import logging
import os
import urllib.parse
import uuid
from datetime import datetime
import pytz
from celery import states
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.core.cache import cache
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from django.core.files.storage import FileSystemStorage
from django.core.mail import send_mail
from django.db import IntegrityError
from django.db import models
from django.db.models import Count
from django.db.models import Exists
from django.db.models import F
from django.db.models import Index
from django.db.models import IntegerField
from django.db.models import JSONField
from django.db.models import Max
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models import Sum
from django.db.models import UUIDField as DjangoUUIDField
from django.db.models import Value
from django.db.models.expressions import ExpressionList
from django.db.models.expressions import RawSQL
from django.db.models.functions import Cast
from django.db.models.functions import Lower
from django.db.models.indexes import IndexExpression
from django.db.models.query_utils import DeferredAttribute
from django.db.models.sql import Query
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import gettext as _
from django_cte import With
from le_utils import proquint
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import languages
from le_utils.constants import roles
from model_utils import FieldTracker
from mptt.models import MPTTModel
from mptt.models import raise_if_unsaved
from mptt.models import TreeForeignKey
from postmark.core import PMMailInactiveRecipientException
from postmark.core import PMMailUnauthorizedException
from rest_framework.authtoken.models import Token
from contentcuration.constants import channel_history
from contentcuration.db.models.expressions import Array
from contentcuration.db.models.functions import ArrayRemove
from contentcuration.db.models.functions import Unnest
from contentcuration.db.models.manager import CustomContentNodeTreeManager
from contentcuration.db.models.manager import CustomManager
from contentcuration.statistics import record_channel_stats
from contentcuration.utils.cache import delete_public_channel_cache_keys
from contentcuration.utils.parser import load_json_string
# Channel permission levels.
EDIT_ACCESS = "edit"
VIEW_ACCESS = "view"
# Default per-user content defaults applied when creating content; also the
# template for User.preferences (serialized below).
DEFAULT_CONTENT_DEFAULTS = {
    'license': None,
    'language': None,
    'author': None,
    'aggregator': None,
    'provider': None,
    'copyright_holder': None,
    'license_description': None,
    'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5,
    'm_value': 5,
    'n_value': 5,
    'auto_derive_video_thumbnail': True,
    'auto_derive_audio_thumbnail': True,
    'auto_derive_document_thumbnail': True,
    'auto_derive_html5_thumbnail': True,
    'auto_derive_exercise_thumbnail': True,
    'auto_randomize_questions': True,
}
# JSON-encoded copy used as the default for the User.preferences text field.
DEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False)
def to_pk(model_or_pk):
    """Normalize a model instance or raw primary key to the primary key."""
    return model_or_pk.pk if isinstance(model_or_pk, models.Model) else model_or_pk
class UserManager(BaseUserManager):
    """Manager supplying user and superuser creation for the custom User model."""

    def create_user(self, email, first_name, last_name, password=None):
        """Create and persist a regular user; ``email`` is required."""
        if not email:
            raise ValueError('Email address not specified')

        user = self.model(email=self.normalize_email(email))
        user.set_password(password)
        user.first_name = first_name
        user.last_name = last_name
        user.save(using=self._db)
        return user

    def create_superuser(self, email, first_name, last_name, password=None):
        """Create a regular user and then flag it as an admin."""
        user = self.create_user(email, first_name, last_name, password=password)
        user.is_admin = True
        user.save(using=self._db)
        return user
class UniqueActiveUserIndex(Index):
    """Index subclass that emits CREATE UNIQUE INDEX instead of CREATE INDEX.

    Used to enforce uniqueness of active users' emails (see User.Meta) on an
    expression + condition, which Django's UniqueConstraint did not support
    at the time this was written.
    """
    def create_sql(self, model, schema_editor, using='', **kwargs):
        """
        This is a vendored and modified version of the Django create_sql method
        We do this so that we can monkey patch in the unique index statement onto the schema_editor
        while we create the statement for this index, and then revert it to normal.
        We should remove this as soon as Django natively supports UniqueConstraints with Expressions.
        This should hopefully be the case in Django 3.3.
        """
        include = [model._meta.get_field(field_name).column for field_name in self.include]
        condition = self._get_condition_sql(model, schema_editor)
        if self.expressions:
            # Expression-based index: resolve each expression against the model.
            index_expressions = []
            for expression in self.expressions:
                index_expression = IndexExpression(expression)
                index_expression.set_wrapper_classes(schema_editor.connection)
                index_expressions.append(index_expression)
            expressions = ExpressionList(*index_expressions).resolve_expression(
                Query(model, alias_cols=False),
            )
            fields = None
            col_suffixes = None
        else:
            # Plain field-based index.
            fields = [
                model._meta.get_field(field_name)
                for field_name, _ in self.fields_orders
            ]
            col_suffixes = [order[1] for order in self.fields_orders]
            expressions = None
        sql = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(condition)s"
        # Store the normal SQL statement for indexes
        old_create_index_sql = schema_editor.sql_create_index
        # Replace it with our own unique index so that this index actually adds a constraint
        schema_editor.sql_create_index = sql
        # Generate the SQL statement that we want to return
        return_statement = schema_editor._create_index_sql(
            model, fields=fields, name=self.name, using=using,
            db_tablespace=self.db_tablespace, col_suffixes=col_suffixes,
            opclasses=self.opclasses, condition=condition, include=include,
            expressions=expressions, **kwargs,
        )
        # Reinstate the previous index SQL statement so that we have done no harm
        schema_editor.sql_create_index = old_create_index_sql
        # Return our SQL statement
        return return_statement
class User(AbstractBaseUser, PermissionsMixin):
    """Studio account model.

    Tracks identity, per-user storage quota/usage, editing preferences and
    policies, and owns a personal clipboard tree of content nodes (created
    lazily in ``save``).
    """
    email = models.EmailField(max_length=100, unique=True)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField('active', default=False,
                                    help_text='Designates whether this user should be treated as active.')
    is_staff = models.BooleanField('staff status', default=False,
                                   help_text='Designates whether the user can log into this admin site.')
    date_joined = models.DateTimeField('date joined', default=timezone.now)
    # Personal scratch tree for copy/paste operations; created lazily in save().
    clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL)
    preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
    disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload')
    disk_space_used = models.FloatField(default=0, help_text='How many bytes a user has uploaded')
    information = JSONField(null=True)
    content_defaults = JSONField(default=dict)
    policies = JSONField(default=dict, null=True)
    feature_flags = JSONField(default=dict, null=True)
    _field_updates = FieldTracker(fields=[
        # Field to watch for changes
        "disk_space",
    ])
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']
    def __unicode__(self):
        return self.email
    def delete(self):
        """Delete the account along with any channels/collections it solely owns."""
        from contentcuration.viewsets.common import SQCount
        # Remove any invitations associated to this account
        self.sent_to.all().delete()
        # Delete channels associated with this user (if user is the only editor)
        user_query = (
            User.objects.filter(editable_channels__id=OuterRef('id'))
            .values_list('id', flat=True)
            .distinct()
        )
        self.editable_channels.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1).delete()
        # Delete channel collections associated with this user (if user is the only editor)
        user_query = (
            User.objects.filter(channel_sets__id=OuterRef('id'))
            .values_list('id', flat=True)
            .distinct()
        )
        self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1).delete()
        super(User, self).delete()
    def can_edit(self, channel_id):
        """Return True if this user has edit rights on ``channel_id``."""
        return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists()
    def check_space(self, size, checksum):
        """Raise PermissionDenied unless ``size`` bytes fit in the quota.

        A file whose ``checksum`` this user already stores costs no extra
        space, so the check short-circuits to True in that case.
        """
        active_files = self.get_user_active_files()
        if active_files.filter(checksum=checksum).exists():
            return True
        space = self.get_available_space(active_files=active_files)
        if space < size:
            raise PermissionDenied(_("Not enough space. Check your storage under Settings page."))
    def check_channel_space(self, channel):
        """Raise PermissionDenied if the channel's staged files exceed free space.

        Only counts staged checksums the user does not already store.
        """
        active_files = self.get_user_active_files()
        staging_tree_id = channel.staging_tree.tree_id
        channel_files = self.files\
            .filter(contentnode__tree_id=staging_tree_id)\
            .values('checksum')\
            .distinct()\
            .exclude(checksum__in=active_files.values_list('checksum', flat=True))
        staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0)
        if self.get_available_space(active_files=active_files) < (staged_size):
            raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))
    def check_staged_space(self, size, checksum):
        """Like ``check_space`` but measured against the staged-files quota."""
        if self.staged_files.filter(checksum=checksum).exists():
            return True
        space = self.get_available_staged_space()
        if space < size:
            raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))
    def get_available_staged_space(self):
        """Return remaining quota (bytes) counting distinct staged files."""
        space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum("file_size"))['size'] or 0
        return float(max(self.disk_space - space_used, 0))
    def get_available_space(self, active_files=None):
        """Return remaining quota (bytes) counting active (non-deleted) files."""
        return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0))
    def get_user_active_trees(self):
        # Tree ids of the main trees of the user's non-deleted editable channels.
        return self.editable_channels.exclude(deleted=True)\
            .values(tree_id=F("main_tree__tree_id"))
    def get_user_active_files(self):
        # Distinct checksums of this user's files living in active trees,
        # joined via a CTE to avoid a large IN clause.
        cte = With(self.get_user_active_trees().distinct())
        return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\
            .with_cte(cte)\
            .values('checksum')\
            .distinct()
    def get_space_used(self, active_files=None):
        """Return total bytes used by this user's active files."""
        active_files = active_files or self.get_user_active_files()
        files = active_files.aggregate(total_used=Sum('file_size'))
        return float(files['total_used'] or 0)
    def set_space_used(self):
        """Recompute and persist ``disk_space_used``; returns the new value."""
        self.disk_space_used = self.get_space_used()
        self.save()
        return self.disk_space_used
    def get_space_used_by_kind(self):
        """Return a dict mapping content kind id -> bytes used by that kind."""
        active_files = self.get_user_active_files()
        files = active_files.values('preset__kind_id')\
            .annotate(space=Sum('file_size'))\
            .order_by()
        kind_dict = {}
        for item in files:
            kind_dict[item['preset__kind_id']] = item['space']
        return kind_dict
    def email_user(self, subject, message, from_email=None, **kwargs):
        """Send an email to this user, logging (not raising) postmark
        inactive-recipient/unauthorized errors."""
        try:
            # msg = EmailMultiAlternatives(subject, message, from_email, [self.email])
            # msg.attach_alternative(kwargs["html_message"],"text/html")
            # msg.send()
            send_mail(subject, message, from_email, [self.email], **kwargs)
        except (PMMailInactiveRecipientException, PMMailUnauthorizedException) as e:
            logging.error(str(e))
    def clean(self):
        super(User, self).clean()
        self.email = self.__class__.objects.normalize_email(self.email)
    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        """
        Returns the short name for the user.
        """
        return self.first_name
    def get_token(self):
        """Return (creating if needed) the user's DRF auth token key."""
        token, _ = Token.objects.get_or_create(user=self)
        return token.key
    def save(self, *args, **kwargs):
        # Local import to avoid a circular import at module load time.
        from contentcuration.utils.user import calculate_user_storage
        super(User, self).save(*args, **kwargs)
        if 'disk_space' in self._field_updates.changed():
            calculate_user_storage(self.pk)
        # Lazily fill in defaults that require a saved row, then save again
        # once if anything changed.
        changed = False
        if not self.content_defaults:
            self.content_defaults = DEFAULT_CONTENT_DEFAULTS
            changed = True
        if not self.clipboard_tree:
            self.clipboard_tree = ContentNode.objects.create(title=self.email + " clipboard", kind_id=content_kinds.TOPIC)
            self.clipboard_tree.save()
            changed = True
        if changed:
            self.save()
    class Meta:
        verbose_name = "User"
        verbose_name_plural = "Users"
        # Enforce case-insensitive email uniqueness among active users only.
        indexes = [
            UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name="contentcura_email_d4d492_idx")
        ]
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict ``queryset`` to users visible to ``user``:
        self plus anyone sharing an editable or view-only channel."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        # all shared editors
        all_editable = User.editable_channels.through.objects.all()
        editable = all_editable.filter(
            channel_id__in=all_editable.filter(user_id=user.pk).values_list("channel_id", flat=True)
        )
        # all shared viewers
        all_view_only = User.view_only_channels.through.objects.all()
        view_only = all_view_only.filter(
            channel_id__in=all_view_only.filter(user_id=user.pk).values_list("channel_id", flat=True)
        )
        return queryset.filter(
            Q(pk=user.pk)
            | Q(pk__in=editable.values_list("user_id", flat=True))
            | Q(pk__in=view_only.values_list("user_id", flat=True))
        )
    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict ``queryset`` to users editable by ``user`` (self, or all
        for admins)."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(pk=user.pk)
    @classmethod
    def get_for_email(cls, email, **filters):
        """
        Returns the appropriate User record given an email, ordered by:
        - those with is_active=True first, which there should only ever be one
        - otherwise by ID DESC so most recent inactive shoud be returned
        :param email: A string of the user's email
        :param filters: Additional filters to filter the User queryset
        :return: User or None
        """
        return User.objects.filter(email__iexact=email.strip(), **filters)\
            .order_by("-is_active", "-id").first()
class UUIDField(models.CharField):
    """CharField that stores a UUID as its 32-character hex string."""

    def __init__(self, *args, **kwargs):
        # Force the column width to exactly one hex-encoded UUID.
        kwargs['max_length'] = 32
        super(UUIDField, self).__init__(*args, **kwargs)

    def prepare_value(self, value):
        """Normalize ``uuid.UUID`` instances to hex form for form rendering."""
        return value.hex if isinstance(value, uuid.UUID) else value

    def get_default(self):
        """Return the field default, hex-encoded when it is a ``uuid.UUID``."""
        default = super(UUIDField, self).get_default()
        return default.hex if isinstance(default, uuid.UUID) else default

    def to_python(self, value):
        """Coerce deserialized values to the canonical hex-string form."""
        return value.hex if isinstance(value, uuid.UUID) else value
class MPTTTreeIDManager(models.Model):
    """
    Dummy model whose auto-incrementing primary key is used as the MPTT tree id.

    Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,
    the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.
    As we are using this table only for the ID generation, it does not need any fields.
    We resolve this by creating a dummy table and using its ID as the tree index to take advantage of the db's
    concurrency-friendly way of generating sequential integer IDs. There is a custom migration that ensures
    that the number of records (and thus id) matches the max tree ID number when this table gets added.
    """
def file_on_disk_name(instance, filename):
    """
    Create a namespaced file path from the File object's checksum property.
    This path will be used to store the content copy.

    :param instance: File (content File model)
    :param filename: str
    :return: str
    """
    return generate_file_on_disk_name(instance.checksum, filename)
def generate_file_on_disk_name(checksum, filename):
    """
    Return the namespaced on-disk path for a file with *checksum*.

    Separated from file_on_disk_name to allow a simple way to check whether
    the file already exists. Files are sharded into directories named by the
    first two hex digits of the checksum; the directory is created on demand.

    :param checksum: str hex digest of the file contents
    :param filename: str original file name (only its extension is kept, lowercased)
    :return: str path under settings.STORAGE_ROOT
    """
    h = checksum
    basename, ext = os.path.splitext(filename)
    directory = os.path.join(settings.STORAGE_ROOT, h[0], h[1])
    # exist_ok avoids the race where a concurrent request creates the
    # directory between an exists() check and makedirs() (FileExistsError).
    os.makedirs(directory, exist_ok=True)
    return os.path.join(directory, h + ext.lower())
def object_storage_name(instance, filename):
    """
    Create a namespaced storage key from the File object's checksum property.
    This path will be used to store the content copy.

    :param instance: File (content File model)
    :param filename: str
    :return: str
    """
    # Fall back to the file format's extension when the filename has none.
    fallback_ext = '.{}'.format(instance.file_format_id) if instance.file_format_id else ''
    return generate_object_storage_name(instance.checksum, filename, fallback_ext)
def generate_object_storage_name(checksum, filename, default_ext=''):
    """
    Return the object-storage key for a file with *checksum*.

    Separated from file_on_disk_name to allow a simple way to check whether
    the file already exists. Keys are sharded by the first two hex digits of
    the checksum.

    :param checksum: str hex digest of the file contents
    :param filename: str original file name (only its extension is kept, lowercased)
    :param default_ext: str extension (with leading dot) used when *filename* has none
    :return: str storage key, always "/"-separated
    """
    h = checksum
    basename, actual_ext = os.path.splitext(filename)
    ext = actual_ext if actual_ext else default_ext
    # Use / instead of os.path.join as Windows makes this \\
    directory = "/".join([settings.STORAGE_ROOT, h[0], h[1]])
    # Bug fix: the final segment previously used os.path.join, which on
    # Windows would emit a backslash — invalid in an object-storage key.
    return "/".join([directory, h + ext.lower()])
def generate_storage_url(filename, request=None, *args):
    """
    Generate a storage URL for the given content filename.

    :param filename: str checksum-named file (e.g. "<checksum>.mp4")
    :param request: unused; kept for interface compatibility
    :return: str URL from which the file can be fetched
    :raises ValueError: when the RUN_MODE environment variable is set to an
        unrecognized value (previously this fell through to an obscure
        NameError on the return statement)
    """
    path = generate_object_storage_name(os.path.splitext(filename)[0], filename)

    # There are three scenarios where Studio might be run as:
    #
    # 1. In normal kubernetes, nginx will proxy for us. We'll know we're in kubernetes when the
    # environment variable RUN_MODE=k8s
    #
    # 2. In Docker Compose and bare metal runserver, we'll be running in runserver, and minio
    # will be exposed in port 9000 in the host's localhost network.

    # Note (aron): returning the true storage URL (e.g. https://storage.googleapis.com/storage/a.mp4)
    # isn't too important, because we have CDN in front of our servers, so it should be cached.
    # But change the logic here in case there is a potential for bandwidth and latency improvement.

    # Detect our current state first
    run_mode = os.getenv("RUN_MODE")

    # if we're running inside k8s, then just serve the normal /content/{storage,databases} URL,
    # and let nginx handle proper proxying.
    if run_mode == "k8s":
        url = "/content/{path}".format(
            path=path,
        )

    # if we're in docker-compose or in baremetal, just return the object storage URL as localhost:9000
    elif run_mode == "docker-compose" or run_mode is None:
        # generate the minio storage URL, so we can get the GET parameters that give everyone
        # access even if they don't need to log in
        params = urllib.parse.urlparse(default_storage.url(path)).query
        host = "localhost"
        port = 9000  # hardcoded to the default minio port
        url = "http://{host}:{port}/{bucket}/{path}?{params}".format(
            host=host,
            port=port,
            bucket=settings.AWS_S3_BUCKET_NAME,
            path=path,
            params=params,
        )

    else:
        # Fail loudly on a misconfigured RUN_MODE instead of letting the
        # unbound `url` below raise a confusing NameError.
        raise ValueError("Unrecognized RUN_MODE: {}".format(run_mode))

    return url
class FileOnDiskStorage(FileSystemStorage):
    """
    Override FileSystemStorage's default save method to ignore duplicated files.
    """

    def get_available_name(self, name):
        # Content files are named by checksum, so a name collision means the
        # bytes are identical; reuse the same name instead of renaming.
        return name

    def _save(self, name, content):
        if self.exists(name):
            # if the file exists, do not call the superclass's _save method
            # Bug fix: logging.warn() is a deprecated alias, removed in
            # Python 3.13; logging.warning() is the supported spelling.
            logging.warning('Content copy "%s" already exists!' % name)
            return name
        return super(FileOnDiskStorage, self)._save(name, content)
class SecretToken(models.Model):
    """Tokens for channels"""
    token = models.CharField(max_length=100, unique=True)
    is_primary = models.BooleanField(default=False)

    @classmethod
    def exists(cls, token):
        """
        Return True when the given token string is already stored,
        False otherwise.
        """
        return cls.objects.filter(token=token).exists()

    @classmethod
    def generate_new_token(cls):
        """
        Creates a primary secret token for the current channel using a proquint
        string. Creates a secondary token containing the channel id.

        These tokens can be used to refer to the channel to download its
        content database.

        :raises ValueError: if no unique token could be produced.
        """
        # Try a bounded number of times to produce a token that is not
        # already taken, then give up with an explicit error.
        attempts_remaining = 100
        while attempts_remaining:
            candidate = proquint.generate()
            if not SecretToken.exists(candidate):
                return candidate
            attempts_remaining -= 1
        raise ValueError("Cannot generate new token")

    def __str__(self):
        return "{}-{}".format(self.token[:5], self.token[5:])
def get_channel_thumbnail(channel):
    """Return the best available thumbnail for *channel*.

    Accepts either a Channel instance or a dict of channel attributes.
    Preference order: inline base64 encoding, then a non-static stored
    thumbnail file, then the static placeholder image.
    """
    attrs = channel if isinstance(channel, dict) else channel.__dict__
    encoding = attrs.get("thumbnail_encoding")
    if encoding and encoding.get("base64"):
        return encoding["base64"]
    thumbnail = attrs.get("thumbnail")
    if thumbnail and 'static' not in thumbnail:
        return generate_storage_url(thumbnail)
    return '/static/img/kolibri_placeholder.png'
# Explicit name for the DB index on Channel.name (see Channel.Meta.indexes).
CHANNEL_NAME_INDEX_NAME = "channel_name_idx"

# A list of all the FKs from Channel object
# to ContentNode trees
# used for permissions filtering
CHANNEL_TREES = (
    "main_tree",
    "chef_tree",
    "trash_tree",
    "staging_tree",
    "previous_tree",
)
def boolean_val(val):
    """Wrap *val* as a constant boolean expression usable in annotations."""
    output_field = models.BooleanField()
    return Value(val, output_field=output_field)
class PermissionCTE(With):
    """Common table expression over a user's channel-permission m2m table,
    expanded to one row per (user_id, channel_id, tree_id)."""

    # One lookup path per Channel->ContentNode tree FK.
    tree_id_fields = [
        "channel__{}__tree_id".format(tree) for tree in CHANNEL_TREES
    ]

    def __init__(self, model, user_id, **kwargs):
        # Unnest the per-channel array of tree ids, dropping NULLs for
        # trees the channel does not currently have.
        rows = model.objects.filter(user_id=user_id).annotate(
            tree_id=Unnest(
                ArrayRemove(Array(*self.tree_id_fields), None),
                output_field=models.IntegerField(),
            )
        )
        super(PermissionCTE, self).__init__(
            queryset=rows.values("user_id", "channel_id", "tree_id"), **kwargs
        )

    @classmethod
    def editable_channels(cls, user_id):
        """CTE of the channels the user can edit."""
        return PermissionCTE(User.editable_channels.through, user_id, name="editable_channels_cte")

    @classmethod
    def view_only_channels(cls, user_id):
        """CTE of the channels the user can view but not edit."""
        return PermissionCTE(User.view_only_channels.through, user_id, name="view_only_channels_cte")

    def exists(self, *filters):
        """Build an EXISTS() subquery testing membership in this CTE under *filters*."""
        return Exists(self.queryset().filter(*filters).values("user_id"))
class Channel(models.Model):
    """ Permissions come from association with organizations """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=400, blank=True)
    tagline = models.CharField(max_length=150, blank=True, null=True)
    version = models.IntegerField(default=0)
    thumbnail = models.TextField(blank=True, null=True)
    thumbnail_encoding = JSONField(default=dict)
    editors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='editable_channels',
        verbose_name="editors",
        help_text="Users with edit rights",
        blank=True,
    )
    viewers = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='view_only_channels',
        verbose_name="viewers",
        help_text="Users with view only rights",
        blank=True,
    )
    language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL)
    # The various ContentNode trees a channel may own (see CHANNEL_TREES).
    trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL)
    clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL)
    main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL)
    staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL)
    chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL)
    previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', on_delete=models.SET_NULL)
    bookmarked_by = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='bookmarked_channels',
        verbose_name="bookmarked by",
    )
    deleted = models.BooleanField(default=False, db_index=True)
    public = models.BooleanField(default=False, db_index=True)
    preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
    content_defaults = JSONField(default=dict)
    priority = models.IntegerField(default=0, help_text="Order to display public channels")
    last_published = models.DateTimeField(blank=True, null=True)
    secret_tokens = models.ManyToManyField(
        SecretToken,
        related_name='channels',
        verbose_name="secret tokens",
        blank=True,
    )
    source_url = models.CharField(max_length=200, blank=True, null=True)
    demo_server_url = models.CharField(max_length=200, blank=True, null=True)

    # Fields specific to content generated by Ricecooker
    source_id = models.CharField(max_length=200, blank=True, null=True)
    source_domain = models.CharField(max_length=300, blank=True, null=True)
    ricecooker_version = models.CharField(max_length=100, blank=True, null=True)

    # Fields to calculate when channel is published
    published_data = JSONField(default=dict)
    icon_encoding = models.TextField(blank=True, null=True)
    total_resource_count = models.IntegerField(default=0)
    published_kind_count = models.TextField(blank=True, null=True)
    published_size = models.FloatField(default=0)
    included_languages = models.ManyToManyField(
        "Language",
        related_name='channels',
        verbose_name="languages",
        blank=True,
    )

    _field_updates = FieldTracker(fields=[
        # Field to watch for changes
        "description",
        "language_id",
        "thumbnail",
        "name",
        "thumbnail_encoding",
        # watch these fields for changes
        # but exclude them from setting changed
        # on the main tree
        "deleted",
        "public",
        "main_tree_id",
        "version",
    ])

    @classmethod
    def get_editable(cls, user, channel_id):
        """Return the channel with *channel_id* if *user* may edit it.

        Raises Channel.DoesNotExist when the channel is absent or not editable.
        """
        return cls.filter_edit_queryset(cls.objects.all(), user).get(id=channel_id)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict *queryset* to channels *user* may edit (annotated with `edit`)."""
        user_id = not user.is_anonymous and user.id
        # Anonymous users have no id, so they cannot edit anything:
        # it won't return anything
        if not user_id:
            return queryset.none()

        edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef("id")))
        queryset = queryset.annotate(edit=edit)
        if user.is_admin:
            return queryset

        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict *queryset* to channels *user* may view.

        Annotates `edit` and `view`; public (non-deleted) channels and
        channels with a pending invitation for the user are also included.
        """
        user_id = not user.is_anonymous and user.id
        user_email = not user.is_anonymous and user.email

        if user_id:
            filters = dict(user_id=user_id, channel_id=OuterRef("id"))
            edit = Exists(User.editable_channels.through.objects.filter(**filters).values("user_id"))
            view = Exists(User.view_only_channels.through.objects.filter(**filters).values("user_id"))
        else:
            edit = boolean_val(False)
            view = boolean_val(False)

        queryset = queryset.annotate(
            edit=edit,
            view=view,
        )

        if user_id and user.is_admin:
            return queryset

        permission_filter = Q()
        if user_id:
            # A not-yet-answered invitation grants visibility too.
            pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list(
                "channel_id", flat=True
            )
            permission_filter = (
                Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels)
            )

        return queryset.filter(permission_filter | Q(deleted=False, public=True))

    @classmethod
    def get_all_channels(cls):
        """Return all channels with main_tree, editors and viewers prefetched."""
        return cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct()

    def resource_size_key(self):
        """Cache key under which this channel's resource size is stored."""
        return "{}_resource_size".format(self.pk)

    # Might be good to display resource size, but need to improve query time first
    def get_resource_size(self):
        """Return the total size in bytes of distinct files in the main tree (cached)."""
        cached_data = cache.get(self.resource_size_key())
        if cached_data:
            return cached_data
        tree_id = self.main_tree.tree_id
        files = File.objects.select_related('contentnode', 'assessment_item')\
            .filter(contentnode__tree_id=tree_id)\
            .values('checksum', 'file_size')\
            .distinct()\
            .aggregate(resource_size=Sum('file_size'))
        cache.set(self.resource_size_key(), files['resource_size'] or 0, None)
        return files['resource_size'] or 0

    def on_create(self):
        """First-save hook: record stats and create the default trees."""
        record_channel_stats(self, None)

        if not self.content_defaults:
            self.content_defaults = DEFAULT_CONTENT_DEFAULTS

        if not self.main_tree:
            self.main_tree = ContentNode.objects.create(
                title=self.name,
                kind_id=content_kinds.TOPIC,
                content_id=self.id,
                node_id=self.id,
                original_channel_id=self.id,
                source_channel_id=self.id,
                changed=True,
                complete=True,
            )
            # Ensure that locust or unit tests raise if there are any concurrency issues with tree ids.
            if settings.DEBUG:
                if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1:
                    raise AssertionError

        if not self.trash_tree:
            self.trash_tree = ContentNode.objects.create(
                title=self.name,
                kind_id=content_kinds.TOPIC,
                content_id=self.id,
                node_id=self.id,
            )

        # if this change affects the published channel list, clear the channel cache
        if self.public and (self.main_tree and self.main_tree.published):
            delete_public_channel_cache_keys()

    def on_update(self):
        """Pre-save hook for existing channels: propagate metadata changes.

        Marks the main tree changed, cleans up orphaned thumbnails, refreshes
        editor storage, and tears down exports when the channel is deleted.
        """
        from contentcuration.utils.user import calculate_user_storage
        original_values = self._field_updates.changed()

        record_channel_stats(self, original_values)

        blacklist = set([
            "public",
            "main_tree_id",
            "version",
        ])

        if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)):
            # Changing channel metadata should also mark main_tree as changed
            self.main_tree.changed = True

        # Check if original thumbnail is no longer referenced
        if "thumbnail" in original_values and original_values["thumbnail"] and 'static' not in original_values["thumbnail"]:
            filename, ext = os.path.splitext(original_values["thumbnail"])
            delete_empty_file_reference(filename, ext[1:])

        # Refresh storage for all editors on the channel
        if "deleted" in original_values:
            for editor in self.editors.all():
                calculate_user_storage(editor.pk)

        # Delete db if channel has been deleted and mark as unpublished
        if "deleted" in original_values and not original_values["deleted"]:
            self.pending_editors.all().delete()
            export_db_storage_path = os.path.join(settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id))
            if default_storage.exists(export_db_storage_path):
                default_storage.delete(export_db_storage_path)
            if self.main_tree:
                self.main_tree.published = False

        if self.main_tree and self.main_tree._field_updates.changed():
            self.main_tree.save()

        # if this change affects the published channel list, clear the channel cache
        if "public" in original_values and (self.main_tree and self.main_tree.published):
            delete_public_channel_cache_keys()

    def save(self, *args, **kwargs):
        # Dispatch to the creation or update hook before persisting.
        if self._state.adding:
            self.on_create()
        else:
            self.on_update()
        super(Channel, self).save(*args, **kwargs)

    def get_thumbnail(self):
        """Return the channel's thumbnail (see get_channel_thumbnail)."""
        return get_channel_thumbnail(self)

    def has_changes(self):
        """True when any node in the main tree (including the root) is changed."""
        return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists()

    def get_date_modified(self):
        """Latest `modified` timestamp across the main tree."""
        return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified']

    def get_resource_count(self):
        """Count of distinct non-topic resources in the main tree."""
        return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count()

    def get_human_token(self):
        """The primary (proquint, human-readable) secret token."""
        return self.secret_tokens.get(is_primary=True)

    def get_channel_id_token(self):
        """The secondary secret token whose value is the channel id."""
        return self.secret_tokens.get(token=self.id)

    def make_token(self):
        """Create the primary token and ensure the channel-id token exists."""
        token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True)
        self.secret_tokens.get_or_create(token=self.id)
        return token

    def make_public(self, bypass_signals=False):
        """
        Sets the current channel object to be public and viewable by anyone.

        If bypass_signals is True, update the model in such a way that we
        prevent any model signals from running due to the update.

        Returns the same channel object.
        """
        if bypass_signals:
            self.public = True  # set this attribute still, so the object will be updated
            Channel.objects.filter(id=self.id).update(public=True)
            # clear the channel cache
            delete_public_channel_cache_keys()
        else:
            self.public = True
            self.save()

        return self

    def mark_created(self, user):
        """Record a CREATION history entry attributed to *user*."""
        self.history.create(actor_id=to_pk(user), action=channel_history.CREATION)

    def mark_publishing(self, user):
        """Record a PUBLICATION history entry and flag the main tree as publishing."""
        self.history.create(actor_id=to_pk(user), action=channel_history.PUBLICATION)
        self.main_tree.publishing = True
        self.main_tree.save()

    def mark_deleted(self, user):
        """Soft-delete the channel, recording a DELETION history entry."""
        self.history.create(actor_id=to_pk(user), action=channel_history.DELETION)
        self.deleted = True
        self.save()

    def mark_recovered(self, user):
        """Undo a soft delete, recording a RECOVERY history entry."""
        self.history.create(actor_id=to_pk(user), action=channel_history.RECOVERY)
        self.deleted = False
        self.save()

    @property
    def deletion_history(self):
        """History entries for soft deletions of this channel."""
        return self.history.filter(action=channel_history.DELETION)

    @property
    def publishing_history(self):
        """History entries for publications of this channel."""
        return self.history.filter(action=channel_history.PUBLICATION)

    @classmethod
    def get_public_channels(cls, defer_nonmain_trees=False):
        """
        Get all public channels.

        If defer_nonmain_trees is True, defer the loading of all
        trees except for the main_tree."""
        if defer_nonmain_trees:
            c = (Channel.objects
                 .filter(public=True)
                 .exclude(deleted=True)
                 .select_related('main_tree')
                 .prefetch_related('editors')
                 .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers'))
        else:
            c = Channel.objects.filter(public=True).exclude(deleted=True)

        return c

    class Meta:
        verbose_name = "Channel"
        verbose_name_plural = "Channels"
        indexes = [
            models.Index(fields=["name"], name=CHANNEL_NAME_INDEX_NAME),
        ]
        index_together = [
            ["deleted", "public"]
        ]
# Explicit name for the DB index on ChannelHistory.channel_id.
CHANNEL_HISTORY_CHANNEL_INDEX_NAME = "idx_channel_history_channel_id"


class ChannelHistory(models.Model):
    """
    Model for tracking certain actions performed on a channel
    """
    channel = models.ForeignKey('Channel', null=False, blank=False, related_name='history', on_delete=models.CASCADE)
    actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE)
    performed = models.DateTimeField(default=timezone.now)
    action = models.CharField(max_length=50, choices=channel_history.choices)

    @classmethod
    def prune(cls):
        """
        Prunes history records, keeping only the most recent entry for each
        (channel, action) pair and deleting every older one.
        """
        newest_per_channel_action = (
            cls.objects
            .distinct("channel_id", "action")
            .order_by("channel_id", "action", "-performed")
            .values_list("id", flat=True)
        )
        cls.objects.exclude(id__in=newest_per_channel_action).delete()

    class Meta:
        verbose_name = "Channel history"
        verbose_name_plural = "Channel histories"
        indexes = [
            models.Index(fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME),
        ]
class ChannelSet(models.Model):
    # NOTE: this is referred to as "channel collections" on the front-end, but we need to call it
    # something else as there is already a ChannelCollection model on the front-end
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=400, blank=True)
    public = models.BooleanField(default=False, db_index=True)
    editors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='channel_sets',
        verbose_name="editors",
        help_text="Users with edit rights",
        blank=True,
    )
    secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', on_delete=models.SET_NULL)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict *queryset* to channel sets *user* may edit (annotated with `edit`)."""
        if user.is_anonymous:
            return queryset.none()
        user_id = not user.is_anonymous and user.id
        edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef("id")))
        queryset = queryset.annotate(edit=edit)
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """View rights are identical to edit rights for channel sets."""
        return cls.filter_edit_queryset(queryset, user)

    def get_channels(self):
        """Non-deleted channels in this set, or None when no token is assigned yet."""
        if self.secret_token:
            return self.secret_token.channels.filter(deleted=False)

    def save(self, *args, **kwargs):
        if self._state.adding:
            self.on_create()
        # Bug fix: previously called super().save() with no arguments,
        # silently dropping caller options such as update_fields/using.
        super(ChannelSet, self).save(*args, **kwargs)

    def on_create(self):
        """Ensure a secret token exists before the first save."""
        if not self.secret_token:
            self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token())

    def delete(self, *args, **kwargs):
        super(ChannelSet, self).delete(*args, **kwargs)

        if self.secret_token:
            self.secret_token.delete()
class ContentTag(models.Model):
    # A per-channel tag applied to content nodes; uniqueness is enforced per
    # (tag_name, channel) pair in Meta below.
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    tag_name = models.CharField(max_length=50)
    channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)

    objects = CustomManager()

    def __str__(self):
        return self.tag_name

    class Meta:
        unique_together = ['tag_name', 'channel']
def delegate_manager(method):
    """
    Decorator: forward calls to ``self._base_manager`` when one is set,
    otherwise run the wrapped method itself.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        base = self._base_manager
        if base:
            return getattr(base, method.__name__)(*args, **kwargs)
        return method(self, *args, **kwargs)
    return wrapper
class License(models.Model):
    """
    Normalize the license of ContentNode model
    """
    license_name = models.CharField(max_length=50)
    license_url = models.URLField(blank=True)
    license_description = models.TextField(blank=True)
    copyright_holder_required = models.BooleanField(default=True)
    is_custom = models.BooleanField(default=False)
    exists = models.BooleanField(
        default=False,
        verbose_name="license exists",
        help_text="Tells whether or not a content item is licensed to share",
    )

    @classmethod
    def validate_name(cls, name):
        """Raise ValidationError when *name* is not a known license name."""
        # exists() lets the DB stop at the first match instead of counting
        # every matching row as the previous count() == 0 check did.
        if not cls.objects.filter(license_name=name).exists():
            raise ValidationError('License `{}` does not exist'.format(name))

    def __str__(self):
        return self.license_name
# Explicit DB index names for ContentNode fields; per the comment on
# ContentNode.node_id these are wired up via the Index API in the model Meta.
NODE_ID_INDEX_NAME = "node_id_idx"
NODE_MODIFIED_INDEX_NAME = "node_modified_idx"
NODE_MODIFIED_DESC_INDEX_NAME = "node_modified_desc_idx"
class ContentNode(MPTTModel, models.Model):
    """
    By default, all nodes have a title and can be used as a topic.
    """

    # Random id used internally on Studio (See `node_id` for id used in Kolibri)
    id = UUIDField(primary_key=True, default=uuid.uuid4)

    # the content_id is used for tracking a user's interaction with a piece of
    # content, in the face of possibly many copies of that content. When a user
    # interacts with a piece of content, all substantially similar pieces of
    # content should be marked as such as well. We track these "substantially
    # similar" types of content by having them have the same content_id.
    content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True)
    # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
    node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)

    # TODO: disallow nulls once existing models have been set
    original_channel_id = UUIDField(primary_key=False, editable=False, null=True,
                                    db_index=True)  # Original channel copied from
    source_channel_id = UUIDField(primary_key=False, editable=False, null=True)  # Immediate channel copied from
    # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary)
    original_source_node_id = UUIDField(primary_key=False, editable=False, null=True,
                                        db_index=True)
    source_node_id = UUIDField(primary_key=False, editable=False, null=True)  # Immediate node_id of node copied from

    # Fields specific to content generated by Ricecooker
    source_id = models.CharField(max_length=200, blank=True, null=True)
    source_domain = models.CharField(max_length=300, blank=True, null=True)

    title = models.CharField(max_length=200, blank=True)
    description = models.TextField(blank=True)
    kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
    license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL)
    license_description = models.CharField(max_length=400, null=True, blank=True)
    prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of',
                                          through='PrerequisiteContentRelationship', symmetrical=False, blank=True)
    is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship',
                                        symmetrical=False, blank=True)
    language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)
    tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)
    # No longer used
    sort_order = models.FloatField(max_length=50, default=1, verbose_name="sort order",
                                   help_text="Ascending, lowest number shown first")
    copyright_holder = models.CharField(max_length=200, null=True, blank=True, default="",
                                        help_text="Organization of person who holds the essential rights")
    # legacy field...
    original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates')
    cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones')

    thumbnail_encoding = models.TextField(blank=True, null=True)

    created = models.DateTimeField(default=timezone.now, verbose_name="created")
    modified = models.DateTimeField(auto_now=True, verbose_name="modified")
    published = models.BooleanField(default=False)
    publishing = models.BooleanField(default=False)
    complete = models.BooleanField(null=True)
    changed = models.BooleanField(default=True)
    """
    Extra fields for exercises:
    - type: mastery model to use to determine completion
    - m: m value for M out of N mastery criteria
    - n: n value for M out of N mastery criteria
    """
    extra_fields = JSONField(default=dict, blank=True, null=True)
    author = models.CharField(max_length=200, blank=True, default="", help_text="Who created this content?",
                              null=True)
    aggregator = models.CharField(max_length=200, blank=True, default="", help_text="Who gathered this content together?",
                                  null=True)
    provider = models.CharField(max_length=200, blank=True, default="", help_text="Who distributed this content?",
                                null=True)

    role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER)
    freeze_authoring_data = models.BooleanField(default=False)

    # Fields for metadata labels
    # These fields use a map to store applied labels
    # {
    #     "<label_id1>": true,
    #     "<label_id2>": true,
    # }
    grade_levels = models.JSONField(blank=True, null=True)
    resource_types = models.JSONField(blank=True, null=True)
    learning_activities = models.JSONField(blank=True, null=True)
    accessibility_labels = models.JSONField(blank=True, null=True)
    categories = models.JSONField(blank=True, null=True)
    learner_needs = models.JSONField(blank=True, null=True)

    # A field for storing a suggested duration for the content node
    # this duration should be in seconds.
    suggested_duration = models.IntegerField(blank=True, null=True, help_text="Suggested duration for the content node (in seconds)")

    objects = CustomContentNodeTreeManager()

    # Track all updates and ignore a blacklist of attributes
    # when we check for changes
    _field_updates = FieldTracker()

    # Reusable predicate joining a permission CTE row to this node's tree.
    _permission_filter = Q(tree_id=OuterRef("tree_id"))
@classmethod
def _annotate_channel_id(cls, queryset):
    """Annotate each node with the id of the channel whose main tree owns it."""
    owning_channel = Channel.objects.filter(
        main_tree__tree_id=OuterRef("tree_id")
    ).values_list("id", flat=True)[:1]
    return queryset.annotate(channel_id=Subquery(owning_channel))
@classmethod
def _orphan_tree_id_subquery(cls):
    """Subquery yielding the tree_id of the orphanage root node."""
    orphan_tree_id = cls.objects.filter(
        pk=settings.ORPHANAGE_ROOT_ID
    ).values_list("tree_id", flat=True)[:1]
    # For some reason this now requires an explicit type cast
    # or it gets interpreted as a varchar
    return Cast(orphan_tree_id, output_field=IntegerField())
@classmethod
def filter_edit_queryset(cls, queryset, user):
    """Restrict *queryset* to nodes *user* may edit (orphanage root excluded)."""
    queryset = queryset.exclude(pk=settings.ORPHANAGE_ROOT_ID)
    user_id = None if user.is_anonymous else user.id
    if not user_id:
        return queryset.none()

    editable = PermissionCTE.editable_channels(user_id)
    queryset = queryset.with_cte(editable).annotate(
        edit=editable.exists(cls._permission_filter),
    )

    if user.is_admin:
        return queryset

    # Non-admins may edit their editable channels' trees plus orphaned nodes.
    return queryset.filter(Q(edit=True) | Q(tree_id=cls._orphan_tree_id_subquery()))
@classmethod
def filter_view_queryset(cls, queryset, user):
    """Restrict *queryset* to nodes *user* may view (orphanage root excluded)."""
    user_id = None if user.is_anonymous else user.id

    # Nodes in a public channel's main tree are viewable by anyone.
    queryset = queryset.annotate(
        public=Exists(
            Channel.objects.filter(
                public=True, main_tree__tree_id=OuterRef("tree_id")
            ).values("pk")
        ),
    ).exclude(pk=settings.ORPHANAGE_ROOT_ID)

    if not user_id:
        return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)

    editable = PermissionCTE.editable_channels(user_id)
    viewable = PermissionCTE.view_only_channels(user_id)

    queryset = queryset.with_cte(editable).with_cte(viewable).annotate(
        edit=editable.exists(cls._permission_filter),
        view=viewable.exists(cls._permission_filter),
    )

    if user.is_admin:
        return queryset

    return queryset.filter(
        Q(view=True)
        | Q(edit=True)
        | Q(public=True)
        | Q(tree_id=cls._orphan_tree_id_subquery())
    )
@raise_if_unsaved
def get_root(self):
    """Return the root of this node's tree; a non-topic root is its own root."""
    # Only topics can be root nodes
    is_nontopic_root = self.is_root_node() and self.kind_id != content_kinds.TOPIC
    if is_nontopic_root:
        return self
    return super(ContentNode, self).get_root()
@raise_if_unsaved
def get_root_id(self):
    """Return the primary key of this node's tree root.

    Mirrors get_root(): a root node that is not a topic is its own root.
    """
    # Only topics can be root nodes
    if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
        # Bug fix: previously returned the node object itself (copy-paste
        # from get_root), while the query below returns a pk.
        return self.pk
    return ContentNode.objects.values_list('pk', flat=True).get(
        tree_id=self._mpttfield('tree_id'),
        parent=None,
    )
def get_tree_data(self, levels=float('inf')):
    """
    Returns `levels`-deep tree information starting at current node.

    Args:
      levels (int): depth of tree hierarchy to return
    Returns:
      tree (dict): starting with self, with a children list containing the
        recursive tree data for each child (topics only)
    """
    kind = self.kind_id
    if kind == content_kinds.TOPIC:
        node_data = {
            "title": self.title,
            "kind": kind,
            "node_id": self.node_id,
            "studio_id": self.id,
        }
        if levels > 0:
            node_data["children"] = [
                child.get_tree_data(levels=levels - 1) for child in self.children.all()
            ]
        return node_data
    if kind == content_kinds.EXERCISE:
        return {
            "title": self.title,
            "kind": kind,
            "count": self.assessment_items.count(),
            "node_id": self.node_id,
            "studio_id": self.id,
        }
    # All other kinds report the summed size of their distinct files.
    return {
        "title": self.title,
        "kind": kind,
        "file_size": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],
        "node_id": self.node_id,
        "studio_id": self.id,
    }
def get_original_node(self):
    """Best-effort lookup of the node this one was originally copied from.

    Falls back to the legacy original_node FK, and finally to self.
    """
    if not (self.original_channel_id and self.original_source_node_id):
        return self.original_node or self
    original_tree_id = Channel.objects.select_related("main_tree").get(pk=self.original_channel_id).main_tree.tree_id
    # Prefer a node_id match within the original channel's main tree,
    # then fall back to a content_id match, then to this node itself.
    match = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first()
    if match is None:
        match = ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first()
    return match or self
def get_associated_presets(self):
    """Return (and memoize in the cache, per kind) the format presets for this node's kind."""
    cache_key = "associated_presets_{}".format(self.kind_id)
    presets = cache.get(cache_key)
    if presets:
        return presets
    presets = list(FormatPreset.objects.filter(kind=self.kind).values())
    cache.set(cache_key, presets, None)
    return presets
def get_prerequisites(self):
    """Recursively collect this node's prerequisites.

    Returns a flat list of prerequisite nodes and a nested mapping of
    {prerequisite_pk: {its prerequisites' mapping...}}.
    """
    mapping = {}
    direct = self.prerequisite.all()
    collected = list(direct)
    for node in direct:
        nested_list, nested_mapping = node.get_prerequisites()
        mapping[node.pk] = nested_mapping
        collected.extend(nested_list)
    return collected, mapping
def get_postrequisites(self):
    """Recursively collect the nodes this one is a prerequisite of.

    Returns a flat list of postrequisite nodes and a nested mapping of
    {postrequisite_pk: {its postrequisites' mapping...}}.
    """
    mapping = {}
    direct = self.is_prerequisite_of.all()
    collected = list(direct)
    for node in direct:
        nested_list, nested_mapping = node.get_postrequisites()
        mapping[node.pk] = nested_mapping
        collected.extend(nested_list)
    return collected, mapping
def get_channel_id(self):
    """Return the owning channel's id, preferring a pre-annotated channel_id."""
    if hasattr(self, "channel_id"):
        return self.channel_id
    channel = self.get_channel()
    return channel.id if channel else None
def get_channel(self):
    """Return the Channel owning this node's tree (any tree FK), or None."""
    try:
        root = self.get_root()
        if not root:
            return None
        tree_match = (
            Q(main_tree=root)
            | Q(chef_tree=root)
            | Q(trash_tree=root)
            | Q(staging_tree=root)
            | Q(previous_tree=root)
        )
        return Channel.objects.filter(tree_match).first()
    except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
        return None
def get_thumbnail(self):
# Problems with json.loads, so use ast.literal_eval to get dict
if self.thumbnail_encoding:
thumbnail_data = load_json_string(self.thumbnail_encoding)
if type(thumbnail_data) is dict and thumbnail_data.get("base64"):
return thumbnail_data["base64"]
thumbnail = self.files.filter(preset__thumbnail=True).first()
if thumbnail:
return generate_storage_url(str(thumbnail))
return ""
@classmethod
def get_nodes_with_title(cls, title, limit_to_children_of=None):
"""
Returns all ContentNodes with a given title. If limit_to_children_of
is passed in with an id, only look at all the children of the node with that id.
"""
if limit_to_children_of:
root = cls.objects.get(id=limit_to_children_of)
return root.get_descendants().filter(title=title)
return cls.objects.filter(title=title)
def get_details(self, channel_id=None):
"""
Returns information about the node and its children, including total size, languages, files, etc.
:return: A dictionary with detailed statistics and information about the node.
"""
from contentcuration.viewsets.common import SQArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.common import SQRelatedArrayAgg
from contentcuration.viewsets.common import SQSum
node = ContentNode.objects.filter(pk=self.id).order_by()
descendants = (
self.get_descendants()
.prefetch_related("children", "files", "tags")
.select_related("license", "language")
.values("id")
)
if channel_id:
channel = Channel.objects.filter(id=channel_id)[0]
else:
channel = self.get_channel()
if not descendants.exists():
data = {
"last_update": pytz.utc.localize(datetime.now()).strftime(
settings.DATE_TIME_FORMAT
),
"created": self.created.strftime(settings.DATE_TIME_FORMAT),
"resource_count": 0,
"resource_size": 0,
"includes": {"coach_content": 0, "exercises": 0},
"kind_count": [],
"languages": "",
"accessible_languages": "",
"licenses": "",
"tags": [],
"copyright_holders": "",
"authors": "",
"aggregators": "",
"providers": "",
"sample_pathway": [],
"original_channels": [],
"sample_nodes": [],
}
# Set cache with latest data
cache.set("details_{}".format(self.node_id), json.dumps(data), None)
return data
# Get resources
resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()
nodes = With(
File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
.values("checksum", "file_size")
.order_by(),
name="nodes",
)
file_query = (
nodes.queryset().with_cte(nodes).values("checksum", "file_size").distinct()
)
l_nodes = With(
File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
.values("language_id", "preset_id")
.order_by(),
name="l_nodes",
)
accessible_languages_query = (
l_nodes.queryset()
.filter(preset_id=format_presets.VIDEO_SUBTITLE)
.with_cte(l_nodes)
.values("language__native_name")
.distinct()
)
tags_query = str(
ContentTag.objects.filter(
tagged_content__pk__in=descendants.values_list("pk", flat=True)
)
.values("tag_name")
.annotate(count=Count("tag_name"))
.query
).replace("topic", "'topic'")
kind_count_query = str(
resources.values("kind_id").annotate(count=Count("kind_id")).query
).replace("topic", "'topic'")
node = node.annotate(
resource_count=SQCount(resources, field="id"),
resource_size=SQSum(file_query, field="file_size"),
copyright_holders=SQArrayAgg(
resources.distinct("copyright_holder").order_by("copyright_holder"),
field="copyright_holder",
),
authors=SQArrayAgg(
resources.distinct("author").order_by("author"), field="author"
),
aggregators=SQArrayAgg(
resources.distinct("aggregator").order_by("aggregator"),
field="aggregator",
),
providers=SQArrayAgg(
resources.distinct("provider").order_by("provider"), field="provider"
),
languages=SQRelatedArrayAgg(
descendants.exclude(language=None)
.distinct("language__native_name")
.order_by(),
field="language__native_name",
fieldname="native_name",
),
accessible_languages=SQRelatedArrayAgg(
accessible_languages_query,
field="language__native_name",
fieldname="native_name",
),
licenses=SQRelatedArrayAgg(
resources.exclude(license=None)
.distinct("license__license_name")
.order_by("license__license_name"),
field="license__license_name",
fieldname="license_name",
),
kind_count=RawSQL(
"SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(
kind_count_query
),
(),
),
tags_list=RawSQL(
"SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(tags_query), ()
),
coach_content=SQCount(
resources.filter(role_visibility=roles.COACH), field="id"
),
exercises=SQCount(
resources.filter(kind_id=content_kinds.EXERCISE), field="id"
),
)
# Get sample pathway by getting longest path
# Using resources.aggregate adds a lot of time, use values that have already been fetched
max_level = max(
resources.values_list("level", flat=True).order_by().distinct() or [0]
)
m_nodes = With(
resources.values("id", "level", "tree_id", "lft").order_by(),
name="m_nodes",
)
deepest_node_record = (
m_nodes.queryset()
.with_cte(m_nodes)
.filter(level=max_level)
.values("id")
.order_by("tree_id", "lft")
.first()
)
if deepest_node_record:
deepest_node = ContentNode.objects.get(pk=deepest_node_record["id"])
pathway = (
list(
deepest_node.get_ancestors()
.order_by()
.exclude(parent=None)
.values("title", "node_id", "kind_id")
.order_by()
)
if deepest_node_record
else []
)
sample_nodes = (
[
{
"node_id": n.node_id,
"title": n.title,
"description": n.description,
"thumbnail": n.get_thumbnail(),
"kind": n.kind_id,
}
for n in deepest_node.get_siblings(include_self=True)[0:4]
]
if deepest_node_record
else []
)
# Get list of channels nodes were originally imported from (omitting the current channel)
channel_id = channel and channel.id
originals = (
resources.values("original_channel_id")
.annotate(count=Count("original_channel_id"))
.order_by("original_channel_id")
)
originals = {c["original_channel_id"]: c["count"] for c in originals}
original_channels = (
Channel.objects.exclude(pk=channel_id)
.filter(pk__in=originals.keys(), deleted=False)
.order_by()
)
original_channels = [
{
"id": c.id,
"name": "{}{}".format(
c.name, _(" (Original)") if channel_id == c.id else ""
),
"thumbnail": c.get_thumbnail(),
"count": originals[c.id],
}
for c in original_channels
]
node = (
node.order_by()
.values(
"id",
"resource_count",
"resource_size",
"copyright_holders",
"authors",
"aggregators",
"providers",
"languages",
"accessible_languages",
"coach_content",
"licenses",
"tags_list",
"kind_count",
"exercises",
)
.first()
)
for_educators = {
"coach_content": node["coach_content"],
"exercises": node["exercises"],
}
# Serialize data
data = {
"last_update": pytz.utc.localize(datetime.now()).strftime(
settings.DATE_TIME_FORMAT
),
"created": self.created.strftime(settings.DATE_TIME_FORMAT),
"resource_count": node.get("resource_count", 0),
"resource_size": node.get("resource_size", 0),
"includes": for_educators,
"kind_count": node.get("kind_count", []),
"languages": node.get("languages", ""),
"accessible_languages": node.get("accessible_languages", ""),
"licenses": node.get("licenses", ""),
"tags": node.get("tags_list", []),
"copyright_holders": node["copyright_holders"],
"authors": node["authors"],
"aggregators": node["aggregators"],
"providers": node["providers"],
"sample_pathway": pathway,
"original_channels": original_channels,
"sample_nodes": sample_nodes,
}
# Set cache with latest data
cache.set("details_{}".format(self.node_id), json.dumps(data), None)
return data
def has_changes(self):
mptt_opts = self._mptt_meta
# Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods.
blacklist = set([
'changed',
'modified',
'publishing',
mptt_opts.tree_id_attr,
mptt_opts.left_attr,
mptt_opts.right_attr,
mptt_opts.level_attr,
])
original_values = self._field_updates.changed()
return any((True for field in original_values if field not in blacklist))
def recalculate_editors_storage(self):
from contentcuration.utils.user import calculate_user_storage
for editor in self.files.values_list('uploaded_by_id', flat=True).distinct():
calculate_user_storage(editor)
    def on_create(self):
        """Hook run from save() when the node is first inserted."""
        # New nodes are always dirty, and their files may affect uploader quotas.
        self.changed = True
        self.recalculate_editors_storage()
    def on_update(self):
        """Hook run from save() on updates; keeps an existing dirty flag sticky."""
        self.changed = self.changed or self.has_changes()
    def move_to(self, target, *args, **kwargs):
        """Move this node under ``target``, recomputing storage when the trash tree is involved."""
        # Capture before the move, since self.parent changes afterwards.
        parent_was_trashtree = self.parent.channel_trash.exists()
        super(ContentNode, self).move_to(target, *args, **kwargs)
        # Recalculate storage if node was moved to or from the trash tree
        if target.channel_trash.exists() or parent_was_trashtree:
            self.recalculate_editors_storage()
    def save(self, skip_lock=False, *args, **kwargs):
        """
        Save the node, acquiring an MPTT write lock on the affected tree(s)
        when the node's parent changed.

        :param skip_lock: skip acquiring the mptt lock (caller already holds it).
        """
        if self._state.adding:
            self.on_create()
        else:
            self.on_update()
        # Logic borrowed from mptt - do a simple check to see if we have changed
        # the parent of the node. We use the mptt specific cached fields here
        # because these get updated by the mptt move methods, and so will be up to
        # date, meaning we can avoid locking the DB twice when the fields have already
        # been updated in the database.
        # If most moves are being done independently of just changing the parent
        # and then calling a save, locking within the save method itself should rarely
        # be triggered - meaning updates to contentnode metadata should only rarely
        # trigger a write lock on mptt fields.
        old_parent_id = self._field_updates.changed().get("parent_id")
        if self._state.adding and (self.parent_id or self.parent):
            same_order = False
        elif old_parent_id is DeferredAttribute:
            # parent_id was never loaded from the DB, so it cannot have changed.
            same_order = True
        else:
            same_order = old_parent_id == self.parent_id
        if not same_order:
            # Both old and new parents need their `changed` flags set.
            changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id])))
        else:
            changed_ids = []
        if not same_order and not skip_lock:
            # Lock the mptt fields for the trees of the old and new parent
            with ContentNode.objects.lock_mptt(*ContentNode.objects
                                               .filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid])
                                               .values_list('tree_id', flat=True).distinct()):
                super(ContentNode, self).save(*args, **kwargs)
                # Always write to the database for the parent change updates, as we have
                # no persistent object references for the original and new parent to modify
                if changed_ids:
                    ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
        else:
            super(ContentNode, self).save(*args, **kwargs)
            # Always write to the database for the parent change updates, as we have
            # no persistent object references for the original and new parent to modify
            if changed_ids:
                ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
    # Copied from MPTT
    save.alters_data = True
    def delete(self, *args, **kwargs):
        """Delete the node under an MPTT tree lock, dirtying its parent and updating quotas."""
        # Fall back to the tracked original parent if self.parent was cleared.
        parent = self.parent or self._field_updates.changed().get('parent')
        if parent:
            parent.changed = True
            parent.save()
        self.recalculate_editors_storage()
        # Lock the mptt fields for the tree of this node
        with ContentNode.objects.lock_mptt(self.tree_id):
            return super(ContentNode, self).delete(*args, **kwargs)
    # Copied from MPTT
    delete.alters_data = True
    def copy_to(
        self,
        target=None,
        position="last-child",
        pk=None,
        mods=None,
        excluded_descendants=None,
        can_edit_source_channel=None,
        batch_size=None,
        progress_tracker=None
    ):
        """
        Copy this node (and its subtree) to ``target`` at ``position``,
        delegating to the tree manager's copy_node; returns the new node.
        """
        return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0]
    def copy(self):
        """Copy this node with all default copy_to() options."""
        return self.copy_to()
    class Meta:
        verbose_name = "Topic"
        verbose_name_plural = "Topics"
        # Do not allow two nodes with the same name on the same level
        # unique_together = ('parent', 'title')
        # Explicit index names so migrations stay stable across Django versions.
        indexes = [
            models.Index(fields=["node_id"], name=NODE_ID_INDEX_NAME),
            models.Index(fields=["-modified"], name=NODE_MODIFIED_DESC_INDEX_NAME),
        ]
class ContentKind(models.Model):
    """Lookup table of content kinds (topic, video, exercise, ...), keyed by name."""
    kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices)
    def __str__(self):
        return self.kind
class FileFormat(models.Model):
    """Lookup table of supported file extensions and their mimetypes."""
    extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices)
    mimetype = models.CharField(max_length=200, blank=True)
    def __str__(self):
        return self.extension
class FormatPreset(models.Model):
    """
    Lookup table describing the role a File plays for a content kind
    (e.g. high-res video, thumbnail, subtitle) and which formats it allows.
    """
    id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices)
    readable_name = models.CharField(max_length=400)
    multi_language = models.BooleanField(default=False)
    supplementary = models.BooleanField(default=False)
    thumbnail = models.BooleanField(default=False)
    subtitle = models.BooleanField(default=False)
    display = models.BooleanField(default=True)  # Render on client side
    order = models.IntegerField(default=0)
    kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL)
    allowed_formats = models.ManyToManyField(FileFormat, blank=True)
    def __str__(self):
        return self.id
    @classmethod
    def guess_format_preset(cls, filename):
        """
        Guess the format preset of a filename based on its extension.
        Return None if format is unknown.
        """
        _, ext = os.path.splitext(filename)
        ext = ext.lstrip(".")
        # NOTE(review): several displayed presets may allow the same extension;
        # .first() picks an arbitrary one — confirm callers tolerate this.
        f = FormatPreset.objects.filter(
            allowed_formats__extension=ext,
            display=True
        )
        return f.first()
    @classmethod
    def get_preset(cls, preset_name):
        """
        Get the FormatPreset object with that exact name.
        Returns None if that format preset is not found.
        """
        try:
            return FormatPreset.objects.get(id=preset_name)
        except FormatPreset.DoesNotExist:
            return None
class Language(models.Model):
    """A language, keyed by its IETF-style tag (code plus optional subcode)."""
    id = models.CharField(max_length=14, primary_key=True)
    lang_code = models.CharField(max_length=3, db_index=True)
    lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)
    readable_name = models.CharField(max_length=100, blank=True)
    native_name = models.CharField(max_length=100, blank=True)
    lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])
    def ietf_name(self):
        """Return the IETF tag: "code-subcode" when a subcode exists, else just the code."""
        return "{code}-{subcode}".format(code=self.lang_code,
                                         subcode=self.lang_subcode) if self.lang_subcode else self.lang_code
    def __str__(self):
        return self.ietf_name()
# Explicit name for the AssessmentItem.assessment_id index (see AssessmentItem.Meta).
ASSESSMENT_ID_INDEX_NAME = "assessment_id_idx"
class AssessmentItem(models.Model):
    """
    A single assessment question attached to an exercise ContentNode.
    hints/answers are stored as JSON-encoded strings.
    """
    type = models.CharField(max_length=50, default="multiplechoice")
    question = models.TextField(blank=True)
    hints = models.TextField(default="[]")
    answers = models.TextField(default="[]")
    order = models.IntegerField(default=1)
    contentnode = models.ForeignKey('ContentNode', related_name="assessment_items", blank=True, null=True,
                                    db_index=True, on_delete=models.CASCADE)
    # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
    assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
    raw_data = models.TextField(blank=True)
    source_url = models.CharField(max_length=400, blank=True, null=True)
    randomize = models.BooleanField(default=False)
    deleted = models.BooleanField(default=False)
    objects = CustomManager()
    # Track all updates
    _field_updates = FieldTracker()
    def has_changes(self):
        """Return True if any tracked field changed since the last save."""
        return bool(self._field_updates.changed())
    class Meta:
        indexes = [
            models.Index(fields=["assessment_id"], name=ASSESSMENT_ID_INDEX_NAME),
        ]
        unique_together = ['contentnode', 'assessment_id']
    # Permission scope: the channel tree containing the parent content node.
    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id"))
    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict ``queryset`` to items the user can edit (all items for admins)."""
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()
        edit_cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(edit_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict ``queryset`` to items the user can view (public, viewable, or editable)."""
        user_id = not user.is_anonymous and user.id
        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(
                    public=True, main_tree__tree_id=OuterRef("contentnode__tree_id")
                ).values("pk")
            ),
        )
        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)
        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
class SlideshowSlide(models.Model):
    """One slide of a slideshow ContentNode, ordered by sort_order."""
    contentnode = models.ForeignKey('ContentNode', related_name="slideshow_slides", blank=True, null=True,
                                    db_index=True, on_delete=models.CASCADE)
    sort_order = models.FloatField(default=1.0)
    metadata = JSONField(default=dict)
class StagedFile(models.Model):
    """
    Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit
    """
    checksum = models.CharField(max_length=400, blank=True, db_index=True)
    file_size = models.IntegerField(blank=True, null=True)
    uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE)
# Explicit index/constraint names used by File.Meta below.
FILE_DISTINCT_INDEX_NAME = "file_checksum_file_size_idx"
FILE_MODIFIED_DESC_INDEX_NAME = "file_modified_desc_idx"
FILE_DURATION_CONSTRAINT = "file_media_duration_int"
# Presets that carry a playable duration; see FILE_DURATION_CONSTRAINT.
MEDIA_PRESETS = [format_presets.AUDIO, format_presets.VIDEO_HIGH_RES, format_presets.VIDEO_LOW_RES]
class File(models.Model):
    """
    The bottom layer of the contentDB schema, defines the basic building brick for content.
    Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
    """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    # Files are content-addressed: checksum is the MD5 of the blob (see save()).
    checksum = models.CharField(max_length=400, blank=True, db_index=True)
    file_size = models.IntegerField(blank=True, null=True)
    file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500,
                                    blank=True)
    contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    slideshow_slide = models.ForeignKey(SlideshowSlide, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
    original_filename = models.CharField(max_length=255, blank=True)
    source_url = models.CharField(max_length=400, blank=True, null=True)
    uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
    modified = models.DateTimeField(auto_now=True, verbose_name="modified", null=True)
    duration = models.IntegerField(blank=True, null=True)
    objects = CustomManager()
    # Permission scope: the tree of the attached node, or of the assessment item's node.
    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q(tree_id=OuterRef("assessment_item__contentnode__tree_id"))
    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict to files the user can edit; orphan files are editable by their uploader."""
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()
        cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter))
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
        )
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict to files the user can view: public, viewable, editable, or own orphans."""
        user_id = not user.is_anonymous and user.id
        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(public=True).filter(
                    Q(main_tree__tree_id=OuterRef("contentnode__tree_id"))
                    | Q(main_tree__tree_id=OuterRef("assessment_item__contentnode__tree_id"))
                ).values("pk")
            ),
        )
        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)
        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(public=True)
            | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
        )
    class Admin:
        pass
    def __str__(self):
        return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension)
    def filename(self):
        """
        Returns just the filename of the File in storage, without the path
        e.g. abcd.mp4
        """
        # TODO(aron): write tests for this
        return os.path.basename(self.file_on_disk.name)
    def on_update(self):
        # since modified was added later as a nullable field to File, we don't use a default but
        # instead we'll just make sure it's always updated through our serializers
        self.modified = timezone.now()
    def save(self, set_by_file_on_disk=True, *args, **kwargs):
        """
        Overrider the default save method.
        If the file_on_disk FileField gets passed a content copy:
        1. generate the MD5 from the content copy
        2. fill the other fields accordingly

        :param set_by_file_on_disk: derive checksum/size/format from file_on_disk when set.
        :raises ValueError: if the file extension is not a supported format.
        """
        from contentcuration.utils.user import calculate_user_storage
        if set_by_file_on_disk and self.file_on_disk:  # if file_on_disk is supplied, hash out the file
            if self.checksum is None or self.checksum == "":
                md5 = hashlib.md5()
                for chunk in self.file_on_disk.chunks():
                    md5.update(chunk)
                self.checksum = md5.hexdigest()
            if not self.file_size:
                self.file_size = self.file_on_disk.size
            if not self.file_format_id:
                ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
                if ext in list(dict(file_formats.choices).keys()):
                    self.file_format_id = ext
                else:
                    raise ValueError("Files of type `{}` are not supported.".format(ext))
        super(File, self).save(*args, **kwargs)
        if self.uploaded_by_id:
            calculate_user_storage(self.uploaded_by_id)
    class Meta:
        indexes = [
            models.Index(fields=['checksum', 'file_size'], name=FILE_DISTINCT_INDEX_NAME),
            models.Index(fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME),
        ]
        constraints = [
            models.CheckConstraint(check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)), name=FILE_DURATION_CONSTRAINT)
        ]
@receiver(models.signals.post_delete, sender=File)
def auto_delete_file_on_delete(sender, instance, **kwargs):
    """
    post_delete handler for File rows.

    NOTE(review): despite the name, this handler only recalculates the
    uploader's storage usage; it does not remove the blob from storage
    (many File rows can share one on-disk blob via checksum — see
    delete_empty_file_reference for actual blob cleanup).
    """
    # Recalculate storage
    from contentcuration.utils.user import calculate_user_storage
    if instance.uploaded_by_id:
        calculate_user_storage(instance.uploaded_by_id)
def delete_empty_file_reference(checksum, extension):
    """Remove the stored blob for ``checksum.extension`` once no File or Channel references it."""
    filename = checksum + '.' + extension
    still_referenced = (
        File.objects.filter(checksum=checksum).exists()
        or Channel.objects.filter(thumbnail=filename).exists()
    )
    if still_referenced:
        return
    storage_path = generate_object_storage_name(checksum, filename)
    if default_storage.exists(storage_path):
        default_storage.delete(storage_path)
class PrerequisiteContentRelationship(models.Model):
    """
    Predefine the prerequisite relationship between two ContentNode objects.
    """
    target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
    prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)
    class Meta:
        unique_together = ['target_node', 'prerequisite']
    def clean(self, *args, **kwargs):
        """Reject self-references and immediate (A<->B) prerequisite cycles."""
        # self reference exception
        if self.target_node == self.prerequisite:
            raise IntegrityError('Cannot self reference as prerequisite.')
        # immediate cyclic exception
        if PrerequisiteContentRelationship.objects.using(self._state.db) \
                .filter(target_node=self.prerequisite, prerequisite=self.target_node):
            raise IntegrityError(
                'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
                % (self.target_node, self.prerequisite))
        # distant cyclic exception
        # elif <this is a nice to have exception, may implement in the future when the priority raises.>
        #   raise Exception('Note: Prerequisite relationship is acyclic! %s and %s forms a closed loop!' % (self.target_node, self.prerequisite))
        super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)
    def save(self, *args, **kwargs):
        # full_clean() runs clean() above, so invalid relationships raise before saving.
        self.full_clean()
        super(PrerequisiteContentRelationship, self).save(*args, **kwargs)
    # NOTE(review): __unicode__ is ignored on Python 3; __str__ falls back to the default repr.
    def __unicode__(self):
        return u'%s' % (self.pk)
class RelatedContentRelationship(models.Model):
    """
    Predefine the related relationship between two ContentNode objects.
    """
    contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
    contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE)
    class Meta:
        unique_together = ['contentnode_1', 'contentnode_2']
    def save(self, *args, **kwargs):
        """Save the relationship, rejecting self-references and skipping duplicates in reverse order."""
        # self reference exception
        if self.contentnode_1 == self.contentnode_2:
            raise IntegrityError('Cannot self reference as related.')
        # handle immediate cyclic
        if RelatedContentRelationship.objects.using(self._state.db) \
                .filter(contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1):
            return  # silently cancel the save
        super(RelatedContentRelationship, self).save(*args, **kwargs)
class Invitation(models.Model):
    """ Invitation to edit channel """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    accepted = models.BooleanField(default=False)
    declined = models.BooleanField(default=False)
    revoked = models.BooleanField(default=False)
    invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to')
    share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
    email = models.EmailField(max_length=100, null=True)
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE)
    channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE)
    first_name = models.CharField(max_length=100, blank=True)
    last_name = models.CharField(max_length=100, blank=True, null=True)
    class Meta:
        verbose_name = "Invitation"
        verbose_name_plural = "Invitations"
    def accept(self):
        """Grant the invited user view or edit access on the channel, replacing the other role."""
        # NOTE(review): user is None when no account matches the invitation email —
        # presumably accept() is only reached by registered users; confirm callers.
        user = User.objects.filter(email__iexact=self.email).first()
        if self.channel:
            # channel is a nullable field, so check that it exists.
            if self.share_mode == VIEW_ACCESS:
                self.channel.editors.remove(user)
                self.channel.viewers.add(user)
            else:
                self.channel.viewers.remove(user)
                self.channel.editors.add(user)
    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Invitations the user may modify: addressed to them, sent by them, or on a channel they edit."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(email__iexact=user.email)
            | Q(sender=user)
            | Q(channel__editors=user)
        ).distinct()
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Invitations the user may see: as edit scope, plus channels they can view."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(email__iexact=user.email)
            | Q(sender=user)
            | Q(channel__editors=user)
            | Q(channel__viewers=user)
        ).distinct()
class Task(models.Model):
    """Asynchronous tasks"""
    task_id = UUIDField(db_index=True, default=uuid.uuid4)  # This ID is used as the Celery task ID
    task_type = models.CharField(max_length=50)
    created = models.DateTimeField(default=timezone.now)
    status = models.CharField(max_length=10)
    is_progress_tracking = models.BooleanField(default=False)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="task", on_delete=models.CASCADE)
    metadata = JSONField()
    channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)
    @classmethod
    def find_incomplete(cls, task_type, **filters):
        """Return tasks of ``task_type`` that have not finished (queued, pending, received, or started)."""
        filters.update(task_type=task_type, status__in=["QUEUED", states.PENDING, states.RECEIVED, states.STARTED])
        return cls.objects.filter(**filters)
| 39.404783 | 157 | 0.653717 | import functools
import hashlib
import json
import logging
import os
import urllib.parse
import uuid
from datetime import datetime
import pytz
from celery import states
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.core.cache import cache
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from django.core.files.storage import FileSystemStorage
from django.core.mail import send_mail
from django.db import IntegrityError
from django.db import models
from django.db.models import Count
from django.db.models import Exists
from django.db.models import F
from django.db.models import Index
from django.db.models import IntegerField
from django.db.models import JSONField
from django.db.models import Max
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models import Sum
from django.db.models import UUIDField as DjangoUUIDField
from django.db.models import Value
from django.db.models.expressions import ExpressionList
from django.db.models.expressions import RawSQL
from django.db.models.functions import Cast
from django.db.models.functions import Lower
from django.db.models.indexes import IndexExpression
from django.db.models.query_utils import DeferredAttribute
from django.db.models.sql import Query
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import gettext as _
from django_cte import With
from le_utils import proquint
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import languages
from le_utils.constants import roles
from model_utils import FieldTracker
from mptt.models import MPTTModel
from mptt.models import raise_if_unsaved
from mptt.models import TreeForeignKey
from postmark.core import PMMailInactiveRecipientException
from postmark.core import PMMailUnauthorizedException
from rest_framework.authtoken.models import Token
from contentcuration.constants import channel_history
from contentcuration.db.models.expressions import Array
from contentcuration.db.models.functions import ArrayRemove
from contentcuration.db.models.functions import Unnest
from contentcuration.db.models.manager import CustomContentNodeTreeManager
from contentcuration.db.models.manager import CustomManager
from contentcuration.statistics import record_channel_stats
from contentcuration.utils.cache import delete_public_channel_cache_keys
from contentcuration.utils.parser import load_json_string
# Channel share modes (see Invitation.share_mode).
EDIT_ACCESS = "edit"
VIEW_ACCESS = "view"
# Default per-node content defaults; also the template serialized into
# User.preferences below.
DEFAULT_CONTENT_DEFAULTS = {
    'license': None,
    'language': None,
    'author': None,
    'aggregator': None,
    'provider': None,
    'copyright_holder': None,
    'license_description': None,
    'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5,
    'm_value': 5,
    'n_value': 5,
    'auto_derive_video_thumbnail': True,
    'auto_derive_audio_thumbnail': True,
    'auto_derive_document_thumbnail': True,
    'auto_derive_html5_thumbnail': True,
    'auto_derive_exercise_thumbnail': True,
    'auto_randomize_questions': True,
}
DEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False)
def to_pk(model_or_pk):
    """Normalize a model instance or raw primary key to the primary key value."""
    is_instance = isinstance(model_or_pk, models.Model)
    return model_or_pk.pk if is_instance else model_or_pk
class UserManager(BaseUserManager):
    """Manager that creates users keyed by a normalized email address."""
    def create_user(self, email, first_name, last_name, password=None):
        """Create and persist a regular user; raises ValueError if email is missing."""
        if not email:
            raise ValueError('Email address not specified')
        user = self.model(
            email=self.normalize_email(email),
        )
        user.first_name = first_name
        user.last_name = last_name
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, first_name, last_name, password=None):
        """Create a user and promote it to admin."""
        user = self.create_user(email, first_name, last_name, password=password)
        user.is_admin = True
        user.save(using=self._db)
        return user
class UniqueActiveUserIndex(Index):
    """An Index subclass that always emits CREATE UNIQUE INDEX statements."""
    def create_sql(self, model, schema_editor, using='', **kwargs):
        """
        Build the SQL for this index, forcing the UNIQUE keyword.

        Django's Index API has no unique flag, so this temporarily swaps the
        schema editor's sql_create_index template for a UNIQUE variant, renders
        the statement, then restores the original template.
        """
        include = [model._meta.get_field(field_name).column for field_name in self.include]
        condition = self._get_condition_sql(model, schema_editor)
        if self.expressions:
            # Functional (expression-based) index: resolve each expression
            # against a throwaway Query so column references are wrapped.
            index_expressions = []
            for expression in self.expressions:
                index_expression = IndexExpression(expression)
                index_expression.set_wrapper_classes(schema_editor.connection)
                index_expressions.append(index_expression)
            expressions = ExpressionList(*index_expressions).resolve_expression(
                Query(model, alias_cols=False),
            )
            fields = None
            col_suffixes = None
        else:
            # Plain column index: translate (field, order) pairs.
            fields = [
                model._meta.get_field(field_name)
                for field_name, _ in self.fields_orders
            ]
            col_suffixes = [order[1] for order in self.fields_orders]
            expressions = None
        sql = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(condition)s"
        # Temporarily override the template; restored below after rendering.
        old_create_index_sql = schema_editor.sql_create_index
        schema_editor.sql_create_index = sql
        return_statement = schema_editor._create_index_sql(
            model, fields=fields, name=self.name, using=using,
            db_tablespace=self.db_tablespace, col_suffixes=col_suffixes,
            opclasses=self.opclasses, condition=condition, include=include,
            expressions=expressions, **kwargs,
        )
        schema_editor.sql_create_index = old_create_index_sql
        return return_statement
class User(AbstractBaseUser, PermissionsMixin):
    """
    Studio user account, keyed on email.

    Tracks a storage quota (``disk_space``) and usage (``disk_space_used``),
    JSON preference/policy/feature-flag blobs, and a personal clipboard
    ContentNode tree.
    """
    email = models.EmailField(max_length=100, unique=True)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField('active', default=False,
                                    help_text='Designates whether this user should be treated as active.')
    is_staff = models.BooleanField('staff status', default=False,
                                   help_text='Designates whether the user can log into this admin site.')
    date_joined = models.DateTimeField('date joined', default=timezone.now)
    clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL)
    preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
    disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload')
    disk_space_used = models.FloatField(default=0, help_text='How many bytes a user has uploaded')
    information = JSONField(null=True)
    content_defaults = JSONField(default=dict)
    policies = JSONField(default=dict, null=True)
    feature_flags = JSONField(default=dict, null=True)

    # Watch disk_space so quota changes trigger a storage recalculation in save().
    _field_updates = FieldTracker(fields=[
        "disk_space",
    ])

    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']

    def __unicode__(self):
        return self.email

    def delete(self):
        """
        Delete this account plus its sent invitations and any channels or
        channel sets for which this user is the sole editor.
        """
        from contentcuration.viewsets.common import SQCount

        # Remove invitations sent by this account.
        self.sent_to.all().delete()

        # Delete channels where this user is the only editor.
        user_query = (
            User.objects.filter(editable_channels__id=OuterRef('id'))
            .values_list('id', flat=True)
            .distinct()
        )
        self.editable_channels.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1).delete()

        # Delete channel sets where this user is the only editor.
        user_query = (
            User.objects.filter(channel_sets__id=OuterRef('id'))
            .values_list('id', flat=True)
            .distinct()
        )
        self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1).delete()

        super(User, self).delete()

    def can_edit(self, channel_id):
        # True when this user has edit rights on the given channel.
        return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists()

    def check_space(self, size, checksum):
        """
        Raise PermissionDenied unless the user has room for ``size`` bytes.
        A file whose checksum is already among the user's active files is free.
        """
        active_files = self.get_user_active_files()
        if active_files.filter(checksum=checksum).exists():
            return True

        space = self.get_available_space(active_files=active_files)
        if space < size:
            raise PermissionDenied(_("Not enough space. Check your storage under Settings page."))

    def check_channel_space(self, channel):
        # Verify the user can absorb the channel's staged files, deduplicated
        # against checksums they already store; raises PermissionDenied if not.
        active_files = self.get_user_active_files()
        staging_tree_id = channel.staging_tree.tree_id
        channel_files = self.files\
            .filter(contentnode__tree_id=staging_tree_id)\
            .values('checksum')\
            .distinct()\
            .exclude(checksum__in=active_files.values_list('checksum', flat=True))
        staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0)
        if self.get_available_space(active_files=active_files) < (staged_size):
            raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))

    def check_staged_space(self, size, checksum):
        # Like check_space, but measured against staged (not-yet-committed) files.
        if self.staged_files.filter(checksum=checksum).exists():
            return True
        space = self.get_available_staged_space()
        if space < size:
            raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))

    def get_available_staged_space(self):
        # Quota remaining after counting distinct staged files; never negative.
        space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum("file_size"))['size'] or 0
        return float(max(self.disk_space - space_used, 0))

    def get_available_space(self, active_files=None):
        # Quota remaining after counting active files; never negative.
        return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0))

    def get_user_active_trees(self):
        # tree_ids of the main trees of all non-deleted channels the user edits.
        return self.editable_channels.exclude(deleted=True)\
            .values(tree_id=F("main_tree__tree_id"))

    def get_user_active_files(self):
        # Distinct checksums of the user's files living in an active tree,
        # joined through a CTE rather than a large IN clause.
        cte = With(self.get_user_active_trees().distinct())

        return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\
            .with_cte(cte)\
            .values('checksum')\
            .distinct()

    def get_space_used(self, active_files=None):
        # Total bytes across the user's active files.
        active_files = active_files or self.get_user_active_files()
        files = active_files.aggregate(total_used=Sum('file_size'))
        return float(files['total_used'] or 0)

    def set_space_used(self):
        # Recompute usage and persist it on the model.
        self.disk_space_used = self.get_space_used()
        self.save()
        return self.disk_space_used

    def get_space_used_by_kind(self):
        # Map of content kind id -> bytes used by that kind's files.
        active_files = self.get_user_active_files()
        files = active_files.values('preset__kind_id')\
            .annotate(space=Sum('file_size'))\
            .order_by()

        kind_dict = {}
        for item in files:
            kind_dict[item['preset__kind_id']] = item['space']
        return kind_dict

    def email_user(self, subject, message, from_email=None, **kwargs):
        # Best-effort email: Postmark inactive-recipient/unauthorized errors
        # are logged instead of propagated so they don't break the caller.
        try:
            send_mail(subject, message, from_email, [self.email], **kwargs)
        except (PMMailInactiveRecipientException, PMMailUnauthorizedException) as e:
            logging.error(str(e))

    def clean(self):
        super(User, self).clean()
        # Normalize email casing before validation/storage.
        self.email = self.__class__.objects.normalize_email(self.email)

    def get_full_name(self):
        """Return first_name plus last_name, separated by a space."""
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        """Return the user's first name."""
        return self.first_name

    def get_token(self):
        # Lazily create and return the user's DRF auth token key.
        token, _ = Token.objects.get_or_create(user=self)
        return token.key

    def save(self, *args, **kwargs):
        from contentcuration.utils.user import calculate_user_storage
        super(User, self).save(*args, **kwargs)

        if 'disk_space' in self._field_updates.changed():
            # Quota changed: recompute storage usage.
            calculate_user_storage(self.pk)

        # Back-fill defaults that need a saved pk; note this intentionally
        # triggers a second save() when anything was filled in.
        changed = False
        if not self.content_defaults:
            self.content_defaults = DEFAULT_CONTENT_DEFAULTS
            changed = True

        if not self.clipboard_tree:
            self.clipboard_tree = ContentNode.objects.create(title=self.email + " clipboard", kind_id=content_kinds.TOPIC)
            self.clipboard_tree.save()
            changed = True

        if changed:
            self.save()

    class Meta:
        verbose_name = "User"
        verbose_name_plural = "Users"
        indexes = [
            UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name="contentcura_email_d4d492_idx")
        ]

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """
        Restrict ``queryset`` to users visible to ``user``: themselves plus
        anyone sharing an editable or view-only channel. Admins see everyone;
        anonymous users see no one.
        """
        if user.is_anonymous:
            return queryset.none()

        if user.is_admin:
            return queryset

        # Join-table rows for channels the requesting user can edit.
        all_editable = User.editable_channels.through.objects.all()
        editable = all_editable.filter(
            channel_id__in=all_editable.filter(user_id=user.pk).values_list("channel_id", flat=True)
        )

        # Join-table rows for channels the requesting user can view.
        all_view_only = User.view_only_channels.through.objects.all()
        view_only = all_view_only.filter(
            channel_id__in=all_view_only.filter(user_id=user.pk).values_list("channel_id", flat=True)
        )

        return queryset.filter(
            Q(pk=user.pk)
            | Q(pk__in=editable.values_list("user_id", flat=True))
            | Q(pk__in=view_only.values_list("user_id", flat=True))
        )

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        # Users may only edit themselves; admins may edit anyone.
        if user.is_anonymous:
            return queryset.none()

        if user.is_admin:
            return queryset

        return queryset.filter(pk=user.pk)

    @classmethod
    def get_for_email(cls, email, **filters):
        """
        Return the user matching ``email`` case-insensitively (preferring
        active, then most recently created accounts), or None if no match.
        """
        return User.objects.filter(email__iexact=email.strip(), **filters)\
            .order_by("-is_active", "-id").first()
class UUIDField(models.CharField):
    """CharField that stores UUIDs as their 32-character hex string form."""

    def __init__(self, *args, **kwargs):
        # Fixed width: a UUID hex digest is always 32 characters.
        kwargs['max_length'] = 32
        super(UUIDField, self).__init__(*args, **kwargs)

    def prepare_value(self, value):
        """Render UUID instances as hex; leave anything else untouched."""
        return value.hex if isinstance(value, uuid.UUID) else value

    def get_default(self):
        """Resolve the default, converting a UUID default to its hex form."""
        default = super(UUIDField, self).get_default()
        if isinstance(default, uuid.UUID):
            return default.hex
        return default

    def to_python(self, value):
        """Convert UUID instances to hex; pass other values through."""
        return value.hex if isinstance(value, uuid.UUID) else value
class MPTTTreeIDManager(models.Model):
    """Empty model; judging by the name, its auto-incrementing pks are used to
    allocate unique MPTT tree_ids — NOTE(review): confirm at tree-creation
    call sites elsewhere in the project."""
def file_on_disk_name(instance, filename):
    """Map a File model instance's upload to its checksum-based on-disk path."""
    return generate_file_on_disk_name(instance.checksum, filename)
def generate_file_on_disk_name(checksum, filename):
    """
    Return the on-disk path for ``checksum`` under STORAGE_ROOT, sharding
    directories by the checksum's first two hex characters and keeping only
    the (lowercased) extension of the original filename.

    Creates the shard directory if it does not exist.
    """
    h = checksum
    # Only the extension of the original filename is retained.
    ext = os.path.splitext(filename)[1]
    directory = os.path.join(settings.STORAGE_ROOT, h[0], h[1])
    # exist_ok avoids the check-then-create race (previously os.path.exists
    # followed by makedirs) when two uploads shard to the same new directory.
    os.makedirs(directory, exist_ok=True)
    return os.path.join(directory, h + ext.lower())
def object_storage_name(instance, filename):
    """Return the object-storage key for a File instance's upload, falling
    back to an extension derived from the instance's file format."""
    fallback_ext = '.{}'.format(instance.file_format_id) if instance.file_format_id else ''
    return generate_object_storage_name(instance.checksum, filename, fallback_ext)
def generate_object_storage_name(checksum, filename, default_ext=''):
    """
    Return the storage key for ``checksum``, sharded by its first two
    characters, with the filename's extension (or ``default_ext`` when the
    filename has none) lowercased and appended.
    """
    hashed = checksum
    ext = os.path.splitext(filename)[1] or default_ext
    # '/' is joined deliberately (object-store key separator), not os.path.sep.
    directory = "/".join([settings.STORAGE_ROOT, hashed[0], hashed[1]])
    return os.path.join(directory, hashed + ext.lower())
def generate_storage_url(filename, request=None, *args):
    """
    Return a URL from which ``filename`` (a checksum-named file) can be
    fetched.

    Inside k8s the cluster serves /content/... directly; otherwise a
    presigned minio URL is built so the object can be fetched without
    logging in.
    """
    path = generate_object_storage_name(os.path.splitext(filename)[0], filename)

    # Detect our current deployment mode first.
    run_mode = os.getenv("RUN_MODE")

    if run_mode == "k8s":
        # Inside k8s, serve the normal /content/{storage,databases} URL.
        url = "/content/{path}".format(
            path=path,
        )
    else:
        # docker-compose or unset RUN_MODE: generate the minio storage URL so
        # we can include the GET parameters that grant access without login.
        # Fix: previously any RUN_MODE other than "k8s"/"docker-compose"/None
        # fell through both branches and raised UnboundLocalError on `url`;
        # the minio URL is now the default for all non-k8s modes.
        params = urllib.parse.urlparse(default_storage.url(path)).query
        host = "localhost"
        # minio will be exposed on port 9000 in the host's localhost network.
        # Change this logic if there is potential for bandwidth/latency gains.
        port = 9000
        url = "http://{host}:{port}/{bucket}/{path}?{params}".format(
            host=host,
            port=port,
            bucket=settings.AWS_S3_BUCKET_NAME,
            path=path,
            params=params,
        )

    return url
class FileOnDiskStorage(FileSystemStorage):
    """
    FileSystemStorage variant for a content-addressed store: since names are
    checksums, an existing file with the same name is the same content, so
    duplicate saves are skipped instead of renamed.
    """

    def get_available_name(self, name):
        # Keep the requested (checksum-based) name; never generate a new one.
        return name

    def _save(self, name, content):
        if self.exists(name):
            # Same checksum -> same content: skip the write.
            # Fix: logging.warn() has been deprecated since Python 3.3; use
            # warning() with lazy %-style args instead of eager formatting.
            logging.warning('Content copy "%s" already exists!', name)
            return name
        return super(FileOnDiskStorage, self)._save(name, content)
class SecretToken(models.Model):
    """Shareable access token; the human-readable form is a proquint string."""
    token = models.CharField(max_length=100, unique=True)
    is_primary = models.BooleanField(default=False)

    @classmethod
    def exists(cls, token):
        """Return True when the given token string is already stored."""
        return cls.objects.filter(token=token).exists()

    @classmethod
    def generate_new_token(cls):
        """
        Generate a proquint token not yet present in the database.

        Raises ValueError if no unused token is found within 100 attempts.
        (Fix: a redundant proquint.generate() call before the loop was
        removed — its result was always overwritten on the first iteration.)
        """
        TRIALS = 100
        for __ in range(TRIALS):
            token = proquint.generate()
            if SecretToken.exists(token):
                continue
            break
        # If the loop completes without breaking, no unique token was found:
        # just raise an error.
        # See https://stackoverflow.com/a/9980160 on what for-else loop does.
        else:
            raise ValueError("Cannot generate new token")

        # We found a unique token! Return it.
        return token

    def __str__(self):
        # Render as the conventional 5-5 hyphenated form.
        return "{}-{}".format(self.token[:5], self.token[5:])
def get_channel_thumbnail(channel):
    """
    Resolve the thumbnail for a channel (model instance or dict).

    Preference order: inline base64 data from ``thumbnail_encoding``, then a
    stored non-static thumbnail file (as a storage URL), then the static
    placeholder image.
    """
    data = channel if isinstance(channel, dict) else channel.__dict__

    encoding = data.get("thumbnail_encoding")
    if encoding and encoding.get("base64"):
        return encoding["base64"]

    thumbnail = data.get("thumbnail")
    if thumbnail and 'static' not in thumbnail:
        return generate_storage_url(thumbnail)

    return '/static/img/kolibri_placeholder.png'
# Stable name for the index on Channel.name (declared in Channel.Meta).
CHANNEL_NAME_INDEX_NAME = "channel_name_idx"

# A list of all the FKs from the Channel object to ContentNode trees,
# used for permissions filtering (see PermissionCTE below).
CHANNEL_TREES = (
    "main_tree",
    "chef_tree",
    "trash_tree",
    "staging_tree",
    "previous_tree",
)
def boolean_val(val):
    """Wrap a Python bool as an ORM boolean literal, for queryset annotations."""
    output_field = models.BooleanField()
    return Value(val, output_field=output_field)
class PermissionCTE(With):
    """
    Common table expression mapping a user's channel-permission rows to the
    tree_ids of every tree owned by those channels, used for permission
    filtering of ContentNode querysets.
    """

    # Lookup paths from a permission join-table row to each channel tree's id.
    tree_id_fields = [
        "channel__{}__tree_id".format(tree_name)
        for tree_name in CHANNEL_TREES
    ]

    def __init__(self, model, user_id, **kwargs):
        # One CTE row per (user, channel, tree): the array of all five tree
        # FKs is unnested after NULLs (unset trees) are removed.
        queryset = model.objects.filter(user_id=user_id)\
            .annotate(
                tree_id=Unnest(ArrayRemove(Array(*self.tree_id_fields), None), output_field=models.IntegerField())
            )
        super(PermissionCTE, self).__init__(queryset=queryset.values("user_id", "channel_id", "tree_id"), **kwargs)

    @classmethod
    def editable_channels(cls, user_id):
        # CTE over the user's edit-permission join table.
        return PermissionCTE(User.editable_channels.through, user_id, name="editable_channels_cte")

    @classmethod
    def view_only_channels(cls, user_id):
        # CTE over the user's view-only-permission join table.
        return PermissionCTE(User.view_only_channels.through, user_id, name="view_only_channels_cte")

    def exists(self, *filters):
        # EXISTS subquery against this CTE, usable as a queryset annotation.
        return Exists(self.queryset().filter(*filters).values("user_id"))
class Channel(models.Model):
    """
    A Studio channel: a set of content trees (main/chef/trash/staging/previous)
    plus the editors/viewers who may access them and publication metadata.
    """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=400, blank=True)
    tagline = models.CharField(max_length=150, blank=True, null=True)
    version = models.IntegerField(default=0)
    thumbnail = models.TextField(blank=True, null=True)
    thumbnail_encoding = JSONField(default=dict)
    editors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='editable_channels',
        verbose_name="editors",
        help_text="Users with edit rights",
        blank=True,
    )
    viewers = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='view_only_channels',
        verbose_name="viewers",
        help_text="Users with view only rights",
        blank=True,
    )
    language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL)
    trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL)
    clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL)
    main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL)
    staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL)
    chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL)
    previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', on_delete=models.SET_NULL)
    bookmarked_by = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='bookmarked_channels',
        verbose_name="bookmarked by",
    )
    deleted = models.BooleanField(default=False, db_index=True)
    public = models.BooleanField(default=False, db_index=True)
    preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
    content_defaults = JSONField(default=dict)
    priority = models.IntegerField(default=0, help_text="Order to display public channels")
    last_published = models.DateTimeField(blank=True, null=True)
    secret_tokens = models.ManyToManyField(
        SecretToken,
        related_name='channels',
        verbose_name="secret tokens",
        blank=True,
    )
    source_url = models.CharField(max_length=200, blank=True, null=True)
    demo_server_url = models.CharField(max_length=200, blank=True, null=True)

    # Fields specific to content generated by Ricecooker
    source_id = models.CharField(max_length=200, blank=True, null=True)
    source_domain = models.CharField(max_length=300, blank=True, null=True)
    ricecooker_version = models.CharField(max_length=100, blank=True, null=True)

    # Fields to calculate when channel is published
    published_data = JSONField(default=dict)
    icon_encoding = models.TextField(blank=True, null=True)
    total_resource_count = models.IntegerField(default=0)
    published_kind_count = models.TextField(blank=True, null=True)
    published_size = models.FloatField(default=0)
    included_languages = models.ManyToManyField(
        "Language",
        related_name='channels',
        verbose_name="languages",
        blank=True,
    )

    _field_updates = FieldTracker(fields=[
        # Fields to watch for changes
        "description",
        "language_id",
        "thumbnail",
        "name",
        "thumbnail_encoding",
        # watch these fields for changes
        # but exclude them from setting changed
        # on the main tree
        "deleted",
        "public",
        "main_tree_id",
        "version",
    ])

    @classmethod
    def get_editable(cls, user, channel_id):
        # Fetch a single channel, raising DoesNotExist unless user can edit it.
        return cls.filter_edit_queryset(cls.objects.all(), user).get(id=channel_id)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict ``queryset`` to channels the user edits (all, for admins)."""
        user_id = not user.is_anonymous and user.id

        # Anonymous users can edit nothing, so short-circuit.
        if not user_id:
            return queryset.none()

        edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef("id")))
        queryset = queryset.annotate(edit=edit)
        if user.is_admin:
            return queryset

        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """
        Restrict ``queryset`` to channels the user may view: ones they edit
        or view, ones with a pending invitation for their email, and public
        non-deleted channels. Also annotates ``edit`` and ``view`` flags.
        """
        user_id = not user.is_anonymous and user.id
        user_email = not user.is_anonymous and user.email

        if user_id:
            filters = dict(user_id=user_id, channel_id=OuterRef("id"))
            edit = Exists(User.editable_channels.through.objects.filter(**filters).values("user_id"))
            view = Exists(User.view_only_channels.through.objects.filter(**filters).values("user_id"))
        else:
            # Anonymous users never have edit/view rights.
            edit = boolean_val(False)
            view = boolean_val(False)

        queryset = queryset.annotate(
            edit=edit,
            view=view,
        )

        if user_id and user.is_admin:
            return queryset

        permission_filter = Q()
        if user_id:
            # Channels with an outstanding (not revoked/declined/accepted)
            # invitation for this user's email are also visible.
            pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list(
                "channel_id", flat=True
            )
            permission_filter = (
                Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels)
            )

        return queryset.filter(permission_filter | Q(deleted=False, public=True))

    @classmethod
    def get_all_channels(cls):
        # All channels with editors/viewers prefetched and main_tree joined.
        return cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct()

    def resource_size_key(self):
        # Cache key under which the computed resource size is stored.
        return "{}_resource_size".format(self.pk)

    def get_resource_size(self):
        """Total bytes of distinct files in the main tree (cached)."""
        cached_data = cache.get(self.resource_size_key())
        if cached_data:
            return cached_data

        tree_id = self.main_tree.tree_id
        files = File.objects.select_related('contentnode', 'assessment_item')\
            .filter(contentnode__tree_id=tree_id)\
            .values('checksum', 'file_size')\
            .distinct()\
            .aggregate(resource_size=Sum('file_size'))
        # Cache with no timeout until the next recomputation.
        cache.set(self.resource_size_key(), files['resource_size'] or 0, None)
        return files['resource_size'] or 0

    def on_create(self):
        """First-save setup: stats record, defaults, main and trash trees."""
        record_channel_stats(self, None)

        if not self.content_defaults:
            self.content_defaults = DEFAULT_CONTENT_DEFAULTS

        if not self.main_tree:
            # Create the main tree root; node/content ids mirror the channel id.
            self.main_tree = ContentNode.objects.create(
                title=self.name,
                kind_id=content_kinds.TOPIC,
                content_id=self.id,
                node_id=self.id,
                original_channel_id=self.id,
                source_channel_id=self.id,
                changed=True,
                complete=True,
            )
            if settings.DEBUG:
                # Sanity check (dev only): a tree must have exactly one root.
                if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1:
                    raise AssertionError

        if not self.trash_tree:
            self.trash_tree = ContentNode.objects.create(
                title=self.name,
                kind_id=content_kinds.TOPIC,
                content_id=self.id,
                node_id=self.id,
            )

        if self.public and (self.main_tree and self.main_tree.published):
            # A new public, published channel invalidates the public-channel cache.
            delete_public_channel_cache_keys()

    def on_update(self):
        """Per-save bookkeeping for existing channels, driven by FieldTracker."""
        from contentcuration.utils.user import calculate_user_storage
        original_values = self._field_updates.changed()

        record_channel_stats(self, original_values)

        # Fields that should NOT mark the main tree as changed when modified.
        blacklist = set([
            "public",
            "main_tree_id",
            "version",
        ])

        if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)):
            # Changing channel metadata should also mark main_tree as changed.
            self.main_tree.changed = True

        # If the previous thumbnail file is no longer referenced, clean it up.
        if "thumbnail" in original_values and original_values["thumbnail"] and 'static' not in original_values["thumbnail"]:
            filename, ext = os.path.splitext(original_values["thumbnail"])
            delete_empty_file_reference(filename, ext[1:])

        # Deleted status flipped: recalculate storage for every editor.
        if "deleted" in original_values:
            for editor in self.editors.all():
                calculate_user_storage(editor.pk)

        # Channel was just soft-deleted (previous value of "deleted" was False):
        # drop pending editors and the exported sqlite db, and mark unpublished.
        if "deleted" in original_values and not original_values["deleted"]:
            self.pending_editors.all().delete()
            export_db_storage_path = os.path.join(settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id))
            if default_storage.exists(export_db_storage_path):
                default_storage.delete(export_db_storage_path)
                if self.main_tree:
                    self.main_tree.published = False

        if self.main_tree and self.main_tree._field_updates.changed():
            self.main_tree.save()

        if "public" in original_values and (self.main_tree and self.main_tree.published):
            # Public flag changed on a published channel: flush the public cache.
            delete_public_channel_cache_keys()

    def save(self, *args, **kwargs):
        # Route to the create/update hooks before persisting.
        if self._state.adding:
            self.on_create()
        else:
            self.on_update()

        super(Channel, self).save(*args, **kwargs)

    def get_thumbnail(self):
        return get_channel_thumbnail(self)

    def has_changes(self):
        # True when any node in the main tree is flagged as changed.
        return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists()

    def get_date_modified(self):
        # Most recent modification timestamp anywhere in the main tree.
        return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified']

    def get_resource_count(self):
        # Count of distinct non-topic resources in the main tree.
        return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count()

    def get_human_token(self):
        # The human-readable (primary) secret token.
        return self.secret_tokens.get(is_primary=True)

    def get_channel_id_token(self):
        # The secret token whose value is the channel id itself.
        return self.secret_tokens.get(token=self.id)

    def make_token(self):
        """Create and return the primary human token; also ensure the
        channel-id token exists."""
        token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True)
        self.secret_tokens.get_or_create(token=self.id)
        return token

    def make_public(self, bypass_signals=False):
        """
        Set the channel to public.

        With ``bypass_signals`` the DB row is updated directly, skipping the
        save() hooks; the in-memory attribute is still set so the object stays
        consistent.
        """
        if bypass_signals:
            self.public = True
            Channel.objects.filter(id=self.id).update(public=True)
            delete_public_channel_cache_keys()
        else:
            self.public = True
            self.save()

        return self

    def mark_created(self, user):
        # Audit-log a creation event.
        self.history.create(actor_id=to_pk(user), action=channel_history.CREATION)

    def mark_publishing(self, user):
        # Audit-log a publication and flag the main tree as mid-publish.
        self.history.create(actor_id=to_pk(user), action=channel_history.PUBLICATION)
        self.main_tree.publishing = True
        self.main_tree.save()

    def mark_deleted(self, user):
        # Audit-log and apply a soft delete.
        self.history.create(actor_id=to_pk(user), action=channel_history.DELETION)
        self.deleted = True
        self.save()

    def mark_recovered(self, user):
        # Audit-log and undo a soft delete.
        self.history.create(actor_id=to_pk(user), action=channel_history.RECOVERY)
        self.deleted = False
        self.save()

    @property
    def deletion_history(self):
        # All soft-delete events for this channel.
        return self.history.filter(action=channel_history.DELETION)

    @property
    def publishing_history(self):
        # All publication events for this channel.
        return self.history.filter(action=channel_history.PUBLICATION)

    @classmethod
    def get_public_channels(cls, defer_nonmain_trees=False):
        """
        Return public, non-deleted channels. With ``defer_nonmain_trees`` the
        secondary tree FKs and viewers are deferred to lighten the query.
        """
        if defer_nonmain_trees:
            c = (Channel.objects
                 .filter(public=True)
                 .exclude(deleted=True)
                 .select_related('main_tree')
                 .prefetch_related('editors')
                 .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers'))
        else:
            c = Channel.objects.filter(public=True).exclude(deleted=True)

        return c

    class Meta:
        verbose_name = "Channel"
        verbose_name_plural = "Channels"
        indexes = [
            models.Index(fields=["name"], name=CHANNEL_NAME_INDEX_NAME),
        ]
        index_together = [
            ["deleted", "public"]
        ]
# Stable name for the ChannelHistory.channel_id index (see ChannelHistory.Meta).
CHANNEL_HISTORY_CHANNEL_INDEX_NAME = "idx_channel_history_channel_id"
class ChannelHistory(models.Model):
    """Audit log of channel lifecycle events (creation, deletion, publication,
    recovery), recording who performed each action and when."""
    channel = models.ForeignKey('Channel', null=False, blank=False, related_name='history', on_delete=models.CASCADE)
    actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE)
    performed = models.DateTimeField(default=timezone.now)
    action = models.CharField(max_length=50, choices=channel_history.choices)

    @classmethod
    def prune(cls):
        """
        Keep only the most recent event per (channel, action) pair and delete
        the rest. Relies on Postgres DISTINCT ON via distinct(*fields).
        """
        keep_ids = cls.objects.distinct("channel_id", "action").order_by("channel_id", "action", "-performed").values_list("id", flat=True)
        cls.objects.exclude(id__in=keep_ids).delete()

    class Meta:
        verbose_name = "Channel history"
        verbose_name_plural = "Channel histories"
        indexes = [
            models.Index(fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME),
        ]
class ChannelSet(models.Model):
    """A named collection of channels shared via a single secret token."""
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=400, blank=True)
    public = models.BooleanField(default=False, db_index=True)
    editors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='channel_sets',
        verbose_name="editors",
        help_text="Users with edit rights",
        blank=True,
    )
    secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', on_delete=models.SET_NULL)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        # Editable channel sets: ones the user edits (all of them, for admins).
        if user.is_anonymous:
            return queryset.none()
        user_id = not user.is_anonymous and user.id
        edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef("id")))
        queryset = queryset.annotate(edit=edit)
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        # View rights mirror edit rights for channel sets.
        return cls.filter_edit_queryset(queryset, user)

    def get_channels(self):
        # Non-deleted channels attached to this set's token; None before a
        # token has been assigned.
        if self.secret_token:
            return self.secret_token.channels.filter(deleted=False)

    def save(self, *args, **kwargs):
        if self._state.adding:
            self.on_create()

        # Fix: previously called super().save() without forwarding *args and
        # **kwargs, silently dropping options such as update_fields or using.
        super(ChannelSet, self).save(*args, **kwargs)

    def on_create(self):
        # Ensure every channel set gets its own secret token on first save.
        if not self.secret_token:
            self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token())

    def delete(self, *args, **kwargs):
        super(ChannelSet, self).delete(*args, **kwargs)

        # Clean up the token after the row is gone so it cannot be reused.
        if self.secret_token:
            self.secret_token.delete()
class ContentTag(models.Model):
    """Free-form content tag, optionally scoped to a single channel; the
    (tag_name, channel) pair is unique."""
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    tag_name = models.CharField(max_length=50)
    channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)

    objects = CustomManager()

    def __str__(self):
        return self.tag_name

    class Meta:
        unique_together = ['tag_name', 'channel']
def delegate_manager(method):
    """Decorator: forward the call to ``self._base_manager`` (by method name)
    whenever one is set; otherwise run the decorated method itself."""
    @functools.wraps(method)
    def wrapped(self, *args, **kwargs):
        target = self._base_manager
        if not target:
            return method(self, *args, **kwargs)
        # Delegate to the base manager's identically-named method.
        return getattr(target, method.__name__)(*args, **kwargs)
    return wrapped
class License(models.Model):
    """Licensing metadata that can be attached to content items."""
    license_name = models.CharField(max_length=50)
    license_url = models.URLField(blank=True)
    license_description = models.TextField(blank=True)
    copyright_holder_required = models.BooleanField(default=True)
    is_custom = models.BooleanField(default=False)
    exists = models.BooleanField(
        default=False,
        verbose_name="license exists",
        help_text="Tells whether or not a content item is licensed to share",
    )

    @classmethod
    def validate_name(cls, name):
        # Raise ValidationError when no license with this name is registered.
        if cls.objects.filter(license_name=name).count() == 0:
            raise ValidationError('License `{}` does not exist'.format(name))

    def __str__(self):
        return self.license_name
# Stable index names for ContentNode, referenced from the model's Meta
# (see the node_id field comment; kept explicit so migrations stay stable).
NODE_ID_INDEX_NAME = "node_id_idx"
NODE_MODIFIED_INDEX_NAME = "node_modified_idx"
NODE_MODIFIED_DESC_INDEX_NAME = "node_modified_desc_idx"
class ContentNode(MPTTModel, models.Model):
    """A single node in a channel's MPTT content tree."""
    id = UUIDField(primary_key=True, default=uuid.uuid4)

    # content_id tracks a user's interaction with a specific piece of
    # content, in the face of possibly many copies of that content. When a user
    # interacts with a piece of content, all substantially similar pieces of
    # content should be marked as such as well. We track these "substantially
    # similar" types of content by having them have the same content_id.
    content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True)
    # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
    node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)

    # TODO: disallow nulls once existing models have been set
    original_channel_id = UUIDField(primary_key=False, editable=False, null=True,
                                    db_index=True)  # Original channel copied from
    source_channel_id = UUIDField(primary_key=False, editable=False, null=True)  # Immediate channel copied from
    # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary)
    original_source_node_id = UUIDField(primary_key=False, editable=False, null=True,
                                        db_index=True)
    source_node_id = UUIDField(primary_key=False, editable=False, null=True)  # Immediate node_id of node copied from

    # Fields specific to content generated by Ricecooker
    source_id = models.CharField(max_length=200, blank=True, null=True)
    source_domain = models.CharField(max_length=300, blank=True, null=True)

    title = models.CharField(max_length=200, blank=True)
    description = models.TextField(blank=True)
    kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
    license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL)
    license_description = models.CharField(max_length=400, null=True, blank=True)
    prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of',
                                          through='PrerequisiteContentRelationship', symmetrical=False, blank=True)
    is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship',
                                        symmetrical=False, blank=True)
    language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)
    tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)

    # No longer used
    sort_order = models.FloatField(max_length=50, default=1, verbose_name="sort order",
                                   help_text="Ascending, lowest number shown first")
    copyright_holder = models.CharField(max_length=200, null=True, blank=True, default="",
                                        help_text="Organization of person who holds the essential rights")
    # legacy field...
    original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates')
    cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones')

    thumbnail_encoding = models.TextField(blank=True, null=True)

    created = models.DateTimeField(default=timezone.now, verbose_name="created")
    modified = models.DateTimeField(auto_now=True, verbose_name="modified")
    published = models.BooleanField(default=False)
    publishing = models.BooleanField(default=False)
    complete = models.BooleanField(null=True)
    changed = models.BooleanField(default=True)

    extra_fields = JSONField(default=dict, blank=True, null=True)

    author = models.CharField(max_length=200, blank=True, default="", help_text="Who created this content?",
                              null=True)
    aggregator = models.CharField(max_length=200, blank=True, default="", help_text="Who gathered this content together?",
                                  null=True)
    provider = models.CharField(max_length=200, blank=True, default="", help_text="Who distributed this content?",
                                null=True)

    role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER)
    freeze_authoring_data = models.BooleanField(default=False)

    # Fields for metadata labels
    # These fields use a map to store applied labels
    # {
    #     "<label_id1>": true,
    #     "<label_id2>": true,
    # }
    grade_levels = models.JSONField(blank=True, null=True)
    resource_types = models.JSONField(blank=True, null=True)
    learning_activities = models.JSONField(blank=True, null=True)
    accessibility_labels = models.JSONField(blank=True, null=True)
    categories = models.JSONField(blank=True, null=True)
    learner_needs = models.JSONField(blank=True, null=True)

    # A field for storing a suggested duration for the content node;
    # this duration should be in seconds.
    suggested_duration = models.IntegerField(blank=True, null=True, help_text="Suggested duration for the content node (in seconds)")

    objects = CustomContentNodeTreeManager()

    # Track all updates; a blacklist of attributes is ignored when we check
    # for changes.
    _field_updates = FieldTracker()

    # Filter applied against PermissionCTE rows: node and CTE row share a tree.
    _permission_filter = Q(tree_id=OuterRef("tree_id"))
@classmethod
def _annotate_channel_id(cls, queryset):
# Annotate channel id
return queryset.annotate(
channel_id=Subquery(
Channel.objects.filter(
main_tree__tree_id=OuterRef("tree_id")
).values_list("id", flat=True)[:1]
)
)
    @classmethod
    def _orphan_tree_id_subquery(cls):
        """Subquery yielding the tree_id of the orphanage root node."""
        # For some reason this now requires an explicit type cast
        # or it gets interpreted as a varchar
        return Cast(cls.objects.filter(
            pk=settings.ORPHANAGE_ROOT_ID
        ).values_list("tree_id", flat=True)[:1], output_field=IntegerField())
    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """
        Restrict ``queryset`` to nodes the user may edit: nodes in trees of
        channels they edit, plus the orphanage tree. Admins get everything
        (except the orphanage root itself); anonymous users get nothing.
        Also annotates an ``edit`` flag.
        """
        user_id = not user.is_anonymous and user.id
        queryset = queryset.exclude(pk=settings.ORPHANAGE_ROOT_ID)

        if not user_id:
            return queryset.none()

        edit_cte = PermissionCTE.editable_channels(user_id)

        queryset = queryset.with_cte(edit_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
        )

        if user.is_admin:
            return queryset

        return queryset.filter(Q(edit=True) | Q(tree_id=cls._orphan_tree_id_subquery()))
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict ``queryset`` to nodes ``user`` may view, annotating each row
        with ``public``/``edit``/``view`` booleans.

        Anonymous users see only public-channel nodes; admins see everything
        (minus the orphanage root); others see nodes they can edit or view,
        public nodes, and orphanage-tree nodes.
        """
        user_id = not user.is_anonymous and user.id
        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(
                    public=True, main_tree__tree_id=OuterRef("tree_id")
                ).values("pk")
            ),
        ).exclude(pk=settings.ORPHANAGE_ROOT_ID)
        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)
        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(public=True)
            | Q(tree_id=cls._orphan_tree_id_subquery())
        )
    @raise_if_unsaved
    def get_root(self):
        """Return the root of this node's tree.

        Only topics can be proper root nodes; a non-topic that is itself a tree
        root (e.g. an orphaned node) is returned as its own root.
        """
        if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
            return self
        return super(ContentNode, self).get_root()
@raise_if_unsaved
def get_root_id(self):
# Only topics can be root nodes
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
return self
return ContentNode.objects.values_list('pk', flat=True).get(
tree_id=self._mpttfield('tree_id'),
parent=None,
)
def get_tree_data(self, levels=float('inf')):
if self.kind_id == content_kinds.TOPIC:
node_data = {
"title": self.title,
"kind": self.kind_id,
"node_id": self.node_id,
"studio_id": self.id,
}
children = self.children.all()
if levels > 0:
node_data["children"] = [c.get_tree_data(levels=levels - 1) for c in children]
return node_data
if self.kind_id == content_kinds.EXERCISE:
return {
"title": self.title,
"kind": self.kind_id,
"count": self.assessment_items.count(),
"node_id": self.node_id,
"studio_id": self.id,
}
return {
"title": self.title,
"kind": self.kind_id,
"file_size": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],
"node_id": self.node_id,
"studio_id": self.id,
}
    def get_original_node(self):
        """Return the node this one was originally copied from, falling back to
        ``self`` when the original cannot be located.

        Lookup order within the original channel's main tree: by source node_id
        first, then by shared content_id.
        """
        original_node = self.original_node or self
        if self.original_channel_id and self.original_source_node_id:
            original_tree_id = Channel.objects.select_related("main_tree").get(pk=self.original_channel_id).main_tree.tree_id
            original_node = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first() or \
                ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first() or self
        return original_node
def get_associated_presets(self):
key = "associated_presets_{}".format(self.kind_id)
cached_data = cache.get(key)
if cached_data:
return cached_data
presets = list(FormatPreset.objects.filter(kind=self.kind).values())
cache.set(key, presets, None)
return presets
def get_prerequisites(self):
prerequisite_mapping = {}
prerequisites = self.prerequisite.all()
prereqlist = list(prerequisites)
for prereq in prerequisites:
prlist, prereqmapping = prereq.get_prerequisites()
prerequisite_mapping.update({prereq.pk: prereqmapping})
prereqlist.extend(prlist)
return prereqlist, prerequisite_mapping
def get_postrequisites(self):
postrequisite_mapping = {}
postrequisites = self.is_prerequisite_of.all()
postreqlist = list(postrequisites)
for postreq in postrequisites:
prlist, postreqmapping = postreq.get_postrequisites()
postrequisite_mapping.update({postreq.pk: postreqmapping})
postreqlist.extend(prlist)
return postreqlist, postrequisite_mapping
def get_channel_id(self):
if hasattr(self, "channel_id"):
return self.channel_id
channel = self.get_channel()
if channel:
return channel.id
return None
    def get_channel(self):
        """Return the Channel owning this node's tree, checking every tree slot
        (main/chef/trash/staging/previous), or None if no channel matches or the
        lookup fails.
        """
        try:
            root = self.get_root()
            if not root:
                return None
            return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(previous_tree=root)).first()
        except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
            return None
def get_thumbnail(self):
# Problems with json.loads, so use ast.literal_eval to get dict
if self.thumbnail_encoding:
thumbnail_data = load_json_string(self.thumbnail_encoding)
if type(thumbnail_data) is dict and thumbnail_data.get("base64"):
return thumbnail_data["base64"]
thumbnail = self.files.filter(preset__thumbnail=True).first()
if thumbnail:
return generate_storage_url(str(thumbnail))
return ""
@classmethod
def get_nodes_with_title(cls, title, limit_to_children_of=None):
if limit_to_children_of:
root = cls.objects.get(id=limit_to_children_of)
return root.get_descendants().filter(title=title)
return cls.objects.filter(title=title)
    def get_details(self, channel_id=None):
        """Compute summary statistics for this node's subtree — resource counts
        and sizes, kinds, languages, licenses, tags, authorship fields, a sample
        pathway and sample nodes — cache the JSON blob under
        ``details_<node_id>`` and return the dict.

        :param channel_id: optional channel to attribute the details to; falls
            back to ``self.get_channel()``.
        """
        from contentcuration.viewsets.common import SQArrayAgg
        from contentcuration.viewsets.common import SQCount
        from contentcuration.viewsets.common import SQRelatedArrayAgg
        from contentcuration.viewsets.common import SQSum
        node = ContentNode.objects.filter(pk=self.id).order_by()
        descendants = (
            self.get_descendants()
            .prefetch_related("children", "files", "tags")
            .select_related("license", "language")
            .values("id")
        )
        if channel_id:
            channel = Channel.objects.filter(id=channel_id)[0]
        else:
            channel = self.get_channel()
        # No descendants: cache and return an all-empty summary.
        if not descendants.exists():
            data = {
                "last_update": pytz.utc.localize(datetime.now()).strftime(
                    settings.DATE_TIME_FORMAT
                ),
                "created": self.created.strftime(settings.DATE_TIME_FORMAT),
                "resource_count": 0,
                "resource_size": 0,
                "includes": {"coach_content": 0, "exercises": 0},
                "kind_count": [],
                "languages": "",
                "accessible_languages": "",
                "licenses": "",
                "tags": [],
                "copyright_holders": "",
                "authors": "",
                "aggregators": "",
                "providers": "",
                "sample_pathway": [],
                "original_channels": [],
                "sample_nodes": [],
            }
            # Set cache with latest data
            cache.set("details_{}".format(self.node_id), json.dumps(data), None)
            return data
        # Get resources (non-topic descendants).
        resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()
        # CTE over the resources' files, deduplicated by (checksum, file_size)
        # so shared files are only counted once in the size total.
        nodes = With(
            File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
            .values("checksum", "file_size")
            .order_by(),
            name="nodes",
        )
        file_query = (
            nodes.queryset().with_cte(nodes).values("checksum", "file_size").distinct()
        )
        l_nodes = With(
            File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
            .values("language_id", "preset_id")
            .order_by(),
            name="l_nodes",
        )
        # Languages for which subtitle files exist.
        accessible_languages_query = (
            l_nodes.queryset()
            .filter(preset_id=format_presets.VIDEO_SUBTITLE)
            .with_cte(l_nodes)
            .values("language__native_name")
            .distinct()
        )
        # NOTE(review): the .replace("topic", "'topic'") below patches the raw
        # SQL string so the literal kind value is quoted; fragile if any other
        # part of the generated SQL ever contains the substring "topic".
        tags_query = str(
            ContentTag.objects.filter(
                tagged_content__pk__in=descendants.values_list("pk", flat=True)
            )
            .values("tag_name")
            .annotate(count=Count("tag_name"))
            .query
        ).replace("topic", "'topic'")
        kind_count_query = str(
            resources.values("kind_id").annotate(count=Count("kind_id")).query
        ).replace("topic", "'topic'")
        node = node.annotate(
            resource_count=SQCount(resources, field="id"),
            resource_size=SQSum(file_query, field="file_size"),
            copyright_holders=SQArrayAgg(
                resources.distinct("copyright_holder").order_by("copyright_holder"),
                field="copyright_holder",
            ),
            authors=SQArrayAgg(
                resources.distinct("author").order_by("author"), field="author"
            ),
            aggregators=SQArrayAgg(
                resources.distinct("aggregator").order_by("aggregator"),
                field="aggregator",
            ),
            providers=SQArrayAgg(
                resources.distinct("provider").order_by("provider"), field="provider"
            ),
            languages=SQRelatedArrayAgg(
                descendants.exclude(language=None)
                .distinct("language__native_name")
                .order_by(),
                field="language__native_name",
                fieldname="native_name",
            ),
            accessible_languages=SQRelatedArrayAgg(
                accessible_languages_query,
                field="language__native_name",
                fieldname="native_name",
            ),
            licenses=SQRelatedArrayAgg(
                resources.exclude(license=None)
                .distinct("license__license_name")
                .order_by("license__license_name"),
                field="license__license_name",
                fieldname="license_name",
            ),
            kind_count=RawSQL(
                "SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(
                    kind_count_query
                ),
                (),
            ),
            tags_list=RawSQL(
                "SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(tags_query), ()
            ),
            coach_content=SQCount(
                resources.filter(role_visibility=roles.COACH), field="id"
            ),
            exercises=SQCount(
                resources.filter(kind_id=content_kinds.EXERCISE), field="id"
            ),
        )
        # Get sample pathway by getting longest path
        # Using resources.aggregate adds a lot of time, use values that have already been fetched
        max_level = max(
            resources.values_list("level", flat=True).order_by().distinct() or [0]
        )
        m_nodes = With(
            resources.values("id", "level", "tree_id", "lft").order_by(),
            name="m_nodes",
        )
        deepest_node_record = (
            m_nodes.queryset()
            .with_cte(m_nodes)
            .filter(level=max_level)
            .values("id")
            .order_by("tree_id", "lft")
            .first()
        )
        if deepest_node_record:
            deepest_node = ContentNode.objects.get(pk=deepest_node_record["id"])
        pathway = (
            list(
                deepest_node.get_ancestors()
                .order_by()
                .exclude(parent=None)
                .values("title", "node_id", "kind_id")
                .order_by()
            )
            if deepest_node_record
            else []
        )
        sample_nodes = (
            [
                {
                    "node_id": n.node_id,
                    "title": n.title,
                    "description": n.description,
                    "thumbnail": n.get_thumbnail(),
                    "kind": n.kind_id,
                }
                for n in deepest_node.get_siblings(include_self=True)[0:4]
            ]
            if deepest_node_record
            else []
        )
        # Get list of channels nodes were originally imported from (omitting the current channel)
        channel_id = channel and channel.id
        originals = (
            resources.values("original_channel_id")
            .annotate(count=Count("original_channel_id"))
            .order_by("original_channel_id")
        )
        originals = {c["original_channel_id"]: c["count"] for c in originals}
        original_channels = (
            Channel.objects.exclude(pk=channel_id)
            .filter(pk__in=originals.keys(), deleted=False)
            .order_by()
        )
        original_channels = [
            {
                "id": c.id,
                "name": "{}{}".format(
                    c.name, _(" (Original)") if channel_id == c.id else ""
                ),
                "thumbnail": c.get_thumbnail(),
                "count": originals[c.id],
            }
            for c in original_channels
        ]
        # Materialize the annotated aggregates for this node.
        node = (
            node.order_by()
            .values(
                "id",
                "resource_count",
                "resource_size",
                "copyright_holders",
                "authors",
                "aggregators",
                "providers",
                "languages",
                "accessible_languages",
                "coach_content",
                "licenses",
                "tags_list",
                "kind_count",
                "exercises",
            )
            .first()
        )
        for_educators = {
            "coach_content": node["coach_content"],
            "exercises": node["exercises"],
        }
        # Serialize data
        data = {
            "last_update": pytz.utc.localize(datetime.now()).strftime(
                settings.DATE_TIME_FORMAT
            ),
            "created": self.created.strftime(settings.DATE_TIME_FORMAT),
            "resource_count": node.get("resource_count", 0),
            "resource_size": node.get("resource_size", 0),
            "includes": for_educators,
            "kind_count": node.get("kind_count", []),
            "languages": node.get("languages", ""),
            "accessible_languages": node.get("accessible_languages", ""),
            "licenses": node.get("licenses", ""),
            "tags": node.get("tags_list", []),
            "copyright_holders": node["copyright_holders"],
            "authors": node["authors"],
            "aggregators": node["aggregators"],
            "providers": node["providers"],
            "sample_pathway": pathway,
            "original_channels": original_channels,
            "sample_nodes": sample_nodes,
        }
        # Set cache with latest data
        cache.set("details_{}".format(self.node_id), json.dumps(data), None)
        return data
def has_changes(self):
mptt_opts = self._mptt_meta
# Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods.
blacklist = set([
'changed',
'modified',
'publishing',
mptt_opts.tree_id_attr,
mptt_opts.left_attr,
mptt_opts.right_attr,
mptt_opts.level_attr,
])
original_values = self._field_updates.changed()
return any((True for field in original_values if field not in blacklist))
def recalculate_editors_storage(self):
from contentcuration.utils.user import calculate_user_storage
for editor in self.files.values_list('uploaded_by_id', flat=True).distinct():
calculate_user_storage(editor)
    def on_create(self):
        """Hook run by save() for new nodes: mark changed and refresh editor storage."""
        self.changed = True
        self.recalculate_editors_storage()
    def on_update(self):
        """Hook run by save() for existing nodes: keep 'changed' sticky once set."""
        self.changed = self.changed or self.has_changes()
    def move_to(self, target, *args, **kwargs):
        """Move this node under ``target``, recalculating editor storage when the
        move is into or out of a channel's trash tree.
        """
        parent_was_trashtree = self.parent.channel_trash.exists()
        super(ContentNode, self).move_to(target, *args, **kwargs)
        # Recalculate storage if node was moved to or from the trash tree
        if target.channel_trash.exists() or parent_was_trashtree:
            self.recalculate_editors_storage()
    def save(self, skip_lock=False, *args, **kwargs):
        """Save the node, write-locking the affected MPTT trees when its parent
        has changed.

        :param skip_lock: when True, skip acquiring the MPTT tree locks (caller
            is expected to already hold them).
        """
        if self._state.adding:
            self.on_create()
        else:
            self.on_update()
        # Logic borrowed from mptt - do a simple check to see if we have changed
        # the parent of the node. We use the mptt specific cached fields here
        # because these get updated by the mptt move methods, and so will be up to
        # date, meaning we can avoid locking the DB twice when the fields have already
        # been updated in the database.
        # If most moves are being done independently of just changing the parent
        # and then calling a save, locking within the save method itself should rarely
        # be triggered - meaning updates to contentnode metadata should only rarely
        # trigger a write lock on mptt fields.
        old_parent_id = self._field_updates.changed().get("parent_id")
        if self._state.adding and (self.parent_id or self.parent):
            same_order = False
        elif old_parent_id is DeferredAttribute:
            # NOTE(review): identity comparison against the DeferredAttribute
            # class itself — presumably a sentinel meaning "parent_id was never
            # loaded"; confirm against FieldTracker semantics before touching.
            same_order = True
        else:
            same_order = old_parent_id == self.parent_id
        if not same_order:
            changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id])))
        else:
            changed_ids = []
        if not same_order and not skip_lock:
            # Lock the mptt fields for the trees of the old and new parent
            with ContentNode.objects.lock_mptt(*ContentNode.objects
                                               .filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid])
                                               .values_list('tree_id', flat=True).distinct()):
                super(ContentNode, self).save(*args, **kwargs)
                # Always write to the database for the parent change updates, as we have
                # no persistent object references for the original and new parent to modify
                if changed_ids:
                    ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
        else:
            super(ContentNode, self).save(*args, **kwargs)
            # Always write to the database for the parent change updates, as we have
            # no persistent object references for the original and new parent to modify
            if changed_ids:
                ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
    # Copied from MPTT: tells Django admin this method writes to the database.
    save.alters_data = True
    def delete(self, *args, **kwargs):
        """Delete this node under an MPTT tree lock, marking its parent changed
        and refreshing the uploaders' storage accounting first.
        """
        parent = self.parent or self._field_updates.changed().get('parent')
        if parent:
            parent.changed = True
            parent.save()
        self.recalculate_editors_storage()
        # Lock the mptt fields for the tree of this node
        with ContentNode.objects.lock_mptt(self.tree_id):
            return super(ContentNode, self).delete(*args, **kwargs)
    # Copied from MPTT: tells Django admin this method writes to the database.
    delete.alters_data = True
    def copy_to(
        self,
        target=None,
        position="last-child",
        pk=None,
        mods=None,
        excluded_descendants=None,
        can_edit_source_channel=None,
        batch_size=None,
        progress_tracker=None
    ):
        """Copy this node (and its subtree) to ``position`` relative to ``target``
        via the tree manager; returns the new copy of this node.
        """
        return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0]
    def copy(self):
        """Copy this node in place with all default options."""
        return self.copy_to()
    class Meta:
        verbose_name = "Topic"
        verbose_name_plural = "Topics"
        # Do not allow two nodes with the same name on the same level
        # unique_together = ('parent', 'title')
        # Named indexes so migrations/DB tooling can reference them explicitly.
        indexes = [
            models.Index(fields=["node_id"], name=NODE_ID_INDEX_NAME),
            models.Index(fields=["-modified"], name=NODE_MODIFIED_DESC_INDEX_NAME),
        ]
class ContentKind(models.Model):
    """The kind of a content node (topic, video, exercise, ...)."""
    kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices)

    def __str__(self):
        return self.kind
class FileFormat(models.Model):
    """A file extension (primary key) with its associated mimetype."""
    extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices)
    mimetype = models.CharField(max_length=200, blank=True)

    def __str__(self):
        return self.extension
class FormatPreset(models.Model):
    """The role a file plays for a content kind (e.g. high-res video, subtitle)."""
    id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices)
    readable_name = models.CharField(max_length=400)
    multi_language = models.BooleanField(default=False)
    supplementary = models.BooleanField(default=False)
    thumbnail = models.BooleanField(default=False)
    subtitle = models.BooleanField(default=False)
    display = models.BooleanField(default=True)  # Render on client side
    order = models.IntegerField(default=0)
    kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL)
    allowed_formats = models.ManyToManyField(FileFormat, blank=True)

    def __str__(self):
        return self.id

    @classmethod
    def guess_format_preset(cls, filename):
        """Return the first displayable preset whose allowed formats include
        ``filename``'s extension, or None when no preset matches.
        """
        _, ext = os.path.splitext(filename)
        ext = ext.lstrip(".")
        f = FormatPreset.objects.filter(
            allowed_formats__extension=ext,
            display=True
        )
        return f.first()

    @classmethod
    def get_preset(cls, preset_name):
        """Return the preset with id ``preset_name``, or None if it does not exist."""
        try:
            return FormatPreset.objects.get(id=preset_name)
        except FormatPreset.DoesNotExist:
            return None
class Language(models.Model):
    """A language, keyed by its IETF-style code (code or code-subcode)."""
    id = models.CharField(max_length=14, primary_key=True)
    lang_code = models.CharField(max_length=3, db_index=True)
    lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)
    readable_name = models.CharField(max_length=100, blank=True)
    native_name = models.CharField(max_length=100, blank=True)
    lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])

    def ietf_name(self):
        """Return "code-subcode" when a subcode exists, otherwise just the code."""
        return "{code}-{subcode}".format(code=self.lang_code,
                                         subcode=self.lang_subcode) if self.lang_subcode else self.lang_code

    def __str__(self):
        return self.ietf_name()
# Explicit name for the AssessmentItem.assessment_id index (see AssessmentItem.Meta).
ASSESSMENT_ID_INDEX_NAME = "assessment_id_idx"
class AssessmentItem(models.Model):
    """A single question attached to an exercise ContentNode."""
    # Question type, e.g. "multiplechoice" (free-form string, not a choices field).
    type = models.CharField(max_length=50, default="multiplechoice")
    question = models.TextField(blank=True)
    # hints/answers are JSON-encoded lists stored as text.
    hints = models.TextField(default="[]")
    answers = models.TextField(default="[]")
    order = models.IntegerField(default=1)
    contentnode = models.ForeignKey('ContentNode', related_name="assessment_items", blank=True, null=True,
                                    db_index=True, on_delete=models.CASCADE)
    # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
    assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
    raw_data = models.TextField(blank=True)
    source_url = models.CharField(max_length=400, blank=True, null=True)
    randomize = models.BooleanField(default=False)
    deleted = models.BooleanField(default=False)
    objects = CustomManager()
    # Track all updates
    _field_updates = FieldTracker()

    def has_changes(self):
        """Return True if any tracked field has changed since load."""
        return bool(self._field_updates.changed())

    class Meta:
        indexes = [
            models.Index(fields=["assessment_id"], name=ASSESSMENT_ID_INDEX_NAME),
        ]
        unique_together = ['contentnode', 'assessment_id']

    # Permissions are resolved through the owning content node's tree.
    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id"))

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict ``queryset`` to items in channels ``user`` may edit."""
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()
        edit_cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(edit_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict ``queryset`` to items ``user`` may view, annotating each row
        with ``public``/``edit``/``view`` booleans.
        """
        user_id = not user.is_anonymous and user.id
        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(
                    public=True, main_tree__tree_id=OuterRef("contentnode__tree_id")
                ).values("pk")
            ),
        )
        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)
        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))
class SlideshowSlide(models.Model):
    """One slide of a slideshow ContentNode, ordered by ``sort_order``."""
    contentnode = models.ForeignKey('ContentNode', related_name="slideshow_slides", blank=True, null=True,
                                    db_index=True, on_delete=models.CASCADE)
    sort_order = models.FloatField(default=1.0)
    metadata = JSONField(default=dict)
class StagedFile(models.Model):
    """A file upload that has been staged but not yet attached to a node."""
    checksum = models.CharField(max_length=400, blank=True, db_index=True)
    file_size = models.IntegerField(blank=True, null=True)
    uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE)
# Explicit names for File indexes/constraints (see File.Meta).
FILE_DISTINCT_INDEX_NAME = "file_checksum_file_size_idx"
FILE_MODIFIED_DESC_INDEX_NAME = "file_modified_desc_idx"
FILE_DURATION_CONSTRAINT = "file_media_duration_int"
# Presets for which a positive duration is required by FILE_DURATION_CONSTRAINT.
MEDIA_PRESETS = [format_presets.AUDIO, format_presets.VIDEO_HIGH_RES, format_presets.VIDEO_LOW_RES]
class File(models.Model):
    """A stored file, content-addressed by MD5 checksum, optionally attached to
    a content node, assessment item, or slideshow slide.
    """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    checksum = models.CharField(max_length=400, blank=True, db_index=True)
    file_size = models.IntegerField(blank=True, null=True)
    file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500,
                                    blank=True)
    contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    slideshow_slide = models.ForeignKey(SlideshowSlide, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
    original_filename = models.CharField(max_length=255, blank=True)
    source_url = models.CharField(max_length=400, blank=True, null=True)
    uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
    modified = models.DateTimeField(auto_now=True, verbose_name="modified", null=True)
    duration = models.IntegerField(blank=True, null=True)
    objects = CustomManager()
    # Permissions resolved via either attachment point's content node tree.
    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q(tree_id=OuterRef("assessment_item__contentnode__tree_id"))

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict ``queryset`` to files ``user`` may edit: files in editable
        channels, plus the user's own unattached uploads.
        """
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()
        cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter))
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
        )

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict ``queryset`` to files ``user`` may view, annotating each row
        with ``public``/``edit``/``view`` booleans.
        """
        user_id = not user.is_anonymous and user.id
        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(public=True).filter(
                    Q(main_tree__tree_id=OuterRef("contentnode__tree_id"))
                    | Q(main_tree__tree_id=OuterRef("assessment_item__contentnode__tree_id"))
                ).values("pk")
            ),
        )
        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)
        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)
        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(public=True)
            | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
        )

    class Admin:
        pass

    def __str__(self):
        # NOTE(review): raises AttributeError when file_format is None
        # (nullable FK with SET_NULL) — confirm callers guarantee it is set.
        return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension)

    def filename(self):
        """Return the basename of the stored file."""
        # TODO(aron): write tests for this
        return os.path.basename(self.file_on_disk.name)

    def on_update(self):
        # since modified was added later as a nullable field to File, we don't use a default but
        # set it explicitly on every update.
        self.modified = timezone.now()

    def save(self, set_by_file_on_disk=True, *args, **kwargs):
        """Save the file, deriving checksum/size/format from ``file_on_disk``
        when requested, then refresh the uploader's storage accounting.

        :param set_by_file_on_disk: when True and a file is attached, fill any
            missing checksum, file_size, and file_format from the file itself.
        :raises ValueError: when the file's extension is not a supported format.
        """
        from contentcuration.utils.user import calculate_user_storage
        if set_by_file_on_disk and self.file_on_disk:  # if file_on_disk is supplied, hash the file
            if self.checksum is None or self.checksum == "":
                md5 = hashlib.md5()
                for chunk in self.file_on_disk.chunks():
                    md5.update(chunk)
                self.checksum = md5.hexdigest()
            if not self.file_size:
                self.file_size = self.file_on_disk.size
            if not self.file_format_id:
                ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
                if ext in list(dict(file_formats.choices).keys()):
                    self.file_format_id = ext
                else:
                    raise ValueError("Files of type `{}` are not supported.".format(ext))
        super(File, self).save(*args, **kwargs)
        if self.uploaded_by_id:
            calculate_user_storage(self.uploaded_by_id)

    class Meta:
        indexes = [
            models.Index(fields=['checksum', 'file_size'], name=FILE_DISTINCT_INDEX_NAME),
            models.Index(fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME),
        ]
        # Media presets must carry a positive duration; everything else must be null.
        constraints = [
            models.CheckConstraint(check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)), name=FILE_DURATION_CONSTRAINT)
        ]
@receiver(models.signals.post_delete, sender=File)
def auto_delete_file_on_delete(sender, instance, **kwargs):
    """post_delete hook for File: refresh the uploader's storage accounting.

    NOTE(review): despite the name, this does not delete anything from object
    storage (shared checksums mean other File rows may still reference the blob).
    """
    from contentcuration.utils.user import calculate_user_storage
    if instance.uploaded_by_id:
        calculate_user_storage(instance.uploaded_by_id)
def delete_empty_file_reference(checksum, extension):
    """Delete the stored object for ``checksum`` once no File row or channel
    thumbnail references it any more.
    """
    filename = '{}.{}'.format(checksum, extension)
    still_referenced = (
        File.objects.filter(checksum=checksum).exists()
        or Channel.objects.filter(thumbnail=filename).exists()
    )
    if still_referenced:
        return
    storage_path = generate_object_storage_name(checksum, filename)
    if default_storage.exists(storage_path):
        default_storage.delete(storage_path)
class PrerequisiteContentRelationship(models.Model):
    """A directed edge stating that ``prerequisite`` must be completed before
    ``target_node``.
    """
    target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
    prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)

    class Meta:
        unique_together = ['target_node', 'prerequisite']

    def clean(self, *args, **kwargs):
        """Reject self-references and cycles of length two (A<->B)."""
        if self.target_node == self.prerequisite:
            raise IntegrityError('Cannot self reference as prerequisite.')
        # immediate cycle detection: the reverse edge must not already exist
        if PrerequisiteContentRelationship.objects.using(self._state.db) \
                .filter(target_node=self.prerequisite, prerequisite=self.target_node):
            raise IntegrityError(
                'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
                % (self.target_node, self.prerequisite))
        super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)

    def save(self, *args, **kwargs):
        # full_clean() runs the cycle checks in clean() on every save.
        self.full_clean()
        super(PrerequisiteContentRelationship, self).save(*args, **kwargs)

    # NOTE(review): Python-2 era leftover; __str__ is not defined on this model.
    def __unicode__(self):
        return u'%s' % (self.pk)
class RelatedContentRelationship(models.Model):
    """A symmetric "related content" link between two content nodes."""
    contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
    contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE)

    class Meta:
        unique_together = ['contentnode_1', 'contentnode_2']

    def save(self, *args, **kwargs):
        """Save the link, rejecting self-references and silently skipping the
        save when the reverse pairing already exists.
        """
        if self.contentnode_1 == self.contentnode_2:
            raise IntegrityError('Cannot self reference as related.')
        # handle immediate cycle: the reverse pairing already records the relation
        if RelatedContentRelationship.objects.using(self._state.db) \
                .filter(contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1):
            return
        super(RelatedContentRelationship, self).save(*args, **kwargs)
class Invitation(models.Model):
    """An invitation for a user (by email) to edit or view a channel."""
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    accepted = models.BooleanField(default=False)
    declined = models.BooleanField(default=False)
    revoked = models.BooleanField(default=False)
    invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to')
    share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
    email = models.EmailField(max_length=100, null=True)
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE)
    channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE)
    first_name = models.CharField(max_length=100, blank=True)
    last_name = models.CharField(max_length=100, blank=True, null=True)

    class Meta:
        verbose_name = "Invitation"
        verbose_name_plural = "Invitations"

    def accept(self):
        """Grant the invited user access to the channel, moving them between the
        editors and viewers sets depending on ``share_mode``.

        NOTE(review): if no account matches ``email``, ``user`` is None and the
        m2m add/remove calls will fail — confirm callers guard against this.
        """
        user = User.objects.filter(email__iexact=self.email).first()
        if self.channel:
            if self.share_mode == VIEW_ACCESS:
                self.channel.editors.remove(user)
                self.channel.viewers.add(user)
            else:
                self.channel.viewers.remove(user)
                self.channel.editors.add(user)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Invitations the user may manage: their own, ones they sent, or ones
        for channels they edit. Admins see everything.
        """
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(email__iexact=user.email)
            | Q(sender=user)
            | Q(channel__editors=user)
        ).distinct()

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Like filter_edit_queryset, but also includes invitations for channels
        the user can merely view.
        """
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(
            Q(email__iexact=user.email)
            | Q(sender=user)
            | Q(channel__editors=user)
            | Q(channel__viewers=user)
        ).distinct()
class Task(models.Model):
    """Tracks an asynchronous (Celery) task and its user-facing progress state."""
    task_id = UUIDField(db_index=True, default=uuid.uuid4)  # celery task id
    task_type = models.CharField(max_length=50)
    created = models.DateTimeField(default=timezone.now)
    status = models.CharField(max_length=10)
    is_progress_tracking = models.BooleanField(default=False)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="task", on_delete=models.CASCADE)
    metadata = JSONField()
    # Optional channel this task operates on; not a FK so it survives channel deletion.
    channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)

    @classmethod
    def find_incomplete(cls, task_type, **filters):
        """Return tasks of ``task_type`` that have not finished (queued, pending,
        received, or started), further narrowed by any extra ``filters``.
        """
        filters.update(task_type=task_type, status__in=["QUEUED", states.PENDING, states.RECEIVED, states.STARTED])
        return cls.objects.filter(**filters)
| true | true |
f7fea2cb66b964a840b93207eec7e268160a0629 | 8,236 | py | Python | wechatter/server/run_server.py | we-chatter/wechatter | 52f884ba938e4b2248abca24efc22ac0fbbbaf1e | [
"Apache-2.0"
] | 11 | 2020-11-20T03:28:06.000Z | 2022-03-02T11:47:06.000Z | wechatter/server/run_server.py | we-chatter/wechatter | 52f884ba938e4b2248abca24efc22ac0fbbbaf1e | [
"Apache-2.0"
] | null | null | null | wechatter/server/run_server.py | we-chatter/wechatter | 52f884ba938e4b2248abca24efc22ac0fbbbaf1e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : run_server.py
@Time : 2020/8/26 2:50 下午
@Desc :
"""
import asyncio
import concurrent.futures
import logging
import multiprocessing
import os
import tempfile
import traceback
from collections import defaultdict
from functools import reduce, wraps
from inspect import isawaitable
from pathlib import Path
from http import HTTPStatus
from typing import (
Any,
Callable,
List,
Optional,
Text,
Union,
Dict,
TYPE_CHECKING,
NoReturn
)
from pathlib import Path
# parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.insert(0, parentdir)
from wechatter.config import CONFIG
import aiohttp
from sanic import Sanic, response
from sanic.response import text, HTTPResponse
from sanic.request import Request
from sanic_cors import CORS
import wechatter
import wechatter.utils
import wechatter.shared
import wechatter.utils.endpoints
import wechatter.shared.utils
import wechatter.shared.utils.io
from wechatter.model_training import train_async
from wechatter.shared.dialogue_config import (
DOCS_URL_TRAINING_DATA,
DEFAULT_MODELS_PATH,
DEFAULT_DOMAIN_PATH
)
# Root-logger configuration and the module-level Sanic application instance.
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
app = Sanic(__name__)
logging.info('wechatter loading...')
def configure_cors(
    app: Sanic, cors_origins: Union[Text, List[Text], None] = ""
) -> None:
    """Enable CORS on ``app`` for the given origins, with credentials support."""
    # Workaround so that socketio works with requests from other origins:
    # https://github.com/miguelgrinberg/python-socketio/issues/205#issuecomment-493769183
    config = app.config
    config.CORS_AUTOMATIC_OPTIONS = True
    config.CORS_SUPPORTS_CREDENTIALS = True
    config.CORS_EXPOSE_HEADERS = "filename"
    cors_resources = {r"/*": {"origins": cors_origins or ""}}
    CORS(app, resources=cors_resources, automatic_options=True)
# Remnant of an app-factory approach (create_app) kept for reference; the app
# is currently configured at module level instead.
# def create_app(
#     cors_origins: Union[Text, List[Text], None] = "*",
# ):
#     app = Sanic(__name__)
app.update_config(CONFIG)  # apply system configuration
# configure_cors(app, cors_origins)  # CORS currently disabled
@app.route("/")
async def test(request):
    """Landing page: greet the caller and report the engine version."""
    banner = 'Welcome to wechatty dialogue engine,Current version is:' + wechatter.__version__
    return text(banner)
@app.get('/version')
async def version(request: Request):
    """Return the running Wechatter version as plain text.

    :param request: incoming Sanic request (unused).
    :return: plain-text version banner.
    """
    version_banner = "Hello from Wechatter:" + wechatter.__version__
    return response.text(version_banner)
@app.post("/model/train")
async def train(request: Request) -> HTTPResponse:
    """Train a model from the JSON payload of the request.

    The payload is validated and written to disk by
    ``_training_payload_from_json``; training itself runs in
    ``train_async``.  On success the trained model archive is streamed
    back to the caller.

    :param request: incoming request whose JSON body holds the training
        configuration and data.
    :return: the trained model file as an HTTP file response.
    :raises ErrorResponse: if training finishes without producing a model.
    """
    training_payload = _training_payload_from_json(request)
    # The original wrapped this in ``except ErrorResponse as e: raise e``,
    # which is a no-op re-raise; any exception propagates identically without it.
    training_result = await train_async(**training_payload)
    if not training_result.model:
        raise ErrorResponse(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            "TrainingError",
            "Ran training, but it finished without a trained model.",
        )
    filename = os.path.basename(training_result.model)
    return await response.file(
        training_result.model,
        filename=filename,
    )
def _training_payload_from_json(request: Request) -> Dict[Text, Any]:
    """Extract the training payload from the request body.

    Every training artefact in the JSON body (config, NLU data, stories,
    responses, domain) is written into a fresh temporary directory so that
    ``train_async`` can consume them as regular files.

    :param request: incoming request whose JSON body carries the training data.
    :return: keyword arguments for ``train_async``.
    :raises ErrorResponse: if the payload is missing required keys.
    """
    logging.debug(
        "Extracting JSON payload with Markdown training data from request body."
    )
    request_payload = request.json
    _validate_json_training_payload(request_payload)
    # BUGFIX: the original used temp_dir = '' which wrote all training files
    # into the current working directory, so concurrent requests could clobber
    # each other. Use a real scratch directory as the docstring always intended.
    temp_dir = tempfile.mkdtemp()
    config_path = os.path.join(temp_dir, "config.yml")
    wechatter.shared.utils.io.write_text_file(request_payload["config"], config_path)
    if "nlu" in request_payload:
        nlu_path = os.path.join(temp_dir, "nlu.md")
        wechatter.shared.utils.io.write_text_file(request_payload["nlu"], nlu_path)
    if "stories" in request_payload:
        stories_path = os.path.join(temp_dir, "stories.md")
        wechatter.shared.utils.io.write_text_file(request_payload["stories"], stories_path)
    if "responses" in request_payload:
        responses_path = os.path.join(temp_dir, "responses.md")
        wechatter.shared.utils.io.write_text_file(
            request_payload["responses"], responses_path
        )
    domain_path = DEFAULT_DOMAIN_PATH
    if "domain" in request_payload:
        domain_path = os.path.join(temp_dir, "domain.yml")
        wechatter.shared.utils.io.write_text_file(request_payload["domain"], domain_path)
    if "model_name" in request_payload:  # caller-supplied model name
        # NOTE(review): extracted but never used -- confirm whether the name
        # should be forwarded to train_async.
        model_name = request_payload["model_name"]
    model_output_directory = str(temp_dir)
    if request_payload.get(
        "save_to_default_model_directory",
        wechatter.utils.endpoints.bool_arg(request, "save_to_default_model_directory", True),
    ):  # save into the default model directory unless explicitly disabled
        model_output_directory = DEFAULT_MODELS_PATH
    return dict(
        domain=domain_path,
        config=config_path,
        training_files=str(temp_dir),
        output=model_output_directory,
        force_training=request_payload.get(
            "force", wechatter.utils.endpoints.bool_arg(request, "force_training", False)
        ),
        dm_additional_arguments=_extract_dm_additional_arguments(request),
        nlu_additional_arguments=_extract_nlu_additional_arguments(request),
    )
def _validate_json_training_payload(rjs: Dict):
if "config" not in rjs:
raise ErrorResponse(
HTTPStatus.BAD_REQUEST,
"BadRequest",
"The training request is missing the required key `config`.",
{"parameter": "config", "in": "body"},
)
if "nlu" not in rjs and "stories" not in rjs:
raise ErrorResponse(
HTTPStatus.BAD_REQUEST,
"BadRequest",
"To train a Rasa model you need to specify at least one type of "
"training data. Add `nlu` and/or `stories` to the request.",
{"parameters": ["nlu", "stories"], "in": "body"},
)
if "stories" in rjs and "domain" not in rjs:
raise ErrorResponse(
HTTPStatus.BAD_REQUEST,
"BadRequest",
"To train a Rasa model with story training data, you also need to "
"specify the `domain`.",
{"parameter": "domain", "in": "body"},
)
# if "force" in rjs or "save_to_default_model_directory" in rjs:
# wechatter.shared.utils.io.raise_deprecation_warning(
# "Specifying 'force' and 'save_to_default_model_directory' as part of the "
# "JSON payload is deprecated. Please use the header arguments "
# "'force_training' and 'save_to_default_model_directory'.",
# docs=_docs("/api/http-api"),
# )
# if "model_name" in rjs
class ErrorResponse(Exception):
    """Common exception to handle failing API requests."""
    def __init__(
        self,
        status: Union[int, HTTPStatus],
        reason: Text,
        message: Text,
        details: Any = None,
        help_url: Optional[Text] = None,
    ) -> None:
        """Create the error.

        Args:
            status: The HTTP status code to return.
            reason: Short summary of the error.
            message: Detailed explanation of the error.
            details: Additional serializable details describing the error.
            help_url: URL where users can get further help (e.g. docs).
        """
        super().__init__()
        self.status = status
        # Serializable payload sent back to the API client.
        self.error_info = {
            "version": wechatter.__version__,
            "status": "failure",
            "message": message,
            "reason": reason,
            "details": details if details else {},
            "help": help_url,
            "code": status,
        }
        logging.error(message)
if __name__ == '__main__':
    # Serve on all interfaces, port 9015.
    # NOTE(review): auto_reload together with workers=4 is not supported by
    # every Sanic release -- confirm against the pinned Sanic version.
    app.run(host='0.0.0.0', port=9015, auto_reload=True, workers=4)
| 29 | 98 | 0.65238 |
import asyncio
import concurrent.futures
import logging
import multiprocessing
import os
import tempfile
import traceback
from collections import defaultdict
from functools import reduce, wraps
from inspect import isawaitable
from pathlib import Path
from http import HTTPStatus
from typing import (
Any,
Callable,
List,
Optional,
Text,
Union,
Dict,
TYPE_CHECKING,
NoReturn
)
from pathlib import Path
from wechatter.config import CONFIG
import aiohttp
from sanic import Sanic, response
from sanic.response import text, HTTPResponse
from sanic.request import Request
from sanic_cors import CORS
import wechatter
import wechatter.utils
import wechatter.shared
import wechatter.utils.endpoints
import wechatter.shared.utils
import wechatter.shared.utils.io
from wechatter.model_training import train_async
from wechatter.shared.dialogue_config import (
DOCS_URL_TRAINING_DATA,
DEFAULT_MODELS_PATH,
DEFAULT_DOMAIN_PATH
)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
app = Sanic(__name__)
logging.info('wechatter loading...')
def configure_cors(
app: Sanic, cors_origins: Union[Text, List[Text], None] = ""
) -> None:
TOMATIC_OPTIONS = True
app.config.CORS_SUPPORTS_CREDENTIALS = True
app.config.CORS_EXPOSE_HEADERS = "filename"
CORS(
app, resources={r"/*": {"origins": cors_origins or ""}}, automatic_options=True
)
app.update_config(CONFIG)
route("/")
async def test(request):
return text('Welcome to wechatty dialogue engine,Current version is:' + wechatter.__version__)
@app.get('/version')
async def version(request: Request):
return response.text(
"Hello from Wechatter:" + wechatter.__version__
)
@app.post("/model/train")
async def train(request: Request) -> HTTPResponse:
training_payload = _training_payload_from_json(request)
try:
training_result = await train_async(**training_payload)
if training_result.model:
filename = os.path.basename(training_result.model)
return await response.file(
training_result.model,
filename=filename,
)
else:
raise ErrorResponse(
HTTPStatus.INTERNAL_SERVER_ERROR,
"TrainingError",
"Ran training, but it finished without a trained model.",
)
except ErrorResponse as e:
raise e
def _training_payload_from_json(request: Request) -> Dict[Text, Any]:
logging.debug(
"Extracting JSON payload with Markdown training data from request body."
)
request_payload = request.json
_validate_json_training_payload(request_payload)
temp_dir= ''
config_path = os.path.join(temp_dir, "config.yml")
wechatter.shared.utils.io.write_text_file(request_payload["config"], config_path)
if "nlu" in request_payload:
nlu_path = os.path.join(temp_dir, "nlu.md")
wechatter.shared.utils.io.write_text_file(request_payload["nlu"], nlu_path)
if "stories" in request_payload:
stories_path = os.path.join(temp_dir, "stories.md")
wechatter.shared.utils.io.write_text_file(request_payload["stories"], stories_path)
if "responses" in request_payload:
responses_path = os.path.join(temp_dir, "responses.md")
wechatter.shared.utils.io.write_text_file(
request_payload["responses"], responses_path
)
domain_path = DEFAULT_DOMAIN_PATH
if "domain" in request_payload:
domain_path = os.path.join(temp_dir, "domain.yml")
wechatter.shared.utils.io.write_text_file(request_payload["domain"], domain_path)
if "model_name" in request_payload:
model_name = request_payload["model_name"]
model_output_directory = str(temp_dir)
if request_payload.get(
"save_to_default_model_directory",
wechatter.utils.endpoints.bool_arg(request, "save_to_default_model_directory", True),
):
model_output_directory = DEFAULT_MODELS_PATH
return dict(
domain=domain_path,
config=config_path,
training_files=str(temp_dir),
output=model_output_directory,
force_training=request_payload.get(
"force", wechatter.utils.endpoints.bool_arg(request, "force_training", False)
),
dm_additional_arguments=_extract_dm_additional_arguments(request),
nlu_additional_arguments=_extract_nlu_additional_arguments(request),
)
def _validate_json_training_payload(rjs: Dict):
if "config" not in rjs:
raise ErrorResponse(
HTTPStatus.BAD_REQUEST,
"BadRequest",
"The training request is missing the required key `config`.",
{"parameter": "config", "in": "body"},
)
if "nlu" not in rjs and "stories" not in rjs:
raise ErrorResponse(
HTTPStatus.BAD_REQUEST,
"BadRequest",
"To train a Rasa model you need to specify at least one type of "
"training data. Add `nlu` and/or `stories` to the request.",
{"parameters": ["nlu", "stories"], "in": "body"},
)
if "stories" in rjs and "domain" not in rjs:
raise ErrorResponse(
HTTPStatus.BAD_REQUEST,
"BadRequest",
"To train a Rasa model with story training data, you also need to "
"specify the `domain`.",
{"parameter": "domain", "in": "body"},
)
class ErrorResponse(Exception):
def __init__(
self,
status: Union[int, HTTPStatus],
reason: Text,
message: Text,
details: Any = None,
help_url: Optional[Text] = None,
) -> None:
self.error_info = {
"version": wechatter.__version__,
"status": "failure",
"message": message,
"reason": reason,
"details": details or {},
"help": help_url,
"code": status,
}
self.status = status
logging.error(message)
super(ErrorResponse, self).__init__()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9015, auto_reload=True, workers=4)
| true | true |
f7fea30701fb8857c9dbeef81f81df1976216321 | 413 | py | Python | tools/clang/pylib/clang/PRESUBMIT.py | DamieFC/chromium | 54ce2d3c77723697efd22cfdb02aea38f9dfa25c | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-10-18T02:33:40.000Z | 2020-10-18T02:33:40.000Z | tools/clang/pylib/clang/PRESUBMIT.py | DamieFC/chromium | 54ce2d3c77723697efd22cfdb02aea38f9dfa25c | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2021-05-17T16:28:52.000Z | 2021-05-21T22:42:22.000Z | tools/clang/pylib/clang/PRESUBMIT.py | DamieFC/chromium | 54ce2d3c77723697efd22cfdb02aea38f9dfa25c | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Tell the presubmit framework to run these checks under Python 3.
USE_PYTHON3 = True
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook: run the unit tests in this directory on commit."""
  unit_test_results = input_api.canned_checks.RunUnitTestsInDirectory(
      input_api, output_api, '.', [r'^.+_test\.py$'])
  return list(unit_test_results)
| 24.294118 | 72 | 0.728814 |
USE_PYTHON3 = True
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', [ r'^.+_test\.py$']))
return results
| true | true |
f7fea3e2a5044ecd3601076e3fcad84a661b03cb | 5,915 | py | Python | submission_version/code/bing.py | jingchengdeng/Satellite-Aerial-Image-Retrieval | 7ed8316fae20ea9e07db12051b1ee7a895c50f3b | [
"MIT"
] | null | null | null | submission_version/code/bing.py | jingchengdeng/Satellite-Aerial-Image-Retrieval | 7ed8316fae20ea9e07db12051b1ee7a895c50f3b | [
"MIT"
] | null | null | null | submission_version/code/bing.py | jingchengdeng/Satellite-Aerial-Image-Retrieval | 7ed8316fae20ea9e07db12051b1ee7a895c50f3b | [
"MIT"
] | null | null | null | import sys, math, logging, json, mpmath, io
from urllib import request, error
from PIL import Image
class Bing():
    """Client for the Bing Maps aerial imagery REST service.

    Fetches the tile metadata once on construction; ``run()`` then downloads
    every 256x256 tile block covering a bounding box at the best available
    zoom level and stitches them into a single image on disk.
    """
    def __init__(self):
        # Metadata endpoint and Bing Maps API key.
        self.dataPath = "http://dev.virtualearth.net/REST/v1/Imagery/Metadata/Aerial"
        self.myKey = "AvHyA8kJY_qxax2ciNHnW61HCz6GxqM6wZr2sLIMYCpsVYSb8BVmJCauCSKLS_5p"
        # Populated by getData() from the metadata response.
        self.imageUrl = None
        self.imageUrlSubdomains = None
        self.imageWidth = None
        self.imageHeight = None
        self.zoomMax = None
        self.numSubdomains = None
        # Earth radius in meters (spherical Mercator projection).
        self.radius = 6378137
        # Tile geometry and the maximum number of tiles allowed per request.
        self.blockWidth = 256
        self.blockHeight = 256
        self.maxBlocksAllowed = 256
        self.getData()
    def getJson(self, url):
        """GET *url* and decode the body as JSON; exits the process on URL errors."""
        try:
            return json.loads(request.urlopen(url).read())
        except error.URLError as e:
            # BUGFIX: use lazy %s formatting -- e.reason may be an exception
            # object and e.code is an int, so the original "+" concatenation
            # raised TypeError instead of logging the error.
            if hasattr(e, "reason"):
                logging.error("Failed in getting data: %s", e.reason)
            elif hasattr(e, "code"):
                logging.error("Error code: %s", e.code)
            sys.exit()
        except Exception:
            logging.error("Wrong request.")
            return
    def getData(self):
        """Load the imagery metadata: tile URL template, subdomains, max zoom."""
        res = self.getJson("%s?key=%s" % (self.dataPath, self.myKey))
        if "errorDetails" not in res:
            data = res["resourceSets"][0]["resources"][0]
            self.imageWidth = data["imageWidth"]
            self.imageHeight = data["imageHeight"]
            self.imageUrl = data["imageUrl"]
            self.imageUrlSubdomains = data["imageUrlSubdomains"]
            self.zoomMax = data["zoomMax"]
            self.numSubdomains = len(self.imageUrlSubdomains)
        else:
            logging.error("Unknown response")
            sys.exit()
        return
    def lat2y(self, lat):
        """Project a latitude in degrees to Mercator y (meters)."""
        # sec(x) == 1 / cos(x); plain math keeps the result a float and
        # drops the mpmath dependency the original pulled in for sec().
        rad = math.radians(lat)
        return math.log(math.tan(rad) + 1.0 / math.cos(rad)) * self.radius
    def lon2x(self, lon):
        """Project a longitude in degrees to Mercator x (meters)."""
        return math.radians(lon) * self.radius
    def y2lat(self, y):
        """Inverse Mercator: y in meters back to latitude in degrees."""
        return math.degrees(math.atan(math.sinh(y / self.radius)))
    def x2lon(self, x):
        """Inverse Mercator: x in meters back to longitude in degrees."""
        return math.degrees(x / self.radius)
    def quadKey(self, x, y, zoom):
        """Encode tile coordinates (x, y) at *zoom* as a Bing Maps quadkey."""
        res = ""
        # Build the key from the least significant bit up, then reverse so the
        # most significant zoom level comes first, as the quadkey scheme requires.
        for i in range(1, zoom + 1):
            digit = 0
            mask = 1 << (i - 1)
            if x & mask:
                digit += 1
            if y & mask:
                digit += 2
            res += str(digit)
        return res[::-1]
    def getMaxZoom(self, coord):
        """Find the highest zoom level with real imagery at the bbox center.

        coord is (lon1, lat1, lon2, lat2); the probe walks down from zoomMax
        until the metadata reports a vintage (i.e. actual imagery exists).
        """
        y_1, x_1 = self.lat2y(coord[1]), self.lon2x(coord[0])
        y_2, x_2 = self.lat2y(coord[3]), self.lon2x(coord[2])
        center_lat, center_lon = self.y2lat((y_1 + y_2) / 2), self.x2lon((x_1 + x_2) / 2)
        zoom = self.zoomMax
        while True:
            res = self.getJson(self.dataPath + '/' + str(center_lat) + ',' + str(center_lon) + "?zl=" + str(zoom) + '&key=' + self.myKey)
            if 'errorDetails' not in res:
                data = res["resourceSets"][0]["resources"][0]
                if data["vintageEnd"]: break
            else:
                logging.error("Unknown response")
                sys.exit()
            zoom -= 1
        return zoom
    def toBlockCoords(self, lat, lon, zoom):
        """Map a (lat, lon) to integer tile block coordinates (row, col) at *zoom*."""
        perimeter = math.pi * self.radius * 2
        blockPerAxis = 2 ** zoom
        y, x = self.lat2y(lat), self.lon2x(lon)
        # Shift the origin to the top-left corner of the world map.
        norm_lat = perimeter / 2 - y
        norm_lon = perimeter / 2 + x
        y = norm_lat * blockPerAxis / perimeter
        x = norm_lon * blockPerAxis / perimeter
        return math.floor(y), math.floor(x)
    def getBlockUrl(self, zoom, x, y, counter):
        """Build the tile download URL, round-robining over the CDN subdomains."""
        quadkey = self.quadKey(x, y, zoom)
        url = self.imageUrl.replace("{subdomain}", self.imageUrlSubdomains[counter % self.numSubdomains])
        url = url.replace("{quadkey}", quadkey)
        return url
    def getBlockImage(self, zoom, x, y, counter):
        """Download a single tile block; exits the process on failure."""
        url = self.getBlockUrl(zoom, x, y, counter)
        try:
            image = request.urlopen(url).read()
        except Exception as e:
            logging.error(e)
            logging.error("Unable to download image with url:" + url)
            sys.exit()
        return image
    def merge(self, left, right, top, bottom, zoom, result, numBlocks):
        """Download all blocks in the tile rectangle and paste them into *result*."""
        counter = 0
        for x in range(left, right + 1):
            for y in range(top, bottom + 1):
                print("Image " + str(counter + 1) + " in processing, " + str(numBlocks) + " in total.")
                block = self.getBlockImage(zoom, x, y, counter)
                image = Image.open(io.BytesIO(block))
                result.paste(image, ((x - left) * self.blockWidth, (y - top) * self.blockHeight))
                counter += 1
        return
    def run(self, input):
        """Retrieve and save the stitched aerial image.

        input is (lon1, lat1, lon2, lat2, output_filename); the stitched
        image is written under ../result/.
        """
        zoom = self.getMaxZoom(input)
        low_lat, high_lat = (input[1], input[3]) if input[1] < input[3] else (input[3], input[1])
        low_lon, high_lon = (input[0], input[2]) if input[0] < input[2] else (input[2], input[0])
        bottom, left = self.toBlockCoords(low_lat, low_lon, zoom)
        top, right = self.toBlockCoords(high_lat, high_lon, zoom)
        numBlocksOnX = right - left + 1
        numBlocksOnY = bottom - top + 1
        numBlocks = numBlocksOnX * numBlocksOnY
        if numBlocks > self.maxBlocksAllowed:
            # BUGFIX: the original concatenated the int numBlocks into the
            # message with "+", which raised TypeError before logging.
            logging.error("Block number limit exceeded by %d.", numBlocks)
            sys.exit()
        result = Image.new("RGB", (numBlocksOnX * self.blockWidth, numBlocksOnY * self.blockHeight), (0, 0, 0, 0))
        self.merge(left, right, top, bottom, zoom, result, numBlocks)
        fileName = input[4]
        result.save("../result/" + fileName)
        print("Completed.")
        return
from urllib import request, error
from PIL import Image
class Bing():
def __init__(self):
self.dataPath = "http://dev.virtualearth.net/REST/v1/Imagery/Metadata/Aerial"
self.myKey = "AvHyA8kJY_qxax2ciNHnW61HCz6GxqM6wZr2sLIMYCpsVYSb8BVmJCauCSKLS_5p"
self.imageUrl = None
self.imageUrlSubdomains = None
self.imageWidth = None
self.imageHeight = None
self.zoomMax = None
self.numSubdomains = None
self.radius = 6378137
self.blockWidth = 256
self.blockHeight = 256
self.maxBlocksAllowed = 256
self.getData()
def getJson(self, url):
try:
return json.loads(request.urlopen(url).read())
except error.URLError as e:
if hasattr(e, "reason"):
logging.error("Faild in getting data: " + e.reason)
elif hasattr(e, "code"):
logging.error("Error code: " + e.code)
sys.exit()
except:
logging.error("Wrong request.")
return
def getData(self):
res = self.getJson("%s?key=%s" % (self.dataPath, self.myKey))
if "errorDetails" not in res:
data = res["resourceSets"][0]["resources"][0]
self.imageWidth = data["imageWidth"]
self.imageHeight = data["imageHeight"]
self.imageUrl = data["imageUrl"]
self.imageUrlSubdomains = data["imageUrlSubdomains"]
self.zoomMax = data["zoomMax"]
self.numSubdomains = len(self.imageUrlSubdomains)
else:
logging.error("Unknown response")
sys.exit()
return
def lat2y(self, lat):
return math.log(math.tan(math.radians(lat)) + mpmath.sec(math.radians(lat))) * self.radius
def lon2x(self, lon):
return math.radians(lon) * self.radius
def y2lat(self, y):
return math.degrees(math.atan(math.sinh(y / self.radius)))
def x2lon(self, x):
return math.degrees(x / self.radius)
def quadKey(self, x, y, zoom):
res = ""
for i in range(1, zoom + 1):
digit = 0
mask = 1 << (i - 1)
if x & mask:
digit += 1
if y & mask:
digit += 2
res += str(digit)
return res[::-1]
def getMaxZoom(self, coord):
y_1, x_1 = self.lat2y(coord[1]), self.lon2x(coord[0])
y_2, x_2 = self.lat2y(coord[3]), self.lon2x(coord[2])
center_lat, center_lon = self.y2lat((y_1 + y_2)/ 2), self.x2lon((x_1 + x_2)/ 2)
zoom = self.zoomMax
while True:
res = self.getJson(self.dataPath + '/' + str(center_lat) + ',' + str(center_lon) + "?zl=" + str(zoom) + '&key=' + self.myKey)
if 'errorDetails' not in res:
data = res["resourceSets"][0]["resources"][0]
if data["vintageEnd"]: break
else:
logging.error("Unknown response")
sys.exit()
zoom -= 1
return zoom
def toBlockCoords(self, lat, lon, zoom):
perimeter = math.pi * self.radius * 2
blockPerAxis = 2 ** zoom
y, x = self.lat2y(lat), self.lon2x(lon)
norm_lat = perimeter/2 - y
norm_lon = perimeter/2 + x
y = norm_lat * blockPerAxis / perimeter
x = norm_lon * blockPerAxis / perimeter
return math.floor(y), math.floor(x)
def getBlockUrl(self, zoom, x, y, counter):
quadkey = self.quadKey(x, y, zoom)
url = self.imageUrl.replace("{subdomain}", self.imageUrlSubdomains[counter % self.numSubdomains])
url = url.replace("{quadkey}", quadkey)
return url
def getBlockImage(self, zoom, x, y, counter):
url = self.getBlockUrl(zoom, x, y, counter)
try:
image = request.urlopen(url).read()
except Exception as e:
logging.error(e)
logging.error("Unable to download image with url:" + url)
sys.exit()
return image
def merge(self, left, right, top, bottom, zoom, result, numBlocks):
counter = 0
for x in range(left, right + 1):
for y in range(top, bottom + 1):
print("Image " + str(counter+1) + " in processing, " + str(numBlocks) + " in total.")
block = self.getBlockImage(zoom, x, y, counter)
image = Image.open(io.BytesIO(block))
result.paste(image, ((x - left) * self.blockWidth, (y - top) * self.blockHeight))
counter += 1
return
def run(self, input):
zoom = self.getMaxZoom(input)
low_lat, high_lat = (input[1], input[3]) if input[1] < input[3] else (input[3], input[1])
low_lon, high_lon = (input[0], input[2]) if input[0] < input[2] else (input[2], input[0])
bottom, left = self.toBlockCoords(low_lat, low_lon, zoom)
top, right = self.toBlockCoords(high_lat, high_lon, zoom)
numBlocksOnX = right - left + 1
numBlocksOnY = bottom - top + 1
numBlocks = numBlocksOnX * numBlocksOnY
if numBlocks > self.maxBlocksAllowed:
logging.error("Block number limit exceed by " + numBlocks + ".")
sys.exit()
result = Image.new("RGB", (numBlocksOnX * self.blockWidth, numBlocksOnY * self.blockHeight), (0, 0, 0, 0))
self.merge(left, right, top, bottom, zoom, result, numBlocks)
fileName = input[4]
result.save("../result/" + fileName)
print("Completed.")
return | true | true |
f7fea4e9ff72d29e31375a1c9c38bc540a5a038b | 10,277 | py | Python | job.py | DIR-LAB/delayscheduling | 12539897f8de3052d97fa99239ccc0a01766cc87 | [
"MIT"
] | null | null | null | job.py | DIR-LAB/delayscheduling | 12539897f8de3052d97fa99239ccc0a01766cc87 | [
"MIT"
] | null | null | null | job.py | DIR-LAB/delayscheduling | 12539897f8de3052d97fa99239ccc0a01766cc87 | [
"MIT"
] | null | null | null | import re
import sys
import math
class Job:
    """A single job parsed from one Standard Workload Format (SWF) log line.

    The 18 whitespace-separated SWF fields are, in order:
      1. job number, 2. submit time (s), 3. wait time (s), 4. run time (s),
      5. number of allocated processors, 6. average CPU time used (s),
      7. used memory (KB per processor), 8. requested number of processors,
      9. requested time (s), 10. requested memory (KB per processor),
      11. status (1 completed / 0 failed / 5 cancelled; -1 for models),
      12. user id, 13. group id, 14. executable (application) number,
      15. queue number, 16. partition number, 17. preceding job number,
      18. think time from preceding job (s).
    See the SWF specification for full field semantics. Jobs compare and
    hash by job_id.
    """
    def __init__(self, line = "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"):
        """Parse one SWF record.

        :param line: a whitespace-separated 18-field SWF line; the default
            yields an all-zero placeholder job.
        """
        line = line.strip()
        s_array = re.split("\\s+", line)
        self.job_id = int(s_array[0])
        self.submit_time = int(s_array[1])
        self.wait_time = int(s_array[2])
        self.run_time = int(s_array[3])
        self.skip_time = 0
        # A missing run time (-1) is replaced with a small positive stub.
        if self.run_time == -1:
            self.run_time = 10
        self.number_of_allocated_processors = int(s_array[4])
        self.average_cpu_time_used = float(s_array[5])
        self.used_memory = int(s_array[6])
        # "requested" and "allocated" processor counts are often mixed up in
        # logs; use the larger of the two for both fields.
        self.request_number_of_processors = int(s_array[7])
        self.number_of_allocated_processors = max(self.number_of_allocated_processors, self.request_number_of_processors)
        self.request_number_of_processors = self.number_of_allocated_processors
        self.request_number_of_nodes = -1
        # For models the request time may be empty (-1); fall back to run time.
        self.request_time = int(s_array[8])
        if self.request_time == -1:
            self.request_time = self.run_time
        # Force the actual runtime as the (perfectly accurate) request time.
        self.request_time = self.run_time
        self.request_memory = int(s_array[9])
        self.status = int(s_array[10])
        self.user_id = int(s_array[11])
        self.group_id = int(s_array[12])
        self.executable_number = int(s_array[13])
        self.queue_number = int(s_array[14])
        try:
            self.partition_number = int(s_array[15])
        except ValueError:
            self.partition_number = 0
        self.proceeding_job_number = int(s_array[16])
        self.think_time_from_proceeding_job = int(s_array[17])
        self.random_id = self.submit_time
        # Scheduling state, filled in later by the scheduler.
        self.scheduled_time = -1
        self.allocated_machines = None
        # SLURM-style priority factors (all unset by default).
        self.slurm_in_queue_time = 0
        self.slurm_age = 0
        self.slurm_job_size = 0.0
        self.slurm_fair = 0.0
        self.slurm_partition = 0
        self.slurm_qos = 0
        self.slurm_tres_cpu = 0.0
    def __eq__(self, other):
        # BUGFIX: return NotImplemented for foreign types instead of raising
        # AttributeError, so `job == something_else` degrades gracefully.
        if not isinstance(other, Job):
            return NotImplemented
        return self.job_id == other.job_id
    def __lt__(self, other):
        if not isinstance(other, Job):
            return NotImplemented
        return self.job_id < other.job_id
    def __hash__(self):
        return hash(self.job_id)
    def __str__(self):
        return "J["+str(self.job_id)+"]-["+str(self.request_number_of_processors)+"]-["+str(self.submit_time)+"]-["+str(self.request_time)+"]"
    def __feature__(self):
        # Compact feature vector used as scheduler model input.
        return [self.submit_time, self.request_number_of_processors, self.request_time,
                self.user_id, self.group_id, self.executable_number, self.queue_number]
class Workloads:
    """Container for all jobs of an SWF trace file.

    Parses the trace at *path*, tracks trace-wide maxima (processors,
    runtime, user/group ids, ...) and exposes the jobs sorted by job id.
    """
    def __init__(self, path):
        self.all_jobs = []
        self.max = 0
        self.max_exec_time = 0
        self.min_exec_time = sys.maxsize
        self.max_job_id = 0
        self.max_requested_memory = 0
        self.max_user_id = 0
        self.max_group_id = 0
        self.max_executable_number = 0
        self.max_job_id = 0
        self.max_nodes = 0
        self.max_procs = 0
        with open(path) as trace:
            for raw_line in trace:
                if raw_line.startswith(";"):
                    # SWF header comments may announce machine-wide limits.
                    if raw_line.startswith("; MaxNodes:"):
                        self.max_nodes = int(raw_line.split(":")[1].strip())
                    if raw_line.startswith("; MaxProcs:"):
                        self.max_procs = int(raw_line.split(":")[1].strip())
                    continue
                job = Job(raw_line)
                self.max_exec_time = max(self.max_exec_time, job.run_time)
                self.min_exec_time = min(self.min_exec_time, job.run_time)
                self.max_requested_memory = max(self.max_requested_memory, job.request_memory)
                self.max_user_id = max(self.max_user_id, job.user_id)
                self.max_group_id = max(self.max_group_id, job.group_id)
                self.max_executable_number = max(self.max_executable_number, job.executable_number)
                self.all_jobs.append(job)
                self.max = max(self.max, job.request_number_of_processors)
        # A MaxProcs of 0 means nodes and processors are the same thing.
        if self.max_procs == 0:
            self.max_procs = self.max_nodes
        print("Max Allocated Processors:", str(self.max), "max node:", self.max_nodes,
              "max procs:", self.max_procs,
              "max execution time:", self.max_exec_time,
              "max user id:", self.max_user_id)
        self.all_jobs.sort(key=lambda job: job.job_id)
    def size(self):
        """Number of jobs in the trace."""
        return len(self.all_jobs)
    def reset(self):
        """Clear per-run scheduling state on every job."""
        for job in self.all_jobs:
            job.scheduled_time = -1
            job.skip_time = 0
    def __getitem__(self, item):
        return self.all_jobs[item]
if __name__ == "__main__":
    # Smoke test: load the Lublin-256 trace (path relative to this file's
    # expected location in the repo) and print a few summary features.
    print ("Loading the workloads...")
    load = Workloads("../../../data/lublin_256.swf")
    print ("Finish loading the workloads...", type(load[0]))
    print (load.max_nodes, load.max_procs)
    print (load[0].__feature__())
    print (load[1].__feature__())
    # empty_job_str = "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"
    # empty_job = Job(empty_job_str)
    # print (empty_job.job_id, empty_job.feature())
| 53.248705 | 391 | 0.659336 | import re
import sys
import math
class Job:
    """
    One job record parsed from a single line of a Standard Workload Format
    (SWF) trace. The line must contain the 18 standard SWF fields separated
    by whitespace; a few normalizations are applied in __init__.
    """
    def __init__(self, line = "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"):
        fields = re.split("\\s+", line.strip())
        self.job_id = int(fields[0])
        self.submit_time = int(fields[1])
        self.wait_time = int(fields[2])
        self.run_time = int(fields[3])
        self.skip_time = 0
        # An unknown (-1) runtime gets a small positive placeholder.
        if self.run_time == -1:
            self.run_time = 10
        allocated_procs = int(fields[4])
        self.average_cpu_time_used = float(fields[5])
        self.used_memory = int(fields[6])
        requested_procs = int(fields[7])
        # Reconcile allocated vs requested processors: both end up as the max.
        self.number_of_allocated_processors = max(allocated_procs, requested_procs)
        self.request_number_of_processors = self.number_of_allocated_processors
        self.request_number_of_nodes = -1
        # For the model, request_time might be empty (-1); fall back to run_time.
        self.request_time = int(fields[8])
        if self.request_time == -1:
            self.request_time = self.run_time
        # Force request_time to the actual runtime (most accurate estimate).
        self.request_time = self.run_time
        self.request_memory = int(fields[9])
        self.status = int(fields[10])
        self.user_id = int(fields[11])
        self.group_id = int(fields[12])
        self.executable_number = int(fields[13])
        self.queue_number = int(fields[14])
        try:
            self.partition_number = int(fields[15])
        except ValueError:
            # Non-numeric partition field defaults to 0.
            self.partition_number = 0
        self.proceeding_job_number = int(fields[16])
        self.think_time_from_proceeding_job = int(fields[17])
        self.random_id = self.submit_time
        # Scheduler bookkeeping, filled in later by the simulator.
        self.scheduled_time = -1
        self.allocated_machines = None
        self.slurm_in_queue_time = 0
        self.slurm_age = 0
        self.slurm_job_size = 0.0
        self.slurm_fair = 0.0
        self.slurm_partition = 0
        self.slurm_qos = 0
        self.slurm_tres_cpu = 0.0
    def __eq__(self, other):
        # Identity of a job is its job_id only.
        return self.job_id == other.job_id
    def __lt__(self, other):
        return self.job_id < other.job_id
    def __hash__(self):
        return hash(self.job_id)
    def __str__(self):
        return "J[{0}]-[{1}]-[{2}]-[{3}]".format(self.job_id,
                                                 self.request_number_of_processors,
                                                 self.submit_time,
                                                 self.request_time)
    def __feature__(self):
        # Feature vector used by learning-based schedulers.
        return [self.submit_time, self.request_number_of_processors, self.request_time,
                self.user_id, self.group_id, self.executable_number, self.queue_number]
class Workloads:
    """
    Container for all jobs parsed from an SWF trace file at 'path', sorted by
    job_id, plus the max/min statistics observed across the trace.
    """
    def __init__(self, path):
        """
        Parse the SWF file at 'path'. Header comment lines (starting with ';')
        are scanned for the MaxNodes/MaxProcs declarations; every other line is
        parsed as one Job while per-field max/min statistics are accumulated.
        """
        self.all_jobs = []
        self.max = 0
        self.max_exec_time = 0
        self.min_exec_time = sys.maxsize
        self.max_job_id = 0
        self.max_requested_memory = 0
        self.max_user_id = 0
        self.max_group_id = 0
        self.max_executable_number = 0
        # NOTE(review): max_job_id is initialized twice (also above) and never
        # updated during parsing - looks like leftover code; confirm intent.
        self.max_job_id = 0
        self.max_nodes = 0
        self.max_procs = 0
        with open(path) as fp:
            for line in fp:
                # SWF header comments start with ';'; pull cluster size info.
                if line.startswith(";"):
                    if line.startswith("; MaxNodes:"):
                        self.max_nodes = int(line.split(":")[1].strip())
                    if line.startswith("; MaxProcs:"):
                        self.max_procs = int(line.split(":")[1].strip())
                    continue
                j = Job(line)
                # Track trace-wide extremes for normalization by consumers.
                if j.run_time > self.max_exec_time:
                    self.max_exec_time = j.run_time
                if j.run_time < self.min_exec_time:
                    self.min_exec_time = j.run_time
                if j.request_memory > self.max_requested_memory:
                    self.max_requested_memory = j.request_memory
                if j.user_id > self.max_user_id:
                    self.max_user_id = j.user_id
                if j.group_id > self.max_group_id:
                    self.max_group_id = j.group_id
                if j.executable_number > self.max_executable_number:
                    self.max_executable_number = j.executable_number
                self.all_jobs.append(j)
                if j.request_number_of_processors > self.max:
                    self.max = j.request_number_of_processors
        # Some traces only declare MaxNodes; fall back to it for MaxProcs.
        if self.max_procs == 0:
            self.max_procs = self.max_nodes
        # NOTE(review): on Python 2 this prints a tuple; on Python 3 it prints
        # space-separated values - confirm which interpreter is intended.
        print ("Max Allocated Processors:", str(self.max), "max node:", self.max_nodes,
               "max procs:", self.max_procs,
               "max execution time:", self.max_exec_time,
               "max user id:", self.max_user_id)
        self.all_jobs.sort(key=lambda job: job.job_id)
    def size(self):
        """Return the number of jobs loaded from the trace."""
        return len(self.all_jobs)
    def reset(self):
        """Clear per-run scheduler state on every job so the trace can be replayed."""
        for job in self.all_jobs:
            job.scheduled_time = -1
            job.skip_time = 0
    def __getitem__(self, item):
        # Indexing delegates to the job list (sorted by job_id).
        return self.all_jobs[item]
if __name__ == "__main__":
    # Manual smoke test: parse a sample SWF trace and print a few parsed jobs.
    # NOTE(review): the relative data path assumes a specific checkout layout;
    # confirm '../../../data/lublin_256.swf' exists before running.
    print ("Loading the workloads...")
    load = Workloads("../../../data/lublin_256.swf")
    print ("Finish loading the workloads...", type(load[0]))
    print (load.max_nodes, load.max_procs)
    print (load[0].__feature__())
    print (load[1].__feature__())
| true | true |
f7fea50826973fa6874dd4ae8cf65067ae41473d | 540 | py | Python | setup.py | pu-bioinformatics/PythonMiniProject-urbanslug | 6ea908d671a7df0b4f6b23d352d20defd8bae3c5 | [
"MIT"
] | null | null | null | setup.py | pu-bioinformatics/PythonMiniProject-urbanslug | 6ea908d671a7df0b4f6b23d352d20defd8bae3c5 | [
"MIT"
] | null | null | null | setup.py | pu-bioinformatics/PythonMiniProject-urbanslug | 6ea908d671a7df0b4f6b23d352d20defd8bae3c5 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = "Amino",
version = "0.0.0.1",
description = "A collection of modules for performing simple PDB file analysis.",
long_description=long_description,
long_description_content_type="text/markdown",
author = "Njagi Mwaniki",
author_email= "njagi@urbanslug.com",
setup_requires=["pytest-runner"],
tests_require=["pytest"],
packages=find_packages(),
test_suite="pdb.tests"
)
| 28.421053 | 85 | 0.694444 | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = "Amino",
version = "0.0.0.1",
description = "A collection of modules for performing simple PDB file analysis.",
long_description=long_description,
long_description_content_type="text/markdown",
author = "Njagi Mwaniki",
author_email= "njagi@urbanslug.com",
setup_requires=["pytest-runner"],
tests_require=["pytest"],
packages=find_packages(),
test_suite="pdb.tests"
)
| true | true |
f7fea5380d6b55b8daacfad11c52c84be1a0a325 | 66,896 | py | Python | nephoria/testcase_utils/cli_test_runner.py | bigschwan/nephology | b09cc34c6796a1737159b2976526a7dbb530b364 | [
"BSD-2-Clause"
] | 5 | 2016-08-31T06:51:18.000Z | 2018-04-25T20:50:25.000Z | nephoria/testcase_utils/cli_test_runner.py | bigschwan/nephology | b09cc34c6796a1737159b2976526a7dbb530b364 | [
"BSD-2-Clause"
] | 49 | 2016-04-19T07:01:46.000Z | 2017-11-17T23:57:18.000Z | nephoria/testcase_utils/cli_test_runner.py | bigschwan/nephology | b09cc34c6796a1737159b2976526a7dbb530b364 | [
"BSD-2-Clause"
] | 14 | 2016-04-14T17:01:35.000Z | 2021-07-13T13:35:38.000Z | """
This is the base class for any test case to be included in the Nephoria repo. It should include any
functionality that we expect to be repeated in most of the test cases that will be written.
These wrapper/harness classes are primarily intended to help provide a common means of running and
reporting 'system level tests', although should work as well for "unit level tests". The intention
is that tests can be built upon other test methods.
For the purposes of this module, a 'system' focused test suite is the sum of a what would
otherwise be many 'unit tests' in order to produce an end to end flow that might mimic a
users's experience and test interactions of the system.
This module is also intended to provide the most commonly shared test attributes amongst the
existing tests when this was written:
cli options/arguments,
logging to stdout, err and a file provided via cli arg,
option to specify a portions or a single unit by providing a list of units to be looked up
by name, method, etc..
gathered/summary of results and metrics,
sharing of test artifacts (to save time over suite execution),
ability to skip tests based on dependencies such as a previous test unit's status, etc..
ability to share/extend testcase classes to build upon other test case classes/suites.
basic config file parsing (arguments in file are merged with cli args)
Currently included:
- Debug method
- Allow parameterized test cases
- Method to run test case
- Run a list of test cases
- Start, end and current status messages
- Enum class for possible test results
TBD:
- Metric tracking (need to define what metrics we want, how they are to be reported)
- Use docstring as description for test case categorizing, and tracking over
time (ie via remote DB)
##################################################################################################
# Sample test and output: #
##################################################################################################
See README.md for more info
"""
import errno
import inspect
import time
import argparse
import re
import sys
import os
import types
import traceback
import random
import string
import yaml
import json
from collections import OrderedDict
from prettytable import PrettyTable
from cloud_utils.log_utils.eulogger import Eulogger
from cloud_utils.log_utils import markup, ForegroundColor, BackGroundColor, TextStyle
from cloud_utils.log_utils import red, green, blue, yellow, cyan, get_traceback, get_terminal_size
from nephoria.testcase_utils.euconfig import EuConfig
import StringIO
import copy
def _get_method_args_kwargs(method):
args = []
kwargdict = OrderedDict()
spec = inspect.getargspec(method)
if spec.defaults:
kwarg_index = len(spec.args) - len(spec.defaults)
args = spec.args[0:kwarg_index]
kwargs = spec.args[kwarg_index:]
for value in spec.defaults:
kwargdict[kwargs.pop(0)] = value
else:
args = spec.args
return args, kwargdict
class TestResult():
    """
    Namespace of the standardized result states a test unit can be in after
    (or before) execution.
    """
    not_run = "NOT_RUN"
    passed = "PASSED"
    failed = "FAILED"
##################################################################################################
# Convenience class to run wrap individual methods to run, store and access results.
# A testunit represents an individual function or method to be run by the CliTestRunner class.
##################################################################################################
class TestUnit(object):
    '''
    Convenience class to wrap an individual test method so it can be run and
    have its result, runtime and error information stored and accessed in a
    standard way by the CliTestRunner.

    :param method: the underlying callable this testunit wraps and runs
    :param html_anchors: if True, create html anchor ids for report links
    :param test_unit_name: optional display name, defaults to method.__name__
    :param test_logger: optional logger used to debug-log creation info
    :param test_unit_description: optional description, defaults to one
                                  derived from the method's docstring
    :param args: positional arguments to feed to 'method' at run time
    :param kwargs: keyword arguments to feed to 'method' at run time
    '''

    def __init__(self, method, html_anchors=False, test_unit_name=None, test_logger=None,
                 test_unit_description=None, *args, **kwargs):
        if not hasattr(method, '__call__'):
            raise ValueError('TestUnit method is not callable: "{0}"'.format(method))
        self.method = method
        self.method_possible_args = CliTestRunner.get_meth_arg_names(self.method)
        self.args = args
        self.kwargs = kwargs
        self.name = test_unit_name or str(method.__name__)
        self.result = TestResult.not_run
        self.time_to_run = 0
        self._html_link = None
        self._info = None
        self.anchor_id = None
        self.error_anchor_id = None
        self.error = ""
        if html_anchors:
            # Build a reasonably unique html anchor id from the current time,
            # the test name and a short random suffix.
            self.anchor_id = str(str(time.ctime()) + self.name + "_" +
                                 str(''.join(random.choice(string.ascii_uppercase +
                                                           string.ascii_lowercase +
                                                           string.digits)
                                             for x in range(3))) + "_").replace(" ", "_")
            self.error_anchor_id = "ERROR_" + self.anchor_id
        self.description = test_unit_description
        if self.description is None:
            self.description = self.get_test_method_description()
        self.eof = False
        if test_logger:
            debug_buf = 'Creating TestUnit: "{0}" with args:'.format(self.name)
            for count, thing in enumerate(args):
                debug_buf += '{0}. {1}'.format(count, thing)
            for name, value in kwargs.items():
                debug_buf += '{0} = {1}'.format(name, value)
            test_logger.debug(debug_buf)

    @classmethod
    def create_testcase_from_method(cls, method, test_logger=None, eof=False, *args, **kwargs):
        '''
        Creates a TestUnit object from a method and a set of arguments to be
        fed to that method when it is run.

        :param method: the underlying method for the TestUnit to wrap
        :param test_logger: optional logger passed through to the TestUnit
        :param eof: bool; whether failure of this unit should end the test run
        :param args: positional arguments to be fed to the given 'method'
        '''
        testunit = TestUnit(method, *args, test_logger=test_logger, **kwargs)
        testunit.eof = eof
        return testunit

    @property
    def info(self):
        '''
        Lazily-built dict describing this testunit: name, args/kwargs, source
        file, tags derived from the path under 'testcases/', any yaml metadata
        embedded in the method's docstring, and the latest results.
        '''
        if self._info is None:
            info = {'name': self.name, 'args': list(self.args), 'kwargs': self.kwargs, 'tags': [],
                    'description': None, 'file': "", 'results': {}}
            try:
                # NOTE: im_func/func_code are Python2-only attributes; on
                # Python 3 this raises and is reported to stderr below.
                info['file'] = self.method.im_func.func_code.co_filename
            except Exception as E:
                sys.stderr.write('{0}\nFailed to get name for method:{1}, err:{2}'
                                 .format(get_traceback(), self.name, E))
            try:
                # Derive tags from the directory names under 'testcases/'.
                dirmatch = re.search('testcases/(.*)/.*py', info['file'])
                if dirmatch:
                    testdir = dirmatch.group(1)
                    for tag in testdir.split('/'):
                        info['tags'].append(tag)
            except Exception as E:
                sys.stderr.write('{0}\nFailed to get testdir for method:{1}, err:{2}'
                                 .format(get_traceback(), self.name, E))
            info.update(self._parse_docstring_for_yaml())
            info['results'] = self.get_results()
            self._info = info
        return self._info

    def get_results(self):
        '''Return a dict summarizing the current status, elapsed time and error.'''
        results = {'status': self.result, 'elapsed': self.time_to_run, 'date': time.asctime(),
                   'error': self.error}
        return results

    def set_kwarg(self, kwarg, val):
        '''Set/overwrite a single keyword argument fed to the method at run time.'''
        self.kwargs[kwarg] = val

    def _parse_docstring_for_yaml(self):
        '''
        Parse any yaml metadata embedded between "{yaml}" markers within the
        wrapped method's docstring. Returns the parsed dict, or {} on error.
        '''
        ydoc = {}
        try:
            doc = str(self.method.__doc__ or "")
            yaml_match = re.search("\{yaml\}((.|\n)*)\{yaml\}", doc)
            if yaml_match and len(yaml_match.groups()):
                ystr = yaml_match.group(1)
                ydoc = yaml.load(ystr) or {}
        except Exception as E:
            sys.stderr.write('{0}\nError parsing yaml from docstring, testmethod:"{1}",'
                             ' error:"{2}"\n'.format(get_traceback(), self.name, E))
            sys.stderr.flush()
        return ydoc

    def get_test_method_description(self, header=True):
        '''
        Attempts to derive a test unit description for the registered test
        method. Prefers a description found in the docstring's yaml metadata;
        otherwise uses the leading lines of the docstring, stopping at the
        first sphinx-style field line (eg ':param x:'). Used in debug output
        to show the intention of the method being run.

        :param header: if True, prefix the description with a method header
        '''
        if header:
            desc = "\nMETHOD:" + str(self.name) + ", TEST DESCRIPTION:\n"
        else:
            desc = ""
        # Attempt to get the description from the yaml metadata first
        info_desc = self.info.get('description', None)
        if info_desc:
            return "{0}{1}".format(desc, info_desc)
        else:
            ret = []
            try:
                doc = str(self.method.__doc__)
                if not doc:
                    try:
                        # Python2-only fallback (im_func/func_doc); harmless
                        # no-op on Python 3 because of the except below.
                        desc = desc + "\n".join(self.method.im_func.func_doc.title().splitlines())
                    except Exception:
                        pass
                    return desc
                for line in doc.splitlines():
                    line = line.lstrip().rstrip()
                    # Stop at the first ':field:' line. (Fixed: the previous
                    # pattern '^\s+:' could never match an lstripped line.)
                    if re.search('^\s*:', line):
                        break
                    ret.append(line)
            except Exception as e:
                print('get_test_method_description: error' + str(e))
            if ret:
                info_desc = "\n".join(ret)
                self.info['description'] = info_desc
                desc = desc + info_desc
            return desc

    def run(self, eof=None):
        '''
        Wrapper which attempts to run self.method, handling and recording
        failures and the elapsed run time. Returns the method's return value
        on success.

        :param eof: end-on-failure override; defaults to self.eof. When true,
                    any failure is re-raised to the caller.
        '''
        if eof is None:
            eof = self.eof
        for count, value in enumerate(self.args):
            print('ARG:{0}. {1}'.format(count, value))
        for key, value in self.kwargs.items():
            print('KWARG:{0} = {1}'.format(key, value))
        start = time.time()
        args = self.args or []
        kwargs = self.kwargs or {}
        try:
            ret = self.method(*args, **kwargs)
            self.result = TestResult.passed
            return ret
        except SkipTestException as se:
            print(red("TESTUNIT SKIPPED:" + str(self.name) + "\n" + str(se)))
            self.error = str(se)
            self.result = TestResult.not_run
        except Exception as e:
            buf = '\nTESTUNIT FAILED: ' + self.name
            if self.kwargs.get('html_anchors', False):
                buf += "<font color=red> Error in test unit '" + self.name + "':\n"
            if self.kwargs.get('html_anchors', False):
                buf += ' </font>'
            print('<a name="' + str(self.error_anchor_id) + '"></a>')
            # Fixed: the failure banner was previously built but never emitted.
            print(buf)
            print(red("{0}\n".format(get_traceback())))
            self.error = '{0}("{1}")'.format(e.__class__.__name__, e)
            self.result = TestResult.failed
            if eof:
                raise e
        finally:
            self.time_to_run = int(time.time() - start)
            self.info['results'] = self.get_results()
##################################################################################################
# Cli Test Runner/Wrapper Class
# Used to wrap, run and report results on a set of test functions, methods, or TestUnit objects.
# This class's convenience methods are intended to provide;
# - a common CLI
# - a common CLI arguments used when testing a cloud environment with Nephoria.
# - a common methods to inspect, run and track the results of the wrapped test methods/functions
# - common methods to display progress and results of the tests being run.
# - common entry/exit point for running test suites in a CI environment, etc..
##################################################################################################
class CliTestRunner(object):
#####################################################################################
# List of dicts/kwargs to be used to fed to
# arparse.add_argument() to build additional cli args.
# The intention here is to help enforce common cli arguments across individual tests,
# as well as help test authors from having to re-add/create these per test.
#####################################################################################
_DEFAULT_CLI_ARGS = {
'password': {'args': ["--password"],
'kwargs': {"help": "Password to use for machine root ssh access",
"default": None}},
'emi': {'args': ["--emi"],
'kwargs': {"help": "pre-installed emi id which to execute these "
"nephoria_unit_tests against",
"default": None}},
'zone': {'args': ["--zone"],
'kwargs': {"help": "Zone to use in this test",
"default": None}},
'vmtype': {'args': ["--vmtype"],
'kwargs': {"help": "Virtual Machine Type to use in this test",
"default": "c1.medium"}},
'clc': {'args': ["--clc"],
'kwargs': {"help": "Address of Machine hosting CLC services",
"default": None}},
'log_level': {'args': ["--log-level"],
'kwargs': {"help": "log level for stdout logging",
"default": 'DEBUG'}},
'test_account': {'args': ['--test-account'],
'kwargs': {"help": "Cloud account name to use with test controller",
"default": "testrunner"}},
'test_user': {'args': ['--test-user'],
'kwargs': {"help": "Cloud user name to use with test controller",
"default": "admin"}},
'region_domain': {'args': ['--region'],
'kwargs': {'help': 'Region domain to run this test in',
'default': None}},
'access_key': {'args': ['--access-key'],
'kwargs': {'help': 'Access key to use during test',
'default': None}},
'secret_key': {'args': ['--secret-key'],
'kwargs': {'help': 'Secret key to use during test',
'default': None}},
'log_file': {'args': ['--log-file'],
'kwargs': {"help": "file path to log to (in addition to stdout",
"default": None}},
'log_file_level': {'args': ['--log-file-level'],
'kwargs': {"help": "log level to use when logging to '--log-file'",
"default": "DEBUG"}},
'test_list': {'args': ['--test-list'],
'kwargs': {"help": "comma or space delimited list of test names to run",
"default": None}},
'test_regex': {'args': ['--test-regex'],
'kwargs': {'help': 'regex to use when creating the list of local test '
'methods to run.'
'Will use this regex in a search of the method name',
'default': None}},
'environment_file': {'args': ['--environment-file'],
'kwargs': {"help": "Environment file that describes Eucalyptus topology,"
"e.g Environment file that was used by Calyptos.",
"default": None}},
'dry_run': {'args': ['--dry-run'],
'kwargs': {'help': 'Prints test runlist info and exit. '
'Default is json to stdout, see below for formats and '
'location options. A higher log level can also be provided '
'to quiet down any other output'
'Argument format:'
' json/yaml/nephoria:filepath'
'Example#: json:/tmp/testinfo.json ' ,
'nargs': "?",
'default': False}},
'no_clean': {'args': ['--no-clean'],
'kwargs': {'help': 'Flag, if provided will not run the clean method on exit',
'action': 'store_true',
'default': False}}
}
_CLI_DESCRIPTION = "CLI TEST RUNNER"
def __init__(self, name=None, description=None, **kwargs):
"""
Cli Test Runner Class
:param name: Name user to identifiy this test suite
:param description: Description to be provided to the CLI
:param kwargs: Any arguments to be passed to the parser at runtime to supplement
any arguments provided by the cli, and/or any config files.
These kwargs will end up a attributes of self.args.
"""
self.name = name or self.__class__.__name__
# create parser
self.parser = argparse.ArgumentParser(prog=self.name, description=self._CLI_DESCRIPTION)
self.pre_init()
# create cli options from class dict
for argname, arg_dict in self._DEFAULT_CLI_ARGS.iteritems():
cli_args = arg_dict.get('args')
cli_kwargs = arg_dict.get('kwargs')
self.parser.add_argument(*cli_args, **cli_kwargs)
# Combine CLI provided args with any runtime values form **kwargs, and/or values
# found in a provided config file path
self.get_args(runtime_kwargs=kwargs)
self._testlist = []
log_level = getattr(self.args, 'log_level', 'INFO')
log_file = getattr(self.args, 'log_file', None)
log_file_level = getattr(self.args, 'log_file_level', "DEBUG")
self.html_anchors = False
self.log = Eulogger(identifier=self.name, stdout_level=log_level,
logfile=log_file, logfile_level=log_file_level)
# set the date format for the logger
for h in self.log.parent.handlers:
if h == self.log.stdout_handler:
h.formatter.datefmt = "%m-%d %H:%M:%S"
break
self._term_width = 110
height, width = get_terminal_size()
if width < self._term_width:
self._term_width = width
self.post_init()
self.show_self()
def pre_init(self, *args, **kwargs):
"""
Additional items to be run towards the beginning of init()
"""
pass
def post_init(self, *args, **kwargs):
"""
Additional items to be run at the end of init.
"""
pass
def clean_method(self):
"""
This method should be implemented per Test Class. This method will be called by default
during the test run method(s). 'no_clean_on_exit' set by cli '--no-clean' will prevent
this default method from being called.
"""
raise Exception("Clean_method was not implemented. Was run_list using clean_on_exit?")
def get_default_userhome_config(self, fname='nephoria.conf'):
'''
Description: Attempts to fetch the file 'fname' from the current user's home dir.
Returns path to the user's home dir default nephoria config file.
:type fname: string
:param fname: the nephoria default config file name
:rtype: string
:returns: string representing the path to 'fname', the default nephoria conf file.
'''
try:
def_path = os.getenv('HOME') + '/.nephoria/' + str(fname)
except:
return None
try:
os.stat(def_path)
return def_path
except:
self.log.debug("Default config not found:" + str(def_path))
return None
def show_self(self):
main_pt = PrettyTable([yellow('TEST CASE INFO', bold=True)])
main_pt.border = False
pt = PrettyTable(['KEY', 'VALUE'])
pt.header = False
pt.align = 'l'
pt.add_row([blue("NAME"), self.name])
pt.add_row([blue("TEST LIST"), self._testlist])
pt.add_row([blue('ENVIRONMENT FILE'), self.args.environment_file])
main_pt.add_row([pt])
self.log.info("\n{0}\n".format(main_pt))
self.show_args()
##############################################################################################
# Create 'TestUnit' obj methods
##############################################################################################
def create_testunit_from_method(self, method, *args, **kwargs):
'''
Description: Convenience method calling EutesterTestUnit.
Creates a EutesterTestUnit object from a method and set of arguments to be
fed to that method
:type method: method
:param method: The underlying method for this object to wrap, run and provide
information on
:type eof: boolean
:param eof: Boolean to indicate whether this testunit should cause a test list to end of
failure
:type autoarg: boolean
:param autoarg: Boolean to indicate whether to autopopulate this testunit with values from
global testcase.args
:type args: list of positional arguments
:param args: the positional arguments to be fed to the given testunit 'method'
:type kwargs: list of keyword arguements
:param kwargs: list of keyword
:rtype: EutesterTestUnit
:returns: EutesterTestUnit object
'''
eof = False
autoarg = True
methvars = self.get_meth_arg_names(method)
# Pull out value relative to this method, leave in any that are intended to be passed
# through
if 'autoarg' in kwargs:
if 'autoarg' in methvars:
autoarg = kwargs['autoarg']
else:
autoarg = kwargs.pop('autoarg')
if 'eof' in kwargs:
if 'eof' in methvars:
eof = kwargs['eof']
else:
eof = kwargs.pop('eof')
# Only pass the arg if we need it otherwise it will print with all methods/testunits
if self.html_anchors:
testunit = TestUnit(method, *args, test_logger=self.log,
html_anchors=self.html_anchors, **kwargs)
else:
testunit = TestUnit(method, *args, test_logger=self.log, **kwargs)
testunit.eof = eof
# if autoarg, auto populate testunit arguements from local testcase.args namespace values
if autoarg:
self.populate_testunit_with_args(testunit)
return testunit
def create_testunit_by_name(self, name, obj=None, eof=True, autoarg=True, test_logger=None,
*args, **kwargs):
'''
Description: Attempts to match a method name contained with object 'obj', and create a
EutesterTestUnit object from that method and the provided positional as well as keyword
arguments provided.
:type name: string
:param name: Name of method to look for within instance of object 'obj'
:type obj: class instance
:param obj: Instance type, defaults to self testcase object
:type args: positional arguements
:param args: None or more positional arguments to be passed to method to be run
:type kwargs: keyword arguments
:param kwargs: None or more keyword arguements to be passed to method to be run
'''
eof = False
autoarg = True
obj = obj or self
test_logger = test_logger or self.log
try:
meth = getattr(obj, name)
except AttributeError as AE:
self.log.error('Could not create test unit for name:"{0}", err:"{1}"'.format(name, AE))
raise
methvars = self.get_meth_arg_names(meth)
# Pull out value relative to this method, leave in any that are intended to be
# passed through
if 'autoarg' in kwargs:
if 'autoarg' in methvars:
autoarg = kwargs['autoarg']
else:
autoarg = kwargs.pop('autoarg')
if 'eof' in kwargs:
if 'eof' in methvars:
eof = kwargs['eof']
else:
eof = kwargs.pop('eof')
if 'obj' in kwargs:
if 'obj' in methvars:
obj = kwargs['obj']
else:
obj = kwargs.pop('obj')
testunit = TestUnit(meth, *args, test_logger=test_logger, **kwargs)
testunit.eof = eof
# if autoarg, auto populate testunit arguements from local testcase.args namespace values
if autoarg:
self.populate_testunit_with_args(testunit)
return testunit
##############################################################################################
# Convenience methods to fetch current testunit by its name
##############################################################################################
def get_testunit_by_name(self, name):
for testunit in self._testlist:
if testunit.name == name:
return testunit
return None
##############################################################################################
# Convenience methods to fetch current testunit by its method
##############################################################################################
def get_testunit_by_method(self, method):
for testunit in self._testlist:
if testunit.method == method:
return testunit
return None
##############################################################################################
# Convenience methods to help inspect, convert, and run provided test functions/methods
##############################################################################################
    def populate_testunit_with_args(self, testunit, namespace=None):
        '''
        Checks a given test unit's available positional and keyword arg lists
        for matching values contained within the given namespace (by default
        the local testcase.args). If the testunit's underlying method has
        arguments matching the namespace provided, those values are applied
        to the kwargs used when running the testunit. Namespace values never
        overwrite values the testunit already carries in its args
        (positional) list or kwargs dict.

        :param testunit: TestUnit object for which the namespace values will
                         be applied
        :param namespace: namespace obj containing args/values to be applied
                          to the testunit; None (default) uses testcase.args
        '''
        self.log.debug(
            "Attempting to populate testunit:" + str(testunit.name) + ", with testcase.args...")
        args_to_apply = namespace or self.args
        if not args_to_apply:
            return
        testunit_obj_args = {}
        # copy the test unit's keyword args so we can track what is taken
        testunit_obj_args.update(copy.copy(testunit.kwargs))
        self.log.debug("Testunit keyword args:" + str(testunit_obj_args))
        # Get all the var names of the underlying method the testunit wraps
        method_args = self.get_meth_arg_names(testunit.method)
        # Bound methods carry an implicit 'self' at index 0; skip it.
        offset = 0 if isinstance(testunit.method, types.FunctionType) else 1
        self.log.debug("Got method args:" + str(method_args))
        # Record which method parameters are already filled by the testunit's
        # positional args, appending them to the known keyword arg mapping.
        for x, arg in enumerate(testunit.args):
            testunit_obj_args[method_args[x + offset]] = arg
        self.log.debug("test unit total args:" + str(testunit_obj_args))
        # Populate any global args which do not conflict with args already
        # contained within the test case; global testcase args take the
        # least precedence.
        for apply_val in args_to_apply._get_kwargs():
            for methvar in method_args:
                if methvar == apply_val[0]:
                    self.log.debug("Found matching arg for:" + str(methvar))
                    # Don't overwrite existing testunit args/kwargs that have already been assigned
                    if apply_val[0] in testunit_obj_args:
                        self.log.debug("Skipping populate because testunit already has this arg:" +
                                       str(methvar))
                        continue
                    # Append the cmdline arg value to the testunit's kwargs
                    testunit.set_kwarg(methvar, apply_val[1])
##############################################################################################
# Methods to format and write information on the test runlist
##############################################################################################
def _dump_output(self, output, filepath):
if not filepath:
print output
else:
filepath = os.path.abspath(filepath)
self.log.debug('Attempting to write test runlist info to:"{0}"'.format(filepath))
if not os.path.exists(os.path.dirname(filepath)):
try:
os.makedirs(os.path.dirname(filepath))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(filepath, "w") as dumpfile:
dumpfile.write(output)
def dump_test_info_yaml(self, testlist=None, filepath=None, printresults=True):
testlist = testlist or self._testlist
if not testlist:
self.log.warning('Test runlist is empty')
return
dumplist = []
for test in testlist:
dumplist.append(test.info)
output = yaml.dump(dumplist, default_flow_style=False, explicit_start=True)
if printresults:
self._dump_output(output, filepath)
return (0)
else:
return output
def dump_test_info_json(self, testlist=None, filepath=None, printresults=True):
testlist = testlist or self._testlist
if not testlist:
self.log.warning('Test runlist is empty')
return
dumplist = []
for test in testlist:
dumplist.append(test.info)
output = json.dumps(dumplist, indent=4)
if printresults:
self._dump_output(output, filepath)
return (0)
else:
return output
    def dump_test_info_nephoria(self, testlist=None, filepath=None, printresults=True):
        """
        Render the runlist in the traditional nephoria results-table format.
        When printresults is True the output is written via _dump_output()
        and 0 is returned; otherwise the formatted string is returned.
        """
        testlist = testlist or self._testlist
        if not testlist:
            self.log.warning('Test runlist is empty')
        # NOTE(review): unlike the yaml/json variants there is no early return
        # on an empty runlist - presumably intentional so the dry-run banner
        # still prints; confirm.
        output = "TEST LIST: NOT RUNNING DUE TO DRYRUN\n{0}\n" \
            .format(self.print_test_list_results(testlist=testlist,
                                                 descriptions=True,
                                                 printout=False))
        if printresults:
            self._dump_output(output, filepath)
            return (0)
        else:
            return output
    def handle_dry_run(self, testlist, printresults):
        """
        Handle the '--dry-run' cli option. The arg value may be a string of
        the form 'json|yaml|nephoria[:filepath]' selecting the dump format
        and an optional output file; a bare flag defaults to json on stdout.

        :param testlist: list of TestUnit objects to describe
        :param printresults: passed through to the chosen dump handler
        :returns: the chosen dump handler's return value
        """
        dry_run_arg = getattr(self.args, 'dry_run', False)
        if dry_run_arg is not False:
            filepath = None
            handler = self.dump_test_info_json
            # NOTE(review): 'basestring' is Python2-only; this branch would
            # raise NameError under Python 3 - confirm intended interpreter.
            if isinstance(dry_run_arg, basestring):
                args = str(dry_run_arg).split(':')
                try:
                    fmt = str(args[0]).strip()
                    if fmt == 'yaml':
                        handler = self.dump_test_info_yaml
                    elif fmt == 'nephoria':
                        handler = self.dump_test_info_nephoria
                    elif fmt == 'json':
                        handler = self.dump_test_info_json
                    else:
                        raise ValueError('Unknown format for dry_run:"{0}". Supported Values:'
                                         '"json, yaml, nephoria"'.format(args[0]))
                    # A missing ':filepath' part raises IndexError below and
                    # simply leaves filepath as None (stdout).
                    filepath = str(args[1]).strip()
                except IndexError:
                    pass
            return handler(testlist=testlist, filepath=filepath, printresults=printresults)
##############################################################################################
# "Run" test methods
##############################################################################################
    def run(self, testlist=None, eof=False, clean_on_exit=None, test_regex=None,
            printresults=True, force_dry_run=False):
        '''
        Description: wrapper to execute a list of test unit objects.

        :type testlist: list
        :param testlist: list of EutesterTestUnit objects to be run. When None, the
                         list is built either from the cli 'test_list' arg or from
                         this object's methods whose names start with 'test'.
        :type eof: boolean
        :param eof: Flag to indicate whether the run should exit on any failure.
                    If set to False it will exit only when a given test unit
                    fails and has its own eof flag set to True.
        :type clean_on_exit: boolean
        :param clean_on_exit: Flag to indicate if the clean method should be ran at
                              end of test list execution. Defaults to the inverse of
                              the 'no_clean' arg.
        :type test_regex: string
        :param test_regex: regex to be used against test methods found in this
                           class (ie methods prefixed with the word 'test'), or provided in
                           the test_list cli arg. Matching methods will be sorted
                           and added to the run list.
        :type printresults: boolean
        :param printresults: Flag to indicate whether or not to print a summary of results upon
                             completion.
        :type force_dry_run: boolean
        :param force_dry_run: when True, only dump the would-be run via handle_dry_run.
        :rtype: integer
        :returns: 0 when no test failed, 1 otherwise (None when nothing to run).
        '''
        regex = test_regex or self.args.test_regex
        # Forced dry-run wins; otherwise honor the cli/config 'dry_run' arg.
        if force_dry_run is True:
            dry_run = True
        else:
            dry_run = self.get_arg('dry_run')
        # Filter helper: keep only names matching the run regex (all names when
        # no regex was provided).
        def apply_regex(testnames):
            if not regex:
                return testnames
            else:
                new_list = []
                for testname in testnames:
                    if re.search(regex, testname):
                        new_list.append(testname)
                return new_list
        if clean_on_exit is None:
            clean_on_exit = not(getattr(self.args, 'no_clean', False))
        if testlist is None:
            # See if test names were provided via the command line. Match those to local methods
            # and run them
            if getattr(self.args, 'test_list', None):
                # Strip quotes, then accept comma- and/or space-separated names.
                self.args.test_list = re.sub("[\"']", "", str(self.args.test_list))
                test_names = str(self.args.test_list).replace(',', " ").split()
                test_names = apply_regex(test_names)
                testlist = []
                for test_name in test_names:
                    test_name = test_name.strip(',')
                    testlist.append(self.create_testunit_by_name(name=test_name,
                                                                 obj=self))
            else:
                # Get all the local methods which begin with the word 'test' and run those.
                # 'key' provides a natural sort (test2 before test10) by splitting
                # names into digit and non-digit chunks.
                def key(text):
                    return [(int(c) if c.isdigit() else c) for c in re.split('(\d+)', text)]
                testlist = []
                attr_names = []
                for name in dir(self):
                    if name.startswith('test'):
                        attr_names.append(name)
                attr_names = apply_regex(attr_names)
                for name in sorted(attr_names, key=key):
                    attr = getattr(self, name, None)
                    if hasattr(attr, '__call__'):
                        testlist.append(self.create_testunit_from_method(method=attr,
                                                                         test_unit_name=name))
        self._testlist = testlist
        if not self._testlist:
            self.log.warning('No tests were provided or found to run?')
            return None
        start = time.time()
        tests_ran = 0
        test_count = len(self._testlist)
        # Remember the logger identifier; each test temporarily replaces it with
        # a highlighted test name, restored after the test completes.
        orig_log_id = self.log.identifier
        if dry_run is not False:
            return self.handle_dry_run(self._testlist, printresults=printresults)
        try:
            for test in self._testlist:
                tests_ran += 1
                self.log.identifier = markup(test.name, markups=[ForegroundColor.WHITE,
                                                                 BackGroundColor.BG_BLACK,
                                                                 TextStyle.BOLD])
                self.print_test_unit_startmsg(test)
                try:
                    test.run(eof=eof or test.eof)
                except Exception, e:  # Python 2 except syntax; file is py2-only
                    self.log.debug('Testcase:' + str(test.name) + ' error:' + str(e))
                    # Re-raise only when either the run-level or per-test eof flag is set.
                    if eof or (not eof and test.eof):
                        self.endfailure(' TEST:"{0}" COMPLETE'.format(test.name))
                        raise e
                    else:
                        self.endfailure(' TEST:"{0}" COMPLETE '.format(test.name))
                else:
                    # No exception: banner color is chosen from the recorded result.
                    if test.result == TestResult.failed:
                        self.endfailure(' TEST:"{0}" COMPLETE'.format(test.name))
                    elif test.result == TestResult.not_run:
                        self.endnotrun(' TEST:"{0}" COMPLETE'.format(test.name))
                    elif test.result == TestResult.passed:
                        self.endsuccess(' TEST:"{0}" COMPLETE'.format(test.name))
                    else:
                        self.log.info(' TEST:"{0}" COMPLETE'.format(test.name))
                self.log.identifier = orig_log_id
                self.log.debug(self.print_test_list_short_stats(self._testlist))
        except:
            self.log.warning(red('Error in test runner...\n{0}'.format(get_traceback())))
            raise
        finally:
            # NOTE: the 'return' statements at the bottom of this finally block
            # execute even after a raise above, so the re-raised exception is
            # swallowed and the run still reports via the exit code.
            self.log.identifier = orig_log_id
            elapsed = int(time.time() - start)
            msgout = ('RUN TEST CASE LIST DONE:\nRan {0}/{1} nephoria_unit_tests in'
                      ' "{2}" seconds\n'.format(tests_ran, test_count, elapsed))
            if printresults:
                try:
                    self.log.debug("Printing pre-cleanup results:")
                    msgout += self.print_test_list_results(testlist=self._testlist, printout=False)
                    self.status(msgout)
                except:
                    pass
            try:
                # Optionally run the cleanup method as one more test unit so it
                # shows up in the final results table.
                if clean_on_exit:
                    cleanunit = self.create_testunit_from_method(self.clean_method)
                    self._testlist.append(cleanunit)
                    try:
                        self.print_test_unit_startmsg(cleanunit)
                        cleanunit.run()
                    except Exception, e:
                        out = StringIO.StringIO()
                        traceback.print_exception(*sys.exc_info(), file=out)
                        out.seek(0)
                        self.log.debug("Failure in cleanup: " + str(e) + "\n" + out.read())
                    if printresults:
                        msgout = self.print_test_list_results(testlist=self._testlist,
                                                              printout=False)
                        self.status(msgout)
            except Exception as E:
                self.log.warning('{0}\nIgnoring Error:"{1}"'.format(get_traceback(), E))
            self._testlist = copy.copy(self._testlist)
            # Tally the final pass/fail/not-run counts for the exit status.
            passed = 0
            failed = 0
            not_run = 0
            for test in self._testlist:
                if test.result == TestResult.passed:
                    passed += 1
                if test.result == TestResult.failed:
                    failed += 1
                if test.result == TestResult.not_run:
                    not_run += 1
            total = passed + failed + not_run
            print "passed:" + str(passed) + " failed:" + str(failed) + " not_run:" + str(
                not_run) + " total:" + str(total)
            if failed:
                return (1)
            else:
                return (0)
def run_test_list_by_name(self, list, eof=None):
unit_list = []
for test in list:
unit_list.append(self.create_testunit_by_name(test))
# Run the EutesterUnitTest objects
return self.run(unit_list, eof=eof)
def run_method_by_name(self, name, obj=None, *args, **kwargs):
'''
Description: Find a method within an instance of obj and run that method with either
args/kwargs provided or any self.args which match the methods varname.
:type name: string
:param name: Name of method to look for within instance of object 'obj'
:type obj: class instance
:param obj: Instance type, defaults to self testcase object
:type args: positional arguements
:param args: None or more positional arguments to be passed to method to be run
:type kwargs: keyword arguments
:param kwargs: None or more keyword arguements to be passed to method to be run
'''
obj = obj or self
meth = getattr(obj, name)
return self.do_with_args(meth, *args, **kwargs)
##############################################################################################
# CLI parser and test argument inspection/manipulation methods
##############################################################################################
def get_args(self, use_cli=True, file_sections=[], runtime_kwargs=None, verbose=True):
'''
Description: Method will attempt to retrieve all command line arguments presented
through local testcase's 'argparse' methods, as well as retrieve all EuConfig file
arguments. All arguments will be combined into a single namespace object held locally
at 'testcase.args'. Note: cli arg 'config' must be provided for config file valus to be
store in self.args.
:type use_cli: boolean
:param use_cli: Boolean to indicate whether or not to create and read from a cli
argparsing object
:type use_default_file: boolean
:param use_default_files: Boolean to indicate whether or not to read default config file
at $HOME/.nephoria/nephoria.conf (not indicated by cli)
:type sections: list
:param sections: list of EuConfig sections to read configuration values from, and store
in self.args.
:type runtime_kwargs: dict
:param runtime_kwargs: dict used to populate arg values (in addition to cli and/or files)
:rtype: arparse.namespace obj
:returns: namespace object with values from cli and config file arguements
'''
configfile = None
args = None
# build out a namespace object from the config file first
cf = argparse.Namespace()
# Setup/define the config file block/sections we intend to read from
confblocks = file_sections or [self.name, 'global']
required = []
for action in self.parser._actions:
if action.required:
required.append(action)
sys_args = sys.argv[1:]
has_cli_value = []
if required and sys_args:
try:
for action in required:
for optstring in action.option_strings:
if optstring in sys_args:
has_cli_value.append(action)
except Exception as E:
print 'argstring:"{0}"'.format(sys_args)
raise
for action in has_cli_value:
required.remove(action)
for action in required:
self.parser._actions.remove(action)
if use_cli:
# first get command line args to see if there's a config file
cliargs = self.parser.parse_args(args=sys_args)
# if a config file was passed, combine the config file and command line args into a
# single namespace object
if cliargs:
# Check to see if there's explicit config sections to read
# if a file or list of config files is specified add it to our list...
# legacy support for config, configfile config_file arg names...
config_file = getattr(cliargs, 'config_file', None)
# store config block list for debug purposes
cf.__setattr__('configsections', copy.copy(confblocks))
# create euconfig configparser objects from each file.
if config_file:
self.config_file = EuConfig(filename=configfile)
# Now iterate through remaining config block in file and add to args...
for section in confblocks:
if self.config_file.config.has_section(section):
for item in self.config_file.config.items(section):
cf.__setattr__(str(item[0]), item[1])
else:
self.config_file = None
if cliargs:
# Now make sure any conflicting args provided on the command line take precedence
# over config file args
for val in cliargs._get_kwargs():
if (val[0] not in cf) or (val[1] is not None):
cf.__setattr__(str(val[0]), val[1])
args = cf
for arg_name, value in runtime_kwargs.iteritems():
setattr(args, arg_name, value)
# Check to see if arguments required by the parser were provided by the runtime kwargs or
# from any arguments read from a config file. Then process these values per their
# respective parser actions to enforce any formatting or rules of the parser action
missing_required = []
for action in required:
if action not in self.parser._actions:
self.parser._actions.append(action)
if not hasattr(args, action.dest):
missing_required.extend(action.option_strings)
if missing_required:
message = 'missing required arguments: "{0}"'.format(", ".join(missing_required))
self.parser.error(message)
# Reprocess all the arguments to enforce rule set by the parser actions in the case
# an argument value was provided by the runtime kwargs or a config file.
for action in self.parser._actions:
if hasattr(args, action.dest):
setattr(args, action.dest, self.parser._get_value(action,
getattr(args, action.dest)))
self.args = args
return args
def get_pretty_args(self, testunit):
'''
Description: Returns a string buf containing formated arg:value for printing later
:type: testunit: Eutestcase.eutestertestunit object
:param: testunit: A testunit object for which the namespace args will be used
:rtype: string
:returns: formated string containing args and their values.
'''
buf = "\nEnd on Failure:" + str(testunit.eof)
buf += "\nPassing ARGS:"
if not testunit.args and not testunit.kwargs:
buf += '\"\"\n'
else:
buf += "\n---------------------\n"
varnames = self.get_meth_arg_names(testunit.method)
if testunit.args:
for count, arg in enumerate(testunit.args):
buf += str(varnames[count + 1]) + " : " + str(arg) + "\n"
if testunit.kwargs:
for key in testunit.kwargs:
buf += str(key) + " : " + str(testunit.kwargs[key]) + "\n"
buf += "---------------------\n"
return buf
def has_arg(self, arg):
'''
Description: If arg is present in local testcase args namespace, will
return True, else False
:type arg: string
:param arg: string name of arg to check for.
:rtype: boolean
:returns: True if arg is present, false if not
'''
arg = str(arg)
if hasattr(self, 'args'):
if self.args and (arg in self.args):
return True
return False
def get_arg(self, arg):
'''
Description: Fetchs the value of an arg within the local testcase args namespace.
If the arg does not exist, None will be returned.
:type arg: string
:param arg: string name of arg to get.
:rtype: value
:returns: Value of arguement given, or None if not found
'''
if self.has_arg(arg):
return getattr(self.args, str(arg))
return None
def add_arg(self, arg, value):
'''
Description: Adds an arg 'arg' within the local testcase args namespace and assigns
it 'value'.
If arg exists already in testcase.args, then an exception will be raised.
:type arg: string
:param arg: string name of arg to set.
:type value: value
:param value: value to set arg to
'''
if self.has_arg(arg):
raise Exception("Arg" + str(arg) + 'already exists in args')
else:
self.args.__setattr__(arg, value)
def set_arg(self, arg, value):
'''
Description: Sets an arg 'arg' within the local testcase args namespace to 'value'.
If arg does not exist in testcase.args, then it will be created.
:type arg: string
:param arg: string name of arg to set.
:type value: value
:param value: value to set arg to
'''
if self.has_arg(arg):
new = argparse.Namespace()
for val in self.args._get_kwargs():
if arg != val[0]:
new.__setattr__(val[0], val[1])
new.__setattr__(arg, value)
self.args = new
else:
self.args.__setattr__(arg, value)
def show_args(self, args=None):
'''
Description: Prints args names and values for debug purposes.
By default will use the local testcase.args, else args can be provided.
:type args: namespace object
:param args: namespace object to be printed,by default None will print local
testcase's args.
'''
if args is None:
args = self.args
if not args:
return
headers= [yellow('TEST ARGS', bold=True), yellow('VALUE', bold=True)]
pt = PrettyTable(headers)
pt.align = 'l'
pt.max_width[headers[0]] = 30
pt.max_width[headers[1]] = 80
for key, val in args._get_kwargs():
pt.add_row([blue(key), val])
self.log.info("\n{0}\n".format(pt))
def do_with_args(self, meth, *args, **kwargs):
'''
Description: Convenience method used to wrap the provided instance_method, function, or
object type 'meth' and populate meth's positional and keyword arguments with the local
testcase.args created from the CLI and/or config file, as well as the *args and **kwargs
variable length arguments passed into this method.
:type meth: method
:param meth: A method or class initiator to wrapped/populated with this testcase objects
namespace args
:type args: positional arguments
:param args: None or more values representing positional arguments to be passed to 'meth'
when executed. These will take precedence over local testcase obj
namespace args
:type kwargs: keyword arguments
:param kwargs: None or more values reprsenting keyword arguments to be passed to 'meth'
when executed. These will take precedence over local testcase obj namespace
args and positional args
'''
if not hasattr(self, 'args'):
raise Exception(
'TestCase object does not have args yet, see: get_args and setup_parser options')
tc_args = self.args
cmdargs = {}
f_code = self.get_method_fcode(meth)
vars = self.get_meth_arg_names(meth)
self.log.debug("do_with_args: Method:" + str(f_code.co_name) + ", Vars:" + str(vars))
# first populate matching method args with our global testcase args...
for val in tc_args._get_kwargs():
for var in vars:
if var == val[0]:
cmdargs[var] = val[1]
# Then overwrite/populate with any given positional local args...
for count, arg in enumerate(args):
cmdargs[vars[count + 1]] = arg
# Finall overwrite/populate with any given key word local args...
for name, value in kwargs.items():
for var in vars:
if var == name:
cmdargs[var] = value
self.log.debug(
'create_with_args: running ' + str(f_code.co_name) + "(" +
str(cmdargs).replace(':', '=') + ")")
return meth(**cmdargs)
    @classmethod
    def get_method_fcode(cls, meth):
        """Return the code object backing `meth`.

        Accepts an object/class instance (uses its __init__'s code), an
        instance method, or a plain function. The isinstance checks run in
        sequence and a later match overwrites an earlier one. Python 2 only:
        relies on types.ObjectType, im_func and func_code, none of which
        exist on Python 3.

        :param meth: object, method, or function to inspect
        :returns: the located code object
        :raises Exception: when no code object could be found
        """
        f_code = None
        # Check for object/class init...
        if isinstance(meth, types.ObjectType):
            try:
                f_code = meth.__init__.__func__.func_code
            except:
                pass
        # Check for instance method...
        if isinstance(meth, types.MethodType):
            try:
                f_code = meth.im_func.func_code
            except:
                pass
        # Check for function...
        if isinstance(meth, types.FunctionType):
            try:
                f_code = meth.func_code
            except:
                pass
        if not f_code:
            raise Exception(
                "get_method_fcode: Could not find function_code for passed method of type:" +
                str(type(meth)))
        return f_code
@classmethod
def get_meth_arg_names(cls, meth):
'''
Description: Return varnames within argcount
:type:meth: method
:param: meth: method to fetch arg names for
:rtype: list
:returns: list of strings representing the varnames within argcount for this method
'''
fcode = cls.get_method_fcode(meth)
varnames = fcode.co_varnames[0:fcode.co_argcount]
return varnames
    @classmethod
    def get_testunit_method_arg_dict(cls, testunit):
        """Build a name->value dict of the arguments `testunit.method` will
        effectively be called with: signature defaults, overlaid with the
        test unit's kwargs, then its positional args.

        :param testunit: test unit wrapping a function or method plus its
                         stored args/kwargs
        :returns: dict mapping arg name to value ('<!None!>' when no value
                  is known for the arg)
        """
        argdict = {}
        spec = inspect.getargspec(testunit.method)
        if isinstance(testunit.method, types.FunctionType):
            argnames = spec.args
        else:
            # Non-function (method): drop the first arg name (the instance arg).
            argnames = spec.args[1:len(spec.args)]
        defaults = spec.defaults or []
        # Initialize the return dict with a sentinel meaning "no value known".
        for argname in argnames:
            argdict[argname] = '<!None!>'
        # Apply signature defaults from the end of argnames; pop() also trims
        # argnames so only the purely-positional names remain for the loop below.
        for x in xrange(0, len(defaults)):
            argdict[argnames.pop()] = defaults[len(defaults) - x - 1]
        # Then overwrite those with the testunit's kwargs values
        for kwarg in testunit.kwargs:
            argdict[kwarg] = testunit.kwargs[kwarg]
        # then add the positional args in if they apply...
        for count, value in enumerate(testunit.args):
            argdict[argnames[count]] = value
        return argdict
@classmethod
def format_testunit_method_arg_values(cls, testunit):
buf = testunit.name + "("
argdict = CliTestRunner.get_testunit_method_arg_dict(testunit)
for arg in argdict:
buf += str(arg) + "=" + str(argdict[arg]) + ", "
buf = buf.rstrip(',')
buf += ")"
return buf
##############################################################################################
# Convenience methods for formatting test output
##############################################################################################
def status(self, msg, markups=None):
'''
Description: Convenience method to format debug output
:type msg: string
:param msg: The string to be formated and printed via self.debug
:param color: asci markup color to use, or None
'''
if markups is None:
markups = [32]
if markups:
msg = markup(msg, markups=markups)
pt = PrettyTable(['status'])
pt.header = False
pt.align = 'l'
pt.padding_width = 0
pt.vrules = 2
pt.add_row([msg])
self.log.info("\n{0}\n".format(pt))
#########################################################################
# Messages formats used at the start and end of a specific test unit run
#########################################################################
def startmsg(self, msg=""):
self.status(msg, markups=[ForegroundColor.WHITE, BackGroundColor.BG_BLUE, TextStyle.BOLD])
def endsuccess(self, msg=""):
msg = "- SUCCESS - {0}".format(msg).center(self._term_width)
self.status(msg, markups=[ForegroundColor.WHITE, BackGroundColor.BG_GREEN, TextStyle.BOLD])
return msg
def endfailure(self, msg=""):
msg = "- FAILURE - {0}".format(msg).center(self._term_width)
self.status(msg, markups=[ForegroundColor.WHITE, BackGroundColor.BG_RED, TextStyle.BOLD])
return msg
def endnotrun(self, msg=""):
msg = "- NOT RUN - {0}".format(msg).center(self._term_width)
self.status(msg, markups=[ForegroundColor.WHITE, BackGroundColor.BG_MAGENTA,
TextStyle.BOLD])
return msg
########################################################################
# Message formats used when displaying test suite/list result summaries
########################################################################
def resultdefault(self, msg, printout=True):
msg = markup(msg, markups=[ForegroundColor.BLUE, BackGroundColor.BG_WHITE])
if printout:
self.log.debug(msg)
return msg
def resultfail(self, msg, printout=True):
msg = markup(msg, markups=[ForegroundColor.RED, BackGroundColor.BG_WHITE])
if printout:
self.log.debug(msg)
return msg
def resulterr(self, msg, printout=True):
msg = red(msg)
if printout:
self.log.debug(msg)
return msg
    def print_test_unit_startmsg(self, test):
        """
        Logs a message at the beginning of a specific test unit run containing information about
        the test to be run. TestUnits have their own description string which should help inform
        the user as to what the test is going to try to achieve and how.
        If the 'html_anchors' flag is set, an html anchor for this test unit's run is
        also emitted (stored on the unit as _html_link) so it can be linked to later.

        :param test: test unit obj providing name, description, anchor_id, args/kwargs
        """
        startbuf = ''
        if self.html_anchors:
            # Emit an anchor plus a styled div wrapper around the banner output.
            link = '<a name="' + str(test.anchor_id) + '"></a>\n'
            test._html_link = link
            startbuf += '<div id="myDiv" name="myDiv" title="Example Div Element" style="color: ' \
                        '#0900C4; font: Helvetica 12pt;border: 1px solid black;">'
            startbuf += str(link)
        header = "HEADER".ljust(110)
        pt = PrettyTable([header])
        pt.max_width = 105
        pt.header = False
        pt.align = 'l'
        # Banner body: test name, description, arg summary and the rendered call.
        buf = "STARTING TESTUNIT: {0}".format(test.name).ljust(self._term_width)
        argbuf = self.get_pretty_args(test)
        buf += str(test.description) + str(argbuf)
        buf += 'Running test method: "{0}"'.format(self.format_testunit_method_arg_values(test))
        pt.add_row([buf])
        startbuf += markup(pt, markups=[ForegroundColor.WHITE, BackGroundColor.BG_BLUE])
        if self.html_anchors:
            startbuf += '\n </div>'
        self.status(startbuf)
    def print_test_list_results(self, testlist=None, descriptions=False,
                                printout=True, printmethod=None):
        '''
        Description: Prints a formatted list of results for a list of test units.

        :type testlist: list
        :param testlist: list of test units; defaults to self._testlist
        :type descriptions: boolean
        :param descriptions: if true, include each test's description in the output
        :type printout: boolean
        :param printout: whether to print via printmethod (True) or return the
                         results table object instead (False)
        :type printmethod: method
        :param printmethod: method to use for printing; defaults to self.log.info
        '''
        main_header = yellow('TEST RESULTS FOR "{0}"'.format(self.name), bold=True)
        if testlist is None:
            testlist = self._testlist
        if not testlist:
            raise Exception("print_test_list_results, error: No Test list provided")
        printmethod = printmethod or self.log.info
        printmethod("Test list results for testcase:" + str(self.name))
        main_pt = PrettyTable([main_header])
        main_pt.align = 'l'
        main_pt.vrules = 2
        main_pt.hrules = 1
        for testunit in testlist:
            # Pick the markup colors per result so failures render red, etc...
            if testunit.result == TestResult.passed:
                markups = [ForegroundColor.BLUE, BackGroundColor.BG_WHITE]
            elif testunit.result == TestResult.not_run:
                markups = [ForegroundColor.BLACK, BackGroundColor.BG_WHITE]
            else:
                markups = [ForegroundColor.RED, BackGroundColor.BG_WHITE]
            # Size the per-test key/value table to the terminal (capped at
            # self._term_width).
            term_height, term_width = get_terminal_size()
            if term_width > self._term_width:
                term_width = self._term_width
            key_width = 12
            val_width = term_width - key_width - 6
            headers = ['KEY'.ljust(key_width, "-"), 'VALUE'.ljust(val_width, "-")]
            pt = PrettyTable(headers)
            pt.max_width[headers[0]] = key_width
            pt.max_width[headers[1]] = val_width
            pt.header = False
            pt.align = 'l'
            pt.vrules = 1
            pt.hrules = 2
            test_arg_string = self.format_testunit_method_arg_values(testunit)
            error_summary = None
            # For failed/not-run tests, show the first few lines of the error.
            if testunit.result == TestResult.failed:
                error_summary = "ERROR:({0})"\
                    .format("\n".join(str(testunit.error).splitlines()[0:3]))
            if testunit.result == TestResult.not_run:
                error_summary = 'NOT_RUN ({0}:{1})'\
                    .format(testunit.name, "\n".join(str(testunit.error).splitlines()[0:3]))
            pt.add_row(['RESULT:', str(testunit.result).ljust(val_width)])
            pt.add_row(['TEST NAME', testunit.name])
            pt.add_row(['TIME:', testunit.time_to_run])
            pt.add_row(['TEST ARGS:', test_arg_string])
            if descriptions:
                pt.add_row(['DESCRIPTION:', testunit.get_test_method_description(header=False)])
            pt.add_row(['OUTPUT:', error_summary])
            main_pt.add_row([markup(pt, markups=markups)])
        main_pt.add_row(["\n{0}\n".format(self.print_test_list_short_stats(testlist))])
        if printout:
            printmethod("\n{0}\n".format(main_pt))
        else:
            return main_pt
    def print_test_list_short_stats(self, list, printmethod=None):
        """Build (and optionally print) a one-row summary table of totals,
        per-result counts and elapsed time for the given test units.

        :param list: iterable of test units (shadows the builtin; name kept
                     for caller compatibility)
        :param printmethod: optional method used to print the table
        :returns: the summary table as a string wrapped in newlines
        """
        results = {}
        total = 0
        elapsed = 0
        # initialize a dict containing all the possible defined test results
        # NOTE: dir()[2:] assumes only __doc__/__module__ precede the result
        # attributes for this (old-style) class -- fragile if TestResult changes.
        for result_string in dir(TestResult)[2:]:
            results[getattr(TestResult, result_string)] = 0
        # increment values in results dict based upon result of each testunit in list
        try:
            for testunit in list:
                total += 1
                elapsed += testunit.time_to_run
                results[testunit.result] += 1
        except:
            # Dump the partial tally before re-raising to aid debugging.
            print results
            raise
        # Create tables with results summaries
        headers = ['TOTAL']
        results_row = [total]
        for field in results:
            headers.append(field.upper())
            results_row.append(results[field])
        headers.append('ELAPSED')
        results_row.append(elapsed)
        pt = PrettyTable(headers)
        pt.vrules = 2
        pt.add_row(results_row)
        main_header = yellow('LATEST RESULTS:', bold=True)
        main_pt = PrettyTable([main_header])
        main_pt.align = 'l'
        main_pt.padding_width = 0
        main_pt.border = False
        main_pt.add_row([str(pt)])
        if printmethod:
            printmethod(main_pt.get_string())
        return "\n{0}\n".format(main_pt)
def getline(self, len):
"""
Provide a string containing a line "---" of length len
:param len: integer
:return: string
"""
buf = ''
for x in xrange(0, len):
buf += '-'
return buf
class SkipTestException(Exception):
    """Raised by a test to flag that it was deliberately skipped rather than
    failed; the stored reason is shown in result summaries via __str__."""

    def __init__(self, value='Skipped Test'):
        # Keep the skip reason on the instance for later display.
        self.value = value

    def __str__(self):
        return "{0!r}".format(self.value)
| 43.552083 | 100 | 0.549315 | """
This is the base class for any test case to be included in the Nephoria repo. It should include any
functionality that we expect to be repeated in most of the test cases that will be written.
These wrapper/harness classes are primarily intended to help provide a common means of running and
reporting 'system level tests', although should work as well for "unit level tests". The intention
is that tests can be built upon other test methods.
For the purposes of this module, a 'system' focused test suite is the sum of a what would
otherwise be many 'unit tests' in order to produce an end to end flow that might mimic a
user's experience and test interactions of the system.
This module is also intended to provide the most commonly shared test attributes amongst the
existing tests when this was written:
cli options/arguments,
logging to stdout, err and a file provided via cli arg,
option to specify a portions or a single unit by providing a list of units to be looked up
by name, method, etc..
gathered/summary of results and metrics,
sharing of test artifacts (to save time over suite execution),
ability to skip tests based on dependencies such as a previous test unit's status, etc..
ability to share/extend testcase classes to build upon other test case classes/suites.
basic config file parsing (arguments in file are merged with cli args)
Currently included:
- Debug method
- Allow parameterized test cases
- Method to run test case
- Run a list of test cases
- Start, end and current status messages
- Enum class for possible test results
TBD:
- Metric tracking (need to define what metrics we want, how they are to be reported)
- Use docstring as description for test case categorizing, and tracking over
time (ie via remote DB)
##################################################################################################
# Sample test and output: #
##################################################################################################
See README.md for more info
"""
import errno
import inspect
import time
import argparse
import re
import sys
import os
import types
import traceback
import random
import string
import yaml
import json
from collections import OrderedDict
from prettytable import PrettyTable
from cloud_utils.log_utils.eulogger import Eulogger
from cloud_utils.log_utils import markup, ForegroundColor, BackGroundColor, TextStyle
from cloud_utils.log_utils import red, green, blue, yellow, cyan, get_traceback, get_terminal_size
from nephoria.testcase_utils.euconfig import EuConfig
import StringIO
import copy
def _get_method_args_kwargs(method):
args = []
kwargdict = OrderedDict()
spec = inspect.getargspec(method)
if spec.defaults:
kwarg_index = len(spec.args) - len(spec.defaults)
args = spec.args[0:kwarg_index]
kwargs = spec.args[kwarg_index:]
for value in spec.defaults:
kwargdict[kwargs.pop(0)] = value
else:
args = spec.args
return args, kwargdict
class TestResult():
    '''
    Enumeration of the standardized result states a test unit can end in.
    NOTE: intentionally left as an old-style class with no explicit base --
    print_test_list_short_stats relies on dir(TestResult)[2:] exposing
    exactly these three attributes.
    '''
    failed = "FAILED"
    not_run = "NOT_RUN"
    passed = "PASSED"
| false | true |
f7fea78f10f057100d5d075562c44ca6b4ec1edc | 666 | py | Python | server/instance.py | elieltonlimeira/desafio-backend | c5c0418bb8970d50114e76feaf9f52b2c9522e70 | [
"Unlicense"
] | null | null | null | server/instance.py | elieltonlimeira/desafio-backend | c5c0418bb8970d50114e76feaf9f52b2c9522e70 | [
"Unlicense"
] | null | null | null | server/instance.py | elieltonlimeira/desafio-backend | c5c0418bb8970d50114e76feaf9f52b2c9522e70 | [
"Unlicense"
] | null | null | null | from flask import Flask, Blueprint
from flask_sqlalchemy import SQLAlchemy
class Server():
    """Wrapper around the Flask application: registers the '/api' blueprint,
    configures the SQLite-backed SQLAlchemy settings, and exposes run() to
    start the built-in development server."""

    def __init__(self):
        self.app = Flask(__name__)
        # All API routes hang off a single blueprint mounted under /api.
        self.blueprint = Blueprint('api', __name__, url_prefix='/api')
        self.app.register_blueprint(self.blueprint)
        self.app.config.update(
            SQLALCHEMY_DATABASE_URI='sqlite:///data.db',
            PROPAGATE_EXCEPTIONS=True,
            SQLALCHEMY_TRACK_MODIFICATIONS=True,
        )

    def run(self):
        """Start Flask's dev server on all interfaces, port 5000, debug on."""
        self.app.run(host='0.0.0.0', port=5000, debug=True)
# Module-level singletons: the server wrapper, its Flask app, and the
# SQLAlchemy handle bound to that app (presumably imported by the resource
# modules -- verify against callers).
server = Server()
app = server.app
db = SQLAlchemy(app)
from flask_sqlalchemy import SQLAlchemy
class Server():
def __init__(self, ):
self.app = Flask(__name__)
self.blueprint = Blueprint('api', __name__, url_prefix='/api')
self.app.register_blueprint(self.blueprint)
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
self.app.config['PROPAGATE_EXCEPTIONS'] = True
self.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
def run(self, ):
self.app.run(
port=5000,
debug=True,
host='0.0.0.0'
)
# Module-level singletons: the server wrapper, its Flask app, and the
# SQLAlchemy handle bound to that app (presumably imported by the resource
# modules -- verify against callers).
server = Server()
app = server.app
db = SQLAlchemy(app)
f7fea95c31ee9479ef6cfbd57953d815677e4156 | 3,012 | py | Python | Archive/Kizmeow NFT Tracker V2/cogs/project_nft.py | Xeift/Kizmeow-OpenSea-and-Etherscan-Discord-Bot | 2ea42ec9d48f18f284724267876795d473d158c7 | [
"MIT"
] | 70 | 2021-12-20T00:37:04.000Z | 2022-03-30T07:06:49.000Z | Archive/Kizmeow NFT Tracker V2/cogs/project_nft.py | Xeift/Kizmeow-OpenSea-and-Etherscan-Discord-Bot | 2ea42ec9d48f18f284724267876795d473d158c7 | [
"MIT"
] | null | null | null | Archive/Kizmeow NFT Tracker V2/cogs/project_nft.py | Xeift/Kizmeow-OpenSea-and-Etherscan-Discord-Bot | 2ea42ec9d48f18f284724267876795d473d158c7 | [
"MIT"
] | 22 | 2021-12-21T07:27:24.000Z | 2022-03-11T11:51:36.000Z | import discord
from discord_slash.utils.manage_commands import create_option
import urllib.request as ur
import json
from discord_slash import cog_ext
from core.cog_core import cogcore
class project_nft(cogcore):
@cog_ext.cog_slash(name="project_nft",
description="return some useful information about your NFT from the contract address and token id you entered",
options=
[
create_option
(
name="contract_address",
description="enter the contract address of yor NFT",
option_type=3,
required=True
),
create_option
(
name="token_id",
description="enter the token id of your NFT",
option_type=3,
required=True
)
],
)
async def nft(self,ctx,contract_address,token_id):
url1='https://api.opensea.io/api/v1/asset/'+contract_address+'/'+token_id+'/?format=json' #api url
req = ur.Request(url=url1,headers={'User-Agent': 'Mozilla/5.0'})
site1 = ur.urlopen(req)
page1 = site1.read()
contents1 = page1.decode()
data1 = json.loads(contents1)
if data1['name'] == None:
name = "no data"
else:
name = str(data1['name'])
if data1['image_original_url'] == None:
image_original_url = "no data"
else:
image_original_url = str(data1['image_original_url'])
if data1['top_ownerships'][0]['owner']['user'] == None:
top_ownerships = "no data"
else:
top_ownerships = str(data1['top_ownerships'][0]['owner']['user']['username'])
if data1['description'] == None:
description = "no data"
else:
description = str(data1['description'])
if data1['collection']['primary_asset_contracts'][0]['external_link'] == None:
external_link = "no data"
else:
external_link = str(data1['collection']['primary_asset_contracts'][0]['external_link'])
if data1['collection']['primary_asset_contracts'][0]['schema_name'] == None:
schema_name = "no data"
else:
schema_name = str(data1['collection']['primary_asset_contracts'][0]['schema_name'])
if data1['token_id'] == None:
token_id1 = "no data"
else:
token_id1 = str(data1['token_id'])
if data1['permalink'] == None:
permalink = "no data"
else:
permalink = str(data1['permalink'])
embed=discord.Embed(title="["+name+"]", color=0xe8006f)
embed.set_thumbnail(url=image_original_url)
embed.add_field(name="token id" , value=token_id1, inline=False)
embed.add_field(name="description" , value=description, inline=False)
embed.add_field(name="official website" , value=external_link, inline=False)
embed.add_field(name="token type" , value=schema_name, inline=False)
embed.add_field(name="owner" , value=top_ownerships, inline=False)
embed.add_field(name="OpenSea" , value=permalink, inline=False)
embed.add_field(name="original resolution image" , value=image_original_url, inline=False)
await ctx.send(embed=embed)
def setup(bot):
    """Extension entry point called by discord.py on load: attach the cog."""
    cog = project_nft(bot)
    bot.add_cog(cog)
| 29.821782 | 113 | 0.668327 | import discord
from discord_slash.utils.manage_commands import create_option
import urllib.request as ur
import json
from discord_slash import cog_ext
from core.cog_core import cogcore
class project_nft(cogcore):
    """Slash-command cog: look up a single NFT on the OpenSea API and reply
    with a Discord embed summarizing the asset."""

    @cog_ext.cog_slash(
        name="project_nft",
        description="return some useful information about your NFT from the contract address and token id you entered",
        options=[
            create_option(
                name="contract_address",
                description="enter the contract address of your NFT",
                option_type=3,
                required=True,
            ),
            create_option(
                name="token_id",
                description="enter the token id of your NFT",
                option_type=3,
                required=True,
            ),
        ],
    )
    async def nft(self, ctx, contract_address, token_id):
        """Fetch asset metadata from the OpenSea single-asset endpoint and
        post it as an embed. Replies with "no data" for absent fields."""
        url = 'https://api.opensea.io/api/v1/asset/' + contract_address + '/' + token_id + '/?format=json'
        # OpenSea rejects requests without a browser-like User-Agent.
        req = ur.Request(url=url, headers={'User-Agent': 'Mozilla/5.0'})
        data = json.loads(ur.urlopen(req).read().decode())

        def text_or_default(value):
            # Original behavior: None renders as "no data", anything else as str().
            return "no data" if value is None else str(value)

        name = text_or_default(data['name'])
        image_original_url = text_or_default(data['image_original_url'])
        owner_user = data['top_ownerships'][0]['owner']['user']
        # A missing username now also renders "no data" (previously rendered 'None').
        top_ownerships = "no data" if owner_user is None else text_or_default(owner_user['username'])
        description = text_or_default(data['description'])
        primary_contract = data['collection']['primary_asset_contracts'][0]
        external_link = text_or_default(primary_contract['external_link'])
        schema_name = text_or_default(primary_contract['schema_name'])
        token_id_text = text_or_default(data['token_id'])
        permalink = text_or_default(data['permalink'])

        embed = discord.Embed(title="[" + name + "]", color=0xe8006f)
        embed.set_thumbnail(url=image_original_url)
        embed.add_field(name="token id", value=token_id_text, inline=False)
        embed.add_field(name="description", value=description, inline=False)
        embed.add_field(name="official website", value=external_link, inline=False)
        embed.add_field(name="token type", value=schema_name, inline=False)
        embed.add_field(name="owner", value=top_ownerships, inline=False)
        embed.add_field(name="OpenSea", value=permalink, inline=False)
        embed.add_field(name="original resolution image", value=image_original_url, inline=False)
        await ctx.send(embed=embed)
# discord.py extension entry point: called by bot.load_extension() to register the cog.
def setup(bot):
    bot.add_cog(project_nft(bot))
| true | true |
f7fea95f581bdfeabe4a96fba459430c751bc2d2 | 5,108 | py | Python | app/views/mqttc/Client.py | RedFalsh/flask-example | 561b04d31356cf1a4a0324a89ebc4f3dcd94e16c | [
"MIT"
] | 1 | 2019-02-26T07:40:07.000Z | 2019-02-26T07:40:07.000Z | app/views/mqttc/Client.py | RedFalsh/flask-example | 561b04d31356cf1a4a0324a89ebc4f3dcd94e16c | [
"MIT"
] | null | null | null | app/views/mqttc/Client.py | RedFalsh/flask-example | 561b04d31356cf1a4a0324a89ebc4f3dcd94e16c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from app.views.mqttc import route_mqtt
from app import app, mqtt, db
from app.model import Device,DeviceTime,DeviceOperateLog
from app.common.libs.MqttService import CMD
from app.common.libs.Helper import getFormatDate, getCurrentDate
from app.common.libs.Logging import logger
import re
import json
import time, threading
# mqtt.subscribe(topic="$SYS/#", qos=0)
# mqtt.subscribe(topic="$SYS/brokers", qos=0)
# Broker system topic: per-client connect/disconnect notifications from EMQX.
mqtt.subscribe(topic="$SYS/brokers/emqx@127.0.0.1/clients/#", qos=0)
# Subscribe to all terminal-device traffic.
mqtt.subscribe(topic="/dev/#", qos=0)
# Topic patterns. Device serial numbers are 10 alphanumeric characters.
DEVICE_CONNECTED = re.compile(r'^\$SYS/brokers/(.*?)/clients/([a-zA-Z0-9]{10})/(connected|disconnected)')
# DEVICE_DISCONNECTED = re.compile(r'^\$SYS/brokers/(.*?)/clients/([a-zA-Z0-9]{8})/disconnected')
DEVICE_CONTROL = re.compile(r'^/dev/([a-zA-Z0-9]{10})/(sub|pub)')
# 设备操作记录保存
def DeviceOperateLogAdd(sn, code, msg, source):
    """Persist one operate-log row for the device with serial ``sn``.

    Messages for unknown serial numbers are dropped silently.
    """
    with app.app_context():
        device = Device.query.filter_by(sn=sn).first()
        if not device:
            return
        entry = DeviceOperateLog()
        entry.device_id = device.id
        entry.code = code
        entry.msg = msg
        entry.source = source
        entry.time = getCurrentDate()
        db.session.add(entry)
        db.session.commit()
def DeviceStatusChanged(sn, status):
    """Store the valve status reported by device ``sn``; unknown serials are ignored."""
    with app.app_context():
        device = Device.query.filter_by(sn=sn).first()
        if not device:
            return
        device.status = status
        db.session.commit()
@mqtt.on_message()
def handle_mqtt_message(client, userdata, message):
    """Dispatch incoming MQTT messages.

    Two topic families are handled:
    * broker (dis)connect notifications -> update ``Device.online`` and echo
      the new state back on the device's ``pub`` topic;
    * ``/dev/<sn>/(sub|pub)`` control traffic -> persist an operate log and,
      for valve status reports, update the stored status.
    """
    topic = message.topic
    payload = str(message.payload.decode())
    res = DEVICE_CONNECTED.match(topic)
    if res:
        sn = res.group(2)
        connect = res.group(3)
        resp = {'code': CMD.TAP_ONLINE}
        with app.app_context():
            device_info = Device.query.filter_by(sn=sn).first()
            if device_info:
                if connect == "connected":
                    device_info.online = 1
                    resp['msg'] = 1
                if connect == "disconnected":
                    device_info.online = 0
                    resp['msg'] = 0
                db.session.commit()
                mqtt.publish('/dev/%s/pub' % sn, json.dumps(resp))
    res = DEVICE_CONTROL.match(topic)
    if res:
        sn = res.group(1)
        sub_pub = res.group(2)
        payload = json.loads(payload)
        code = int(payload['code']) if 'code' in payload else ''
        msg = payload['msg'] if 'msg' in payload else ''
        logger.info("设备操作记录:")
        if sub_pub == 'sub':
            logger.info("user->device: %s" % payload)
            DeviceOperateLogAdd(sn, code, msg, 'sub')
        if sub_pub == 'pub':
            logger.info("device->user: %s" % payload)
            DeviceOperateLogAdd(sn, code, msg, 'pub')
            # Valve status report from the device.
            # Guard against codeless payloads: ``code`` is '' then and the
            # original unconditional int(code) raised ValueError.
            if code != '' and int(code) == CMD.TAP_STATUS:
                logger.info("阀门状态发生变化:%s" % msg)
                DeviceStatusChanged(sn, int(msg))
# @mqtt.on_log()
# def handle_logging(client, userdata, level, buf):
# print(level, buf)
# @route_mqtt.route("mqtt/nodes")
# def mqttNodes():
# return 'mqtt nodes'
def timerTask():
    """Background scheduler: every 30s, apply due open/close timers to devices.

    One-shot timers (type == 1) are disabled after both open and close have
    fired; other timers repeat on the configured weekdays.
    """
    logger.info("开启定时任务......")
    resp = {}
    def ControlTap(id, cmd):
        # Publish ``cmd`` to the device's sub topic; returns True only when
        # the device exists and is currently online.
        device_info = Device.query.filter_by( id=id ).first()
        if device_info:
            if device_info.online == 1:
                resp['code'] = cmd
                sn = device_info.sn
                mqtt.publish('/dev/%s/sub'%sn, json.dumps(resp))
                return True
    while True:
        time.sleep(30)
        # Current wall-clock minute and weekday (0=Sunday per %w).
        time_now = getFormatDate(format="%H:%M")
        time_week = getFormatDate(format="%w")
        with app.app_context():
            time_info = DeviceTime.query.filter_by( alive=1 ).all()
            for t in time_info:
                if t.type == 1: # one-shot task
                    if t.open_time == time_now:
                        if t.open_flag == 0:
                            if ControlTap(t.device_id, CMD.TAP_OPEN):
                                t.open_flag = 1
                                db.session.commit()
                    if t.close_time == time_now:
                        if t.close_flag == 0:
                            if ControlTap(t.device_id, CMD.TAP_CLOSE):
                                t.close_flag = 1
                                db.session.commit()
                    if t.open_flag == 1 and t.close_flag == 1:
                        # One-shot task fully executed: deactivate it.
                        t.alive = 0
                        db.session.commit()
                else: # recurring task, executed on the configured weekdays
                    period = str(t.period).split(',')
                    if time_week in period:
                        if t.open_time == time_now:
                            ControlTap(t.device_id, CMD.TAP_OPEN)
                        if t.close_time == time_now:
                            ControlTap(t.device_id, CMD.TAP_CLOSE)
# Run the scheduler loop as a daemon thread so it exits with the process.
time_thread = threading.Thread(target=timerTask)
time_thread.setDaemon(True)
time_thread.start()
| 34.748299 | 105 | 0.550117 |
from app.views.mqttc import route_mqtt
from app import app, mqtt, db
from app.model import Device,DeviceTime,DeviceOperateLog
from app.common.libs.MqttService import CMD
from app.common.libs.Helper import getFormatDate, getCurrentDate
from app.common.libs.Logging import logger
import re
import json
import time, threading
mqtt.subscribe(topic="$SYS/brokers/emqx@127.0.0.1/clients/#", qos=0)
mqtt.subscribe(topic="/dev/#", qos=0)
DEVICE_CONNECTED = re.compile(r'^\$SYS/brokers/(.*?)/clients/([a-zA-Z0-9]{10})/(connected|disconnected)')
DEVICE_CONTROL = re.compile(r'^/dev/([a-zA-Z0-9]{10})/(sub|pub)')
def DeviceOperateLogAdd(sn, code, msg, source):
    """Persist one operate-log row for the device with serial ``sn``.

    Messages for unknown serial numbers are dropped silently.
    """
    with app.app_context():
        device_info = Device.query.filter_by( sn=sn ).first()
        if device_info:
            operate_log = DeviceOperateLog()
            operate_log.device_id = device_info.id
            operate_log.code = code
            operate_log.msg = msg
            operate_log.source = source
            operate_log.time = getCurrentDate()
            db.session.add(operate_log)
            db.session.commit()
def DeviceStatusChanged(sn, status):
    """Store the valve status reported by device ``sn``; unknown serials are ignored."""
    with app.app_context():
        device_info = Device.query.filter_by( sn=sn ).first()
        if device_info:
            device_info.status = status
            db.session.commit()
@mqtt.on_message()
def handle_mqtt_message(client, userdata, message):
    """Dispatch incoming MQTT messages: broker (dis)connect notifications
    update ``Device.online``; ``/dev/<sn>/(sub|pub)`` traffic is logged and
    valve status reports update the stored status."""
    topic=message.topic
    payload=str(message.payload.decode())
    res = DEVICE_CONNECTED.match(topic)
    if res:
        sn = res.group(2)
        connect = res.group(3)
        resp = {'code':CMD.TAP_ONLINE}
        with app.app_context():
            device_info = Device.query.filter_by( sn=sn ).first()
            if device_info:
                if connect == "connected":
                    device_info.online = 1
                    resp['msg'] = 1
                if connect == "disconnected":
                    device_info.online = 0
                    resp['msg'] = 0
                db.session.commit()
                # Echo the new online state back to the device's pub topic.
                mqtt.publish('/dev/%s/pub'%sn, json.dumps(resp))
    res = DEVICE_CONTROL.match(topic)
    if res:
        sn = res.group(1)
        sub_pub = res.group(2)
        payload = json.loads(payload)
        # NOTE(review): ``code`` is '' when the payload has no 'code' key, so
        # the int(code) comparison below raises ValueError for such messages.
        code = int(payload['code']) if 'code' in payload else ''
        msg = payload['msg'] if 'msg' in payload else ''
        logger.info("设备操作记录:")
        if sub_pub == 'sub':
            logger.info("user->device: %s"%payload)
            DeviceOperateLogAdd(sn,code,msg,'sub')
        if sub_pub == 'pub':
            logger.info("device->user: %s"%payload)
            DeviceOperateLogAdd(sn,code,msg,'pub')
            # Valve status report from the device.
            if int(code) == CMD.TAP_STATUS:
                logger.info("阀门状态发生变化:%s"%msg)
                DeviceStatusChanged(sn, int(msg))
def timerTask():
    """Background scheduler: every 30s, apply due open/close timers to devices.

    One-shot timers (type == 1) are disabled once both flags fire; other
    timers repeat on the configured weekdays.
    """
    logger.info("开启定时任务......")
    resp = {}
    def ControlTap(id, cmd):
        # Publish ``cmd`` to the device's sub topic; True only when the
        # device exists and is online.
        device_info = Device.query.filter_by( id=id ).first()
        if device_info:
            if device_info.online == 1:
                resp['code'] = cmd
                sn = device_info.sn
                mqtt.publish('/dev/%s/sub'%sn, json.dumps(resp))
                return True
    while True:
        time.sleep(30)
        # Current wall-clock minute and weekday (0=Sunday per %w).
        time_now = getFormatDate(format="%H:%M")
        time_week = getFormatDate(format="%w")
        with app.app_context():
            time_info = DeviceTime.query.filter_by( alive=1 ).all()
            for t in time_info:
                if t.type == 1:
                    # One-shot task.
                    if t.open_time == time_now:
                        if t.open_flag == 0:
                            if ControlTap(t.device_id, CMD.TAP_OPEN):
                                t.open_flag = 1
                                db.session.commit()
                    if t.close_time == time_now:
                        if t.close_flag == 0:
                            if ControlTap(t.device_id, CMD.TAP_CLOSE):
                                t.close_flag = 1
                                db.session.commit()
                    if t.open_flag == 1 and t.close_flag == 1:
                        # Both steps done: deactivate the one-shot task.
                        t.alive = 0
                        db.session.commit()
                else:
                    # Recurring task, executed on the configured weekdays.
                    period = str(t.period).split(',')
                    if time_week in period:
                        if t.open_time == time_now:
                            ControlTap(t.device_id, CMD.TAP_OPEN)
                        if t.close_time == time_now:
                            ControlTap(t.device_id, CMD.TAP_CLOSE)
# Run the scheduler loop as a daemon thread so it exits with the process.
time_thread = threading.Thread(target=timerTask)
time_thread.setDaemon(True)
time_thread.start()
| true | true |
f7fea96a2be03bff6ff06044c7287908b5916edf | 388 | py | Python | Python_Exercicios/Mundo1/Tipos Primitivos e Saída de Dados/python_013.py | jbauermanncode/Curso_Em_Video_Python | 330c207d7bed4e663fe1b9ab433ab57a9828b7f1 | [
"MIT"
] | null | null | null | Python_Exercicios/Mundo1/Tipos Primitivos e Saída de Dados/python_013.py | jbauermanncode/Curso_Em_Video_Python | 330c207d7bed4e663fe1b9ab433ab57a9828b7f1 | [
"MIT"
] | null | null | null | Python_Exercicios/Mundo1/Tipos Primitivos e Saída de Dados/python_013.py | jbauermanncode/Curso_Em_Video_Python | 330c207d7bed4e663fe1b9ab433ab57a9828b7f1 | [
"MIT"
] | null | null | null | '''
Faça um algoritmo que leia o salario de um funcionário e mostre seu novo salário, com 15% de aumento.
'''
# Read the salary
salario = float(input('Qual é o salário do funcionário? R$'))
# Compute the new salary with a 15% raise
novo = salario + (salario * 15 / 100)
# Print the result
print('Um funcionário que ganhava R${:.2f}, com 15% de aumento, passou a receber R${:.2f}'.format(salario, novo))
| 32.333333 | 113 | 0.695876 |
# Read the salary, apply a 15% raise and report both values.
salario = float(input('Qual é o salário do funcionário? R$'))
novo = salario + (salario * 15 / 100)
print('Um funcionário que ganhava R${:.2f}, com 15% de aumento, passou a receber R${:.2f}'.format(salario, novo))
| true | true |
f7fea9d65ff0fc7e30ac7de150859dc210be6008 | 83,662 | py | Python | test/api/test_workflows.py | chambm/galaxy | fd1926767996a161cd2fc8bd184e6835fd688765 | [
"CC-BY-3.0"
] | null | null | null | test/api/test_workflows.py | chambm/galaxy | fd1926767996a161cd2fc8bd184e6835fd688765 | [
"CC-BY-3.0"
] | 1 | 2019-02-04T16:21:27.000Z | 2019-02-04T16:45:17.000Z | test/api/test_workflows.py | chambm/galaxy | fd1926767996a161cd2fc8bd184e6835fd688765 | [
"CC-BY-3.0"
] | null | null | null | from __future__ import print_function
import time
from collections import namedtuple
from json import dumps
from uuid import uuid4
import yaml
from requests import delete, put
from base import api
from base.populators import (
DatasetCollectionPopulator,
DatasetPopulator,
skip_without_tool,
wait_on,
WorkflowPopulator
)
from galaxy.exceptions import error_codes
from galaxy.tools.verify.test_data import TestDataResolver
SIMPLE_NESTED_WORKFLOW_YAML = """
class: GalaxyWorkflow
inputs:
- id: outer_input
steps:
- tool_id: cat1
label: first_cat
state:
input1:
$link: outer_input
- run:
class: GalaxyWorkflow
inputs:
- id: inner_input
outputs:
- id: workflow_output
source: random_lines#out_file1
steps:
- tool_id: random_lines1
label: random_lines
state:
num_lines: 1
input:
$link: inner_input
seed_source:
seed_source_selector: set_seed
seed: asdf
label: nested_workflow
connect:
inner_input: first_cat#out_file1
- tool_id: cat1
label: second_cat
state:
input1:
$link: nested_workflow#workflow_output
queries:
- input2:
$link: nested_workflow#workflow_output
test_data:
outer_input:
value: 1.bed
type: File
"""
class BaseWorkflowsApiTestCase(api.ApiTestCase):
    """Shared helpers for workflow API tests: uploading workflows, wiring up
    run requests, and executing YAML-described workflows end to end."""
    # TODO: Find a new file for this class.

    def setUp(self):
        super(BaseWorkflowsApiTestCase, self).setUp()
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.dataset_collection_populator = DatasetCollectionPopulator(self.galaxy_interactor)

    def _assert_user_has_workflow_with_name(self, name):
        """Fail unless the current user owns a workflow named ``name``."""
        names = self._workflow_names()
        assert name in names, "No workflows with name %s in users workflows <%s>" % (name, names)

    def _workflow_names(self):
        """Return the names of all workflows in the current user's index."""
        index_response = self._get("workflows")
        self._assert_status_code_is(index_response, 200)
        return [w["name"] for w in index_response.json()]

    def import_workflow(self, workflow, **kwds):
        """Import a workflow dictionary via the populator and return the response."""
        upload_response = self.workflow_populator.import_workflow(workflow, **kwds)
        return upload_response

    def _upload_yaml_workflow(self, has_yaml, **kwds):
        """Upload a YAML workflow description, returning the new workflow id."""
        return self.workflow_populator.upload_yaml_workflow(has_yaml, **kwds)

    def _setup_workflow_run(self, workflow=None, inputs_by='step_id', history_id=None, workflow_id=None):
        """Create a workflow + two input datasets and build a run request.

        ``inputs_by`` selects how the datasets are mapped to workflow inputs
        (step id, step index, input name, or step uuid). Returns the request
        payload and the history id used.
        """
        if not workflow_id:
            workflow_id = self.workflow_populator.create_workflow(workflow)
        if not history_id:
            history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        hda2 = self.dataset_populator.new_dataset(history_id, content="4 5 6")
        workflow_request = dict(
            history="hist_id=%s" % history_id,
            workflow_id=workflow_id,
        )
        label_map = {
            'WorkflowInput1': self._ds_entry(hda1),
            'WorkflowInput2': self._ds_entry(hda2)
        }
        if inputs_by == 'step_id':
            ds_map = self._build_ds_map(workflow_id, label_map)
            workflow_request["ds_map"] = ds_map
        elif inputs_by == "step_index":
            index_map = {
                '0': self._ds_entry(hda1),
                '1': self._ds_entry(hda2)
            }
            workflow_request["inputs"] = dumps(index_map)
            workflow_request["inputs_by"] = 'step_index'
        elif inputs_by == "name":
            workflow_request["inputs"] = dumps(label_map)
            workflow_request["inputs_by"] = 'name'
        elif inputs_by in ["step_uuid", "uuid_implicitly"]:
            uuid_map = {
                workflow["steps"]["0"]["uuid"]: self._ds_entry(hda1),
                workflow["steps"]["1"]["uuid"]: self._ds_entry(hda2),
            }
            workflow_request["inputs"] = dumps(uuid_map)
            if inputs_by == "step_uuid":
                # "uuid_implicitly" deliberately omits inputs_by to exercise
                # the server-side default handling.
                workflow_request["inputs_by"] = "step_uuid"
        return workflow_request, history_id

    def _build_ds_map(self, workflow_id, label_map):
        """Translate an input-label -> dataset map into a step-id keyed ds_map."""
        workflow_inputs = self._workflow_inputs(workflow_id)
        ds_map = {}
        for key, value in workflow_inputs.items():
            label = value["label"]
            if label in label_map:
                ds_map[key] = label_map[label]
        return dumps(ds_map)

    def _ds_entry(self, hda):
        """Return an API src/id reference for a dataset or dataset collection."""
        src = 'hda'
        if 'history_content_type' in hda and hda['history_content_type'] == "dataset_collection":
            src = 'hdca'
        return dict(src=src, id=hda["id"])

    def _workflow_inputs(self, uploaded_workflow_id):
        """Fetch the declared inputs of a stored workflow."""
        workflow_show_response = self._get("workflows/%s" % uploaded_workflow_id)
        self._assert_status_code_is(workflow_show_response, 200)
        workflow_inputs = workflow_show_response.json()["inputs"]
        return workflow_inputs

    def _invocation_details(self, workflow_id, invocation_id):
        """Fetch the invocation details resource for an invocation."""
        invocation_details_response = self._get("workflows/%s/usage/%s" % (workflow_id, invocation_id))
        self._assert_status_code_is(invocation_details_response, 200)
        invocation_details = invocation_details_response.json()
        return invocation_details

    def _run_jobs(self, has_workflow, history_id=None, wait=True, source_type=None, jobs_descriptions=None, expected_response=200, assert_ok=True):
        """Upload and invoke a YAML-described workflow, staging its test_data.

        test_data entries may be plain strings (uploaded as datasets), dicts
        with a ``type`` (File / raw), or collection descriptions. Returns a
        RunJobsSummary when an invocation id was produced.
        """
        def read_test_data(test_dict):
            # Resolve the named test file and return its contents.
            test_data_resolver = TestDataResolver()
            filename = test_data_resolver.get_filename(test_dict["value"])
            with open(filename, "r") as f:
                content = f.read()
            return content

        if history_id is None:
            history_id = self.history_id
        workflow_id = self._upload_yaml_workflow(
            has_workflow, source_type=source_type
        )
        if jobs_descriptions is None:
            assert source_type != "path"
            jobs_descriptions = yaml.load(has_workflow)
        test_data = jobs_descriptions.get("test_data", {})
        label_map = {}
        inputs = {}
        has_uploads = False
        for key, value in test_data.items():
            is_dict = isinstance(value, dict)
            if is_dict and ("elements" in value or value.get("type", None) in ["list:paired", "list", "paired"]):
                elements_data = value.get("elements", [])
                elements = []
                for element_data in elements_data:
                    identifier = element_data["identifier"]
                    input_type = element_data.get("type", "raw")
                    if input_type == "File":
                        content = read_test_data(element_data)
                    else:
                        content = element_data["content"]
                    elements.append((identifier, content))
                # TODO: make this collection_type
                collection_type = value["type"]
                new_collection_kwds = {}
                if "name" in value:
                    new_collection_kwds["name"] = value["name"]
                if collection_type == "list:paired":
                    hdca = self.dataset_collection_populator.create_list_of_pairs_in_history(history_id, **new_collection_kwds).json()
                elif collection_type == "list":
                    hdca = self.dataset_collection_populator.create_list_in_history(history_id, contents=elements, **new_collection_kwds).json()
                else:
                    hdca = self.dataset_collection_populator.create_pair_in_history(history_id, contents=elements, **new_collection_kwds).json()
                label_map[key] = self._ds_entry(hdca)
                inputs[key] = hdca
                has_uploads = True
            elif is_dict and "type" in value:
                input_type = value["type"]
                if input_type == "File":
                    content = read_test_data(value)
                    new_dataset_kwds = {
                        "content": content
                    }
                    if "name" in value:
                        new_dataset_kwds["name"] = value["name"]
                    if "file_type" in value:
                        new_dataset_kwds["file_type"] = value["file_type"]
                    hda = self.dataset_populator.new_dataset(history_id, **new_dataset_kwds)
                    label_map[key] = self._ds_entry(hda)
                    has_uploads = True
                elif input_type == "raw":
                    label_map[key] = value["value"]
                    inputs[key] = value["value"]
            elif not is_dict:
                has_uploads = True
                hda = self.dataset_populator.new_dataset(history_id, content=value)
                label_map[key] = self._ds_entry(hda)
                inputs[key] = hda
            else:
                # Fixed format string - "%" alone raised ValueError instead of
                # reporting the offending test_data.
                raise ValueError("Invalid test_data def %s" % test_data)
        workflow_request = dict(
            history="hist_id=%s" % history_id,
            workflow_id=workflow_id,
        )
        workflow_request["inputs"] = dumps(label_map)
        workflow_request["inputs_by"] = 'name'
        if has_uploads:
            self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        url = "workflows/%s/usage" % (workflow_id)
        invocation_response = self._post(url, data=workflow_request)
        self._assert_status_code_is(invocation_response, expected_response)
        invocation = invocation_response.json()
        invocation_id = invocation.get('id')
        if invocation_id:
            # Wait for workflow to become fully scheduled and then for all jobs
            # complete.
            if wait:
                self.workflow_populator.wait_for_workflow(workflow_id, invocation_id, history_id, assert_ok=assert_ok)
            jobs = self._history_jobs(history_id)
            return RunJobsSummary(
                history_id=history_id,
                workflow_id=workflow_id,
                invocation_id=invocation_id,
                inputs=inputs,
                jobs=jobs,
            )

    def _history_jobs(self, history_id):
        """Return the jobs of a history ordered by creation time."""
        return self._get("jobs", {"history_id": history_id, "order_by": "create_time"}).json()

    def _assert_history_job_count(self, history_id, n):
        """Fail unless the history contains exactly ``n`` jobs."""
        jobs = self._history_jobs(history_id)
        self.assertEqual(len(jobs), n)
# Workflow API TODO:
# - Allow history_id as param to workflow run action. (hist_id)
# - Allow post to workflows/<workflow_id>/run in addition to posting to
# /workflows with id in payload.
# - Much more testing obviously, always more testing.
class WorkflowsApiTestCase(BaseWorkflowsApiTestCase):
    def setUp(self):
        # No extra fixtures beyond the shared base-class setup.
        super(WorkflowsApiTestCase, self).setUp()
    def test_show_valid(self):
        """Show returns instance-style (0-based) or legacy step ids on request."""
        # The "dummy" workflow exists only to shift the legacy step ids of the
        # second workflow away from 0, 1, 2.
        workflow_id = self.workflow_populator.simple_workflow("dummy")
        workflow_id = self.workflow_populator.simple_workflow("test_regular")
        show_response = self._get("workflows/%s" % workflow_id, {"style": "instance"})
        workflow = show_response.json()
        self._assert_looks_like_instance_workflow_representation(workflow)
        assert len(workflow["steps"]) == 3
        self.assertEqual(sorted(step["id"] for step in workflow["steps"].values()), [0, 1, 2])
        show_response = self._get("workflows/%s" % workflow_id, {"legacy": True})
        workflow = show_response.json()
        self._assert_looks_like_instance_workflow_representation(workflow)
        assert len(workflow["steps"]) == 3
        # Can't really say what the legacy IDs are but must be greater than 3 because dummy
        # workflow was created first in this instance.
        self.assertNotEqual(sorted(step["id"] for step in workflow["steps"].values()), [0, 1, 2])
def test_show_invalid_key_is_400(self):
show_response = self._get("workflows/%s" % self._random_key())
self._assert_status_code_is(show_response, 400)
    def test_cannot_show_private_workflow(self):
        """A different user may not view an unpublished workflow (403)."""
        workflow_id = self.workflow_populator.simple_workflow("test_not_importportable")
        with self._different_user():
            show_response = self._get("workflows/%s" % workflow_id)
            self._assert_status_code_is(show_response, 403)
    def test_delete(self):
        """Deleting a workflow removes it from the owner's default index."""
        workflow_id = self.workflow_populator.simple_workflow("test_delete")
        workflow_name = "test_delete"
        self._assert_user_has_workflow_with_name(workflow_name)
        workflow_url = self._api_url("workflows/%s" % workflow_id, use_key=True)
        delete_response = delete(workflow_url)
        self._assert_status_code_is(delete_response, 200)
        # Make sure workflow is no longer in index by default.
        assert workflow_name not in self._workflow_names()
    def test_other_cannot_delete(self):
        """A different user cannot delete someone else's workflow (403)."""
        workflow_id = self.workflow_populator.simple_workflow("test_other_delete")
        with self._different_user():
            workflow_url = self._api_url("workflows/%s" % workflow_id, use_key=True)
            delete_response = delete(workflow_url)
            self._assert_status_code_is(delete_response, 403)
def test_index(self):
index_response = self._get("workflows")
self._assert_status_code_is(index_response, 200)
assert isinstance(index_response.json(), list)
    def test_upload(self):
        """Upload via the modern /api/workflows route succeeds."""
        self.__test_upload(use_deprecated_route=False)
    def test_upload_deprecated(self):
        """Upload via the deprecated /api/workflows/upload route still succeeds."""
        self.__test_upload(use_deprecated_route=True)
    def test_import_tools_requires_admin(self):
        """Uploading with import_tools set is rejected for non-admins (403)."""
        response = self.__test_upload(import_tools=True, assert_ok=False)
        assert response.status_code == 403
    def __test_upload(self, use_deprecated_route=False, name="test_import", workflow=None, assert_ok=True, import_tools=False):
        """Upload ``workflow`` (default: a freshly loaded simple workflow).

        Optionally asserts a 200 response and that the workflow shows up in
        the user's index; always returns the raw upload response.
        """
        if workflow is None:
            workflow = self.workflow_populator.load_workflow(name=name)
        data = dict(
            workflow=dumps(workflow),
        )
        if import_tools:
            data["import_tools"] = import_tools
        if use_deprecated_route:
            route = "workflows/upload"
        else:
            route = "workflows"
        upload_response = self._post(route, data=data)
        if assert_ok:
            self._assert_status_code_is(upload_response, 200)
            self._assert_user_has_workflow_with_name(name)
        return upload_response
def test_update(self):
original_workflow = self.workflow_populator.load_workflow(name="test_import")
uuids = {}
labels = {}
for order_index, step_dict in original_workflow["steps"].items():
uuid = str(uuid4())
step_dict["uuid"] = uuid
uuids[order_index] = uuid
label = "label_%s" % order_index
step_dict["label"] = label
labels[order_index] = label
def check_label_and_uuid(order_index, step_dict):
assert order_index in uuids
assert order_index in labels
self.assertEqual(uuids[order_index], step_dict["uuid"])
self.assertEqual(labels[order_index], step_dict["label"])
upload_response = self.__test_upload(workflow=original_workflow)
workflow_id = upload_response.json()["id"]
def update(workflow_object):
put_response = self._update_workflow(workflow_id, workflow_object)
self._assert_status_code_is(put_response, 200)
return put_response
workflow_content = self._download_workflow(workflow_id)
steps = workflow_content["steps"]
def tweak_step(step):
order_index, step_dict = step
check_label_and_uuid(order_index, step_dict)
assert step_dict['position']['top'] != 1
assert step_dict['position']['left'] != 1
step_dict['position'] = {'top': 1, 'left': 1}
map(tweak_step, steps.items())
update(workflow_content)
def check_step(step):
order_index, step_dict = step
check_label_and_uuid(order_index, step_dict)
assert step_dict['position']['top'] == 1
assert step_dict['position']['left'] == 1
updated_workflow_content = self._download_workflow(workflow_id)
map(check_step, updated_workflow_content['steps'].items())
# Re-update against original worklfow...
update(original_workflow)
updated_workflow_content = self._download_workflow(workflow_id)
# Make sure the positions have been updated.
map(tweak_step, updated_workflow_content['steps'].items())
    def test_update_no_tool_id(self):
        """Updating with a step missing its tool_id is rejected (400)."""
        workflow_object = self.workflow_populator.load_workflow(name="test_import")
        upload_response = self.__test_upload(workflow=workflow_object)
        workflow_id = upload_response.json()["id"]
        del workflow_object["steps"]["2"]["tool_id"]
        put_response = self._update_workflow(workflow_id, workflow_object)
        self._assert_status_code_is(put_response, 400)
    def test_update_missing_tool(self):
        """Updating with an unknown tool id is rejected (400)."""
        # Create allows missing tools, update doesn't currently...
        workflow_object = self.workflow_populator.load_workflow(name="test_import")
        upload_response = self.__test_upload(workflow=workflow_object)
        workflow_id = upload_response.json()["id"]
        workflow_object["steps"]["2"]["tool_id"] = "cat-not-found"
        put_response = self._update_workflow(workflow_id, workflow_object)
        self._assert_status_code_is(put_response, 400)
    def test_require_unique_step_uuids(self):
        """Creating a workflow with duplicated step uuids is rejected (400)."""
        workflow_dup_uuids = self.workflow_populator.load_workflow(name="test_import")
        uuid0 = str(uuid4())
        for step_dict in workflow_dup_uuids["steps"].values():
            step_dict["uuid"] = uuid0
        response = self.workflow_populator.create_workflow_response(workflow_dup_uuids)
        self._assert_status_code_is(response, 400)
    def test_require_unique_step_labels(self):
        """Creating a workflow with duplicated step labels is rejected (400)."""
        workflow_dup_label = self.workflow_populator.load_workflow(name="test_import")
        for step_dict in workflow_dup_label["steps"].values():
            step_dict["label"] = "my duplicated label"
        response = self.workflow_populator.create_workflow_response(workflow_dup_label)
        self._assert_status_code_is(response, 400)
    def test_import_deprecated(self):
        """Another user can import a published workflow."""
        workflow_id = self.workflow_populator.simple_workflow("test_import_published_deprecated", publish=True)
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id)
            self._assert_status_code_is(other_import_response, 200)
            self._assert_user_has_workflow_with_name("imported: test_import_published_deprecated")
    def test_import_annotations(self):
        """Workflow and step annotations survive upload and import."""
        workflow_id = self.workflow_populator.simple_workflow("test_import_annotations", publish=True)
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id)
            self._assert_status_code_is(other_import_response, 200)
            # Test annotations preserved during upload and copied over during
            # import.
            other_id = other_import_response.json()["id"]
            imported_workflow = self._show_workflow(other_id)
            assert imported_workflow["annotation"] == "simple workflow"
            step_annotations = set(step["annotation"] for step in imported_workflow["steps"].values())
            assert "input1 description" in step_annotations
    def test_import_subworkflows(self):
        """Importing a nested workflow copies the subworkflow (new content id)."""
        def get_subworkflow_content_id(workflow_id):
            # Pull the subworkflow step's content id out of the editor view.
            workflow_contents = self._download_workflow(workflow_id, style="editor")
            steps = workflow_contents['steps']
            subworkflow_step = next(s for s in steps.values() if s["type"] == "subworkflow")
            return subworkflow_step['content_id']
        workflow_id = self._upload_yaml_workflow(SIMPLE_NESTED_WORKFLOW_YAML, publish=True)
        subworkflow_content_id = get_subworkflow_content_id(workflow_id)
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id)
            self._assert_status_code_is(other_import_response, 200)
            imported_workflow_id = other_import_response.json()["id"]
            imported_subworkflow_content_id = get_subworkflow_content_id(imported_workflow_id)
            assert subworkflow_content_id != imported_subworkflow_content_id
    def test_not_importable_prevents_import(self):
        """An unpublished workflow cannot be imported by another user (403)."""
        workflow_id = self.workflow_populator.simple_workflow("test_not_importportable")
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id)
            self._assert_status_code_is(other_import_response, 403)
    def test_import_published(self):
        """A published workflow can be imported via the deprecated route."""
        workflow_id = self.workflow_populator.simple_workflow("test_import_published", publish=True)
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id, deprecated_route=True)
            self._assert_status_code_is(other_import_response, 200)
            self._assert_user_has_workflow_with_name("imported: test_import_published")
    def test_export(self):
        """Downloaded (export-style) workflows carry the expected keys per step."""
        uploaded_workflow_id = self.workflow_populator.simple_workflow("test_for_export")
        downloaded_workflow = self._download_workflow(uploaded_workflow_id)
        assert downloaded_workflow["name"] == "test_for_export"
        assert len(downloaded_workflow["steps"]) == 3
        first_input = downloaded_workflow["steps"]["0"]["inputs"][0]
        assert first_input["name"] == "WorkflowInput1"
        assert first_input["description"] == "input1 description"
        self._assert_has_keys(downloaded_workflow, "a_galaxy_workflow", "format-version", "annotation", "uuid", "steps")
        for step in downloaded_workflow["steps"].values():
            self._assert_has_keys(
                step,
                'id',
                'type',
                'tool_id',
                'tool_version',
                'name',
                'tool_state',
                'annotation',
                'inputs',
                'workflow_outputs',
                'outputs'
            )
            if step['type'] == "tool":
                # Only tool steps carry post-job actions.
                self._assert_has_keys(step, "post_job_actions")
    def test_export_editor(self):
        """Editor-style downloads expose the editor's per-step representation."""
        uploaded_workflow_id = self.workflow_populator.simple_workflow("test_for_export")
        downloaded_workflow = self._download_workflow(uploaded_workflow_id, style="editor")
        self._assert_has_keys(downloaded_workflow, "name", "steps", "upgrade_messages")
        for step in downloaded_workflow["steps"].values():
            self._assert_has_keys(
                step,
                'id',
                'type',
                'content_id',
                'name',
                'tool_state',
                'tooltip',
                'data_inputs',
                'data_outputs',
                'config_form',
                'annotation',
                'post_job_actions',
                'workflow_outputs',
                'uuid',
                'label',
            )
    def test_import_missing_tool(self):
        """A workflow referencing an unknown tool can still be created."""
        workflow = self.workflow_populator.load_workflow_from_resource(name="test_workflow_missing_tool")
        workflow_id = self.workflow_populator.create_workflow(workflow)
        workflow_description = self._show_workflow(workflow_id)
        steps = workflow_description["steps"]
        missing_tool_steps = [v for v in steps.values() if v['tool_id'] == 'cat_missing_tool']
        assert len(missing_tool_steps) == 1
    def test_import_no_tool_id(self):
        """A tool step entirely lacking a tool_id is rejected on create (400)."""
        # Import works with missing tools, but not with absent content/tool id.
        workflow = self.workflow_populator.load_workflow_from_resource(name="test_workflow_missing_tool")
        del workflow["steps"]["2"]["tool_id"]
        create_response = self.__test_upload(workflow=workflow, assert_ok=False)
        self._assert_status_code_is(create_response, 400)
    def test_import_export_with_runtime_inputs(self):
        """Runtime parameters round-trip through create + download."""
        workflow = self.workflow_populator.load_workflow_from_resource(name="test_workflow_with_runtime_input")
        workflow_id = self.workflow_populator.create_workflow(workflow)
        downloaded_workflow = self._download_workflow(workflow_id)
        assert len(downloaded_workflow["steps"]) == 2
        runtime_input = downloaded_workflow["steps"]["1"]["inputs"][0]
        assert runtime_input["description"].startswith("runtime parameter for tool")
        assert runtime_input["name"] == "num_lines"
    @skip_without_tool("cat1")
    def test_run_workflow_by_index(self):
        """Run with inputs keyed by step index."""
        self.__run_cat_workflow(inputs_by='step_index')
    @skip_without_tool("cat1")
    def test_run_workflow_by_uuid(self):
        """Run the canonical cat workflow, keying inputs by step UUID."""
        self.__run_cat_workflow(inputs_by='step_uuid')
    @skip_without_tool("cat1")
    def test_run_workflow_by_uuid_implicitly(self):
        """Run the canonical cat workflow, keying inputs by bare UUID strings."""
        self.__run_cat_workflow(inputs_by='uuid_implicitly')
    @skip_without_tool("cat1")
    def test_run_workflow_by_name(self):
        """Run the canonical cat workflow, keying inputs by step name."""
        self.__run_cat_workflow(inputs_by='name')
    @skip_without_tool("cat1")
    def test_run_workflow(self):
        """Run the canonical cat workflow, keying inputs by step id (default style)."""
        self.__run_cat_workflow(inputs_by='step_id')
    @skip_without_tool("multiple_versions")
    def test_run_versioned_tools(self):
        """Two copies of a workflow pinned to different versions of the same tool both run."""
        history_01_id = self.dataset_populator.new_history()
        workflow_version_01 = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - tool_id: multiple_versions
    tool_version: "0.1"
    state:
      inttest: 0
""")
        self.__invoke_workflow(history_01_id, workflow_version_01)
        self.dataset_populator.wait_for_history(history_01_id, assert_ok=True)
        history_02_id = self.dataset_populator.new_history()
        workflow_version_02 = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - tool_id: multiple_versions
    tool_version: "0.2"
    state:
      inttest: 1
""")
        self.__invoke_workflow(history_02_id, workflow_version_02)
        self.dataset_populator.wait_for_history(history_02_id, assert_ok=True)
def __run_cat_workflow(self, inputs_by):
workflow = self.workflow_populator.load_workflow(name="test_for_run")
workflow["steps"]["0"]["uuid"] = str(uuid4())
workflow["steps"]["1"]["uuid"] = str(uuid4())
workflow_request, history_id = self._setup_workflow_run(workflow, inputs_by=inputs_by)
# TODO: This should really be a post to workflows/<workflow_id>/run or
# something like that.
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
invocation_id = run_workflow_response.json()["id"]
invocation = self._invocation_details(workflow_request["workflow_id"], invocation_id)
assert invocation["state"] == "scheduled", invocation
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
    @skip_without_tool("collection_creates_pair")
    def test_workflow_run_output_collections(self):
        """A tool-created paired collection can feed a downstream paired-input tool."""
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: text_input
    type: input
  - label: split_up
    tool_id: collection_creates_pair
    state:
      input1:
        $link: text_input
  - tool_id: collection_paired_test
    state:
      f1:
        $link: split_up#paired_output
""")
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="a\nb\nc\nd\n")
        inputs = {
            '0': self._ds_entry(hda1),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # hid=0 fetches the most recent dataset - the pair concatenation result.
        self.assertEqual("a\nc\nb\nd\n", self.dataset_populator.get_history_dataset_content(history_id, hid=0))
    @skip_without_tool("collection_creates_pair")
    def test_workflow_run_output_collection_mapping(self):
        """Mapping a list over a pair-producing tool yields a collection consumable downstream."""
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - type: input_collection
  - tool_id: collection_creates_pair
    state:
      input1:
        $link: 0
  - tool_id: collection_paired_test
    state:
      f1:
        $link: 1#paired_output
  - tool_id: cat_list
    state:
      input1:
        $link: 2#out1
""")
        history_id = self.dataset_populator.new_history()
        hdca1 = self.dataset_collection_populator.create_list_in_history(history_id, contents=["a\nb\nc\nd\n", "e\nf\ng\nh\n"]).json()
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hdca1),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # hid=0 fetches the most recent dataset - the final cat_list output.
        self.assertEqual("a\nc\nb\nd\ne\ng\nf\nh\n", self.dataset_populator.get_history_dataset_content(history_id, hid=0))
    @skip_without_tool("collection_split_on_column")
    def test_workflow_run_dynamic_output_collections(self):
        """A dynamically-discovered (split) collection schedules downstream steps correctly."""
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: text_input1
    type: input
  - label: text_input2
    type: input
  - label: cat_inputs
    tool_id: cat1
    state:
      input1:
        $link: text_input1
      queries:
        - input2:
            $link: text_input2
  - label: split_up
    tool_id: collection_split_on_column
    state:
      input1:
        $link: cat_inputs#out_file1
  - tool_id: cat_list
    state:
      input1:
        $link: split_up#split_output
""")
        hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t30.0\nsamp2\t40.0\n")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hda2),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        details = self.dataset_populator.get_history_dataset_details(history_id, hid=0)
        last_item_hid = details["hid"]
        assert last_item_hid == 7, "Expected 7 history items, got %s" % last_item_hid
        content = self.dataset_populator.get_history_dataset_content(history_id, hid=0)
        self.assertEqual("10.0\n30.0\n20.0\n40.0\n", content)
    @skip_without_tool("collection_split_on_column")
    @skip_without_tool("min_repeat")
    def test_workflow_run_dynamic_output_collections_2(self):
        """Dynamic collections feeding a repeat parameter schedule correctly."""
        # A more advanced output collection workflow, testing regression of
        # https://github.com/galaxyproject/galaxy/issues/776
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: test_input_1
    type: input
  - label: test_input_2
    type: input
  - label: test_input_3
    type: input
  - label: split_up
    tool_id: collection_split_on_column
    state:
      input1:
        $link: test_input_2
  - label: min_repeat
    tool_id: min_repeat
    state:
      queries:
        - input:
            $link: test_input_1
      queries2:
        - input2:
            $link: split_up#split_output
""")
        hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t20.0\nsamp2\t40.0\n")
        hda3 = self.dataset_populator.new_dataset(history_id, content="samp1\t30.0\nsamp2\t60.0\n")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hda2),
            '2': self._ds_entry(hda3),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        content = self.dataset_populator.get_history_dataset_content(history_id, hid=7)
        self.assertEqual(content.strip(), "samp1\t10.0\nsamp2\t20.0")
    @skip_without_tool("collection_split_on_column")
    def test_workflow_run_dynamic_output_collections_3(self):
        """Nested dynamic collections (list:list:list) followed by a mapping step schedule cleanly."""
        # Test a workflow that create a list:list:list followed by a mapping step.
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: text_input1
    type: input
  - label: text_input2
    type: input
  - label: cat_inputs
    tool_id: cat1
    state:
      input1:
        $link: text_input1
      queries:
        - input2:
            $link: text_input2
  - label: split_up_1
    tool_id: collection_split_on_column
    state:
      input1:
        $link: cat_inputs#out_file1
  - label: split_up_2
    tool_id: collection_split_on_column
    state:
      input1:
        $link: split_up_1#split_output
  - tool_id: cat
    state:
      input1:
        $link: split_up_2#split_output
""")
        hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t30.0\nsamp2\t40.0\n")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hda2),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
    @skip_without_tool("mapper")
    @skip_without_tool("pileup")
    def test_workflow_metadata_validation_0(self):
        """Mapping over a collection into a metadata-dependent tool validates correctly."""
        # Testing regression of
        # https://github.com/galaxyproject/galaxy/issues/1514
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
steps:
  - label: input_fastqs
    type: input_collection
  - label: reference
    type: input
  - label: map_over_mapper
    tool_id: mapper
    state:
      input1:
        $link: input_fastqs
      reference:
        $link: reference
  - label: pileup
    tool_id: pileup
    state:
      input1:
        $link: map_over_mapper#out_file1
      reference:
        $link: reference
test_data:
  input_fastqs:
    type: list
    elements:
      - identifier: samp1
        value: 1.fastq
        type: File
      - identifier: samp2
        value: 1.fastq
        type: File
  reference:
    value: 1.fasta
    type: File
""", history_id=history_id)
def test_run_subworkflow_simple(self):
history_id = self.dataset_populator.new_history()
self._run_jobs(SIMPLE_NESTED_WORKFLOW_YAML, history_id=history_id)
content = self.dataset_populator.get_history_dataset_content(history_id)
self.assertEqual("chr5\t131424298\t131424460\tCCDS4149.1_cds_0_0_chr5_131424299_f\t0\t+\nchr5\t131424298\t131424460\tCCDS4149.1_cds_0_0_chr5_131424299_f\t0\t+\n", content)
    @skip_without_tool("cat1")
    @skip_without_tool("collection_paired_test")
    def test_workflow_run_zip_collections(self):
        """The built-in __ZIP_COLLECTION__ tool pairs two datasets for a paired-input tool."""
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: test_input_1
    type: input
  - label: test_input_2
    type: input
  - label: first_cat
    tool_id: cat1
    state:
      input1:
        $link: test_input_1
  - label: zip_it
    tool_id: "__ZIP_COLLECTION__"
    state:
      input_forward:
        $link: first_cat#out_file1
      input_reverse:
        $link: test_input_2
  - label: concat_pair
    tool_id: collection_paired_test
    state:
      f1:
        $link: zip_it#output
""")
        hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t20.0\nsamp2\t40.0\n")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hda2),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        content = self.dataset_populator.get_history_dataset_content(history_id)
        self.assertEqual(content.strip(), "samp1\t10.0\nsamp2\t20.0\nsamp1\t20.0\nsamp2\t40.0")
def test_filter_failed_mapping(self):
history_id = self.dataset_populator.new_history()
summary = self._run_jobs("""
class: GalaxyWorkflow
inputs:
- type: collection
label: input_c
steps:
- label: mixed_collection
tool_id: exit_code_from_file
state:
input:
$link: input_c
- label: filtered_collection
tool_id: "__FILTER_FAILED_DATASETS__"
state:
input:
$link: mixed_collection#out_file1
- tool_id: cat1
state:
input1:
$link: filtered_collection
test_data:
input_c:
type: list
elements:
- identifier: i1
content: "0"
- identifier: i2
content: "1"
""", history_id=history_id, wait=True, assert_ok=False)
jobs = summary.jobs
def filter_jobs_by_tool(tool_id):
return [j for j in summary.jobs if j["tool_id"] == tool_id]
assert len(filter_jobs_by_tool("upload1")) == 2, jobs
assert len(filter_jobs_by_tool("exit_code_from_file")) == 2, jobs
assert len(filter_jobs_by_tool("__FILTER_FAILED_DATASETS__")) == 1, jobs
# Follow proves one job was filtered out of the result of cat1
assert len(filter_jobs_by_tool("cat1")) == 1, jobs
def test_workflow_request(self):
workflow = self.workflow_populator.load_workflow(name="test_for_queue")
workflow_request, history_id = self._setup_workflow_run(workflow)
url = "workflows/%s/usage" % (workflow_request["workflow_id"])
del workflow_request["workflow_id"]
run_workflow_response = self._post(url, data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
# Give some time for workflow to get scheduled before scanning the history.
time.sleep(5)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
    @skip_without_tool("cat")
    def test_workflow_pause(self):
        """A pause step blocks invocation scheduling until the review is approved."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_pause")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        index_map = {
            '0': self._ds_entry(hda1),
        }
        invocation_id = self.__invoke_workflow(
            history_id,
            uploaded_workflow_id,
            index_map,
        )
        # Give some time for workflow to get scheduled before scanning the history.
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # Wait for all the datasets to complete, make sure the workflow invocation
        # is not complete.
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled', invocation
        # Approve the paused step, then poll (up to ~12.5s) for full scheduling.
        self.__review_paused_steps(uploaded_workflow_id, invocation_id, order_index=2, action=True)
        invocation_scheduled = False
        for i in range(25):
            invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
            if invocation['state'] == 'scheduled':
                invocation_scheduled = True
                break
            time.sleep(.5)
        assert invocation_scheduled, "Workflow state is not scheduled..."
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
    @skip_without_tool("cat")
    def test_workflow_pause_cancel(self):
        """Declining a paused step's review cancels the whole invocation."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_pause")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        index_map = {
            '0': self._ds_entry(hda1),
        }
        invocation_id = self.__invoke_workflow(history_id, uploaded_workflow_id, index_map)
        # Give some time for workflow to get scheduled before scanning the history.
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # Wait for all the datasets to complete, make sure the workflow invocation
        # is not complete.
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled'
        # action=False declines the review.
        self.__review_paused_steps(uploaded_workflow_id, invocation_id, order_index=2, action=False)
        # Not immediately cancelled, must wait until workflow scheduled again.
        time.sleep(4)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] == 'cancelled', invocation
    @skip_without_tool("head")
    def test_workflow_map_reduce_pause(self):
        """Pause steps inside a map/reduce workflow resume correctly after review."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_map_reduce_pause")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="reviewed\nunreviewed")
        hdca1 = self.dataset_collection_populator.create_list_in_history(history_id, contents=["1\n2\n3", "4\n5\n6"]).json()
        index_map = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hdca1),
        }
        invocation_id = self.__invoke_workflow(history_id, uploaded_workflow_id, index_map)
        # Give some time for workflow to get scheduled before scanning the history.
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # Wait for all the datasets to complete, make sure the workflow invocation
        # is not complete.
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled'
        self.__review_paused_steps(uploaded_workflow_id, invocation_id, order_index=4, action=True)
        self.wait_for_invocation_and_jobs(history_id, uploaded_workflow_id, invocation_id)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] == 'scheduled'
        self.assertEqual("reviewed\n1\nreviewed\n4\n", self.dataset_populator.get_history_dataset_content(history_id))
    @skip_without_tool("cat")
    def test_cancel_workflow_invocation(self):
        """DELETE on the invocation usage endpoint cancels a paused invocation."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_pause")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        index_map = {
            '0': self._ds_entry(hda1),
        }
        invocation_id = self.__invoke_workflow(history_id, uploaded_workflow_id, index_map)
        # Give some time for workflow to get scheduled before scanning the history.
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # Wait for all the datasets to complete, make sure the workflow invocation
        # is not complete.
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled'
        invocation_url = self._api_url("workflows/%s/usage/%s" % (uploaded_workflow_id, invocation_id), use_key=True)
        delete_response = delete(invocation_url)
        self._assert_status_code_is(delete_response, 200)
        # Wait for all the datasets to complete, make sure the workflow invocation
        # is not complete.
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] == 'cancelled'
    def test_run_with_implicit_connection(self):
        """A step with an implicit ($step) connection waits on its dependency's scheduling."""
        history_id = self.dataset_populator.new_history()
        run_summary = self._run_jobs("""
class: GalaxyWorkflow
steps:
  - label: test_input
    type: input
  - label: first_cat
    tool_id: cat1
    state:
      input1:
        $link: test_input
  - label: the_pause
    type: pause
    connect:
      input:
      - first_cat#out_file1
  - label: second_cat
    tool_id: cat1
    state:
      input1:
        $link: the_pause
  - label: third_cat
    tool_id: random_lines1
    connect:
      $step: second_cat
    state:
      num_lines: 1
      input:
        $link: test_input
      seed_source:
        seed_source_selector: set_seed
        seed: asdf
test_data:
  test_input: "hello world"
""", history_id=history_id, wait=False)
        history_id = run_summary.history_id
        workflow_id = run_summary.workflow_id
        invocation_id = run_summary.invocation_id
        # Wait for first two jobs to be scheduled - upload and first cat.
        wait_on(lambda: len(self._history_jobs(history_id)) >= 2 or None, "history jobs")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        invocation = self._invocation_details(workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled', invocation
        # Expect two jobs - the upload and first cat. randomlines shouldn't run
        # it is implicitly dependent on second cat.
        self._assert_history_job_count(history_id, 2)
        self.__review_paused_steps(workflow_id, invocation_id, order_index=2, action=True)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        self._assert_history_job_count(history_id, 4)
    def test_run_with_validated_parameter_connection_valid(self):
        """A text parameter connection that passes tool validation runs one job."""
        history_id = self.dataset_populator.new_history()
        run_summary = self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - label: text_input
    type: text
steps:
  - tool_id: validation_repeat
    state:
      r2:
      - text:
          $link: text_input
test_data:
  text_input:
    value: "abd"
    type: raw
""", history_id=history_id, wait=True)
        time.sleep(10)
        self.workflow_populator.wait_for_invocation(run_summary.workflow_id, run_summary.invocation_id)
        jobs = self._history_jobs(history_id)
        assert len(jobs) == 1
    def test_run_with_validated_parameter_connection_invalid(self):
        """A text parameter connection failing tool validation yields a non-ok run."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - label: text_input
    type: text
steps:
  - tool_id: validation_repeat
    state:
      r2:
      - text:
          $link: text_input
test_data:
  text_input:
    value: ""
    type: raw
""", history_id=history_id, wait=True, assert_ok=False)
    def test_run_with_text_connection(self):
        """A text input can feed a tool's nested (conditional) parameter via $link."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - label: data_input
    type: data
  - label: text_input
    type: text
steps:
  - label: randomlines
    tool_id: random_lines1
    state:
      num_lines: 1
      input:
        $link: data_input
      seed_source:
        seed_source_selector: set_seed
        seed:
          $link: text_input
test_data:
  data_input:
    value: 1.bed
    type: File
  text_input:
    value: asdf
    type: raw
""", history_id=history_id)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        content = self.dataset_populator.get_history_dataset_content(history_id)
        self.assertEqual("chr5\t131424298\t131424460\tCCDS4149.1_cds_0_0_chr5_131424299_f\t0\t+\n", content)
    def wait_for_invocation_and_jobs(self, history_id, workflow_id, invocation_id, assert_ok=True):
        """Block until the invocation is scheduled and its history jobs settle.

        The short sleeps give the job handler a moment to create/flush jobs
        between the invocation-level and history-level waits.
        """
        self.workflow_populator.wait_for_invocation(workflow_id, invocation_id)
        time.sleep(.5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=assert_ok)
        time.sleep(.5)
def test_cannot_run_inaccessible_workflow(self):
workflow = self.workflow_populator.load_workflow(name="test_for_run_cannot_access")
workflow_request, history_id = self._setup_workflow_run(workflow)
with self._different_user():
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 403)
def test_400_on_invalid_workflow_id(self):
workflow = self.workflow_populator.load_workflow(name="test_for_run_does_not_exist")
workflow_request, history_id = self._setup_workflow_run(workflow)
workflow_request["workflow_id"] = self._random_key()
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 400)
def test_cannot_run_against_other_users_history(self):
workflow = self.workflow_populator.load_workflow(name="test_for_run_does_not_exist")
workflow_request, history_id = self._setup_workflow_run(workflow)
with self._different_user():
other_history_id = self.dataset_populator.new_history()
workflow_request["history"] = "hist_id=%s" % other_history_id
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 403)
    @skip_without_tool("cat")
    @skip_without_tool("cat_list")
    def test_workflow_run_with_matching_lists(self):
        """Two same-structure list collections map pairwise through the workflow."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_matching_lists")
        workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hdca1 = self.dataset_collection_populator.create_list_in_history(history_id, contents=[("sample1-1", "1 2 3"), ("sample2-1", "7 8 9")]).json()
        hdca2 = self.dataset_collection_populator.create_list_in_history(history_id, contents=[("sample1-2", "4 5 6"), ("sample2-2", "0 a b")]).json()
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        label_map = {"list1": self._ds_entry(hdca1), "list2": self._ds_entry(hdca2)}
        workflow_request = dict(
            history="hist_id=%s" % history_id,
            workflow_id=workflow_id,
            ds_map=self._build_ds_map(workflow_id, label_map),
        )
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 200)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        self.assertEqual("1 2 3\n4 5 6\n7 8 9\n0 a b\n", self.dataset_populator.get_history_dataset_content(history_id))
    def test_workflow_stability(self):
        """Import/export round trips keep the step-index -> input-name mapping stable."""
        # Run this index stability test with following command:
        # ./run_tests.sh test/api/test_workflows.py:WorkflowsApiTestCase.test_workflow_stability
        num_tests = 1
        for workflow_file in ["test_workflow_topoambigouity", "test_workflow_topoambigouity_auto_laidout"]:
            workflow = self.workflow_populator.load_workflow_from_resource(workflow_file)
            last_step_map = self._step_map(workflow)
            for i in range(num_tests):
                uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
                downloaded_workflow = self._download_workflow(uploaded_workflow_id)
                step_map = self._step_map(downloaded_workflow)
                assert step_map == last_step_map
                last_step_map = step_map
def _step_map(self, workflow):
# Build dict mapping 'tep index to input name.
step_map = {}
for step_index, step in workflow["steps"].items():
if step["type"] == "data_input":
step_map[step_index] = step["inputs"][0]["name"]
return step_map
def test_empty_create(self):
response = self._post("workflows")
self._assert_status_code_is(response, 400)
self._assert_error_code_is(response, error_codes.USER_REQUEST_MISSING_PARAMETER)
def test_invalid_create_multiple_types(self):
data = {
'shared_workflow_id': '1234567890abcdef',
'from_history_id': '1234567890abcdef'
}
response = self._post("workflows", data)
self._assert_status_code_is(response, 400)
self._assert_error_code_is(response, error_codes.USER_REQUEST_INVALID_PARAMETER)
    @skip_without_tool("cat1")
    def test_run_with_pja(self):
        """replacement_params substitute into stored post-job rename actions at run time."""
        workflow = self.workflow_populator.load_workflow(name="test_for_pja_run", add_pja=True)
        workflow_request, history_id = self._setup_workflow_run(workflow, inputs_by='step_index')
        workflow_request["replacement_params"] = dumps(dict(replaceme="was replaced"))
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 200)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        assert content["name"] == "foo was replaced"
    @skip_without_tool("cat")
    def test_run_rename_collection_element(self):
        """A rename PJA applies to elements produced by mapping over a collection."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: input1
    type: data_collection_input
    collection_type: list
steps:
  - tool_id: cat
    label: first_cat
    state:
      input1:
        $link: input1
    outputs:
      out_file1:
        rename: "my new name"
test_data:
  input1:
    type: list
    name: the_dataset_list
    elements:
      - identifier: el1
        value: 1.fastq
        type: File
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, hid=3, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "my new name", name
    @skip_without_tool("cat")
    def test_run_rename_based_on_input(self):
        """A rename PJA can reference an input dataset's name via #{input | basename}."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: input1
steps:
  - tool_id: cat
    label: first_cat
    state:
      input1:
        $link: input1
    outputs:
      out_file1:
        rename: "#{input1 | basename} suffix"
test_data:
  input1:
    value: 1.fasta
    type: File
    name: fasta1
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "fasta1 suffix", name
    @skip_without_tool("cat")
    def test_run_rename_based_on_input_recursive(self):
        """Rename templating is not applied recursively to a name that itself contains syntax."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: input1
steps:
  - tool_id: cat
    label: first_cat
    state:
      input1:
        $link: input1
    outputs:
      out_file1:
        rename: "#{input1} #{input1 | upper} suffix"
test_data:
  input1:
    value: 1.fasta
    type: File
    name: '#{input1}'
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "#{input1} #{INPUT1} suffix", name
    @skip_without_tool("cat")
    def test_run_rename_based_on_input_repeat(self):
        """A rename PJA can reference an input nested inside a repeat (queries_0.input2)."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: input1
  - id: input2
steps:
  - tool_id: cat
    label: first_cat
    state:
      input1:
        $link: input1
      queries:
        - input2:
            $link: input2
    outputs:
      out_file1:
        rename: "#{queries_0.input2| basename} suffix"
test_data:
  input1:
    value: 1.fasta
    type: File
    name: fasta1
  input2:
    value: 1.fasta
    type: File
    name: fasta2
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "fasta2 suffix", name
    @skip_without_tool("mapper2")
    def test_run_rename_based_on_input_conditional(self):
        """A rename PJA can reference an input that sits inside a tool conditional."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: fasta_input
  - id: fastq_input
steps:
  - tool_id: mapper2
    state:
      fastq_input:
        fastq_input_selector: single
        fastq_input1:
          $link: fastq_input
      reference:
        $link: fasta_input
    outputs:
      out_file1:
        # Wish it was qualified for conditionals but it doesn't seem to be. -John
        # rename: "#{fastq_input.fastq_input1 | basename} suffix"
        rename: "#{fastq_input1 | basename} suffix"
test_data:
  fasta_input:
    value: 1.fasta
    type: File
    name: fasta1
    file_type: fasta
  fastq_input:
    value: 1.fastqsanger
    type: File
    name: fastq1
    file_type: fastqsanger
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "fastq1 suffix", name
    @skip_without_tool("mapper2")
    def test_run_rename_based_on_input_collection(self):
        """A rename PJA referencing a collection input uses the collection's name."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: fasta_input
  - id: fastq_inputs
steps:
  - tool_id: mapper2
    state:
      fastq_input:
        fastq_input_selector: paired_collection
        fastq_input1:
          $link: fastq_inputs
      reference:
        $link: fasta_input
    outputs:
      out_file1:
        # Wish it was qualified for conditionals but it doesn't seem to be. -John
        # rename: "#{fastq_input.fastq_input1 | basename} suffix"
        rename: "#{fastq_input1} suffix"
test_data:
  fasta_input:
    value: 1.fasta
    type: File
    name: fasta1
    file_type: fasta
  fastq_inputs:
    type: list
    name: the_dataset_pair
    elements:
      - identifier: forward
        value: 1.fastq
        type: File
      - identifier: reverse
        value: 1.fastq
        type: File
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "the_dataset_pair suffix", name
    @skip_without_tool("cat1")
    def test_run_with_runtime_pja(self):
        """Runtime post-job actions apply to the run but are not persisted on the workflow."""
        workflow = self.workflow_populator.load_workflow(name="test_for_pja_runtime")
        uuid0, uuid1, uuid2 = str(uuid4()), str(uuid4()), str(uuid4())
        workflow["steps"]["0"]["uuid"] = uuid0
        workflow["steps"]["1"]["uuid"] = uuid1
        workflow["steps"]["2"]["uuid"] = uuid2
        workflow_request, history_id = self._setup_workflow_run(workflow, inputs_by='step_index')
        workflow_request["replacement_params"] = dumps(dict(replaceme="was replaced"))
        # Attach a rename action to step 2 by its UUID, only for this run.
        pja_map = {
            "RenameDatasetActionout_file1": dict(
                action_type="RenameDatasetAction",
                output_name="out_file1",
                action_arguments=dict(newname="foo ${replaceme}"),
            )
        }
        workflow_request["parameters"] = dumps({
            uuid2: {"__POST_JOB_ACTIONS__": pja_map}
        })
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 200)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        assert content["name"] == "foo was replaced", content["name"]
        # Test for regression of previous behavior where runtime post job actions
        # would be added to the original workflow post job actions.
        workflow_id = workflow_request["workflow_id"]
        downloaded_workflow = self._download_workflow(workflow_id)
        pjas = list(downloaded_workflow["steps"]["2"]["post_job_actions"].values())
        assert len(pjas) == 0, len(pjas)
@skip_without_tool("cat1")
def test_run_with_delayed_runtime_pja(self):
workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
- label: test_input
type: input
- label: first_cat
tool_id: cat1
state:
input1:
$link: test_input
- label: the_pause
type: pause
connect:
input:
- first_cat#out_file1
- label: second_cat
tool_id: cat1
state:
input1:
$link: the_pause
""")
downloaded_workflow = self._download_workflow(workflow_id)
print(downloaded_workflow)
uuid_dict = dict((int(index), step["uuid"]) for index, step in downloaded_workflow["steps"].items())
history_id = self.dataset_populator.new_history()
hda = self.dataset_populator.new_dataset(history_id, content="1 2 3")
self.dataset_populator.wait_for_history(history_id)
inputs = {
'0': self._ds_entry(hda),
}
print(inputs)
uuid2 = uuid_dict[3]
workflow_request = {}
workflow_request["replacement_params"] = dumps(dict(replaceme="was replaced"))
pja_map = {
"RenameDatasetActionout_file1": dict(
action_type="RenameDatasetAction",
output_name="out_file1",
action_arguments=dict(newname="foo ${replaceme}"),
)
}
workflow_request["parameters"] = dumps({
uuid2: {"__POST_JOB_ACTIONS__": pja_map}
})
invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs=inputs, request=workflow_request)
time.sleep(2)
self.dataset_populator.wait_for_history(history_id)
self.__review_paused_steps(workflow_id, invocation_id, order_index=2, action=True)
self.workflow_populator.wait_for_workflow(workflow_id, invocation_id, history_id)
time.sleep(1)
content = self.dataset_populator.get_history_dataset_details(history_id)
assert content["name"] == "foo was replaced", content["name"]
    @skip_without_tool("cat1")
    def test_delete_intermediate_datasets_pja_1(self):
        """The delete_intermediate_datasets output action should delete datasets
        that are not workflow outputs once the workflow completes.
        """
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
- id: input1
outputs:
- id: wf_output_1
  source: third_cat#out_file1
steps:
- tool_id: cat1
  label: first_cat
  state:
    input1:
      $link: input1
- tool_id: cat1
  label: second_cat
  state:
    input1:
      $link: first_cat#out_file1
- tool_id: cat1
  label: third_cat
  state:
    input1:
      $link: second_cat#out_file1
  outputs:
    out_file1:
      delete_intermediate_datasets: true
test_data:
  input1: "hello world"
""", history_id=history_id)
        hda1 = self.dataset_populator.get_history_dataset_details(history_id, hid=1)
        hda2 = self.dataset_populator.get_history_dataset_details(history_id, hid=2)
        hda3 = self.dataset_populator.get_history_dataset_details(history_id, hid=3)
        hda4 = self.dataset_populator.get_history_dataset_details(history_id, hid=4)
        # The workflow input is never deleted.
        assert not hda1["deleted"]
        # first_cat's output is intermediate and should be deleted.
        assert hda2["deleted"]
        # I think hda3 should be deleted, but the inputs to
        # steps with workflow outputs are not deleted.
        # assert hda3["deleted"]
        print(hda3["deleted"])
        # The declared workflow output is never deleted.
        assert not hda4["deleted"]
@skip_without_tool("random_lines1")
def test_run_replace_params_by_tool(self):
workflow_request, history_id = self._setup_random_x2_workflow("test_for_replace_tool_params")
workflow_request["parameters"] = dumps(dict(random_lines1=dict(num_lines=5)))
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
# Would be 8 and 6 without modification
self.__assert_lines_hid_line_count_is(history_id, 2, 5)
self.__assert_lines_hid_line_count_is(history_id, 3, 5)
@skip_without_tool("random_lines1")
def test_run_replace_params_by_uuid(self):
workflow_request, history_id = self._setup_random_x2_workflow("test_for_replace_tool_params")
workflow_request["parameters"] = dumps({
"58dffcc9-bcb7-4117-a0e1-61513524b3b1": dict(num_lines=4),
"58dffcc9-bcb7-4117-a0e1-61513524b3b2": dict(num_lines=3),
})
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
# Would be 8 and 6 without modification
self.__assert_lines_hid_line_count_is(history_id, 2, 4)
self.__assert_lines_hid_line_count_is(history_id, 3, 3)
@skip_without_tool("cat1")
@skip_without_tool("addValue")
def test_run_batch(self):
workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_batch")
workflow_id = self.workflow_populator.create_workflow(workflow)
history_id = self.dataset_populator.new_history()
hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
hda2 = self.dataset_populator.new_dataset(history_id, content="4 5 6")
hda3 = self.dataset_populator.new_dataset(history_id, content="7 8 9")
hda4 = self.dataset_populator.new_dataset(history_id, content="10 11 12")
parameters = {
"0": {"input": {"batch": True, "values": [{"id" : hda1.get("id"), "hid": hda1.get("hid"), "src": "hda"},
{"id" : hda2.get("id"), "hid": hda2.get("hid"), "src": "hda"},
{"id" : hda3.get("id"), "hid": hda2.get("hid"), "src": "hda"},
{"id" : hda4.get("id"), "hid": hda2.get("hid"), "src": "hda"}]}},
"1": {"input": {"batch": False, "values": [{"id" : hda1.get("id"), "hid": hda1.get("hid"), "src": "hda"}]}, "exp": "2"}}
workflow_request = {
"history_id" : history_id,
"batch" : True,
"parameters_normalized": True,
"parameters" : dumps(parameters),
}
invocation_response = self._post("workflows/%s/usage" % workflow_id, data=workflow_request)
self._assert_status_code_is(invocation_response, 200)
time.sleep(5)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
r1 = "1 2 3\t1\n1 2 3\t2\n"
r2 = "4 5 6\t1\n1 2 3\t2\n"
r3 = "7 8 9\t1\n1 2 3\t2\n"
r4 = "10 11 12\t1\n1 2 3\t2\n"
t1 = self.dataset_populator.get_history_dataset_content(history_id, hid=7)
t2 = self.dataset_populator.get_history_dataset_content(history_id, hid=10)
t3 = self.dataset_populator.get_history_dataset_content(history_id, hid=13)
t4 = self.dataset_populator.get_history_dataset_content(history_id, hid=16)
self.assertEqual(r1, t1)
self.assertEqual(r2, t2)
self.assertEqual(r3, t3)
self.assertEqual(r4, t4)
@skip_without_tool("validation_default")
def test_parameter_substitution_sanitization(self):
substitions = dict(input1="\" ; echo \"moo")
run_workflow_response, history_id = self._run_validation_workflow_with_substitions(substitions)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
self.assertEqual("__dq__ X echo __dq__moo\n", self.dataset_populator.get_history_dataset_content(history_id, hid=1))
    @skip_without_tool("validation_repeat")
    def test_parameter_substitution_validation_value_errors_0(self):
        """Replacing a valid repeat text value with an empty (invalid) one at
        invoke time must fail the invocation request with a 400.
        """
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
- tool_id: validation_repeat
  state:
    r2:
    - text: "abd"
""")
        workflow_request = dict(
            history="hist_id=%s" % history_id,
            parameters=dumps(dict(validation_repeat={"r2_0|text": ""}))
        )
        url = "workflows/%s/invocations" % workflow_id
        invocation_response = self._post(url, data=workflow_request)
        # Take a valid state and make it invalid, assert workflow won't run.
        self._assert_status_code_is(invocation_response, 400)
@skip_without_tool("validation_default")
def test_parameter_substitution_validation_value_errors_1(self):
substitions = dict(select_param="\" ; echo \"moo")
run_workflow_response, history_id = self._run_validation_workflow_with_substitions(substitions)
self._assert_status_code_is(run_workflow_response, 400)
    @skip_without_tool("validation_repeat")
    def test_workflow_import_state_validation_1(self):
        """Running a workflow whose stored tool state is invalid (empty required
        repeat text) is rejected with a 400 at invocation time.
        """
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
steps:
- tool_id: validation_repeat
  state:
    r2:
    - text: ""
""", history_id=history_id, wait=False, expected_response=400)
def _run_validation_workflow_with_substitions(self, substitions):
workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_validation_1")
uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
history_id = self.dataset_populator.new_history()
workflow_request = dict(
history="hist_id=%s" % history_id,
workflow_id=uploaded_workflow_id,
parameters=dumps(dict(validation_default=substitions))
)
run_workflow_response = self._post("workflows", data=workflow_request)
return run_workflow_response, history_id
@skip_without_tool("random_lines1")
def test_run_replace_params_by_steps(self):
workflow_request, history_id, steps = self._setup_random_x2_workflow_steps("test_for_replace_step_params")
params = dumps({str(steps[1]["id"]): dict(num_lines=5)})
workflow_request["parameters"] = params
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
# Would be 8 and 6 without modification
self.__assert_lines_hid_line_count_is(history_id, 2, 8)
self.__assert_lines_hid_line_count_is(history_id, 3, 5)
@skip_without_tool("random_lines1")
def test_run_replace_params_nested(self):
workflow_request, history_id, steps = self._setup_random_x2_workflow_steps("test_for_replace_step_params_nested")
seed_source = dict(
seed_source_selector="set_seed",
seed="moo",
)
params = dumps({str(steps[0]["id"]): dict(num_lines=1, seed_source=seed_source),
str(steps[1]["id"]): dict(num_lines=1, seed_source=seed_source)})
workflow_request["parameters"] = params
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
self.assertEqual("3\n", self.dataset_populator.get_history_dataset_content(history_id))
def test_pja_import_export(self):
workflow = self.workflow_populator.load_workflow(name="test_for_pja_import", add_pja=True)
uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
downloaded_workflow = self._download_workflow(uploaded_workflow_id)
self._assert_has_keys(downloaded_workflow["steps"], "0", "1", "2")
pjas = list(downloaded_workflow["steps"]["2"]["post_job_actions"].values())
assert len(pjas) == 1, len(pjas)
pja = pjas[0]
self._assert_has_keys(pja, "action_type", "output_name", "action_arguments")
@skip_without_tool("cat1")
def test_only_own_invocations_accessible(self):
workflow_id, usage = self._run_workflow_once_get_invocation("test_usage")
with self._different_user():
usage_details_response = self._get("workflows/%s/usage/%s" % (workflow_id, usage["id"]))
self._assert_status_code_is(usage_details_response, 403)
@skip_without_tool("cat1")
def test_invocation_usage(self):
workflow_id, usage = self._run_workflow_once_get_invocation("test_usage")
invocation_id = usage["id"]
usage_details = self._invocation_details(workflow_id, invocation_id)
# Assert some high-level things about the structure of data returned.
self._assert_has_keys(usage_details, "inputs", "steps")
invocation_steps = usage_details["steps"]
for step in invocation_steps:
self._assert_has_keys(step, "workflow_step_id", "order_index", "id")
an_invocation_step = invocation_steps[0]
step_id = an_invocation_step["id"]
step_response = self._get("workflows/%s/usage/%s/steps/%s" % (workflow_id, invocation_id, step_id))
self._assert_status_code_is(step_response, 200)
self._assert_has_keys(step_response.json(), "id", "order_index")
@skip_without_tool("cat1")
def test_invocations_accessible_imported_workflow(self):
workflow_id = self.workflow_populator.simple_workflow("test_usage", publish=True)
with self._different_user():
other_import_response = self.__import_workflow(workflow_id)
self._assert_status_code_is(other_import_response, 200)
other_id = other_import_response.json()["id"]
workflow_request, history_id = self._setup_workflow_run(workflow_id=other_id)
response = self._get("workflows/%s/usage" % other_id)
self._assert_status_code_is(response, 200)
assert len(response.json()) == 0
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
run_workflow_response = run_workflow_response.json()
invocation_id = run_workflow_response['id']
usage_details_response = self._get("workflows/%s/usage/%s" % (other_id, invocation_id))
self._assert_status_code_is(usage_details_response, 200)
@skip_without_tool("cat1")
def test_invocations_accessible_published_workflow(self):
workflow_id = self.workflow_populator.simple_workflow("test_usage", publish=True)
with self._different_user():
workflow_request, history_id = self._setup_workflow_run(workflow_id=workflow_id)
workflow_request['workflow_id'] = workflow_request.pop('workflow_id')
response = self._get("workflows/%s/usage" % workflow_id)
self._assert_status_code_is(response, 200)
assert len(response.json()) == 0
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
run_workflow_response = run_workflow_response.json()
invocation_id = run_workflow_response['id']
usage_details_response = self._get("workflows/%s/usage/%s" % (workflow_id, invocation_id))
self._assert_status_code_is(usage_details_response, 200)
@skip_without_tool("cat1")
def test_invocations_not_accessible_by_different_user_for_published_workflow(self):
workflow_id = self.workflow_populator.simple_workflow("test_usage", publish=True)
workflow_request, history_id = self._setup_workflow_run(workflow_id=workflow_id)
workflow_request['workflow_id'] = workflow_request.pop('workflow_id')
response = self._get("workflows/%s/usage" % workflow_id)
self._assert_status_code_is(response, 200)
assert len(response.json()) == 0
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
run_workflow_response = run_workflow_response.json()
invocation_id = run_workflow_response['id']
with self._different_user():
usage_details_response = self._get("workflows/%s/usage/%s" % (workflow_id, invocation_id))
self._assert_status_code_is(usage_details_response, 403)
def _update_workflow(self, workflow_id, workflow_object):
data = dict(
workflow=workflow_object
)
raw_url = 'workflows/%s' % workflow_id
url = self._api_url(raw_url, use_key=True)
put_response = put(url, data=dumps(data))
return put_response
def _invocation_step_details(self, workflow_id, invocation_id, step_id):
invocation_step_response = self._get("workflows/%s/usage/%s/steps/%s" % (workflow_id, invocation_id, step_id))
self._assert_status_code_is(invocation_step_response, 200)
invocation_step_details = invocation_step_response.json()
return invocation_step_details
def _execute_invocation_step_action(self, workflow_id, invocation_id, step_id, action):
raw_url = "workflows/%s/usage/%s/steps/%s" % (workflow_id, invocation_id, step_id)
url = self._api_url(raw_url, use_key=True)
payload = dumps(dict(action=action))
action_response = put(url, data=payload)
self._assert_status_code_is(action_response, 200)
invocation_step_details = action_response.json()
return invocation_step_details
def _run_workflow_once_get_invocation(self, name):
workflow = self.workflow_populator.load_workflow(name=name)
workflow_request, history_id = self._setup_workflow_run(workflow)
workflow_id = workflow_request["workflow_id"]
response = self._get("workflows/%s/usage" % workflow_id)
self._assert_status_code_is(response, 200)
assert len(response.json()) == 0
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
response = self._get("workflows/%s/usage" % workflow_id)
self._assert_status_code_is(response, 200)
usages = response.json()
assert len(usages) == 1
return workflow_id, usages[0]
def _setup_random_x2_workflow_steps(self, name):
workflow_request, history_id = self._setup_random_x2_workflow("test_for_replace_step_params")
random_line_steps = self._random_lines_steps(workflow_request)
return workflow_request, history_id, random_line_steps
def _random_lines_steps(self, workflow_request):
workflow_summary_response = self._get("workflows/%s" % workflow_request["workflow_id"])
self._assert_status_code_is(workflow_summary_response, 200)
steps = workflow_summary_response.json()["steps"]
return sorted((step for step in steps.values() if step["tool_id"] == "random_lines1"), key=lambda step: step["id"])
def _setup_random_x2_workflow(self, name):
workflow = self.workflow_populator.load_random_x2_workflow(name)
uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
workflow_inputs = self._workflow_inputs(uploaded_workflow_id)
key = next(iter(workflow_inputs.keys()))
history_id = self.dataset_populator.new_history()
ten_lines = "\n".join(str(_) for _ in range(10))
hda1 = self.dataset_populator.new_dataset(history_id, content=ten_lines)
workflow_request = dict(
history="hist_id=%s" % history_id,
workflow_id=uploaded_workflow_id,
ds_map=dumps({
key: self._ds_entry(hda1),
}),
)
return workflow_request, history_id
def __review_paused_steps(self, uploaded_workflow_id, invocation_id, order_index, action=True):
invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
invocation_steps = invocation["steps"]
pause_steps = [s for s in invocation_steps if s['order_index'] == order_index]
for pause_step in pause_steps:
pause_step_id = pause_step['id']
self._execute_invocation_step_action(uploaded_workflow_id, invocation_id, pause_step_id, action=action)
def __assert_lines_hid_line_count_is(self, history, hid, lines):
contents_url = "histories/%s/contents" % history
history_contents_response = self._get(contents_url)
self._assert_status_code_is(history_contents_response, 200)
hda_summary = next(hc for hc in history_contents_response.json() if hc["hid"] == hid)
hda_info_response = self._get("%s/%s" % (contents_url, hda_summary["id"]))
self._assert_status_code_is(hda_info_response, 200)
self.assertEqual(hda_info_response.json()["metadata_data_lines"], lines)
def __invoke_workflow(self, history_id, workflow_id, inputs={}, request={}, assert_ok=True):
request["history"] = "hist_id=%s" % history_id,
if inputs:
request["inputs"] = dumps(inputs)
request["inputs_by"] = 'step_index'
url = "workflows/%s/usage" % (workflow_id)
invocation_response = self._post(url, data=request)
if assert_ok:
self._assert_status_code_is(invocation_response, 200)
invocation_id = invocation_response.json()["id"]
return invocation_id
else:
return invocation_response
def __import_workflow(self, workflow_id, deprecated_route=False):
if deprecated_route:
route = "workflows/import"
import_data = dict(
workflow_id=workflow_id,
)
else:
route = "workflows"
import_data = dict(
shared_workflow_id=workflow_id,
)
return self._post(route, import_data)
def _download_workflow(self, workflow_id, style=None):
params = {}
if style:
params = {"style": style}
download_response = self._get("workflows/%s/download" % workflow_id, params)
self._assert_status_code_is(download_response, 200)
downloaded_workflow = download_response.json()
return downloaded_workflow
def _show_workflow(self, workflow_id):
show_response = self._get("workflows/%s" % workflow_id)
self._assert_status_code_is(show_response, 200)
return show_response.json()
def _assert_looks_like_instance_workflow_representation(self, workflow):
self._assert_has_keys(
workflow,
'url',
'owner',
'inputs',
'annotation',
'steps'
)
for step in workflow["steps"].values():
self._assert_has_keys(
step,
'id',
'type',
'tool_id',
'tool_version',
'annotation',
'tool_inputs',
'input_steps',
)
# Lightweight record bundling the identifiers produced by a workflow run:
# the history and workflow involved, the invocation created, the resolved
# inputs, and the jobs recorded in the history.
RunJobsSummary = namedtuple('RunJobsSummary', ['history_id', 'workflow_id', 'invocation_id', 'inputs', 'jobs'])
| 41.416832 | 179 | 0.673986 | from __future__ import print_function
import time
from collections import namedtuple
from json import dumps
from uuid import uuid4
import yaml
from requests import delete, put
from base import api
from base.populators import (
DatasetCollectionPopulator,
DatasetPopulator,
skip_without_tool,
wait_on,
WorkflowPopulator
)
from galaxy.exceptions import error_codes
from galaxy.tools.verify.test_data import TestDataResolver
SIMPLE_NESTED_WORKFLOW_YAML = """
class: GalaxyWorkflow
inputs:
- id: outer_input
steps:
- tool_id: cat1
label: first_cat
state:
input1:
$link: outer_input
- run:
class: GalaxyWorkflow
inputs:
- id: inner_input
outputs:
- id: workflow_output
source: random_lines#out_file1
steps:
- tool_id: random_lines1
label: random_lines
state:
num_lines: 1
input:
$link: inner_input
seed_source:
seed_source_selector: set_seed
seed: asdf
label: nested_workflow
connect:
inner_input: first_cat#out_file1
- tool_id: cat1
label: second_cat
state:
input1:
$link: nested_workflow#workflow_output
queries:
- input2:
$link: nested_workflow#workflow_output
test_data:
outer_input:
value: 1.bed
type: File
"""
class BaseWorkflowsApiTestCase(api.ApiTestCase):
def setUp(self):
super(BaseWorkflowsApiTestCase, self).setUp()
self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
self.dataset_collection_populator = DatasetCollectionPopulator(self.galaxy_interactor)
def _assert_user_has_workflow_with_name(self, name):
names = self._workflow_names()
assert name in names, "No workflows with name %s in users workflows <%s>" % (name, names)
def _workflow_names(self):
index_response = self._get("workflows")
self._assert_status_code_is(index_response, 200)
names = [w["name"] for w in index_response.json()]
return names
def import_workflow(self, workflow, **kwds):
upload_response = self.workflow_populator.import_workflow(workflow, **kwds)
return upload_response
def _upload_yaml_workflow(self, has_yaml, **kwds):
return self.workflow_populator.upload_yaml_workflow(has_yaml, **kwds)
def _setup_workflow_run(self, workflow=None, inputs_by='step_id', history_id=None, workflow_id=None):
if not workflow_id:
workflow_id = self.workflow_populator.create_workflow(workflow)
if not history_id:
history_id = self.dataset_populator.new_history()
hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
hda2 = self.dataset_populator.new_dataset(history_id, content="4 5 6")
workflow_request = dict(
history="hist_id=%s" % history_id,
workflow_id=workflow_id,
)
label_map = {
'WorkflowInput1': self._ds_entry(hda1),
'WorkflowInput2': self._ds_entry(hda2)
}
if inputs_by == 'step_id':
ds_map = self._build_ds_map(workflow_id, label_map)
workflow_request["ds_map"] = ds_map
elif inputs_by == "step_index":
index_map = {
'0': self._ds_entry(hda1),
'1': self._ds_entry(hda2)
}
workflow_request["inputs"] = dumps(index_map)
workflow_request["inputs_by"] = 'step_index'
elif inputs_by == "name":
workflow_request["inputs"] = dumps(label_map)
workflow_request["inputs_by"] = 'name'
elif inputs_by in ["step_uuid", "uuid_implicitly"]:
uuid_map = {
workflow["steps"]["0"]["uuid"]: self._ds_entry(hda1),
workflow["steps"]["1"]["uuid"]: self._ds_entry(hda2),
}
workflow_request["inputs"] = dumps(uuid_map)
if inputs_by == "step_uuid":
workflow_request["inputs_by"] = "step_uuid"
return workflow_request, history_id
def _build_ds_map(self, workflow_id, label_map):
workflow_inputs = self._workflow_inputs(workflow_id)
ds_map = {}
for key, value in workflow_inputs.items():
label = value["label"]
if label in label_map:
ds_map[key] = label_map[label]
return dumps(ds_map)
def _ds_entry(self, hda):
src = 'hda'
if 'history_content_type' in hda and hda['history_content_type'] == "dataset_collection":
src = 'hdca'
return dict(src=src, id=hda["id"])
def _workflow_inputs(self, uploaded_workflow_id):
workflow_show_resposne = self._get("workflows/%s" % uploaded_workflow_id)
self._assert_status_code_is(workflow_show_resposne, 200)
workflow_inputs = workflow_show_resposne.json()["inputs"]
return workflow_inputs
def _invocation_details(self, workflow_id, invocation_id):
invocation_details_response = self._get("workflows/%s/usage/%s" % (workflow_id, invocation_id))
self._assert_status_code_is(invocation_details_response, 200)
invocation_details = invocation_details_response.json()
return invocation_details
def _run_jobs(self, has_workflow, history_id=None, wait=True, source_type=None, jobs_descriptions=None, expected_response=200, assert_ok=True):
def read_test_data(test_dict):
test_data_resolver = TestDataResolver()
filename = test_data_resolver.get_filename(test_dict["value"])
content = open(filename, "r").read()
return content
if history_id is None:
history_id = self.history_id
workflow_id = self._upload_yaml_workflow(
has_workflow, source_type=source_type
)
if jobs_descriptions is None:
assert source_type != "path"
jobs_descriptions = yaml.load(has_workflow)
test_data = jobs_descriptions.get("test_data", {})
label_map = {}
inputs = {}
has_uploads = False
for key, value in test_data.items():
is_dict = isinstance(value, dict)
if is_dict and ("elements" in value or value.get("type", None) in ["list:paired", "list", "paired"]):
elements_data = value.get("elements", [])
elements = []
for element_data in elements_data:
identifier = element_data["identifier"]
input_type = element_data.get("type", "raw")
if input_type == "File":
content = read_test_data(element_data)
else:
content = element_data["content"]
elements.append((identifier, content))
collection_type = value["type"]
new_collection_kwds = {}
if "name" in value:
new_collection_kwds["name"] = value["name"]
if collection_type == "list:paired":
hdca = self.dataset_collection_populator.create_list_of_pairs_in_history(history_id, **new_collection_kwds).json()
elif collection_type == "list":
hdca = self.dataset_collection_populator.create_list_in_history(history_id, contents=elements, **new_collection_kwds).json()
else:
hdca = self.dataset_collection_populator.create_pair_in_history(history_id, contents=elements, **new_collection_kwds).json()
label_map[key] = self._ds_entry(hdca)
inputs[key] = hdca
has_uploads = True
elif is_dict and "type" in value:
input_type = value["type"]
if input_type == "File":
content = read_test_data(value)
new_dataset_kwds = {
"content": content
}
if "name" in value:
new_dataset_kwds["name"] = value["name"]
if "file_type" in value:
new_dataset_kwds["file_type"] = value["file_type"]
hda = self.dataset_populator.new_dataset(history_id, **new_dataset_kwds)
label_map[key] = self._ds_entry(hda)
has_uploads = True
elif input_type == "raw":
label_map[key] = value["value"]
inputs[key] = value["value"]
elif not is_dict:
has_uploads = True
hda = self.dataset_populator.new_dataset(history_id, content=value)
label_map[key] = self._ds_entry(hda)
inputs[key] = hda
else:
raise ValueError("Invalid test_data def %" % test_data)
workflow_request = dict(
history="hist_id=%s" % history_id,
workflow_id=workflow_id,
)
workflow_request["inputs"] = dumps(label_map)
workflow_request["inputs_by"] = 'name'
if has_uploads:
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
url = "workflows/%s/usage" % (workflow_id)
invocation_response = self._post(url, data=workflow_request)
self._assert_status_code_is(invocation_response, expected_response)
invocation = invocation_response.json()
invocation_id = invocation.get('id')
if invocation_id:
if wait:
self.workflow_populator.wait_for_workflow(workflow_id, invocation_id, history_id, assert_ok=assert_ok)
jobs = self._history_jobs(history_id)
return RunJobsSummary(
history_id=history_id,
workflow_id=workflow_id,
invocation_id=invocation_id,
inputs=inputs,
jobs=jobs,
)
def _history_jobs(self, history_id):
return self._get("jobs", {"history_id": history_id, "order_by": "create_time"}).json()
def _assert_history_job_count(self, history_id, n):
jobs = self._history_jobs(history_id)
self.assertEqual(len(jobs), n)
class WorkflowsApiTestCase(BaseWorkflowsApiTestCase):
def setUp(self):
super(WorkflowsApiTestCase, self).setUp()
def test_show_valid(self):
workflow_id = self.workflow_populator.simple_workflow("dummy")
workflow_id = self.workflow_populator.simple_workflow("test_regular")
show_response = self._get("workflows/%s" % workflow_id, {"style": "instance"})
workflow = show_response.json()
self._assert_looks_like_instance_workflow_representation(workflow)
assert len(workflow["steps"]) == 3
self.assertEqual(sorted(step["id"] for step in workflow["steps"].values()), [0, 1, 2])
show_response = self._get("workflows/%s" % workflow_id, {"legacy": True})
workflow = show_response.json()
self._assert_looks_like_instance_workflow_representation(workflow)
assert len(workflow["steps"]) == 3
# workflow was created first in this instance.
self.assertNotEqual(sorted(step["id"] for step in workflow["steps"].values()), [0, 1, 2])
def test_show_invalid_key_is_400(self):
show_response = self._get("workflows/%s" % self._random_key())
self._assert_status_code_is(show_response, 400)
def test_cannot_show_private_workflow(self):
workflow_id = self.workflow_populator.simple_workflow("test_not_importportable")
with self._different_user():
show_response = self._get("workflows/%s" % workflow_id)
self._assert_status_code_is(show_response, 403)
def test_delete(self):
workflow_id = self.workflow_populator.simple_workflow("test_delete")
workflow_name = "test_delete"
self._assert_user_has_workflow_with_name(workflow_name)
workflow_url = self._api_url("workflows/%s" % workflow_id, use_key=True)
delete_response = delete(workflow_url)
self._assert_status_code_is(delete_response, 200)
# Make sure workflow is no longer in index by default.
assert workflow_name not in self._workflow_names()
def test_other_cannot_delete(self):
workflow_id = self.workflow_populator.simple_workflow("test_other_delete")
with self._different_user():
workflow_url = self._api_url("workflows/%s" % workflow_id, use_key=True)
delete_response = delete(workflow_url)
self._assert_status_code_is(delete_response, 403)
def test_index(self):
index_response = self._get("workflows")
self._assert_status_code_is(index_response, 200)
assert isinstance(index_response.json(), list)
def test_upload(self):
self.__test_upload(use_deprecated_route=False)
def test_upload_deprecated(self):
self.__test_upload(use_deprecated_route=True)
def test_import_tools_requires_admin(self):
response = self.__test_upload(import_tools=True, assert_ok=False)
assert response.status_code == 403
def __test_upload(self, use_deprecated_route=False, name="test_import", workflow=None, assert_ok=True, import_tools=False):
if workflow is None:
workflow = self.workflow_populator.load_workflow(name=name)
data = dict(
workflow=dumps(workflow),
)
if import_tools:
data["import_tools"] = import_tools
if use_deprecated_route:
route = "workflows/upload"
else:
route = "workflows"
upload_response = self._post(route, data=data)
if assert_ok:
self._assert_status_code_is(upload_response, 200)
self._assert_user_has_workflow_with_name(name)
return upload_response
def test_update(self):
original_workflow = self.workflow_populator.load_workflow(name="test_import")
uuids = {}
labels = {}
for order_index, step_dict in original_workflow["steps"].items():
uuid = str(uuid4())
step_dict["uuid"] = uuid
uuids[order_index] = uuid
label = "label_%s" % order_index
step_dict["label"] = label
labels[order_index] = label
def check_label_and_uuid(order_index, step_dict):
assert order_index in uuids
assert order_index in labels
self.assertEqual(uuids[order_index], step_dict["uuid"])
self.assertEqual(labels[order_index], step_dict["label"])
upload_response = self.__test_upload(workflow=original_workflow)
workflow_id = upload_response.json()["id"]
def update(workflow_object):
put_response = self._update_workflow(workflow_id, workflow_object)
self._assert_status_code_is(put_response, 200)
return put_response
workflow_content = self._download_workflow(workflow_id)
steps = workflow_content["steps"]
def tweak_step(step):
order_index, step_dict = step
check_label_and_uuid(order_index, step_dict)
assert step_dict['position']['top'] != 1
assert step_dict['position']['left'] != 1
step_dict['position'] = {'top': 1, 'left': 1}
map(tweak_step, steps.items())
update(workflow_content)
def check_step(step):
order_index, step_dict = step
check_label_and_uuid(order_index, step_dict)
assert step_dict['position']['top'] == 1
assert step_dict['position']['left'] == 1
updated_workflow_content = self._download_workflow(workflow_id)
map(check_step, updated_workflow_content['steps'].items())
# Re-update against original worklfow...
update(original_workflow)
updated_workflow_content = self._download_workflow(workflow_id)
# Make sure the positions have been updated.
map(tweak_step, updated_workflow_content['steps'].items())
def test_update_no_tool_id(self):
workflow_object = self.workflow_populator.load_workflow(name="test_import")
upload_response = self.__test_upload(workflow=workflow_object)
workflow_id = upload_response.json()["id"]
del workflow_object["steps"]["2"]["tool_id"]
put_response = self._update_workflow(workflow_id, workflow_object)
self._assert_status_code_is(put_response, 400)
def test_update_missing_tool(self):
# Create allows missing tools, update doesn't currently...
workflow_object = self.workflow_populator.load_workflow(name="test_import")
upload_response = self.__test_upload(workflow=workflow_object)
workflow_id = upload_response.json()["id"]
workflow_object["steps"]["2"]["tool_id"] = "cat-not-found"
put_response = self._update_workflow(workflow_id, workflow_object)
self._assert_status_code_is(put_response, 400)
def test_require_unique_step_uuids(self):
workflow_dup_uuids = self.workflow_populator.load_workflow(name="test_import")
uuid0 = str(uuid4())
for step_dict in workflow_dup_uuids["steps"].values():
step_dict["uuid"] = uuid0
response = self.workflow_populator.create_workflow_response(workflow_dup_uuids)
self._assert_status_code_is(response, 400)
def test_require_unique_step_labels(self):
workflow_dup_label = self.workflow_populator.load_workflow(name="test_import")
for step_dict in workflow_dup_label["steps"].values():
step_dict["label"] = "my duplicated label"
response = self.workflow_populator.create_workflow_response(workflow_dup_label)
self._assert_status_code_is(response, 400)
    def test_import_deprecated(self):
        """A different user can import a published workflow (new-style route)."""
        workflow_id = self.workflow_populator.simple_workflow("test_import_published_deprecated", publish=True)
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id)
            self._assert_status_code_is(other_import_response, 200)
            self._assert_user_has_workflow_with_name("imported: test_import_published_deprecated")

    def test_import_annotations(self):
        """Workflow and step annotations survive import by another user."""
        workflow_id = self.workflow_populator.simple_workflow("test_import_annotations", publish=True)
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id)
            self._assert_status_code_is(other_import_response, 200)
            # Check annotations preserved during upload and copied over during
            # import.
            other_id = other_import_response.json()["id"]
            imported_workflow = self._show_workflow(other_id)
            assert imported_workflow["annotation"] == "simple workflow"
            step_annotations = set(step["annotation"] for step in imported_workflow["steps"].values())
            assert "input1 description" in step_annotations
    def test_import_subworkflows(self):
        """Importing a nested workflow copies the subworkflow (new content id)."""
        def get_subworkflow_content_id(workflow_id):
            # Pull the editor-style download and locate the single subworkflow step.
            workflow_contents = self._download_workflow(workflow_id, style="editor")
            steps = workflow_contents['steps']
            subworkflow_step = next(s for s in steps.values() if s["type"] == "subworkflow")
            return subworkflow_step['content_id']

        workflow_id = self._upload_yaml_workflow(SIMPLE_NESTED_WORKFLOW_YAML, publish=True)
        subworkflow_content_id = get_subworkflow_content_id(workflow_id)
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id)
            self._assert_status_code_is(other_import_response, 200)
            imported_workflow_id = other_import_response.json()["id"]
            imported_subworkflow_content_id = get_subworkflow_content_id(imported_workflow_id)
            # The imported copy must own its own subworkflow, not share the original's.
            assert subworkflow_content_id != imported_subworkflow_content_id
    def test_not_importable_prevents_import(self):
        """Importing an unpublished workflow as another user must 403."""
        workflow_id = self.workflow_populator.simple_workflow("test_not_importportable")
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id)
            self._assert_status_code_is(other_import_response, 403)

    def test_import_published(self):
        """A published workflow can be imported via the deprecated route."""
        workflow_id = self.workflow_populator.simple_workflow("test_import_published", publish=True)
        with self._different_user():
            other_import_response = self.__import_workflow(workflow_id, deprecated_route=True)
            self._assert_status_code_is(other_import_response, 200)
            self._assert_user_has_workflow_with_name("imported: test_import_published")
    def test_export(self):
        """Download a workflow and verify the exported (ga-format) structure."""
        uploaded_workflow_id = self.workflow_populator.simple_workflow("test_for_export")
        downloaded_workflow = self._download_workflow(uploaded_workflow_id)
        assert downloaded_workflow["name"] == "test_for_export"
        assert len(downloaded_workflow["steps"]) == 3
        first_input = downloaded_workflow["steps"]["0"]["inputs"][0]
        assert first_input["name"] == "WorkflowInput1"
        assert first_input["description"] == "input1 description"
        self._assert_has_keys(downloaded_workflow, "a_galaxy_workflow", "format-version", "annotation", "uuid", "steps")
        # Every exported step carries the full serialization contract.
        for step in downloaded_workflow["steps"].values():
            self._assert_has_keys(
                step,
                'id',
                'type',
                'tool_id',
                'tool_version',
                'name',
                'tool_state',
                'annotation',
                'inputs',
                'workflow_outputs',
                'outputs'
            )
            if step['type'] == "tool":
                # Tool steps additionally serialize their post-job actions.
                self._assert_has_keys(step, "post_job_actions")
    def test_export_editor(self):
        """Download a workflow in editor style and verify per-step keys."""
        uploaded_workflow_id = self.workflow_populator.simple_workflow("test_for_export")
        downloaded_workflow = self._download_workflow(uploaded_workflow_id, style="editor")
        self._assert_has_keys(downloaded_workflow, "name", "steps", "upgrade_messages")
        # Editor-style steps expose UI-oriented keys (tooltip, config_form, ...).
        for step in downloaded_workflow["steps"].values():
            self._assert_has_keys(
                step,
                'id',
                'type',
                'content_id',
                'name',
                'tool_state',
                'tooltip',
                'data_inputs',
                'data_outputs',
                'config_form',
                'annotation',
                'post_job_actions',
                'workflow_outputs',
                'uuid',
                'label',
            )
    def test_import_missing_tool(self):
        """Creation tolerates a missing tool; the step is retained as-is."""
        workflow = self.workflow_populator.load_workflow_from_resource(name="test_workflow_missing_tool")
        workflow_id = self.workflow_populator.create_workflow(workflow)
        workflow_description = self._show_workflow(workflow_id)
        steps = workflow_description["steps"]
        missing_tool_steps = [v for v in steps.values() if v['tool_id'] == 'cat_missing_tool']
        assert len(missing_tool_steps) == 1

    def test_import_no_tool_id(self):
        """Creation must 400 when a tool step lacks ``tool_id`` entirely."""
        # Import works with missing tools, but not with absent content.
        workflow = self.workflow_populator.load_workflow_from_resource(name="test_workflow_missing_tool")
        del workflow["steps"]["2"]["tool_id"]
        create_response = self.__test_upload(workflow=workflow, assert_ok=False)
        self._assert_status_code_is(create_response, 400)

    def test_import_export_with_runtime_inputs(self):
        """Runtime (deferred) tool parameters round-trip through export."""
        workflow = self.workflow_populator.load_workflow_from_resource(name="test_workflow_with_runtime_input")
        workflow_id = self.workflow_populator.create_workflow(workflow)
        downloaded_workflow = self._download_workflow(workflow_id)
        assert len(downloaded_workflow["steps"]) == 2
        runtime_input = downloaded_workflow["steps"]["1"]["inputs"][0]
        assert runtime_input["description"].startswith("runtime parameter for tool")
        assert runtime_input["name"] == "num_lines"
    # The following five tests run the same cat workflow but vary how the
    # inputs are addressed in the run request (index, uuid, name, or step id).
    @skip_without_tool("cat1")
    def test_run_workflow_by_index(self):
        """Run a workflow addressing inputs by step index."""
        self.__run_cat_workflow(inputs_by='step_index')

    @skip_without_tool("cat1")
    def test_run_workflow_by_uuid(self):
        """Run a workflow addressing inputs by step UUID."""
        self.__run_cat_workflow(inputs_by='step_uuid')

    @skip_without_tool("cat1")
    def test_run_workflow_by_uuid_implicitly(self):
        """Run a workflow addressing inputs by bare UUID keys."""
        self.__run_cat_workflow(inputs_by='uuid_implicitly')

    @skip_without_tool("cat1")
    def test_run_workflow_by_name(self):
        """Run a workflow addressing inputs by input name."""
        self.__run_cat_workflow(inputs_by='name')

    @skip_without_tool("cat1")
    def test_run_workflow(self):
        """Run a workflow addressing inputs by step id (legacy default)."""
        self.__run_cat_workflow(inputs_by='step_id')
    @skip_without_tool("multiple_versions")
    def test_run_versioned_tools(self):
        """Two workflows pinning different versions of the same tool both run."""
        history_01_id = self.dataset_populator.new_history()
        workflow_version_01 = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - tool_id: multiple_versions
    tool_version: "0.1"
    state:
      inttest: 0
""")
        self.__invoke_workflow(history_01_id, workflow_version_01)
        self.dataset_populator.wait_for_history(history_01_id, assert_ok=True)

        history_02_id = self.dataset_populator.new_history()
        workflow_version_02 = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - tool_id: multiple_versions
    tool_version: "0.2"
    state:
      inttest: 1
""")
        self.__invoke_workflow(history_02_id, workflow_version_02)
        self.dataset_populator.wait_for_history(history_02_id, assert_ok=True)
    def __run_cat_workflow(self, inputs_by):
        """Run the standard cat workflow, addressing inputs per *inputs_by*.

        ``inputs_by`` is one of 'step_index', 'step_uuid', 'uuid_implicitly',
        'name', or 'step_id' and controls how the run request maps datasets
        to workflow inputs. Asserts the invocation schedules and the history
        completes successfully.
        """
        workflow = self.workflow_populator.load_workflow(name="test_for_run")
        # Fresh UUIDs so uuid-addressed runs have something unambiguous to target.
        workflow["steps"]["0"]["uuid"] = str(uuid4())
        workflow["steps"]["1"]["uuid"] = str(uuid4())
        workflow_request, history_id = self._setup_workflow_run(workflow, inputs_by=inputs_by)
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 200)
        invocation_id = run_workflow_response.json()["id"]
        invocation = self._invocation_details(workflow_request["workflow_id"], invocation_id)
        assert invocation["state"] == "scheduled", invocation
        self._assert_status_code_is(run_workflow_response, 200)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
    @skip_without_tool("collection_creates_pair")
    def test_workflow_run_output_collections(self):
        """A tool-created pair collection feeds a downstream paired-input tool."""
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: text_input
    type: input
  - label: split_up
    tool_id: collection_creates_pair
    state:
      input1:
        $link: text_input
  - tool_id: collection_paired_test
    state:
      f1:
        $link: split_up#paired_output
""")
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="a\nb\nc\nd\n")
        inputs = {
            '0': self._ds_entry(hda1),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # collection_creates_pair interleaves lines, so a/c then b/d.
        self.assertEqual("a\nc\nb\nd\n", self.dataset_populator.get_history_dataset_content(history_id, hid=0))
    @skip_without_tool("collection_creates_pair")
    def test_workflow_run_output_collection_mapping(self):
        """Map a pair-producing tool over a list and reduce with cat_list."""
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - type: input_collection
  - tool_id: collection_creates_pair
    state:
      input1:
        $link: 0
  - tool_id: collection_paired_test
    state:
      f1:
        $link: 1#paired_output
  - tool_id: cat_list
    state:
      input1:
        $link: 2#out1
""")
        history_id = self.dataset_populator.new_history()
        hdca1 = self.dataset_collection_populator.create_list_in_history(history_id, contents=["a\nb\nc\nd\n", "e\nf\ng\nh\n"]).json()
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hdca1),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # Each list element gets interleaved by the pair step, then concatenated.
        self.assertEqual("a\nc\nb\nd\ne\ng\nf\nh\n", self.dataset_populator.get_history_dataset_content(history_id, hid=0))
    @skip_without_tool("collection_split_on_column")
    def test_workflow_run_dynamic_output_collections(self):
        """A dynamically-discovered collection feeds a downstream reduction."""
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: text_input1
    type: input
  - label: text_input2
    type: input
  - label: cat_inputs
    tool_id: cat1
    state:
      input1:
        $link: text_input1
      queries:
        - input2:
            $link: text_input2
  - label: split_up
    tool_id: collection_split_on_column
    state:
      input1:
        $link: cat_inputs#out_file1
  - tool_id: cat_list
    state:
      input1:
        $link: split_up#split_output
""")
        hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t30.0\nsamp2\t40.0\n")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hda2),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        details = self.dataset_populator.get_history_dataset_details(history_id, hid=0)
        last_item_hid = details["hid"]
        assert last_item_hid == 7, "Expected 7 history items, got %s" % last_item_hid
        content = self.dataset_populator.get_history_dataset_content(history_id, hid=0)
        # Split on column 1, then concatenate the per-sample values back together.
        self.assertEqual("10.0\n30.0\n20.0\n40.0\n", content)
    @skip_without_tool("collection_split_on_column")
    @skip_without_tool("min_repeat")
    def test_workflow_run_dynamic_output_collections_2(self):
        """Map a dynamic collection over the second, nested repeat of a tool.

        Regression-style check that replacement happens in the correct repeat
        (queries2, not queries) when mapping a discovered collection.
        """
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: test_input_1
    type: input
  - label: test_input_2
    type: input
  - label: test_input_3
    type: input
  - label: split_up
    tool_id: collection_split_on_column
    state:
      input1:
        $link: test_input_2
  - label: min_repeat
    tool_id: min_repeat
    state:
      queries:
        - input:
            $link: test_input_1
      queries2:
        - input2:
            $link: split_up#split_output
""")
        hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t20.0\nsamp2\t40.0\n")
        hda3 = self.dataset_populator.new_dataset(history_id, content="samp1\t30.0\nsamp2\t60.0\n")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hda2),
            '2': self._ds_entry(hda3),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        content = self.dataset_populator.get_history_dataset_content(history_id, hid=7)
        self.assertEqual(content.strip(), "samp1\t10.0\nsamp2\t20.0")
    @skip_without_tool("collection_split_on_column")
    def test_workflow_run_dynamic_output_collections_3(self):
        """Chain two dynamic-collection splits back-to-back and run a mapped cat.

        Only checks the workflow schedules and jobs run; no content assertion.
        """
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: text_input1
    type: input
  - label: text_input2
    type: input
  - label: cat_inputs
    tool_id: cat1
    state:
      input1:
        $link: text_input1
      queries:
        - input2:
            $link: text_input2
  - label: split_up_1
    tool_id: collection_split_on_column
    state:
      input1:
        $link: cat_inputs#out_file1
  - label: split_up_2
    tool_id: collection_split_on_column
    state:
      input1:
        $link: split_up_1#split_output
  - tool_id: cat
    state:
      input1:
        $link: split_up_2#split_output
""")
        hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t30.0\nsamp2\t40.0\n")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hda2),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
    @skip_without_tool("mapper")
    @skip_without_tool("pileup")
    def test_workflow_metadata_validation_0(self):
        """Mapped tool outputs feed a metadata-dependent tool (pileup) cleanly."""
        # Testing regression of pull request #7008. Tool may not be updated
        # correctly when mapping over repeated structure - assumption hedged;
        # _run_jobs asserts the jobs complete ok.
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
steps:
  - label: input_fastqs
    type: input_collection
  - label: reference
    type: input
  - label: map_over_mapper
    tool_id: mapper
    state:
      input1:
        $link: input_fastqs
      reference:
        $link: reference
  - label: pileup
    tool_id: pileup
    state:
      input1:
        $link: map_over_mapper#out_file1
      reference:
        $link: reference
test_data:
  input_fastqs:
    type: list
    elements:
      - identifier: samp1
        value: 1.fastq
        type: File
      - identifier: samp2
        value: 1.fastq
        type: File
  reference:
    value: 1.fasta
    type: File
""", history_id=history_id)

    def test_run_subworkflow_simple(self):
        """Run the canned nested workflow and verify the final dataset content."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs(SIMPLE_NESTED_WORKFLOW_YAML, history_id=history_id)
        content = self.dataset_populator.get_history_dataset_content(history_id)
        self.assertEqual("chr5\t131424298\t131424460\tCCDS4149.1_cds_0_0_chr5_131424299_f\t0\t+\nchr5\t131424298\t131424460\tCCDS4149.1_cds_0_0_chr5_131424299_f\t0\t+\n", content)
    @skip_without_tool("cat1")
    @skip_without_tool("collection_paired_test")
    def test_workflow_run_zip_collections(self):
        """Zip two datasets into a pair mid-workflow and consume the pair."""
        history_id = self.dataset_populator.new_history()
        workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - label: test_input_1
    type: input
  - label: test_input_2
    type: input
  - label: first_cat
    tool_id: cat1
    state:
      input1:
        $link: test_input_1
  - label: zip_it
    tool_id: "__ZIP_COLLECTION__"
    state:
      input_forward:
        $link: first_cat#out_file1
      input_reverse:
        $link: test_input_2
  - label: concat_pair
    tool_id: collection_paired_test
    state:
      f1:
        $link: zip_it#output
""")
        hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t20.0\nsamp2\t40.0\n")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hda2),
        }
        invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        content = self.dataset_populator.get_history_dataset_content(history_id)
        # Forward (cat of input 1) then reverse (input 2), concatenated.
        self.assertEqual(content.strip(), "samp1\t10.0\nsamp2\t20.0\nsamp1\t20.0\nsamp2\t40.0")
    def test_filter_failed_mapping(self):
        """__FILTER_FAILED_DATASETS__ drops failed elements before downstream map.

        One of two collection elements fails (exit code 1); the filter step
        should leave a single-element collection so cat1 runs exactly once.
        """
        history_id = self.dataset_populator.new_history()
        summary = self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - type: collection
    label: input_c
steps:
  - label: mixed_collection
    tool_id: exit_code_from_file
    state:
      input:
        $link: input_c
  - label: filtered_collection
    tool_id: "__FILTER_FAILED_DATASETS__"
    state:
      input:
        $link: mixed_collection#out_file1
  - tool_id: cat1
    state:
      input1:
        $link: filtered_collection
test_data:
  input_c:
    type: list
    elements:
      - identifier: i1
        content: "0"
      - identifier: i2
        content: "1"
""", history_id=history_id, wait=True, assert_ok=False)
        jobs = summary.jobs

        def filter_jobs_by_tool(tool_id):
            return [j for j in summary.jobs if j["tool_id"] == tool_id]

        # Two uploads, two mapped exit_code jobs, one filter, one surviving cat1.
        assert len(filter_jobs_by_tool("upload1")) == 2, jobs
        assert len(filter_jobs_by_tool("exit_code_from_file")) == 2, jobs
        assert len(filter_jobs_by_tool("__FILTER_FAILED_DATASETS__")) == 1, jobs
        assert len(filter_jobs_by_tool("cat1")) == 1, jobs
    def test_workflow_request(self):
        """Run a workflow through the /workflows/{id}/usage invocation route."""
        workflow = self.workflow_populator.load_workflow(name="test_for_queue")
        workflow_request, history_id = self._setup_workflow_run(workflow)
        url = "workflows/%s/usage" % (workflow_request["workflow_id"])
        # The id travels in the URL for this route, not the payload.
        del workflow_request["workflow_id"]
        run_workflow_response = self._post(url, data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 200)
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
    @skip_without_tool("cat")
    def test_workflow_pause(self):
        """A paused invocation schedules after its pause step is approved."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_pause")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        index_map = {
            '0': self._ds_entry(hda1),
        }
        invocation_id = self.__invoke_workflow(
            history_id,
            uploaded_workflow_id,
            index_map,
        )
        # Give it time to reach (and stop at) the pause step.
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled', invocation
        # Approve the pause step (action=True) and poll until scheduled.
        self.__review_paused_steps(uploaded_workflow_id, invocation_id, order_index=2, action=True)
        invocation_scheduled = False
        for i in range(25):
            invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
            if invocation['state'] == 'scheduled':
                invocation_scheduled = True
                break
            time.sleep(.5)
        assert invocation_scheduled, "Workflow state is not scheduled..."
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
    @skip_without_tool("cat")
    def test_workflow_pause_cancel(self):
        """Rejecting a pause step (action=False) cancels the invocation."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_pause")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        index_map = {
            '0': self._ds_entry(hda1),
        }
        invocation_id = self.__invoke_workflow(history_id, uploaded_workflow_id, index_map)
        # Give it time to reach the pause step.
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled'
        # Reject the pause step - the invocation should move to cancelled.
        self.__review_paused_steps(uploaded_workflow_id, invocation_id, order_index=2, action=False)
        time.sleep(4)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] == 'cancelled', invocation
    @skip_without_tool("head")
    def test_workflow_map_reduce_pause(self):
        """Pause works inside a map/reduce workflow; output verified after approval."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_map_reduce_pause")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="reviewed\nunreviewed")
        hdca1 = self.dataset_collection_populator.create_list_in_history(history_id, contents=["1\n2\n3", "4\n5\n6"]).json()
        index_map = {
            '0': self._ds_entry(hda1),
            '1': self._ds_entry(hdca1),
        }
        invocation_id = self.__invoke_workflow(history_id, uploaded_workflow_id, index_map)
        # Give it time to reach the pause step before asserting it is not scheduled.
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled'
        self.__review_paused_steps(uploaded_workflow_id, invocation_id, order_index=4, action=True)
        self.wait_for_invocation_and_jobs(history_id, uploaded_workflow_id, invocation_id)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] == 'scheduled'
        self.assertEqual("reviewed\n1\nreviewed\n4\n", self.dataset_populator.get_history_dataset_content(history_id))
    @skip_without_tool("cat")
    def test_cancel_workflow_invocation(self):
        """DELETE on the usage route cancels a paused invocation."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_pause")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        index_map = {
            '0': self._ds_entry(hda1),
        }
        invocation_id = self.__invoke_workflow(history_id, uploaded_workflow_id, index_map)
        # Give it time to reach the pause step so it cannot have scheduled yet.
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled'
        invocation_url = self._api_url("workflows/%s/usage/%s" % (uploaded_workflow_id, invocation_id), use_key=True)
        delete_response = delete(invocation_url)
        self._assert_status_code_is(delete_response, 200)
        invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
        assert invocation['state'] == 'cancelled'
    def test_run_with_implicit_connection(self):
        """A step with a non-data ($step) connection waits on its implicit dep.

        third_cat takes its data from test_input but is implicitly ordered
        after second_cat, which sits behind a pause - so only 2 jobs run until
        the pause is approved, then all 4.
        """
        history_id = self.dataset_populator.new_history()
        run_summary = self._run_jobs("""
class: GalaxyWorkflow
steps:
  - label: test_input
    type: input
  - label: first_cat
    tool_id: cat1
    state:
      input1:
        $link: test_input
  - label: the_pause
    type: pause
    connect:
      input:
        - first_cat#out_file1
  - label: second_cat
    tool_id: cat1
    state:
      input1:
        $link: the_pause
  - label: third_cat
    tool_id: random_lines1
    connect:
      $step: second_cat
    state:
      num_lines: 1
      input:
        $link: test_input
      seed_source:
        seed_source_selector: set_seed
        seed: asdf
test_data:
  test_input: "hello world"
""", history_id=history_id, wait=False)
        history_id = run_summary.history_id
        workflow_id = run_summary.workflow_id
        invocation_id = run_summary.invocation_id
        # Wait for upload and first cat, but the pause should hold the rest.
        wait_on(lambda: len(self._history_jobs(history_id)) >= 2 or None, "history jobs")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        invocation = self._invocation_details(workflow_id, invocation_id)
        assert invocation['state'] != 'scheduled', invocation
        # Expect two jobs - the upload and first cat. randomlines shouldn't run
        # it is implicitly dependent on second cat.
        self._assert_history_job_count(history_id, 2)
        self.__review_paused_steps(workflow_id, invocation_id, order_index=2, action=True)
        self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        self._assert_history_job_count(history_id, 4)
    def test_run_with_validated_parameter_connection_valid(self):
        """A text parameter passing tool validation runs exactly one job."""
        history_id = self.dataset_populator.new_history()
        run_summary = self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - label: text_input
    type: text
steps:
  - tool_id: validation_repeat
    state:
      r2:
        - text:
            $link: text_input
test_data:
  text_input:
    value: "abd"
    type: raw
""", history_id=history_id, wait=True)
        time.sleep(10)
        self.workflow_populator.wait_for_invocation(run_summary.workflow_id, run_summary.invocation_id)
        jobs = self._history_jobs(history_id)
        assert len(jobs) == 1

    def test_run_with_validated_parameter_connection_invalid(self):
        """A text parameter failing tool validation yields a failed run."""
        history_id = self.dataset_populator.new_history()
        # assert_ok=False: the empty value must fail validation downstream.
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - label: text_input
    type: text
steps:
  - tool_id: validation_repeat
    state:
      r2:
        - text:
            $link: text_input
test_data:
  text_input:
    value: ""
    type: raw
""", history_id=history_id, wait=True, assert_ok=False)
    def test_run_with_text_connection(self):
        """A text workflow input can feed a non-data tool parameter (the seed)."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - label: data_input
    type: data
  - label: text_input
    type: text
steps:
  - label: randomlines
    tool_id: random_lines1
    state:
      num_lines: 1
      input:
        $link: data_input
      seed_source:
        seed_source_selector: set_seed
        seed:
          $link: text_input
test_data:
  data_input:
    value: 1.bed
    type: File
  text_input:
    value: asdf
    type: raw
""", history_id=history_id)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        content = self.dataset_populator.get_history_dataset_content(history_id)
        # Seed "asdf" makes the selected random line deterministic.
        self.assertEqual("chr5\t131424298\t131424460\tCCDS4149.1_cds_0_0_chr5_131424299_f\t0\t+\n", content)
    def wait_for_invocation_and_jobs(self, history_id, workflow_id, invocation_id, assert_ok=True):
        """Wait for the invocation to finish scheduling, then for history jobs.

        The short sleeps pad the gap between scheduling completing and the
        resulting jobs appearing in the history.
        """
        self.workflow_populator.wait_for_invocation(workflow_id, invocation_id)
        time.sleep(.5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=assert_ok)
        time.sleep(.5)
    def test_cannot_run_inaccessible_workflow(self):
        """Another user running a private workflow must get a 403."""
        workflow = self.workflow_populator.load_workflow(name="test_for_run_cannot_access")
        workflow_request, history_id = self._setup_workflow_run(workflow)
        with self._different_user():
            run_workflow_response = self._post("workflows", data=workflow_request)
            self._assert_status_code_is(run_workflow_response, 403)

    def test_400_on_invalid_workflow_id(self):
        """Running with a bogus workflow id must get a 400."""
        workflow = self.workflow_populator.load_workflow(name="test_for_run_does_not_exist")
        workflow_request, history_id = self._setup_workflow_run(workflow)
        workflow_request["workflow_id"] = self._random_key()
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 400)

    def test_cannot_run_against_other_users_history(self):
        """Targeting another user's history for the run must get a 403."""
        workflow = self.workflow_populator.load_workflow(name="test_for_run_does_not_exist")
        workflow_request, history_id = self._setup_workflow_run(workflow)
        with self._different_user():
            other_history_id = self.dataset_populator.new_history()
        workflow_request["history"] = "hist_id=%s" % other_history_id
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 403)
    @skip_without_tool("cat")
    @skip_without_tool("cat_list")
    def test_workflow_run_with_matching_lists(self):
        """Two same-structure lists map pairwise through cat, then reduce."""
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_matching_lists")
        workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hdca1 = self.dataset_collection_populator.create_list_in_history(history_id, contents=[("sample1-1", "1 2 3"), ("sample2-1", "7 8 9")]).json()
        hdca2 = self.dataset_collection_populator.create_list_in_history(history_id, contents=[("sample1-2", "4 5 6"), ("sample2-2", "0 a b")]).json()
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        label_map = {"list1": self._ds_entry(hdca1), "list2": self._ds_entry(hdca2)}
        workflow_request = dict(
            history="hist_id=%s" % history_id,
            workflow_id=workflow_id,
            ds_map=self._build_ds_map(workflow_id, label_map),
        )
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 200)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # Elements are matched by position: (1 2 3 + 4 5 6), (7 8 9 + 0 a b).
        self.assertEqual("1 2 3\n4 5 6\n7 8 9\n0 a b\n", self.dataset_populator.get_history_dataset_content(history_id))
    def test_workflow_stability(self):
        """Uploading then downloading a workflow must preserve step indexing."""
        # Run this index stability test with following command:
        #   ./run_tests.sh test/api/test_workflows.py:WorkflowsApiTestCase.test_workflow_stability
        num_tests = 1
        for workflow_file in ["test_workflow_topoambigouity", "test_workflow_topoambigouity_auto_laidout"]:
            workflow = self.workflow_populator.load_workflow_from_resource(workflow_file)
            last_step_map = self._step_map(workflow)
            for i in range(num_tests):
                uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
                downloaded_workflow = self._download_workflow(uploaded_workflow_id)
                step_map = self._step_map(downloaded_workflow)
                # Each round trip must produce the same index -> input mapping.
                assert step_map == last_step_map
                last_step_map = step_map
def _step_map(self, workflow):
# Build dict mapping 'tep index to input name.
step_map = {}
for step_index, step in workflow["steps"].items():
if step["type"] == "data_input":
step_map[step_index] = step["inputs"][0]["name"]
return step_map
    def test_empty_create(self):
        """POST /workflows with no payload must 400 with a missing-param code."""
        response = self._post("workflows")
        self._assert_status_code_is(response, 400)
        self._assert_error_code_is(response, error_codes.USER_REQUEST_MISSING_PARAMETER)

    def test_invalid_create_multiple_types(self):
        """Supplying two mutually-exclusive creation sources must 400."""
        data = {
            'shared_workflow_id': '1234567890abcdef',
            'from_history_id': '1234567890abcdef'
        }
        response = self._post("workflows", data)
        self._assert_status_code_is(response, 400)
        self._assert_error_code_is(response, error_codes.USER_REQUEST_INVALID_PARAMETER)
    @skip_without_tool("cat1")
    def test_run_with_pja(self):
        """Post-job rename action applies replacement_params at run time."""
        workflow = self.workflow_populator.load_workflow(name="test_for_pja_run", add_pja=True)
        workflow_request, history_id = self._setup_workflow_run(workflow, inputs_by='step_index')
        workflow_request["replacement_params"] = dumps(dict(replaceme="was replaced"))
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 200)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        # The PJA renames the output using the ${replaceme} substitution.
        assert content["name"] == "foo was replaced"
    @skip_without_tool("cat")
    def test_run_rename_collection_element(self):
        """A static 'rename' output action applies to mapped collection elements."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: input1
    type: data_collection_input
    collection_type: list
steps:
  - tool_id: cat
    label: first_cat
    state:
      input1:
        $link: input1
    outputs:
      out_file1:
        rename: "my new name"
test_data:
  input1:
    type: list
    name: the_dataset_list
    elements:
      - identifier: el1
        value: 1.fastq
        type: File
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, hid=3, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "my new name", name
    @skip_without_tool("cat")
    def test_run_rename_based_on_input(self):
        """The rename PJA can template the input dataset name (#{input1})."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: input1
steps:
  - tool_id: cat
    label: first_cat
    state:
      input1:
        $link: input1
    outputs:
      out_file1:
        rename: "#{input1 | basename} suffix"
test_data:
  input1:
    value: 1.fasta
    type: File
    name: fasta1
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "fasta1 suffix", name
    @skip_without_tool("cat")
    def test_run_rename_based_on_input_recursive(self):
        """Rename templating must not re-expand #{...} found in the input's name."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: input1
steps:
  - tool_id: cat
    label: first_cat
    state:
      input1:
        $link: input1
    outputs:
      out_file1:
        rename: "#{input1} #{input1 | upper} suffix"
test_data:
  input1:
    value: 1.fasta
    type: File
    name: '#{input1}'
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        # The literal '#{input1}' dataset name is substituted once, not recursively.
        assert name == "#{input1} #{INPUT1} suffix", name
    @skip_without_tool("cat")
    def test_run_rename_based_on_input_repeat(self):
        """Rename templating can address inputs inside a repeat (queries_0.input2)."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: input1
  - id: input2
steps:
  - tool_id: cat
    label: first_cat
    state:
      input1:
        $link: input1
      queries:
        - input2:
            $link: input2
    outputs:
      out_file1:
        rename: "#{queries_0.input2| basename} suffix"
test_data:
  input1:
    value: 1.fasta
    type: File
    name: fasta1
  input2:
    value: 1.fasta
    type: File
    name: fasta2
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "fasta2 suffix", name
    @skip_without_tool("mapper2")
    def test_run_rename_based_on_input_conditional(self):
        """Rename templating resolves an input nested inside a conditional."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: fasta_input
  - id: fastq_input
steps:
  - tool_id: mapper2
    state:
      fastq_input:
        fastq_input_selector: single
        fastq_input1:
          $link: fastq_input
      reference:
        $link: fasta_input
    outputs:
      out_file1:
        # Wish it was qualified for conditionals but it doesn't seem to be. -John
        # rename: "#{fastq_input.fastq_input1 | basename} suffix"
        rename: "#{fastq_input1 | basename} suffix"
test_data:
  fasta_input:
    value: 1.fasta
    type: File
    name: fasta1
    file_type: fasta
  fastq_input:
    value: 1.fastqsanger
    type: File
    name: fastq1
    file_type: fastqsanger
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        assert name == "fastq1 suffix", name
    @skip_without_tool("mapper2")
    def test_run_rename_based_on_input_collection(self):
        """Rename templating on a collection input uses the collection's name."""
        history_id = self.dataset_populator.new_history()
        self._run_jobs("""
class: GalaxyWorkflow
inputs:
  - id: fasta_input
  - id: fastq_inputs
steps:
  - tool_id: mapper2
    state:
      fastq_input:
        fastq_input_selector: paired_collection
        fastq_input1:
          $link: fastq_inputs
      reference:
        $link: fasta_input
    outputs:
      out_file1:
        # Wish it was qualified for conditionals but it doesn't seem to be. -John
        # rename: "#{fastq_input.fastq_input1 | basename} suffix"
        rename: "#{fastq_input1} suffix"
test_data:
  fasta_input:
    value: 1.fasta
    type: File
    name: fasta1
    file_type: fasta
  fastq_inputs:
    type: list
    name: the_dataset_pair
    elements:
      - identifier: forward
        value: 1.fastq
        type: File
      - identifier: reverse
        value: 1.fastq
        type: File
""", history_id=history_id)
        content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
        name = content["name"]
        # The collection name (not an element name) is substituted.
        assert name == "the_dataset_pair suffix", name
@skip_without_tool("cat1")
def test_run_with_runtime_pja(self):
workflow = self.workflow_populator.load_workflow(name="test_for_pja_runtime")
uuid0, uuid1, uuid2 = str(uuid4()), str(uuid4()), str(uuid4())
workflow["steps"]["0"]["uuid"] = uuid0
workflow["steps"]["1"]["uuid"] = uuid1
workflow["steps"]["2"]["uuid"] = uuid2
workflow_request, history_id = self._setup_workflow_run(workflow, inputs_by='step_index')
workflow_request["replacement_params"] = dumps(dict(replaceme="was replaced"))
pja_map = {
"RenameDatasetActionout_file1": dict(
action_type="RenameDatasetAction",
output_name="out_file1",
action_arguments=dict(newname="foo ${replaceme}"),
)
}
workflow_request["parameters"] = dumps({
uuid2: {"__POST_JOB_ACTIONS__": pja_map}
})
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
assert content["name"] == "foo was replaced", content["name"]
workflow_id = workflow_request["workflow_id"]
downloaded_workflow = self._download_workflow(workflow_id)
pjas = list(downloaded_workflow["steps"]["2"]["post_job_actions"].values())
assert len(pjas) == 0, len(pjas)
@skip_without_tool("cat1")
def test_run_with_delayed_runtime_pja(self):
workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
- label: test_input
type: input
- label: first_cat
tool_id: cat1
state:
input1:
$link: test_input
- label: the_pause
type: pause
connect:
input:
- first_cat#out_file1
- label: second_cat
tool_id: cat1
state:
input1:
$link: the_pause
""")
downloaded_workflow = self._download_workflow(workflow_id)
print(downloaded_workflow)
uuid_dict = dict((int(index), step["uuid"]) for index, step in downloaded_workflow["steps"].items())
history_id = self.dataset_populator.new_history()
hda = self.dataset_populator.new_dataset(history_id, content="1 2 3")
self.dataset_populator.wait_for_history(history_id)
inputs = {
'0': self._ds_entry(hda),
}
print(inputs)
uuid2 = uuid_dict[3]
workflow_request = {}
workflow_request["replacement_params"] = dumps(dict(replaceme="was replaced"))
pja_map = {
"RenameDatasetActionout_file1": dict(
action_type="RenameDatasetAction",
output_name="out_file1",
action_arguments=dict(newname="foo ${replaceme}"),
)
}
workflow_request["parameters"] = dumps({
uuid2: {"__POST_JOB_ACTIONS__": pja_map}
})
invocation_id = self.__invoke_workflow(history_id, workflow_id, inputs=inputs, request=workflow_request)
time.sleep(2)
self.dataset_populator.wait_for_history(history_id)
self.__review_paused_steps(workflow_id, invocation_id, order_index=2, action=True)
self.workflow_populator.wait_for_workflow(workflow_id, invocation_id, history_id)
time.sleep(1)
content = self.dataset_populator.get_history_dataset_details(history_id)
assert content["name"] == "foo was replaced", content["name"]
@skip_without_tool("cat1")
def test_delete_intermediate_datasets_pja_1(self):
history_id = self.dataset_populator.new_history()
self._run_jobs("""
class: GalaxyWorkflow
inputs:
- id: input1
outputs:
- id: wf_output_1
source: third_cat#out_file1
steps:
- tool_id: cat1
label: first_cat
state:
input1:
$link: input1
- tool_id: cat1
label: second_cat
state:
input1:
$link: first_cat#out_file1
- tool_id: cat1
label: third_cat
state:
input1:
$link: second_cat#out_file1
outputs:
out_file1:
delete_intermediate_datasets: true
test_data:
input1: "hello world"
""", history_id=history_id)
hda1 = self.dataset_populator.get_history_dataset_details(history_id, hid=1)
hda2 = self.dataset_populator.get_history_dataset_details(history_id, hid=2)
hda3 = self.dataset_populator.get_history_dataset_details(history_id, hid=3)
hda4 = self.dataset_populator.get_history_dataset_details(history_id, hid=4)
assert not hda1["deleted"]
assert hda2["deleted"]
print(hda3["deleted"])
assert not hda4["deleted"]
@skip_without_tool("random_lines1")
def test_run_replace_params_by_tool(self):
workflow_request, history_id = self._setup_random_x2_workflow("test_for_replace_tool_params")
workflow_request["parameters"] = dumps(dict(random_lines1=dict(num_lines=5)))
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
self.__assert_lines_hid_line_count_is(history_id, 2, 5)
self.__assert_lines_hid_line_count_is(history_id, 3, 5)
@skip_without_tool("random_lines1")
def test_run_replace_params_by_uuid(self):
workflow_request, history_id = self._setup_random_x2_workflow("test_for_replace_tool_params")
workflow_request["parameters"] = dumps({
"58dffcc9-bcb7-4117-a0e1-61513524b3b1": dict(num_lines=4),
"58dffcc9-bcb7-4117-a0e1-61513524b3b2": dict(num_lines=3),
})
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
self.__assert_lines_hid_line_count_is(history_id, 2, 4)
self.__assert_lines_hid_line_count_is(history_id, 3, 3)
@skip_without_tool("cat1")
@skip_without_tool("addValue")
def test_run_batch(self):
workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_batch")
workflow_id = self.workflow_populator.create_workflow(workflow)
history_id = self.dataset_populator.new_history()
hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
hda2 = self.dataset_populator.new_dataset(history_id, content="4 5 6")
hda3 = self.dataset_populator.new_dataset(history_id, content="7 8 9")
hda4 = self.dataset_populator.new_dataset(history_id, content="10 11 12")
parameters = {
"0": {"input": {"batch": True, "values": [{"id" : hda1.get("id"), "hid": hda1.get("hid"), "src": "hda"},
{"id" : hda2.get("id"), "hid": hda2.get("hid"), "src": "hda"},
{"id" : hda3.get("id"), "hid": hda2.get("hid"), "src": "hda"},
{"id" : hda4.get("id"), "hid": hda2.get("hid"), "src": "hda"}]}},
"1": {"input": {"batch": False, "values": [{"id" : hda1.get("id"), "hid": hda1.get("hid"), "src": "hda"}]}, "exp": "2"}}
workflow_request = {
"history_id" : history_id,
"batch" : True,
"parameters_normalized": True,
"parameters" : dumps(parameters),
}
invocation_response = self._post("workflows/%s/usage" % workflow_id, data=workflow_request)
self._assert_status_code_is(invocation_response, 200)
time.sleep(5)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
r1 = "1 2 3\t1\n1 2 3\t2\n"
r2 = "4 5 6\t1\n1 2 3\t2\n"
r3 = "7 8 9\t1\n1 2 3\t2\n"
r4 = "10 11 12\t1\n1 2 3\t2\n"
t1 = self.dataset_populator.get_history_dataset_content(history_id, hid=7)
t2 = self.dataset_populator.get_history_dataset_content(history_id, hid=10)
t3 = self.dataset_populator.get_history_dataset_content(history_id, hid=13)
t4 = self.dataset_populator.get_history_dataset_content(history_id, hid=16)
self.assertEqual(r1, t1)
self.assertEqual(r2, t2)
self.assertEqual(r3, t3)
self.assertEqual(r4, t4)
@skip_without_tool("validation_default")
def test_parameter_substitution_sanitization(self):
substitions = dict(input1="\" ; echo \"moo")
run_workflow_response, history_id = self._run_validation_workflow_with_substitions(substitions)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
self.assertEqual("__dq__ X echo __dq__moo\n", self.dataset_populator.get_history_dataset_content(history_id, hid=1))
@skip_without_tool("validation_repeat")
def test_parameter_substitution_validation_value_errors_0(self):
history_id = self.dataset_populator.new_history()
workflow_id = self._upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
- tool_id: validation_repeat
state:
r2:
- text: "abd"
""")
workflow_request = dict(
history="hist_id=%s" % history_id,
parameters=dumps(dict(validation_repeat={"r2_0|text": ""}))
)
url = "workflows/%s/invocations" % workflow_id
invocation_response = self._post(url, data=workflow_request)
self._assert_status_code_is(invocation_response, 400)
@skip_without_tool("validation_default")
def test_parameter_substitution_validation_value_errors_1(self):
substitions = dict(select_param="\" ; echo \"moo")
run_workflow_response, history_id = self._run_validation_workflow_with_substitions(substitions)
self._assert_status_code_is(run_workflow_response, 400)
@skip_without_tool("validation_repeat")
def test_workflow_import_state_validation_1(self):
history_id = self.dataset_populator.new_history()
self._run_jobs("""
class: GalaxyWorkflow
steps:
- tool_id: validation_repeat
state:
r2:
- text: ""
""", history_id=history_id, wait=False, expected_response=400)
    def _run_validation_workflow_with_substitions(self, substitions):
        # Run the validation workflow with runtime parameter substitutions and
        # return (response, history_id) without asserting the status code.
        # NOTE(review): "substitions" is a typo for "substitutions", but the
        # name is kept because existing callers use this private helper.
        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_validation_1")
        uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        workflow_request = dict(
            history="hist_id=%s" % history_id,
            workflow_id=uploaded_workflow_id,
            parameters=dumps(dict(validation_default=substitions))
        )
        run_workflow_response = self._post("workflows", data=workflow_request)
        return run_workflow_response, history_id
@skip_without_tool("random_lines1")
def test_run_replace_params_by_steps(self):
workflow_request, history_id, steps = self._setup_random_x2_workflow_steps("test_for_replace_step_params")
params = dumps({str(steps[1]["id"]): dict(num_lines=5)})
workflow_request["parameters"] = params
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
# Would be 8 and 6 without modification
self.__assert_lines_hid_line_count_is(history_id, 2, 8)
self.__assert_lines_hid_line_count_is(history_id, 3, 5)
@skip_without_tool("random_lines1")
def test_run_replace_params_nested(self):
workflow_request, history_id, steps = self._setup_random_x2_workflow_steps("test_for_replace_step_params_nested")
seed_source = dict(
seed_source_selector="set_seed",
seed="moo",
)
params = dumps({str(steps[0]["id"]): dict(num_lines=1, seed_source=seed_source),
str(steps[1]["id"]): dict(num_lines=1, seed_source=seed_source)})
workflow_request["parameters"] = params
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
self.assertEqual("3\n", self.dataset_populator.get_history_dataset_content(history_id))
def test_pja_import_export(self):
workflow = self.workflow_populator.load_workflow(name="test_for_pja_import", add_pja=True)
uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
downloaded_workflow = self._download_workflow(uploaded_workflow_id)
self._assert_has_keys(downloaded_workflow["steps"], "0", "1", "2")
pjas = list(downloaded_workflow["steps"]["2"]["post_job_actions"].values())
assert len(pjas) == 1, len(pjas)
pja = pjas[0]
self._assert_has_keys(pja, "action_type", "output_name", "action_arguments")
@skip_without_tool("cat1")
def test_only_own_invocations_accessible(self):
workflow_id, usage = self._run_workflow_once_get_invocation("test_usage")
with self._different_user():
usage_details_response = self._get("workflows/%s/usage/%s" % (workflow_id, usage["id"]))
self._assert_status_code_is(usage_details_response, 403)
@skip_without_tool("cat1")
def test_invocation_usage(self):
workflow_id, usage = self._run_workflow_once_get_invocation("test_usage")
invocation_id = usage["id"]
usage_details = self._invocation_details(workflow_id, invocation_id)
# Assert some high-level things about the structure of data returned.
self._assert_has_keys(usage_details, "inputs", "steps")
invocation_steps = usage_details["steps"]
for step in invocation_steps:
self._assert_has_keys(step, "workflow_step_id", "order_index", "id")
an_invocation_step = invocation_steps[0]
step_id = an_invocation_step["id"]
step_response = self._get("workflows/%s/usage/%s/steps/%s" % (workflow_id, invocation_id, step_id))
self._assert_status_code_is(step_response, 200)
self._assert_has_keys(step_response.json(), "id", "order_index")
@skip_without_tool("cat1")
def test_invocations_accessible_imported_workflow(self):
workflow_id = self.workflow_populator.simple_workflow("test_usage", publish=True)
with self._different_user():
other_import_response = self.__import_workflow(workflow_id)
self._assert_status_code_is(other_import_response, 200)
other_id = other_import_response.json()["id"]
workflow_request, history_id = self._setup_workflow_run(workflow_id=other_id)
response = self._get("workflows/%s/usage" % other_id)
self._assert_status_code_is(response, 200)
assert len(response.json()) == 0
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
run_workflow_response = run_workflow_response.json()
invocation_id = run_workflow_response['id']
usage_details_response = self._get("workflows/%s/usage/%s" % (other_id, invocation_id))
self._assert_status_code_is(usage_details_response, 200)
@skip_without_tool("cat1")
def test_invocations_accessible_published_workflow(self):
workflow_id = self.workflow_populator.simple_workflow("test_usage", publish=True)
with self._different_user():
workflow_request, history_id = self._setup_workflow_run(workflow_id=workflow_id)
workflow_request['workflow_id'] = workflow_request.pop('workflow_id')
response = self._get("workflows/%s/usage" % workflow_id)
self._assert_status_code_is(response, 200)
assert len(response.json()) == 0
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
run_workflow_response = run_workflow_response.json()
invocation_id = run_workflow_response['id']
usage_details_response = self._get("workflows/%s/usage/%s" % (workflow_id, invocation_id))
self._assert_status_code_is(usage_details_response, 200)
@skip_without_tool("cat1")
def test_invocations_not_accessible_by_different_user_for_published_workflow(self):
workflow_id = self.workflow_populator.simple_workflow("test_usage", publish=True)
workflow_request, history_id = self._setup_workflow_run(workflow_id=workflow_id)
workflow_request['workflow_id'] = workflow_request.pop('workflow_id')
response = self._get("workflows/%s/usage" % workflow_id)
self._assert_status_code_is(response, 200)
assert len(response.json()) == 0
run_workflow_response = self._post("workflows", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
run_workflow_response = run_workflow_response.json()
invocation_id = run_workflow_response['id']
with self._different_user():
usage_details_response = self._get("workflows/%s/usage/%s" % (workflow_id, invocation_id))
self._assert_status_code_is(usage_details_response, 403)
def _update_workflow(self, workflow_id, workflow_object):
data = dict(
workflow=workflow_object
)
raw_url = 'workflows/%s' % workflow_id
url = self._api_url(raw_url, use_key=True)
put_response = put(url, data=dumps(data))
return put_response
def _invocation_step_details(self, workflow_id, invocation_id, step_id):
invocation_step_response = self._get("workflows/%s/usage/%s/steps/%s" % (workflow_id, invocation_id, step_id))
self._assert_status_code_is(invocation_step_response, 200)
invocation_step_details = invocation_step_response.json()
return invocation_step_details
def _execute_invocation_step_action(self, workflow_id, invocation_id, step_id, action):
raw_url = "workflows/%s/usage/%s/steps/%s" % (workflow_id, invocation_id, step_id)
url = self._api_url(raw_url, use_key=True)
payload = dumps(dict(action=action))
action_response = put(url, data=payload)
self._assert_status_code_is(action_response, 200)
invocation_step_details = action_response.json()
return invocation_step_details
    def _run_workflow_once_get_invocation(self, name):
        # Run the named workflow exactly once and return
        # (workflow_id, invocation_summary) taken from the usage listing.
        workflow = self.workflow_populator.load_workflow(name=name)
        workflow_request, history_id = self._setup_workflow_run(workflow)
        workflow_id = workflow_request["workflow_id"]
        # Sanity check: no invocations exist before the run.
        response = self._get("workflows/%s/usage" % workflow_id)
        self._assert_status_code_is(response, 200)
        assert len(response.json()) == 0
        run_workflow_response = self._post("workflows", data=workflow_request)
        self._assert_status_code_is(run_workflow_response, 200)
        response = self._get("workflows/%s/usage" % workflow_id)
        self._assert_status_code_is(response, 200)
        usages = response.json()
        assert len(usages) == 1
        return workflow_id, usages[0]
def _setup_random_x2_workflow_steps(self, name):
workflow_request, history_id = self._setup_random_x2_workflow("test_for_replace_step_params")
random_line_steps = self._random_lines_steps(workflow_request)
return workflow_request, history_id, random_line_steps
def _random_lines_steps(self, workflow_request):
workflow_summary_response = self._get("workflows/%s" % workflow_request["workflow_id"])
self._assert_status_code_is(workflow_summary_response, 200)
steps = workflow_summary_response.json()["steps"]
return sorted((step for step in steps.values() if step["tool_id"] == "random_lines1"), key=lambda step: step["id"])
def _setup_random_x2_workflow(self, name):
workflow = self.workflow_populator.load_random_x2_workflow(name)
uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
workflow_inputs = self._workflow_inputs(uploaded_workflow_id)
key = next(iter(workflow_inputs.keys()))
history_id = self.dataset_populator.new_history()
ten_lines = "\n".join(str(_) for _ in range(10))
hda1 = self.dataset_populator.new_dataset(history_id, content=ten_lines)
workflow_request = dict(
history="hist_id=%s" % history_id,
workflow_id=uploaded_workflow_id,
ds_map=dumps({
key: self._ds_entry(hda1),
}),
)
return workflow_request, history_id
def __review_paused_steps(self, uploaded_workflow_id, invocation_id, order_index, action=True):
invocation = self._invocation_details(uploaded_workflow_id, invocation_id)
invocation_steps = invocation["steps"]
pause_steps = [s for s in invocation_steps if s['order_index'] == order_index]
for pause_step in pause_steps:
pause_step_id = pause_step['id']
self._execute_invocation_step_action(uploaded_workflow_id, invocation_id, pause_step_id, action=action)
def __assert_lines_hid_line_count_is(self, history, hid, lines):
contents_url = "histories/%s/contents" % history
history_contents_response = self._get(contents_url)
self._assert_status_code_is(history_contents_response, 200)
hda_summary = next(hc for hc in history_contents_response.json() if hc["hid"] == hid)
hda_info_response = self._get("%s/%s" % (contents_url, hda_summary["id"]))
self._assert_status_code_is(hda_info_response, 200)
self.assertEqual(hda_info_response.json()["metadata_data_lines"], lines)
def __invoke_workflow(self, history_id, workflow_id, inputs={}, request={}, assert_ok=True):
request["history"] = "hist_id=%s" % history_id,
if inputs:
request["inputs"] = dumps(inputs)
request["inputs_by"] = 'step_index'
url = "workflows/%s/usage" % (workflow_id)
invocation_response = self._post(url, data=request)
if assert_ok:
self._assert_status_code_is(invocation_response, 200)
invocation_id = invocation_response.json()["id"]
return invocation_id
else:
return invocation_response
def __import_workflow(self, workflow_id, deprecated_route=False):
if deprecated_route:
route = "workflows/import"
import_data = dict(
workflow_id=workflow_id,
)
else:
route = "workflows"
import_data = dict(
shared_workflow_id=workflow_id,
)
return self._post(route, import_data)
def _download_workflow(self, workflow_id, style=None):
params = {}
if style:
params = {"style": style}
download_response = self._get("workflows/%s/download" % workflow_id, params)
self._assert_status_code_is(download_response, 200)
downloaded_workflow = download_response.json()
return downloaded_workflow
def _show_workflow(self, workflow_id):
show_response = self._get("workflows/%s" % workflow_id)
self._assert_status_code_is(show_response, 200)
return show_response.json()
def _assert_looks_like_instance_workflow_representation(self, workflow):
self._assert_has_keys(
workflow,
'url',
'owner',
'inputs',
'annotation',
'steps'
)
for step in workflow["steps"].values():
self._assert_has_keys(
step,
'id',
'type',
'tool_id',
'tool_version',
'annotation',
'tool_inputs',
'input_steps',
)
# Lightweight record describing one workflow run for use in test assertions.
RunJobsSummary = namedtuple('RunJobsSummary', ['history_id', 'workflow_id', 'invocation_id', 'inputs', 'jobs'])
| true | true |
f7feaa3a85aacf8905df1928db8d5ee842524215 | 8,094 | py | Python | core/settings.py | rectory-school/rectory-apps-updated | a6d47f6d5928f0c816eb45fd229da2f9f2fa2ff1 | [
"MIT"
] | null | null | null | core/settings.py | rectory-school/rectory-apps-updated | a6d47f6d5928f0c816eb45fd229da2f9f2fa2ff1 | [
"MIT"
] | 30 | 2021-07-16T12:54:14.000Z | 2021-12-24T16:59:04.000Z | core/settings.py | rectory-school/rectory-apps-updated | a6d47f6d5928f0c816eb45fd229da2f9f2fa2ff1 | [
"MIT"
] | null | null | null | """
Django settings for core project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import socket
import uuid
import json
from email.utils import parseaddr
import environ
env = environ.Env()
BASE_DIR = Path(__file__).resolve().parent.parent
if (dotenv := (BASE_DIR / ".env")).exists():
environ.Env.read_env(dotenv)
DEBUG = env.bool('DEBUG', default=False)
SECRET_KEY = env('SECRET_KEY')
DATABASES = {'default': env.db(default='sqlite:///' + (BASE_DIR / 'db.sqlite3').absolute().as_posix()), }
CACHES = {'default': env.cache(default="locmemcache://")}
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=["127.0.0.1", "localhost"])
GOOGLE_OAUTH_CLIENT_ID = env("GOOGLE_OAUTH_CLIENT_ID", default=None)
GOOGLE_HOSTED_DOMAIN = env("GOOGLE_HOSTED_DOMAIN", default=None)
# These are for DigitalOcean Spaces
AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME", default=None)
AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL", default=None)
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", default=None)
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", default=None)
AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME", default=None)
DEFAULT_FILE_STORAGE = env("DEFAULT_FILE_STORAGE", default="django.core.files.storage.FileSystemStorage")
AWS_DEFAULT_ACL = 'public-read'
AWS_QUERYSTRING_AUTH = False
# Remote logging configuration
LOGZ_REMOTE_URL = env('LOGZ_REMOTE_URL', default=None)
LOGZ_TOKEN = env('LOGZ_TOKEN', default=None)
# Mail configuration
MAILGUN_API_KEY = env('MAILGUN_API_KEY', default=None)
MAILGUN_SENDER_DOMAIN = env('MAILGUN_SENDER_DOMAIN', default=None)
SERVER_EMAIL = env('SERVER_EMAIL', default='root@localhost')
DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', default='root@localhost')
ADMINS = tuple(parseaddr(email) for email in env.list('DJANGO_ADMINS', default=[]))
MANAGERS = tuple(parseaddr(email) for email in env.list('DJANGO_MANAGERS', default=[]))
EMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
if MAILGUN_API_KEY and MAILGUN_SENDER_DOMAIN:
ANYMAIL = {
"MAILGUN_API_KEY": MAILGUN_API_KEY,
"MAILGUN_SENDER_DOMAIN": MAILGUN_SENDER_DOMAIN,
}
# Application definition
AUTH_USER_MODEL = 'accounts.User'
# The direct to config settings are to bypass the default_app_config RemovedInDjango41Warning warnings
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'adminsortable2',
'django_safemigrate',
'bootstrap4',
'health_check',
# 'health_check.db,
'health_check.db.apps.HealthCheckConfig',
'django_bootstrap_breadcrumbs',
# 'versatileimagefield',
'versatileimagefield.apps.VersatileImageFieldConfig',
'rest_framework',
'solo',
'accounts',
'icons',
'nav',
'calendar_generator',
'sis',
'jobs',
'stored_mail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'lb_health_check.middleware.AliveCheck',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'logging_middleware.middleware.LoggingMiddleware',
]
if DEBUG:
# Inject the debug toolbar
security_index = MIDDLEWARE.index('django.middleware.security.SecurityMiddleware')
MIDDLEWARE.insert(security_index+1, 'debug_toolbar.middleware.DebugToolbarMiddleware')
INSTALLED_APPS.append('debug_toolbar.apps.DebugToolbarConfig')
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / "templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'accounts.context_processors.has_admin_access'
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_ROOT = BASE_DIR / "scratch" / "media"
MEDIA_URL = env.str("MEDIA_URL", "/media/")
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = BASE_DIR / "scratch" / "static"
STATICFILES_DIRS = [BASE_DIR / "static"]
INTERNAL_IPS = [
'127.0.0.1',
'[::1]',
]
LOGIN_REDIRECT_URL = "/"
RESULTS_CACHE_SIZE = 2500
# Static per-process metadata attached to every record shipped to logz.io.
hostname = socket.gethostname()
try:
    ip_address = socket.gethostbyname(hostname)
except OSError:
    # The local hostname may not resolve (common in containers / minimal
    # images); remote logging metadata must not block startup.
    ip_address = '127.0.0.1'
logz_format = {
    'system': 'rectory-apps-web',
    'system-hostname': hostname,
    'system-ip-address': ip_address,
    # Unique per process start, distinguishing restarts of the same host.
    'system-uuid': uuid.uuid4().hex,
}
# Console logging everywhere; ERROR records are additionally emailed to
# ADMINS. A logzio handler is appended further below when remote logging
# is configured via LOGZ_REMOTE_URL / LOGZ_TOKEN.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'logzioFormat': {
            # Serialized per-process metadata defined above (logz_format).
            'format': json.dumps(logz_format),
            'validate': False,
        },
        'verbose': {
            'format': '%(asctime)s %(name)s [%(levelname)s] %(filename)s:%(lineno)d %(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': 'WARN',
        },
        'log-http-requests': {
            'handlers': ['console'],
            'level': 'INFO',
        },
        'jobs': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
        },
        'stored_mail': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
        }
    }
}
# Ship log records to logz.io only when remote logging is fully configured.
if LOGZ_REMOTE_URL and LOGZ_TOKEN:
    LOGGING['handlers']['logzio'] = {
        'class': 'logzio.handler.LogzioHandler',
        'level': 'INFO',
        'formatter': 'logzioFormat',
        'logzio_type': "django",
        'logs_drain_timeout': 5,
        'url': LOGZ_REMOTE_URL,
        'debug': False,
        'network_timeout': 10,
        'token': LOGZ_TOKEN,
    }
    # NOTE(review): the 'stored_mail' logger is not wired to logzio here —
    # confirm whether that omission is intentional.
    LOGGING['loggers']['django']['handlers'].append('logzio')
    LOGGING['loggers']['log-http-requests']['handlers'].append('logzio')
    LOGGING['loggers']['jobs']['handlers'].append('logzio')
# URL answered directly by lb_health_check's AliveCheck middleware so load
# balancers can probe liveness without hitting the full stack.
ALIVENESS_URL = "/health-check/"
| 28.5 | 119 | 0.675562 |
from pathlib import Path
import socket
import uuid
import json
from email.utils import parseaddr
import environ
env = environ.Env()
BASE_DIR = Path(__file__).resolve().parent.parent
if (dotenv := (BASE_DIR / ".env")).exists():
environ.Env.read_env(dotenv)
DEBUG = env.bool('DEBUG', default=False)
SECRET_KEY = env('SECRET_KEY')
DATABASES = {'default': env.db(default='sqlite:///' + (BASE_DIR / 'db.sqlite3').absolute().as_posix()), }
CACHES = {'default': env.cache(default="locmemcache://")}
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=["127.0.0.1", "localhost"])
GOOGLE_OAUTH_CLIENT_ID = env("GOOGLE_OAUTH_CLIENT_ID", default=None)
GOOGLE_HOSTED_DOMAIN = env("GOOGLE_HOSTED_DOMAIN", default=None)
AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME", default=None)
AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL", default=None)
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", default=None)
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", default=None)
AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME", default=None)
DEFAULT_FILE_STORAGE = env("DEFAULT_FILE_STORAGE", default="django.core.files.storage.FileSystemStorage")
AWS_DEFAULT_ACL = 'public-read'
AWS_QUERYSTRING_AUTH = False
LOGZ_REMOTE_URL = env('LOGZ_REMOTE_URL', default=None)
LOGZ_TOKEN = env('LOGZ_TOKEN', default=None)
MAILGUN_API_KEY = env('MAILGUN_API_KEY', default=None)
MAILGUN_SENDER_DOMAIN = env('MAILGUN_SENDER_DOMAIN', default=None)
SERVER_EMAIL = env('SERVER_EMAIL', default='root@localhost')
DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', default='root@localhost')
ADMINS = tuple(parseaddr(email) for email in env.list('DJANGO_ADMINS', default=[]))
MANAGERS = tuple(parseaddr(email) for email in env.list('DJANGO_MANAGERS', default=[]))
EMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
if MAILGUN_API_KEY and MAILGUN_SENDER_DOMAIN:
ANYMAIL = {
"MAILGUN_API_KEY": MAILGUN_API_KEY,
"MAILGUN_SENDER_DOMAIN": MAILGUN_SENDER_DOMAIN,
}
AUTH_USER_MODEL = 'accounts.User'
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'adminsortable2',
'django_safemigrate',
'bootstrap4',
'health_check',
'health_check.db.apps.HealthCheckConfig',
'django_bootstrap_breadcrumbs',
# 'versatileimagefield',
'versatileimagefield.apps.VersatileImageFieldConfig',
'rest_framework',
'solo',
'accounts',
'icons',
'nav',
'calendar_generator',
'sis',
'jobs',
'stored_mail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'lb_health_check.middleware.AliveCheck',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'logging_middleware.middleware.LoggingMiddleware',
]
if DEBUG:
# Inject the debug toolbar
security_index = MIDDLEWARE.index('django.middleware.security.SecurityMiddleware')
MIDDLEWARE.insert(security_index+1, 'debug_toolbar.middleware.DebugToolbarMiddleware')
INSTALLED_APPS.append('debug_toolbar.apps.DebugToolbarConfig')
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / "templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'accounts.context_processors.has_admin_access'
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_ROOT = BASE_DIR / "scratch" / "media"
MEDIA_URL = env.str("MEDIA_URL", "/media/")
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = BASE_DIR / "scratch" / "static"
STATICFILES_DIRS = [BASE_DIR / "static"]
INTERNAL_IPS = [
'127.0.0.1',
'[::1]',
]
LOGIN_REDIRECT_URL = "/"
RESULTS_CACHE_SIZE = 2500
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
logz_format = {
'system': 'rectory-apps-web',
'system-hostname': hostname,
'system-ip-address': ip_address,
'system-uuid': uuid.uuid4().hex,
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'logzioFormat': {
'format': json.dumps(logz_format),
'validate': False,
},
'verbose': {
'format': '%(asctime)s %(name)s [%(levelname)s] %(filename)s:%(lineno)d %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
}
},
'loggers': {
'django': {
'handlers': ['console', 'mail_admins'],
'level': 'WARN',
},
'log-http-requests': {
'handlers': ['console'],
'level': 'INFO',
},
'jobs': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
},
'stored_mail': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
}
}
}
if LOGZ_REMOTE_URL and LOGZ_TOKEN:
LOGGING['handlers']['logzio'] = {
'class': 'logzio.handler.LogzioHandler',
'level': 'INFO',
'formatter': 'logzioFormat',
'logzio_type': "django",
'logs_drain_timeout': 5,
'url': LOGZ_REMOTE_URL,
'debug': False,
'network_timeout': 10,
'token': LOGZ_TOKEN,
}
LOGGING['loggers']['django']['handlers'].append('logzio')
LOGGING['loggers']['log-http-requests']['handlers'].append('logzio')
LOGGING['loggers']['jobs']['handlers'].append('logzio')
ALIVENESS_URL = "/health-check/"
| true | true |
f7feac26a36903f73b984dcc0360155b4034e6fc | 8,257 | py | Python | src/tantale/livestatus/parser.py | redref/tantale | 358748e7d4a1d87ee48168a03ed68acefb2b9ca2 | [
"Apache-2.0"
] | null | null | null | src/tantale/livestatus/parser.py | redref/tantale | 358748e7d4a1d87ee48168a03ed68acefb2b9ca2 | [
"Apache-2.0"
] | null | null | null | src/tantale/livestatus/parser.py | redref/tantale | 358748e7d4a1d87ee48168a03ed68acefb2b9ca2 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import traceback
import logging
from six import b as bytes
from tantale.livestatus.query import Query
from tantale.livestatus.command import Command
from tantale.livestatus.mapping import *
class Parser(object):
    """
    Parse raw livestatus requests (GET queries and external COMMANDs)
    into tantale Query / Command objects.
    """

    def __init__(self):
        self.log = logging.getLogger('tantale.livestatus')

    def field_map(self, field, table):
        """
        Map a livestatus query field to the tantale internal field name.

        Returns None for known-but-unwired fields (FIELDS_DUMMY) and
        raises for completely unknown fields.
        """
        # Strip the singular table prefix ("host_" on hosts, ...)
        if field.startswith("%s_" % table[:-1]):
            field = field[len(table):]
        # Log got no final 's'
        if field.startswith("log_"):
            field = field[4:]
        # Map parent host fields on a service query to "host.<field>"
        if table == 'services' and field.startswith('host_'):
            mapped = self.field_map(field[5:], 'hosts')
            if mapped:
                return 'host.%s' % mapped
            else:
                return None
        if field in FIELDS_MAPPING:
            return FIELDS_MAPPING[field]
        elif field in FIELDS_DUMMY:
            # Known field with no tantale equivalent
            return None
        else:
            raise Exception('Unknown field %s' % field)

    def parse_expr(self, arg_list, table):
        """
        Convert a Filter:/Stats: argument list to an expression triple
        [field, operator, operand], or None for unwired fields.
        """
        # TOFIX exclude custom_variable_names / not relevant
        # TOFIX for now assume right operand is constant
        if arg_list[0].endswith("custom_variable_names"):
            return None
        arg_list[0] = self.field_map(arg_list[0], table)
        # Not wired filters
        if arg_list[0] is None:
            return None
        if len(arg_list) == 3:
            # Livestatus sends everything as text; coerce numeric operands
            try:
                arg_list[2] = int(arg_list[2])
            except ValueError:
                pass
            return arg_list
        else:
            # BUGFIX: interpolate into the message instead of passing the
            # joined args as a spurious second Exception argument
            raise Exception(
                "Error parsing expression %s" % ' '.join(
                    str(arg) for arg in arg_list))

    def combine_expr(self, operator, expr_list):
        """Combine expressions with and/or - filter not defined ones"""
        if None in expr_list:
            res = []
            for expr in expr_list:
                if expr is not None:
                    res.append(expr)
            if len(res) == 1:
                # NOTE(review): the surviving expression is returned still
                # wrapped in a one-element list; downstream appears to
                # expect that shape -- confirm before changing.
                return res
            if len(res) == 0:
                return None
            expr_list = res
        return [operator, expr_list]

    def parse_command(self, command):
        """
        Parse data from :
            SCHEDULE_HOST_DOWNTIME / SCHEDULE_SVC_DOWNTIME
            ACKNOWLEDGE_HOST_PROBLEM / ACKNOWLEDGE_SVC_PROBLEM
            REMOVE_HOST_ACKNOWLEDGEMENT / REMOVE_SVC_ACKNOWLEDGEMENT
            DEL_HOST_DOWNTIME / DEL_SVC_DOWNTIME
            DISABLE_PASSIVE_HOST_CHECKS /
            DISABLE_PASSIVE_SVC_CHECKS (wired to delete from backend method)

        Returns (keepalive, Command) -- commands always keep the
        connection alive.
        """
        args = command.split(';')
        command = args.pop(0)
        if command in ('DEL_HOST_DOWNTIME', 'DEL_SVC_DOWNTIME'):
            # remove downtime is a special case (addressed by downtime id)
            return True, Command(
                function='downtime',
                action='remove',
                doc_id=int(args.pop(0)))
        else:
            kwargs = {}
            command = command.split('_')
            # Target: host-level or service-level command
            if command[1] == 'HOST' or command[2] == 'HOST':
                kwargs['host'] = args[0]
            elif command[1] == 'SVC' or command[2] == 'SVC':
                kwargs['host'] = args[0]
                kwargs['service'] = args[1]
            else:
                raise Exception('Unknown command %s' % command)
            # parse action
            if command[0] in ('REMOVE', 'DISABLE'):
                kwargs['action'] = 'remove'
            else:
                kwargs['action'] = 'add'
            # parse function
            if command[1] == 'PASSIVE':
                kwargs['function'] = 'drop'
            elif command[2] == 'DOWNTIME':
                kwargs['function'] = 'downtime'
            elif (command[2] == 'ACKNOWLEDGEMENT' or
                    command[0] == 'ACKNOWLEDGE'):
                kwargs['function'] = 'acknowledge'
            else:
                raise Exception('Unknown command %s' % command)
            # Always keepalived
            return True, Command(**kwargs)

    def parse(self, string):
        """
        Parse a raw livestatus request and return (keepalive, query),
        where query is a Query (GET) or Command (COMMAND) object.
        Returns None implicitly on parse errors (logged, not raised).
        """
        method = None
        table = None
        options = {}
        keepalive = False
        line = ''
        try:
            for line in string.split('\n'):
                self.log.debug("Livestatus query : %s" % line)
                members = line.split(' ')
                # Empty line
                if members[0] == '':
                    pass
                # Stats
                elif members[0] == 'Stats:':
                    options['stats'] = options.get('stats', [])
                    options['stats'].append(
                        self.parse_expr(members[1:], table))
                elif members[0] == 'StatsAnd:':
                    nb = int(members[1])
                    options['stats'][-nb] = self.combine_expr(
                        'and', options['stats'][-nb:])
                    options['stats'] = options['stats'][:-nb + 1]
                elif members[0] == 'StatsOr:':
                    nb = int(members[1])
                    options['stats'][-nb] = self.combine_expr(
                        'or', options['stats'][-nb:])
                    options['stats'] = options['stats'][:-nb + 1]
                elif members[0] == 'StatsNegate:':
                    # BUGFIX: negate the *last* stats expression in place
                    # (was options['stats'][1], which is wrong and raises
                    # IndexError with a single stat; cf. 'Negate:' below)
                    options['stats'][-1] = self.combine_expr(
                        'not', options['stats'][-1])
                # Filters
                elif members[0] == 'Filter:':
                    options['filters'] = options.get('filters', [])
                    options['filters'].append(
                        self.parse_expr(members[1:], table))
                elif members[0] == 'And:':
                    nb = int(members[1])
                    options['filters'][-nb] = self.combine_expr(
                        'and', options['filters'][-nb:])
                    options['filters'] = options['filters'][:-nb + 1]
                elif members[0] == 'Or:':
                    nb = int(members[1])
                    options['filters'][-nb] = self.combine_expr(
                        'or', options['filters'][-nb:])
                    options['filters'] = options['filters'][:-nb + 1]
                elif members[0] == 'Negate:':
                    options['filters'][-1] = self.combine_expr(
                        'not', options['filters'][-1])
                # Method
                elif members[0] == 'GET':
                    method = 'GET'
                    table = members[1]
                elif members[0] == 'COMMAND':
                    return self.parse_command(members[2])
                # Optional lines / Headers
                elif members[0] == 'AuthUser:':
                    options['filters'] = options.get('filters', [])
                    options['filters'].append(['contacts', '>=', members[1]])
                elif members[0] == 'Columns:':
                    options['columns'] = members[1:]
                elif members[0] == 'ColumnHeaders:':
                    options['headers'] = members[1:]
                elif members[0] == 'ResponseHeader:':
                    options['rheader'] = members[1]
                elif members[0] == 'KeepAlive:':
                    if members[1] == 'on':
                        keepalive = True
                elif members[0] == 'OutputFormat:':
                    options['oformat'] = members[1]
                elif members[0] == 'Limit:':
                    options['limit'] = int(members[1])
                elif members[0] == 'Localtime:':
                    # TOFIX no time handling
                    pass
                # Raise error is something not understood
                else:
                    raise Exception('Unknown command %s' % members[0])
            return keepalive, Query(method, table, **options)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; parse errors are logged, not raised.
            self.log.warning('Livestatus query parse error')
            self.log.debug(
                'Error %s\nparsing line "%s" on query "%s"'
                % (traceback.format_exc(), line, repr(string)))
| 36.056769 | 77 | 0.475354 |
import traceback
import logging
from six import b as bytes
from tantale.livestatus.query import Query
from tantale.livestatus.command import Command
from tantale.livestatus.mapping import *
class Parser(object):
def __init__(self):
self.log = logging.getLogger('tantale.livestatus')
def field_map(self, field, table):
if field.startswith("%s_" % table[:-1]):
field = field[len(table):]
if field.startswith("log_"):
field = field[4:]
if table == 'services' and field.startswith('host_'):
mapped = self.field_map(field[5:], 'hosts')
if mapped:
return 'host.%s' % mapped
else:
return None
if field in FIELDS_MAPPING:
return FIELDS_MAPPING[field]
elif field in FIELDS_DUMMY:
return None
else:
raise Exception('Unknown field %s' % field)
def parse_expr(self, arg_list, table):
if arg_list[0].endswith("custom_variable_names"):
return None
arg_list[0] = self.field_map(arg_list[0], table)
if arg_list[0] is None:
return None
if len(arg_list) == 3:
try:
arg_list[2] = int(arg_list[2])
except ValueError:
pass
return arg_list
else:
raise Exception(
"Error parsing expression %s", ' '.join(arg_list))
def combine_expr(self, operator, expr_list):
if None in expr_list:
res = []
for expr in expr_list:
if expr is not None:
res.append(expr)
if len(res) == 1:
return res
if len(res) == 0:
return None
expr_list = res
return [operator, expr_list]
def parse_command(self, command):
args = command.split(';')
command = args.pop(0)
if command in ('DEL_HOST_DOWNTIME', 'DEL_SVC_DOWNTIME'):
return True, Command(
function='downtime',
action='remove',
doc_id=int(args.pop(0)))
else:
kwargs = {}
command = command.split('_')
if command[1] == 'HOST' or command[2] == 'HOST':
kwargs['host'] = args[0]
elif command[1] == 'SVC' or command[2] == 'SVC':
kwargs['host'] = args[0]
kwargs['service'] = args[1]
else:
raise Exception('Unknown command %s' % command)
if command[0] in ('REMOVE', 'DISABLE'):
kwargs['action'] = 'remove'
else:
kwargs['action'] = 'add'
if command[1] == 'PASSIVE':
kwargs['function'] = 'drop'
elif command[2] == 'DOWNTIME':
kwargs['function'] = 'downtime'
elif (command[2] == 'ACKNOWLEDGEMENT' or
command[0] == 'ACKNOWLEDGE'):
kwargs['function'] = 'acknowledge'
else:
raise Exception('Unknown command %s' % command)
return True, Command(**kwargs)
def parse(self, string):
method = None
table = None
options = {}
keepalive = False
try:
for line in string.split('\n'):
self.log.debug("Livestatus query : %s" % line)
members = line.split(' ')
if members[0] == '':
pass
elif members[0] == 'Stats:':
options['stats'] = options.get('stats', [])
options['stats'].append(
self.parse_expr(members[1:], table))
elif members[0] == 'StatsAnd:':
nb = int(members[1])
options['stats'][-nb] = self.combine_expr(
'and', options['stats'][-nb:])
options['stats'] = options['stats'][:-nb + 1]
elif members[0] == 'StatsOr:':
nb = int(members[1])
options['stats'][-nb] = self.combine_expr(
'or', options['stats'][-nb:])
options['stats'] = options['stats'][:-nb + 1]
elif members[0] == 'StatsNegate:':
options['stats'][1] = self.combine_expr(
'not', options['stats'][-1])
elif members[0] == 'Filter:':
options['filters'] = options.get('filters', [])
options['filters'].append(
self.parse_expr(members[1:], table))
elif members[0] == 'And:':
nb = int(members[1])
options['filters'][-nb] = self.combine_expr(
'and', options['filters'][-nb:])
options['filters'] = options['filters'][:-nb + 1]
elif members[0] == 'Or:':
nb = int(members[1])
options['filters'][-nb] = self.combine_expr(
'or', options['filters'][-nb:])
options['filters'] = options['filters'][:-nb + 1]
elif members[0] == 'Negate:':
options['filters'][-1] = self.combine_expr(
'not', options['filters'][-1])
elif members[0] == 'GET':
method = 'GET'
table = members[1]
elif members[0] == 'COMMAND':
return self.parse_command(members[2])
elif members[0] == 'AuthUser:':
options['filters'] = options.get('filters', [])
options['filters'].append(['contacts', '>=', members[1]])
elif members[0] == 'Columns:':
options['columns'] = members[1:]
elif members[0] == 'ColumnHeaders:':
options['headers'] = members[1:]
elif members[0] == 'ResponseHeader:':
options['rheader'] = members[1]
elif members[0] == 'KeepAlive:':
if members[1] == 'on':
keepalive = True
elif members[0] == 'OutputFormat:':
options['oformat'] = members[1]
elif members[0] == 'Limit:':
options['limit'] = int(members[1])
elif members[0] == 'Localtime:':
pass
else:
raise Exception('Unknown command %s' % members[0])
return keepalive, Query(method, table, **options)
except:
self.log.warning('Livestatus query parse error')
self.log.debug(
'Error %s\nparsing line "%s" on query "%s"'
% (traceback.format_exc(), line, repr(string)))
| true | true |
f7feac98c8b3082ebe0c888471fdd159a3744560 | 1,282 | py | Python | keepmail/keepmail.py | rgravina/keepmail | 1f53db02ffda8f363ada31340a21eeab176b15ff | [
"MIT"
] | 1 | 2016-05-09T01:19:44.000Z | 2016-05-09T01:19:44.000Z | keepmail/keepmail.py | rgravina/keepmail | 1f53db02ffda8f363ada31340a21eeab176b15ff | [
"MIT"
] | null | null | null | keepmail/keepmail.py | rgravina/keepmail | 1f53db02ffda8f363ada31340a21eeab176b15ff | [
"MIT"
] | null | null | null | """
A dummy mail server.
"""
from zope.interface import implements
from twisted.internet import defer
from twisted.mail import smtp
class ConsoleMessageDelivery:
    """Delivery policy: accept every sender and recipient, and hand each
    accepted message to a fresh ConsoleMessage consumer."""
    implements(smtp.IMessageDelivery)
    def receivedHeader(self, helo, origin, recipients):
        # Value used as the "Received:" header of handled messages
        return "Received: ConsoleMessageDelivery"
    def validateFrom(self, helo, origin):
        # All addresses are accepted
        return origin
    def validateTo(self, user):
        # Every recipient is valid; each one gets its own message consumer
        return lambda: ConsoleMessage()
class ConsoleMessage:
    """IMessage implementation that buffers incoming lines and prints the
    whole message to stdout when the message ends."""
    implements(smtp.IMessage)
    def __init__(self):
        # Buffered message lines; set to None once consumed or discarded
        self.lines = []
    def lineReceived(self, line):
        self.lines.append(line)
    def eomReceived(self):
        # End of message: dump it to stdout and report synchronous success
        print "New message received:"
        print "\n".join(self.lines)
        self.lines = None
        return defer.succeed(None)
    def connectionLost(self):
        # There was an error, throw away the stored lines
        self.lines = None
class KeepmailSMTPFactory(smtp.SMTPFactory):
    """SMTP factory that wires every connection to the console delivery."""
    def __init__(self, *a, **kw):
        smtp.SMTPFactory.__init__(self, *a, **kw)
        # One shared delivery object for all connections
        self.delivery = ConsoleMessageDelivery()
    def buildProtocol(self, addr):
        p = smtp.SMTPFactory.buildProtocol(self, addr)
        p.delivery = self.delivery
        return p
| 25.137255 | 57 | 0.649766 | """
A dummy mail server.
"""
from zope.interface import implements
from twisted.internet import defer
from twisted.mail import smtp
class ConsoleMessageDelivery:
implements(smtp.IMessageDelivery)
def receivedHeader(self, helo, origin, recipients):
return "Received: ConsoleMessageDelivery"
def validateFrom(self, helo, origin):
return origin
def validateTo(self, user):
return lambda: ConsoleMessage()
class ConsoleMessage:
implements(smtp.IMessage)
def __init__(self):
self.lines = []
def lineReceived(self, line):
self.lines.append(line)
def eomReceived(self):
print "New message received:"
print "\n".join(self.lines)
self.lines = None
return defer.succeed(None)
def connectionLost(self):
self.lines = None
class KeepmailSMTPFactory(smtp.SMTPFactory):
def __init__(self, *a, **kw):
smtp.SMTPFactory.__init__(self, *a, **kw)
self.delivery = ConsoleMessageDelivery()
def buildProtocol(self, addr):
p = smtp.SMTPFactory.buildProtocol(self, addr)
p.delivery = self.delivery
return p
| false | true |
f7feaddd207f56faba601b1f8ec86bfa765334c4 | 5,134 | py | Python | torch/fx/passes/shape_prop.py | stungkit/pytorch | 0f05e398705bf15406bce79f7ee57d3935ad2abd | [
"Intel"
] | 2 | 2020-03-13T06:57:49.000Z | 2020-05-17T04:18:14.000Z | torch/fx/passes/shape_prop.py | ellhe-blaster/pytorch | e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25 | [
"Intel"
] | 1 | 2022-01-10T18:39:28.000Z | 2022-01-10T19:15:57.000Z | torch/fx/passes/shape_prop.py | ellhe-blaster/pytorch | e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25 | [
"Intel"
] | 1 | 2022-03-26T14:42:50.000Z | 2022-03-26T14:42:50.000Z | import torch
import torch.fx
import traceback
from torch.fx.node import Node, map_aggregate
from typing import Any, Tuple, NamedTuple, Optional, Dict
from torch.fx._compatibility import compatibility
@compatibility(is_backward_compatible=True)
class TensorMetadata(NamedTuple):
    # TensorMetadata is a structure containing pertinent information
    # about a tensor within a PyTorch program.
    # General Tensor metadata
    shape : torch.Size
    dtype : torch.dtype
    requires_grad : bool
    stride : Tuple[int]
    # Memory format the tensor is contiguous in, or None if it is not
    # contiguous in any format probed by _extract_tensor_metadata.
    memory_format : Optional[torch.memory_format]
    # Quantization metadata
    is_quantized : bool
    # Empty unless is_quantized; then holds 'qscheme' plus scheme-dependent
    # 'scale' / 'zero_point' (and 'axis' for per-channel schemes).
    qparams: Dict[str, Any]
def _extract_tensor_metadata(result : torch.Tensor) -> TensorMetadata:
"""
Extract a TensorMetadata NamedTuple describing `result`.
"""
shape = result.shape
dtype = result.dtype
requires_grad = result.requires_grad
stride = result.stride()
memory_formats = {
torch.contiguous_format,
torch.channels_last,
torch.channels_last_3d,
}
memory_format = None
for query_format in memory_formats:
if result.is_contiguous(memory_format=query_format):
memory_format = query_format
break
is_quantized = result.is_quantized
qparams: Dict[str, Any] = {}
if is_quantized:
qscheme = result.qscheme()
qparams["qscheme"] = qscheme
if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
qparams["scale"] = result.q_scale() # type: ignore[assignment]
qparams["zero_point"] = result.q_zero_point() # type: ignore[assignment]
elif qscheme in {torch.per_channel_affine, torch.per_channel_affine_float_qparams, torch.per_channel_symmetric}:
# In this branch, scale and zero_point are expected to be tensors,
# we store the values as immutable_list in TensorMetadata for
# easier serialization downstream
qparams["scale"] = result.q_per_channel_scales().tolist() # type: ignore[assignment]
qparams["zero_point"] = result.q_per_channel_zero_points().tolist() # type: ignore[assignment]
qparams["axis"] = result.q_per_channel_axis() # type: ignore[assignment]
return TensorMetadata(
shape, dtype, requires_grad, stride, memory_format, is_quantized, qparams)
@compatibility(is_backward_compatible=True)
class ShapeProp(torch.fx.Interpreter):
    """
    Interpreter that executes an FX graph node by node and records the
    shape/type of each intermediate result on the node itself.

    After ``propagate(*sample_inputs)`` runs, every executed node carries:

    * ``node.meta['type']`` -- the Python type of the value it produced
    * ``node.meta['tensor_meta']`` -- a ``TensorMetadata`` (or an aggregate
      of them mirroring the result structure) whenever the result contained
      at least one ``torch.Tensor``

    Example::

        gm = torch.fx.symbolic_trace(model)
        ShapeProp(gm).propagate(sample_input)
        for node in gm.graph.nodes:
            print(node.name, node.meta['tensor_meta'].dtype,
                  node.meta['tensor_meta'].shape)

    Args:
         module (GraphModule): The module to be executed
    """
    def run_node(self, n : Node) -> Any:
        """Execute one node; on success attach type/shape metadata to it."""
        try:
            result = super().run_node(n)
        except Exception:
            # Print the underlying traceback, then re-raise wrapped so the
            # offending node is identified in the error message.
            traceback.print_exc()
            raise RuntimeError(
                f"ShapeProp error for: node={n.format_node()} with "
                f"meta={n.meta}"
            )

        # Mirror the result structure, swapping tensors for TensorMetadata
        # and remembering whether any tensor was seen at all.
        saw_tensor = [False]

        def tensor_to_meta(obj):
            if isinstance(obj, torch.Tensor):
                saw_tensor[0] = True
                return _extract_tensor_metadata(obj)
            return obj

        meta = map_aggregate(result, tensor_to_meta)
        if saw_tensor[0]:
            n.meta['tensor_meta'] = meta

        n.meta['type'] = type(result)
        return result

    def propagate(self, *args):
        """
        Interpret the wrapped module on ``*args`` (the sample inputs),
        recording shape/type metadata on each node as a side effect, and
        return the module's output value.

        Args:
            *args (Tensor): the sample input.

        Returns:
            Any: The value returned from executing the Module
        """
        return super().run(*args)
| 33.776316 | 120 | 0.626217 | import torch
import torch.fx
import traceback
from torch.fx.node import Node, map_aggregate
from typing import Any, Tuple, NamedTuple, Optional, Dict
from torch.fx._compatibility import compatibility
@compatibility(is_backward_compatible=True)
class TensorMetadata(NamedTuple):
shape : torch.Size
dtype : torch.dtype
requires_grad : bool
stride : Tuple[int]
memory_format : Optional[torch.memory_format]
is_quantized : bool
qparams: Dict[str, Any]
def _extract_tensor_metadata(result : torch.Tensor) -> TensorMetadata:
shape = result.shape
dtype = result.dtype
requires_grad = result.requires_grad
stride = result.stride()
memory_formats = {
torch.contiguous_format,
torch.channels_last,
torch.channels_last_3d,
}
memory_format = None
for query_format in memory_formats:
if result.is_contiguous(memory_format=query_format):
memory_format = query_format
break
is_quantized = result.is_quantized
qparams: Dict[str, Any] = {}
if is_quantized:
qscheme = result.qscheme()
qparams["qscheme"] = qscheme
if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
qparams["scale"] = result.q_scale()
qparams["zero_point"] = result.q_zero_point()
elif qscheme in {torch.per_channel_affine, torch.per_channel_affine_float_qparams, torch.per_channel_symmetric}:
qparams["scale"] = result.q_per_channel_scales().tolist()
qparams["zero_point"] = result.q_per_channel_zero_points().tolist()
qparams["axis"] = result.q_per_channel_axis()
return TensorMetadata(
shape, dtype, requires_grad, stride, memory_format, is_quantized, qparams)
@compatibility(is_backward_compatible=True)
class ShapeProp(torch.fx.Interpreter):
def run_node(self, n : Node) -> Any:
try:
result = super().run_node(n)
except Exception:
traceback.print_exc()
raise RuntimeError(
f"ShapeProp error for: node={n.format_node()} with "
f"meta={n.meta}"
)
found_tensor = False
def extract_tensor_meta(obj):
if isinstance(obj, torch.Tensor):
nonlocal found_tensor
found_tensor = True
return _extract_tensor_metadata(obj)
else:
return obj
meta = map_aggregate(result, extract_tensor_meta)
if found_tensor:
n.meta['tensor_meta'] = meta
n.meta['type'] = type(result)
return result
def propagate(self, *args):
return super().run(*args)
| true | true |
f7feadebaa83dcc91746056196a01efa6f54fb3e | 19,302 | py | Python | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2019_11_01_preview/operations/_data_collection_rule_associations_operations.py | xolve/azure-sdk-for-python | 9f5baa19c392f77f811d936ee43450e4ea524002 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2019_11_01_preview/operations/_data_collection_rule_associations_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2019_11_01_preview/operations/_data_collection_rule_associations_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DataCollectionRuleAssociationsOperations(object):
"""DataCollectionRuleAssociationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2019_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Generated operation-group plumbing: hold references to the shared
        # pipeline client, (de)serializers, and client configuration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_by_resource(
        self,
        resource_uri,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.DataCollectionRuleAssociationProxyOnlyResourceListResult"]
        """Lists associations for the specified resource.

        Lists associations for the specified resource.

        :param resource_uri: The identifier of the resource.
        :type resource_uri: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DataCollectionRuleAssociationProxyOnlyResourceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleAssociationProxyOnlyResourceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataCollectionRuleAssociationProxyOnlyResourceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01-preview"
        accept = "application/json"

        # Build the GET request: the templated service URL with api-version
        # for the first page, or the opaque continuation URL for later pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True, min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries all query parameters
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Deserialize one page into (continuation token, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('DataCollectionRuleAssociationProxyOnlyResourceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetch one page, mapping error status codes to typed exceptions.
        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazy pager: pages are requested only as the iterator is consumed.
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations'}  # type: ignore
def list_by_rule(
    self,
    resource_group_name,  # type: str
    data_collection_rule_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.DataCollectionRuleAssociationProxyOnlyResourceListResult"]
    """Lists associations for the specified data collection rule.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param data_collection_rule_name: The name of the data collection rule. The name is case
     insensitive.
    :type data_collection_rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either
     DataCollectionRuleAssociationProxyOnlyResourceListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleAssociationProxyOnlyResourceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataCollectionRuleAssociationProxyOnlyResourceListResult"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-11-01-preview"
    accept = "application/json"

    def build_request(next_link=None):
        # Every request carries the Accept header; only the first page needs
        # the expanded URL template and the api-version query parameter.
        headers = {}  # type: Dict[str, Any]
        headers['Accept'] = self._serialize.header("accept", accept, 'str')
        params = {}  # type: Dict[str, Any]
        if next_link:
            # The service hands back a fully-formed link for later pages.
            return self._client.get(next_link, params, headers)
        path_args = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'dataCollectionRuleName': self._serialize.url("data_collection_rule_name", data_collection_rule_name, 'str'),
        }
        url = self._client.format_url(self.list_by_rule.metadata['url'], **path_args)  # type: ignore
        params['api-version'] = self._serialize.query("api_version", api_version, 'str')
        return self._client.get(url, params, headers)

    def unpack_page(pipeline_response):
        # Deserialize one page and apply the optional cls() hook to its items.
        page = self._deserialize('DataCollectionRuleAssociationProxyOnlyResourceListResult', pipeline_response)
        elements = page.value
        if cls:
            elements = cls(elements)
        return page.next_link or None, iter(elements)

    def fetch_page(next_link=None):
        request = build_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(fetch_page, unpack_page)
list_by_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}/associations'}  # type: ignore
def get(
    self,
    resource_uri,  # type: str
    association_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DataCollectionRuleAssociationProxyOnlyResource"
    """Returns the specified association.

    :param resource_uri: The identifier of the resource.
    :type resource_uri: str
    :param association_name: The name of the association. The name is case insensitive.
    :type association_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DataCollectionRuleAssociationProxyOnlyResource, or the result of cls(response)
    :rtype: ~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleAssociationProxyOnlyResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataCollectionRuleAssociationProxyOnlyResource"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-11-01-preview"
    accept = "application/json"

    # Expand the URL template with the escaped path arguments.
    url = self._client.format_url(
        self.get.metadata['url'],  # type: ignore
        resourceUri=self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True, min_length=1),
        associationName=self._serialize.url("association_name", association_name, 'str'),
    )
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DataCollectionRuleAssociationProxyOnlyResource', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{associationName}'}  # type: ignore
def create(
    self,
    resource_uri,  # type: str
    association_name,  # type: str
    body=None,  # type: Optional["_models.DataCollectionRuleAssociationProxyOnlyResource"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DataCollectionRuleAssociationProxyOnlyResource"
    """Creates or updates an association.

    :param resource_uri: The identifier of the resource.
    :type resource_uri: str
    :param association_name: The name of the association. The name is case insensitive.
    :type association_name: str
    :param body: The payload.
    :type body: ~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleAssociationProxyOnlyResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DataCollectionRuleAssociationProxyOnlyResource, or the result of cls(response)
    :rtype: ~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleAssociationProxyOnlyResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataCollectionRuleAssociationProxyOnlyResource"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-11-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True, min_length=1),
        'associationName': self._serialize.url("association_name", association_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # The payload is optional; an absent body is sent as an empty request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    if body is not None:
        body_content = self._serialize.body(body, 'DataCollectionRuleAssociationProxyOnlyResource')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content

    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # 200 (updated) and 201 (created) carry the same resource payload, so the
    # two previously duplicated deserialization branches are collapsed into one.
    deserialized = self._deserialize('DataCollectionRuleAssociationProxyOnlyResource', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{associationName}'}  # type: ignore
def delete(
    self,
    resource_uri,  # type: str
    association_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Deletes an association.

    :param resource_uri: The identifier of the resource.
    :type resource_uri: str
    :param association_name: The name of the association. The name is case insensitive.
    :type association_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-11-01-preview"
    accept = "application/json"

    # Expand the URL template with the escaped path arguments.
    url = self._client.format_url(
        self.delete.metadata['url'],  # type: ignore
        resourceUri=self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True, min_length=1),
        associationName=self._serialize.url("association_name", association_name, 'str'),
    )
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Both 200 and 204 indicate a successful deletion.
    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{associationName}'}  # type: ignore
| 48.989848 | 208 | 0.672055 |
# Pipeline, paging and exception plumbing shared by the generated operation class below.
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
# Optional callback applied to each response: (pipeline_response, deserialized, headers) -> Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DataCollectionRuleAssociationsOperations(object):
"""Operations over data collection rule associations.

Constructed with the shared client pipeline, configuration and
(de)serializers; not intended to be instantiated directly.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_resource(
self,
resource_uri,
**kwargs
):
"""Lists associations for the specified resource.

:param resource_uri: The identifier of the resource.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of DataCollectionRuleAssociationProxyOnlyResourceListResult.
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# First page: expand the URL template and add the api-version.
url = self.list_by_resource.metadata['url']
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
# Later pages: the service returns a fully-formed next link.
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DataCollectionRuleAssociationProxyOnlyResourceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations'}
def list_by_rule(
self,
resource_group_name,
data_collection_rule_name,
**kwargs
):
"""Lists associations for the specified data collection rule.

:param resource_group_name: The name of the resource group. The name is case insensitive.
:param data_collection_rule_name: The name of the data collection rule. The name is case insensitive.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of DataCollectionRuleAssociationProxyOnlyResourceListResult.
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# First page: expand the URL template and add the api-version.
url = self.list_by_rule.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'dataCollectionRuleName': self._serialize.url("data_collection_rule_name", data_collection_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
# Later pages: the service returns a fully-formed next link.
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DataCollectionRuleAssociationProxyOnlyResourceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}/associations'}
def get(
self,
resource_uri,
association_name,
**kwargs
):
"""Returns the specified association.

:param resource_uri: The identifier of the resource.
:param association_name: The name of the association. The name is case insensitive.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleAssociationProxyOnlyResource, or the result of cls(response)
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True, min_length=1),
'associationName': self._serialize.url("association_name", association_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DataCollectionRuleAssociationProxyOnlyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{associationName}'}
def create(
self,
resource_uri,
association_name,
body=None,
**kwargs
):
"""Creates or updates an association.

:param resource_uri: The identifier of the resource.
:param association_name: The name of the association. The name is case insensitive.
:param body: The payload; optional.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleAssociationProxyOnlyResource, or the result of cls(response)
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True, min_length=1),
'associationName': self._serialize.url("association_name", association_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
# An absent body is sent as an empty request body.
body_content_kwargs = {}
if body is not None:
body_content = self._serialize.body(body, 'DataCollectionRuleAssociationProxyOnlyResource')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
# 200 = updated, 201 = created; both carry the same resource payload.
if response.status_code == 200:
deserialized = self._deserialize('DataCollectionRuleAssociationProxyOnlyResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DataCollectionRuleAssociationProxyOnlyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{associationName}'}
def delete(
self,
resource_uri,
association_name,
**kwargs
):
"""Deletes an association.

:param resource_uri: The identifier of the resource.
:param association_name: The name of the association. The name is case insensitive.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True, min_length=1),
'associationName': self._serialize.url("association_name", association_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
# 200 and 204 both indicate successful deletion.
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations/{associationName}'}
| true | true |
f7feae564d8b035286fd8c12171874b1579e9828 | 9,012 | py | Python | examples/llvm_autotuning/executor.py | bcui19/CompilerGym | b3d23cf23c4242df8cf8f54f55db79ef3091eabf | [
"MIT"
] | null | null | null | examples/llvm_autotuning/executor.py | bcui19/CompilerGym | b3d23cf23c4242df8cf8f54f55db79ef3091eabf | [
"MIT"
] | null | null | null | examples/llvm_autotuning/executor.py | bcui19/CompilerGym | b3d23cf23c4242df8cf8f54f55db79ef3091eabf | [
"MIT"
] | 1 | 2022-02-16T04:02:46.000Z | 2022-02-16T04:02:46.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
from collections import deque
from contextlib import contextmanager
from enum import Enum
from itertools import islice
from pathlib import Path
from typing import Optional
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
from submitit import AutoExecutor
# Module-level logger shared by the executor wrappers defined below.
logger = logging.getLogger(__name__)
class Executor(BaseModel):
"""Defines the execution environment for jobs.
E.g. a node on a cluster, the local machine, etc. To create jobs,
instantiate this class and submit functions to using the executor API:
>>> executor = Executor(executor="local", block=True)
>>> with executor.get_executor() as executor:
... executor.submit(my_job, arg1, arg2)
... executor.submit(another_job)
"""
class Type(str, Enum):
"""Types of execution environments."""
SLURM = "slurm"
"""Submit jobs to a SLURM cluster scheduler."""
LOCAL = "local"
"""Submit jobs to run on the current machine."""
DEBUG = "debug"
"""Submit jobs to run synchronously on the current machine."""
NOOP = "noop"
"""Submitted jobs return immediately without executing. This can be
useful for debugging, where you want to validate the code and
configuration without performing any computation.
"""
type: Type = Field(allow_mutation=False)
"""The execution environment."""
slurm_partition: Optional[str] = Field(default=None, allow_mutation=False)
"""The name of the SLURM partition to submit jobs to.
Only used for :code:`Type.SLURM` executors.
"""
# NOTE(review): ge=1 appears to reject the documented cpus == -1 "use all
# physical cores" case for local executors — confirm the intended constraint.
cpus: int = Field(default=1, allow_mutation=False, ge=1)
"""The number of CPU threads to provision.
If the type of executor is :code:`Type.SLURM`, this is the number of CPU
threads to provision for each job. If the type of executor is
:code:`Type.LOCAL`, this is the number of parallel jobs to process in a
thread pool. If the value is -1 and the executor is :code:`Type.LOCAL`, the
number of physical cores on the machine is used. Has no effect for
:code:`Type.DEBUG` and :code:`Type.NOOP`.
"""
# Maximum wall-time per job, in hours (converted to minutes for SLURM and
# to seconds for local jobs below).
timeout_hours: float = Field(default=12, allow_mutation=False, gt=0)
block: bool = Field(default=False, allow_mutation=False)
"""If :code:`True`, the :code:`get_executor()` context manager will block
until all jobs have completed when exiting scope. Jobs are still submitted
asynchronously for parallel execution.
"""
# === Start of public API. ===
@contextmanager
def get_executor(self, logs_dir: Path, cpus=None):
cpus = cpus or self.cpus
if self.type == self.Type.SLURM:
executor = AutoExecutor(folder=logs_dir)
executor.update_parameters(
timeout_min=int(round(self.timeout_hours * 60)),
nodes=1,
cpus_per_task=cpus,
slurm_partition=self.slurm_partition,
)
name = self.slurm_partition
elif self.type == self.Type.LOCAL:
executor, name = (
LocalParallelExecutor(
cpus=multiprocessing.cpu_count() if cpus == -1 else cpus,
timeout_seconds=int(round(self.timeout_hours * 3600)),
),
"local",
)
elif self.type == self.Type.DEBUG:
executor, name = LocalSynchronousExecutor(), "local"
elif self.type == self.Type.NOOP:
executor, name = DummyExecutor(), "noop"
else:
assert False, f"Unknown executor: {self.type} ({type(self.type).__name__})"
# Record submitted jobs so that they can be waited on below.
executor = WrappedExecutor(executor, name=name)
yield executor
if self.type == self.Type.DEBUG or self.block:
wait_on_jobs(
executor.jobs,
executor_name=str(executor),
cancel_on_error=self.type == self.Type.SLURM,
)
# Only backends that define close() (e.g. LocalParallelExecutor) need a shutdown.
if hasattr(executor.unwrapped, "close"):
executor.unwrapped.close()
# === Start of implementation details. ===
@validator("slurm_partition")
def validate_slurm_partition(cls, value, *, values, **kwargs):
# A partition name is mandatory when targeting SLURM; irrelevant otherwise.
del kwargs
if values["type"] == cls.Type.SLURM:
assert value, f"Must specify a partition for executor: {values['executor']}"
return value
@root_validator
def local_always_blocks(cls, values):
# Local and no-op executors always block (see the `block` field docs).
if values["type"] == cls.Type.LOCAL or values["type"] == cls.Type.NOOP:
values["block"] = True
return values
class Config:
validate_assignment = True
class WrappedExecutor:
    """An executor-like interface that records all jobs that are submitted."""

    def __init__(self, executor, name: str):
        self.name = name
        self.unwrapped = executor
        self.jobs = []

    def submit(self, *args, **kwargs):
        # Delegate to the wrapped executor, then remember the returned job
        # handle so callers can later wait on everything that was submitted.
        submitted = self.unwrapped.submit(*args, **kwargs)
        logger.info("Submitting job %s to %s ...", submitted.job_id, self)
        self.jobs.append(submitted)
        return submitted

    def __repr__(self) -> str:
        return self.name
def wait_on_jobs(jobs, executor_name: str = "executor", cancel_on_error: bool = True):
    """Block until every job in `jobs` has completed.

    If `cancel_on_error` is True, the first failing job is logged, re-queued,
    and all outstanding jobs (including the failed one) are cancelled.
    Otherwise a job failure propagates out of this function.
    """
    njobs = len(jobs)
    jobs = deque(jobs)
    def cancel_all_jobs(jobs):
        # Best-effort cancellation: jobs that cannot be cancelled are ignored.
        print(f"Cancelling {len(jobs)} {executor_name} jobs")
        for job in jobs:
            try:
                job.cancel()
            except:  # noqa
                pass
    # Produce a list of the first few job IDs
    max_num_job_ids_to_show = 8
    job_ids = [j.job_id for j in islice(jobs, max_num_job_ids_to_show)]
    job_ids = ", ".join(str(x) for x in job_ids)
    job_ids = f"job ID: {job_ids}" if len(jobs) == 1 else f"job IDs: {job_ids}"
    if len(jobs) > max_num_job_ids_to_show:
        job_ids = f"{job_ids} ..."
    logger.info(
        f"Waiting for {len(jobs)} {executor_name} jobs to complete with {job_ids}"
    )
    completed = 0
    while jobs:
        job = jobs.popleft()
        if cancel_on_error:
            try:
                job.result()
                completed += 1
                logger.info(f"Jobs completed = {completed} of {njobs} ...")
            except Exception as e:  # noqa Intentionally broad.
                logger.error(f"Caught: {type(e).__name__}: {e}")
                # Re-queue the failed job so it is covered by the cancel pass.
                jobs.append(job)
                return cancel_all_jobs(jobs)
        else:
            job.result()
            completed += 1
            logger.info(f"Jobs completed = {completed} of {njobs} ...")
    logger.info("All done.")
class LocalParallelExecutor:
    """An executor which uses a process pool to run jobs in parallel on the
    local machine.
    """

    class LocalJob:
        """Handle for one job submitted to the process pool."""

        def __init__(self, job_id: int, async_result, timeout_seconds: int):
            self.job_id = job_id
            self.timeout_seconds = timeout_seconds
            self._async_result = async_result

        def result(self):
            # Blocks until the pooled call finishes (or times out).
            return self._async_result.get(timeout=self.timeout_seconds)

        def cancel(self):
            # Jobs handed to the pool cannot be cancelled.
            pass

    def __init__(self, cpus: int, timeout_seconds: int):
        self.last_job_id = 0
        self.timeout_seconds = timeout_seconds
        self.process_pool = multiprocessing.Pool(cpus)
        self.futures = []

    def submit(self, fn, *args, **kwargs):
        # Dispatch asynchronously and keep the future so close() can drain it.
        async_result = self.process_pool.apply_async(fn, args, kwargs)
        self.futures.append(async_result)
        self.last_job_id += 1
        return self.LocalJob(self.last_job_id, async_result, self.timeout_seconds)

    def close(self):
        # Block until every submitted job has completed, then shut the pool down.
        for pending in list(self.futures):
            pending.get()
        self.process_pool.close()
class LocalSynchronousExecutor:
    """An executor whose jobs run synchronously, deferred until result() is
    called."""

    class LocalJob:
        """A pending call; executed (anew) on every result() invocation."""

        def __init__(self, job_id: int, fn, *args, **kwargs):
            self.job_id = job_id
            self._fn = fn
            self._args = args
            self._kwargs = kwargs

        def result(self):
            return self._fn(*self._args, **self._kwargs)

        def cancel(self):
            pass

    def __init__(self):
        self.last_job_id = 0

    def submit(self, fn, *args, **kwargs):
        self.last_job_id += 1
        return self.LocalJob(self.last_job_id, fn, *args, **kwargs)
class DummyExecutor:
    """A no-op executor: jobs are accepted but never executed."""

    class DummyJob:
        """Placeholder handle whose result is always None."""

        def __init__(self, job_id: int):
            self.job_id = job_id

        def result(self):
            return None

        def cancel(self):
            pass

    def __init__(self) -> None:
        self.last_job_id = 0

    def submit(self, fn, *args, **kwargs):
        # The callable and its arguments are discarded on purpose.
        del fn, args, kwargs
        self.last_job_id += 1
        return self.DummyJob(self.last_job_id)
| 32.301075 | 88 | 0.610519 |
import logging
import multiprocessing
from collections import deque
from contextlib import contextmanager
from enum import Enum
from itertools import islice
from pathlib import Path
from typing import Optional
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
from submitit import AutoExecutor
# Module-level logger shared by the executor wrappers defined below.
logger = logging.getLogger(__name__)
class Executor(BaseModel):
"""Defines the execution environment for jobs, e.g. a SLURM cluster node or
the local machine. Submit work through the context manager:

>>> with executor.get_executor(logs_dir) as ex:
...     ex.submit(my_job, arg1, arg2)
"""
class Type(str, Enum):
"""Types of execution environments."""
# SLURM cluster scheduler.
SLURM = "slurm"
# Parallel jobs on the current machine.
LOCAL = "local"
# Synchronous jobs on the current machine.
DEBUG = "debug"
# Jobs are accepted but never executed.
NOOP = "noop"
# The execution environment.
type: Type = Field(allow_mutation=False)
# SLURM partition to submit to; only used for Type.SLURM executors.
slurm_partition: Optional[str] = Field(default=None, allow_mutation=False)
# CPU threads per SLURM job, or number of parallel local jobs.
# NOTE(review): ge=1 appears to reject the cpus == -1 "all cores" local case
# handled in get_executor() below — confirm the intended constraint.
cpus: int = Field(default=1, allow_mutation=False, ge=1)
# Maximum wall-time per job, in hours.
timeout_hours: float = Field(default=12, allow_mutation=False, gt=0)
# If True, get_executor() blocks until all submitted jobs finish on exit.
block: bool = Field(default=False, allow_mutation=False)
@contextmanager
def get_executor(self, logs_dir: Path, cpus=None):
"""Yield a WrappedExecutor for the configured backend, waiting on the
submitted jobs at exit when blocking is enabled."""
cpus = cpus or self.cpus
if self.type == self.Type.SLURM:
executor = AutoExecutor(folder=logs_dir)
executor.update_parameters(
timeout_min=int(round(self.timeout_hours * 60)),
nodes=1,
cpus_per_task=cpus,
slurm_partition=self.slurm_partition,
)
name = self.slurm_partition
elif self.type == self.Type.LOCAL:
executor, name = (
LocalParallelExecutor(
cpus=multiprocessing.cpu_count() if cpus == -1 else cpus,
timeout_seconds=int(round(self.timeout_hours * 3600)),
),
"local",
)
elif self.type == self.Type.DEBUG:
executor, name = LocalSynchronousExecutor(), "local"
elif self.type == self.Type.NOOP:
executor, name = DummyExecutor(), "noop"
else:
assert False, f"Unknown executor: {self.type} ({type(self.type).__name__})"
# Record submitted jobs so they can be waited on below.
executor = WrappedExecutor(executor, name=name)
yield executor
if self.type == self.Type.DEBUG or self.block:
wait_on_jobs(
executor.jobs,
executor_name=str(executor),
cancel_on_error=self.type == self.Type.SLURM,
)
# Only backends that define close() (e.g. LocalParallelExecutor) need shutdown.
if hasattr(executor.unwrapped, "close"):
executor.unwrapped.close()
@validator("slurm_partition")
def validate_slurm_partition(cls, value, *, values, **kwargs):
# A partition name is mandatory when targeting SLURM; irrelevant otherwise.
del kwargs
if values["type"] == cls.Type.SLURM:
assert value, f"Must specify a partition for executor: {values['executor']}"
return value
@root_validator
def local_always_blocks(cls, values):
# Local and no-op executors always block.
if values["type"] == cls.Type.LOCAL or values["type"] == cls.Type.NOOP:
values["block"] = True
return values
class Config:
validate_assignment = True
class WrappedExecutor:
def __init__(self, executor, name: str):
self.unwrapped = executor
self.jobs = []
self.name = name
def submit(self, *args, **kwargs):
job = self.unwrapped.submit(*args, **kwargs)
logger.info("Submitting job %s to %s ...", job.job_id, self)
self.jobs.append(job)
return job
def __repr__(self) -> str:
return self.name
def wait_on_jobs(jobs, executor_name: str = "executor", cancel_on_error: bool = True):
njobs = len(jobs)
jobs = deque(jobs)
def cancel_all_jobs(jobs):
print(f"Cancelling {len(jobs)} {executor_name} jobs")
for job in jobs:
try:
job.cancel()
except:
pass
max_num_job_ids_to_show = 8
job_ids = [j.job_id for j in islice(jobs, max_num_job_ids_to_show)]
job_ids = ", ".join(str(x) for x in job_ids)
job_ids = f"job ID: {job_ids}" if len(jobs) == 1 else f"job IDs: {job_ids}"
if len(jobs) > max_num_job_ids_to_show:
job_ids = f"{job_ids} ..."
logger.info(
f"Waiting for {len(jobs)} {executor_name} jobs to complete with {job_ids}"
)
completed = 0
while jobs:
job = jobs.popleft()
if cancel_on_error:
try:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
except Exception as e:
logger.error(f"Caught: {type(e).__name__}: {e}")
jobs.append(job)
return cancel_all_jobs(jobs)
else:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
logger.info("All done.")
class LocalParallelExecutor:
class LocalJob:
def __init__(self, job_id: int, async_result, timeout_seconds: int):
self._async_result = async_result
self.job_id = job_id
self.timeout_seconds = timeout_seconds
def result(self):
return self._async_result.get(timeout=self.timeout_seconds)
def cancel(self):
pass
def __init__(self, cpus: int, timeout_seconds: int):
self.last_job_id = 0
self.process_pool = multiprocessing.Pool(cpus)
self.timeout_seconds = timeout_seconds
self.futures = []
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
self.futures.append(self.process_pool.apply_async(fn, args, kwargs))
return self.LocalJob(
self.last_job_id,
self.futures[-1],
self.timeout_seconds,
)
def close(self):
for future in self.futures:
future.get()
self.process_pool.close()
class LocalSynchronousExecutor:
class LocalJob:
def __init__(self, job_id: int, fn, *args, **kwargs):
self._callback = lambda: fn(*args, **kwargs)
self.job_id = job_id
def result(self):
return self._callback()
def cancel(self):
pass
def __init__(self):
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
return self.LocalJob(self.last_job_id, fn, *args, **kwargs)
class DummyExecutor:
class DummyJob:
def __init__(self, job_id: int):
self.job_id = job_id
def result(self):
return None
def cancel(self):
pass
def __init__(self) -> None:
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
del fn
del args
del kwargs
self.last_job_id += 1
return self.DummyJob(self.last_job_id)
| true | true |
f7feafcf1bad77c660800c13f3f0c48a2aad8193 | 8,554 | py | Python | official/utils/misc/keras_utils.py | shere-khan/models | ab1858370a204f793c9d609b05ff60e001d403e6 | [
"Apache-2.0"
] | null | null | null | official/utils/misc/keras_utils.py | shere-khan/models | ab1858370a204f793c9d609b05ff60e001d403e6 | [
"Apache-2.0"
] | null | null | null | official/utils/misc/keras_utils.py | shere-khan/models | ab1858370a204f793c9d609b05ff60e001d403e6 | [
"Apache-2.0"
] | 1 | 2020-03-07T06:46:16.000Z | 2020-03-07T06:46:16.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the Keras implementations of models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import time
from absl import logging
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import profiler
class BatchTimestamp(object):
"""A structure to store batch time stamp."""
def __init__(self, batch_index, timestamp):
self.batch_index = batch_index
self.timestamp = timestamp
def __repr__(self):
return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, log_steps):
"""Callback for logging performance.
Args:
batch_size: Total batch size.
log_steps: Interval of steps between logging of batch level stats.
"""
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
self.global_steps = 0
# Logs start of step 1 then end of each step based on log_steps interval.
self.timestamp_log = []
# Records the time each epoch takes to run from start to finish of epoch.
self.epoch_runtime_log = []
def on_train_end(self, logs=None):
self.train_finish_time = time.time()
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_batch_begin(self, batch, logs=None):
self.global_steps += 1
if self.global_steps == 1:
self.start_time = time.time()
self.timestamp_log.append(BatchTimestamp(self.global_steps,
self.start_time))
def on_batch_end(self, batch, logs=None):
"""Records elapse time of the batch and calculates examples per second."""
if self.global_steps % self.log_steps == 0:
timestamp = time.time()
elapsed_time = timestamp - self.start_time
examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
self.timestamp_log.append(BatchTimestamp(self.global_steps, timestamp))
logging.info(
"BenchmarkMetric: {'global step':%d, 'time_taken': %f,"
"'examples_per_second': %f}",
self.global_steps, elapsed_time, examples_per_second)
self.start_time = timestamp
def on_epoch_end(self, epoch, logs=None):
epoch_run_time = time.time() - self.epoch_start
self.epoch_runtime_log.append(epoch_run_time)
logging.info(
"BenchmarkMetric: {'epoch':%d, 'time_taken': %f}",
epoch, epoch_run_time)
def get_profiler_callback(model_dir, profile_steps, enable_tensorboard,
steps_per_epoch):
"""Validate profile_steps flag value and return profiler callback."""
profile_steps_error_message = (
'profile_steps must be a comma separated pair of positive integers, '
'specifying the first and last steps to be profiled.'
)
try:
profile_steps = [int(i) for i in profile_steps.split(',')]
except ValueError:
raise ValueError(profile_steps_error_message)
if len(profile_steps) != 2:
raise ValueError(profile_steps_error_message)
start_step, stop_step = profile_steps
if start_step < 0 or start_step > stop_step:
raise ValueError(profile_steps_error_message)
if enable_tensorboard:
logging.warning(
'Both TensorBoard and profiler callbacks are used. Note that the '
'TensorBoard callback profiles the 2nd step (unless otherwise '
'specified). Please make sure the steps profiled by the two callbacks '
'do not overlap.')
return ProfilerCallback(model_dir, start_step, stop_step, steps_per_epoch)
class ProfilerCallback(tf.keras.callbacks.Callback):
"""Save profiles in specified step range to log directory."""
def __init__(self, log_dir, start_step, stop_step, steps_per_epoch):
super(ProfilerCallback, self).__init__()
self.log_dir = log_dir
self.start_step = start_step
self.stop_step = stop_step
self.start_epoch = start_step // steps_per_epoch
self.stop_epoch = stop_step // steps_per_epoch
self.start_step_in_epoch = start_step % steps_per_epoch
self.stop_step_in_epoch = stop_step % steps_per_epoch
self.should_start = False
self.should_stop = False
def on_epoch_begin(self, epoch, logs=None):
if epoch == self.start_epoch:
self.should_start = True
if epoch == self.stop_epoch:
self.should_stop = True
def on_batch_begin(self, batch, logs=None):
if batch == self.start_step_in_epoch and self.should_start:
self.should_start = False
profiler.start()
logging.info('Profiler started at Step %s', self.start_step)
def on_batch_end(self, batch, logs=None):
if batch == self.stop_step_in_epoch and self.should_stop:
self.should_stop = False
results = profiler.stop()
profiler.save(self.log_dir, results)
logging.info(
'Profiler saved profiles for steps between %s and %s to %s',
self.start_step, self.stop_step, self.log_dir)
def set_session_config(enable_eager=False,
enable_xla=False):
"""Sets the session config."""
if is_v2_0():
set_config_v2(enable_xla=enable_xla)
else:
config = get_config_proto_v1(enable_xla=enable_xla)
if enable_eager:
tf.compat.v1.enable_eager_execution(config=config)
else:
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
def get_config_proto_v1(enable_xla=False):
"""Return config proto according to flag settings, or None to use default."""
config = None
if enable_xla:
config = tf.compat.v1.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_2)
# Disable PinToHostOptimizer in grappler when enabling XLA because it causes
# OOM and performance regression.
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
def set_config_v2(enable_xla=False):
"""Config eager context according to flag values using TF 2.0 API."""
if enable_xla:
tf.config.optimizer.set_jit(True)
# Disable PinToHostOptimizer in grappler when enabling XLA because it
# causes OOM and performance regression.
tf.config.optimizer.set_experimental_options(
{'pin_to_host_optimization': False}
)
def is_v2_0():
"""Returns true if using tf 2.0."""
return tf2.enabled()
def set_gpu_thread_mode_and_count(gpu_thread_mode,
datasets_num_private_threads,
num_gpus, per_gpu_thread_count):
"""Set GPU thread mode and count, and adjust dataset threads count."""
cpu_count = multiprocessing.cpu_count()
logging.info('Logical CPU cores: %s', cpu_count)
# Allocate private thread pool for each GPU to schedule and launch kernels
per_gpu_thread_count = per_gpu_thread_count or 2
os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
logging.info('TF_GPU_THREAD_COUNT: %s',
os.environ['TF_GPU_THREAD_COUNT'])
logging.info('TF_GPU_THREAD_MODE: %s',
os.environ['TF_GPU_THREAD_MODE'])
# Limit data preprocessing threadpool to CPU cores minus number of total GPU
# private threads and memory copy threads.
total_gpu_thread_count = per_gpu_thread_count * num_gpus
num_runtime_threads = num_gpus
if not datasets_num_private_threads:
datasets_num_private_threads = min(
cpu_count - total_gpu_thread_count - num_runtime_threads,
num_gpus * 8)
logging.info('Set datasets_num_private_threads to %s',
datasets_num_private_threads)
| 37.030303 | 80 | 0.710779 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import time
from absl import logging
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import profiler
class BatchTimestamp(object):
def __init__(self, batch_index, timestamp):
self.batch_index = batch_index
self.timestamp = timestamp
def __repr__(self):
return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
def __init__(self, batch_size, log_steps):
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
self.global_steps = 0
self.timestamp_log = []
self.epoch_runtime_log = []
def on_train_end(self, logs=None):
self.train_finish_time = time.time()
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_batch_begin(self, batch, logs=None):
self.global_steps += 1
if self.global_steps == 1:
self.start_time = time.time()
self.timestamp_log.append(BatchTimestamp(self.global_steps,
self.start_time))
def on_batch_end(self, batch, logs=None):
if self.global_steps % self.log_steps == 0:
timestamp = time.time()
elapsed_time = timestamp - self.start_time
examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
self.timestamp_log.append(BatchTimestamp(self.global_steps, timestamp))
logging.info(
"BenchmarkMetric: {'global step':%d, 'time_taken': %f,"
"'examples_per_second': %f}",
self.global_steps, elapsed_time, examples_per_second)
self.start_time = timestamp
def on_epoch_end(self, epoch, logs=None):
epoch_run_time = time.time() - self.epoch_start
self.epoch_runtime_log.append(epoch_run_time)
logging.info(
"BenchmarkMetric: {'epoch':%d, 'time_taken': %f}",
epoch, epoch_run_time)
def get_profiler_callback(model_dir, profile_steps, enable_tensorboard,
steps_per_epoch):
profile_steps_error_message = (
'profile_steps must be a comma separated pair of positive integers, '
'specifying the first and last steps to be profiled.'
)
try:
profile_steps = [int(i) for i in profile_steps.split(',')]
except ValueError:
raise ValueError(profile_steps_error_message)
if len(profile_steps) != 2:
raise ValueError(profile_steps_error_message)
start_step, stop_step = profile_steps
if start_step < 0 or start_step > stop_step:
raise ValueError(profile_steps_error_message)
if enable_tensorboard:
logging.warning(
'Both TensorBoard and profiler callbacks are used. Note that the '
'TensorBoard callback profiles the 2nd step (unless otherwise '
'specified). Please make sure the steps profiled by the two callbacks '
'do not overlap.')
return ProfilerCallback(model_dir, start_step, stop_step, steps_per_epoch)
class ProfilerCallback(tf.keras.callbacks.Callback):
def __init__(self, log_dir, start_step, stop_step, steps_per_epoch):
super(ProfilerCallback, self).__init__()
self.log_dir = log_dir
self.start_step = start_step
self.stop_step = stop_step
self.start_epoch = start_step // steps_per_epoch
self.stop_epoch = stop_step // steps_per_epoch
self.start_step_in_epoch = start_step % steps_per_epoch
self.stop_step_in_epoch = stop_step % steps_per_epoch
self.should_start = False
self.should_stop = False
def on_epoch_begin(self, epoch, logs=None):
if epoch == self.start_epoch:
self.should_start = True
if epoch == self.stop_epoch:
self.should_stop = True
def on_batch_begin(self, batch, logs=None):
if batch == self.start_step_in_epoch and self.should_start:
self.should_start = False
profiler.start()
logging.info('Profiler started at Step %s', self.start_step)
def on_batch_end(self, batch, logs=None):
if batch == self.stop_step_in_epoch and self.should_stop:
self.should_stop = False
results = profiler.stop()
profiler.save(self.log_dir, results)
logging.info(
'Profiler saved profiles for steps between %s and %s to %s',
self.start_step, self.stop_step, self.log_dir)
def set_session_config(enable_eager=False,
enable_xla=False):
if is_v2_0():
set_config_v2(enable_xla=enable_xla)
else:
config = get_config_proto_v1(enable_xla=enable_xla)
if enable_eager:
tf.compat.v1.enable_eager_execution(config=config)
else:
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
def get_config_proto_v1(enable_xla=False):
config = None
if enable_xla:
config = tf.compat.v1.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_2)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
def set_config_v2(enable_xla=False):
if enable_xla:
tf.config.optimizer.set_jit(True)
tf.config.optimizer.set_experimental_options(
{'pin_to_host_optimization': False}
)
def is_v2_0():
return tf2.enabled()
def set_gpu_thread_mode_and_count(gpu_thread_mode,
datasets_num_private_threads,
num_gpus, per_gpu_thread_count):
cpu_count = multiprocessing.cpu_count()
logging.info('Logical CPU cores: %s', cpu_count)
per_gpu_thread_count = per_gpu_thread_count or 2
os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
logging.info('TF_GPU_THREAD_COUNT: %s',
os.environ['TF_GPU_THREAD_COUNT'])
logging.info('TF_GPU_THREAD_MODE: %s',
os.environ['TF_GPU_THREAD_MODE'])
total_gpu_thread_count = per_gpu_thread_count * num_gpus
num_runtime_threads = num_gpus
if not datasets_num_private_threads:
datasets_num_private_threads = min(
cpu_count - total_gpu_thread_count - num_runtime_threads,
num_gpus * 8)
logging.info('Set datasets_num_private_threads to %s',
datasets_num_private_threads)
| true | true |
f7feb0e2e9881b591a8395aad6147bfe159bdcf9 | 1,202 | py | Python | setup.py | axant/tgapp-stroller2 | 3866335cf4d69f306edd8055dd91b3188b53c868 | [
"MIT"
] | null | null | null | setup.py | axant/tgapp-stroller2 | 3866335cf4d69f306edd8055dd91b3188b53c868 | [
"MIT"
] | null | null | null | setup.py | axant/tgapp-stroller2 | 3866335cf4d69f306edd8055dd91b3188b53c868 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys, os
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
install_requires=[
"TurboGears2 >= 2.3.3",
"tgext.pluggable",
"tw2.core",
"tw2.forms",
"tgext.ecommerce",
"axf",
"tgext.datahelpers",
"filedepot",
]
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst')).read()
except IOError:
README = ''
setup(
name='tgapp-stroller2',
version='0.1',
description='',
long_description=README,
author='',
author_email='',
#url='',
keywords='turbogears2.application',
packages=find_packages(exclude=['ez_setup']),
install_requires=install_requires,
include_package_data=True,
package_data={'tgapp.stroller2': ['i18n/*/LC_MESSAGES/*.mo',
'templates/*/*',
'public/*/*']},
entry_points="""
""",
zip_safe=False,
dependency_links=[
"https://github.com/axant/tgext.ecommerce/archive/master.zip#egg=tgext.ecommerce",
],
)
| 24.04 | 90 | 0.610649 |
import sys, os
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
install_requires=[
"TurboGears2 >= 2.3.3",
"tgext.pluggable",
"tw2.core",
"tw2.forms",
"tgext.ecommerce",
"axf",
"tgext.datahelpers",
"filedepot",
]
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst')).read()
except IOError:
README = ''
setup(
name='tgapp-stroller2',
version='0.1',
description='',
long_description=README,
author='',
author_email='',
keywords='turbogears2.application',
packages=find_packages(exclude=['ez_setup']),
install_requires=install_requires,
include_package_data=True,
package_data={'tgapp.stroller2': ['i18n/*/LC_MESSAGES/*.mo',
'templates/*/*',
'public/*/*']},
entry_points="""
""",
zip_safe=False,
dependency_links=[
"https://github.com/axant/tgext.ecommerce/archive/master.zip#egg=tgext.ecommerce",
],
)
| true | true |
f7feb1f494afa560b1f20e3ca0b86d74720e8449 | 508 | py | Python | Algorithms/Search Insert Position.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | Algorithms/Search Insert Position.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | Algorithms/Search Insert Position.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
first = 0
last = len(nums)-1
mid = (first+last)//2
while first<=last:
if nums[mid]<target:
first = mid +1
elif nums[mid]>target:
last = mid -1
else:
return mid
mid = (first+last)//2
return first
| 22.086957 | 41 | 0.423228 | class Solution(object):
def searchInsert(self, nums, target):
first = 0
last = len(nums)-1
mid = (first+last)//2
while first<=last:
if nums[mid]<target:
first = mid +1
elif nums[mid]>target:
last = mid -1
else:
return mid
mid = (first+last)//2
return first
| true | true |
f7feb25310c0b8253334d4a1506e1fc1731398c2 | 37,694 | py | Python | numpy/lib/histograms.py | MilesCranmer/numpy | 7a1ee13ee28083c484a42a657067570773bcddbe | [
"BSD-3-Clause"
] | null | null | null | numpy/lib/histograms.py | MilesCranmer/numpy | 7a1ee13ee28083c484a42a657067570773bcddbe | [
"BSD-3-Clause"
] | 8 | 2021-10-07T10:59:49.000Z | 2021-11-22T20:06:49.000Z | numpy/lib/histograms.py | MilesCranmer/numpy | 7a1ee13ee28083c484a42a657067570773bcddbe | [
"BSD-3-Clause"
] | null | null | null | """
Histogram-related functions
"""
import contextlib
import functools
import operator
import warnings
import numpy as np
from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _ptp(x):
"""Peak-to-peak value of x.
This implementation avoids the problem of signed integer arrays having a
peak-to-peak value that cannot be represented with the array's data type.
This function returns an unsigned value for signed integer arrays.
"""
return _unsigned_subtract(x.max(), x.min())
def _hist_bin_sqrt(x, range):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return _ptp(x) / np.sqrt(x.size)
def _hist_bin_sturges(x, range):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return _ptp(x) / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x, range):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x, range):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_stone(x, range):
"""
Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
This paper by Stone appears to be the origination of this rule.
http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
range : (float, float)
The lower and upper range of the bins.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
n = x.size
ptp_x = _ptp(x)
if n <= 1 or ptp_x == 0:
return 0
def jhat(nbins):
hh = ptp_x / nbins
p_k = np.histogram(x, bins=nbins, range=range)[0] / n
return (2 - (n + 1) * p_k.dot(p_k)) / hh
nbins_upper_bound = max(100, int(np.sqrt(n)))
nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
if nbins == nbins_upper_bound:
warnings.warn("The number of bins estimated may be suboptimal.",
RuntimeWarning, stacklevel=3)
return ptp_x / nbins
def _hist_bin_doane(x, range):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return _ptp(x) / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 0 for the bin width.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x, range):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
If the bin width from the FD estimator is 0, the Sturges estimator is used.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
and is the default in the R language. This method gives good off-the-shelf
behaviour.
.. versionchanged:: 1.15.0
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
If the IQR is 0, it's unlikely any variance-based estimators will be of
use, so we revert to the Sturges estimator, which only uses the size of the
dataset in its calculation.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
fd_bw = _hist_bin_fd(x, range)
sturges_bw = _hist_bin_sturges(x, range)
del range # unused
if fd_bw:
return min(fd_bw, sturges_bw)
else:
# limited variance, so we return a len dependent bw estimator
return sturges_bw
# Private dict initialized at module load time
_hist_bin_selectors = {'stone': _hist_bin_stone,
'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
# Ensure that the array is a "subtractable" dtype
if a.dtype == np.bool_:
warnings.warn("Converting input from {} to {} for compatibility."
.format(a.dtype, np.uint8),
RuntimeWarning, stacklevel=3)
a = a.astype(np.uint8)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
def _unsigned_subtract(a, b):
"""
Subtract two values where a >= b, and produce an unsigned result
This is needed when finding the difference between the upper and lower
bound of an int16 histogram
"""
# coerce to a single type
signed_to_unsigned = {
np.byte: np.ubyte,
np.short: np.ushort,
np.intc: np.uintc,
np.int_: np.uint,
np.longlong: np.ulonglong
}
dt = np.result_type(a, b)
try:
dt = signed_to_unsigned[dt.type]
except KeyError:
return np.subtract(a, b, dtype=dt)
else:
# we know the inputs are integers, and we are deliberately casting
# signed to unsigned
return np.subtract(a, b, casting='unsafe', dtype=dt)
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
Parameters
==========
a : ndarray
Ravelled data array
bins, range
Forwarded arguments from `histogram`.
weights : ndarray, optional
Ravelled weights array, or None
Returns
=======
bin_edges : ndarray
Array of bin edges
uniform_bins : (Number, Number, int):
The upper bound, lowerbound, and number of bins, used in the optimized
implementation of `histogram` that works on uniform bins.
"""
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
if isinstance(bins, str):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
"{!r} is not a valid estimator for `bins`".format(bin_name))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
first_edge, last_edge = _get_outer_edges(a, range)
# truncate the range if needed
if range is not None:
keep = (a >= first_edge)
keep &= (a <= last_edge)
if not np.logical_and.reduce(keep):
a = a[keep]
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
if width:
n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif np.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError as e:
raise TypeError(
'`bins` must be an integer, a string, or an array') from e
if n_equal_bins < 1:
raise ValueError('`bins` must be positive, when an integer')
first_edge, last_edge = _get_outer_edges(a, range)
elif np.ndim(bins) == 1:
bin_edges = np.asarray(bins)
if np.any(bin_edges[:-1] > bin_edges[1:]):
raise ValueError(
'`bins` must increase monotonically, when an array')
else:
raise ValueError('`bins` must be 1d, when an array')
if n_equal_bins is not None:
# gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, a)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# bin edges must be computed
bin_edges = np.linspace(
first_edge, last_edge, n_equal_bins + 1,
endpoint=True, dtype=bin_type)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return bin_edges, None
def _search_sorted_inclusive(a, v):
"""
Like `searchsorted`, but where the last item in `v` is placed on the right.
In the context of a histogram, this makes the last bin edge inclusive
"""
return np.concatenate((
a.searchsorted(v[:-1], 'left'),
a.searchsorted(v[-1:], 'right')
))
def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
    r"""
    Function to calculate only the edges of the bins used by the `histogram`
    function.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width bins in
        the given range (10, by default). If `bins` is a sequence, it
        defines the bin edges, including the rightmost edge, allowing for
        non-uniform bin widths.

        If `bins` is a string, the optimal bin width is estimated from the
        data that falls within the requested range, and the number of bins
        is computed to fill the entire range. Supported estimators:

        'auto'
            Maximum of the 'sturges' and 'fd' estimators; good all-around
            performance.
        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.
        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.
        'scott'
            Less robust estimator that takes into account data variability
            and data size.
        'stone'
            Based on leave-one-out cross-validation of the integrated
            squared error; a generalization of Scott's rule.
        'rice'
            Accounts only for data size; commonly overestimates the number
            of bins required.
        'sturges'
            R's default method; only accounts for data size and is only
            optimal for gaussian data.
        'sqrt'
            Square root (of data size) estimator, used by Excel and other
            programs for its speed and simplicity.

        Automated bin selection is not supported for weighted data.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range is
        simply ``(a.min(), a.max())``. Values outside the range are
        ignored; the first element must be less than or equal to the
        second. `range` affects the automatic bin computation as well.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Currently not used
        by any of the bin estimators, but may be in the future.

    Returns
    -------
    bin_edges : array of dtype float
        The edges to pass into `histogram`.

    See Also
    --------
    histogram

    Examples
    --------
    >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
    >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
    array([0.  , 0.25, 0.5 , 0.75, 1.  ])
    >>> np.histogram_bin_edges(arr, bins=2)
    array([0. , 2.5, 5. ])

    An array of pre-computed bins is passed through unmodified, so one
    set of edges can be shared across several histograms:

    >>> np.histogram_bin_edges(arr, [1, 2])
    array([1, 2])
    >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
    >>> shared_bins
    array([0., 1., 2., 3., 4., 5.])
    """
    # Flatten the data (and weights), then delegate the actual edge
    # computation to the shared helper used by `histogram` itself.
    a, weights = _ravel_and_check_weights(a, weights)
    edges, _ = _get_bin_edges(a, bins, range, weights)
    return edges
def _histogram_dispatcher(
a, bins=None, range=None, density=None, weights=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, density=None, weights=None):
    r"""
    Compute the histogram of a dataset.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines a monotonically increasing array of bin edges,
        including the rightmost edge, allowing for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string, it defines the method used to calculate the
        optimal bin width, as defined by `histogram_bin_edges`.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words,
    if `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
    *includes* 4.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist * np.diff(bin_edges))
    1.0

    .. versionadded:: 1.11.0

    Automated Bin Selection Methods example, using 2 peak random data
    with 2000 points:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.RandomState(10)  # deterministic random data
    >>> a = np.hstack((rng.normal(size=1000),
    ...                rng.normal(loc=5, scale=2, size=1000)))
    >>> _ = plt.hist(a, bins='auto')  # arguments are passed to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    Text(0.5, 1.0, "Histogram with 'auto' bins")
    >>> plt.show()
    """
    a, weights = _ravel_and_check_weights(a, weights)
    # uniform_bins is (first_edge, last_edge, n_bins) when the edges are
    # equally spaced (enables the bincount fast path below), else None.
    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype
    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536
    # The fast path uses bincount, but that only works for certain types
    # of weight
    simple_weights = (
        weights is None or
        np.can_cast(weights.dtype, np.double) or
        np.can_cast(weights.dtype, complex)
    )
    if uniform_bins is not None and simple_weights:
        # Fast algorithm for equal bins
        # We now convert values of a to bin indices, under the assumption of
        # equal bin widths (which is valid here).
        first_edge, last_edge, n_equal_bins = uniform_bins
        # Initialize empty histogram
        n = np.zeros(n_equal_bins, ntype)
        # Pre-compute histogram scaling factor
        norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in _range(0, len(a), BLOCK):
            tmp_a = a[i:i+BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]
            # Only include values in the right range
            keep = (tmp_a >= first_edge)
            keep &= (tmp_a <= last_edge)
            if not np.logical_and.reduce(keep):
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]
            # This cast ensures no type promotions occur below, which gh-10322
            # make unpredictable. Getting it wrong leads to precision errors
            # like gh-8123.
            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
            # Compute the bin indices, and for values that lie exactly on
            # last_edge we need to subtract one
            f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
            indices = f_indices.astype(np.intp)
            indices[indices == n_equal_bins] -= 1
            # The index computation is not guaranteed to give exactly
            # consistent results within ~1 ULP of the bin edges.
            decrement = tmp_a < bin_edges[indices]
            indices[decrement] -= 1
            # The last bin includes the right edge. The other bins do not.
            increment = ((tmp_a >= bin_edges[indices + 1])
                         & (indices != n_equal_bins - 1))
            indices[increment] += 1
            # We now compute the histogram using bincount
            if ntype.kind == 'c':
                # Complex weights: accumulate real and imaginary parts
                # separately, since bincount only handles real weights.
                n.real += np.bincount(indices, weights=tmp_w.real,
                                      minlength=n_equal_bins)
                n.imag += np.bincount(indices, weights=tmp_w.imag,
                                      minlength=n_equal_bins)
            else:
                n += np.bincount(indices, weights=tmp_w,
                                 minlength=n_equal_bins).astype(ntype)
    else:
        # Compute via cumulative histogram
        cum_n = np.zeros(bin_edges.shape, ntype)
        if weights is None:
            for i in _range(0, len(a), BLOCK):
                sa = np.sort(a[i:i+BLOCK])
                cum_n += _search_sorted_inclusive(sa, bin_edges)
        else:
            zero = np.zeros(1, dtype=ntype)
            for i in _range(0, len(a), BLOCK):
                tmp_a = a[i:i+BLOCK]
                tmp_w = weights[i:i+BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                cw = np.concatenate((zero, sw.cumsum()))
                bin_index = _search_sorted_inclusive(sa, bin_edges)
                cum_n += cw[bin_index]
        n = np.diff(cum_n)
    if density:
        # Normalize by bin width and total count so the integral over the
        # range is 1.
        db = np.array(np.diff(bin_edges), float)
        return n/db/n.sum(), bin_edges
    return n, bin_edges
def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
weights=None):
if hasattr(sample, 'shape'): # same condition as used in histogramdd
yield sample
else:
yield from sample
with contextlib.suppress(TypeError):
yield from bins
yield weights
@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, density=None, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : (N, D) array, or (D, N) array_like
        The data to be histogrammed.

        Note the unusual interpretation of sample when an array_like:

        * When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
        * When an array_like, each element is the list of values for single
          coordinate - such as ``histogramdd((X, Y, Z))``.

        The first form should be preferred.
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the monotonically increasing bin
          edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).
    range : sequence, optional
        A sequence of length D, each an optional (lower, upper) tuple giving
        the outer bin edges to be used if the edges are not given explicitly in
        `bins`.
        An entry of None in the sequence results in the minimum and maximum
        values being used for the corresponding dimension.
        The default, None, is equivalent to passing a tuple of D None values.
    density : bool, optional
        If False, the default, returns the number of samples in each bin.
        If True, returns the probability *density* function at the bin,
        ``bin_count / sample_count / bin_volume``.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if density is True. If density is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See density and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)
    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape
    nbin = np.empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = np.asarray(weights)
    # Coerce `bins` into one specification per dimension.
    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]
    # normalize the range argument
    if range is None:
        range = (None,) * D
    elif len(range) != D:
        raise ValueError('range argument must have one entry per dimension')
    # Create edge arrays
    for i in _range(D):
        if np.ndim(bins[i]) == 0:
            if bins[i] < 1:
                raise ValueError(
                    '`bins[{}]` must be positive, when an integer'.format(i))
            smin, smax = _get_outer_edges(sample[:,i], range[i])
            try:
                n = operator.index(bins[i])
            except TypeError as e:
                raise TypeError(
                    "`bins[{}]` must be an integer, when a scalar".format(i)
                ) from e
            edges[i] = np.linspace(smin, smax, n + 1)
        elif np.ndim(bins[i]) == 1:
            edges[i] = np.asarray(bins[i])
            if np.any(edges[i][:-1] > edges[i][1:]):
                raise ValueError(
                    '`bins[{}]` must be monotonically increasing, when an array'
                    .format(i))
        else:
            raise ValueError(
                '`bins[{}]` must be a scalar or 1d array'.format(i))
        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
        dedges[i] = np.diff(edges[i])
    # Compute the bin number each sample falls into.
    Ncount = tuple(
        # avoid np.digitize to work around gh-11022
        np.searchsorted(edges[i], sample[:, i], side='right')
        for i in _range(D)
    )
    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in _range(D):
        # Find which points are on the rightmost edge.
        on_edge = (sample[:, i] == edges[i][-1])
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1
    # Compute the sample indices in the flattened histogram matrix.
    # This raises an error if the array is too large.
    xy = np.ravel_multi_index(Ncount, nbin)
    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    hist = np.bincount(xy, weights, minlength=nbin.prod())
    # Shape into a proper matrix
    hist = hist.reshape(nbin)
    # This preserves the (bad) behavior observed in gh-7845, for now.
    hist = hist.astype(float, casting='safe')
    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*(slice(1, -1),)
    hist = hist[core]
    if density:
        # calculate the probability density function
        s = hist.sum()
        for i in _range(D):
            shape = np.ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s
    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
| 35.195145 | 104 | 0.612803 | import contextlib
import functools
import operator
import warnings
import numpy as np
from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
_range = range
def _ptp(x):
    """Peak-to-peak range of ``x``, computed with an overflow-safe
    (unsigned) subtraction so signed-integer inputs cannot wrap."""
    lo, hi = x.min(), x.max()
    return _unsigned_subtract(hi, lo)
def _hist_bin_sqrt(x, range):
    """Square-root bin-width estimator: data range divided by ``sqrt(n)``."""
    del range  # unused; kept so all estimators share one signature
    n_bins = np.sqrt(x.size)
    return _ptp(x) / n_bins
def _hist_bin_sturges(x, range):
    """Sturges bin-width estimator: data range / (log2(n) + 1)."""
    del range  # unused; kept so all estimators share one signature
    n_bins = np.log2(x.size) + 1.0
    return _ptp(x) / n_bins
def _hist_bin_rice(x, range):
    """Rice bin-width estimator: data range / (2 * n**(1/3))."""
    del range  # unused; kept so all estimators share one signature
    n_bins = 2.0 * x.size ** (1.0 / 3)
    return _ptp(x) / n_bins
def _hist_bin_scott(x, range):
del range
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_stone(x, range):
    """
    Stone's bin estimator: pick the bin count minimizing a leave-one-out
    cross-validation estimate of the integrated squared error, then return
    the corresponding bin width.  Candidates run from 1 up to
    ``max(100, sqrt(n))`` bins.  Returns 0 for degenerate input (fewer than
    two points, or zero peak-to-peak range).
    """
    n = x.size
    ptp_x = _ptp(x)
    if n <= 1 or ptp_x == 0:
        return 0
    def jhat(nbins):
        # Cross-validation risk estimate for this candidate bin count.
        hh = ptp_x / nbins
        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
        return (2 - (n + 1) * p_k.dot(p_k)) / hh
    nbins_upper_bound = max(100, int(np.sqrt(n)))
    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
    if nbins == nbins_upper_bound:
        # Hitting the search cap suggests the true optimum lies beyond it.
        warnings.warn("The number of bins estimated may be suboptimal.",
                      RuntimeWarning, stacklevel=3)
    return ptp_x / nbins
def _hist_bin_doane(x, range):
del range
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return _ptp(x) / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x, range):
del range
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x, range):
    """Smaller of the Freedman-Diaconis and Sturges bin widths, falling
    back to Sturges alone when the FD width is zero (degenerate IQR)."""
    fd_bw = _hist_bin_fd(x, range)
    sturges_bw = _hist_bin_sturges(x, range)
    del range
    return min(fd_bw, sturges_bw) if fd_bw else sturges_bw
# Maps the `bins` string accepted by `histogram`/`histogram_bin_edges`
# to the bin-width estimator function implementing it.
_hist_bin_selectors = {'stone': _hist_bin_stone,
                       'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}
def _ravel_and_check_weights(a, weights):
a = np.asarray(a)
if a.dtype == np.bool_:
warnings.warn("Converting input from {} to {} for compatibility."
.format(a.dtype, np.uint8),
RuntimeWarning, stacklevel=3)
a = a.astype(np.uint8)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
def _unsigned_subtract(a, b):
# coerce to a single type
signed_to_unsigned = {
np.byte: np.ubyte,
np.short: np.ushort,
np.intc: np.uintc,
np.int_: np.uint,
np.longlong: np.ulonglong
}
dt = np.result_type(a, b)
try:
dt = signed_to_unsigned[dt.type]
except KeyError:
return np.subtract(a, b, dtype=dt)
else:
# we know the inputs are integers, and we are deliberately casting
# signed to unsigned
return np.subtract(a, b, casting='unsafe', dtype=dt)
def _get_bin_edges(a, bins, range, weights):
    """
    Compute the bin edges used internally by `histogram`.

    Parameters
    ----------
    a : ndarray
        Ravelled data array.
    bins : int, str or 1d array_like
        Forwarded from `histogram`: a fixed bin count, the name of an
        automatic estimator, or an explicit edge array.
    range : (float, float) or None
        Forwarded lower/upper outer-edge specification.
    weights : ndarray or None
        Ravelled weights, only used here to reject weighted data when an
        automatic estimator is requested.

    Returns
    -------
    bin_edges : ndarray
        The edges of each bin.
    uniform_bins : (first_edge, last_edge, n_equal_bins) or None
        Populated when the edges are equally spaced, enabling the fast
        path in `histogram`; None for explicit (possibly uneven) edges.
    """
    # parse the overloaded bins argument
    n_equal_bins = None
    bin_edges = None
    if isinstance(bins, str):
        bin_name = bins
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
        if bin_name not in _hist_bin_selectors:
            raise ValueError(
                "{!r} is not a valid estimator for `bins`".format(bin_name))
        if weights is not None:
            raise TypeError("Automated estimation of the number of "
                            "bins is not supported for weighted data")
        first_edge, last_edge = _get_outer_edges(a, range)
        # truncate the range if needed
        if range is not None:
            keep = (a >= first_edge)
            keep &= (a <= last_edge)
            if not np.logical_and.reduce(keep):
                a = a[keep]
        if a.size == 0:
            n_equal_bins = 1
        else:
            # Do not call selectors on empty arrays
            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
            if width:
                n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
            else:
                # Width can be zero for some estimators, e.g. FD when
                # the IQR of the data is zero.
                n_equal_bins = 1
    elif np.ndim(bins) == 0:
        try:
            n_equal_bins = operator.index(bins)
        except TypeError as e:
            raise TypeError(
                '`bins` must be an integer, a string, or an array') from e
        if n_equal_bins < 1:
            raise ValueError('`bins` must be positive, when an integer')
        first_edge, last_edge = _get_outer_edges(a, range)
    elif np.ndim(bins) == 1:
        bin_edges = np.asarray(bins)
        if np.any(bin_edges[:-1] > bin_edges[1:]):
            raise ValueError(
                '`bins` must increase monotonically, when an array')
    else:
        raise ValueError('`bins` must be 1d, when an array')
    if n_equal_bins is not None:
        # gh-10322 means that type resolution rules are dependent on array
        # shapes. To avoid this causing problems, we pick a type now and stick
        # with it throughout.
        bin_type = np.result_type(first_edge, last_edge, a)
        if np.issubdtype(bin_type, np.integer):
            bin_type = np.result_type(bin_type, float)
        # bin edges must be computed
        bin_edges = np.linspace(
            first_edge, last_edge, n_equal_bins + 1,
            endpoint=True, dtype=bin_type)
        return bin_edges, (first_edge, last_edge, n_equal_bins)
    else:
        return bin_edges, None
def _search_sorted_inclusive(a, v):
return np.concatenate((
a.searchsorted(v[:-1], 'left'),
a.searchsorted(v[-1:], 'right')
))
def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, _ = _get_bin_edges(a, bins, range, weights)
return bin_edges
def _histogram_dispatcher(
a, bins=None, range=None, density=None, weights=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, density=None, weights=None):
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
# The fast path uses bincount, but that only works for certain types
# of weight
simple_weights = (
weights is None or
np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, complex)
)
if uniform_bins is not None and simple_weights:
# Fast algorithm for equal bins
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
first_edge, last_edge, n_equal_bins = uniform_bins
# Initialize empty histogram
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= first_edge)
keep &= (tmp_a <= last_edge)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
# This cast ensures no type promotions occur below, which gh-10322
# make unpredictable. Getting it wrong leads to precision errors
# like gh-8123.
tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = ((tmp_a >= bin_edges[indices + 1])
& (indices != n_equal_bins - 1))
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real,
minlength=n_equal_bins)
n.imag += np.bincount(indices, weights=tmp_w.imag,
minlength=n_equal_bins)
else:
n += np.bincount(indices, weights=tmp_w,
minlength=n_equal_bins).astype(ntype)
else:
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
for i in _range(0, len(a), BLOCK):
sa = np.sort(a[i:i+BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate((zero, sw.cumsum()))
bin_index = _search_sorted_inclusive(sa, bin_edges)
cum_n += cw[bin_index]
n = np.diff(cum_n)
if density:
db = np.array(np.diff(bin_edges), float)
return n/db/n.sum(), bin_edges
return n, bin_edges
def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
weights=None):
if hasattr(sample, 'shape'): # same condition as used in histogramdd
yield sample
else:
yield from sample
with contextlib.suppress(TypeError):
yield from bins
yield weights
@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, density=None, weights=None):
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = np.asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# normalize the range argument
if range is None:
range = (None,) * D
elif len(range) != D:
raise ValueError('range argument must have one entry per dimension')
# Create edge arrays
for i in _range(D):
if np.ndim(bins[i]) == 0:
if bins[i] < 1:
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
try:
n = operator.index(bins[i])
except TypeError as e:
raise TypeError(
"`bins[{}]` must be an integer, when a scalar".format(i)
) from e
edges[i] = np.linspace(smin, smax, n + 1)
elif np.ndim(bins[i]) == 1:
edges[i] = np.asarray(bins[i])
if np.any(edges[i][:-1] > edges[i][1:]):
raise ValueError(
'`bins[{}]` must be monotonically increasing, when an array'
.format(i))
else:
raise ValueError(
'`bins[{}]` must be a scalar or 1d array'.format(i))
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
dedges[i] = np.diff(edges[i])
# Compute the bin number each sample falls into.
Ncount = tuple(
# avoid np.digitize to work around gh-11022
np.searchsorted(edges[i], sample[:, i], side='right')
for i in _range(D)
)
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
# Find which points are on the rightmost edge.
on_edge = (sample[:, i] == edges[i][-1])
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
xy = np.ravel_multi_index(Ncount, nbin)
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
hist = np.bincount(xy, weights, minlength=nbin.prod())
# Shape into a proper matrix
hist = hist.reshape(nbin)
# This preserves the (bad) behavior observed in gh-7845, for now.
hist = hist.astype(float, casting='safe')
# Remove outliers (indices 0 and -1 for each dimension).
core = D*(slice(1, -1),)
hist = hist[core]
if density:
# calculate the probability density function
s = hist.sum()
for i in _range(D):
shape = np.ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
| true | true |
f7feb295357776d5237bdbb075cba1587befd7ba | 1,456 | py | Python | CSD_modeling/extract_arc.py | seanandrews/DSHARP_CPDs | 40d8f02945e0d412c5c912b050d4a3d8c6dbbfa2 | [
"MIT"
] | 1 | 2020-05-04T15:55:03.000Z | 2020-05-04T15:55:03.000Z | CSD_modeling/extract_arc.py | seanandrews/DSHARP_CPDs | 40d8f02945e0d412c5c912b050d4a3d8c6dbbfa2 | [
"MIT"
] | null | null | null | CSD_modeling/extract_arc.py | seanandrews/DSHARP_CPDs | 40d8f02945e0d412c5c912b050d4a3d8c6dbbfa2 | [
"MIT"
] | 1 | 2020-09-22T19:04:48.000Z | 2020-09-22T19:04:48.000Z | import os
import numpy as np
execfile('ExportMS.py')
# Remove the "arc" (asymmetric) emission model from an observed measurement
# set (MS), leaving a symmetrized dataset for visibility-domain modeling.
# This script runs inside CASA: importfits / ft / uvsub / split are CASA
# tasks, and ExportMS comes from the execfile'd helper script above.
# read which disk this is about (single disk name written by a driver script)
target = str(np.loadtxt('whichdisk.txt', dtype='str'))
# load FITS model of the arc (the modified CLEAN model) into a CASA image
importfits(fitsimage='data/'+target+'_data_arc.cleanmodel.fits',
           imagename='data/'+target+'_data_arc.cleanmodel', overwrite=True)
# Fourier transform the arc model onto the same (u,v) tracks as in the data
# MS (make a copy first!), and store them in the 'MODEL_DATA' column
os.system('rm -rf data/'+target+'_data.ms*')
os.system('cp -r data/'+target+'_continuum_spavg_tbin30s.ms data/temp_' + \
          target+'.ms')
# NOTE(review): importfits above writes '..._data_arc.cleanmodel' but ft()
# reads '..._data_arc.model' -- confirm that image exists on disk or whether
# one of the two names is a typo.
ft(vis='data/temp_'+target+'.ms',
   model='data/'+target+'_data_arc.model', usescratch=True)
# Now subtract the FT of the arc model from the observed visibilities; the
# result is stored in the 'CORRECTED_DATA' column
uvsub(vis='data/temp_'+target+'.ms')
# Split out the 'CORRECTED_DATA' visibilities into their own "arc-less" MS
os.system('rm -rf data/'+target+'_data_symm.ms*')
split(vis='data/temp_'+target+'.ms', outputvis='data/'+target+'_data_symm.ms',
      datacolumn='corrected')
# Export the "arc-less" MS into npz format for frankenstein modeling
ExportMS('data/'+target+'_data_symm.ms')
# Clean up: drop the scratch MS and rename the exported npz to its final name
# (ExportMS appends a '_spavg' suffix that we strip here).
os.system('rm -rf data/temp_'+target+'.ms*')
os.system('rm -rf data/'+target+'_data_symm_spavg.ms*')
os.system('mv data/'+target+'_data_symm_spavg.vis.npz ' + \
          'data/'+target+'_data_symm.vis.npz')
| 39.351351 | 79 | 0.707418 | import os
import numpy as np
execfile('ExportMS.py')
target = str(np.loadtxt('whichdisk.txt', dtype='str'))
importfits(fitsimage='data/'+target+'_data_arc.cleanmodel.fits',
imagename='data/'+target+'_data_arc.cleanmodel', overwrite=True)
os.system('rm -rf data/'+target+'_data.ms*')
os.system('cp -r data/'+target+'_continuum_spavg_tbin30s.ms data/temp_' + \
target+'.ms')
ft(vis='data/temp_'+target+'.ms',
model='data/'+target+'_data_arc.model', usescratch=True)
uvsub(vis='data/temp_'+target+'.ms')
os.system('rm -rf data/'+target+'_data_symm.ms*')
split(vis='data/temp_'+target+'.ms', outputvis='data/'+target+'_data_symm.ms',
datacolumn='corrected')
ExportMS('data/'+target+'_data_symm.ms')
os.system('rm -rf data/temp_'+target+'.ms*')
os.system('rm -rf data/'+target+'_data_symm_spavg.ms*')
os.system('mv data/'+target+'_data_symm_spavg.vis.npz ' + \
'data/'+target+'_data_symm.vis.npz')
| true | true |
f7feb45b8caec3d7483b4842c53a356fbd2df5e1 | 3,812 | py | Python | example/tlo-EX-Data.py | sspickle/genlatex | d5fd86d4415c85c2f2933981e0e65f9df9f3cda5 | [
"MIT"
] | null | null | null | example/tlo-EX-Data.py | sspickle/genlatex | d5fd86d4415c85c2f2933981e0e65f9df9f3cda5 | [
"MIT"
] | null | null | null | example/tlo-EX-Data.py | sspickle/genlatex | d5fd86d4415c85c2f2933981e0e65f9df9f3cda5 | [
"MIT"
] | 1 | 2021-08-01T17:12:18.000Z | 2021-08-01T17:12:18.000Z |
import random
import vpython as vp
import numpy as np
from genlatex import latex_float, latex_vec
data = []
"""
naming convention.
tlo: this is the prefix to all the templates, and to the .tex outputs
e.g., if you're working on TLO-3 and this is the third quiz you've
given, you might say "tlo = 'TLO-3v3' and then the template would
be 'TLO-3v1-template.txt' and the .tex files would be:
'TLO-3v1-XX.tex' and
'TLO-3v1-soln-XX.tex'
where 'XX' would be a quiz version number based on the random numbers
used to create the problem/solution pairs.
"""
# Naming stem for this quiz; the template and all output filenames derive
# from it (see the module docstring above for the convention).
tlo='TLO-EX'
templateFile = tlo + '-template.txt'
quizFilenameTemplate = tlo + '-{:}.tex'
quizSolnFilenameTemplate = tlo + '-soln-{:}.tex'
# Physical constants used by the generator.
qe = 1.6e-19  # elementary charge [C]; not referenced elsewhere in this file
# NOTE(review): the standard gravitational constant is 6.674e-11 N m^2/kg^2;
# 1.67e-11 looks like a typo, but the answer keys in getTemplateValues are
# computed with this same value, so problems and solutions stay internally
# consistent -- confirm before changing.
G = 1.67e-11
#
# --- this is where the random values are set up --
#
rint = random.randint   # shorthand aliases used throughout the generator
uni = random.uniform
def getTemplateValues(numSamples, seeds):
    """
    Build one set of template values (problem data + answer key) per seed.

    input:
        numSamples: Integer, number of random Versions to generate.
        seeds: a list of integers to act as random number seeds.
            This allows you to re-create the same values reproducibly.
            If you send in the same integer, you get the same values again.

    output:
        A dictionary with keys: data, answers, templateFile,
        quizFilenameTemplate, quizSolnFilenameTemplate.

    Each version creates a random star and planet (mass and position), gives
    the planet a circular-orbit velocity, and computes one momentum-principle
    update step (force, impulse, new velocity, new position) for the key.
    All string values are LaTeX snippets with units attached.
    """
    data = []
    answers = []
    for i in range(numSamples):
        random.seed(seeds[i]) # make these reproducible, but randomish
        # Star mass 0.6-1.0 solar masses; planet mass a few 1e26 kg.
        Mstar = uni(0.6,1.0)*2e30
        Mplanet = random.randint(2,9)*1e26
        # Positions on a coarse grid of ~1e11 m (inner solar-system scale).
        rStar = vp.vec(rint(-8,8),rint(-8,8),rint(-8,8))*1e11
        rPlanet = vp.vec(rint(-8,8),rint(-8,8),rint(-8,8))*1e11
        # LaTeX-formatted versions of the quantities, with units attached.
        MstarStr = latex_float(Mstar) + r'\,{\rm kg}'
        MplanetStr = latex_float(Mplanet) + r'\,{\rm kg}'
        rStarStr = latex_vec(rStar) + r'\,{\rm m}'
        rPlanetStr = latex_vec(rPlanet) + r'\,{\rm m}'
        dt = uni(0.8,1.2)*30*3600*24 # time step: around one month, in seconds
        dtStr = latex_float(dt) + r'\,{\rm s}'
        # Relative position (star -> planet) and its unit vector.
        r = rPlanet - rStar
        rStr = latex_vec(r) + r'\,{\rm m}'
        rhat = r.norm()
        rHatStr = latex_vec(rhat)
        # let's work out a circular orbit speed (uses module-level G)
        v = np.sqrt(G*Mstar/r.mag)
        vPlanet = r.cross(vp.vec(0,0,1)).norm()*v # pick a good perp. direction
        vPlanetStr = latex_vec(vPlanet) + r'\,{\rm m/s}'
        # Problem statement values handed to the template.
        newData = {}
        newData.update(vnum= str(seeds[i]).zfill(2))
        newData.update(tlo=tlo, dt=dtStr)
        newData.update(Mstar=MstarStr)
        newData.update(Mplanet=MplanetStr)
        newData.update(rStar=rStarStr)
        newData.update(rPlanet=rPlanetStr)
        newData.update(vPlanet=vPlanetStr)
        data.append(newData)
        # Answer key: one momentum-principle step.
        # Gravitational force on the planet (attractive, points toward star).
        F = -G*Mplanet*Mstar*r.norm()/r.mag**2
        p = Mplanet*vPlanet          # initial momentum
        dp = F*dt                    # impulse over one step
        pnew = p + dp
        vnew = pnew/Mplanet          # updated velocity
        Fstr = latex_vec(F) + r'\,{\rm N}'
        pStr = latex_vec(p) + r'\,{\rm N s}'
        dPstr = latex_vec(dp) + r'\,{\rm N s}'
        vNewStr = latex_vec(vnew) + r'\,{\rm m/s}'
        rNew = rPlanet + vnew*dt     # updated position (uses the new velocity)
        rNewStr = latex_vec(rNew) + r'\,{\rm m}'
        newAnswer = {}
        newAnswer.update(F=Fstr, p=pStr, dp=dPstr)
        newAnswer.update(r=rStr, rhat=rHatStr)
        newAnswer.update(vNew = vNewStr)
        newAnswer.update(rNew = rNewStr)
        answers.append(newAnswer)
    # Tiny helper so the return statement reads like named fields.
    def returnDict(**kwargs):
        return kwargs
    return returnDict(data = data,
                      answers = answers,
                      templateFile = templateFile,
                      quizFilenameTemplate = quizFilenameTemplate,
                      quizSolnFilenameTemplate = quizSolnFilenameTemplate)
| 29.323077 | 79 | 0.583158 |
import random
import vpython as vp
import numpy as np
from genlatex import latex_float, latex_vec
data = []
tlo='TLO-EX'
templateFile = tlo + '-template.txt'
quizFilenameTemplate = tlo + '-{:}.tex'
quizSolnFilenameTemplate = tlo + '-soln-{:}.tex'
qe = 1.6e-19
G = 1.67e-11
rint = random.randint
uni = random.uniform
def getTemplateValues(numSamples, seeds):
data = []
answers = []
for i in range(numSamples):
random.seed(seeds[i])
Mstar = uni(0.6,1.0)*2e30
Mplanet = random.randint(2,9)*1e26
rStar = vp.vec(rint(-8,8),rint(-8,8),rint(-8,8))*1e11
rPlanet = vp.vec(rint(-8,8),rint(-8,8),rint(-8,8))*1e11
MstarStr = latex_float(Mstar) + r'\,{\rm kg}'
MplanetStr = latex_float(Mplanet) + r'\,{\rm kg}'
rStarStr = latex_vec(rStar) + r'\,{\rm m}'
rPlanetStr = latex_vec(rPlanet) + r'\,{\rm m}'
dt = uni(0.8,1.2)*30*3600*24
dtStr = latex_float(dt) + r'\,{\rm s}'
r = rPlanet - rStar
rStr = latex_vec(r) + r'\,{\rm m}'
rhat = r.norm()
rHatStr = latex_vec(rhat)
v = np.sqrt(G*Mstar/r.mag)
vPlanet = r.cross(vp.vec(0,0,1)).norm()*v # pick a good perp. direction
vPlanetStr = latex_vec(vPlanet) + r'\,{\rm m/s}'
newData = {}
newData.update(vnum= str(seeds[i]).zfill(2))
newData.update(tlo=tlo, dt=dtStr)
newData.update(Mstar=MstarStr)
newData.update(Mplanet=MplanetStr)
newData.update(rStar=rStarStr)
newData.update(rPlanet=rPlanetStr)
newData.update(vPlanet=vPlanetStr)
data.append(newData)
F = -G*Mplanet*Mstar*r.norm()/r.mag**2
p = Mplanet*vPlanet
dp = F*dt
pnew = p + dp
vnew = pnew/Mplanet
Fstr = latex_vec(F) + r'\,{\rm N}'
pStr = latex_vec(p) + r'\,{\rm N s}'
dPstr = latex_vec(dp) + r'\,{\rm N s}'
vNewStr = latex_vec(vnew) + r'\,{\rm m/s}'
rNew = rPlanet + vnew*dt
rNewStr = latex_vec(rNew) + r'\,{\rm m}'
newAnswer = {}
newAnswer.update(F=Fstr, p=pStr, dp=dPstr)
newAnswer.update(r=rStr, rhat=rHatStr)
newAnswer.update(vNew = vNewStr)
newAnswer.update(rNew = rNewStr)
answers.append(newAnswer)
def returnDict(**kwargs):
return kwargs
return returnDict(data = data,
answers = answers,
templateFile = templateFile,
quizFilenameTemplate = quizFilenameTemplate,
quizSolnFilenameTemplate = quizSolnFilenameTemplate)
| true | true |
f7feb4da5e265294647ab11c3fbcfe866e9f6c1e | 1,104 | py | Python | distributed/protocol/tests/test_cupy.py | mlhenderson/distributed | 105d040fb9cabfa3581dcdce73165c77cb22de9a | [
"BSD-3-Clause"
] | null | null | null | distributed/protocol/tests/test_cupy.py | mlhenderson/distributed | 105d040fb9cabfa3581dcdce73165c77cb22de9a | [
"BSD-3-Clause"
] | null | null | null | distributed/protocol/tests/test_cupy.py | mlhenderson/distributed | 105d040fb9cabfa3581dcdce73165c77cb22de9a | [
"BSD-3-Clause"
] | null | null | null | from distributed.protocol import serialize, deserialize
import pickle
import pytest
cupy = pytest.importorskip("cupy")
@pytest.mark.parametrize("size", [0, 10])
@pytest.mark.parametrize("dtype", ["u1", "u4", "u8", "f4"])
def test_serialize_cupy(size, dtype):
    """Round-trip a CuPy array through serialize/deserialize unchanged."""
    original = cupy.arange(size, dtype=dtype)
    header, frames = serialize(original, serializers=("cuda", "dask", "pickle"))
    restored = deserialize(
        header, frames, deserializers=("cuda", "dask", "pickle", "error")
    )
    assert (original == restored).all()
@pytest.mark.parametrize("dtype", ["u1", "u4", "u8", "f4"])
def test_serialize_cupy_from_numba(dtype):
    """Serialize a Numba device array, then deserialize it as a CuPy array."""
    cuda = pytest.importorskip("numba.cuda")
    np = pytest.importorskip("numpy")
    if not cuda.is_available():
        pytest.skip("CUDA is not available")

    host = np.arange(10, dtype=dtype)
    device = cuda.to_device(host)
    header, frames = serialize(device, serializers=("cuda", "dask", "pickle"))
    # Rewrite the recorded type so deserialization produces a cupy.ndarray
    # instead of a Numba device array.
    header["type-serialized"] = pickle.dumps(cupy.ndarray)
    restored = deserialize(
        header, frames, deserializers=("cuda", "dask", "pickle", "error")
    )
    assert (host == cupy.asnumpy(restored)).all()
| 31.542857 | 86 | 0.65942 | from distributed.protocol import serialize, deserialize
import pickle
import pytest
cupy = pytest.importorskip("cupy")
@pytest.mark.parametrize("size", [0, 10])
@pytest.mark.parametrize("dtype", ["u1", "u4", "u8", "f4"])
def test_serialize_cupy(size, dtype):
x = cupy.arange(size, dtype=dtype)
header, frames = serialize(x, serializers=("cuda", "dask", "pickle"))
y = deserialize(header, frames, deserializers=("cuda", "dask", "pickle", "error"))
assert (x == y).all()
@pytest.mark.parametrize("dtype", ["u1", "u4", "u8", "f4"])
def test_serialize_cupy_from_numba(dtype):
cuda = pytest.importorskip("numba.cuda")
np = pytest.importorskip("numpy")
if not cuda.is_available():
pytest.skip("CUDA is not available")
size = 10
x_np = np.arange(size, dtype=dtype)
x = cuda.to_device(x_np)
header, frames = serialize(x, serializers=("cuda", "dask", "pickle"))
header["type-serialized"] = pickle.dumps(cupy.ndarray)
y = deserialize(header, frames, deserializers=("cuda", "dask", "pickle", "error"))
assert (x_np == cupy.asnumpy(y)).all()
| true | true |
f7feb6a02d26b0c108088b2869515458cb36993a | 1,159 | py | Python | layout/layout_in_form.py | thanhkaist/Qt-Python-Binding-Examples | 25b3313fd03e396014cce0e8f7eec8823b3ebd29 | [
"BSD-3-Clause"
] | 2 | 2019-10-20T05:40:51.000Z | 2019-10-31T17:26:27.000Z | qt_layouts/layout_in_form.py | bisratyalew/Qt-Python-Sample-Examples | 45d69e01adefc854ac25627110611ca27b334a55 | [
"MIT"
] | null | null | null | qt_layouts/layout_in_form.py | bisratyalew/Qt-Python-Sample-Examples | 45d69e01adefc854ac25627110611ca27b334a55 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
layout in form
Tested environment:
Mac OS X 10.6.8
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class Demo(QtGui.QWidget):
    """Demo window showing label / line-edit pairs in a QFormLayout."""

    def __init__(self):
        super(Demo, self).__init__()
        # 300x400 window positioned at (500, 200).
        self.setGeometry(500, 200, 300, 400)

        form = QtGui.QFormLayout(self)
        # One form row per field, in this fixed order.
        for caption in ("Name", "Age", "Location"):
            label = QtGui.QLabel(caption, self)
            editor = QtGui.QLineEdit(self)
            form.addRow(label, editor)
        self.setLayout(form)

    def show_and_raise(self):
        """Show the window and bring it to the front of the window stack."""
        self.show()
        self.raise_()
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the demo window,
    # then hand control to the event loop until the window is closed.
    app = QtGui.QApplication(sys.argv)
    demo = Demo()
    demo.show_and_raise()
    sys.exit(app.exec_())
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class Demo(QtGui.QWidget):
def __init__(self):
super(Demo, self).__init__()
x, y, w, h = 500, 200, 300, 400
self.setGeometry(x, y, w, h)
form = QtGui.QFormLayout(self)
name_label = QtGui.QLabel("Name", self)
name_lineedit = QtGui.QLineEdit(self)
form.addRow(name_label, name_lineedit)
age_label = QtGui.QLabel("Age", self)
age_lineedit = QtGui.QLineEdit(self)
form.addRow(age_label, age_lineedit)
location_label = QtGui.QLabel("Location", self)
location_lineedit = QtGui.QLineEdit(self)
form.addRow(location_label, location_lineedit)
self.setLayout(form)
def show_and_raise(self):
self.show()
self.raise_()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
sys.exit(app.exec_()) | true | true |
f7feb6c5633e4346afbd8bd79b8fd28b214d48ea | 144 | py | Python | ver1_0/openassembly/pirate_messages/tasks.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | 1 | 2015-11-05T08:22:19.000Z | 2015-11-05T08:22:19.000Z | ver1_0/openassembly/pirate_messages/tasks.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | null | null | null | ver1_0/openassembly/pirate_messages/tasks.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | 1 | 2018-02-03T18:25:41.000Z | 2018-02-03T18:25:41.000Z | from celery.task import task
# BUG FIX: Celery's task option is 'ignore_result' (singular). The misspelled
# 'ignore_results' is silently stored as an arbitrary task attribute, so the
# result backend was still recording results for this task.
@task(ignore_result=True)
def set_to_read(notes):
    """Mark every notification in *notes* as read and persist each one.

    Args:
        notes: iterable of model instances exposing an ``is_read`` field and
            a ``save()`` method (e.g. a queryset of message objects).
    """
    for note in notes:
        note.is_read = True
        note.save()
| 16 | 28 | 0.645833 | from celery.task import task
@task(ignore_results=True)
def set_to_read(notes):
for i in notes:
i.is_read = True
i.save()
| true | true |
f7feb76b423df24342b91752bbb1aed44778e8b4 | 827 | py | Python | Cura/Uranium/UM/View/DefaultPass.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | Cura/Uranium/UM/View/DefaultPass.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | Cura/Uranium/UM/View/DefaultPass.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | # Copyright (c) 2019 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import UM.Qt.QtApplication
from UM.View.RenderPass import RenderPass
## A render pass subclass that renders everything with the default parameters.
#
#   This class provides the basic rendering of the objects in the scene.
class DefaultPass(RenderPass):
    def __init__(self, width: int, height: int) -> None:
        # "default" pass; final argument presumably sets the pass priority
        # (0) -- confirm against RenderPass.__init__.
        super().__init__("default", width, height, 0)
        application = UM.Qt.QtApplication.QtApplication.getInstance()
        self._renderer = application.getRenderer()

    ## Bind the pass, draw every render batch with the active camera, release.
    def render(self) -> None:
        self.bind()
        active_camera = UM.Qt.QtApplication.QtApplication.getInstance().getController().getScene().getActiveCamera()
        for render_batch in self._renderer.getBatches():
            render_batch.render(active_camera)
        self.release()
| 30.62963 | 109 | 0.702539 |
import UM.Qt.QtApplication
from UM.View.RenderPass import RenderPass
nt) -> None:
super().__init__("default", width, height, 0)
self._renderer = UM.Qt.QtApplication.QtApplication.getInstance().getRenderer()
def render(self) -> None:
self.bind()
camera = UM.Qt.QtApplication.QtApplication.getInstance().getController().getScene().getActiveCamera()
for batch in self._renderer.getBatches():
batch.render(camera)
self.release()
| true | true |
f7feb8017d4807f43beed287f8fc3702d050a955 | 143 | py | Python | redmail/test/log/conftest.py | rveachkc/red-mail | 97a9e89e8a04e67c2824a79ce45489931b4d1b82 | [
"MIT"
] | 173 | 2022-01-01T21:16:02.000Z | 2022-03-29T06:25:42.000Z | redmail/test/log/conftest.py | rveachkc/red-mail | 97a9e89e8a04e67c2824a79ce45489931b4d1b82 | [
"MIT"
] | 36 | 2022-01-01T23:21:42.000Z | 2022-03-25T07:02:45.000Z | redmail/test/log/conftest.py | rveachkc/red-mail | 97a9e89e8a04e67c2824a79ce45489931b4d1b82 | [
"MIT"
] | 11 | 2022-01-02T12:59:58.000Z | 2022-02-27T11:12:57.000Z |
import logging
import pytest
@pytest.fixture
def logger():
    """Return the shared "_test" logger with any attached handlers removed."""
    test_logger = logging.getLogger("_test")
    # Start each test from a clean slate: drop handlers left by earlier tests.
    test_logger.handlers = []
    return test_logger
import logging
import pytest
@pytest.fixture
def logger():
logger = logging.getLogger("_test")
logger.handlers = []
return logger | true | true |
f7feb8b9156c3ea365d1bd5dd0296c24265c8a4a | 2,833 | py | Python | datatest/__past__/api09.py | ajhynes7/datatest | 78742e98de992807286655f5685a2dc33a7b452e | [
"Apache-2.0"
] | 277 | 2016-05-12T13:22:49.000Z | 2022-03-11T00:18:32.000Z | datatest/__past__/api09.py | ajhynes7/datatest | 78742e98de992807286655f5685a2dc33a7b452e | [
"Apache-2.0"
] | 57 | 2016-05-18T01:03:32.000Z | 2022-02-17T13:48:43.000Z | datatest/__past__/api09.py | ajhynes7/datatest | 78742e98de992807286655f5685a2dc33a7b452e | [
"Apache-2.0"
] | 16 | 2016-05-22T11:35:19.000Z | 2021-12-01T19:41:42.000Z | """Backward compatibility for version 0.9 API."""
from __future__ import absolute_import
import datatest
from datatest._compatibility.collections.abc import Mapping
from datatest._compatibility.collections.abc import Set
from datatest._normalize import normalize
from datatest._utils import IterItems
class RequiredSubset_090(datatest.requirements.GroupRequirement):
    """Implements inverted subset behavior from the 0.9.x API.

    The group passes when it contains every element of the requirement;
    each requirement element not found in the group is reported as a
    Missing difference.
    """
    def __init__(self, requirement):
        # Normalize to a set so membership tests and discard() are cheap.
        if not isinstance(requirement, Set):
            requirement = set(requirement)
        self._set = requirement

    def check_group(self, group):
        """Return ``(differences, description)`` for one group of elements."""
        # Copy via set() rather than .copy(): a frozenset requirement would
        # otherwise yield a frozenset, which has no discard().
        missing = set(self._set)
        for element in group:
            if not missing:
                break  # every requirement element already seen -- stop early
            missing.discard(element)
        # BUG FIX: 'Missing' was referenced as a bare name but is never
        # imported in this module (NameError once the generator is consumed);
        # qualify it through the datatest package.
        differences = (datatest.Missing(element) for element in missing)
        description = 'must contain all elements of given requirement'
        return differences, description
class RequiredSuperset_090(datatest.requirements.GroupRequirement):
    """Implements inverted superset behavior from the 0.9.x API.

    The group passes when every one of its elements appears in the
    requirement; elements outside the requirement are reported as Extra
    differences.
    """
    def __init__(self, requirement):
        # Normalize to a set for O(1) membership tests.
        if not isinstance(requirement, Set):
            requirement = set(requirement)
        self._set = requirement

    def check_group(self, group):
        """Return ``(differences, description)`` for one group of elements."""
        superset = self._set
        extras = set()
        for element in group:
            if element not in superset:
                extras.add(element)
        # BUG FIX: 'Extra' was referenced as a bare name but is never
        # imported in this module (NameError once the generator is consumed);
        # qualify it through the datatest package.
        differences = (datatest.Extra(element) for element in extras)
        description = 'may only contain elements of given requirement'
        return differences, description
class ValidateType(datatest.validation.ValidateType):
    # Subclass of the modern ValidateType that restores the 0.9.x meaning of
    # subset()/superset(), which is the inverse of later releases.
    def subset(self, data, requirement, msg=None):
        """Implements API 0.9.x subset behavior: *data* must contain every
        element of *requirement*."""
        # Hide this frame from pytest tracebacks so failures point at callers.
        __tracebackhide__ = datatest.validation._pytest_tracebackhide
        requirement = normalize(requirement, lazy_evaluation=False, default_type=set)
        if isinstance(requirement, (Mapping, IterItems)):
            # Mapping requirement: apply the subset check per key/group.
            factory = RequiredSubset_090
            requirement = datatest.requirements.RequiredMapping(requirement, factory)
        else:
            requirement = RequiredSubset_090(requirement)
        # Delegate the actual validation to the base class __call__.
        self(data, requirement, msg=msg)
    def superset(self, data, requirement, msg=None):
        """Implements API 0.9.x superset behavior: *data* may only contain
        elements of *requirement*."""
        # Hide this frame from pytest tracebacks so failures point at callers.
        __tracebackhide__ = datatest.validation._pytest_tracebackhide
        requirement = normalize(requirement, lazy_evaluation=False, default_type=set)
        if isinstance(requirement, (Mapping, IterItems)):
            # Mapping requirement: apply the superset check per key/group.
            factory = RequiredSuperset_090
            requirement = datatest.requirements.RequiredMapping(requirement, factory)
        else:
            requirement = RequiredSuperset_090(requirement)
        # Delegate the actual validation to the base class __call__.
        self(data, requirement, msg=msg)
# Install the backward-compatible validator as the package-level entry point.
datatest.validate = ValidateType()
| 34.54878 | 85 | 0.690081 | from __future__ import absolute_import
import datatest
from datatest._compatibility.collections.abc import Mapping
from datatest._compatibility.collections.abc import Set
from datatest._normalize import normalize
from datatest._utils import IterItems
class RequiredSubset_090(datatest.requirements.GroupRequirement):
def __init__(self, requirement):
if not isinstance(requirement, Set):
requirement = set(requirement)
self._set = requirement
def check_group(self, group):
missing = self._set.copy()
for element in group:
if not missing:
break
missing.discard(element)
differences = (Missing(element) for element in missing)
description = 'must contain all elements of given requirement'
return differences, description
class RequiredSuperset_090(datatest.requirements.GroupRequirement):
def __init__(self, requirement):
if not isinstance(requirement, Set):
requirement = set(requirement)
self._set = requirement
def check_group(self, group):
superset = self._set
extras = set()
for element in group:
if element not in superset:
extras.add(element)
differences = (Extra(element) for element in extras)
description = 'may only contain elements of given requirement'
return differences, description
class ValidateType(datatest.validation.ValidateType):
def subset(self, data, requirement, msg=None):
__tracebackhide__ = datatest.validation._pytest_tracebackhide
requirement = normalize(requirement, lazy_evaluation=False, default_type=set)
if isinstance(requirement, (Mapping, IterItems)):
factory = RequiredSubset_090
requirement = datatest.requirements.RequiredMapping(requirement, factory)
else:
requirement = RequiredSubset_090(requirement)
self(data, requirement, msg=msg)
def superset(self, data, requirement, msg=None):
__tracebackhide__ = datatest.validation._pytest_tracebackhide
requirement = normalize(requirement, lazy_evaluation=False, default_type=set)
if isinstance(requirement, (Mapping, IterItems)):
factory = RequiredSuperset_090
requirement = datatest.requirements.RequiredMapping(requirement, factory)
else:
requirement = RequiredSuperset_090(requirement)
self(data, requirement, msg=msg)
datatest.validate = ValidateType()
| true | true |
f7feb8d87a5253d1c27c5a37431a86f31b84c72e | 5,983 | py | Python | GPGO/dpd_opt_script.py | FNTwin/BayGPGO | bad7e335d2fd19a93aeee0d591ea7da51c2c6d59 | [
"MIT"
] | null | null | null | GPGO/dpd_opt_script.py | FNTwin/BayGPGO | bad7e335d2fd19a93aeee0d591ea7da51c2c6d59 | [
"MIT"
] | null | null | null | GPGO/dpd_opt_script.py | FNTwin/BayGPGO | bad7e335d2fd19a93aeee0d591ea7da51c2c6d59 | [
"MIT"
] | null | null | null | from GPGO.GaussianProcess.GP import GP, generate_grid
from GPGO.GaussianProcess.Kernel.RBF import RBF
from GPGO.BayesOpt import BayesianOptimization
import numpy as np
import os
import argparse
def get_right_coeffs(array):
    """Extract the 8 independently-optimized pair coefficients.

    Parameters
    ----------
    array : array_like
        Full 36-entry interaction table (leading singleton axes allowed).

    Returns
    -------
    numpy.ndarray
        Row vector of shape (1, 8) holding the free coefficients.
    """
    free_indices = [2, 6, 7, 9, 15, 19, 20, 30]
    flat = np.squeeze(array)
    return np.atleast_2d(flat[free_indices])
def fill_spots(array):
    """Expand the 8 free coefficients back into the full 36-entry table.

    The remaining entries are fixed N-bead values or copies of another
    entry (symmetric interactions).

    Parameters
    ----------
    array : array_like
        The 8 free coefficients, in the order of ``free_indices``.

    Returns
    -------
    numpy.ndarray
        Row vector of shape (1, 36) with the complete coefficient table.
    """
    free_indices = [2, 6, 7, 9, 15, 19, 20, 30]
    fixed_indices = [3, 16, 21, 24, 25]              # N-bead interactions
    fixed_values = [127.19, 2.51, -4.3, 124.4, 4.5]
    # (destination, source) pairs; sources never appear as destinations,
    # so a sequential copy matches the original fancy-index assignment.
    mirror_pairs = [(4, 2), (10, 3), (11, 2), (13, 6), (14, 7),
                    (17, 15), (22, 16), (26, 15), (28, 19), (29, 20)]

    table = np.zeros(36)
    table[free_indices] = np.squeeze(array)
    table[fixed_indices] = np.array(fixed_values)
    for dst, src in mirror_pairs:
        table[dst] = table[src]
    return np.atleast_2d(table)
def read_interaction(path):
    """Read the dpd interaction file in *path* and return the interaction
    parameters as a (1, N) array.

    Only lines containing a whitespace-delimited "pair_coeffs" token are
    read; the 4th token of each such line is taken as the coefficient.

    NOTE(review): write_interaction below emits "pair_coeff" (singular)
    tokens, so this filter never matches files written by it -- confirm the
    expected input format before relying on this function.
    """
    path = os.path.join(path, "full_G11_326N16.solv.inputinteg.txt")
    with open(path, "r") as f:
        coeffs = []
        for row in f:
            a = row.split()
            if "pair_coeffs" in a:
                coeffs.append(float(a[3]))
    return np.atleast_2d(coeffs)
def write_interaction(path, array):
    """Write a LAMMPS dpd pair_coeff interaction file into directory *path*.

    *array* holds the full 36-entry coefficient table (e.g. the output of
    fill_spots); entries listed in ``bound_mask`` are overridden by the
    fixed ``mask_value`` coefficients instead of the supplied values.
    """
    # Array will be less than 36 bounds
    # Human-readable bead-pair labels, used only in trailing comments.
    bounds = ["! AuE\t\tAuE", "! AuE\t\tAuI", "! AuE\t\tC", "! AuE\t\tN", "! AuE\t\tL", "! AuE\t\tS", "! AuE\t\tW",
              "! AuE\t\tCl", "! AuI\t\tAuI", "! AuI\t\tC", "! AuI\t\tN", "! AuI\t\tL", "! AuI\t\tS", "! AuI\t\tW",
              "! AuI\t\tCl", "! C\t\tC", "! C\t\tN", "! C\t\tL", "! C\t\tS", "! C\t\tW", "! C\t\tCl",
              "! N\t\tN", "! N\t\tL", "! N\t\tS", "! N\t\tW", "! N\t\tCl", "! L\t\tL", "! L\t\tS",
              "! L\t\tW", "! L\t\tCl", "! S\t\tS", "! S\t\tW", "! S\t\tCl", "! W\t\tW", "! W\t\tCl",
              "! Cl\t\tCl"]
    # Numeric atom-type pairs in the same order as `bounds`.
    bounds_index = ["2\t2", "1\t2", "2\t3", "2\t5", "2\t4", "2\t6", "2\t7", "2\t8", "1\t1", "1\t3",
                    "1\t5",
                    "1\t4", "1\t6", "1\t7", "1\t8", "3\t3", "3\t5", "3\t4", "3\t6", "3\t7", "3\t8",
                    "5\t5", "4\t5",
                    "5\t6", "5\t7", "5\t8", "4\t4", "4\t6", "4\t7", "4\t8", "6\t6", "6\t7", "6\t8",
                    "7\t7", "7\t8",
                    "8\t8"]
    # bound_mask=[0,1,5,8,12,18,23,27,31,32,33,34,35]
    # mask_value=[51.6, 51.6, -10., 51.6 , 40., 72.,68.9,72., 80.,80.,51.6,51.6, 51.6]
    # N beads fixed
    # bound_mask=[0, 1, 3, 5, 8, 12, 16, 18, 21, 23,24,25 ,27,31,32,33,34,35]
    # mask_value=[51.6, 51.6, 127.19, -10., 51.6 , 40.,2.5, 72.,-4.3,68.9,124.4,4.53,72., 80.,80.,51.6,51.6, 51.6]
    # Entries at these indices are written with the fixed values below,
    # regardless of what *array* contains.
    bound_mask = [0, 1, 3, 5, 8, 12, 16, 18, 21, 23, 24, 25, 27, 31, 32, 33, 34, 35]
    mask_value = [51.6, 51.6, 127.19, -10., 51.6, 40., 2.5, 72., -4.3, 68.9, 124.4, 4.53, 72., 80., 80., 51.6, 51.6,
                  51.6]
    n_bounds = 36       # total pair interactions (unused below)
    # n_real_bounds=13
    n_real_bounds = 8   # independently optimized interactions (unused below)
    array = np.squeeze(array)
    # NOTE(review): the bare string below is a no-op statement, not a
    # docstring.
    "write an interaction file in path"
    path = os.path.join(path, "full_G11_326N16.solv.inputinteg.txt")
    with open(path, "w") as f:
        f.write("\n# Atom Types used: AuE: 2, AuI: 1, C: 3, Cl: 8, L: 4, N: 5, S: 6, W: 7, \n\n")
        f.write("# pair_coeff, to be imported in the lammps input file...\n")
        # One pair_coeff line per interaction; second numeric column (4.5)
        # is written for every pair.
        for i in range(len(bounds)):
            if i in bound_mask:
                f.write(
                    f'pair_coeff\t{bounds_index[i]}\tdpd\t{mask_value[bound_mask.index(i)]:.4f}\t\t{4.5:.4f}\t#{bounds[i]}\n')
            else:
                f.write(f'pair_coeff\t{bounds_index[i]}\tdpd\t{np.squeeze(array[i]):.4f}\t\t{4.5:.4f}\t#{bounds[i]}\n')
def write_db(path, array):
    """Append *array* rows to the plain-text database file at *path*.

    Rows are written with numpy.savetxt (three decimals) preceded by a
    commented separator line.
    """
    database = open(path, "ab")  # append in binary mode, as savetxt expects
    try:
        np.savetxt(database, array, fmt="%2.3f", header="#----------------------")
    finally:
        database.close()
def read_db(path):
    """Load the plain-text database at *path* as a 2-D numpy array."""
    contents = np.loadtxt(path)
    return np.atleast_2d(contents)
def parse_cmd():
    """Parse the command-line arguments.

    Reads a single flag:
        -f / --file: path to the directory holding the interaction file.

    Returns:
        argparse.Namespace whose ``file`` attribute holds the supplied
        path (None when the flag is omitted).
    """
    parser = argparse.ArgumentParser(description="Prepare the lammps pair coeffs")
    # Fixed the help-string typo ("input fie" -> "input file"); the stale
    # docstring describing nonexistent -o/-c flags is rewritten above.
    parser.add_argument('-f', '--file', dest='file',
                        action='store', type=str, help="Path to input file")
    args = parser.parse_args()
    return args
def main():
    """Run one Bayesian-optimization step over the dpd pair coefficients.

    Reads previously evaluated coefficient sets (X) and objective values (Y)
    from hard-coded paths, normalizes the free coefficients to [0, 1], fits
    a GP, and prints the next coefficient set to evaluate.
    """
    args = parse_cmd()
    path_interaction = args.file
    # Write X and Y
    # X_old=get_right_coeffs(read_interaction(path_interaction))
    # NOTE(review): paths are hard-coded to one user's machine.
    path_x = "/home/merk/Desktop/optimization_run/data_X.txt"
    path_y = "/home/merk/Desktop/optimization_run/data_Y.txt"
    X, Y = read_db(path_x), read_db(path_y).reshape(-1, 1)
    print(X.shape)
    # Keep only the 8 free coefficients of each stored 36-entry row.
    tmp = []
    for i in X:
        tmp.append(get_right_coeffs(i))
    X = np.asarray(np.squeeze(tmp))
    dim = X[0].shape[0]
    print(X.shape)
    # bo run
    # mean, var=np.mean(X), np.std(X)
    # X= (X - mean)/var
    # low, up =(-10-mean)/var , (140 - mean)/var
    # NOTE(review): the first boundaries assignment is immediately
    # overwritten by the second (search happens in normalized [0, 1] space).
    boundaries = [[-10, 140] for i in range(dim)]
    boundaries=[[0,1] for i in range(dim)]
    # NOTE(review): these shadow the builtins min/max inside main().
    min=-10
    max=140
    # Min-max normalize the coefficients to [0, 1].
    X=(X-min)/(max-min)
    gp = GP(X, Y, RBF(), normalize_y=True)
    gp.set_boundary([[1e-4,1]])
    settings = {"type": "BFGS",
                "ac_type": "EI",
                "n_search": 100,
                "boundaries": boundaries,
                "epsilon": 0.1,
                "iteration": 1,
                "minimization": True,
                "optimization": True,
                "n_restart": 30,
                "sampling": "LHS"}
    BayOpt = BayesianOptimization(X, Y, settings, gp, func=None)
    proposal = BayOpt.suggest_location()
    # Write new file
    # proposal= proposal *var + mean
    # Undo the min-max normalization before reporting the proposal.
    proposal=proposal*(max-min)+min
    print(proposal)
    #write_interaction(path_interaction, fill_spots(proposal))


if __name__ == "__main__":
    main()
| 35.613095 | 126 | 0.536019 | from GPGO.GaussianProcess.GP import GP, generate_grid
from GPGO.GaussianProcess.Kernel.RBF import RBF
from GPGO.BayesOpt import BayesianOptimization
import numpy as np
import os
import argparse
def get_right_coeffs(array):
truth_index = [2, 6, 7, 9, 15, 19, 20, 30]
return np.atleast_2d(np.squeeze(array)[truth_index])
def fill_spots(array):
truth_index = [2, 6, 7, 9, 15, 19, 20, 30]
copy_to = [4, 10, 11, 13, 14, 17, 22, 26, 28, 29]
copy_from = [2, 3, 2, 6, 7, 15, 16, 15, 19, 20]
coeffs = np.zeros(36)
coeffs[truth_index] = np.squeeze(array)
N = [3, 16, 21, 24, 25]
coeffs[N] = np.array([127.19, 2.51, -4.3, 124.4, 4.5])
coeffs[copy_to] = coeffs[copy_from]
return np.atleast_2d(coeffs)
def read_interaction(path):
path = os.path.join(path, "full_G11_326N16.solv.inputinteg.txt")
with open(path, "r") as f:
coeffs = []
for row in f:
a = row.split()
if "pair_coeffs" in a:
coeffs.append(float(a[3]))
return np.atleast_2d(coeffs)
def write_interaction(path, array):
bounds = ["! AuE\t\tAuE", "! AuE\t\tAuI", "! AuE\t\tC", "! AuE\t\tN", "! AuE\t\tL", "! AuE\t\tS", "! AuE\t\tW",
"! AuE\t\tCl", "! AuI\t\tAuI", "! AuI\t\tC", "! AuI\t\tN", "! AuI\t\tL", "! AuI\t\tS", "! AuI\t\tW",
"! AuI\t\tCl", "! C\t\tC", "! C\t\tN", "! C\t\tL", "! C\t\tS", "! C\t\tW", "! C\t\tCl",
"! N\t\tN", "! N\t\tL", "! N\t\tS", "! N\t\tW", "! N\t\tCl", "! L\t\tL", "! L\t\tS",
"! L\t\tW", "! L\t\tCl", "! S\t\tS", "! S\t\tW", "! S\t\tCl", "! W\t\tW", "! W\t\tCl",
"! Cl\t\tCl"]
bounds_index = ["2\t2", "1\t2", "2\t3", "2\t5", "2\t4", "2\t6", "2\t7", "2\t8", "1\t1", "1\t3",
"1\t5",
"1\t4", "1\t6", "1\t7", "1\t8", "3\t3", "3\t5", "3\t4", "3\t6", "3\t7", "3\t8",
"5\t5", "4\t5",
"5\t6", "5\t7", "5\t8", "4\t4", "4\t6", "4\t7", "4\t8", "6\t6", "6\t7", "6\t8",
"7\t7", "7\t8",
"8\t8"]
bound_mask = [0, 1, 3, 5, 8, 12, 16, 18, 21, 23, 24, 25, 27, 31, 32, 33, 34, 35]
mask_value = [51.6, 51.6, 127.19, -10., 51.6, 40., 2.5, 72., -4.3, 68.9, 124.4, 4.53, 72., 80., 80., 51.6, 51.6,
51.6]
n_bounds = 36
n_real_bounds = 8
array = np.squeeze(array)
path = os.path.join(path, "full_G11_326N16.solv.inputinteg.txt")
with open(path, "w") as f:
f.write("\n# Atom Types used: AuE: 2, AuI: 1, C: 3, Cl: 8, L: 4, N: 5, S: 6, W: 7, \n\n")
f.write("# pair_coeff, to be imported in the lammps input file...\n")
for i in range(len(bounds)):
if i in bound_mask:
f.write(
f'pair_coeff\t{bounds_index[i]}\tdpd\t{mask_value[bound_mask.index(i)]:.4f}\t\t{4.5:.4f}\t#{bounds[i]}\n')
else:
f.write(f'pair_coeff\t{bounds_index[i]}\tdpd\t{np.squeeze(array[i]):.4f}\t\t{4.5:.4f}\t#{bounds[i]}\n')
def write_db(path, array):
with open(path, "ab") as f:
np.savetxt(f, array, fmt="%2.3f", header="#----------------------")
def read_db(path):
return np.atleast_2d(np.loadtxt(path))
def parse_cmd():
parser = argparse.ArgumentParser(description="Prepare the lammps pair coeffs")
parser.add_argument('-f', '--file', dest='file',
action='store', type=str, help="Path to input fie")
args = parser.parse_args()
return args
def main():
    """Run one Bayesian-optimization step over the stored X/Y database and
    print the next suggested DPD coefficient vector.

    Reads the design matrix and targets from hard-coded paths, normalizes X
    to [0, 1], fits a GP surrogate, and asks the optimizer for the next
    sample location (de-normalized before printing).
    """
    args = parse_cmd()
    # NOTE(review): the parsed input file path is never used below -- confirm
    # whether it was meant to replace the hard-coded paths.
    path_interaction = args.file
    # Hard-coded database locations (machine-specific).
    path_x = "/home/merk/Desktop/optimization_run/data_X.txt"
    path_y = "/home/merk/Desktop/optimization_run/data_Y.txt"
    X, Y = read_db(path_x), read_db(path_y).reshape(-1, 1)
    print(X.shape)
    # Map each stored row to the reduced coefficient vector.
    # get_right_coeffs is defined elsewhere in this file -- presumably it
    # selects the non-masked pair coefficients; TODO confirm.
    tmp = []
    for i in X:
        tmp.append(get_right_coeffs(i))
    X = np.asarray(np.squeeze(tmp))
    dim = X[0].shape[0]
    print(X.shape)
    # NOTE(review): the first 'boundaries' assignment is dead -- it is
    # immediately overwritten by the normalized [0, 1] bounds below.
    boundaries = [[-10, 140] for i in range(dim)]
    boundaries = [[0, 1] for i in range(dim)]
    # NOTE(review): 'min'/'max' shadow the builtins; they hold the raw
    # coefficient range used for normalization.
    min = -10
    max = 140
    X = (X - min) / (max - min)
    gp = GP(X, Y, RBF(), normalize_y=True)
    gp.set_boundary([[1e-4, 1]])
    # Optimizer configuration: BFGS acquisition search with Expected
    # Improvement over Latin-hypercube restarts.
    settings = {"type": "BFGS",
                "ac_type": "EI",
                "n_search": 100,
                "boundaries": boundaries,
                "epsilon": 0.1,
                "iteration": 1,
                "minimization": True,
                "optimization": True,
                "n_restart": 30,
                "sampling": "LHS"}
    BayOpt = BayesianOptimization(X, Y, settings, gp, func=None)
    proposal = BayOpt.suggest_location()
    # De-normalize the proposal back to the raw coefficient range.
    proposal = proposal * (max - min) + min
    print(proposal)
# Script entry point: run a single optimization step when executed directly.
if __name__ == "__main__":
    main()
| true | true |
f7feb92edc8727f99dc5f12f1f7e9a016e25ed15 | 1,485 | py | Python | setup.py | AugurProject/pyethereum | e0a2edfd586d4d628e4c64c511d0ace514af44ee | [
"MIT"
] | 7 | 2017-10-12T20:26:52.000Z | 2022-03-26T19:42:35.000Z | setup.py | AugurProject/pyethereum | e0a2edfd586d4d628e4c64c511d0ace514af44ee | [
"MIT"
] | 1 | 2017-07-12T07:03:22.000Z | 2017-07-12T07:52:54.000Z | setup.py | AugurProject/pyethereum | e0a2edfd586d4d628e4c64c511d0ace514af44ee | [
"MIT"
] | 2 | 2017-08-05T18:15:36.000Z | 2018-05-17T03:00:35.000Z | from setuptools import setup, find_packages
# Long description for PyPI is taken verbatim from the README.
with open('README.rst') as readme_file:
    readme = readme_file.read()

# requirements
# NOTE(review): these open() calls are never closed -- harmless for a
# short-lived setup script, but a 'with' block would be cleaner.
install_requires = set(x.strip() for x in open('requirements.txt'))
# Replace direct-URL requirements with their PyPI package names, since
# install_requires cannot carry URLs.
install_requires_replacements = {
    'https://github.com/ethereum/ethash/tarball/master': 'pyethash',
}
install_requires = [
    install_requires_replacements.get(
        r, r) for r in install_requires]

# dev requirements
tests_require = set(x.strip() for x in open('dev_requirements.txt'))

# dependency links
dependency_links = []

# *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
# see:
# https://github.com/ethereum/pyethapp/wiki/Development:-Versions-and-Releases
version = '2.1.1'

setup(
    name="ethereum-augur-temp",
    packages=find_packages("."),
    description='Next generation cryptocurrency network',
    long_description=readme,
    url='https://github.com/ethereum/pyethereum/',
    install_requires=install_requires,
    tests_require=tests_require,
    dependency_links=dependency_links,
    setup_requires=[
        # 'pytest-runner==2.7'
    ],
    version=version,
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| 29.7 | 85 | 0.682828 | from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
install_requires = set(x.strip() for x in open('requirements.txt'))
install_requires_replacements = {
'https://github.com/ethereum/ethash/tarball/master': 'pyethash',
}
install_requires = [
install_requires_replacements.get(
r, r) for r in install_requires]
tests_require = set(x.strip() for x in open('dev_requirements.txt'))
dependency_links = []
# see:
# https://github.com/ethereum/pyethapp/wiki/Development:-Versions-and-Releases
version = '2.1.1'
setup(
name="ethereum-augur-temp",
packages=find_packages("."),
description='Next generation cryptocurrency network',
long_description=readme,
url='https://github.com/ethereum/pyethereum/',
install_requires=install_requires,
tests_require=tests_require,
dependency_links=dependency_links,
setup_requires=[
# 'pytest-runner==2.7'
],
version=version,
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| true | true |
f7feb932e264f7dc151faba6558bd455a687d477 | 1,365 | py | Python | languages/ms.py | nursix/STL | 682d8455c8e1c761f48542dad96da08767301923 | [
"MIT"
] | 1 | 2017-11-16T14:50:19.000Z | 2017-11-16T14:50:19.000Z | languages/ms.py | vpccalderara/sahana | 6eb3f9798879dfa51bbe5d2b84829b1402671499 | [
"MIT"
] | null | null | null | languages/ms.py | vpccalderara/sahana | 6eb3f9798879dfa51bbe5d2b84829b1402671499 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Malay (ms) UI translation strings: English source -> Malay translation.
# Keys containing %(name)s placeholders must keep the same placeholders in
# the translated value.
{
'Malaysian': 'Bahasa Melayu',
'agree': 'setuju',
'all of it': 'semuanya',
'Dear %(person_name)s': 'Kepada %(person_name)s',
'disagree': 'tidak setuju',
'most of it': 'kebanyakannya',
'no': 'tidak',
'no change': 'tiada perubahan',
'not at all': 'tidak sama sekali',
'part of it': 'sebahagian daripadanya',
'significant change': 'perubahan yang ketara',
'some change': 'sedikit perubahan',
'strongly agree': 'sangat setuju',
'strongly disagree': 'sangat tidak setuju',
'Thank you for taking this survey and helping us to increase the quality of our trainings.': 'Terima kasih kerana sudi mengambil kaji selidik ini dan membantu kami untuk meningkatkan kualiti latihan kami.',
'Thank you for your participation.': 'Terima kasih atas penyertaan anda.',
'The information collected through this questionnaire will be treated as confidential.': 'Maklumat yang dikumpul melalui soal selidik ini akan dianggap sebagai sulit.',
'We have 8 simple questions for you, this should not take more than 15mn of your time.': 'Kami menyediakan 8 soalan mudah untuk anda, ini tidak sepatutnya mengambil lebih daripada 15minits masa anda.',
'yes': 'ya',
'You are receiving this email as a participant of %(event_name)s held in %(location)s on %(date)s.': 'Anda menerima e-mel ini sebagai peserta %(event_name)s yang diadakan di %(location)s pada %(date)s.',
}
| 56.875 | 206 | 0.738462 |
{
'Malaysian': 'Bahasa Melayu',
'agree': 'setuju',
'all of it': 'semuanya',
'Dear %(person_name)s': 'Kepada %(person_name)s',
'disagree': 'tidak setuju',
'most of it': 'kebanyakannya',
'no': 'tidak',
'no change': 'tiada perubahan',
'not at all': 'tidak sama sekali',
'part of it': 'sebahagian daripadanya',
'significant change': 'perubahan yang ketara',
'some change': 'sedikit perubahan',
'strongly agree': 'sangat setuju',
'strongly disagree': 'sangat tidak setuju',
'Thank you for taking this survey and helping us to increase the quality of our trainings.': 'Terima kasih kerana sudi mengambil kaji selidik ini dan membantu kami untuk meningkatkan kualiti latihan kami.',
'Thank you for your participation.': 'Terima kasih atas penyertaan anda.',
'The information collected through this questionnaire will be treated as confidential.': 'Maklumat yang dikumpul melalui soal selidik ini akan dianggap sebagai sulit.',
'We have 8 simple questions for you, this should not take more than 15mn of your time.': 'Kami menyediakan 8 soalan mudah untuk anda, ini tidak sepatutnya mengambil lebih daripada 15minits masa anda.',
'yes': 'ya',
'You are receiving this email as a participant of %(event_name)s held in %(location)s on %(date)s.': 'Anda menerima e-mel ini sebagai peserta %(event_name)s yang diadakan di %(location)s pada %(date)s.',
}
| true | true |
f7feba3795eb81b778951d3476fcdcf2185683f7 | 38,828 | py | Python | GeoHealthCheck/app.py | dersteppenwolf/GeoHealthCheck | a1f4792dfca8f3774b4af9acd61de66a44103249 | [
"MIT"
] | null | null | null | GeoHealthCheck/app.py | dersteppenwolf/GeoHealthCheck | a1f4792dfca8f3774b4af9acd61de66a44103249 | [
"MIT"
] | null | null | null | GeoHealthCheck/app.py | dersteppenwolf/GeoHealthCheck | a1f4792dfca8f3774b4af9acd61de66a44103249 | [
"MIT"
] | null | null | null | # =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Just van den Broecke <justb4@gmail.com>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import base64
import csv
import json
import logging
from io import StringIO
from flask import (abort, flash, g, jsonify, redirect,
render_template, request, url_for)
from flask_babel import gettext
from flask_login import (LoginManager, login_user, logout_user,
current_user, login_required)
from flask_migrate import Migrate
from itertools import chain
import views
from __init__ import __version__
from enums import RESOURCE_TYPES
from factory import Factory
from init import App
from models import Resource, Run, ProbeVars, CheckVars, Tag, User, Recipient
from resourceauth import ResourceAuth
from util import send_email, geocode, format_checked_datetime, \
format_run_status, format_obj_value
# Module globals for convenience
LOGGER = logging.getLogger(__name__)
# Shared Flask app / config / DB handles, created once by the init factory.
APP = App.get_app()
CONFIG = App.get_config()
DB = App.get_db()
BABEL = App.get_babel()
MIGRATE = Migrate(APP, DB)
LOGIN_MANAGER = LoginManager()
LOGIN_MANAGER.init_app(APP)

# (code, display name) pairs offered in the UI language switcher.
LANGUAGES = (
    ('en', 'English'),
    ('fr', 'Français'),
    ('de', 'German'),
    ('nl_NL', 'Nederlands (Nederland)'),
    ('es_BO', 'Español (Bolivia)'),
    ('hr_HR', 'Croatian (Croatia)')
)

# Should GHC Runner be run within GHC webapp?
if CONFIG['GHC_RUNNER_IN_WEBAPP'] is True:
    LOGGER.info('Running GHC Scheduler in WebApp')
    from scheduler import start_schedule

    # Start scheduler
    start_schedule()
else:
    LOGGER.info('NOT Running GHC Scheduler in WebApp')
# commit or rollback shorthand
def db_commit():
    """Commit the current DB session, rolling back on failure.

    Returns:
        None on success, or the exception that caused the rollback.
    """
    err = None
    try:
        DB.session.commit()
    except Exception as exc:
        DB.session.rollback()
        # Bug fix: 'err' was never assigned, so callers checking the return
        # value (e.g. update()) could never observe a failed commit.
        err = exc
    # finally:
    #     DB.session.close()
    return err
@APP.before_request
def before_request():
    """Per-request setup: bind the current user and language onto flask.g,
    and enforce site-wide login when GHC_REQUIRE_WEBAPP_AUTH is enabled.

    Returning None lets Flask continue to the view; returning a redirect
    short-circuits the request to the login page.
    """
    g.user = current_user
    # Language may be switched per-request via ?lang=..., defaulting to 'en'.
    if request.args and 'lang' in request.args and request.args['lang'] != '':
        g.current_lang = request.args['lang']
    if not hasattr(g, 'current_lang'):
        g.current_lang = 'en'

    if CONFIG['GHC_REQUIRE_WEBAPP_AUTH'] is True:
        # Login is required to access GHC Webapp.
        # We need to pass-through static resources like CSS.
        if any(['/static/' in request.path,
                request.path.endswith('.ico'),
                g.user.is_authenticated(),  # This is from Flask-Login
                (request.endpoint is not None
                 and getattr(APP.view_functions[request.endpoint],
                             'is_public', False))]):
            return  # Access granted
        else:
            return redirect(url_for('login'))
# Marks (endpoint-) function as always to be accessible
# (used for GHC_REQUIRE_WEBAPP_AUTH)
def public_route(decorated_function):
    """Decorator: tag a view function as public so before_request skips the
    login requirement for it."""
    setattr(decorated_function, 'is_public', True)
    return decorated_function
@APP.teardown_appcontext
def shutdown_session(exception=None):
    """Release the scoped DB session when the app context tears down."""
    DB.session.remove()
@BABEL.localeselector
def get_locale():
    """Babel locale selector: use the language chosen in before_request,
    falling back to English."""
    return g.get('current_lang', 'en')
    # return request.accept_languages.best_match(LANGUAGES.keys())
@LOGIN_MANAGER.user_loader
def load_user(identifier):
    """Flask-Login user loader: fetch a User by its (stringified) primary key."""
    return User.query.get(int(identifier))
@LOGIN_MANAGER.unauthorized_handler
def unauthorized_callback():
    """Send anonymous users to the login page, preserving the original
    target URL (including query string) in the 'next' parameter."""
    target = '%s%s' % (request.script_root, request.path)
    if request.query_string:
        target = '%s?%s' % (target, request.query_string)
    return redirect(url_for('login', lang=g.current_lang, next=target))
@LOGIN_MANAGER.request_loader
def load_user_from_request(request):
    """Flask-Login request loader: authenticate a request via HTTP Basic Auth.

    Returns the authenticated User, aborts with 401 on bad credentials or a
    malformed Authorization header, or returns None when no header is
    present (anonymous request).
    """
    # Try to login using Basic Auth
    # Inspiration: https://flask-login.readthedocs.io
    # /en/latest/#custom-login-using-request-loader
    basic_auth_val = request.headers.get('Authorization')
    if basic_auth_val:
        basic_auth_val = basic_auth_val.replace('Basic ', '', 1)
        username = None
        user = None
        authenticated = False
        try:
            # Bug fixes vs. original:
            # - b64decode returns bytes on Python 3: decode before splitting.
            # - split(':', 1) so passwords containing ':' still work
            #   (usernames cannot contain ':').
            # - 'except' (not 'finally: pass') so a malformed header is
            #   treated as a failed login instead of raising a 500.
            decoded = base64.b64decode(basic_auth_val).decode('utf-8')
            username, password = decoded.split(':', 1)
            user = User.query.filter_by(username=username).first()
            if user:
                authenticated = user.authenticate(password)
        except Exception:
            authenticated = False
        if not authenticated:
            LOGGER.warning('Unauthorized access for user=%s' % username)
            abort(401)
        else:
            return user
    # TODO: may add login via api-key or token here
    # finally, return None if both methods did not login the user
    return None
@APP.template_filter('cssize_reliability')
def cssize_reliability(value, css_type=None):
    """returns CSS button class snippet based on score"""
    number = int(value)
    matrix = CONFIG['GHC_RELIABILITY_MATRIX']

    # Fallback when the score falls outside every configured band
    # (should never really happen).
    score, panel = 'info', 'blue'

    # (config colour, bootstrap button class, panel colour), checked in order.
    for colour, btn_class, panel_colour in (('red', 'danger', 'red'),
                                            ('orange', 'warning', 'yellow'),
                                            ('green', 'success', 'green')):
        band = matrix[colour]
        if band['min'] <= number <= band['max']:
            score, panel = btn_class, panel_colour
            break

    if css_type is not None and css_type == 'panel':
        return panel
    return score
@APP.template_filter('cssize_reliability2')
def cssize_reliability2(value):
    """returns CSS panel class snippet based on score"""
    return cssize_reliability(value, css_type='panel')
@APP.template_filter('round2')
def round2(value):
    """rounds a number to 2 decimal places except for values of 0 or 100"""
    if value == 0.0 or value == 100.0:
        return int(value)
    return round(value, 2)
@APP.context_processor
def context_processors():
    """global context processors for templates"""
    type_counts = views.get_resource_types_counts()
    tag_counts = views.get_tag_counts()
    return dict(
        app_version=__version__,
        resource_types=RESOURCE_TYPES,
        resource_types_counts=type_counts['counts'],
        resources_total=type_counts['total'],
        languages=LANGUAGES,
        tags=tag_counts,
        tagnames=list(tag_counts.keys()),
    )
@APP.route('/')
def home():
    """homepage"""
    summary = views.get_health_summary()
    return render_template('home.html', response=summary)
@APP.route('/csv', endpoint='csv')
@APP.route('/json', endpoint='json')
def export():
    """Export the (optionally filtered) resource list as JSON or CSV,
    depending on which route was matched."""
    # Optional filters: resource type (must be a known type) and free-text q.
    resource_type = None

    if request.args.get('resource_type') in RESOURCE_TYPES.keys():
        resource_type = request.args['resource_type']

    query = request.args.get('q')

    response = views.list_resources(resource_type, query)

    if request.url_rule.rule == '/json':
        json_dict = {'total': response['total'], 'resources': []}
        for r in response['resources']:
            try:
                ghc_url = '%s/resource/%s' % \
                          (CONFIG['GHC_SITE_URL'], r.identifier)
                last_run_report = '-'
                if r.last_run:
                    last_run_report = r.last_run.report

                json_dict['resources'].append({
                    'resource_type': r.resource_type,
                    'title': r.title,
                    'url': r.url,
                    'ghc_url': ghc_url,
                    'ghc_json': '%s/json' % ghc_url,
                    'ghc_csv': '%s/csv' % ghc_url,
                    'first_run': format_checked_datetime(r.first_run),
                    'last_run': format_checked_datetime(r.last_run),
                    'status': format_run_status(r.last_run),
                    'min_response_time': round(r.min_response_time, 2),
                    'average_response_time': round(r.average_response_time, 2),
                    'max_response_time': round(r.max_response_time, 2),
                    'reliability': round(r.reliability, 2),
                    'last_report': format_obj_value(last_run_report)
                })
            except Exception as e:
                # Best-effort export: skip a broken resource but keep going.
                LOGGER.warning(
                    'JSON error resource id=%d: %s' % (r.identifier, str(e)))

        return jsonify(json_dict)
    elif request.url_rule.rule == '/csv':
        output = StringIO()
        writer = csv.writer(output)
        header = [
            'resource_type', 'title', 'url', 'ghc_url', 'ghc_json', 'ghc_csv',
            'first_run', 'last_run', 'status', 'min_response_time',
            'average_response_time', 'max_response_time', 'reliability'
        ]
        writer.writerow(header)
        for r in response['resources']:
            try:
                ghc_url = '%s%s' % (CONFIG['GHC_SITE_URL'],
                                    url_for('get_resource_by_id',
                                            identifier=r.identifier))
                writer.writerow([
                    r.resource_type,
                    r.title,
                    r.url,
                    ghc_url,
                    '%s/json' % ghc_url,
                    '%s/csv' % ghc_url,
                    format_checked_datetime(r.first_run),
                    format_checked_datetime(r.last_run),
                    format_run_status(r.last_run),
                    round(r.min_response_time, 2),
                    round(r.average_response_time, 2),
                    round(r.max_response_time, 2),
                    round(r.reliability, 2)
                ])
            except Exception as e:
                # Best-effort export: skip a broken resource but keep going.
                LOGGER.warning(
                    'CSV error resource id=%d: %s' % (r.identifier, str(e)))
        return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/opensearch')
def opensearch():
    """generate OpenSearch description document"""
    body = render_template('opensearch_description.xml')
    return body, 200, {'Content-type': 'text/xml'}
@APP.route('/resource/<identifier>/csv', endpoint='csv-resource')
@APP.route('/resource/<identifier>/json', endpoint='json-resource')
def export_resource(identifier):
    """Export a single resource's summary as JSON or CSV, depending on
    which route was matched."""
    resource = views.get_resource_by_id(identifier)

    # Absolute links to this resource's run-history exports.
    history_csv = '%s/resource/%s/history/csv' % (CONFIG['GHC_SITE_URL'],
                                                  resource.identifier)
    history_json = '%s/resource/%s/history/json' % (CONFIG['GHC_SITE_URL'],
                                                    resource.identifier)
    if 'json' in request.url_rule.rule:
        last_run_report = '-'
        if resource.last_run:
            last_run_report = resource.last_run.report

        json_dict = {
            'identifier': resource.identifier,
            'title': resource.title,
            'url': resource.url,
            'resource_type': resource.resource_type,
            'owner': resource.owner.username,
            'min_response_time': resource.min_response_time,
            'average_response_time': resource.average_response_time,
            'max_response_time': resource.max_response_time,
            'reliability': resource.reliability,
            'status': format_run_status(resource.last_run),
            'first_run': format_checked_datetime(resource.first_run),
            'last_run': format_checked_datetime(resource.last_run),
            'history_csv': history_csv,
            'history_json': history_json,
            'last_report': format_obj_value(last_run_report)
        }
        return jsonify(json_dict)
    elif 'csv' in request.url_rule.rule:
        output = StringIO()
        writer = csv.writer(output)
        header = [
            'identifier', 'title', 'url', 'resource_type', 'owner',
            'min_response_time', 'average_response_time', 'max_response_time',
            'reliability', 'status', 'first_run', 'last_run', 'history_csv',
            'history_json'
        ]
        writer.writerow(header)
        writer.writerow([
            resource.identifier,
            resource.title,
            resource.url,
            resource.resource_type,
            resource.owner.username,
            resource.min_response_time,
            resource.average_response_time,
            resource.max_response_time,
            resource.reliability,
            format_run_status(resource.last_run),
            format_checked_datetime(resource.first_run),
            format_checked_datetime(resource.last_run),
            history_csv,
            history_json
        ])
        return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/resource/<identifier>/history/csv',
           endpoint='csv-resource-history')
@APP.route('/resource/<identifier>/history/json',
           endpoint='json-resource-history')
def export_resource_history(identifier):
    """Export a resource's full run history (one entry per Run) as JSON or
    CSV, depending on which route was matched."""
    resource = views.get_resource_by_id(identifier)

    if 'json' in request.url_rule.rule:
        json_dict = {'runs': []}

        for run in resource.runs:
            json_dict['runs'].append({
                'owner': resource.owner.username,
                'resource_type': resource.resource_type,
                'checked_datetime': format_checked_datetime(run),
                'title': resource.title,
                'url': resource.url,
                'response_time': round(run.response_time, 2),
                'status': format_run_status(run)
            })
        return jsonify(json_dict)
    elif 'csv' in request.url_rule.rule:
        output = StringIO()
        writer = csv.writer(output)
        header = [
            'owner', 'resource_type', 'checked_datetime', 'title', 'url',
            'response_time', 'status'
        ]
        writer.writerow(header)
        for run in resource.runs:
            writer.writerow([
                resource.owner.username,
                resource.resource_type,
                format_checked_datetime(run),
                resource.title,
                resource.url,
                round(run.response_time, 2),
                format_run_status(run),
            ])
        return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/settings')
def settings():
    """settings"""
    # NOTE(review): unimplemented stub -- a Flask view returning None raises
    # an error at request time; implement or remove this route.
    pass
@APP.route('/resources')
def resources():
    """lists resources with optional filter"""
    # Only accept a resource_type filter that names a known type.
    requested_type = request.args.get('resource_type')
    if requested_type not in RESOURCE_TYPES.keys():
        requested_type = None

    listing = views.list_resources(requested_type,
                                   request.args.get('q'),
                                   request.args.get('tag'))
    return render_template('resources.html', response=listing)
@APP.route('/resource/<identifier>')
def get_resource_by_id(identifier):
    """show resource"""
    resource = views.get_resource_by_id(identifier)
    return render_template('resource.html', resource=resource)
@APP.route('/register', methods=['GET', 'POST'])
def register():
    """Self-register a new user account (when GHC_SELF_REGISTER is enabled).

    GET renders the form; POST validates uniqueness of username/email and
    creates the User, redirecting to the login page on success.
    """
    if not CONFIG['GHC_SELF_REGISTER']:
        msg1 = gettext('This site is not configured for self-registration')
        msg2 = gettext('Please contact')
        msg = '%s. %s %s' % (msg1, msg2,
                             CONFIG['GHC_ADMIN_EMAIL'])
        flash('%s' % msg, 'danger')
        return render_template('register.html', errmsg=msg)
    if request.method == 'GET':
        return render_template('register.html')

    # Check for existing user or email
    user = User.query.filter_by(username=request.form['username']).first()
    email = User.query.filter_by(email=request.form['email']).first()
    if user or email:
        flash('%s' % gettext('Invalid username or email'), 'danger')
        return render_template('register.html')

    user = User(request.form['username'],
                request.form['password'], request.form['email'])

    DB.session.add(user)
    try:
        DB.session.commit()
    except Exception as err:
        DB.session.rollback()
        # Bug fix: Exception.message does not exist on Python 3 -- use
        # str(err). Extracting the offending column from the DB error text
        # is best-effort, so guard against unexpected message formats.
        try:
            bad_column = str(err).split()[2]
            bad_value = request.form[bad_column]
        except (IndexError, KeyError):
            bad_column, bad_value = 'field', ''
        msg = gettext('already registered')
        flash('%s %s %s' % (bad_column, bad_value, msg), 'danger')
        return redirect(url_for('register', lang=g.current_lang))

    return redirect(url_for('login', lang=g.current_lang))
@APP.route('/add', methods=['GET', 'POST'])
@login_required
def add():
    """Add one or more monitored Resources from a submitted URL.

    The URL is "sniffed" to detect one or more concrete resources; each gets
    default Probe/Check configuration from GHC_PROBE_DEFAULTS, an initial
    test Run, and email notification to the current user. If exactly one
    resource was added, its edit form is shown next.
    """
    if not g.user.is_authenticated():
        return render_template('add.html')
    if request.method == 'GET':
        return render_template('add.html')
    resource_type = request.form['resource_type']
    tags = request.form.getlist('tags')
    url = request.form['url'].strip()
    resources_to_add = []

    from healthcheck import sniff_test_resource, run_test_resource

    sniffed_resources = sniff_test_resource(CONFIG, resource_type, url)

    if not sniffed_resources:
        msg = gettext("No resources detected")
        # Bug fix: LOGGER.exception() requires a message argument (it raised
        # TypeError); there is also no active exception here, so log a
        # warning instead.
        LOGGER.warning(msg)
        flash(msg, 'danger')

    for (resource_type, resource_url,
         title, success, response_time,
         message, start_time, resource_tags,) in sniffed_resources:

        # Resolve each tag name to an existing Tag or create a new one.
        tags_to_add = []
        for tag in chain(tags, resource_tags):
            tag_obj = tag
            if not isinstance(tag, Tag):
                tag_obj = Tag.query.filter_by(name=tag).first()
                if tag_obj is None:
                    tag_obj = Tag(name=tag)
            tags_to_add.append(tag_obj)

        resource_to_add = Resource(current_user,
                                   resource_type,
                                   title,
                                   resource_url,
                                   tags=tags_to_add)
        resources_to_add.append(resource_to_add)
        probe_to_add = None
        checks_to_add = []

        # Always add a default Probe and Check(s)
        # from the GHC_PROBE_DEFAULTS conf
        if resource_type in CONFIG['GHC_PROBE_DEFAULTS']:
            resource_settings = CONFIG['GHC_PROBE_DEFAULTS'][resource_type]
            probe_class = resource_settings['probe_class']
            if probe_class:
                # Add the default Probe
                probe_obj = Factory.create_obj(probe_class)
                probe_to_add = ProbeVars(
                    resource_to_add, probe_class,
                    probe_obj.get_default_parameter_values())

                # Add optional default (parameterized)
                # Checks to add to this Probe
                checks_info = probe_obj.get_checks_info()
                checks_param_info = probe_obj.get_plugin_vars()['CHECKS_AVAIL']
                for check_class in checks_info:
                    check_param_info = checks_param_info[check_class]
                    if 'default' in checks_info[check_class]:
                        if checks_info[check_class]['default']:
                            # Filter out params for Check with fixed values
                            param_defs = check_param_info['PARAM_DEFS']
                            param_vals = {}
                            for param in param_defs:
                                if param_defs[param]['value']:
                                    param_vals[param] = \
                                        param_defs[param]['value']
                            check_vars = CheckVars(
                                probe_to_add, check_class, param_vals)
                            checks_to_add.append(check_vars)

        # Run an immediate first test so the resource has an initial Run.
        result = run_test_resource(resource_to_add)

        run_to_add = Run(resource_to_add, result)

        DB.session.add(resource_to_add)
        # prepopulate notifications for current user
        resource_to_add.set_recipients('email', [g.user.email])

        if probe_to_add:
            DB.session.add(probe_to_add)
        for check_to_add in checks_to_add:
            DB.session.add(check_to_add)
        DB.session.add(run_to_add)

    try:
        DB.session.commit()
        msg = gettext('Services registered')
        flash('%s (%s, %s)' % (msg, resource_type, url), 'success')
    except Exception as err:
        DB.session.rollback()
        flash(str(err), 'danger')
        return redirect(url_for('home', lang=g.current_lang))

    if len(resources_to_add) == 1:
        return edit_resource(resources_to_add[0].identifier)
    return redirect(url_for('home', lang=g.current_lang))
@APP.route('/resource/<int:resource_identifier>/update', methods=['POST'])
@login_required
def update(resource_identifier):
    """Update a Resource from a JSON body posted by the edit form.

    Recognized keys: 'tags' (full replacement set), 'probes' (full
    replacement list with nested checks), 'notify_emails' /
    'notify_webhooks', 'auth', plus any plain Resource attribute. Responds
    with JSON {'status': 'success'} or {'status': <error text>}.
    """

    update_counter = 0
    status = 'success'

    try:
        resource_identifier_dict = request.get_json()

        resource = Resource.query.filter_by(
            identifier=resource_identifier).first()

        for key, value in resource_identifier_dict.items():
            if key == 'tags':
                # Diff submitted tag names against the resource's current set.
                resource_tags = [t.name for t in resource.tags]

                tags_to_add = set(value) - set(resource_tags)
                tags_to_delete = set(resource_tags) - set(value)

                # Existing Tags: create relation else add new Tag
                all_tag_objs = Tag.query.all()
                for tag in tags_to_add:
                    tag_add_obj = None
                    for tag_obj in all_tag_objs:
                        if tag == tag_obj.name:
                            # use existing
                            tag_add_obj = tag_obj
                            break

                    if not tag_add_obj:
                        # add new
                        tag_add_obj = Tag(name=tag)
                        DB.session.add(tag_add_obj)
                    resource.tags.append(tag_add_obj)

                for tag in tags_to_delete:
                    tag_to_delete = Tag.query.filter_by(name=tag).first()
                    resource.tags.remove(tag_to_delete)

                update_counter += 1
            elif key == 'probes':
                # Remove all existing ProbeVars for Resource
                for probe_var in resource.probe_vars:
                    resource.probe_vars.remove(probe_var)

                # Add ProbeVars anew each with optional CheckVars
                for probe in value:
                    LOGGER.info('adding Probe class=%s parms=%s' %
                                (probe['probe_class'], str(probe)))
                    probe_vars = ProbeVars(resource, probe['probe_class'],
                                           probe['parameters'])
                    for check in probe['checks']:
                        check_vars = CheckVars(
                            probe_vars, check['check_class'],
                            check['parameters'])
                        probe_vars.check_vars.append(check_vars)
                    resource.probe_vars.append(probe_vars)

                update_counter += 1
            elif key == 'notify_emails':
                # Replace recipients, dropping blank entries from the form.
                resource.set_recipients('email',
                                        [v for v in value if v.strip()])
            elif key == 'notify_webhooks':
                resource.set_recipients('webhook',
                                        [v for v in value if v.strip()])
            elif key == 'auth':
                resource.auth = value
            elif getattr(resource, key) != resource_identifier_dict[key]:
                # Update other resource attrs, mainly 'name'
                setattr(resource, key, resource_identifier_dict[key])
                # Clamp run frequency to the configured minimum.
                min_run_freq = CONFIG['GHC_MINIMAL_RUN_FREQUENCY_MINS']
                if int(resource.run_frequency) < min_run_freq:
                    resource.run_frequency = min_run_freq
                update_counter += 1

        # Always update geo-IP: maybe failure on creation or
        # IP-address of URL may have changed.
        latitude, longitude = geocode(resource.url)
        if latitude != 0.0 and longitude != 0.0:
            # Only update for valid lat/lon
            resource.latitude = latitude
            resource.longitude = longitude
            update_counter += 1

    except Exception as err:
        LOGGER.error("Cannot update resource: %s", err, exc_info=err)
        DB.session.rollback()
        status = str(err)
        update_counter = 0
    # finally:
    #     DB.session.close()

    # Only commit when at least one change was staged.
    if update_counter > 0:
        err = db_commit()
        if err:
            status = str(err)
    return jsonify({'status': status})
@APP.route('/resource/<int:resource_identifier>/test', methods=['GET', 'POST'])
@login_required
def test(resource_identifier):
    """Run an on-demand health test of a Resource.

    GET flashes the outcome and redirects back to the resource page;
    POST returns the raw test report as JSON (used by the edit UI).
    """
    resource = Resource.query.filter_by(identifier=resource_identifier).first()
    if resource is None:
        flash(gettext('Resource not found'), 'danger')
        return redirect(request.referrer)

    from healthcheck import run_test_resource
    result = run_test_resource(
        resource)

    if request.method == 'GET':
        # Map the result message onto a flash category for the UI.
        if result.message == 'Skipped':
            msg = gettext('INFO')
            flash('%s: %s' % (msg, result.message), 'info')
        elif result.message not in ['OK', None, 'None']:
            msg = gettext('ERROR')
            flash('%s: %s' % (msg, result.message), 'danger')
        else:
            flash(gettext('Resource tested successfully'), 'success')

        return redirect(url_for('get_resource_by_id', lang=g.current_lang,
                                identifier=resource_identifier))
    elif request.method == 'POST':
        return jsonify(result.get_report())
@APP.route('/resource/<int:resource_identifier>/edit')
@login_required
def edit_resource(resource_identifier):
    """edit a resource"""
    resource = Resource.query.filter_by(
        identifier=resource_identifier).first()
    if resource is None:
        flash(gettext('Resource not found'), 'danger')
        return redirect(request.referrer)

    email_suggestions = json.dumps(
        Recipient.get_suggestions('email', g.user.username))

    return render_template(
        'edit_resource.html',
        lang=g.current_lang,
        resource=resource,
        suggestions=email_suggestions,
        auths_avail=ResourceAuth.get_auth_defs(),
        probes_avail=views.get_probes_avail(resource.resource_type, resource))
@APP.route('/resource/<int:resource_identifier>/delete')
@login_required
def delete(resource_identifier):
    """Delete a Resource (admins, or the resource's owner only)."""
    resource = Resource.query.filter_by(identifier=resource_identifier).first()
    # Bug fix: check for a missing resource BEFORE touching its attributes
    # (the original accessed resource.owner first, raising AttributeError
    # for an unknown identifier).
    if resource is None:
        flash(gettext('Resource not found'), 'danger')
        return redirect(url_for('home', lang=g.current_lang))
    if g.user.role != 'admin' and g.user.username != resource.owner.username:
        msg = gettext('You do not have access to delete this resource')
        flash(msg, 'danger')
        return redirect(url_for('get_resource_by_id', lang=g.current_lang,
                                identifier=resource_identifier))

    resource.clear_recipients()
    DB.session.delete(resource)

    try:
        DB.session.commit()
        flash(gettext('Resource deleted'), 'success')
        return redirect(url_for('home', lang=g.current_lang))
    except Exception as err:
        DB.session.rollback()
        flash(str(err), 'danger')
        # Bug fix: request.referrer is a URL, not an endpoint name --
        # url_for(request.referrer) raised a BuildError. Redirect to the
        # referrer directly, falling back to home.
        return redirect(request.referrer or
                        url_for('home', lang=g.current_lang))
@APP.route('/probe/<string:probe_class>/<int:resource_identifier>/edit_form')
@APP.route('/probe/<string:probe_class>/edit_form')
@login_required
def get_probe_edit_form(probe_class, resource_identifier=None):
    """Render the HTML fragment for editing a Probe of the given class,
    optionally expanded with parameter values from an existing Resource.

    :param probe_class: dotted class name of the Probe plugin
    :param resource_identifier: optional Resource id used to expand
        resource-specific parameter choices
    """

    probe_obj = Factory.create_obj(probe_class)
    if resource_identifier:
        # Bind the resource so the Probe can expand resource-specific params.
        resource = views.get_resource_by_id(resource_identifier)
        if resource:
            probe_obj._resource = resource
            probe_obj.expand_params(resource)

    probe_info = probe_obj.get_plugin_vars()
    probe_vars = ProbeVars(
        None, probe_class, probe_obj.get_default_parameter_values())

    # Get only the default Checks for this Probe class
    checks_avail = probe_obj.get_checks_info_defaults()
    checks_avail = probe_obj.expand_check_vars(checks_avail)

    for check_class in checks_avail:
        check_obj = Factory.create_obj(check_class)
        check_params = check_obj.get_default_parameter_values()
        probe_check_param_defs = \
            probe_info['CHECKS_AVAIL'][check_class]['PARAM_DEFS']
        # Probe-level PARAM_DEFS may pin fixed values; these override the
        # Check's own defaults.
        for param in probe_check_param_defs:
            if 'value' in probe_check_param_defs[param]:
                check_params[param] = probe_check_param_defs[param]['value']

        # Appends 'check_vars' to 'probe_vars' (SQLAlchemy)
        CheckVars(probe_vars, check_class, check_params)

    return render_template('includes/probe_edit_form.html',
                           lang=g.current_lang,
                           probe=probe_vars, probe_info=probe_info)
@APP.route('/check/<string:check_class>/edit_form')
@login_required
def get_check_edit_form(check_class):
    """get the form to edit a Check"""
    check_obj = Factory.create_obj(check_class)
    defaults = check_obj.get_default_parameter_values()
    check_vars = CheckVars(None, check_class, defaults)
    return render_template('includes/check_edit_form.html',
                           lang=g.current_lang,
                           check=check_vars,
                           check_info=check_obj.get_plugin_vars())
@APP.route('/login', methods=['GET', 'POST'])
@public_route
def login():
    """Log a user in via the login form.

    GET renders the form; POST checks the credentials and redirects to the
    requested 'next' URL (or home) on success, or back to the form with a
    flash message on failure.
    """
    if request.method == 'GET':
        return render_template('login.html')
    username = request.form['username']
    password = request.form['password']
    registered_user = User.query.filter_by(username=username).first()
    authenticated = False
    if registered_user:
        # May not have upgraded to pw encryption: warn
        # (encrypted password hashes are >= 80 chars; shorter means legacy
        # plaintext storage).
        if len(registered_user.password) < 80:
            msg = 'Please upgrade GHC to encrypted passwords first, see docs!'
            flash(gettext(msg), 'danger')
            return redirect(url_for('login', lang=g.current_lang))
        # NOTE(review): try/finally does NOT swallow exceptions -- an error
        # in authenticate() would propagate as a 500. Presumably 'except'
        # was intended; confirm before changing.
        try:
            authenticated = registered_user.authenticate(password)
        finally:
            pass
    if not authenticated:
        flash(gettext('Invalid username and / or password'), 'danger')
        return redirect(url_for('login', lang=g.current_lang))
    # Login ok
    login_user(registered_user)

    if 'next' in request.args:
        return redirect(request.args.get('next'))
    return redirect(url_for('home', lang=g.current_lang))
@APP.route('/logout')
def logout():
    """End the current session and return to the referring page or home."""
    logout_user()
    flash(gettext('Logged out'), 'success')
    fallback = url_for('home', lang=g.current_lang)
    return redirect(request.referrer or fallback)
@APP.route('/reset_req', methods=['GET', 'POST'])
@public_route
def reset_req():
    """
    Reset password request handling.

    GET renders the request form; POST looks up the account by email and,
    if found, mails a tokenized reset link to the registered address.
    """
    if request.method == 'GET':
        return render_template('reset_password_request.html')

    # Reset request form with email
    email = request.form['email']
    registered_user = User.query.filter_by(email=email).first()
    if registered_user is None:
        # logging.warn() is a deprecated alias (removed in Python 3.13):
        # use warning() with lazy %-style args.
        LOGGER.warning('Invalid email for reset_req: %s', email)
        flash(gettext('Invalid email'), 'danger')
        return redirect(url_for('reset_req', lang=g.current_lang))

    # Generate reset url using user-specific token
    token = registered_user.get_token()
    reset_url = '%s/reset/%s' % (CONFIG['GHC_SITE_URL'], token)

    # Create message body with reset link
    msg_body = render_template('reset_password_email.txt',
                               lang=g.current_lang, config=CONFIG,
                               reset_url=reset_url,
                               username=registered_user.username)

    try:
        from email.mime.text import MIMEText
        from email.utils import formataddr

        msg = MIMEText(msg_body, 'plain', 'utf-8')
        msg['From'] = formataddr((CONFIG['GHC_SITE_TITLE'],
                                  CONFIG['GHC_ADMIN_EMAIL']))
        msg['To'] = registered_user.email
        msg['Subject'] = '[%s] %s' % (CONFIG['GHC_SITE_TITLE'],
                                      gettext('reset password'))

        from_addr = '%s <%s>' % (CONFIG['GHC_SITE_TITLE'],
                                 CONFIG['GHC_ADMIN_EMAIL'])
        to_addr = registered_user.email

        msg_text = msg.as_string()
        send_email(CONFIG['GHC_SMTP'], from_addr, to_addr, msg_text)
    except Exception as err:
        msg = 'Cannot send email. Contact admin: '
        LOGGER.warning('%s err=%s', msg, err)
        flash(gettext(msg) + CONFIG['GHC_ADMIN_EMAIL'], 'danger')
        return redirect(url_for('login', lang=g.current_lang))

    flash(gettext('Password reset link sent via email'), 'success')

    if 'next' in request.args:
        return redirect(request.args.get('next'))
    return redirect(url_for('home', lang=g.current_lang))
@APP.route('/reset/<token>', methods=['GET', 'POST'])
@public_route
def reset(token=None):
    """
    Reset password submit form handling.

    Verifies the emailed token, renders the form (GET) and applies the
    new password (POST).
    """
    # Must have at least a token to proceed.
    if token is None:
        return redirect(url_for('reset_req', lang=g.current_lang))

    # Token received: verify if ok, may also time-out.
    registered_user = User.verify_token(token)
    if registered_user is None:
        LOGGER.warning('Cannot find User from token: %s', token)
        flash(gettext('Invalid token'), 'danger')
        return redirect(url_for('login', lang=g.current_lang))

    # Token and user ok: return reset form.
    if request.method == 'GET':
        return render_template('reset_password_form.html')

    # Valid token and user: change password from form-value
    password = request.form['password']
    if not password:
        flash(gettext('Password required'), 'danger')
        # BUG FIX: url_for() takes an endpoint NAME, not a path;
        # 'reset/<token>' raised BuildError (HTTP 500) on empty password.
        return redirect(url_for('reset', token=token, lang=g.current_lang))
    registered_user.set_password(password)
    DB.session.add(registered_user)
    try:
        DB.session.commit()
        flash(gettext('Update password OK'), 'success')
    except Exception as err:
        msg = 'Update password failed!'
        LOGGER.warning('%s err=%s', msg, err)
        DB.session.rollback()
        flash(gettext(msg), 'danger')

    # Finally redirect user to login page
    return redirect(url_for('login', lang=g.current_lang))
#
# REST Interface Calls
#
@APP.route('/api/v1.0/summary')
@APP.route('/api/v1.0/summary/')
@APP.route('/api/v1.0/summary.<content_type>')
def api_summary(content_type='json'):
    """
    Get health summary for all Resources within this instance.
    """
    summary = views.get_health_summary()

    # Replace Run model objects with JSON-serializable dicts.
    for key in ('first_run', 'last_run'):
        run_obj = summary.get(key)
        if run_obj:
            summary[key] = run_obj.for_json()

    # Same for the list of currently failing Resources.
    summary['failed_resources'] = [
        res.for_json() for res in summary['failed_resources']]

    if content_type == 'json':
        return jsonify(summary)
    report = render_template('status_report_email.txt',
                             lang=g.current_lang, summary=summary)
    return '<pre>\n%s\n</pre>' % report
@APP.route('/api/v1.0/probes-avail/')
@APP.route('/api/v1.0/probes-avail/<resource_type>')
@APP.route('/api/v1.0/probes-avail/<resource_type>/<int:resource_id>')
def api_probes_avail(resource_type=None, resource_id=None):
    """
    Get available (configured) Probes for this
    installation, optional for resource type
    """
    resource = views.get_resource_by_id(resource_id) if resource_id else None
    return jsonify(views.get_probes_avail(resource_type=resource_type,
                                          resource=resource))
@APP.route('/api/v1.0/runs/<int:resource_id>')
@APP.route('/api/v1.0/runs/<int:resource_id>.<content_type>')
@APP.route('/api/v1.0/runs/<int:resource_id>/<int:run_id>')
@APP.route('/api/v1.0/runs/<int:resource_id>/<int:run_id>.<content_type>')
def api_runs(resource_id, run_id=None, content_type='json'):
    """
    Get Runs (History of results) for Resource, optionally a single Run
    by id, as JSON (default) or an HTML fragment.
    """
    if run_id:
        # ROBUSTNESS FIX: an unknown run_id used to yield [None] and crash
        # with AttributeError below; treat it as an empty result instead.
        run = views.get_run_by_id(run_id)
        runs = [run] if run else []
    else:
        runs = views.get_run_by_resource_id(resource_id)

    run_arr = []
    for run in runs:
        run_dict = {
            'id': run.identifier,
            'success': run.success,
            'response_time': run.response_time,
            'checked_datetime': run.checked_datetime,
            'message': run.message,
            'report': run.report
        }
        run_arr.append(run_dict)

    runs_dict = {'total': len(run_arr), 'runs': run_arr}
    # Unknown content types fall through to the literal string 'unknown'.
    result = 'unknown'
    if content_type == 'json':
        result = jsonify(runs_dict)
    elif content_type == 'html':
        result = render_template('includes/runs.html',
                                 lang=g.current_lang, runs=runs_dict['runs'])
    return result
if __name__ == '__main__':  # run locally, for fun
    import sys

    # Defaults; optionally overridden by a single "host:port" CLI argument.
    HOST = '0.0.0.0'
    PORT = 8000
    if len(sys.argv) > 1:
        HOST, PORT = sys.argv[1].split(':')
    # Dev server only (reloader + debug): not for production deployment.
    APP.run(host=HOST, port=int(PORT), use_reloader=True, debug=True)
| 35.298182 | 79 | 0.604152 |
import base64
import csv
import json
import logging
from io import StringIO
from flask import (abort, flash, g, jsonify, redirect,
render_template, request, url_for)
from flask_babel import gettext
from flask_login import (LoginManager, login_user, logout_user,
current_user, login_required)
from flask_migrate import Migrate
from itertools import chain
import views
from __init__ import __version__
from enums import RESOURCE_TYPES
from factory import Factory
from init import App
from models import Resource, Run, ProbeVars, CheckVars, Tag, User, Recipient
from resourceauth import ResourceAuth
from util import send_email, geocode, format_checked_datetime, \
format_run_status, format_obj_value
LOGGER = logging.getLogger(__name__)
APP = App.get_app()
CONFIG = App.get_config()
DB = App.get_db()
BABEL = App.get_babel()
MIGRATE = Migrate(APP, DB)
LOGIN_MANAGER = LoginManager()
LOGIN_MANAGER.init_app(APP)
LANGUAGES = (
('en', 'English'),
('fr', 'Français'),
('de', 'German'),
('nl_NL', 'Nederlands (Nederland)'),
('es_BO', 'Español (Bolivia)'),
('hr_HR', 'Croatian (Croatia)')
)
if CONFIG['GHC_RUNNER_IN_WEBAPP'] is True:
LOGGER.info('Running GHC Scheduler in WebApp')
from scheduler import start_schedule
start_schedule()
else:
LOGGER.info('NOT Running GHC Scheduler in WebApp')
def db_commit():
    """Commit the current DB session.

    Returns None on success, or the caught exception after rolling the
    session back, so callers (e.g. update()) can surface the failure.
    """
    err = None
    try:
        DB.session.commit()
    except Exception as exc:
        # BUG FIX: the exception was never bound to `err`, so this function
        # always returned None and callers' `if err:` branch was dead code.
        DB.session.rollback()
        err = exc
    return err
@APP.before_request
def before_request():
g.user = current_user
if request.args and 'lang' in request.args and request.args['lang'] != '':
g.current_lang = request.args['lang']
if not hasattr(g, 'current_lang'):
g.current_lang = 'en'
if CONFIG['GHC_REQUIRE_WEBAPP_AUTH'] is True:
if any(['/static/' in request.path,
request.path.endswith('.ico'),
g.user.is_authenticated(),
(request.endpoint is not None
and getattr(APP.view_functions[request.endpoint],
'is_public', False))]):
return
else:
return redirect(url_for('login'))
def public_route(decorated_function):
    """Mark a view as reachable without authentication.

    before_request() inspects the `is_public` attribute set here.
    """
    setattr(decorated_function, 'is_public', True)
    return decorated_function
@APP.teardown_appcontext
def shutdown_session(exception=None):
DB.session.remove()
@BABEL.localeselector
def get_locale():
return g.get('current_lang', 'en')
@LOGIN_MANAGER.user_loader
def load_user(identifier):
return User.query.get(int(identifier))
@LOGIN_MANAGER.unauthorized_handler
def unauthorized_callback():
if request.query_string:
url = '%s%s?%s' % (request.script_root, request.path,
request.query_string)
else:
url = '%s%s' % (request.script_root, request.path)
return redirect(url_for('login', lang=g.current_lang, next=url))
@LOGIN_MANAGER.request_loader
def load_user_from_request(request):
    """Flask-Login request_loader: authenticate via the HTTP Basic Auth header.

    Returns the User on success, None when no Authorization header is
    present, and aborts with 401 on bad or malformed credentials.
    """
    # BUG FIX: this statement was truncated to "ers.get('Authorization')"
    # (NameError at runtime); restored to read the request header.
    basic_auth_val = request.headers.get('Authorization')
    if basic_auth_val:
        basic_auth_val = basic_auth_val.replace('Basic ', '', 1)
        authenticated = False
        user = None
        username = None
        try:
            # BUG FIX: b64decode returns bytes under Python 3 -- decode to
            # str before splitting; maxsplit=1 allows ':' inside passwords.
            creds = base64.b64decode(basic_auth_val).decode('utf-8')
            username, password = creds.split(':', 1)
            user = User.query.filter_by(username=username).first()
            if user:
                authenticated = user.authenticate(password)
        except Exception:
            # Malformed base64/header: treat as failed authentication
            # rather than an unhandled 500.
            authenticated = False
        if not authenticated:
            LOGGER.warning('Unauthorized access for user=%s' % username)
            abort(401)
        else:
            return user
    return None
@APP.template_filter('cssize_reliability')
def cssize_reliability(value, css_type=None):
    """Map a reliability percentage to a Bootstrap context name.

    Thresholds come from CONFIG['GHC_RELIABILITY_MATRIX'] (red/orange/green
    min..max bands).  Returns a panel colour name when css_type == 'panel',
    otherwise a score class ('danger'/'warning'/'success'/'info').
    """
    number = int(value)

    if CONFIG['GHC_RELIABILITY_MATRIX']['red']['min'] <= number <= \
            CONFIG['GHC_RELIABILITY_MATRIX']['red']['max']:
        score = 'danger'
        panel = 'red'
    elif (CONFIG['GHC_RELIABILITY_MATRIX']['orange']['min'] <= number <=
            CONFIG['GHC_RELIABILITY_MATRIX']['orange']['max']):
        score = 'warning'
        panel = 'yellow'
    elif (CONFIG['GHC_RELIABILITY_MATRIX']['green']['min'] <= number <=
            CONFIG['GHC_RELIABILITY_MATRIX']['green']['max']):
        score = 'success'
        panel = 'green'
    else:  # value outside all configured bands
        score = 'info'
        panel = 'blue'

    if css_type is not None and css_type == 'panel':
        return panel
    else:
        return score
@APP.template_filter('cssize_reliability2')
def cssize_reliability2(value):
return cssize_reliability(value, 'panel')
@APP.template_filter('round2')
def round2(value):
    """Round to two decimals; show exact 0 / 100 endpoints as bare integers."""
    exact_endpoints = [0.0, 100.0]
    return int(value) if value in exact_endpoints else round(value, 2)
@APP.context_processor
def context_processors():
rtc = views.get_resource_types_counts()
tags = views.get_tag_counts()
return {
'app_version': __version__,
'resource_types': RESOURCE_TYPES,
'resource_types_counts': rtc['counts'],
'resources_total': rtc['total'],
'languages': LANGUAGES,
'tags': tags,
'tagnames': list(tags.keys())
}
@APP.route('/')
def home():
response = views.get_health_summary()
return render_template('home.html', response=response)
@APP.route('/csv', endpoint='csv')
@APP.route('/json', endpoint='json')
def export():
resource_type = None
if request.args.get('resource_type') in RESOURCE_TYPES.keys():
resource_type = request.args['resource_type']
query = request.args.get('q')
response = views.list_resources(resource_type, query)
if request.url_rule.rule == '/json':
json_dict = {'total': response['total'], 'resources': []}
for r in response['resources']:
try:
ghc_url = '%s/resource/%s' % \
(CONFIG['GHC_SITE_URL'], r.identifier)
last_run_report = '-'
if r.last_run:
last_run_report = r.last_run.report
json_dict['resources'].append({
'resource_type': r.resource_type,
'title': r.title,
'url': r.url,
'ghc_url': ghc_url,
'ghc_json': '%s/json' % ghc_url,
'ghc_csv': '%s/csv' % ghc_url,
'first_run': format_checked_datetime(r.first_run),
'last_run': format_checked_datetime(r.last_run),
'status': format_run_status(r.last_run),
'min_response_time': round(r.min_response_time, 2),
'average_response_time': round(r.average_response_time, 2),
'max_response_time': round(r.max_response_time, 2),
'reliability': round(r.reliability, 2),
'last_report': format_obj_value(last_run_report)
})
except Exception as e:
LOGGER.warning(
'JSON error resource id=%d: %s' % (r.identifier, str(e)))
return jsonify(json_dict)
elif request.url_rule.rule == '/csv':
output = StringIO()
writer = csv.writer(output)
header = [
'resource_type', 'title', 'url', 'ghc_url', 'ghc_json', 'ghc_csv',
'first_run', 'last_run', 'status', 'min_response_time',
'average_response_time', 'max_response_time', 'reliability'
]
writer.writerow(header)
for r in response['resources']:
try:
ghc_url = '%s%s' % (CONFIG['GHC_SITE_URL'],
url_for('get_resource_by_id',
identifier=r.identifier))
writer.writerow([
r.resource_type,
r.title,
r.url,
ghc_url,
'%s/json' % ghc_url,
'%s/csv' % ghc_url,
format_checked_datetime(r.first_run),
format_checked_datetime(r.last_run),
format_run_status(r.last_run),
round(r.min_response_time, 2),
round(r.average_response_time, 2),
round(r.max_response_time, 2),
round(r.reliability, 2)
])
except Exception as e:
LOGGER.warning(
'CSV error resource id=%d: %s' % (r.identifier, str(e)))
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/opensearch')
def opensearch():
content = render_template('opensearch_description.xml')
return content, 200, {'Content-type': 'text/xml'}
@APP.route('/resource/<identifier>/csv', endpoint='csv-resource')
@APP.route('/resource/<identifier>/json', endpoint='json-resource')
def export_resource(identifier):
resource = views.get_resource_by_id(identifier)
history_csv = '%s/resource/%s/history/csv' % (CONFIG['GHC_SITE_URL'],
resource.identifier)
history_json = '%s/resource/%s/history/json' % (CONFIG['GHC_SITE_URL'],
resource.identifier)
if 'json' in request.url_rule.rule:
last_run_report = '-'
if resource.last_run:
last_run_report = resource.last_run.report
json_dict = {
'identifier': resource.identifier,
'title': resource.title,
'url': resource.url,
'resource_type': resource.resource_type,
'owner': resource.owner.username,
'min_response_time': resource.min_response_time,
'average_response_time': resource.average_response_time,
'max_response_time': resource.max_response_time,
'reliability': resource.reliability,
'status': format_run_status(resource.last_run),
'first_run': format_checked_datetime(resource.first_run),
'last_run': format_checked_datetime(resource.last_run),
'history_csv': history_csv,
'history_json': history_json,
'last_report': format_obj_value(last_run_report)
}
return jsonify(json_dict)
elif 'csv' in request.url_rule.rule:
output = StringIO()
writer = csv.writer(output)
header = [
'identifier', 'title', 'url', 'resource_type', 'owner',
'min_response_time', 'average_response_time', 'max_response_time',
'reliability', 'status', 'first_run', 'last_run', 'history_csv',
'history_json'
]
writer.writerow(header)
writer.writerow([
resource.identifier,
resource.title,
resource.url,
resource.resource_type,
resource.owner.username,
resource.min_response_time,
resource.average_response_time,
resource.max_response_time,
resource.reliability,
format_run_status(resource.last_run),
format_checked_datetime(resource.first_run),
format_checked_datetime(resource.last_run),
history_csv,
history_json
])
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/resource/<identifier>/history/csv',
endpoint='csv-resource-history')
@APP.route('/resource/<identifier>/history/json',
endpoint='json-resource-history')
def export_resource_history(identifier):
resource = views.get_resource_by_id(identifier)
if 'json' in request.url_rule.rule:
json_dict = {'runs': []}
for run in resource.runs:
json_dict['runs'].append({
'owner': resource.owner.username,
'resource_type': resource.resource_type,
'checked_datetime': format_checked_datetime(run),
'title': resource.title,
'url': resource.url,
'response_time': round(run.response_time, 2),
'status': format_run_status(run)
})
return jsonify(json_dict)
elif 'csv' in request.url_rule.rule:
output = StringIO()
writer = csv.writer(output)
header = [
'owner', 'resource_type', 'checked_datetime', 'title', 'url',
'response_time', 'status'
]
writer.writerow(header)
for run in resource.runs:
writer.writerow([
resource.owner.username,
resource.resource_type,
format_checked_datetime(run),
resource.title,
resource.url,
round(run.response_time, 2),
format_run_status(run),
])
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/settings')
def settings():
    """Settings endpoint (stub).

    NOTE(review): the view body is `pass`, so it returns None and Flask
    raises a 500 on access -- presumably a placeholder awaiting
    implementation; confirm before linking to this route.
    """
    pass
@APP.route('/resources')
def resources():
resource_type = None
if request.args.get('resource_type') in RESOURCE_TYPES.keys():
resource_type = request.args['resource_type']
tag = request.args.get('tag')
query = request.args.get('q')
response = views.list_resources(resource_type, query, tag)
return render_template('resources.html', response=response)
@APP.route('/resource/<identifier>')
def get_resource_by_id(identifier):
response = views.get_resource_by_id(identifier)
return render_template('resource.html', resource=response)
@APP.route('/register', methods=['GET', 'POST'])
def register():
    """Self-registration of a new user account (if enabled in CONFIG)."""
    if not CONFIG['GHC_SELF_REGISTER']:
        msg1 = gettext('This site is not configured for self-registration')
        msg2 = gettext('Please contact')
        msg = '%s. %s %s' % (msg1, msg2,
                             CONFIG['GHC_ADMIN_EMAIL'])
        flash('%s' % msg, 'danger')
        return render_template('register.html', errmsg=msg)
    if request.method == 'GET':
        return render_template('register.html')

    # Reject duplicates before attempting the INSERT.
    user = User.query.filter_by(username=request.form['username']).first()
    email = User.query.filter_by(email=request.form['email']).first()
    if user or email:
        flash('%s' % gettext('Invalid username or email'), 'danger')
        return render_template('register.html')

    user = User(request.form['username'],
                request.form['password'], request.form['email'])
    DB.session.add(user)
    try:
        DB.session.commit()
    except Exception as err:
        DB.session.rollback()
        # BUG FIX: Exception.message does not exist under Python 3; use
        # str(err).  Guard the fragile "third word is the column" parsing
        # of the DB error text so a surprise format cannot raise here.
        err_text = getattr(err, 'message', None) or str(err)
        try:
            bad_column = err_text.split()[2]
        except IndexError:
            bad_column = ''
        bad_value = request.form.get(bad_column, '')
        msg = gettext('already registered')
        flash('%s %s %s' % (bad_column, bad_value, msg), 'danger')
        return redirect(url_for('register', lang=g.current_lang))
    return redirect(url_for('login', lang=g.current_lang))
@APP.route('/add', methods=['GET', 'POST'])
@login_required
def add():
    """Add one or more Resources sniffed from a user-supplied URL.

    Sniffing may expand a single URL into multiple Resources (e.g. all
    layers of a service).  Each gets tags, default Probe/Checks per
    CONFIG['GHC_PROBE_DEFAULTS'], and an initial test Run.
    """
    if not g.user.is_authenticated():
        return render_template('add.html')
    if request.method == 'GET':
        return render_template('add.html')
    resource_type = request.form['resource_type']
    tags = request.form.getlist('tags')
    url = request.form['url'].strip()
    resources_to_add = []

    from healthcheck import sniff_test_resource, run_test_resource
    sniffed_resources = sniff_test_resource(CONFIG, resource_type, url)

    if not sniffed_resources:
        msg = gettext("No resources detected")
        # BUG FIX: LOGGER.exception() without a message is a TypeError and
        # there is no active exception here anyway; also return early
        # instead of falling through to a bogus success flash.
        LOGGER.warning(msg)
        flash(msg, 'danger')
        return redirect(url_for('home', lang=g.current_lang))

    for (resource_type, resource_url,
         title, success, response_time,
         message, start_time, resource_tags,) in sniffed_resources:
        # Reuse existing Tag rows where possible; create missing ones.
        tags_to_add = []
        for tag in chain(tags, resource_tags):
            tag_obj = tag
            if not isinstance(tag, Tag):
                tag_obj = Tag.query.filter_by(name=tag).first()
                if tag_obj is None:
                    tag_obj = Tag(name=tag)
            tags_to_add.append(tag_obj)

        resource_to_add = Resource(current_user,
                                   resource_type,
                                   title,
                                   resource_url,
                                   tags=tags_to_add)
        resources_to_add.append(resource_to_add)
        probe_to_add = None
        checks_to_add = []

        # Attach the configured default Probe and its default Checks.
        if resource_type in CONFIG['GHC_PROBE_DEFAULTS']:
            resource_settings = CONFIG['GHC_PROBE_DEFAULTS'][resource_type]
            probe_class = resource_settings['probe_class']
            if probe_class:
                probe_obj = Factory.create_obj(probe_class)
                probe_to_add = ProbeVars(
                    resource_to_add, probe_class,
                    probe_obj.get_default_parameter_values())
                checks_info = probe_obj.get_checks_info()
                checks_param_info = probe_obj.get_plugin_vars()['CHECKS_AVAIL']
                for check_class in checks_info:
                    check_param_info = checks_param_info[check_class]
                    if 'default' in checks_info[check_class]:
                        if checks_info[check_class]['default']:
                            # Collect non-empty default parameter values.
                            param_defs = check_param_info['PARAM_DEFS']
                            param_vals = {}
                            for param in param_defs:
                                if param_defs[param]['value']:
                                    param_vals[param] = \
                                        param_defs[param]['value']
                            check_vars = CheckVars(
                                probe_to_add, check_class, param_vals)
                            checks_to_add.append(check_vars)

        # Run an initial health test and persist everything.
        result = run_test_resource(resource_to_add)
        run_to_add = Run(resource_to_add, result)
        DB.session.add(resource_to_add)
        resource_to_add.set_recipients('email', [g.user.email])
        if probe_to_add:
            DB.session.add(probe_to_add)
        for check_to_add in checks_to_add:
            DB.session.add(check_to_add)
        DB.session.add(run_to_add)
    try:
        DB.session.commit()
        msg = gettext('Services registered')
        flash('%s (%s, %s)' % (msg, resource_type, url), 'success')
    except Exception as err:
        DB.session.rollback()
        flash(str(err), 'danger')
        return redirect(url_for('home', lang=g.current_lang))

    # Single resource: jump straight into its edit form.
    if len(resources_to_add) == 1:
        return edit_resource(resources_to_add[0].identifier)
    return redirect(url_for('home', lang=g.current_lang))
@APP.route('/resource/<int:resource_identifier>/update', methods=['POST'])
@login_required
def update(resource_identifier):
update_counter = 0
status = 'success'
try:
resource_identifier_dict = request.get_json()
resource = Resource.query.filter_by(
identifier=resource_identifier).first()
for key, value in resource_identifier_dict.items():
if key == 'tags':
resource_tags = [t.name for t in resource.tags]
tags_to_add = set(value) - set(resource_tags)
tags_to_delete = set(resource_tags) - set(value)
all_tag_objs = Tag.query.all()
for tag in tags_to_add:
tag_add_obj = None
for tag_obj in all_tag_objs:
if tag == tag_obj.name:
tag_add_obj = tag_obj
break
if not tag_add_obj:
tag_add_obj = Tag(name=tag)
DB.session.add(tag_add_obj)
resource.tags.append(tag_add_obj)
for tag in tags_to_delete:
tag_to_delete = Tag.query.filter_by(name=tag).first()
resource.tags.remove(tag_to_delete)
update_counter += 1
elif key == 'probes':
for probe_var in resource.probe_vars:
resource.probe_vars.remove(probe_var)
for probe in value:
LOGGER.info('adding Probe class=%s parms=%s' %
(probe['probe_class'], str(probe)))
probe_vars = ProbeVars(resource, probe['probe_class'],
probe['parameters'])
for check in probe['checks']:
check_vars = CheckVars(
probe_vars, check['check_class'],
check['parameters'])
probe_vars.check_vars.append(check_vars)
resource.probe_vars.append(probe_vars)
update_counter += 1
elif key == 'notify_emails':
resource.set_recipients('email',
[v for v in value if v.strip()])
elif key == 'notify_webhooks':
resource.set_recipients('webhook',
[v for v in value if v.strip()])
elif key == 'auth':
resource.auth = value
elif getattr(resource, key) != resource_identifier_dict[key]:
setattr(resource, key, resource_identifier_dict[key])
min_run_freq = CONFIG['GHC_MINIMAL_RUN_FREQUENCY_MINS']
if int(resource.run_frequency) < min_run_freq:
resource.run_frequency = min_run_freq
update_counter += 1
latitude, longitude = geocode(resource.url)
if latitude != 0.0 and longitude != 0.0:
resource.latitude = latitude
resource.longitude = longitude
update_counter += 1
except Exception as err:
LOGGER.error("Cannot update resource: %s", err, exc_info=err)
DB.session.rollback()
status = str(err)
update_counter = 0
if update_counter > 0:
err = db_commit()
if err:
status = str(err)
return jsonify({'status': status})
@APP.route('/resource/<int:resource_identifier>/test', methods=['GET', 'POST'])
@login_required
def test(resource_identifier):
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(request.referrer)
from healthcheck import run_test_resource
result = run_test_resource(
resource)
if request.method == 'GET':
if result.message == 'Skipped':
msg = gettext('INFO')
flash('%s: %s' % (msg, result.message), 'info')
elif result.message not in ['OK', None, 'None']:
msg = gettext('ERROR')
flash('%s: %s' % (msg, result.message), 'danger')
else:
flash(gettext('Resource tested successfully'), 'success')
return redirect(url_for('get_resource_by_id', lang=g.current_lang,
identifier=resource_identifier))
elif request.method == 'POST':
return jsonify(result.get_report())
@APP.route('/resource/<int:resource_identifier>/edit')
@login_required
def edit_resource(resource_identifier):
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(request.referrer)
probes_avail = views.get_probes_avail(resource.resource_type, resource)
suggestions = json.dumps(Recipient.get_suggestions('email',
g.user.username))
return render_template('edit_resource.html',
lang=g.current_lang,
resource=resource,
suggestions=suggestions,
auths_avail=ResourceAuth.get_auth_defs(),
probes_avail=probes_avail)
@APP.route('/resource/<int:resource_identifier>/delete')
@login_required
def delete(resource_identifier):
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if g.user.role != 'admin' and g.user.username != resource.owner.username:
msg = gettext('You do not have access to delete this resource')
flash(msg, 'danger')
return redirect(url_for('get_resource_by_id', lang=g.current_lang,
identifier=resource_identifier))
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(url_for('home', lang=g.current_lang))
resource.clear_recipients()
DB.session.delete(resource)
try:
DB.session.commit()
flash(gettext('Resource deleted'), 'success')
return redirect(url_for('home', lang=g.current_lang))
except Exception as err:
DB.session.rollback()
flash(str(err), 'danger')
return redirect(url_for(request.referrer))
@APP.route('/probe/<string:probe_class>/<int:resource_identifier>/edit_form')
@APP.route('/probe/<string:probe_class>/edit_form')
@login_required
def get_probe_edit_form(probe_class, resource_identifier=None):
probe_obj = Factory.create_obj(probe_class)
if resource_identifier:
resource = views.get_resource_by_id(resource_identifier)
if resource:
probe_obj._resource = resource
probe_obj.expand_params(resource)
probe_info = probe_obj.get_plugin_vars()
probe_vars = ProbeVars(
None, probe_class, probe_obj.get_default_parameter_values())
checks_avail = probe_obj.get_checks_info_defaults()
checks_avail = probe_obj.expand_check_vars(checks_avail)
for check_class in checks_avail:
check_obj = Factory.create_obj(check_class)
check_params = check_obj.get_default_parameter_values()
probe_check_param_defs = \
probe_info['CHECKS_AVAIL'][check_class]['PARAM_DEFS']
for param in probe_check_param_defs:
if 'value' in probe_check_param_defs[param]:
check_params[param] = probe_check_param_defs[param]['value']
CheckVars(probe_vars, check_class, check_params)
return render_template('includes/probe_edit_form.html',
lang=g.current_lang,
probe=probe_vars, probe_info=probe_info)
@APP.route('/check/<string:check_class>/edit_form')
@login_required
def get_check_edit_form(check_class):
check_obj = Factory.create_obj(check_class)
check_info = check_obj.get_plugin_vars()
check_vars = CheckVars(
None, check_class, check_obj.get_default_parameter_values())
return render_template('includes/check_edit_form.html',
lang=g.current_lang,
check=check_vars, check_info=check_info)
@APP.route('/login', methods=['GET', 'POST'])
@public_route
def login():
if request.method == 'GET':
return render_template('login.html')
username = request.form['username']
password = request.form['password']
registered_user = User.query.filter_by(username=username).first()
authenticated = False
if registered_user:
if len(registered_user.password) < 80:
msg = 'Please upgrade GHC to encrypted passwords first, see docs!'
flash(gettext(msg), 'danger')
return redirect(url_for('login', lang=g.current_lang))
try:
authenticated = registered_user.authenticate(password)
finally:
pass
if not authenticated:
flash(gettext('Invalid username and / or password'), 'danger')
return redirect(url_for('login', lang=g.current_lang))
login_user(registered_user)
if 'next' in request.args:
return redirect(request.args.get('next'))
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/logout')
def logout():
logout_user()
flash(gettext('Logged out'), 'success')
if request.referrer:
return redirect(request.referrer)
else:
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/reset_req', methods=['GET', 'POST'])
@public_route
def reset_req():
if request.method == 'GET':
return render_template('reset_password_request.html')
email = request.form['email']
registered_user = User.query.filter_by(email=email).first()
if registered_user is None:
LOGGER.warn('Invalid email for reset_req: %s' % email)
flash(gettext('Invalid email'), 'danger')
return redirect(url_for('reset_req', lang=g.current_lang))
token = registered_user.get_token()
reset_url = '%s/reset/%s' % (CONFIG['GHC_SITE_URL'], token)
msg_body = render_template('reset_password_email.txt',
lang=g.current_lang, config=CONFIG,
reset_url=reset_url,
username=registered_user.username)
try:
from email.mime.text import MIMEText
from email.utils import formataddr
msg = MIMEText(msg_body, 'plain', 'utf-8')
msg['From'] = formataddr((CONFIG['GHC_SITE_TITLE'],
CONFIG['GHC_ADMIN_EMAIL']))
msg['To'] = registered_user.email
msg['Subject'] = '[%s] %s' % (CONFIG['GHC_SITE_TITLE'],
gettext('reset password'))
from_addr = '%s <%s>' % (CONFIG['GHC_SITE_TITLE'],
CONFIG['GHC_ADMIN_EMAIL'])
to_addr = registered_user.email
msg_text = msg.as_string()
send_email(CONFIG['GHC_SMTP'], from_addr, to_addr, msg_text)
except Exception as err:
msg = 'Cannot send email. Contact admin: '
LOGGER.warn(msg + ' err=' + str(err))
flash(gettext(msg) + CONFIG['GHC_ADMIN_EMAIL'], 'danger')
return redirect(url_for('login', lang=g.current_lang))
flash(gettext('Password reset link sent via email'), 'success')
if 'next' in request.args:
return redirect(request.args.get('next'))
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/reset/<token>', methods=['GET', 'POST'])
@public_route
def reset(token=None):
    """Reset password submit form handling: verify token, apply new password."""
    # Must have at least a token to proceed.
    if token is None:
        return redirect(url_for('reset_req', lang=g.current_lang))
    # Token received: verify if ok, may also time-out.
    registered_user = User.verify_token(token)
    if registered_user is None:
        LOGGER.warning('Cannot find User from token: %s', token)
        flash(gettext('Invalid token'), 'danger')
        return redirect(url_for('login', lang=g.current_lang))
    # Token and user ok: return reset form.
    if request.method == 'GET':
        return render_template('reset_password_form.html')
    password = request.form['password']
    if not password:
        flash(gettext('Password required'), 'danger')
        # BUG FIX: url_for() takes an endpoint NAME, not a path;
        # 'reset/<token>' raised BuildError (HTTP 500) on empty password.
        return redirect(url_for('reset', token=token, lang=g.current_lang))
    registered_user.set_password(password)
    DB.session.add(registered_user)
    try:
        DB.session.commit()
        flash(gettext('Update password OK'), 'success')
    except Exception as err:
        msg = 'Update password failed!'
        LOGGER.warning('%s err=%s', msg, err)
        DB.session.rollback()
        flash(gettext(msg), 'danger')
    # Finally redirect user to login page
    return redirect(url_for('login', lang=g.current_lang))
@APP.route('/api/v1.0/summary')
@APP.route('/api/v1.0/summary/')
@APP.route('/api/v1.0/summary.<content_type>')
def api_summary(content_type='json'):
health_summary = views.get_health_summary()
for run in ['first_run', 'last_run']:
run_obj = health_summary.get(run, None)
if run_obj:
health_summary[run] = run_obj.for_json()
failed_resources = []
for resource in health_summary['failed_resources']:
failed_resources.append(resource.for_json())
health_summary['failed_resources'] = failed_resources
if content_type == 'json':
result = jsonify(health_summary)
else:
result = '<pre>\n%s\n</pre>' % \
render_template('status_report_email.txt',
lang=g.current_lang, summary=health_summary)
return result
@APP.route('/api/v1.0/probes-avail/')
@APP.route('/api/v1.0/probes-avail/<resource_type>')
@APP.route('/api/v1.0/probes-avail/<resource_type>/<int:resource_id>')
def api_probes_avail(resource_type=None, resource_id=None):
resource = None
if resource_id:
resource = views.get_resource_by_id(resource_id)
probes = views.get_probes_avail(resource_type=resource_type,
resource=resource)
return jsonify(probes)
@APP.route('/api/v1.0/runs/<int:resource_id>')
@APP.route('/api/v1.0/runs/<int:resource_id>.<content_type>')
@APP.route('/api/v1.0/runs/<int:resource_id>/<int:run_id>')
@APP.route('/api/v1.0/runs/<int:resource_id>/<int:run_id>.<content_type>')
def api_runs(resource_id, run_id=None, content_type='json'):
    """Return the run history of a resource (or one run) as JSON or HTML."""
    if run_id:
        runs = [views.get_run_by_id(run_id)]
    else:
        runs = views.get_run_by_resource_id(resource_id)
    run_arr = [{
        'id': run.identifier,
        'success': run.success,
        'response_time': run.response_time,
        'checked_datetime': run.checked_datetime,
        'message': run.message,
        'report': run.report
    } for run in runs]
    runs_dict = {'total': len(run_arr), 'runs': run_arr}
    if content_type == 'json':
        return jsonify(runs_dict)
    if content_type == 'html':
        return render_template('includes/runs.html',
                               lang=g.current_lang, runs=runs_dict['runs'])
    # Unrecognised content type: mirror the original fallback value.
    return 'unknown'
if __name__ == '__main__':
    import sys
    # Development entry point. Bind address may be overridden on the
    # command line as a single "host:port" argument.
    HOST = '0.0.0.0'
    PORT = 8000
    if len(sys.argv) > 1:
        HOST, PORT = sys.argv[1].split(':')
    # Reloader + debug enabled: development use only.
    APP.run(host=HOST, port=int(PORT), use_reloader=True, debug=True)
| true | true |
f7feba68caa8e405556b6ebf1b35498ba9d47f13 | 2,462 | py | Python | src/lib/gpxpy/utils.py | jdesgats/opengpxmapper | 2164bb294ec18bd96dbf82c5db83ca28e002896f | [
"Unlicense",
"MIT"
] | 1 | 2015-03-12T11:21:17.000Z | 2015-03-12T11:21:17.000Z | src/lib/gpxpy/utils.py | jdesgats/opengpxmapper | 2164bb294ec18bd96dbf82c5db83ca28e002896f | [
"Unlicense",
"MIT"
] | null | null | null | src/lib/gpxpy/utils.py | jdesgats/opengpxmapper | 2164bb294ec18bd96dbf82c5db83ca28e002896f | [
"Unlicense",
"MIT"
] | 1 | 2021-12-25T16:46:11.000Z | 2021-12-25T16:46:11.000Z | # -*- coding: utf-8 -*-
# Copyright 2011 Tomo Krajina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys as mod_sys
import xml.sax.saxutils as mod_saxutils
PYTHON_VERSION = mod_sys.version.split(' ')[0]
def to_xml(tag, attributes=None, content=None, default=None, escape=False):
    """Serialize a single XML element, prefixed with a newline.

    ``default`` substitutes for a missing ``content``; ``escape`` applies
    XML entity escaping to the content before embedding it.
    """
    if content is None and default:
        content = default
    result = '\n<%s' % tag
    for attr_name, attr_value in (attributes or {}).items():
        result += ' %s="%s"' % (attr_name, attr_value)
    if content is None:
        result += '/>'
    elif escape:
        result += '>%s</%s>' % (mod_saxutils.escape(content), tag)
    else:
        result += '>%s</%s>' % (content, tag)
    return make_str(result)
def is_numeric(object):
    """Return True if ``object`` can be converted to a float."""
    try:
        float(object)
    except (TypeError, ValueError):
        return False
    return True
def to_number(s, default=0):
    """Convert ``s`` to float, returning ``default`` when conversion fails."""
    try:
        return float(s)
    except (TypeError, ValueError):
        return default
# Hash utilities:
def __hash(obj):
    """Hash helper that also accepts None, lists and tuples.

    Dicts are deliberately rejected (not implemented).
    """
    if obj is None:
        return 0
    if isinstance(obj, dict):
        raise RuntimeError('__hash_single_object for dict not yet implemented')
    if isinstance(obj, (list, tuple)):
        return hash_list_or_tuple(obj)
    return hash(obj)
def hash_list_or_tuple(iteration):
    """Order-sensitive rolling hash (base 31, seed 17) over an iterable."""
    acc = 17
    for item in iteration:
        acc = acc * 31 + __hash(item)
    return acc
def hash_object(obj, *attributes):
    """Hash ``obj`` based only on the values of the named attributes."""
    acc = 19
    for attr_name in attributes:
        acc = acc * 31 + __hash(getattr(obj, attr_name))
    return acc
def make_str(s):
    """ Convert a str or unicode object into a str type. """
    # Python 2: encode unicode to UTF-8 bytes (the py2 ``str`` type).
    # Everything else, and all of Python 3, falls through to ``str()``.
    if PYTHON_VERSION[0] == '2':
        if isinstance(s, unicode):
            return s.encode("utf-8")
    return str(s)
| 24.376238 | 79 | 0.642973 |
import sys as mod_sys
import xml.sax.saxutils as mod_saxutils
PYTHON_VERSION = mod_sys.version.split(' ')[0]
def to_xml(tag, attributes=None, content=None, default=None, escape=False):
    """Serialize one XML element (prefixed with a newline) to a string.

    ``default`` substitutes for a missing ``content``; ``escape`` applies
    XML entity escaping via xml.sax.saxutils.
    """
    attributes = attributes or {}
    result = '\n<%s' % tag
    if content is None and default:
        content = default
    if attributes:
        for attribute in attributes.keys():
            result += ' %s="%s"' % (attribute, attributes[attribute])
    if content is None:
        # No content: emit a self-closing element.
        result += '/>'
    else:
        if escape:
            result += '>%s</%s>' % (mod_saxutils.escape(content), tag)
        else:
            result += '>%s</%s>' % (content, tag)
    result = make_str(result)
    return result
def is_numeric(object):
    """Return True when ``object`` can be converted to a float."""
    try:
        float(object)
        return True
    except TypeError:
        return False
    except ValueError:
        return False
def to_number(s, default=0):
    """Convert ``s`` to float, falling back to ``default`` on failure."""
    try:
        return float(s)
    except TypeError:
        pass
    except ValueError:
        pass
    return default
def __hash(obj):
    """Hash helper handling None and (recursively) lists/tuples; dicts raise."""
    result = 0
    if obj is None:
        return result
    elif isinstance(obj, dict):
        raise RuntimeError('__hash_single_object for dict not yet implemented')
    elif isinstance(obj, list) or isinstance(obj, tuple):
        return hash_list_or_tuple(obj)
    return hash(obj)
def hash_list_or_tuple(iteration):
    """Order-sensitive rolling hash (base 31, seed 17) over an iterable."""
    result = 17
    for obj in iteration:
        result = result * 31 + __hash(obj)
    return result
def hash_object(obj, *attributes):
    """Hash ``obj`` from the values of the named attributes only."""
    result = 19
    for attribute in attributes:
        result = result * 31 + __hash(getattr(obj, attribute))
    return result
def make_str(s):
    """Convert a str or (Python 2) unicode object into the native str type."""
    # Python 2: unicode must be encoded to UTF-8 bytes; everything else,
    # and all of Python 3, goes through str().
    if PYTHON_VERSION[0] == '2':
        if isinstance(s, unicode):
            return s.encode("utf-8")
    return str(s)
| true | true |
f7febaca2f57a1183dd1609bb06226e92d527c6b | 22 | py | Python | db_model/post-comment.py | UtkarshR8j/evolv-challenge | 81469c2eab27db140e2c7a369885b7e3b1584b77 | [
"MIT"
] | null | null | null | db_model/post-comment.py | UtkarshR8j/evolv-challenge | 81469c2eab27db140e2c7a369885b7e3b1584b77 | [
"MIT"
] | null | null | null | db_model/post-comment.py | UtkarshR8j/evolv-challenge | 81469c2eab27db140e2c7a369885b7e3b1584b77 | [
"MIT"
] | null | null | null | from . import crud_db
| 11 | 21 | 0.772727 | from . import crud_db
| true | true |
f7febb023cfee9a4ea9b3b7a10a95627418d1772 | 5,669 | py | Python | ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 25 | 2019-12-04T03:09:55.000Z | 2022-03-08T10:52:06.000Z | ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 29 | 2019-12-04T03:00:39.000Z | 2022-03-02T06:25:44.000Z | ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 33 | 2019-12-04T02:51:30.000Z | 2022-03-24T02:47:38.000Z | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
from resource_management.libraries.functions.check_process_status import wait_process_stopped
import re
def service(action=None, name=None, user=None, create_pid_dir=False,
            create_log_dir=False):
  """Drive hadoop-daemon.sh to start/stop an HDFS component.

  :param action: daemon action passed through, e.g. 'start' or 'stop'
  :param name: component name, e.g. 'namenode' or 'datanode'
  :param user: OS user to run the daemon as; may be overridden to 'root'
      below for secure datanodes bound to privileged ports
  :param create_pid_dir: create the per-user pid directory first
  :param create_log_dir: create the per-user log directory first
  """
  import params
  # Per-user pid/log locations; check_process tests liveness via the pidfile.
  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
  log_dir = format("{hdfs_log_dir_prefix}/{user}")
  check_process = format(
    "ls {pid_file} >/dev/null 2>&1 &&"
    " ps -p `cat {pid_file}` >/dev/null 2>&1")
  if create_pid_dir:
    Directory(pid_dir,
              owner=user,
              create_parents = True)
  if create_log_dir:
    Directory(log_dir,
              owner=user,
              create_parents = True)
  # Environment exported in front of the hadoop-daemon.sh invocation.
  hadoop_env_exports = {
    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
  }
  if params.security_enabled and name == "datanode":
    dfs_dn_port = get_port(params.dfs_dn_addr)
    dfs_dn_http_port = get_port(params.dfs_dn_http_addr)
    dfs_dn_https_port = get_port(params.dfs_dn_https_addr)
    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
    if params.dfs_http_policy == "HTTPS_ONLY":
      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
    elif params.dfs_http_policy == "HTTP_AND_HTTPS":
      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
    else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
    # Calculate HADOOP_SECURE_DN_* env vars, but not append them yet
    # These variables should not be set when starting secure datanode as a non-root
    ## On secure datanodes, user to run the datanode as after dropping privileges
    hadoop_secure_dn_user = params.hdfs_user
    ## Where log files are stored in the secure data environment.
    hadoop_secure_dn_log_dir = format("{hdfs_log_dir_prefix}/{hadoop_secure_dn_user}")
    ## The directory where pid files are stored in the secure data environment.
    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hadoop_secure_dn_user}")
    hadoop_secure_dn_exports = {
      'HADOOP_SECURE_DN_USER' : hadoop_secure_dn_user,
      'HADOOP_SECURE_DN_LOG_DIR' : hadoop_secure_dn_log_dir,
      'HADOOP_SECURE_DN_PID_DIR' : hadoop_secure_dn_pid_dir
    }
    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
    # At Champlain stack and further, we may start datanode as a non-root even in secure cluster
    if not params.stack_is_champlain_or_further or secure_ports_are_in_use:
      user = "root"
      pid_file = format(
        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
      if params.stack_is_champlain_or_further:
        hadoop_env_exports.update(hadoop_secure_dn_exports)
    if action == 'stop' and params.stack_is_champlain_or_further and \
      os.path.isfile(hadoop_secure_dn_pid_file):
        # We need special handling for this case to handle the situation
        # when we configure non-root secure DN and then restart it
        # to handle new configs. Otherwise we will not be able to stop
        # a running instance
        user = "root"
        try:
          # Signal 0 only probes whether the pid is alive (no signal sent).
          with open(hadoop_secure_dn_pid_file, 'r') as f:
            pid = f.read()
          os.kill(int(pid), 0)
          hadoop_env_exports.update(hadoop_secure_dn_exports)
        except IOError:
          pass # Can not open pid file
        except ValueError:
          pass # Pid file content is invalid
        except OSError:
          pass # Process is not running
  hadoop_env_exports_str = ''
  for exp in hadoop_env_exports.items():
    hadoop_env_exports_str += "export {0}={1} && ".format(exp[0], exp[1])
  hadoop_daemon = format(
    "{hadoop_env_exports_str}"
    "{hadoop_bin}/hadoop-daemon.sh")
  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
  daemon_cmd = format("{ulimit_cmd} su -s /bin/bash - {user} -c '{cmd} {action} {name}'")
  service_is_up = check_process if action == "start" else None
  #remove pid file from dead process
  File(pid_file,
       action="delete",
       not_if=check_process,
  )
  Execute(daemon_cmd,
          not_if=service_is_up
  )
  if action == "stop":
    wait_process_stopped(pid_file)
    File(pid_file,
         action="delete",
    )
def get_port(address):
  """
  Return the numeric port parsed from an address like 0.0.0.0:1019
  (an optional http:// or https:// scheme is tolerated), or None when
  the address is missing or has no port component.
  """
  if address is None:
    return None
  match = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
  return int(match.group(2)) if match else None
def is_secure_port(port):
  """
  Returns True if port is root-owned (privileged, below 1024) at *nix systems
  """
  return port is not None and port < 1024
| 37.052288 | 132 | 0.709473 | import os
from resource_management import *
from resource_management.libraries.functions.check_process_status import wait_process_stopped
import re
def service(action=None, name=None, user=None, create_pid_dir=False,
create_log_dir=False):
import params
pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
log_dir = format("{hdfs_log_dir_prefix}/{user}")
check_process = format(
"ls {pid_file} >/dev/null 2>&1 &&"
" ps -p `cat {pid_file}` >/dev/null 2>&1")
if create_pid_dir:
Directory(pid_dir,
owner=user,
create_parents = True)
if create_log_dir:
Directory(log_dir,
owner=user,
create_parents = True)
hadoop_env_exports = {
'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
}
if params.security_enabled and name == "datanode":
dfs_dn_port = get_port(params.dfs_dn_addr)
dfs_dn_http_port = get_port(params.dfs_dn_http_addr)
dfs_dn_https_port = get_port(params.dfs_dn_https_addr)
if params.dfs_http_policy == "HTTPS_ONLY":
secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
elif params.dfs_http_policy == "HTTP_AND_HTTPS":
secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
else:
secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
)
cure_dn_user}")
hadoop_secure_dn_exports = {
'HADOOP_SECURE_DN_USER' : hadoop_secure_dn_user,
'HADOOP_SECURE_DN_LOG_DIR' : hadoop_secure_dn_log_dir,
'HADOOP_SECURE_DN_PID_DIR' : hadoop_secure_dn_pid_dir
}
hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
if not params.stack_is_champlain_or_further or secure_ports_are_in_use:
user = "root"
pid_file = format(
"{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
if params.stack_is_champlain_or_further:
hadoop_env_exports.update(hadoop_secure_dn_exports)
if action == 'stop' and params.stack_is_champlain_or_further and \
os.path.isfile(hadoop_secure_dn_pid_file):
user = "root"
try:
with open(hadoop_secure_dn_pid_file, 'r') as f:
pid = f.read()
os.kill(int(pid), 0)
hadoop_env_exports.update(hadoop_secure_dn_exports)
except IOError:
pass
except ValueError:
pass
except OSError:
pass
hadoop_env_exports_str = ''
for exp in hadoop_env_exports.items():
hadoop_env_exports_str += "export {0}={1} && ".format(exp[0], exp[1])
hadoop_daemon = format(
"{hadoop_env_exports_str}"
"{hadoop_bin}/hadoop-daemon.sh")
cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
daemon_cmd = format("{ulimit_cmd} su -s /bin/bash - {user} -c '{cmd} {action} {name}'")
service_is_up = check_process if action == "start" else None
File(pid_file,
action="delete",
not_if=check_process,
)
Execute(daemon_cmd,
not_if=service_is_up
)
if action == "stop":
wait_process_stopped(pid_file)
File(pid_file,
action="delete",
)
def get_port(address):
  """Return the int port parsed from 'host:port' (or a URL), else None."""
  if address is None:
    return None
  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
  if m is not None:
    return int(m.group(2))
  else:
    return None
def is_secure_port(port):
  """Return True for privileged (<1024, root-owned) ports on *nix."""
  if port is not None:
    return port < 1024
  else:
    return False
| true | true |
f7febb8e588e9195aa2a3c12f5179b386a0c0bf6 | 2,217 | py | Python | graffiti/user_ns.py | rbardaji/graffiti | e10490a58b7eff041ff8212784f05daa076e3f53 | [
"MIT"
] | null | null | null | graffiti/user_ns.py | rbardaji/graffiti | e10490a58b7eff041ff8212784f05daa076e3f53 | [
"MIT"
] | null | null | null | graffiti/user_ns.py | rbardaji/graffiti | e10490a58b7eff041ff8212784f05daa076e3f53 | [
"MIT"
] | null | null | null | from flask_restx import Namespace, Resource, fields
from flask import request
from .utils.db_manager import get_query_id, get_query
from .utils.auth_manager import get_token, get_token_info
from .utils.decorator import token_required
# Flask-RESTX namespace grouping token issuance and request-history endpoints.
api = Namespace('user',
                description='Get an Authorization Token and check your ' + \
                    'previous API requests')
# Common response envelope returned by every endpoint in this namespace.
user_response = api.model('user_response', {
    'status': fields.Boolean(
        description='Indicates if the operation was successful'),
    'message': fields.String(description='Message for the user'),
    'result': fields.List(fields.Raw, description='Content of the response')})
# Query-string parser: optional ?namespace= filter for the history endpoint.
query_parse = api.parser()
query_parse.add_argument('namespace')
@api.route('/token/<string:user>/<string:password>')
@api.response(404, 'Invalid email or password.')
@api.response(503, 'Connection error with the AAI.')
class GetToken(Resource):
    """ Authorization operations"""
    @api.marshal_with(user_response, code=201, skip_none=True)
    def get(self, user, password):
        """
        Get the Authorization Token to use this API.
        """
        # Credential validation and token creation are delegated to the
        # auth manager; its failures surface as the 404/503 responses above.
        return get_token(user, password)
@api.route('/history')
@api.response(401, "Authorization Token is missing or is invalid.")
@api.response(503, "Connection error with the DB.")
class GetHistory(Resource):
    @api.doc(security='apikey')
    @api.expect(query_parse)
    @api.marshal_with(user_response, code=200, skip_none=True)
    @token_required
    def get(self):
        """
        Get a list of the ids {id_query} from the previous API requests.
        """
        # Identify the caller from the Authorization token, then fetch the
        # ids of their stored queries (optionally filtered by ?namespace=).
        info, _ = get_token_info(request)
        return get_query(info['result']['user_id'],
                         request.args.get('namespace'))
@api.route('/history/<string:id_query>')
@api.response(404, 'id_query not found')
@api.response(503, 'Internal error. Unable to connect to DB')
class GetQueryId(Resource):
    @api.doc(security='apikey')
    @api.marshal_with(user_response, code=200, skip_none=True)
    @token_required
    def get(self, id_query):
        """
        Get the content of the query
        """
        # Look up a single stored query document by its id.
        return get_query_id(id_query)
| 32.130435 | 78 | 0.684258 | from flask_restx import Namespace, Resource, fields
from flask import request
from .utils.db_manager import get_query_id, get_query
from .utils.auth_manager import get_token, get_token_info
from .utils.decorator import token_required
api = Namespace('user',
description='Get an Authorization Token and check your ' + \
'previous API requests')
user_response = api.model('user_response', {
'status': fields.Boolean(
description='Indicates if the operation was successful'),
'message': fields.String(description='Message for the user'),
'result': fields.List(fields.Raw, description='Content of the response')})
query_parse = api.parser()
query_parse.add_argument('namespace')
@api.route('/token/<string:user>/<string:password>')
@api.response(404, 'Invalid email or password.')
@api.response(503, 'Connection error with the AAI.')
class GetToken(Resource):
@api.marshal_with(user_response, code=201, skip_none=True)
def get(self, user, password):
return get_token(user, password)
@api.route('/history')
@api.response(401, "Authorization Token is missing or is invalid.")
@api.response(503, "Connection error with the DB.")
class GetHistory(Resource):
@api.doc(security='apikey')
@api.expect(query_parse)
@api.marshal_with(user_response, code=200, skip_none=True)
@token_required
def get(self):
token_info, _ = get_token_info(request)
user_id = token_info['result']['user_id']
namespace = request.args.get('namespace')
return get_query(user_id, namespace)
@api.route('/history/<string:id_query>')
@api.response(404, 'id_query not found')
@api.response(503, 'Internal error. Unable to connect to DB')
class GetQueryId(Resource):
@api.doc(security='apikey')
@api.marshal_with(user_response, code=200, skip_none=True)
@token_required
def get(self, id_query):
return get_query_id(id_query)
| true | true |
f7febd36d05a871c6e97fd490bf41a5eb13551e3 | 862 | py | Python | zerver/migrations/0087_remove_old_scheduled_jobs.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | 1 | 2020-04-09T18:34:44.000Z | 2020-04-09T18:34:44.000Z | zerver/migrations/0087_remove_old_scheduled_jobs.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | null | null | null | zerver/migrations/0087_remove_old_scheduled_jobs.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 1.10.5 on 2017-05-10 05:59
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def delete_old_scheduled_jobs(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Purge all legacy ScheduledJob rows.

    The send_email format changed; translating the old jobs is not worth
    the effort for a few invitation reminders and day2 followup emails,
    so they are simply deleted.
    """
    apps.get_model('zerver', 'ScheduledJob').objects.all().delete()
class Migration(migrations.Migration):
    # Runs after the default-org-type migration; forward-only purge of
    # legacy ScheduledJob rows via delete_old_scheduled_jobs.
    dependencies = [
        ('zerver', '0086_realm_alter_default_org_type'),
    ]
    operations = [
        migrations.RunPython(delete_old_scheduled_jobs),
    ]
| 34.48 | 92 | 0.74478 |
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def delete_old_scheduled_jobs(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Delete every legacy ScheduledJob row instead of migrating it."""
    apps.get_model('zerver', 'ScheduledJob').objects.all().delete()
class Migration(migrations.Migration):
    # Depends on the default-org-type migration; no reverse operation.
    dependencies = [
        ('zerver', '0086_realm_alter_default_org_type'),
    ]
    operations = [
        migrations.RunPython(delete_old_scheduled_jobs),
    ]
| true | true |
f7febdd608474fa96a7f7b00850d15a79ef14c7a | 3,019 | py | Python | src/fever_cs.py | aic-factcheck/fever-cs-baseline | 8f1721fdecb3c1aa226a1c4c0458f7e3a3166250 | [
"MIT"
] | null | null | null | src/fever_cs.py | aic-factcheck/fever-cs-baseline | 8f1721fdecb3c1aa226a1c4c0458f7e3a3166250 | [
"MIT"
] | null | null | null | src/fever_cs.py | aic-factcheck/fever-cs-baseline | 8f1721fdecb3c1aa226a1c4c0458f7e3a3166250 | [
"MIT"
] | null | null | null | import json
from logging.config import dictConfig
from typing import List, Dict
from allennlp.models import load_archive
from allennlp.predictors import Predictor
from fever.api.web_server import fever_web_api
from fever.evidence.retrieval_methods.retrieval_method import RetrievalMethod
import os
import logging
from fever.evidence.retrieval_methods.top_docs import TopNDocsTopNSents
from fever.reader import FEVERDocumentDatabase
def predict_single(predictor, retrieval_method, instance):
    """Retrieve evidence for one claim and classify it with the predictor."""
    claim = instance["claim"]
    evidence = retrieval_method.get_sentences_for_claim(claim)
    model_input = predictor._json_to_instance(
        {"claim": claim, "predicted_sentences": evidence})
    logits = predictor.predict_instance(model_input)["label_logits"]
    # Argmax over the label logits picks the predicted class index.
    best = logits.index(max(logits))
    label = predictor._model.vocab.get_token_from_index(best, namespace="labels")
    return {
        "predicted_label": label,
        "predicted_evidence": evidence
    }
def make_api():
    """Build the FEVER web API.

    Configures logging, reads the JSON config from $CONFIG_PATH (default
    configs/predict_docker.json), loads the document database, the DrQA
    retrieval index and the trained AllenNLP model, and returns the web
    app produced by fever_web_api wired to a batch predict function.
    """
    logger = logging.getLogger()
    dictConfig({
        'version': 1,
        'formatters': {'default': {
            'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
        }},
        'handlers': {'wsgi': {
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stderr',
            'formatter': 'default'
        }},
        'root': {
            'level': 'INFO',
            'handlers': ['wsgi']
        },
        'allennlp': {
            'level': 'INFO',
            'handlers': ['wsgi']
        },
    })
    logger.info("My sample FEVER application")
    config = json.load(open(os.getenv("CONFIG_PATH","configs/predict_docker.json")))
    # Create document retrieval model
    logger.info("Load FEVER Document database from {0}".format(config["database"]))
    db = FEVERDocumentDatabase(config["database"])
    logger.info("Load DrQA Document retrieval index from {0}".format(config['index']))
    retrieval_method = RetrievalMethod.by_name("top_docs")(db,
                                                           config["index"],
                                                           config["n_docs"],
                                                           config["n_sents"])
    # Load the pre-trained predictor and model from the .tar.gz in the config file.
    # Override the database location for our model as this now comes from a read-only volume
    logger.info("Load Model from {0}".format(config['model']))
    archive = load_archive(config["model"],
                           cuda_device=config["cuda_device"],
                           overrides='{"dataset_reader":{"database":"' + config["database"] + '" }}')
    predictor = Predictor.from_archive(archive, predictor_name="fever")
    def baseline_predict(instances):
        # Predict each instance independently with the shared model.
        predictions = []
        for instance in instances:
            predictions.append(predict_single(predictor, retrieval_method, instance))
        return predictions
    return fever_web_api(baseline_predict)
from logging.config import dictConfig
from typing import List, Dict
from allennlp.models import load_archive
from allennlp.predictors import Predictor
from fever.api.web_server import fever_web_api
from fever.evidence.retrieval_methods.retrieval_method import RetrievalMethod
import os
import logging
from fever.evidence.retrieval_methods.top_docs import TopNDocsTopNSents
from fever.reader import FEVERDocumentDatabase
def predict_single(predictor, retrieval_method, instance):
    # Retrieve candidate evidence sentences for the claim, run the predictor
    # on (claim, evidence), then take the argmax label over the logits.
    evidence = retrieval_method.get_sentences_for_claim(instance["claim"])
    test_instance = predictor._json_to_instance({"claim": instance["claim"], "predicted_sentences": evidence})
    predicted = predictor.predict_instance(test_instance)
    max_id = predicted["label_logits"].index(max(predicted["label_logits"]))
    return {
        "predicted_label": predictor._model.vocab.get_token_from_index(max_id, namespace="labels"),
        "predicted_evidence": evidence
    }
def make_api():
logger = logging.getLogger()
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stderr',
'formatter': 'default'
}},
'root': {
'level': 'INFO',
'handlers': ['wsgi']
},
'allennlp': {
'level': 'INFO',
'handlers': ['wsgi']
},
})
logger.info("My sample FEVER application")
config = json.load(open(os.getenv("CONFIG_PATH","configs/predict_docker.json")))
logger.info("Load FEVER Document database from {0}".format(config["database"]))
db = FEVERDocumentDatabase(config["database"])
logger.info("Load DrQA Document retrieval index from {0}".format(config['index']))
retrieval_method = RetrievalMethod.by_name("top_docs")(db,
config["index"],
config["n_docs"],
config["n_sents"])
logger.info("Load Model from {0}".format(config['model']))
archive = load_archive(config["model"],
cuda_device=config["cuda_device"],
overrides='{"dataset_reader":{"database":"' + config["database"] + '" }}')
predictor = Predictor.from_archive(archive, predictor_name="fever")
def baseline_predict(instances):
predictions = []
for instance in instances:
predictions.append(predict_single(predictor, retrieval_method, instance))
return predictions
return fever_web_api(baseline_predict)
| true | true |
f7febe073fcf8ad7273e7569a5ed882fdc4e8ef5 | 5,463 | py | Python | blazar/plugins/devices/zun_plugin.py | 4sarathbabu/blazar | 16ed6ad588e3508d1436aa3b2f1aae4ed5f4928b | [
"Apache-2.0"
] | 1 | 2022-02-22T11:34:45.000Z | 2022-02-22T11:34:45.000Z | blazar/plugins/devices/zun_plugin.py | 4sarathbabu/blazar | 16ed6ad588e3508d1436aa3b2f1aae4ed5f4928b | [
"Apache-2.0"
] | 19 | 2017-05-23T21:34:09.000Z | 2022-02-21T19:15:29.000Z | blazar/plugins/devices/zun_plugin.py | 4sarathbabu/blazar | 16ed6ad588e3508d1436aa3b2f1aae4ed5f4928b | [
"Apache-2.0"
] | 4 | 2018-08-20T19:39:02.000Z | 2022-02-22T11:36:39.000Z | # -*- coding: utf-8 -*-
#
# Author: Pierre Riteau <pierre@stackhpc.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from blazar.db import api as db_api
from blazar.db import exceptions as db_ex
from blazar.manager import exceptions as manager_ex
from blazar.utils.openstack import placement
from blazar.utils.openstack import zun
from oslo_log import log as logging
from zunclient import exceptions as zun_ex
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ZunPlugin(zun.ZunClientWrapper):
    """Plugin for zun device driver."""
    device_driver = 'zun'

    def create_device(self, device_values):
        """Enroll a Zun compute node as a reservable Blazar device.

        :param device_values: dict with at least ``id`` or ``name``; any
            other keys are stored as extra capabilities (names <= 64 chars).
        :returns: the id of the created device record.
        :raises: manager_ex.InvalidHost, manager_ex.HostHavingContainers,
            manager_ex.ExtraCapabilityTooLong,
            manager_ex.CantAddExtraCapability, db_ex.BlazarDBException
        """
        device_id = device_values.get('id')
        device_name = device_values.get('name')
        device_ref = device_id or device_name
        if device_ref is None:
            raise manager_ex.InvalidHost(host=device_values)
        inventory = zun.ZunInventory()
        zun_compute_node = inventory.get_host_details(device_ref)
        # Refuse to enroll a host that is already running containers.
        if len(zun_compute_node['containers']) > 0:
            raise manager_ex.HostHavingContainers(host=device_ref)
        device_properties = {'id': zun_compute_node['id'],
                             'name': zun_compute_node['name'],
                             'device_type': 'container',
                             'device_driver': 'zun'}
        # Every key not consumed above is kept as an extra capability.
        extra_capabilities_keys = set(device_values.keys()) - \
            set(device_properties.keys())
        if any([len(key) > 64 for key in extra_capabilities_keys]):
            raise manager_ex.ExtraCapabilityTooLong()
        extra_capabilities = dict(
            (key, device_values[key]) for key in extra_capabilities_keys
        )
        placement_client = placement.BlazarPlacementClient()
        placement_client.create_reservation_provider(
            host_name=zun_compute_node['name'])
        try:
            device = db_api.device_create(device_properties)
        except db_ex.BlazarDBException:
            # Roll back the placement provider when the DB insert fails.
            placement_client.delete_reservation_provider(
                host_name=zun_compute_node['name'])
            raise
        cantaddextracapability = []
        for key in extra_capabilities:
            values = {'device_id': device['id'],
                      'capability_name': key,
                      'capability_value': extra_capabilities[key],
                      }
            try:
                db_api.device_extra_capability_create(values)
            except db_ex.BlazarDBException:
                cantaddextracapability.append(key)
        if cantaddextracapability:
            raise manager_ex.CantAddExtraCapability(
                keys=cantaddextracapability,
                host=device['id'])
        return device['id']

    def cleanup_device(self, device):
        """Force-delete every container running on ``device``.

        Failures are logged rather than raised so that lease teardown can
        proceed even when Zun is unhealthy.
        """
        try:
            # TODO(jason): zunclient is broken when passing both all_projects
            # and 'host' as a keyword argument; the parameters are encoded
            # like /v1/containers/?all_projects=1?host=..., which is malformed.
            # Passing in 'host' to the list() function would however probably
            # be more efficient.
            host_containers = [
                container for container in
                self.zun.containers.list(all_projects=True)
                if container.host == device['name']
            ]
        except zun_ex.ClientException as exc:
            LOG.error((
                'During lease teardown, failed to enumerate containers. '
                'Containers may need to be manually cleaned up on %s.'
                'Error: %s'
            ), device['name'], exc)
            host_containers = []
        for container in host_containers:
            try:
                self.zun.containers.delete(
                    container.uuid, force=True, stop=True)
            except zun_ex.NotFound:
                LOG.info('Could not find container %s, may have been deleted '
                         'concurrently.', container.name)
            except Exception as e:
                LOG.exception('Failed to delete %s: %s.',
                              container.name, str(e))

    def poll_resource_failures(self, devices):
        """Classify this driver's devices as newly failed or recovered.

        :param devices: iterable of device dicts; only those whose
            ``device_driver`` matches this plugin are considered.
        :returns: tuple (failed_devices, recovered_devices).
        """
        failed_devices = []
        recovered_devices = []
        zun_compute_services = {s.host: s for s in self.zun.services.list()}
        zun_devices = {d["name"]: d for d in devices
                       if d.get("device_driver") == self.device_driver}
        for device_name, device in zun_devices.items():
            is_reservable = device.get("reservable")
            cs = zun_compute_services.get(device_name)
            # BUGFIX: the original condition was
            #   is_reservable and cs and cs.state == 'down' or cs.disabled
            # which parses as "(...) or cs.disabled", raising AttributeError
            # when no compute service exists (cs is None) and ignoring
            # reservability for disabled services. Parenthesize the intended
            # "service is down OR disabled" disjunction.
            if is_reservable and cs and \
                    (cs.state == 'down' or cs.disabled):
                failed_devices.append(device)
            if not is_reservable and cs and \
                    cs.state == 'up' and not cs.disabled:
                recovered_devices.append(device)
        return failed_devices, recovered_devices
| 39.586957 | 79 | 0.615596 |
from oslo_config import cfg
from blazar.db import api as db_api
from blazar.db import exceptions as db_ex
from blazar.manager import exceptions as manager_ex
from blazar.utils.openstack import placement
from blazar.utils.openstack import zun
from oslo_log import log as logging
from zunclient import exceptions as zun_ex
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ZunPlugin(zun.ZunClientWrapper):
    """Blazar device plugin backed by the Zun container service.

    Devices are Zun compute hosts; each registered host gets a matching
    placement "reservation provider".
    """

    # Driver tag used to select devices belonging to this plugin.
    device_driver = 'zun'

    def create_device(self, device_values):
        """Register a Zun compute host as a reservable Blazar device.

        :param device_values: dict describing the host; must contain 'id'
            or 'name'. Keys beyond the canonical device properties are
            stored as extra capabilities.
        :return: id of the created device row.
        :raises manager_ex.InvalidHost: neither 'id' nor 'name' given.
        :raises manager_ex.HostHavingContainers: host already runs containers.
        :raises manager_ex.ExtraCapabilityTooLong: capability key > 64 chars.
        :raises manager_ex.CantAddExtraCapability: some capabilities failed
            to persist.
        """
        device_id = device_values.get('id')
        device_name = device_values.get('name')
        device_ref = device_id or device_name
        if device_ref is None:
            raise manager_ex.InvalidHost(host=device_values)
        inventory = zun.ZunInventory()
        zun_compute_node = inventory.get_host_details(device_ref)
        # Refuse hosts that already run containers: they cannot be handed
        # over to reservations in a clean state.
        if len(zun_compute_node['containers']) > 0:
            raise manager_ex.HostHavingContainers(host=device_ref)
        device_properties = {'id': zun_compute_node['id'],
                             'name': zun_compute_node['name'],
                             'device_type': 'container',
                             'device_driver': 'zun'}
        # Anything the caller passed beyond the canonical properties is
        # treated as an extra capability.
        extra_capabilities_keys = set(device_values) - set(device_properties)
        extra_capabilities = {
            key: device_values[key] for key in extra_capabilities_keys
        }
        if any(len(key) > 64 for key in extra_capabilities_keys):
            raise manager_ex.ExtraCapabilityTooLong()
        placement_client = placement.BlazarPlacementClient()
        placement_client.create_reservation_provider(
            host_name=zun_compute_node['name'])
        device = None
        cantaddextracapability = []
        try:
            device = db_api.device_create(device_properties)
        except db_ex.BlazarDBException:
            # Roll back the placement provider so a failed DB insert does
            # not leave an orphaned resource provider behind.
            placement_client.delete_reservation_provider(
                host_name=zun_compute_node['name'])
            raise
        for key in extra_capabilities:
            values = {'device_id': device['id'],
                      'capability_name': key,
                      'capability_value': extra_capabilities[key],
                      }
            try:
                db_api.device_extra_capability_create(values)
            except db_ex.BlazarDBException:
                cantaddextracapability.append(key)
        if cantaddextracapability:
            raise manager_ex.CantAddExtraCapability(
                keys=cantaddextracapability,
                host=device['id'])
        return device['id']

    def cleanup_device(self, device):
        """Force-delete every container running on *device*.

        Called at lease teardown; failures are logged rather than raised so
        teardown of the remaining resources can proceed.
        """
        # TODO(jason): zunclient is broken when passing both all_projects
        # and 'host' as a keyword argument; the parameters are encoded
        # like /v1/containers/?all_projects=1?host=..., which is malformed.
        # Passing in 'host' to the list() function would however probably
        # be more efficient.
        try:
            host_containers = [
                container for container in
                self.zun.containers.list(all_projects=True)
                if container.host == device['name']
            ]
        except zun_ex.ClientException as exc:
            # NOTE: a missing space before 'Error:' in the original format
            # string made the log message run together; fixed here.
            LOG.error((
                'During lease teardown, failed to enumerate containers. '
                'Containers may need to be manually cleaned up on %s. '
                'Error: %s'
            ), device['name'], exc)
            host_containers = []
        for container in host_containers:
            try:
                self.zun.containers.delete(
                    container.uuid, force=True, stop=True)
            except zun_ex.NotFound:
                LOG.info('Could not find container %s, may have been deleted '
                         'concurrently.', container.name)
            except Exception as e:
                LOG.exception('Failed to delete %s: %s.',
                              container.name, str(e))

    def poll_resource_failures(self, devices):
        """Classify this driver's devices as newly failed or recovered.

        *Failed*: device marked reservable but its compute service is down
        or disabled. *Recovered*: device marked unreservable but its
        service is up and enabled.

        :param devices: iterable of device dicts; only those whose
            ``device_driver`` matches this plugin are considered.
        :return: tuple ``(failed_devices, recovered_devices)``.
        """
        failed_devices = []
        recovered_devices = []
        zun_compute_services = {s.host: s for s in self.zun.services.list()}
        zun_devices = {d["name"]: d for d in devices
                       if d.get("device_driver") == self.device_driver}
        for device_name, device in zun_devices.items():
            is_reservable = device.get("reservable")
            cs = zun_compute_services.get(device_name)
            if cs is None:
                # No matching compute service: nothing to diagnose
                # (previously `cs.disabled` would raise AttributeError).
                continue
            # BUGFIX: the original `... and cs.state == 'down' or cs.disabled`
            # parsed as `(...) or cs.disabled` due to operator precedence,
            # marking any device with a disabled service as failed.
            service_unhealthy = cs.state == 'down' or cs.disabled
            if is_reservable and service_unhealthy:
                failed_devices.append(device)
            if not is_reservable and cs.state == 'up' and not cs.disabled:
                recovered_devices.append(device)
        return failed_devices, recovered_devices
| true | true |
f7fec0553288fad8f505cc07198fe4784885fdf5 | 7,963 | py | Python | Providers/nxOMSAutomationWorker/automationworker/3.x/worker/sandbox.py | Microsoft/PowerShell-DSC-for-Linux | 286aa5f66ca3c70148c4afb03b7e7c40de0d44af | [
"MIT"
] | 154 | 2015-11-25T13:30:23.000Z | 2019-04-30T17:12:24.000Z | Providers/nxOMSAutomationWorker/automationworker/3.x/worker/sandbox.py | Microsoft/PowerShell-DSC-for-Linux | 286aa5f66ca3c70148c4afb03b7e7c40de0d44af | [
"MIT"
] | 317 | 2015-12-04T17:19:49.000Z | 2019-04-19T15:26:34.000Z | Providers/nxOMSAutomationWorker/automationworker/3.x/worker/sandbox.py | Microsoft/PowerShell-DSC-for-Linux | 286aa5f66ca3c70148c4afb03b7e7c40de0d44af | [
"MIT"
] | 72 | 2015-11-23T03:02:15.000Z | 2019-04-17T14:45:39.000Z | #!/usr/bin/env python3
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
"""Sandbox module."""
import os
import sys
import time
import traceback
from queue import Queue, Empty
import util
# append worker binary source path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
util.add_all_packages_under_automationworker_to_sys_path()
import configuration3 as configuration
import tracer
from automationconstants import jobstatus
from automationconstants import pendingactions
from httpclientfactory import HttpClientFactory
from job import Job
from jrdsclient import JRDSClient
from workerexception import *
routine_loop = True
job_map = {}
def exit_on_error(message, exit_code=1):
    """Write *message* to a per-sandbox crash log and terminate the process."""
    crash_log = "automation_sandbox_crash_{0}.log".format(os.environ["sandbox_id"])
    util.exit_on_error(filename=crash_log, message=message, exit_code=exit_code)
def safe_loop(func):
    """Decorator: re-run *func* until the global ``routine_loop`` flag clears.

    Iterations are separated by the configured JRDS polling interval. Any
    exception escaping *func* is traced, echoed to stderr for the parent
    worker process, and terminates the sandbox with exit code 1.
    """
    def decorated_func(*args, **kwargs):
        global routine_loop
        while routine_loop:
            try:
                func(*args, **kwargs)
            except (JrdsAuthorizationException,
                    JrdsSandboxTerminated,
                    InvalidFilePermissionException,
                    FileNotFoundException,
                    Exception):
                # NOTE(review): the trailing Exception already catches the
                # specific types above; they appear to be kept as
                # documentation of the expected failure modes.
                stack = traceback.format_exc()
                tracer.log_sandbox_safe_loop_terminal_exception(stack)
                sys.stderr.write(stack)  # writing exception to stderr to be traced by the worker
                sys.stderr.flush()
                time.sleep(1)  # allow the trace to make it to stdout (since traces are background threads)
                # this will work as long as all threads are daemon
                # daemon threads are only supported in 2.6+
                sys.exit(1)
            time.sleep(configuration.get_jrds_get_job_actions_polling_freq())
    return decorated_func
class Sandbox:
    """Polls JRDS for job actions and manages the lifecycle of job threads.

    Tracked jobs live in the module-level ``job_map`` as
    ``job_id -> (Job, message_queue, exception_queue)`` tuples.
    """

    def __init__(self):
        # Sandbox identity is handed down by the parent worker via env var.
        self.sandbox_id = os.environ["sandbox_id"]
        tracer.log_sandbox_starting(sandbox_id=self.sandbox_id, pid=os.getpid(),
                                    worker_type=configuration.get_worker_type())
        tracer.log_sandbox_configuration(sandbox_id=self.sandbox_id,
                                         enforce_runbook_signature_validation=configuration.get_enforce_runbook_signature_validation(),
                                         gpg_public_keyring_paths=configuration.get_gpg_public_keyrings_path(),
                                         working_directory=os.getcwd())
        http_client_factory = HttpClientFactory(configuration.get_jrds_cert_path(), configuration.get_jrds_key_path(),
                                                configuration.get_verify_certificates())
        http_client = http_client_factory.create_http_client(sys.version_info)
        self.jrds_client = JRDSClient(http_client)

    @staticmethod
    def assert_environment_prerequisite():
        """Raise InvalidFilePermissionException unless every required
        credential/configuration file is readable."""
        jrds_cert_path = configuration.get_jrds_cert_path()
        if util.assert_file_read_permission(jrds_cert_path) is False:
            raise InvalidFilePermissionException(jrds_cert_path)
        jrds_key_path = configuration.get_jrds_key_path()
        if util.assert_file_read_permission(jrds_key_path) is False:
            raise InvalidFilePermissionException(jrds_key_path)
        worker_conf_path = configuration.get_worker_configuration_file_path()
        if util.assert_file_read_permission(worker_conf_path) is False:
            raise InvalidFilePermissionException(worker_conf_path)
        proxy_conf_path = configuration.get_proxy_configuration_path()
        # Proxy config is optional; only check it when a non-default file exists.
        if proxy_conf_path != configuration.DEFAULT_PROXY_CONFIGURATION_PATH and os.path.isfile(proxy_conf_path):
            if util.assert_file_read_permission(proxy_conf_path) is False:
                raise InvalidFilePermissionException(proxy_conf_path)

    @staticmethod
    def stop_tracking_terminated_jobs():
        """Drop finished job threads from ``job_map``.

        If a dead job left anything on its exception queue, a
        SandboxRuntimeException is raised to surface the failure
        (caught and logged by the @safe_loop wrapper around routine()).
        """
        terminated_job_ids = []
        # clean up finished jobs
        for job_id, job_tuple in list(job_map.items()):
            if job_tuple[0].is_alive() is False:
                try:
                    job_tuple[2].get(block=False)
                    raise SandboxRuntimeException()
                except Empty:
                    # empty exception queue == job terminated cleanly
                    pass
                terminated_job_ids.append(job_id)
        for job_id in terminated_job_ids:
            removal = job_map.pop(job_id, None)
            if removal is not None:
                tracer.log_sandbox_stopped_tracking_job(job_id)

    @safe_loop
    def routine(self):
        """One polling iteration: reap dead jobs, fetch job actions from
        JRDS, and start/stop job threads accordingly. Runs forever under
        @safe_loop until JRDS terminates the sandbox."""
        self.assert_environment_prerequisite()
        # clean up finished jobs
        self.stop_tracking_terminated_jobs()
        # get job actions
        try:
            job_actions = self.jrds_client.get_job_actions(self.sandbox_id)
        except JrdsSandboxTerminated:
            tracer.log_sandbox_jrds_closure_request()
            global routine_loop
            routine_loop = False
            return
        for job_action in job_actions:
            job_id = job_action["JobId"]
            job_data = self.jrds_client.get_job_data(job_id)
            job_pending_action = job_data.pending_action
            job_status = job_data.job_status
            # issue pending action; jobs already activating/running with no
            # pending action are (re)attached the same way as explicit activates
            if job_pending_action == pendingactions.ACTIVATE_ENUM_INDEX or \
                    (job_pending_action is None and job_status == jobstatus.ACTIVATING_ENUM_INDEX) or \
                    (job_pending_action is None and job_status == jobstatus.RUNNING_ENUM_INDEX):
                tracer.log_sandbox_pending_action_activate_detected(job_id, job_status, job_pending_action)
                # check if the specified job is already running to prevent duplicate
                if job_id in job_map:
                    continue
                # create and start the new job
                job_message_queue = Queue()
                job_thread_exception_queue = Queue()
                job = Job(self.sandbox_id, job_id, job_message_queue, self.jrds_client, job_thread_exception_queue)
                job_map[job_id] = (job, job_message_queue, job_thread_exception_queue)
                tracer.log_sandbox_started_tracking_job(job_id)
                job.start()
            elif job_pending_action == pendingactions.STOP_ENUM_INDEX:
                tracer.log_sandbox_pending_action_stop_detected(job_id, job_status, job_pending_action)
                # check if the specified job is already running before issuing pending action
                if job_id not in job_map:
                    continue
                # propagate pending action to job thread
                job_map[job_id][1].put(job_pending_action)
            elif job_pending_action is None:
                tracer.log_sandbox_no_pending_action_detected(job_id, job_status)
            else:
                tracer.log_sandbox_unsupported_pending_action_detected(job_id, job_status, job_pending_action)
def main():
    """Entry point: load configuration from argv[1] and run the sandbox loop."""
    if len(sys.argv) < 2:
        exit_on_error("Invalid configuration file path (absolute path is required).")
    configuration_path = str(sys.argv[1])
    if not os.path.isfile(configuration_path):
        exit_on_error("Invalid configuration file path or empty configuration file (absolute path is required).")
    # configuration has to be read first thing
    try:
        # remove the test_mode env_var value (mainly for Windows)
        # this value is set in test
        del os.environ["test_mode"]
    except KeyError:
        pass
    configuration.read_and_set_configuration(configuration_path)
    configuration.set_config({configuration.COMPONENT: "sandbox",
                              configuration.WORKING_DIRECTORY_PATH: os.getcwd()})
    # do not trace anything before this point
    sandbox = Sandbox()
    sandbox.routine()


if __name__ == "__main__":
    main()
| 41.046392 | 135 | 0.658295 |
import os
import sys
import time
import traceback
from queue import Queue, Empty
import util
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
util.add_all_packages_under_automationworker_to_sys_path()
import configuration3 as configuration
import tracer
from automationconstants import jobstatus
from automationconstants import pendingactions
from httpclientfactory import HttpClientFactory
from job import Job
from jrdsclient import JRDSClient
from workerexception import *
routine_loop = True
job_map = {}
def exit_on_error(message, exit_code=1):
crash_log_filename = "automation_sandbox_crash_" + str(os.environ["sandbox_id"]) + ".log"
util.exit_on_error(filename=crash_log_filename, message=message, exit_code=exit_code)
def safe_loop(func):
def decorated_func(*args, **kwargs):
global routine_loop
while routine_loop:
try:
func(*args, **kwargs)
except (JrdsAuthorizationException,
JrdsSandboxTerminated,
InvalidFilePermissionException,
FileNotFoundException,
Exception):
stack = traceback.format_exc()
tracer.log_sandbox_safe_loop_terminal_exception(stack)
sys.stderr.write(stack)
sys.stderr.flush()
time.sleep(1)
sys.exit(1)
time.sleep(configuration.get_jrds_get_job_actions_polling_freq())
return decorated_func
class Sandbox:
def __init__(self):
self.sandbox_id = os.environ["sandbox_id"]
tracer.log_sandbox_starting(sandbox_id=self.sandbox_id, pid=os.getpid(),
worker_type=configuration.get_worker_type())
tracer.log_sandbox_configuration(sandbox_id=self.sandbox_id,
enforce_runbook_signature_validation=configuration.get_enforce_runbook_signature_validation(),
gpg_public_keyring_paths=configuration.get_gpg_public_keyrings_path(),
working_directory=os.getcwd())
http_client_factory = HttpClientFactory(configuration.get_jrds_cert_path(), configuration.get_jrds_key_path(),
configuration.get_verify_certificates())
http_client = http_client_factory.create_http_client(sys.version_info)
self.jrds_client = JRDSClient(http_client)
@staticmethod
def assert_environment_prerequisite():
jrds_cert_path = configuration.get_jrds_cert_path()
if util.assert_file_read_permission(jrds_cert_path) is False:
raise InvalidFilePermissionException(jrds_cert_path)
jrds_key_path = configuration.get_jrds_key_path()
if util.assert_file_read_permission(jrds_key_path) is False:
raise InvalidFilePermissionException(jrds_key_path)
worker_conf_path = configuration.get_worker_configuration_file_path()
if util.assert_file_read_permission(worker_conf_path) is False:
raise InvalidFilePermissionException(worker_conf_path)
proxy_conf_path = configuration.get_proxy_configuration_path()
if proxy_conf_path != configuration.DEFAULT_PROXY_CONFIGURATION_PATH and os.path.isfile(proxy_conf_path):
if util.assert_file_read_permission(proxy_conf_path) is False:
raise InvalidFilePermissionException(proxy_conf_path)
@staticmethod
def stop_tracking_terminated_jobs():
terminated_job_ids = []
for job_id, job_tuple in list(job_map.items()):
if job_tuple[0].is_alive() is False:
try:
job_tuple[2].get(block=False)
raise SandboxRuntimeException()
except Empty:
pass
terminated_job_ids.append(job_id)
for job_id in terminated_job_ids:
removal = job_map.pop(job_id, None)
if removal is not None:
tracer.log_sandbox_stopped_tracking_job(job_id)
@safe_loop
def routine(self):
self.assert_environment_prerequisite()
self.stop_tracking_terminated_jobs()
try:
job_actions = self.jrds_client.get_job_actions(self.sandbox_id)
except JrdsSandboxTerminated:
tracer.log_sandbox_jrds_closure_request()
global routine_loop
routine_loop = False
return
for job_action in job_actions:
job_id = job_action["JobId"]
job_data = self.jrds_client.get_job_data(job_id)
job_pending_action = job_data.pending_action
job_status = job_data.job_status
if job_pending_action == pendingactions.ACTIVATE_ENUM_INDEX or \
(job_pending_action is None and job_status == jobstatus.ACTIVATING_ENUM_INDEX) or \
(job_pending_action is None and job_status == jobstatus.RUNNING_ENUM_INDEX):
tracer.log_sandbox_pending_action_activate_detected(job_id, job_status, job_pending_action)
if job_id in job_map:
continue
job_message_queue = Queue()
job_thread_exception_queue = Queue()
job = Job(self.sandbox_id, job_id, job_message_queue, self.jrds_client, job_thread_exception_queue)
job_map[job_id] = (job, job_message_queue, job_thread_exception_queue)
tracer.log_sandbox_started_tracking_job(job_id)
job.start()
elif job_pending_action == pendingactions.STOP_ENUM_INDEX:
tracer.log_sandbox_pending_action_stop_detected(job_id, job_status, job_pending_action)
if job_id not in job_map:
continue
job_map[job_id][1].put(job_pending_action)
elif job_pending_action is None:
tracer.log_sandbox_no_pending_action_detected(job_id, job_status)
else:
tracer.log_sandbox_unsupported_pending_action_detected(job_id, job_status, job_pending_action)
def main():
if len(sys.argv) < 2:
exit_on_error("Invalid configuration file path (absolute path is required).")
configuration_path = str(sys.argv[1])
if not os.path.isfile(configuration_path):
exit_on_error("Invalid configuration file path or empty configuration file (absolute path is required).")
try:
del os.environ["test_mode"]
except KeyError:
pass
configuration.read_and_set_configuration(configuration_path)
configuration.set_config({configuration.COMPONENT: "sandbox",
configuration.WORKING_DIRECTORY_PATH: os.getcwd()})
sandbox = Sandbox()
sandbox.routine()
if __name__ == "__main__":
main()
| true | true |
f7fec056b4ec04a2de2b3bb684da35f7774d20e1 | 1,706 | py | Python | profiles_api/migrations/0001_initial.py | anilchoudhary/profiles-rest-api | fcd0f9756e0f7df8a39e265d100f0e4b05884fe7 | [
"MIT"
] | null | null | null | profiles_api/migrations/0001_initial.py | anilchoudhary/profiles-rest-api | fcd0f9756e0f7df8a39e265d100f0e4b05884fe7 | [
"MIT"
] | 7 | 2020-06-06T01:33:27.000Z | 2022-02-10T15:12:50.000Z | profiles_api/migrations/0001_initial.py | anilchoudhary/profiles-rest-api | fcd0f9756e0f7df8a39e265d100f0e4b05884fe7 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-03-23 05:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.176471 | 266 | 0.638921 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| true | true |
f7fec1f1fd5647f172d22cfb001806a0622e0bc3 | 1,793 | py | Python | robot-server/robot_server/health/router.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | [
"Apache-2.0"
] | 235 | 2017-10-27T20:37:27.000Z | 2022-03-30T14:09:49.000Z | robot-server/robot_server/health/router.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | [
"Apache-2.0"
] | 8,425 | 2017-10-26T15:25:43.000Z | 2022-03-31T23:54:26.000Z | robot-server/robot_server/health/router.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | [
"Apache-2.0"
] | 130 | 2017-11-09T21:02:37.000Z | 2022-03-15T18:01:24.000Z | """HTTP routes and handlers for /health endpoints."""
from fastapi import APIRouter, Depends, status
from opentrons import __version__, config, protocol_api
from opentrons.hardware_control import API as HardwareAPI
from robot_server.hardware import get_hardware
from robot_server.service.legacy.models import V1BasicResponse
from .models import Health, HealthLinks
LOG_PATHS = ["/logs/serial.log", "/logs/api.log", "/logs/server.log"]
health_router = APIRouter()
@health_router.get(
    path="/health",
    summary="Get server health",
    status_code=status.HTTP_200_OK,
    response_model=Health,
    responses={
        status.HTTP_503_SERVICE_UNAVAILABLE: {
            "model": V1BasicResponse,
            "description": "Robot motor controller is not ready",
        }
    },
)
async def get_health(hardware: HardwareAPI = Depends(get_hardware)) -> Health:
    """Get information about the health of the robot server.

    Use the health endpoint to check that the robot server is running
    and ready to operate. A 200 OK response means the server is running.
    The response includes information about the software and system.
    """
    # Aggregate static config/version info with live hardware details.
    return Health(
        name=config.name(),
        api_version=__version__,
        fw_version=hardware.fw_version,
        board_revision=hardware.board_revision,
        logs=LOG_PATHS,
        system_version=config.OT_SYSTEM_VERSION,
        maximum_protocol_api_version=list(protocol_api.MAX_SUPPORTED_VERSION),
        minimum_protocol_api_version=list(protocol_api.MIN_SUPPORTED_VERSION),
        links=HealthLinks(
            apiLog="/logs/api.log",
            serialLog="/logs/serial.log",
            serverLog="/logs/server.log",
            apiSpec="/openapi.json",
            systemTime="/system/time",
        ),
    )
| 33.203704 | 78 | 0.697156 | from fastapi import APIRouter, Depends, status
from opentrons import __version__, config, protocol_api
from opentrons.hardware_control import API as HardwareAPI
from robot_server.hardware import get_hardware
from robot_server.service.legacy.models import V1BasicResponse
from .models import Health, HealthLinks
LOG_PATHS = ["/logs/serial.log", "/logs/api.log", "/logs/server.log"]
health_router = APIRouter()
@health_router.get(
path="/health",
summary="Get server health",
status_code=status.HTTP_200_OK,
response_model=Health,
responses={
status.HTTP_503_SERVICE_UNAVAILABLE: {
"model": V1BasicResponse,
"description": "Robot motor controller is not ready",
}
},
)
async def get_health(hardware: HardwareAPI = Depends(get_hardware)) -> Health:
return Health(
name=config.name(),
api_version=__version__,
fw_version=hardware.fw_version,
board_revision=hardware.board_revision,
logs=LOG_PATHS,
system_version=config.OT_SYSTEM_VERSION,
maximum_protocol_api_version=list(protocol_api.MAX_SUPPORTED_VERSION),
minimum_protocol_api_version=list(protocol_api.MIN_SUPPORTED_VERSION),
links=HealthLinks(
apiLog="/logs/api.log",
serialLog="/logs/serial.log",
serverLog="/logs/server.log",
apiSpec="/openapi.json",
systemTime="/system/time",
),
)
| true | true |
f7fec20f25cc85ade8c079bffb0d1757f3ad0bab | 779 | py | Python | belvo/resources/incomes.py | ejimz/belvo-python | 2eb1eb385946baef47ca596634cbc5ec2183b606 | [
"MIT"
] | 14 | 2020-04-04T05:02:32.000Z | 2022-03-14T02:08:48.000Z | belvo/resources/incomes.py | ejimz/belvo-python | 2eb1eb385946baef47ca596634cbc5ec2183b606 | [
"MIT"
] | 122 | 2019-11-18T09:20:52.000Z | 2022-03-01T15:38:13.000Z | belvo/resources/incomes.py | ejimz/belvo-python | 2eb1eb385946baef47ca596634cbc5ec2183b606 | [
"MIT"
] | 11 | 2020-01-18T00:41:01.000Z | 2022-01-26T20:52:53.000Z | from typing import Dict, List, Union
from belvo.resources.base import Resource
class Incomes(Resource):
    """Resource wrapper for the Belvo ``/api/incomes/`` endpoint."""

    endpoint = "/api/incomes/"

    def create(
        self,
        link: str,
        *,
        token: str = None,
        save_data: bool = True,
        date_from: str = None,
        date_to: str = None,
        raise_exception: bool = False,
        **kwargs: Dict,
    ) -> Union[List[Dict], Dict]:
        """POST an income-extraction request for *link*.

        Optional fields are only included in the payload when truthy.
        """
        payload = {"link": link, "save_data": save_data}
        optional_fields = (
            ("date_from", date_from),
            ("date_to", date_to),
            ("token", token),
        )
        for field_name, field_value in optional_fields:
            if field_value:
                payload[field_name] = field_value
        return self.session.post(
            self.endpoint, data=payload, raise_exception=raise_exception, **kwargs
        )
| 23.606061 | 79 | 0.563543 | from typing import Dict, List, Union
from belvo.resources.base import Resource
class Incomes(Resource):
endpoint = "/api/incomes/"
def create(
self,
link: str,
*,
token: str = None,
save_data: bool = True,
date_from: str = None,
date_to: str = None,
raise_exception: bool = False,
**kwargs: Dict,
) -> Union[List[Dict], Dict]:
data = {"link": link, "save_data": save_data}
if date_from:
data.update(date_from=date_from)
if date_to:
data.update(date_to=date_to)
if token:
data.update(token=token)
return self.session.post(
self.endpoint, data=data, raise_exception=raise_exception, **kwargs
)
| true | true |
f7fec32b9ee150c6800dd136bb3b93d98a0e64ed | 398 | py | Python | app/core/urls.py | nyyirs/profiles-rest-api | b3c270e20e4d9503dd8ab2d533c56995c49f3560 | [
"MIT"
] | null | null | null | app/core/urls.py | nyyirs/profiles-rest-api | b3c270e20e4d9503dd8ab2d533c56995c49f3560 | [
"MIT"
] | null | null | null | app/core/urls.py | nyyirs/profiles-rest-api | b3c270e20e4d9503dd8ab2d533c56995c49f3560 | [
"MIT"
] | null | null | null | from django.urls import include, path
from . import views
from rest_framework.routers import DefaultRouter
# Register the viewsets with a DefaultRouter so their CRUD routes (plus an
# API root view) are generated automatically under 'profile/' and 'feed/'.
router = DefaultRouter()
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
# Explicit APIView endpoints, then the router-generated routes at the root.
urlpatterns = [
    path('hello/', views.HelloApiView.as_view()),
    path('login/', views.UserLoginApiView.as_view()),
    path('', include(router.urls))
]
| 28.428571 | 53 | 0.753769 | from django.urls import include, path
from . import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
path('hello/', views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('', include(router.urls))
]
| true | true |
f7fec3693b5e645595012d950293121f9175c155 | 550 | py | Python | env/Lib/site-packages/plotly/validators/scatter/marker/line/_coloraxis.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/Lib/site-packages/plotly/validators/scatter/marker/line/_coloraxis.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/Lib/site-packages/plotly/validators/scatter/marker/line/_coloraxis.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(
self, plotly_name="coloraxis", parent_name="scatter.marker.line", **kwargs
):
super(ColoraxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", None),
edit_type=kwargs.pop("edit_type", "calc"),
regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
**kwargs
)
| 34.375 | 82 | 0.62 | import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(
self, plotly_name="coloraxis", parent_name="scatter.marker.line", **kwargs
):
super(ColoraxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", None),
edit_type=kwargs.pop("edit_type", "calc"),
regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
**kwargs
)
| true | true |
f7fec39a0c172b0c6ce55288297ae45c020fb8f5 | 1,329 | py | Python | monk/system_unit_tests/pytorch/test_loss_l1.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 542 | 2019-11-10T12:09:31.000Z | 2022-03-28T11:39:07.000Z | monk/system_unit_tests/pytorch/test_loss_l1.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 117 | 2019-11-12T09:39:24.000Z | 2022-03-12T00:20:41.000Z | monk/system_unit_tests/pytorch/test_loss_l1.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 246 | 2019-11-09T21:53:24.000Z | 2022-03-29T00:57:07.000Z | import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import torch
import numpy as np
from pytorch.losses.return_loss import load_loss
def test_loss_l1(system_dict):
    """Smoke-test monk's L1 loss wrapper and record the outcome.

    Builds a throwaway prototype experiment, attaches the L1 loss, and
    evaluates it once on random tensors. Pass/fail/skip bookkeeping is
    accumulated in (and returned via) *system_dict*.
    """
    forward = True;
    test = "test_loss_l1";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");
            # random prediction/target pair for a single 5-class sample
            label = torch.randn(1, 5);
            y = torch.randn(1, 5);
            gtf.loss_l1();
            load_loss(gtf.system_dict);
            loss_obj = gtf.system_dict["local"]["criterion"];
            loss_val = loss_obj(y, label);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            # any failure is recorded with its exception for later reporting
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");
    return system_dict
| 26.58 | 69 | 0.616253 | import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import torch
import numpy as np
from pytorch.losses.return_loss import load_loss
def test_loss_l1(system_dict):
forward = True;
test = "test_loss_l1";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
label = torch.randn(1, 5);
y = torch.randn(1, 5);
gtf.loss_l1();
load_loss(gtf.system_dict);
loss_obj = gtf.system_dict["local"]["criterion"];
loss_val = loss_obj(y, label);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
| true | true |
f7fec44e09a5e8cca9f453026f91743bdebfc047 | 13,924 | py | Python | log_casp_inh/model_733.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_casp_inh/model_733.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_casp_inh/model_733.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM'])
Monomer('Ligand', ['Receptor'])
Monomer('C6pro', ['C3A'])
Monomer('ParpU', ['C3A'])
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('BidM', ['BaxM'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('Xiap', ['SmacC', 'C3A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C3ub')
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C3pro', ['C8A'])
Monomer('SmacM', ['BaxA'])
Monomer('SmacC', ['Xiap'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('C6pro_0', 100.0)
Parameter('ParpU_0', 1000000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('C8A_0', 0.0)
Parameter('Xiap_0', 183250.0)
Parameter('Receptor_0', 100.0)
Parameter('C3ub_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('SmacM_0', 100000.0)
Parameter('SmacC_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('BaxA_obs', BaxA())
Observable('Ligand_obs', Ligand())
Observable('C6pro_obs', C6pro())
Observable('ParpU_obs', ParpU())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('BidM_obs', BidM())
Observable('BaxM_obs', BaxM())
Observable('C8A_obs', C8A())
Observable('Xiap_obs', Xiap())
Observable('Receptor_obs', Receptor())
Observable('C3ub_obs', C3ub())
Observable('Fadd_obs', Fadd())
Observable('C3pro_obs', C3pro())
Observable('SmacM_obs', SmacM())
Observable('SmacC_obs', SmacC())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), BaxA_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(Xiap(SmacC=None, C3A=None), Xiap_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C3ub(), C3ub_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
| 85.423313 | 598 | 0.808532 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM'])
Monomer('Ligand', ['Receptor'])
Monomer('C6pro', ['C3A'])
Monomer('ParpU', ['C3A'])
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('BidM', ['BaxM'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('Xiap', ['SmacC', 'C3A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C3ub')
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C3pro', ['C8A'])
Monomer('SmacM', ['BaxA'])
Monomer('SmacC', ['Xiap'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('C6pro_0', 100.0)
Parameter('ParpU_0', 1000000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('C8A_0', 0.0)
Parameter('Xiap_0', 183250.0)
Parameter('Receptor_0', 100.0)
Parameter('C3ub_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('SmacM_0', 100000.0)
Parameter('SmacC_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('BaxA_obs', BaxA())
Observable('Ligand_obs', Ligand())
Observable('C6pro_obs', C6pro())
Observable('ParpU_obs', ParpU())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('BidM_obs', BidM())
Observable('BaxM_obs', BaxM())
Observable('C8A_obs', C8A())
Observable('Xiap_obs', Xiap())
Observable('Receptor_obs', Receptor())
Observable('C3ub_obs', C3ub())
Observable('Fadd_obs', Fadd())
Observable('C3pro_obs', C3pro())
Observable('SmacM_obs', SmacM())
Observable('SmacC_obs', SmacC())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), BaxA_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(Xiap(SmacC=None, C3A=None), Xiap_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C3ub(), C3ub_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
| true | true |
f7fec4a9a2aff4634507fdcd8b997b292d49c8ea | 8,445 | py | Python | fixtures/contact.py | Stabilniy/python_training | b3495595445a80e408ad2a4a33536166259655f3 | [
"Apache-2.0"
] | 1 | 2020-04-27T12:11:51.000Z | 2020-04-27T12:11:51.000Z | fixtures/contact.py | Stabilniy/python_training | b3495595445a80e408ad2a4a33536166259655f3 | [
"Apache-2.0"
] | null | null | null | fixtures/contact.py | Stabilniy/python_training | b3495595445a80e408ad2a4a33536166259655f3 | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.support.ui import Select
import time
from modules.contact import Contact
class ContactHelper:
    def __init__(self, app):
        # Page-object helper for the contact pages.
        # `app` is the application fixture that owns the shared WebDriver
        # instance (exposed as app.wd) and the navigation helpers used below.
        self.app = app
def new_contact_creation(self, contact):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.app.return_to_homepage()
self.contact_cache = None
def select_contacts_by_index(self,index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contacts_by_id(self, id):
wd = self.app.wd
wd.find_element_by_xpath("//input[@id='%s']" % id).click()
    def delete(self):
        # Convenience wrapper kept for older tests: delete the first contact.
        self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.app.open_homepage()
time.sleep(5)
self.select_contacts_by_index(index)
wd.find_element_by_xpath("//div[2]//input[1]").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.app.open_homepage()
time.sleep(5)
self.select_contacts_by_id(id)
wd.find_element_by_xpath("//div[2]//input[1]").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def change_filed_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def change_selectlist_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
Select(wd.find_element_by_name(field_name)).select_by_visible_text(text)
def edit_contact(self):
self.edit_contact_by_index(0)
def edit_contact_by_index(self, contact, index):
wd = self.app.wd
self.app.open_homepage()
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
self.fill_contact_form(contact)
wd.find_element_by_name("update").click()
self.contact_cache = None
def edit_contact_by_id(self, contact, id):
wd = self.app.wd
#self.app.open_homepage()
wd.get("http://localhost/addressbook/edit.php?id='%s'" %id)
self.fill_contact_form(contact)
wd.find_element_by_name("update").click()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_filed_value("firstname", contact.firstname)
self.change_filed_value("middlename", contact.middlename)
self.change_filed_value("lastname", contact.lastname)
self.change_filed_value("nickname", contact.nickname)
self.change_filed_value("title", contact.title)
self.change_filed_value("company", contact.company)
self.change_filed_value("address", contact.address)
self.change_filed_value("home", contact.home)
self.change_filed_value("mobile", contact.mobile)
self.change_filed_value("work", contact.work)
self.change_filed_value("fax", contact.fax)
self.change_filed_value("email", contact.email)
self.change_filed_value("email2", contact.email2)
self.change_filed_value("email3", contact.email3)
self.change_filed_value("address2", contact.address2)
self.change_filed_value("homepage", contact.homepage)
self.change_selectlist_value("bday", contact.bday)
self.change_selectlist_value("bmonth", contact.bmounth)
self.change_filed_value("byear", contact.byear)
self.change_selectlist_value("aday", contact.aday)
self.change_selectlist_value("amonth", contact.amonth)
self.change_filed_value("ayear", contact.ayear)
self.change_filed_value("phone2", contact.phone2)
self.change_filed_value("notes", contact.notes)
def count(self):
wd = self.app.wd
self.app.open_homepage()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
    def get_contact_list(self):
        """Return the list of contacts scraped from the home page (memoized).

        Returns a shallow copy so callers cannot mutate the cache.
        """
        if self.contact_cache is None:
            wd = self.app.wd
            self.app.open_homepage()
            self.contact_cache = []
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements_by_tag_name("td")
                # Column layout: 0 = checkbox (input holds the id),
                # 1 = last name, 2 = first name, 3 = address,
                # 4 = all e-mails, 5 = all phones.
                id = cells[0].find_element_by_tag_name("input").get_attribute("value")
                firstname = cells[2].text
                lastname = cells[1].text
                address = cells[3].text
                all_emails = cells[4].text
                all_phones = cells[5].text
                self.contact_cache.append(Contact(firstname=firstname, lastname = lastname, id=id, address = address , all_phones = all_phones, all_emails = all_emails))
        return list(self.contact_cache)
def get_edit_page_index(self, index):
wd = self.app.wd
self.app.open_homepage()
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
def data_edit_page_index(self, index):
wd = self.app.wd
self.get_edit_page_index(index)
firstname = wd.find_element_by_xpath("//input[@name='firstname']").get_attribute("value")
lastname = wd.find_element_by_xpath("//input[@name='lastname']").get_attribute("value")
address = wd.find_element_by_xpath("//textarea[@name='address']").get_attribute("value")
home = wd.find_element_by_xpath("//input[@name='home']").get_attribute("value")
mobile = wd.find_element_by_xpath("//input[@name='mobile']").get_attribute("value")
work = wd.find_element_by_xpath("//input[@name='work']").get_attribute("value")
phone2 = wd.find_element_by_xpath("//input[@name='phone2']").get_attribute("value")
email = wd.find_element_by_xpath("//input[@name='email']").get_attribute("value")
email1 = wd.find_element_by_xpath("//input[@name='email2']").get_attribute("value")
email2 = wd.find_element_by_xpath("//input[@name='email3']").get_attribute("value")
return Contact(firstname = firstname, lastname = lastname , address = address, home = home, mobile = mobile, work = work, phone2 = phone2, email = email, email1 = email1, email2 = email2)
def get_edit_page_id(self, id):
wd = self.app.wd
self.app.open_homepage()
wd.get("http://localhost/addressbook/edit.php?id='%s'" % id)
def data_edit_page_id(self, id):
wd = self.app.wd
self.get_edit_page_id(id)
firstname = wd.find_element_by_xpath("//input[@name='firstname']").get_attribute("value")
lastname = wd.find_element_by_xpath("//input[@name='lastname']").get_attribute("value")
address = wd.find_element_by_xpath("//textarea[@name='address']").get_attribute("value")
home = wd.find_element_by_xpath("//input[@name='home']").get_attribute("value")
mobile = wd.find_element_by_xpath("//input[@name='mobile']").get_attribute("value")
work = wd.find_element_by_xpath("//input[@name='work']").get_attribute("value")
phone2 = wd.find_element_by_xpath("//input[@name='phone2']").get_attribute("value")
email = wd.find_element_by_xpath("//input[@name='email']").get_attribute("value")
email1 = wd.find_element_by_xpath("//input[@name='email2']").get_attribute("value")
email2 = wd.find_element_by_xpath("//input[@name='email3']").get_attribute("value")
return Contact(firstname = firstname, lastname = lastname , address = address, home = home, mobile = mobile, work = work, phone2 = phone2, email = email, email1 = email1, email2 = email2)
def add_contact_to_group(self, id_contact, id_group):
wd = self.app.wd
self.app.open_homepage()
time.sleep(5)
self.select_contacts_by_id(id_contact)
Select(wd.find_element_by_name("to_group")).select_by_value(id_group)
wd.find_element_by_name("add").click()
def delete_contact_from_group(self, id_contact, id_group):
wd = self.app.wd
wd.get("http://localhost/addressbook/?group=%s" %id_group)
time.sleep(5)
self.select_contacts_by_id(id_contact)
wd.find_element_by_name("remove").click()
| 45.403226 | 195 | 0.65743 | from selenium.webdriver.support.ui import Select
import time
from modules.contact import Contact
class ContactHelper:
def __init__(self, app):
self.app = app
def new_contact_creation(self, contact):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.app.return_to_homepage()
self.contact_cache = None
def select_contacts_by_index(self,index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contacts_by_id(self, id):
wd = self.app.wd
wd.find_element_by_xpath("//input[@id='%s']" % id).click()
def delete(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.app.open_homepage()
time.sleep(5)
self.select_contacts_by_index(index)
wd.find_element_by_xpath("//div[2]//input[1]").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.app.open_homepage()
time.sleep(5)
self.select_contacts_by_id(id)
wd.find_element_by_xpath("//div[2]//input[1]").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def change_filed_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def change_selectlist_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
Select(wd.find_element_by_name(field_name)).select_by_visible_text(text)
def edit_contact(self):
self.edit_contact_by_index(0)
def edit_contact_by_index(self, contact, index):
wd = self.app.wd
self.app.open_homepage()
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
self.fill_contact_form(contact)
wd.find_element_by_name("update").click()
self.contact_cache = None
def edit_contact_by_id(self, contact, id):
wd = self.app.wd
wd.get("http://localhost/addressbook/edit.php?id='%s'" %id)
self.fill_contact_form(contact)
wd.find_element_by_name("update").click()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_filed_value("firstname", contact.firstname)
self.change_filed_value("middlename", contact.middlename)
self.change_filed_value("lastname", contact.lastname)
self.change_filed_value("nickname", contact.nickname)
self.change_filed_value("title", contact.title)
self.change_filed_value("company", contact.company)
self.change_filed_value("address", contact.address)
self.change_filed_value("home", contact.home)
self.change_filed_value("mobile", contact.mobile)
self.change_filed_value("work", contact.work)
self.change_filed_value("fax", contact.fax)
self.change_filed_value("email", contact.email)
self.change_filed_value("email2", contact.email2)
self.change_filed_value("email3", contact.email3)
self.change_filed_value("address2", contact.address2)
self.change_filed_value("homepage", contact.homepage)
self.change_selectlist_value("bday", contact.bday)
self.change_selectlist_value("bmonth", contact.bmounth)
self.change_filed_value("byear", contact.byear)
self.change_selectlist_value("aday", contact.aday)
self.change_selectlist_value("amonth", contact.amonth)
self.change_filed_value("ayear", contact.ayear)
self.change_filed_value("phone2", contact.phone2)
self.change_filed_value("notes", contact.notes)
def count(self):
wd = self.app.wd
self.app.open_homepage()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.app.open_homepage()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
firstname = cells[2].text
lastname = cells[1].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.contact_cache.append(Contact(firstname=firstname, lastname = lastname, id=id, address = address , all_phones = all_phones, all_emails = all_emails))
return list(self.contact_cache)
def get_edit_page_index(self, index):
wd = self.app.wd
self.app.open_homepage()
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
def data_edit_page_index(self, index):
wd = self.app.wd
self.get_edit_page_index(index)
firstname = wd.find_element_by_xpath("//input[@name='firstname']").get_attribute("value")
lastname = wd.find_element_by_xpath("//input[@name='lastname']").get_attribute("value")
address = wd.find_element_by_xpath("//textarea[@name='address']").get_attribute("value")
home = wd.find_element_by_xpath("//input[@name='home']").get_attribute("value")
mobile = wd.find_element_by_xpath("//input[@name='mobile']").get_attribute("value")
work = wd.find_element_by_xpath("//input[@name='work']").get_attribute("value")
phone2 = wd.find_element_by_xpath("//input[@name='phone2']").get_attribute("value")
email = wd.find_element_by_xpath("//input[@name='email']").get_attribute("value")
email1 = wd.find_element_by_xpath("//input[@name='email2']").get_attribute("value")
email2 = wd.find_element_by_xpath("//input[@name='email3']").get_attribute("value")
return Contact(firstname = firstname, lastname = lastname , address = address, home = home, mobile = mobile, work = work, phone2 = phone2, email = email, email1 = email1, email2 = email2)
def get_edit_page_id(self, id):
wd = self.app.wd
self.app.open_homepage()
wd.get("http://localhost/addressbook/edit.php?id='%s'" % id)
def data_edit_page_id(self, id):
wd = self.app.wd
self.get_edit_page_id(id)
firstname = wd.find_element_by_xpath("//input[@name='firstname']").get_attribute("value")
lastname = wd.find_element_by_xpath("//input[@name='lastname']").get_attribute("value")
address = wd.find_element_by_xpath("//textarea[@name='address']").get_attribute("value")
home = wd.find_element_by_xpath("//input[@name='home']").get_attribute("value")
mobile = wd.find_element_by_xpath("//input[@name='mobile']").get_attribute("value")
work = wd.find_element_by_xpath("//input[@name='work']").get_attribute("value")
phone2 = wd.find_element_by_xpath("//input[@name='phone2']").get_attribute("value")
email = wd.find_element_by_xpath("//input[@name='email']").get_attribute("value")
email1 = wd.find_element_by_xpath("//input[@name='email2']").get_attribute("value")
email2 = wd.find_element_by_xpath("//input[@name='email3']").get_attribute("value")
return Contact(firstname = firstname, lastname = lastname , address = address, home = home, mobile = mobile, work = work, phone2 = phone2, email = email, email1 = email1, email2 = email2)
def add_contact_to_group(self, id_contact, id_group):
wd = self.app.wd
self.app.open_homepage()
time.sleep(5)
self.select_contacts_by_id(id_contact)
Select(wd.find_element_by_name("to_group")).select_by_value(id_group)
wd.find_element_by_name("add").click()
def delete_contact_from_group(self, id_contact, id_group):
wd = self.app.wd
wd.get("http://localhost/addressbook/?group=%s" %id_group)
time.sleep(5)
self.select_contacts_by_id(id_contact)
wd.find_element_by_name("remove").click()
| true | true |
f7fec4ce0c2338c3018a4398ae64a4d387d55a5a | 1,083 | py | Python | rplugin/python3/deoplete/filter/matcher_full_fuzzy.py | kazufusa/deoplete.nvim | e61b8faee0b07f837f9009d780cbc3c2caaef76a | [
"MIT"
] | 10 | 2020-07-21T21:59:54.000Z | 2021-07-19T11:01:47.000Z | rplugin/python3/deoplete/filter/matcher_full_fuzzy.py | kazufusa/deoplete.nvim | e61b8faee0b07f837f9009d780cbc3c2caaef76a | [
"MIT"
] | 4 | 2017-04-15T17:45:36.000Z | 2017-11-20T16:27:17.000Z | rplugin/python3/deoplete/filter/matcher_full_fuzzy.py | kazufusa/deoplete.nvim | e61b8faee0b07f837f9009d780cbc3c2caaef76a | [
"MIT"
] | 1 | 2021-07-17T18:51:40.000Z | 2021-07-17T18:51:40.000Z | # ============================================================================
# FILE: matcher_full_fuzzy.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import re
from deoplete.base.filter import Base
from deoplete.util import fuzzy_escape, Nvim, UserContext, Candidates
class Filter(Base):
    """Full fuzzy matcher: keeps candidates whose word fuzzily matches the input."""

    def __init__(self, vim: Nvim) -> None:
        super().__init__(vim)
        self.name = 'matcher_full_fuzzy'
        self.description = 'full fuzzy matcher'

    def filter(self, context: UserContext) -> Candidates:
        """Return the candidates whose word matches the fuzzy pattern built
        from the current completion input, honouring ignorecase."""
        needle = context['complete_str']
        ignorecase = context['ignorecase']
        if ignorecase:
            needle = needle.lower()
        pattern = re.compile(fuzzy_escape(needle, context['camelcase']))
        if ignorecase:
            return [c for c in context['candidates']
                    if pattern.search(c['word'].lower())]
        return [c for c in context['candidates']
                if pattern.search(c['word'])]
| 34.935484 | 78 | 0.538319 |
import re
from deoplete.base.filter import Base
from deoplete.util import fuzzy_escape, Nvim, UserContext, Candidates
class Filter(Base):
def __init__(self, vim: Nvim) -> None:
super().__init__(vim)
self.name = 'matcher_full_fuzzy'
self.description = 'full fuzzy matcher'
def filter(self, context: UserContext) -> Candidates:
complete_str = context['complete_str']
if context['ignorecase']:
complete_str = complete_str.lower()
p = re.compile(fuzzy_escape(complete_str, context['camelcase']))
if context['ignorecase']:
return [x for x in context['candidates']
if p.search(x['word'].lower())]
else:
return [x for x in context['candidates']
if p.search(x['word'])]
| true | true |
f7fec5036b94158de10bbd0ac3ca8c077be9ea5f | 10,200 | py | Python | utils/networks/network_si_pathint.py | ashishgaurav13/cl_safer_classifiers | a3df87a4bc863377485fa58a8a475991a4fc9800 | [
"MIT"
] | 1 | 2020-07-03T06:51:19.000Z | 2020-07-03T06:51:19.000Z | utils/networks/network_si_pathint.py | ashishgaurav13/cl_safer_classifiers | a3df87a4bc863377485fa58a8a475991a4fc9800 | [
"MIT"
] | 3 | 2021-06-08T20:55:17.000Z | 2022-03-12T00:14:23.000Z | utils/networks/network_si_pathint.py | ashishgaurav13/cl_safer_classifiers | a3df87a4bc863377485fa58a8a475991a4fc9800 | [
"MIT"
] | null | null | null | import os, sys
sys.path.extend([os.path.expanduser(
os.path.abspath('./utils/networks/synaptic_intelligence/')
)])
from pathint import utils as putils
import utils
from keras.models import Sequential
from keras.layers import Dense, Dropout
from pathint import protocols
from pathint.optimizers import KOOptimizer
from keras.optimizers import SGD, Adam, RMSprop
from keras.callbacks import Callback
from pathint.keras_utils import LossHistory
import numpy as np
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
# from keras_tqdm import TQDMNotebookCallback
class SINetwork:
def __init__(self, layer_sizes, feature_extractor_needed = False, use_dropout = False,
activation = 'relu', dropoutv = 0.5, reshape_dims = None, seed = 0,
session_config = None, it = None, c = 1.0, xi = 0.1, lr = 0.001):
assert(len(layer_sizes) == 4)
assert(session_config != None)
assert(it != None)
self.layer_sizes = layer_sizes
self.feature_extractor_needed = feature_extractor_needed
self.use_dropout = use_dropout
self.dropoutv = dropoutv
self.reshape_dims = reshape_dims
self.seed = seed
self.session_config = session_config
self.it = it
self.use_dropout = use_dropout
self.activation = utils.get_activation(activation)
print("Using feature extractor: %s" % self.feature_extractor_needed)
print("Using dropout, bn: %s, %f" % (self.use_dropout, self.dropoutv))
self.phs = {}
self.vars = {}
self.objs = {}
self.all_predictions = []
self.c = c
self.xi = xi
self.lr = float(lr)
    def apply_feature_extractor(self, X):
        """Run X through the VGG16 feature extractor, or flatten it.

        The first call builds the extractor graph under the
        "feature_extractor" variable scope; later calls reuse those layers.
        Without an extractor, X is reshaped flat to the input layer size.
        NOTE(review): self.training_ph is not set in __init__ — presumably
        created elsewhere before this is called; verify.
        """
        if self.feature_extractor_needed:
            if not hasattr(self, 'feature_extractor_set'):
                with tf.variable_scope("feature_extractor"):
                    X, created_layers = utils.vgg16(X, self.training_ph)
                self.feature_extractor_set = True
                self.feature_extractor_layers = created_layers
            else:
                print("Reusing feature extractor")
                with tf.variable_scope("feature_extractor", reuse = True):
                    X = utils.vgg16_reuse(X, self.training_ph, self.feature_extractor_layers)
        else:
            X = tf.reshape(X, [-1, self.layer_sizes[0]])
        return X
def get_trainable_vars(self, scope = "", silent = False):
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = scope)
if self.feature_extractor_needed:
new_var_list = []
for item in var_list:
if not item.name.startswith("feature_extractor"):
new_var_list += [item]
var_list = new_var_list
if not silent:
print("Trainable vars: %s" % str(var_list))
return var_list
    def create_session(self, improve_by = 5, min_epoch = 10):
        """Create the TF session, saver and early stopping; restore VGG16 weights.

        NOTE(review): self.session_config is ignored here — the commented-out
        line is the only place it was used; confirm InteractiveSession without
        a config is intended.
        """
        self.objs['saver'] = tf.train.Saver()
        # self.objs['sess'] = tf.Session(config = self.session_config)
        self.objs['sess'] = tf.InteractiveSession()
        self.objs['sess'].run(tf.global_variables_initializer())
        self.objs['es'] = utils.EarlyStopping(
            self.objs['sess'],
            self.objs['saver'],
            save_dir = "saved_seed%d" % self.seed,
            improve_by = improve_by,
            min_epoch = min_epoch
        )
        if self.feature_extractor_needed:
            # The pretrained extractor checkpoint must be produced by cifar100.py.
            if not os.path.exists("vgg16_cifar100"):
                print("Pretrained model doesnt exist for VGG16")
                print("Run cifar100.py first")
                exit(0)
            else:
                reqd_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = "feature_extractor")
                feature_extractor_saver = tf.train.Saver(reqd_variables)
                print("Restoring feature extractor variables")
                feature_extractor_saver.restore(self.objs['sess'], "vgg16_cifar100/saved.ckpt")
                print("Done")
        # self.objs['sess'].graph.finalize()
    def print_vars(self, var_list, show_values = False, extra = False):
        """Pretty-print *var_list*, optionally with magnitudes and sparsity stats.

        show_values: print the reduce_sum of each variable (needs a session).
        extra: also print max of near-nonzero entries, nonzero count, total size.
        """
        for num, var in enumerate(var_list):
            print_strs = []
            if show_values:
                if 'sess' in self.objs:
                    red_sum = self.objs['sess'].run(tf.reduce_sum(var))
                    print_strs += ["mag %f" % (red_sum)]
                else:
                    print_strs += ["init"]
            if extra:
                if 'sess' in self.objs:
                    nonzerovar = tf.boolean_mask(var, tf.greater(var, 0.000001))
                    # NOTE(review): named "tmin" but computes reduce_max and is
                    # labelled "tmax" — the label appears to be the intent.
                    tmin = self.objs['sess'].run(tf.math.reduce_max(nonzerovar))
                    print_strs += ["tmax %f" % tmin]
                    nz = self.objs['sess'].run(tf.math.count_nonzero(var))
                    print_strs += ["nonzero %d" % nz]
                    num_elements = self.objs['sess'].run(tf.reduce_sum(tf.ones_like(var)))
                    print_strs += ["total %d" % num_elements]
            print_str = "\t(%d) %s" % (num+1, var.name)
            if len(print_strs) > 0:
                print_str += " => %s" % " , ".join(print_strs)
            print(print_str)
        print("Number of vars: %d" % len(var_list))
    def setup(self):
        """Build the Keras MLP and wrap its optimizer with the SI path-integral
        protocol (KOOptimizer), then set the consolidation strength c."""
        set_session(tf.Session(config = self.session_config))
        activation_fn = self.activation
        self.model = Sequential()
        self.model.add(Dense(self.layer_sizes[1], activation=activation_fn, input_dim=self.layer_sizes[0]))
        if self.use_dropout: self.model.add(Dropout(self.dropoutv))
        self.model.add(Dense(self.layer_sizes[2], activation=activation_fn))
        if self.use_dropout: self.model.add(Dropout(self.dropoutv))
        self.model.add(Dense(self.layer_sizes[3], activation='softmax'))
        # Path-integral consolidation protocol (Zenke et al., SI).
        protocol_name, protocol = protocols.PATH_INT_PROTOCOL(omega_decay='sum', xi=self.xi)
        self.opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999)
        opt_name = 'adam'
        self.oopt = KOOptimizer(self.opt, model=self.model, **protocol)
        self.model.compile(loss="categorical_crossentropy", optimizer=self.oopt, metrics=['accuracy'])
        history = LossHistory()
        # tqdm_callback = TQDMNotebookCallback()
        self.extra_callbacks = [history] #, tqdm_callback]
        print("Setting cval to %g" % self.c)
        self.oopt.set_strength(self.c)
def preprocessed_(self, task_num, x):
if not hasattr(self, 'all_data'):
self.all_data = {}
self.all_data[task_num] = x
print('Added preprocessed data for task %d' % task_num)
    def train_epoch(self, n_task, epoch, silent = False):
        """Train one epoch on task *n_task* and report accuracy/behaviour stats.

        Returns the average validation accuracy (percent) over tasks 0..n_task.
        Predictions for this epoch are appended to self.all_predictions only
        temporarily, so behaviour stats can be computed, then removed again.
        """
        stuffs = self.model.fit(
            self.all_data[n_task]['train_x'],
            self.all_data[n_task]['train_y'],
            batch_size = self.it.batch_size,
            callbacks = self.extra_callbacks,
            epochs = 1,
            verbose = 0,
            validation_data = (
                self.all_data[n_task]['test_x'],
                self.all_data[n_task]['test_y'],
            ))
        # TODO: dropout
        avg_tr_loss = stuffs.history['loss'][0]
        avg_tr_acc = stuffs.history['acc'][0]
        test_acc = stuffs.history['val_acc'][0]
        task_accs_all = self.accuracies(n_task+1)
        task_accs = np.average(task_accs_all)
        pred_n_tasks = self.predictions(n_task+1)
        # Temporarily push this epoch's predictions so beh() sees them...
        self.all_predictions += [pred_n_tasks]
        behs = [self.beh(i) for i in range(n_task)] # Check beh on n_task-1 tasks
        beh_str = "Beh: " + str(behs)
        # ...then pop them again; snapshots are stored per task elsewhere.
        del(self.all_predictions[-1])
        print("Epoch: %d, Acc: %.2f%%, ValAcc: %.2f%%, Loss: %f" % (
            epoch+1, avg_tr_acc * 100.0, test_acc * 100.0, avg_tr_loss))
        if not silent:
            print("PastValAcc(%d): %s => %.2f%%" % (n_task+1,
                " ".join(["%.2f%%" % item for item in task_accs_all]), task_accs))
            print("%s" % beh_str)
        return task_accs
def accuracies(self, n):
task_accuracies = []
for i in range(n):
test_loss, test_acc = self.model.evaluate(
self.all_data[i]['test_x'],
self.all_data[i]['test_y'],
verbose = 0
)
task_accuracies.append(test_acc * 100.0)
return task_accuracies
def predictions(self, n):
pred_n_tasks = []
for i in range(n):
preds = np.argmax(self.model.predict(
self.all_data[i]['test_x'],
verbose = 0
), axis = 1).astype('uint8')
pred_n_tasks.append(preds)
return pred_n_tasks
def beh_show(self):
print_str = "%d rows: " % len(self.all_predictions)
print_str += "%s" % str([len(item) for item in self.all_predictions])
print(print_str)
# Behavior across n iterations
def beh(self, i):
if len(self.all_predictions) <= 1:
print("Not enough data for behaviour analysis")
return
# self.beh_show()
# print("Access rows %d:%d -> col %d" % (i, len(self.all_predictions)-1, i))
chosen_classifications = [item[i] for item in self.all_predictions[i:]] # only consider data from index i+
n = len(chosen_classifications[0])
same = 0
for ii in range(n):
reqd = chosen_classifications[0][ii]
is_same = True
for item in chosen_classifications[1:]:
if item[ii] != reqd:
is_same = False
break
if is_same:
same += 1
return round(same*100.0/n, 2)
def final_stats(self, n_task, n_permute_tasks):
task_accs = self.accuracies(n_permute_tasks)
pred_n_tasks = self.predictions(n_permute_tasks)
self.all_predictions += [pred_n_tasks]
behs = [self.beh(i) for i in range(n_task)] # Check beh on n_task-1 tasks
print("Final beh: " + str(behs))
task_accs = ["%.2f%%" % item for item in task_accs]
print("Task accuracies: " + " ".join(task_accs))
| 39.534884 | 114 | 0.582941 | import os, sys
sys.path.extend([os.path.expanduser(
os.path.abspath('./utils/networks/synaptic_intelligence/')
)])
from pathint import utils as putils
import utils
from keras.models import Sequential
from keras.layers import Dense, Dropout
from pathint import protocols
from pathint.optimizers import KOOptimizer
from keras.optimizers import SGD, Adam, RMSprop
from keras.callbacks import Callback
from pathint.keras_utils import LossHistory
import numpy as np
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
class SINetwork:
def __init__(self, layer_sizes, feature_extractor_needed = False, use_dropout = False,
activation = 'relu', dropoutv = 0.5, reshape_dims = None, seed = 0,
session_config = None, it = None, c = 1.0, xi = 0.1, lr = 0.001):
assert(len(layer_sizes) == 4)
assert(session_config != None)
assert(it != None)
self.layer_sizes = layer_sizes
self.feature_extractor_needed = feature_extractor_needed
self.use_dropout = use_dropout
self.dropoutv = dropoutv
self.reshape_dims = reshape_dims
self.seed = seed
self.session_config = session_config
self.it = it
self.use_dropout = use_dropout
self.activation = utils.get_activation(activation)
print("Using feature extractor: %s" % self.feature_extractor_needed)
print("Using dropout, bn: %s, %f" % (self.use_dropout, self.dropoutv))
self.phs = {}
self.vars = {}
self.objs = {}
self.all_predictions = []
self.c = c
self.xi = xi
self.lr = float(lr)
def apply_feature_extractor(self, X):
if self.feature_extractor_needed:
if not hasattr(self, 'feature_extractor_set'):
with tf.variable_scope("feature_extractor"):
X, created_layers = utils.vgg16(X, self.training_ph)
self.feature_extractor_set = True
self.feature_extractor_layers = created_layers
else:
print("Reusing feature extractor")
with tf.variable_scope("feature_extractor", reuse = True):
X = utils.vgg16_reuse(X, self.training_ph, self.feature_extractor_layers)
else:
X = tf.reshape(X, [-1, self.layer_sizes[0]])
return X
def get_trainable_vars(self, scope = "", silent = False):
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = scope)
if self.feature_extractor_needed:
new_var_list = []
for item in var_list:
if not item.name.startswith("feature_extractor"):
new_var_list += [item]
var_list = new_var_list
if not silent:
print("Trainable vars: %s" % str(var_list))
return var_list
def create_session(self, improve_by = 5, min_epoch = 10):
self.objs['saver'] = tf.train.Saver()
self.objs['sess'] = tf.InteractiveSession()
self.objs['sess'].run(tf.global_variables_initializer())
self.objs['es'] = utils.EarlyStopping(
self.objs['sess'],
self.objs['saver'],
save_dir = "saved_seed%d" % self.seed,
improve_by = improve_by,
min_epoch = min_epoch
)
if self.feature_extractor_needed:
if not os.path.exists("vgg16_cifar100"):
print("Pretrained model doesnt exist for VGG16")
print("Run cifar100.py first")
exit(0)
else:
reqd_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = "feature_extractor")
feature_extractor_saver = tf.train.Saver(reqd_variables)
print("Restoring feature extractor variables")
feature_extractor_saver.restore(self.objs['sess'], "vgg16_cifar100/saved.ckpt")
print("Done")
def print_vars(self, var_list, show_values = False, extra = False):
for num, var in enumerate(var_list):
print_strs = []
if show_values:
if 'sess' in self.objs:
red_sum = self.objs['sess'].run(tf.reduce_sum(var))
print_strs += ["mag %f" % (red_sum)]
else:
print_strs += ["init"]
if extra:
if 'sess' in self.objs:
nonzerovar = tf.boolean_mask(var, tf.greater(var, 0.000001))
tmin = self.objs['sess'].run(tf.math.reduce_max(nonzerovar))
print_strs += ["tmax %f" % tmin]
nz = self.objs['sess'].run(tf.math.count_nonzero(var))
print_strs += ["nonzero %d" % nz]
num_elements = self.objs['sess'].run(tf.reduce_sum(tf.ones_like(var)))
print_strs += ["total %d" % num_elements]
print_str = "\t(%d) %s" % (num+1, var.name)
if len(print_strs) > 0:
print_str += " => %s" % " , ".join(print_strs)
print(print_str)
print("Number of vars: %d" % len(var_list))
def setup(self):
set_session(tf.Session(config = self.session_config))
activation_fn = self.activation
self.model = Sequential()
self.model.add(Dense(self.layer_sizes[1], activation=activation_fn, input_dim=self.layer_sizes[0]))
if self.use_dropout: self.model.add(Dropout(self.dropoutv))
self.model.add(Dense(self.layer_sizes[2], activation=activation_fn))
if self.use_dropout: self.model.add(Dropout(self.dropoutv))
self.model.add(Dense(self.layer_sizes[3], activation='softmax'))
protocol_name, protocol = protocols.PATH_INT_PROTOCOL(omega_decay='sum', xi=self.xi)
self.opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999)
opt_name = 'adam'
self.oopt = KOOptimizer(self.opt, model=self.model, **protocol)
self.model.compile(loss="categorical_crossentropy", optimizer=self.oopt, metrics=['accuracy'])
history = LossHistory()
self.extra_callbacks = [history]
print("Setting cval to %g" % self.c)
self.oopt.set_strength(self.c)
def preprocessed_(self, task_num, x):
if not hasattr(self, 'all_data'):
self.all_data = {}
self.all_data[task_num] = x
print('Added preprocessed data for task %d' % task_num)
def train_epoch(self, n_task, epoch, silent = False):
stuffs = self.model.fit(
self.all_data[n_task]['train_x'],
self.all_data[n_task]['train_y'],
batch_size = self.it.batch_size,
callbacks = self.extra_callbacks,
epochs = 1,
verbose = 0,
validation_data = (
self.all_data[n_task]['test_x'],
self.all_data[n_task]['test_y'],
))
avg_tr_loss = stuffs.history['loss'][0]
avg_tr_acc = stuffs.history['acc'][0]
test_acc = stuffs.history['val_acc'][0]
task_accs_all = self.accuracies(n_task+1)
task_accs = np.average(task_accs_all)
pred_n_tasks = self.predictions(n_task+1)
self.all_predictions += [pred_n_tasks]
behs = [self.beh(i) for i in range(n_task)]
beh_str = "Beh: " + str(behs)
del(self.all_predictions[-1])
print("Epoch: %d, Acc: %.2f%%, ValAcc: %.2f%%, Loss: %f" % (
epoch+1, avg_tr_acc * 100.0, test_acc * 100.0, avg_tr_loss))
if not silent:
print("PastValAcc(%d): %s => %.2f%%" % (n_task+1,
" ".join(["%.2f%%" % item for item in task_accs_all]), task_accs))
print("%s" % beh_str)
return task_accs
def accuracies(self, n):
task_accuracies = []
for i in range(n):
test_loss, test_acc = self.model.evaluate(
self.all_data[i]['test_x'],
self.all_data[i]['test_y'],
verbose = 0
)
task_accuracies.append(test_acc * 100.0)
return task_accuracies
def predictions(self, n):
pred_n_tasks = []
for i in range(n):
preds = np.argmax(self.model.predict(
self.all_data[i]['test_x'],
verbose = 0
), axis = 1).astype('uint8')
pred_n_tasks.append(preds)
return pred_n_tasks
def beh_show(self):
print_str = "%d rows: " % len(self.all_predictions)
print_str += "%s" % str([len(item) for item in self.all_predictions])
print(print_str)
def beh(self, i):
if len(self.all_predictions) <= 1:
print("Not enough data for behaviour analysis")
return
chosen_classifications = [item[i] for item in self.all_predictions[i:]]
n = len(chosen_classifications[0])
same = 0
for ii in range(n):
reqd = chosen_classifications[0][ii]
is_same = True
for item in chosen_classifications[1:]:
if item[ii] != reqd:
is_same = False
break
if is_same:
same += 1
return round(same*100.0/n, 2)
def final_stats(self, n_task, n_permute_tasks):
task_accs = self.accuracies(n_permute_tasks)
pred_n_tasks = self.predictions(n_permute_tasks)
self.all_predictions += [pred_n_tasks]
behs = [self.beh(i) for i in range(n_task)]
print("Final beh: " + str(behs))
task_accs = ["%.2f%%" % item for item in task_accs]
print("Task accuracies: " + " ".join(task_accs))
| true | true |
f7fec56a2cca9fae95f9dcd0679592c38398cd18 | 4,507 | py | Python | sisyphus/models.py | bpotvin-bccrc/colossus | fa5ca7ce4cfe794c7d2167acb868aa9167988941 | [
"MIT"
] | 2 | 2018-10-03T16:05:14.000Z | 2019-03-08T23:01:29.000Z | sisyphus/models.py | bpotvin-bccrc/colossus | fa5ca7ce4cfe794c7d2167acb868aa9167988941 | [
"MIT"
] | 3 | 2019-05-09T22:48:22.000Z | 2020-06-05T18:52:05.000Z | sisyphus/models.py | bpotvin-bccrc/colossus | fa5ca7ce4cfe794c7d2167acb868aa9167988941 | [
"MIT"
] | 4 | 2018-08-16T22:25:10.000Z | 2021-02-19T16:10:15.000Z | """
Created on July 6, 2017
@author: Jessica Ngo (jngo@bccrc.ca)
"""
from __future__ import unicode_literals
import datetime
#============================
# Django imports
#----------------------------
from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.contrib.postgres.fields import JSONField
#============================
# App imports
#----------------------------
from simple_history.models import HistoricalRecords
from dlp.models import *
from tenx.models import *
from core.helpers import *
from core.constants import *
from pbal.models import PbalSequencing
class DlpAnalysisVersion(models.Model):
    """
    Keeps track of the available analysis software versions.
    """
    # Audit trail via django-simple-history.
    history = HistoricalRecords(table_name='dlp_history_analysis_version')
    # Version string, e.g. a pipeline release tag.
    version = create_chrfield(
        "DlpAnalysis Version",
        blank=False,
        null=False,
    )
    def __str__(self):
        return self.version
class ReferenceGenome(models.Model):
    """
    The Reference genome to be used by the single cell pipeline
    """
    # Audit trail via django-simple-history.
    history = HistoricalRecords(table_name='ref_genome_history')
    # Genome identifier, e.g. a build name.
    reference_genome = create_chrfield("reference_genome", blank=False, null=False)
    def __str__(self):
        return "Reference Genome {reference_genome}".format(reference_genome=self.reference_genome)
class AnalysisRun(models.Model):
    """
    Analysis/workflow details filled in or changed by database admin
    """
    # Audit trail via django-simple-history.
    history = HistoricalRecords(table_name='analysis_run_history')
    last_updated = models.DateTimeField(
        "Analysis last updated date/time",
        null=True,
        default=timezone.now
    )
    # Workflow state; defaults to IDLE (see core.constants).
    run_status = create_chrfield(
        "Run Status",
        blank=False,
        null=False,
        default=IDLE,
        choices=RUN_STATUS_CHOICES
    )
    log_file = create_chrfield("error_log", default=None, blank=True, null=True, max_length=1000)
    sftp_path = create_chrfield(
        "sftp path",
        null=True,
        blank=True,
    )
    blob_path = create_chrfield(
        "Blob path",
        null=True,
        blank=True,
    )
    def __str__(self):
        return 'Run Status: %s, Last Updated %s' % (self.run_status, self.last_updated)
    def get_absolute_url(self):
        # NOTE(review): no pk/kwargs passed — confirm the
        # "sisyphus:analysisrun_detail" URL pattern takes no arguments.
        return reverse("sisyphus:analysisrun_detail")
class DlpAnalysisInformation(models.Model):
    """
    Metadata describing one DLP analysis: the library, sequencings and lanes
    analysed, the pipeline version/aligner/smoothing used, and its run record.
    """
    history = HistoricalRecords(table_name='dlp_analysis_info_history')
    library = models.ForeignKey(
        DlpLibrary,
        verbose_name="Library",
        on_delete=models.CASCADE,
    )
    sequencings = models.ManyToManyField(DlpSequencing)
    version = models.ForeignKey(
        DlpAnalysisVersion,
        verbose_name="Analysis Version",
        on_delete=models.CASCADE,
    )
    lanes = models.ManyToManyField(DlpLane, blank=True)
    # Column/value exclusions consumed by project serialization helpers.
    fields_to_exclude = ['ID']
    values_to_exclude = ['id']
    analysis_jira_ticket = create_chrfield("Analysis Jira ticket", blank=False)
    # database relationships
    # on_delete is spelled out (CASCADE was the implicit default before
    # Django 2.0) for consistency with the relations above and so this model
    # keeps working when the project upgrades Django.
    analysis_run = models.OneToOneField(
        AnalysisRun,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    # fields
    priority_level = create_chrfield(
        "Priority Level",
        choices=priority_level_choices,
        default="L",
        blank=False,
        null=False,
    )
    aligner = create_chrfield(
        "Aligner",
        choices=aligner_choices,
        default="A",
        blank=False,
        null=False,
    )
    smoothing = create_chrfield(
        "Smoothing",
        choices=smoothing_choices,
        default="M",
        blank=False,
        null=False,
    )
    montage_status = create_chrfield(
        "Montage State",
        choices=MONTAGE_STATUS_CHOICES,
        default="Pending",
        blank=False,
        null=False,
    )
    # fields
    analysis_submission_date = models.DateField(
        "Analysis submission date",
        null=True,
        default=datetime.date.today,  # this needs to be a date (not datetime)
    )
    reference_genome = models.ForeignKey(
        ReferenceGenome,
        verbose_name="ReferenceGenome",
        null=True,
        on_delete=models.CASCADE,
    )
    verified = create_chrfield(
        "Verified",
        choices=verified_choices,
        default="F",
    )
    def get_absolute_url(self):
        return reverse("sisyphus:analysisinformation_detail", kwargs={'pk': self.pk})
    def __str__(self):
        return "Analysis of {jira}".format(jira=self.analysis_jira_ticket)
    class Meta:
        ordering = ['pk']
| 24.494565 | 99 | 0.643665 |
from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.contrib.postgres.fields import JSONField
from simple_history.models import HistoricalRecords
from dlp.models import *
from tenx.models import *
from core.helpers import *
from core.constants import *
from pbal.models import PbalSequencing
class DlpAnalysisVersion(models.Model):
history = HistoricalRecords(table_name='dlp_history_analysis_version')
version = create_chrfield(
"DlpAnalysis Version",
blank=False,
null=False,
)
def __str__(self):
return self.version
class ReferenceGenome(models.Model):
history = HistoricalRecords(table_name='ref_genome_history')
reference_genome = create_chrfield("reference_genome", blank=False, null=False)
def __str__(self):
return "Reference Genome {reference_genome}".format(reference_genome=self.reference_genome)
class AnalysisRun(models.Model):
history = HistoricalRecords(table_name='analysis_run_history')
last_updated = models.DateTimeField(
"Analysis last updated date/time",
null=True,
default=timezone.now
)
run_status = create_chrfield(
"Run Status",
blank=False,
null=False,
default=IDLE,
choices=RUN_STATUS_CHOICES
)
log_file = create_chrfield("error_log", default=None, blank=True, null=True, max_length=1000)
sftp_path = create_chrfield(
"sftp path",
null=True,
blank=True,
)
blob_path = create_chrfield(
"Blob path",
null=True,
blank=True,
)
def __str__(self):
return 'Run Status: %s, Last Updated %s' % (self.run_status, self.last_updated)
def get_absolute_url(self):
return reverse("sisyphus:analysisrun_detail")
class DlpAnalysisInformation(models.Model):
history = HistoricalRecords(table_name='dlp_analysis_info_history')
library = models.ForeignKey(
DlpLibrary,
verbose_name="Library",
on_delete=models.CASCADE,
)
sequencings = models.ManyToManyField(DlpSequencing)
version = models.ForeignKey(
DlpAnalysisVersion,
verbose_name="Analysis Version",
on_delete=models.CASCADE,
)
lanes = models.ManyToManyField(DlpLane, blank=True)
fields_to_exclude = ['ID']
values_to_exclude = ['id']
analysis_jira_ticket = create_chrfield("Analysis Jira ticket", blank=False)
analysis_run = models.OneToOneField(AnalysisRun, blank=True, null=True)
priority_level = create_chrfield(
"Priority Level",
choices=priority_level_choices,
default="L",
blank=False,
null=False,
)
aligner = create_chrfield(
"Aligner",
choices=aligner_choices,
default="A",
blank=False,
null=False,
)
smoothing = create_chrfield(
"Smoothing",
choices=smoothing_choices,
default="M",
blank=False,
null=False,
)
montage_status = create_chrfield(
"Montage State",
choices=MONTAGE_STATUS_CHOICES,
default="Pending",
blank=False,
null=False,
)
analysis_submission_date = models.DateField(
"Analysis submission date",
null=True,
default=datetime.date.today,
)
reference_genome = models.ForeignKey(
ReferenceGenome,
verbose_name="ReferenceGenome",
null=True,
)
verified = create_chrfield(
"Verified",
choices=verified_choices,
default="F",
)
def get_absolute_url(self):
return reverse("sisyphus:analysisinformation_detail", kwargs={'pk':self.pk})
def __str__(self):
return "Analysis of {jira}".format(jira=self.analysis_jira_ticket)
class Meta:
ordering = ['pk']
| true | true |
f7fec5e7f96c3a35e2ad6fe5196f1c876615d155 | 16,355 | py | Python | django/utils/feedgenerator.py | pomarec/django | 98514849dce07acfaa224a90a784bba9d97249e5 | [
"BSD-3-Clause"
] | 1 | 2015-06-14T07:55:29.000Z | 2015-06-14T07:55:29.000Z | django/utils/feedgenerator.py | pomarec/django | 98514849dce07acfaa224a90a784bba9d97249e5 | [
"BSD-3-Clause"
] | null | null | null | django/utils/feedgenerator.py | pomarec/django | 98514849dce07acfaa224a90a784bba9d97249e5 | [
"BSD-3-Clause"
] | null | null | null | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_text, iri_to_uri
from django.utils import datetime_safe
from django.utils import six
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.timezone import is_aware
def rfc2822_date(date):
    """
    Format a datetime as an RFC 2822 date string, e.g.
    "Mon, 01 Jan 2001 01:02:03 +0000".

    We can't use strftime() because it produces locale-dependant results, so
    the English month and day names are mapped manually. Aware datetimes get
    their real UTC offset; naive ones get RFC 2822's "unknown zone" "-0000".
    """
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    # We do this ourselves to be timezone aware, email.Utils is not tz aware.
    dow = days[date.weekday()]
    month = months[date.month - 1]
    time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
    if six.PY2:  # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')
    if is_aware(date):
        offset = date.tzinfo.utcoffset(date)
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        # Strip the sign before divmod(): divmod(-330, 60) == (-6, 30), which
        # would render UTC-05:30 incorrectly as "-0630".
        sign = '-' if timezone < 0 else '+'
        hour, minute = divmod(abs(timezone), 60)
        return time_str + '%s%02d%02d' % (sign, hour, minute)
    else:
        return time_str + '-0000'
def rfc3339_date(date):
    """
    Format a datetime as an RFC 3339 timestamp, e.g. "2001-01-01T01:02:03Z".

    Aware datetimes are rendered with their numeric UTC offset; naive ones
    get the "Z" suffix.
    """
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
    if six.PY2:  # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')
    if is_aware(date):
        offset = date.tzinfo.utcoffset(date)
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        # Strip the sign before divmod(): divmod(-330, 60) == (-6, 30), which
        # would render UTC-05:30 incorrectly as "-06:30".
        sign = '-' if timezone < 0 else '+'
        hour, minute = divmod(abs(timezone), 60)
        return time_str + '%s%02d:%02d' % (sign, hour, minute)
    else:
        return time_str + 'Z'
def get_tag_uri(url, date):
    """
    Creates a TagURI.
    See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    parsed = urlparse(url)
    if date is None:
        date_part = ''
    else:
        date_part = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
    return 'tag:%s%s:%s/%s' % (parsed.hostname, date_part, parsed.path, parsed.fragment)
class SyndicationFeed(object):
    "Base class for all syndication feeds. Subclasses should provide write()"
    def __init__(self, title, link, description, language=None, author_email=None,
            author_name=None, author_link=None, subtitle=None, categories=None,
            feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        # Coerce values to text; strings_only=True leaves None untouched.
        to_unicode = lambda s: force_text(s, strings_only=True)
        if categories:
            categories = [force_text(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        # Channel-level metadata; link-like values are made URI-safe.
        self.feed = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'language': to_unicode(language),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_unicode(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_unicode(feed_copyright),
            'id': feed_guid or link,
            'ttl': ttl,
        }
        # Extra keyword arguments are stored verbatim for custom feed types.
        self.feed.update(kwargs)
        self.items = []
    def add_item(self, title, link, description, author_email=None,
            author_name=None, author_link=None, pubdate=None, comments=None,
            unique_id=None, unique_id_is_permalink=None, enclosure=None,
            categories=(), item_copyright=None, ttl=None, updateddate=None, **kwargs):
        """
        Adds an item to the feed. All args are expected to be Python Unicode
        objects except pubdate and updateddate, which are datetime.datetime
        objects, and enclosure, which is an instance of the Enclosure class.
        """
        to_unicode = lambda s: force_text(s, strings_only=True)
        if categories:
            categories = [to_unicode(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        item = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'updateddate': updateddate,
            'comments': to_unicode(comments),
            'unique_id': to_unicode(unique_id),
            'unique_id_is_permalink': unique_id_is_permalink,
            'enclosure': enclosure,
            'categories': categories or (),
            'item_copyright': to_unicode(item_copyright),
            'ttl': ttl,
        }
        # Extra keyword arguments are stored verbatim for custom feed types.
        item.update(kwargs)
        self.items.append(item)
    def num_items(self):
        # Number of items added so far.
        return len(self.items)
    def root_attributes(self):
        """
        Return extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}
    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass
    def item_attributes(self, item):
        """
        Return extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}
    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass
    def write(self, outfile, encoding):
        """
        Outputs the feed in the given encoding to outfile, which is a file-like
        object. Subclasses should override this.
        """
        raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
    def writeString(self, encoding):
        """
        Returns the feed in the given encoding as a string.
        """
        s = StringIO()
        self.write(s, encoding)
        return s.getvalue()
    def latest_post_date(self):
        """
        Returns the latest item's pubdate or updateddate. If no items
        have either of these attributes this returns the current date/time.
        """
        latest_date = None
        date_keys = ('updateddate', 'pubdate')
        for item in self.items:
            for date_key in date_keys:
                item_date = item.get(date_key)
                if item_date:
                    if latest_date is None or item_date > latest_date:
                        latest_date = item_date
        return latest_date or datetime.datetime.now()
class Enclosure(object):
    "Represents an RSS enclosure"
    def __init__(self, url, length, mime_type):
        "All args are expected to be Python Unicode objects"
        # Only the URL needs normalization; length/mime type are stored as-is.
        self.url = iri_to_uri(url)
        self.length = length
        self.mime_type = mime_type
class RssFeed(SyndicationFeed):
    # Shared machinery for the RSS flavours; subclasses set _version and
    # implement add_item_elements().
    mime_type = 'application/rss+xml; charset=utf-8'
    def write(self, outfile, encoding):
        # Emit <rss><channel>...</channel></rss> via SAX-style events.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")
    def rss_attributes(self):
        # The atom namespace is needed for the <atom:link rel="self"> element.
        return {"version": self._version,
                "xmlns:atom": "http://www.w3.org/2005/Atom"}
    def write_items(self, handler):
        # One <item> element per feed item, filled in by the subclass hook.
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")
    def add_root_elements(self, handler):
        # Channel-level elements; optional ones are emitted only when set.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", self.feed['link'])
        handler.addQuickElement("description", self.feed['description'])
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("atom:link", None,
                    {"rel": "self", "href": self.feed['feed_url']})
        if self.feed['language'] is not None:
            handler.addQuickElement("language", self.feed['language'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", cat)
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", self.feed['feed_copyright'])
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
        if self.feed['ttl'] is not None:
            handler.addQuickElement("ttl", self.feed['ttl'])
    def endChannelElement(self, handler):
        handler.endElement("channel")
class RssUserland091Feed(RssFeed):
    """RSS 0.91 feed: items carry only a title, a link and an optional description."""
    _version = "0.91"
    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        description = item['description']
        if description is not None:
            handler.addQuickElement("description", description)
class Rss201rev2Feed(RssFeed):
    # RSS 2.01 rev 2 feed; adds the full per-item element set on top of RssFeed.
    # Spec: http://blogs.law.harvard.edu/tech/rss
    _version = "2.0"
    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])
        # Author information.
        # RSS <author> requires an email; a name alone goes into dc:creator.
        if item["author_name"] and item["author_email"]:
            handler.addQuickElement("author", "%s (%s)" % \
                (item['author_email'], item['author_name']))
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            guid_attrs = {}
            # isPermaLink is only emitted when the caller set it explicitly.
            if isinstance(item.get('unique_id_is_permalink'), bool):
                guid_attrs['isPermaLink'] = str(
                    item['unique_id_is_permalink']).lower()
            handler.addQuickElement("guid", item['unique_id'], guid_attrs)
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])
        # Enclosure.
        if item['enclosure'] is not None:
            handler.addQuickElement("enclosure", '',
                {"url": item['enclosure'].url, "length": item['enclosure'].length,
                    "type": item['enclosure'].mime_type})
        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
    # Atom 1.0 feed.
    # Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
    mime_type = 'application/atom+xml; charset=utf-8'
    ns = "http://www.w3.org/2005/Atom"
    def write(self, outfile, encoding):
        # Emit <feed>...</feed> via SAX-style events.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement('feed', self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        handler.endElement("feed")
    def root_attributes(self):
        # xml:lang is added only when a feed language was supplied.
        if self.feed['language'] is not None:
            return {"xmlns": self.ns, "xml:lang": self.feed['language']}
        else:
            return {"xmlns": self.ns}
    def add_root_elements(self, handler):
        # Feed-level elements; optional ones are emitted only when set.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
        handler.addQuickElement("id", self.feed['id'])
        handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
        if self.feed['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", self.feed['author_name'])
            if self.feed['author_email'] is not None:
                handler.addQuickElement("email", self.feed['author_email'])
            if self.feed['author_link'] is not None:
                handler.addQuickElement("uri", self.feed['author_link'])
            handler.endElement("author")
        if self.feed['subtitle'] is not None:
            handler.addQuickElement("subtitle", self.feed['subtitle'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("rights", self.feed['feed_copyright'])
    def write_items(self, handler):
        # One <entry> element per feed item.
        for item in self.items:
            handler.startElement("entry", self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("entry")
    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
        if item['pubdate'] is not None:
            handler.addQuickElement('published', rfc3339_date(item['pubdate']))
        if item['updateddate'] is not None:
            handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
        # Author information.
        if item['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", item['author_name'])
            if item['author_email'] is not None:
                handler.addQuickElement("email", item['author_email'])
            if item['author_link'] is not None:
                handler.addQuickElement("uri", item['author_link'])
            handler.endElement("author")
        # Unique ID.
        # Fall back to a generated tag: URI when no explicit ID was given.
        if item['unique_id'] is not None:
            unique_id = item['unique_id']
        else:
            unique_id = get_tag_uri(item['link'], item['pubdate'])
        handler.addQuickElement("id", unique_id)
        # Summary.
        if item['description'] is not None:
            handler.addQuickElement("summary", item['description'], {"type": "html"})
        # Enclosure.
        if item['enclosure'] is not None:
            handler.addQuickElement("link", '',
                {"rel": "enclosure",
                    "href": item['enclosure'].url,
                    "length": item['enclosure'].length,
                    "type": item['enclosure'].mime_type})
        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        # Rights.
        if item['item_copyright'] is not None:
            handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| 40.68408 | 120 | 0.614063 | from __future__ import unicode_literals
import datetime
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_text, iri_to_uri
from django.utils import datetime_safe
from django.utils import six
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.timezone import is_aware
def rfc2822_date(date):
# we have to map english month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
else:
return time_str + '-0000'
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
else:
return time_str + 'Z'
def get_tag_uri(url, date):
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
class SyndicationFeed(object):
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, updateddate=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
return {}
def add_root_elements(self, handler):
pass
def item_attributes(self, item):
return {}
def add_item_elements(self, handler, item):
pass
def write(self, outfile, encoding):
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
return latest_date or datetime.datetime.now()
class Enclosure(object):
def __init__(self, url, length, mime_type):
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(
item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| true | true |
f7fec803968bd1900ec2a92cb4c56d39eae63cb3 | 627 | py | Python | tests/models.py | octue/django-gcp | fedc5506e57e799e76b9876696cbdf172ba19e16 | [
"MIT"
] | null | null | null | tests/models.py | octue/django-gcp | fedc5506e57e799e76b9876696cbdf172ba19e16 | [
"MIT"
] | 4 | 2022-02-07T00:10:44.000Z | 2022-02-10T17:03:37.000Z | tests/models.py | octue/django-gcp | fedc5506e57e799e76b9876696cbdf172ba19e16 | [
"MIT"
] | null | null | null | from django.db.models import CharField
from django_gcp.models import MyAbstractModel
class Armadillo(MyAbstractModel):
    """
    Concrete model used purely to exercise the library's abstract base class.

    Abstract models can't be instantiated directly, so the test app ships this
    concrete subclass instead; regenerate its migrations with:
        python manage.py makemigrations tests
    """
    name = CharField(max_length=32)
    class Meta:
        app_label = "tests"
    def __str__(self):
        # Delegates first so the abstract base's __str__ gets test coverage.
        base = super().__str__()
        return base + f' ("{self.name}")'
| 29.857143 | 112 | 0.701754 | from django.db.models import CharField
from django_gcp.models import MyAbstractModel
class Armadillo(MyAbstractModel):
    """Concrete test-only subclass of MyAbstractModel (comment-stripped copy)."""

    # Single field so the concrete model carries some state of its own.
    name = CharField(max_length=32)

    class Meta:
        # Register the model under the test app rather than the library.
        app_label = "tests"

    def __str__(self):
        # Delegates to the abstract base's __str__ and appends the name.
        return super(Armadillo, self).__str__() + f' ("{self.name}")'
| true | true |
f7fec8ff461f5da9b1641bb51f45c8ff7f2c9f1f | 3,303 | py | Python | isi_sdk/models/ntp_server.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | isi_sdk/models/ntp_server.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | isi_sdk/models/ntp_server.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class NtpServer(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        NtpServer - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps attribute names to their swagger type names.
        self.swagger_types = {
            'key': 'str'
        }

        # Maps attribute names to the corresponding JSON keys.
        self.attribute_map = {
            'key': 'key'
        }

        self._key = None

    @property
    def key(self):
        """
        Gets the key of this NtpServer.
        Key value from key_file that maps to this server.

        :return: The key of this NtpServer.
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """
        Sets the key of this NtpServer.
        Key value from key_file that maps to this server.

        :param key: The key of this NtpServer.
        :type: str
        """
        self._key = key

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() behaves identically on Python 2 and 3 here, so the
        # class no longer needs six.iteritems.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: the original compared `self.__dict__ == other.__dict__`
        unconditionally, raising AttributeError when `other` has no
        `__dict__` (e.g. `NtpServer() == 5`). Comparison with a
        non-NtpServer now returns False instead of raising.
        """
        if not isinstance(other, NtpServer):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 26.853659 | 77 | 0.549198 |
from pprint import pformat
from six import iteritems
import re
class NtpServer(object):
    """Swagger-generated model for an NTP server (comment-stripped copy)."""

    def __init__(self):
        """Initializes the type/JSON-key maps and the single `key` field."""
        self.swagger_types = {
            'key': 'str'
        }

        self.attribute_map = {
            'key': 'key'
        }

        self._key = None

    @property
    def key(self):
        """Gets the key of this NtpServer (key_file value for this server)."""
        return self._key

    @key.setter
    def key(self, key):
        """Sets the key of this NtpServer."""
        self._key = key

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert each list element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal.

        NOTE(review): raises AttributeError when `other` has no `__dict__`;
        assumes callers only compare against other model instances.
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| true | true |
f7fec93bbeedace372bec5fec4f6f3e8e3ddd53e | 171,648 | py | Python | tensorflow/python/data/ops/dataset_ops.py | leike666666/tensorflow | a3fd0ddfcb716be124e95b51e96e6c1e4507ef64 | [
"Apache-2.0"
] | 2 | 2020-09-27T16:51:58.000Z | 2020-10-22T06:16:29.000Z | tensorflow/python/data/ops/dataset_ops.py | leike666666/tensorflow | a3fd0ddfcb716be124e95b51e96e6c1e4507ef64 | [
"Apache-2.0"
] | 2 | 2021-08-25T15:57:35.000Z | 2022-02-10T01:09:32.000Z | tensorflow/python/data/ops/dataset_ops.py | leike666666/tensorflow | a3fd0ddfcb716be124e95b51e96e6c1e4507ef64 | [
"Apache-2.0"
] | 1 | 2020-04-22T01:47:46.000Z | 2020-04-22T01:47:46.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import sys
import threading
import warnings
import weakref
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import options as options_lib
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import sparse
from tensorflow.python.data.util import structure
from tensorflow.python.data.util import traverse
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as core_random_seed
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest as tf_nest
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily due to a circular dependency (roughly
# tf.function->wrap_function->dataset->autograph->tf.function).
# TODO(b/133251390): Use a regular import.
wrap_function = lazy_loader.LazyLoader(
    "wrap_function", globals(),
    "tensorflow.python.eager.wrap_function")
# TODO(mdan): Create a public API for this.
autograph_ctx = lazy_loader.LazyLoader(
    "autograph_ctx", globals(),
    "tensorflow.python.autograph.core.ag_ctx")
autograph = lazy_loader.LazyLoader(
    "autograph", globals(),
    "tensorflow.python.autograph.impl.api")


# The reduction op has no meaningful gradient.
ops.NotDifferentiable("ReduceDataset")

# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
@tf_export("data.Dataset", v1=[])
@six.add_metaclass(abc.ABCMeta)
class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor):
"""Represents a potentially large set of elements.
The `tf.data.Dataset` API supports writing descriptive and efficient input
pipelines. `Dataset` usage follows a common pattern:
1. Create a source dataset from your input data.
2. Apply dataset transformations to preprocess the data.
3. Iterate over the dataset and process the elements.
Iteration happens in a streaming fashion, so the full dataset does not need to
fit into memory.
Source Datasets:
The simplest way to create a dataset is to create it from a python `list`:
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> for element in dataset:
... print(element)
tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor(2, shape=(), dtype=int32)
tf.Tensor(3, shape=(), dtype=int32)
To process lines from files, use `tf.data.TextLineDataset`:
>>> dataset = tf.data.TextLineDataset(["file1.txt", "file2.txt"])
To process records written in the `TFRecord` format, use `TFRecordDataset`:
>>> dataset = tf.data.TFRecordDataset(["file1.tfrecords", "file2.tfrecords"])
To create a dataset of all files matching a pattern, use
`tf.data.Dataset.list_files`:
>>> dataset = tf.data.dataset.list_files("/path/*.txt") # doctest: +SKIP
See `tf.data.FixedLengthRecordDataset` and `tf.data.Dataset.from_generator`
for more ways to create datasets.
Transformations:
Once you have a dataset, you can apply transformations to prepare the data for
your model:
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.map(lambda x: x*2)
>>> list(dataset.as_numpy_iterator())
[2, 4, 6]
Common Terms:
**Element**: A single output from calling `next()` on a dataset iterator.
Elements may be nested structures containing multiple components. For
example, the element `(1, (3, "apple"))` has one tuple nested in another
tuple. The components are `1`, `3`, and `"apple"`.
**Component**: The leaf in the nested structure of an element.
Supported types:
Elements can be nested structures of tuples, named tuples, and dictionaries.
Element components can be of any type representable by `tf.TypeSpec`,
including `tf.Tensor`, `tf.data.Dataset`, `tf.SparseTensor`,
`tf.RaggedTensor`, and `tf.TensorArray`.
>>> a = 1 # Integer element
>>> b = 2.0 # Float element
>>> c = (1, 2) # Tuple element with 2 components
>>> d = {"a": (2, 2), "b": 3} # Dict element with 3 components
>>> Point = collections.namedtuple("Point", ["x", "y"]) # doctest: +SKIP
>>> e = Point(1, 2) # Named tuple # doctest: +SKIP
>>> f = tf.data.Dataset.range(10) # Dataset element
"""
  def __init__(self, variant_tensor):
    """Creates a DatasetV2 object.

    This is a difference between DatasetV1 and DatasetV2. DatasetV1 does not
    take anything in its constructor whereas in the DatasetV2, we expect
    subclasses to create a variant_tensor and pass it in to the super() call.

    Args:
      variant_tensor: A DT_VARIANT tensor that represents the dataset.
    """
    self._variant_tensor_attr = variant_tensor
    # A weak proxy keeps the tracker's deferred-trace lambda from forming a
    # Dataset -> tracker -> lambda -> Dataset reference cycle.
    weak_self = weakref.proxy(self)
    self._variant_tracker = self._track_trackable(
        _VariantTracker(
            self._variant_tensor,
            # _trace_variant_creation only works when executing eagerly, so we
            # don't want to run it immediately. We also want the _VariantTracker
            # to have a weak reference to the Dataset to avoid creating
            # reference cycles and making work for the garbage collector.
            lambda: weak_self._trace_variant_creation()()),  # pylint: disable=unnecessary-lambda,protected-access
        name="_variant_tracker")
    self._graph_attr = ops.get_default_graph()
  @property
  def _variant_tensor(self):
    # The DT_VARIANT tensor backing this dataset, fixed at construction.
    return self._variant_tensor_attr
  @_variant_tensor.setter
  def _variant_tensor(self, _):
    # Reassignment is forbidden; the variant tensor is set once in __init__.
    raise ValueError("The _variant_tensor property is read-only")
  @deprecation.deprecated_args(None, "Use external_state_policy instead",
                               "allow_stateful")
  def _as_serialized_graph(
      self,
      allow_stateful=None,
      strip_device_assignment=None,
      external_state_policy=distribute_options.ExternalStatePolicy.WARN):
    """Produces serialized graph representation of the dataset.

    Args:
      allow_stateful: If true, we allow stateful ops to be present in the graph
        def. In that case, the state in these ops would be thrown away.
      strip_device_assignment: If true, non-local (i.e. job and task) device
        assignment is stripped from ops in the serialized graph.
      external_state_policy: The ExternalStatePolicy enum that determines how we
        handle input pipelines that depend on external state. By default, its
        set to WARN.

    Returns:
      A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
      serialized graph.
    """
    # Prefer the v2 kernel once the forward-compatibility window permits it,
    # or whenever the caller explicitly passed an external-state policy (only
    # the v2 op understands that argument).
    if compat.forward_compatible(2019, 11, 25) or external_state_policy:
      policy = None
      if external_state_policy:
        policy = external_state_policy.value
      return gen_dataset_ops.dataset_to_graph_v2(
          self._variant_tensor,
          external_state_policy=policy,
          strip_device_assignment=strip_device_assignment)
    # Older v1 op; `strip_device_assignment` was added to it later, hence the
    # second compatibility gate.
    if compat.forward_compatible(2019, 11, 16) or strip_device_assignment:
      return gen_dataset_ops.dataset_to_graph(
          self._variant_tensor,
          allow_stateful=allow_stateful,
          strip_device_assignment=strip_device_assignment)
    return gen_dataset_ops.dataset_to_graph(
        self._variant_tensor, allow_stateful=allow_stateful)
  def _trace_variant_creation(self):
    """Traces a function which outputs a variant `tf.Tensor` for this dataset.

    Note that creating this function involves evaluating an op, and is currently
    only supported when executing eagerly.

    Returns:
      A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.
    """
    variant = self._variant_tensor
    if not isinstance(variant, ops.EagerTensor):
      raise NotImplementedError(
          "Can only export Datasets which were created executing eagerly. "
          "Please file a feature request if this is important to you.")
    with context.eager_mode(), ops.device("CPU"):
      graph_def = graph_pb2.GraphDef().FromString(
          self._as_serialized_graph().numpy())  # pylint: disable=protected-access
    output_node_name = None
    # The serialized graph marks its single output with a "_Retval" node; find
    # it and reject graphs with more than one.
    for node in graph_def.node:
      if node.op == "_Retval":
        if output_node_name is not None:
          raise AssertionError(
              "Found multiple return values from the dataset's graph, expected "
              "only one.")
        output_node_name, = node.input
    if output_node_name is None:
      raise AssertionError("Could not find the dataset's output node.")
    # Add functions used in this Dataset to the function's graph, since they
    # need to follow it around (and for example be added to a SavedModel which
    # references the dataset).
    variant_function = wrap_function.function_from_graph_def(
        graph_def, inputs=[], outputs=output_node_name + ":0")
    for used_function in self._functions():
      used_function.function.add_to_graph(variant_function.graph)
    return variant_function
  @abc.abstractmethod
  def _inputs(self):
    """Returns a list of the input datasets of the dataset."""
    # Abstract: every concrete subclass must override this.
    raise NotImplementedError("Dataset._inputs")
  @property
  def _graph(self):
    # The graph that was the default when this dataset was constructed.
    return self._graph_attr
  @_graph.setter
  def _graph(self, _):
    # The owning graph is fixed at construction time.
    raise ValueError("The _graph property is read-only")
def _has_captured_ref(self):
"""Whether this dataset uses a function that captures ref variables.
Returns:
A boolean, which if true indicates that the dataset or one of its inputs
uses a function that captures ref variables.
"""
if context.executing_eagerly():
# RefVariables are not supported in eager mode
return False
def is_tensor_or_parent_ref(tensor):
if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access
return True
# If the captured tensor is an eager tensor, we cannot trace its inputs.
if isinstance(tensor, ops._EagerTensorBase): # pylint: disable=protected-access
return False
return any(is_tensor_or_parent_ref(x) for x in tensor.op.inputs)
for fn in self._functions():
if any(is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs):
return True
return any(
[input_dataset._has_captured_ref() for input_dataset in self._inputs()]) # pylint: disable=protected-access
  # TODO(jsimsa): Change this to be the transitive closure of functions used
  # by this dataset and its inputs.
  def _functions(self):
    """Returns a list of functions associated with this dataset.

    Returns:
      A list of `StructuredFunctionWrapper` objects.
    """
    # Base implementation: a plain dataset owns no functions; transformations
    # that capture user functions override this.
    return []
def options(self):
"""Returns the options for this dataset and its inputs.
Returns:
A `tf.data.Options` object representing the dataset options.
"""
options = Options()
for input_dataset in self._inputs():
input_options = input_dataset.options()
if input_options is not None:
options = options.merge(input_options)
return options
  def _apply_options(self):
    """Apply options, such as optimization configuration, to the dataset.

    Returns:
      A `Dataset` with the wrappers implied by `self.options()` chained on;
      `self` is returned unchanged when no option requires a wrapper.
    """

    dataset = self
    options = self.options()

    # (1) Apply threading options
    if options.experimental_threading is not None:
      t_options = options.experimental_threading
      if t_options.max_intra_op_parallelism is not None:
        dataset = _MaxIntraOpParallelismDataset(
            dataset, t_options.max_intra_op_parallelism)
      if t_options.private_threadpool_size is not None:
        dataset = _PrivateThreadPoolDataset(dataset,
                                            t_options.private_threadpool_size)

    # (2) Apply graph rewrite options
    # pylint: disable=protected-access
    graph_rewrites = options._graph_rewrites()
    graph_rewrite_configs = options._graph_rewrite_configs()
    # pylint: enable=protected-access
    if graph_rewrites:
      # Graph rewrites and captured RefVariables don't mix, so warn and skip
      # the rewrites instead of producing an invalid pipeline.
      if self._has_captured_ref():
        warnings.warn(
            "tf.data graph rewrites are not compatible with tf.Variable. "
            "The following rewrites will be disabled: %s. To enable "
            "rewrites, use resource variables instead by calling "
            "`tf.enable_resource_variables()` at the start of the program." %
            ", ".join(graph_rewrites))
      else:
        dataset = _OptimizeDataset(dataset, graph_rewrites,
                                   graph_rewrite_configs)

    # (3) Apply autotune options
    autotune, algorithm, cpu_budget = options._autotune_settings()  # pylint: disable=protected-access
    if autotune:
      dataset = _ModelDataset(dataset, algorithm, cpu_budget)

    # (4) Apply stats aggregator options
    if options.experimental_stats and options.experimental_stats.aggregator:  # pylint: disable=line-too-long
      dataset = _SetStatsAggregatorDataset(  # pylint: disable=protected-access
          dataset, options.experimental_stats.aggregator,
          options.experimental_stats.prefix,
          options.experimental_stats.counter_prefix)
    return dataset
def __iter__(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
The returned iterator implements the Python iterator protocol and therefore
can only be used in eager mode.
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If not inside of tf.function and not executing eagerly.
"""
if (context.executing_eagerly()
or ops.get_default_graph()._building_function): # pylint: disable=protected-access
return iterator_ops.OwnedIterator(self)
else:
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
  @abc.abstractproperty
  def element_spec(self):
    """The type specification of an element of this dataset.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> dataset.element_spec
    TensorSpec(shape=(), dtype=tf.int32, name=None)

    Returns:
      A nested structure of `tf.TypeSpec` objects matching the structure of an
      element of this dataset and specifying the type of individual components.
    """
    raise NotImplementedError("Dataset.element_spec")
def __repr__(self):
output_shapes = nest.map_structure(str, get_legacy_output_shapes(self))
output_shapes = str(output_shapes).replace("'", "")
output_types = nest.map_structure(repr, get_legacy_output_types(self))
output_types = str(output_types).replace("'", "")
return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes,
output_types))
  def as_numpy_iterator(self):
    """Returns an iterator which converts all elements of the dataset to numpy.

    Use `as_numpy_iterator` to inspect the content of your dataset. To see
    element shapes and types, print dataset elements directly instead of using
    `as_numpy_iterator`.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> for element in dataset:
    ...   print(element)
    tf.Tensor(1, shape=(), dtype=int32)
    tf.Tensor(2, shape=(), dtype=int32)
    tf.Tensor(3, shape=(), dtype=int32)

    This method requires that you are running in eager mode and the dataset's
    element_spec contains only `TensorSpec` components.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> for element in dataset.as_numpy_iterator():
    ...   print(element)
    1
    2
    3

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> print(list(dataset.as_numpy_iterator()))
    [1, 2, 3]

    `as_numpy_iterator()` will preserve the nested structure of dataset
    elements.

    >>> dataset = tf.data.Dataset.from_tensor_slices({'a': ([1, 2], [3, 4]),
    ...                                               'b': [5, 6]})
    >>> list(dataset.as_numpy_iterator()) == [{'a': (1, 3), 'b': 5},
    ...                                       {'a': (2, 4), 'b': 6}]
    True

    Returns:
      An iterable over the elements of the dataset, with their tensors converted
      to numpy arrays.

    Raises:
      TypeError: if an element contains a non-`Tensor` value.
      RuntimeError: if eager execution is not enabled.
    """
    if not context.executing_eagerly():
      raise RuntimeError("as_numpy_iterator() is not supported while tracing "
                         "functions")
    # Reject composite components (sparse/ragged/datasets) up front: only
    # plain tensors have a numpy conversion.
    for component_spec in nest.flatten(self.element_spec):
      if not isinstance(component_spec, tensor_spec.TensorSpec):
        raise TypeError(
            "Dataset.as_numpy_iterator() does not support datasets containing "
            + str(component_spec.value_type))

    return _NumpyIterator(self)
  @property
  def _flat_shapes(self):
    """Returns a list `tf.TensorShapes`s for the element tensor representation.

    Returns:
      A list `tf.TensorShapes`s for the element tensor representation.
    """
    # One shape per flattened component of `element_spec`.
    return structure.get_flat_tensor_shapes(self.element_spec)
  @property
  def _flat_types(self):
    """Returns a list `tf.DType`s for the element tensor representation.

    Returns:
      A list `tf.DType`s for the element tensor representation.
    """
    # One dtype per flattened component of `element_spec`.
    return structure.get_flat_tensor_types(self.element_spec)
@property
def _flat_structure(self):
"""Helper for setting `output_shapes` and `output_types` attrs of an op.
Most dataset op constructors expect `output_shapes` and `output_types`
arguments that represent the flattened structure of an element. This helper
function generates these attrs as a keyword argument dictionary, allowing
`Dataset._variant_tensor` implementations to pass `**self._flat_structure`
to the op constructor.
Returns:
A dictionary of keyword arguments that can be passed to a dataset op
constructor.
"""
return {
"output_shapes": self._flat_shapes,
"output_types": self._flat_types,
}
  @property
  def _type_spec(self):
    # CompositeTensor hook: a Dataset's type spec is a DatasetSpec built from
    # its element spec.
    return DatasetSpec(self.element_spec)
  @staticmethod
  def from_tensors(tensors):
    """Creates a `Dataset` with a single element, comprising the given tensors.

    >>> dataset = tf.data.Dataset.from_tensors([1, 2, 3])
    >>> list(dataset.as_numpy_iterator())
    [array([1, 2, 3], dtype=int32)]
    >>> dataset = tf.data.Dataset.from_tensors(([1, 2, 3], 'A'))
    >>> list(dataset.as_numpy_iterator())
    [(array([1, 2, 3], dtype=int32), b'A')]

    Note that if `tensors` contains a NumPy array, and eager execution is not
    enabled, the values will be embedded in the graph as one or more
    `tf.constant` operations. For large datasets (> 1 GB), this can waste
    memory and run into byte limits of graph serialization. If `tensors`
    contains one or more large NumPy arrays, consider the alternative described
    in [this
    guide](https://tensorflow.org/guide/data#consuming_numpy_arrays).

    Args:
      tensors: A dataset element.

    Returns:
      Dataset: A `Dataset`.
    """
    # The whole structure becomes one element; no slicing is performed.
    return TensorDataset(tensors)
  @staticmethod
  def from_tensor_slices(tensors):
    """Creates a `Dataset` whose elements are slices of the given tensors.

    The given tensors are sliced along their first dimension. This operation
    preserves the structure of the input tensors, removing the first dimension
    of each tensor and using it as the dataset dimension. All input tensors
    must have the same size in their first dimensions.

    >>> # Slicing a 1D tensor produces scalar tensor elements.
    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> list(dataset.as_numpy_iterator())
    [1, 2, 3]

    >>> # Slicing a 2D tensor produces 1D tensor elements.
    >>> dataset = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]])
    >>> list(dataset.as_numpy_iterator())
    [array([1, 2], dtype=int32), array([3, 4], dtype=int32)]

    >>> # Slicing a tuple of 1D tensors produces tuple elements containing
    >>> # scalar tensors.
    >>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2], [3, 4], [5, 6]))
    >>> list(dataset.as_numpy_iterator())
    [(1, 3, 5), (2, 4, 6)]

    >>> # Dictionary structure is also preserved.
    >>> dataset = tf.data.Dataset.from_tensor_slices({"a": [1, 2], "b": [3, 4]})
    >>> list(dataset.as_numpy_iterator()) == [{'a': 1, 'b': 3},
    ...                                       {'a': 2, 'b': 4}]
    True

    >>> # Two tensors can be combined into one Dataset object.
    >>> features = tf.constant([[1, 3], [2, 1], [3, 3]]) # ==> 3x2 tensor
    >>> labels = tf.constant(['A', 'B', 'A']) # ==> 3x1 tensor
    >>> dataset = Dataset.from_tensor_slices((features, labels))
    >>> # Both the features and the labels tensors can be converted
    >>> # to a Dataset object separately and combined after.
    >>> features_dataset = Dataset.from_tensor_slices(features)
    >>> labels_dataset = Dataset.from_tensor_slices(labels)
    >>> dataset = Dataset.zip((features_dataset, labels_dataset))
    >>> # A batched feature and label set can be converted to a Dataset
    >>> # in similar fashion.
    >>> batched_features = tf.constant([[[1, 3], [2, 3]],
    ...                                 [[2, 1], [1, 2]],
    ...                                 [[3, 3], [3, 2]]], shape=(3, 2, 2))
    >>> batched_labels = tf.constant([['A', 'A'],
    ...                               ['B', 'B'],
    ...                               ['A', 'B']], shape=(3, 2, 1))
    >>> dataset = Dataset.from_tensor_slices((batched_features, batched_labels))
    >>> for element in dataset.as_numpy_iterator():
    ...   print(element)
    (array([[1, 3],
           [2, 3]], dtype=int32), array([[b'A'],
           [b'A']], dtype=object))
    (array([[2, 1],
           [1, 2]], dtype=int32), array([[b'B'],
           [b'B']], dtype=object))
    (array([[3, 3],
           [3, 2]], dtype=int32), array([[b'A'],
           [b'B']], dtype=object))

    Note that if `tensors` contains a NumPy array, and eager execution is not
    enabled, the values will be embedded in the graph as one or more
    `tf.constant` operations. For large datasets (> 1 GB), this can waste
    memory and run into byte limits of graph serialization. If `tensors`
    contains one or more large NumPy arrays, consider the alternative described
    in [this guide](
    https://tensorflow.org/guide/data#consuming_numpy_arrays).

    Args:
      tensors: A dataset element, with each component having the same size in
        the first dimension.

    Returns:
      Dataset: A `Dataset`.
    """
    # One dataset element per index along the leading dimension.
    return TensorSliceDataset(tensors)
class _GeneratorState(object):
"""Stores outstanding iterators created from a Python generator.
This class keeps track of potentially multiple iterators that may have
been created from a generator, e.g. in the case that the dataset is
repeated, or nested within a parallel computation.
"""
def __init__(self, generator):
self._generator = generator
self._lock = threading.Lock()
self._next_id = 0 # GUARDED_BY(self._lock)
self._args = {}
self._iterators = {}
def get_next_id(self, *args):
with self._lock:
ret = self._next_id
self._next_id += 1
self._args[ret] = args
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(ret, dtype=np.int64)
def get_iterator(self, iterator_id):
try:
return self._iterators[iterator_id]
except KeyError:
iterator = iter(self._generator(*self._args.pop(iterator_id)))
self._iterators[iterator_id] = iterator
return iterator
def iterator_completed(self, iterator_id):
del self._iterators[iterator_id]
@staticmethod
def from_generator(generator, output_types, output_shapes=None, args=None):
"""Creates a `Dataset` whose elements are generated by `generator`.
The `generator` argument must be a callable object that returns
an object that supports the `iter()` protocol (e.g. a generator function).
The elements generated by `generator` must be compatible with the given
`output_types` and (optional) `output_shapes` arguments.
>>> import itertools
>>>
>>> def gen():
... for i in itertools.count(1):
... yield (i, [1] * i)
>>>
>>> dataset = tf.data.Dataset.from_generator(
... gen,
... (tf.int64, tf.int64),
... (tf.TensorShape([]), tf.TensorShape([None])))
>>>
>>> list(dataset.take(3).as_numpy_iterator())
[(1, array([1])), (2, array([1, 1])), (3, array([1, 1, 1]))]
NOTE: The current implementation of `Dataset.from_generator()` uses
`tf.numpy_function` and inherits the same constraints. In particular, it
requires the `Dataset`- and `Iterator`-related operations to be placed
on a device in the same process as the Python program that called
`Dataset.from_generator()`. The body of `generator` will not be
serialized in a `GraphDef`, and you should not use this method if you
need to serialize your model and restore it in a different environment.
NOTE: If `generator` depends on mutable global variables or other external
state, be aware that the runtime may invoke `generator` multiple times
(in order to support repeating the `Dataset`) and at any time
between the call to `Dataset.from_generator()` and the production of the
first element from the generator. Mutating global variables or external
state can cause undefined behavior, and we recommend that you explicitly
cache any external state in `generator` before calling
`Dataset.from_generator()`.
Args:
generator: A callable object that returns an object that supports the
`iter()` protocol. If `args` is not specified, `generator` must take no
arguments; otherwise it must take as many arguments as there are values
in `args`.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element yielded by `generator`.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element yielded by `generator`.
args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated
and passed to `generator` as NumPy-array arguments.
Returns:
Dataset: A `Dataset`.
"""
if not callable(generator):
raise TypeError("`generator` must be callable.")
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if args is None:
args = ()
else:
args = tuple(ops.convert_n_to_tensor(args, name="args"))
flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]
flattened_shapes = nest.flatten(output_shapes)
generator_state = DatasetV2._GeneratorState(generator)
def get_iterator_id_fn(unused_dummy):
"""Creates a unique `iterator_id` for each pass over the dataset.
The returned `iterator_id` disambiguates between multiple concurrently
existing iterators.
Args:
unused_dummy: Ignored value.
Returns:
A `tf.int64` tensor whose value uniquely identifies an iterator in
`generator_state`.
"""
return script_ops.numpy_function(generator_state.get_next_id, args,
dtypes.int64)
def generator_next_fn(iterator_id_t):
"""Generates the next element from iterator with ID `iterator_id_t`.
We map this function across an infinite repetition of the
`iterator_id_t`, and raise `StopIteration` to terminate the iteration.
Args:
iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the
iterator in `generator_state` from which to generate an element.
Returns:
The next element to generate from the iterator.
"""
def generator_py_func(iterator_id):
"""A `py_func` that will be called to invoke the iterator."""
# `next()` raises `StopIteration` when there are no more
# elements remaining to be generated.
values = next(generator_state.get_iterator(iterator_id))
# Use the same _convert function from the py_func() implementation to
# convert the returned values to arrays early, so that we can inspect
# their values.
try:
flattened_values = nest.flatten_up_to(output_types, values)
except (TypeError, ValueError):
six.reraise(TypeError, TypeError(
"`generator` yielded an element that did not match the expected "
"structure. The expected structure was %s, but the yielded "
"element was %s." % (output_types, values)), sys.exc_info()[2])
ret_arrays = []
for ret, dtype in zip(flattened_values, flattened_types):
try:
ret_arrays.append(script_ops.FuncRegistry._convert( # pylint: disable=protected-access
ret, dtype=dtype.as_numpy_dtype))
except (TypeError, ValueError):
six.reraise(TypeError, TypeError(
"`generator` yielded an element that could not be converted to "
"the expected type. The expected type was %s, but the yielded "
"element was %s." % (dtype.name, ret)), sys.exc_info()[2])
# Additional type and shape checking to ensure that the components
# of the generated element match the `output_types` and `output_shapes`
# arguments.
for (ret_array, expected_dtype, expected_shape) in zip(
ret_arrays, flattened_types, flattened_shapes):
if ret_array.dtype != expected_dtype.as_numpy_dtype:
raise TypeError(
"`generator` yielded an element of type %s where an element "
"of type %s was expected." % (ret_array.dtype,
expected_dtype.as_numpy_dtype))
if not expected_shape.is_compatible_with(ret_array.shape):
raise ValueError(
"`generator` yielded an element of shape %s where an element "
"of shape %s was expected." % (ret_array.shape, expected_shape))
return ret_arrays
flat_values = script_ops.numpy_function(generator_py_func,
[iterator_id_t], flattened_types)
# The `py_func()` op drops the inferred shapes, so we add them back in
# here.
if output_shapes is not None:
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
def finalize_fn(iterator_id_t):
"""Releases host-side state for the iterator with ID `iterator_id_t`."""
def finalize_py_func(iterator_id):
generator_state.iterator_completed(iterator_id)
# We return a dummy value so that the `finalize_fn` has a valid
# signature.
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(0, dtype=np.int64)
return script_ops.numpy_function(finalize_py_func, [iterator_id_t],
dtypes.int64)
# This function associates each traversal of `generator` with a unique
# iterator ID.
def flat_map_fn(dummy_arg):
# The `get_iterator_id_fn` gets a unique ID for the current instance of
# of the generator.
# The `generator_next_fn` gets the next element from the iterator with the
# given ID, and raises StopIteration when that iterator contains no
# more elements.
return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,
finalize_fn)
# A single-element dataset that, each time it is evaluated, contains a
# freshly-generated and unique (for the returned dataset) int64
# ID that will be used to identify the appropriate Python state, which
# is encapsulated in `generator_state`, and captured in
# `get_iterator_id_map_fn`.
dummy = 0
id_dataset = Dataset.from_tensors(dummy)
# A dataset that contains all of the elements generated by a
# single iterator created from `generator`, identified by the
# iterator ID contained in `id_dataset`. Lifting the iteration
# into a flat_map here enables multiple repetitions and/or nested
# versions of the returned dataset to be created, because it forces
# the generation of a new ID for each version.
return id_dataset.flat_map(flat_map_fn)
  @staticmethod
  def range(*args, **kwargs):
    """Creates a `Dataset` of a step-separated range of values.
    >>> list(Dataset.range(5).as_numpy_iterator())
    [0, 1, 2, 3, 4]
    >>> list(Dataset.range(2, 5).as_numpy_iterator())
    [2, 3, 4]
    >>> list(Dataset.range(1, 5, 2).as_numpy_iterator())
    [1, 3]
    >>> list(Dataset.range(1, 5, -2).as_numpy_iterator())
    []
    >>> list(Dataset.range(5, 1).as_numpy_iterator())
    []
    >>> list(Dataset.range(5, 1, -2).as_numpy_iterator())
    [5, 3]
    >>> list(Dataset.range(2, 5, output_type=tf.int32).as_numpy_iterator())
    [2, 3, 4]
    >>> list(Dataset.range(1, 5, 2, output_type=tf.float32).as_numpy_iterator())
    [1.0, 3.0]
    Args:
      *args: follows the same semantics as python's xrange.
        len(args) == 1 -> start = 0, stop = args[0], step = 1.
        len(args) == 2 -> start = args[0], stop = args[1], step = 1.
        len(args) == 3 -> start = args[0], stop = args[1], step = args[2].
      **kwargs:
        - output_type: Its expected dtype. (Optional, default: `tf.int64`).
    Returns:
      Dataset: A `RangeDataset`.
    Raises:
      ValueError: if len(args) == 0.
    """
    # Argument unpacking and validation are delegated to `RangeDataset`.
    return RangeDataset(*args, **kwargs)
@staticmethod
def zip(datasets):
"""Creates a `Dataset` by zipping together the given datasets.
This method has similar semantics to the built-in `zip()` function
in Python, with the main difference being that the `datasets`
argument can be an arbitrary nested structure of `Dataset` objects.
>>> # The nested structure of the `datasets` argument determines the
>>> # structure of elements in the resulting dataset.
>>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
>>> b = tf.data.Dataset.range(4, 7) # ==> [ 4, 5, 6 ]
>>> ds = tf.data.Dataset.zip((a, b))
>>> list(ds.as_numpy_iterator())
[(1, 4), (2, 5), (3, 6)]
>>> ds = tf.data.Dataset.zip((b, a))
>>> list(ds.as_numpy_iterator())
[(4, 1), (5, 2), (6, 3)]
>>>
>>> # The `datasets` argument may contain an arbitrary number of datasets.
>>> c = tf.data.Dataset.range(7, 13).batch(2) # ==> [ [7, 8],
... # [9, 10],
... # [11, 12] ]
>>> ds = tf.data.Dataset.zip((a, b, c))
>>> for element in ds.as_numpy_iterator():
... print(element)
(1, 4, array([7, 8]))
(2, 5, array([ 9, 10]))
(3, 6, array([11, 12]))
>>>
>>> # The number of elements in the resulting dataset is the same as
>>> # the size of the smallest dataset in `datasets`.
>>> d = tf.data.Dataset.range(13, 15) # ==> [ 13, 14 ]
>>> ds = tf.data.Dataset.zip((a, d))
>>> list(ds.as_numpy_iterator())
[(1, 13), (2, 14)]
Args:
datasets: A nested structure of datasets.
Returns:
Dataset: A `Dataset`.
"""
return ZipDataset(datasets)
def concatenate(self, dataset):
"""Creates a `Dataset` by concatenating the given dataset with this dataset.
>>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
>>> b = tf.data.Dataset.range(4, 8) # ==> [ 4, 5, 6, 7 ]
>>> ds = a.concatenate(b)
>>> list(ds.as_numpy_iterator())
[1, 2, 3, 4, 5, 6, 7]
>>> # The input dataset and dataset to be concatenated should have the same
>>> # nested structures and output types.
>>> c = tf.data.Dataset.zip((a, b))
>>> a.concatenate(c)
Traceback (most recent call last):
TypeError: Two datasets to concatenate have different types
<dtype: 'int64'> and (tf.int64, tf.int64)
>>> d = tf.data.Dataset.from_tensor_slices(["a", "b", "c"])
>>> a.concatenate(d)
Traceback (most recent call last):
TypeError: Two datasets to concatenate have different types
<dtype: 'int64'> and <dtype: 'string'>
Args:
dataset: `Dataset` to be concatenated.
Returns:
Dataset: A `Dataset`.
"""
return ConcatenateDataset(self, dataset)
def prefetch(self, buffer_size):
"""Creates a `Dataset` that prefetches elements from this dataset.
Most dataset input pipelines should end with a call to `prefetch`. This
allows later elements to be prepared while the current element is being
processed. This often improves latency and throughput, at the cost of
using additional memory to store prefetched elements.
Note: Like other `Dataset` methods, prefetch operates on the
elements of the input dataset. It has no concept of examples vs. batches.
`examples.prefetch(2)` will prefetch two elements (2 examples),
while `examples.batch(20).prefetch(2)` will prefetch 2 elements
(2 batches, of 20 examples each).
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.prefetch(2)
>>> list(dataset.as_numpy_iterator())
[0, 1, 2]
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
number of elements that will be buffered when prefetching.
Returns:
Dataset: A `Dataset`.
"""
return PrefetchDataset(self, buffer_size)
@staticmethod
def list_files(file_pattern, shuffle=None, seed=None):
"""A dataset of all files matching one or more glob patterns.
The `file_pattern` argument should be a small number of glob patterns.
If your filenames have already been globbed, use
`Dataset.from_tensor_slices(filenames)` instead, as re-globbing every
filename with `list_files` may result in poor performance with remote
storage systems.
NOTE: The default behavior of this method is to return filenames in
a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
to get results in a deterministic order.
Example:
If we had the following files on our filesystem:
- /path/to/dir/a.txt
- /path/to/dir/b.py
- /path/to/dir/c.py
If we pass "/path/to/dir/*.py" as the directory, the dataset
would produce:
- /path/to/dir/b.py
- /path/to/dir/c.py
Args:
file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
(scalar or vector), representing the filename glob (i.e. shell wildcard)
pattern(s) that will be matched.
shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
Defaults to `True`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
Dataset: A `Dataset` of strings corresponding to file names.
"""
with ops.name_scope("list_files"):
if shuffle is None:
shuffle = True
file_pattern = ops.convert_to_tensor(
file_pattern, dtype=dtypes.string, name="file_pattern")
matching_files = gen_io_ops.matching_files(file_pattern)
# Raise an exception if `file_pattern` does not match any files.
condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,
name="match_not_empty")
message = math_ops.add(
"No files matched pattern: ",
string_ops.reduce_join(file_pattern, separator=", "), name="message")
assert_not_empty = control_flow_ops.Assert(
condition, [message], summarize=1, name="assert_not_empty")
with ops.control_dependencies([assert_not_empty]):
matching_files = array_ops.identity(matching_files)
dataset = Dataset.from_tensor_slices(matching_files)
if shuffle:
# NOTE(mrry): The shuffle buffer size must be greater than zero, but the
# list of files might be empty.
buffer_size = math_ops.maximum(
array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)
dataset = dataset.shuffle(buffer_size, seed=seed)
return dataset
def repeat(self, count=None):
"""Repeats this dataset so each original value is seen `count` times.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.repeat(3)
>>> list(dataset.as_numpy_iterator())
[1, 2, 3, 1, 2, 3, 1, 2, 3]
NOTE: If this dataset is a function of global state (e.g. a random number
generator), then different repetitions may produce different elements.
Args:
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior (if
`count` is `None` or `-1`) is for the dataset be repeated indefinitely.
Returns:
Dataset: A `Dataset`.
"""
return RepeatDataset(self, count)
def enumerate(self, start=0):
"""Enumerates the elements of this dataset.
It is similar to python's `enumerate`.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.enumerate(start=5)
>>> for element in dataset.as_numpy_iterator():
... print(element)
(5, 1)
(6, 2)
(7, 3)
>>> # The nested structure of the input dataset determines the structure of
>>> # elements in the resulting dataset.
>>> dataset = tf.data.Dataset.from_tensor_slices([(7, 8), (9, 10)])
>>> dataset = dataset.enumerate()
>>> for element in dataset.as_numpy_iterator():
... print(element)
(0, array([7, 8], dtype=int32))
(1, array([ 9, 10], dtype=int32))
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
Dataset: A `Dataset`.
"""
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
return Dataset.zip((Dataset.range(start, max_value), self))
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
This dataset fills a buffer with `buffer_size` elements, then randomly
samples elements from this buffer, replacing the selected elements with new
elements. For perfect shuffling, a buffer size greater than or equal to the
full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is
set to 1,000, then `shuffle` will initially select a random element from
only the first 1,000 elements in the buffer. Once an element is selected,
its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1,000 element buffer.
`reshuffle_each_iteration` controls whether the shuffle order should be
different for each epoch. In TF 1.X, the idiomatic way to create epochs
was through the `repeat` transformation:
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.shuffle(3, reshuffle_each_iteration=True)
>>> dataset = dataset.repeat(2) # doctest: +SKIP
[1, 0, 2, 1, 2, 0]
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.shuffle(3, reshuffle_each_iteration=False)
>>> dataset = dataset.repeat(2) # doctest: +SKIP
[1, 0, 2, 1, 0, 2]
In TF 2.0, `tf.data.Dataset` objects are Python iterables which makes it
possible to also create epochs through Python iteration:
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.shuffle(3, reshuffle_each_iteration=True)
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 0, 2]
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 2, 0]
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.shuffle(3, reshuffle_each_iteration=False)
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 0, 2]
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 0, 2]
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
Dataset: A `Dataset`.
"""
return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)
def cache(self, filename=""):
"""Caches the elements in this dataset.
The first time the dataset is iterated over, its elements will be cached
either in the specified file or in memory. Subsequent iterations will
use the cached data.
Note: For the cache to be finalized, the input dataset must be iterated
through in its entirety. Otherwise, subsequent iterations will not use
cached data.
>>> dataset = tf.data.Dataset.range(5)
>>> dataset = dataset.map(lambda x: x**2)
>>> dataset = dataset.cache()
>>> # The first time reading through the data will generate the data using
>>> # `range` and `map`.
>>> list(dataset.as_numpy_iterator())
[0, 1, 4, 9, 16]
>>> # Subsequent iterations read from the cache.
>>> list(dataset.as_numpy_iterator())
[0, 1, 4, 9, 16]
When caching to a file, the cached data will persist across runs. Even the
first iteration through the data will read from the cache file. Changing
the input pipeline before the call to `.cache()` will have no effect until
the cache file is removed or the filename is changed.
>>> dataset = tf.data.Dataset.range(5)
>>> dataset = dataset.cache("/path/to/file) # doctest: +SKIP
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[0, 1, 2, 3, 4]
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.cache("/path/to/file") # Same file! # doctest: +SKIP
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[0, 1, 2, 3, 4]
Note: `cache` will produce exactly the same elements during each iteration
through the dataset. If you wish to randomize the iteration order, make sure
to call `shuffle` *after* calling `cache`.
Args:
filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
directory on the filesystem to use for caching elements in this Dataset.
If a filename is not provided, the dataset will be cached in memory.
Returns:
Dataset: A `Dataset`.
"""
return CacheDataset(self, filename)
def take(self, count):
"""Creates a `Dataset` with at most `count` elements from this dataset.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.take(3)
>>> list(dataset.as_numpy_iterator())
[0, 1, 2]
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be taken to form the new dataset.
If `count` is -1, or if `count` is greater than the size of this
dataset, the new dataset will contain all elements of this dataset.
Returns:
Dataset: A `Dataset`.
"""
return TakeDataset(self, count)
def skip(self, count):
"""Creates a `Dataset` that skips `count` elements from this dataset.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.skip(7)
>>> list(dataset.as_numpy_iterator())
[7, 8, 9]
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be skipped to form the new dataset.
If `count` is greater than the size of this dataset, the new dataset
will contain no elements. If `count` is -1, skips the entire dataset.
Returns:
Dataset: A `Dataset`.
"""
return SkipDataset(self, count)
def shard(self, num_shards, index):
"""Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
`shard` is deterministic. The Dataset produced by `A.shard(n, i)` will
contain all elements of A whose index mod n = i.
>>> A = tf.data.Dataset.range(10)
>>> B = A.shard(num_shards=3, index=0)
>>> list(B.as_numpy_iterator())
[0, 3, 6, 9]
>>> C = A.shard(num_shards=3, index=1)
>>> list(C.as_numpy_iterator())
[1, 4, 7]
>>> D = A.shard(num_shards=3, index=2)
>>> list(D.as_numpy_iterator())
[2, 5, 8]
This dataset operator is very useful when running distributed training, as
it allows each worker to read a unique subset.
When reading a single input file, you can shard elements as follows:
```python
d = tf.data.TFRecordDataset(input_file)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Important caveats:
- Be sure to shard before you use any randomizing operator (such as
shuffle).
- Generally it is best if the shard operator is used early in the dataset
pipeline. For example, when reading from a set of TFRecord files, shard
before converting the dataset to input samples. This avoids reading every
file on every worker. The following is an example of an efficient
sharding strategy within a complete pipeline:
```python
d = Dataset.list_files(pattern)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.interleave(tf.data.TFRecordDataset,
cycle_length=num_readers, block_length=1)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Args:
num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
shards operating in parallel.
index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
Returns:
Dataset: A `Dataset`.
Raises:
InvalidArgumentError: if `num_shards` or `index` are illegal values.
Note: error checking is done on a best-effort basis, and errors aren't
guaranteed to be caught upon dataset creation. (e.g. providing in a
placeholder tensor bypasses the early checking, and will instead result
in an error during a session.run call.)
"""
return ShardDataset(self, num_shards, index)
def batch(self, batch_size, drop_remainder=False):
"""Combines consecutive elements of this dataset into batches.
>>> dataset = tf.data.Dataset.range(8)
>>> dataset = dataset.batch(3)
>>> list(dataset.as_numpy_iterator())
[array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]
>>> dataset = tf.data.Dataset.range(8)
>>> dataset = dataset.batch(3, drop_remainder=True)
>>> list(dataset.as_numpy_iterator())
[array([0, 1, 2]), array([3, 4, 5])]
The components of the resulting element will have an additional outer
dimension, which will be `batch_size` (or `N % batch_size` for the last
element if `batch_size` does not divide the number of input elements `N`
evenly and `drop_remainder` is `False`). If your program depends on the
batches having the same outer dimension, you should set the `drop_remainder`
argument to `True` to prevent the smaller batch from being produced.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return BatchDataset(self, batch_size, drop_remainder)
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
"""Combines consecutive elements of this dataset into padded batches.
This transformation combines multiple consecutive elements of the input
dataset into a single element.
Like `tf.data.Dataset.batch`, the components of the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
your program depends on the batches having the same outer dimension, you
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes, and this transformation will pad each component to the
respective shape in `padding_shapes`. The `padding_shapes` argument
determines the resulting shape for each dimension of each component in an
output element:
* If the dimension is a constant (e.g. `tf.compat.v1.Dimension(37)`), the
component will be padded out to that length in that dimension.
* If the dimension is unknown (e.g. `tf.compat.v1.Dimension(None)`), the
component will be padded out to the maximum length of all elements in that
dimension.
>>> elements = [[1, 2],
... [3, 4, 5],
... [6, 7],
... [8]]
>>> A = tf.data.Dataset.from_generator(lambda: iter(elements), tf.int32)
>>> # Pad to the smallest per-batch size that fits all elements.
>>> B = A.padded_batch(2, padded_shapes=[None])
>>> for element in B.as_numpy_iterator():
... print(element)
[[1 2 0]
[3 4 5]]
[[6 7]
[8 0]]
>>> # Pad to a fixed size.
>>> C = A.padded_batch(2, padded_shapes=3)
>>> for element in C.as_numpy_iterator():
... print(element)
[[1 2 0]
[3 4 5]]
[[6 7 0]
[8 0 0]]
>>> # Pad with a custom value.
>>> D = A.padded_batch(2, padded_shapes=3, padding_values=-1)
>>> for element in D.as_numpy_iterator():
... print(element)
[[ 1 2 -1]
[ 3 4 5]]
[[ 6 7 -1]
[ 8 -1 -1]]
>>> # Components of nested elements can be padded independently.
>>> elements = [([1, 2, 3], [10]),
... ([4, 5], [11, 12])]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: iter(elements), (tf.int32, tf.int32))
>>> # Pad the first component of the tuple to length 4, and the second
>>> # component to the smallest size that fits.
>>> dataset = dataset.padded_batch(2,
... padded_shapes=([4], [None]),
... padding_values=(-1, 100))
>>> list(dataset.as_numpy_iterator())
[(array([[ 1, 2, 3, -1], [ 4, 5, -1, -1]], dtype=int32),
array([[ 10, 100], [ 11, 12]], dtype=int32))]
See also `tf.data.experimental.dense_to_sparse_batch`, which combines
elements that may have different shapes into a `tf.SparseTensor`.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: A nested structure of `tf.TensorShape` or `tf.int64` vector
tensor-like objects representing the shape to which the respective
component of each input element should be padded prior to batching. Any
unknown dimensions (e.g. `tf.compat.v1.Dimension(None)` in a
`tf.TensorShape` or `-1` in a tensor-like object) will be padded to the
maximum size of that dimension in each batch.
padding_values: (Optional.) A nested structure of scalar-shaped
`tf.Tensor`, representing the padding values to use for the respective
components. None represents that the nested structure should be padded
with default values. Defaults are `0` for numeric types and the empty
string for string types.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,
drop_remainder)
def map(self, map_func, num_parallel_calls=None):
"""Maps `map_func` across the elements of this dataset.
This transformation applies `map_func` to each element of this dataset, and
returns a new dataset containing the transformed elements, in the same
order as they appeared in the input. `map_func` can be used to change both
the values and the structure of a dataset's elements. For example, adding 1
to each element, or projecting a subset of element components.
>>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
>>> dataset = dataset.map(lambda x: x + 1)
>>> list(dataset.as_numpy_iterator())
[2, 3, 4, 5, 6]
The input signature of `map_func` is determined by the structure of each
element in this dataset.
>>> dataset = Dataset.range(5)
>>> # `map_func` takes a single argument of type `tf.Tensor` with the same
>>> # shape and dtype.
>>> result = dataset.map(lambda x: x + 1)
>>> # Each element is a tuple containing two `tf.Tensor` objects.
>>> elements = [(1, "foo"), (2, "bar"), (3, "baz)")]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, (tf.int32, tf.string))
>>> # `map_func` takes two arguments of type `tf.Tensor`. This function
>>> # projects out just the first component.
>>> result = dataset.map(lambda x_int, y_str: x_int)
>>> list(result.as_numpy_iterator())
[1, 2, 3]
>>> # Each element is a dictionary mapping strings to `tf.Tensor` objects.
>>> elements = ([{"a": 1, "b": "foo"},
... {"a": 2, "b": "bar"},
... {"a": 3, "b": "baz"}])
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, {"a": tf.int32, "b": tf.string})
>>> # `map_func` takes a single argument of type `dict` with the same keys
>>> # as the elements.
>>> result = dataset.map(lambda d: str(d["a"]) + d["b"])
The value or values returned by `map_func` determine the structure of each
element in the returned dataset.
>>> dataset = tf.data.Dataset.range(3)
>>> # `map_func` returns two `tf.Tensor` objects.
>>> def g(x):
... return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
>>> result = dataset.map(g)
>>> result.element_spec
(TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(3,), \
dtype=tf.string, name=None))
>>> # Python primitives, lists, and NumPy arrays are implicitly converted to
>>> # `tf.Tensor`.
>>> def h(x):
... return 37.0, ["Foo", "Bar"], np.array([1.0, 2.0], dtype=np.float64)
>>> result = dataset.map(h)
>>> result.element_spec
(TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(2,), \
dtype=tf.string, name=None), TensorSpec(shape=(2,), dtype=tf.float64, \
name=None))
>>> # `map_func` can return nested structures.
>>> def i(x):
... return (37.0, [42, 16]), "foo"
>>> result = dataset.map(i)
>>> result.element_spec
((TensorSpec(shape=(), dtype=tf.float32, name=None),
TensorSpec(shape=(2,), dtype=tf.int32, name=None)),
TensorSpec(shape=(), dtype=tf.string, name=None))
`map_func` can accept as arguments and return any type of dataset element.
Note that irrespective of the context in which `map_func` is defined (eager
vs. graph), tf.data traces the function and executes it as a graph. To use
Python code inside of the function you have two options:
1) Rely on AutoGraph to convert Python code into an equivalent graph
computation. The downside of this approach is that AutoGraph can convert
some but not all Python code.
2) Use `tf.py_function`, which allows you to write arbitrary Python code but
will generally result in worse performance than 1). For example:
>>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])
>>> # transform a string tensor to upper case string using a Python function
>>> def upper_case_fn(t: tf.Tensor):
... return t.numpy().decode('utf-8').upper()
>>> d = d.map(lambda x: tf.py_function(func=upper_case_fn,
... inp=[x], Tout=tf.string))
>>> list(d.as_numpy_iterator())
[b'HELLO', b'WORLD']
Args:
map_func: A function mapping a dataset element to another dataset element.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return MapDataset(self, map_func, preserve_cardinality=True)
else:
return ParallelMapDataset(
self, map_func, num_parallel_calls, preserve_cardinality=True)
def flat_map(self, map_func):
"""Maps `map_func` across this dataset and flattens the result.
Use `flat_map` if you want to make sure that the order of your dataset
stays the same. For example, to flatten a dataset of batches into a
dataset of their elements:
>>> dataset = Dataset.from_tensor_slices([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> dataset = dataset.flat_map(lambda x: Dataset.from_tensor_slices(x))
>>> list(dataset.as_numpy_iterator())
[1, 2, 3, 4, 5, 6, 7, 8, 9]
`tf.data.Dataset.interleave()` is a generalization of `flat_map`, since
`flat_map` produces the same output as
`tf.data.Dataset.interleave(cycle_length=1)`
Args:
map_func: A function mapping a dataset element to a dataset.
Returns:
Dataset: A `Dataset`.
"""
return FlatMapDataset(self, map_func)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
"""Maps `map_func` across this dataset, and interleaves the results.
For example, you can use `Dataset.interleave()` to process many input files
concurrently:
>>> # Preprocess 4 files concurrently, and interleave blocks of 16 records
>>> # from each file.
>>> filenames = ["/var/data/file1.txt", "/var/data/file2.txt",
... "/var/data/file3.txt", "/var/data/file4.txt"]
>>> dataset = tf.data.Dataset.from_tensor_slices(filenames)
>>> def parse_fn(filename):
... return tf.data.Dataset.range(10)
>>> dataset = dataset.interleave(lambda x:
... tf.data.TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
... cycle_length=4, block_length=16)
The `cycle_length` and `block_length` arguments control the order in which
elements are produced. `cycle_length` controls the number of input elements
that are processed concurrently. If you set `cycle_length` to 1, this
transformation will handle one input element at a time, and will produce
identical results to `tf.data.Dataset.flat_map`. In general,
this transformation will apply `map_func` to `cycle_length` input elements,
open iterators on the returned `Dataset` objects, and cycle through them
producing `block_length` consecutive elements from each iterator, and
consuming the next input element each time it reaches the end of an
iterator.
For example:
>>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
>>> # NOTE: New lines indicate "block" boundaries.
>>> dataset = dataset.interleave(
... lambda x: Dataset.from_tensors(x).repeat(6),
... cycle_length=2, block_length=4)
>>> list(dataset.as_numpy_iterator())
[1, 1, 1, 1, \
2, 2, 2, 2, \
1, 1, \
2, 2, \
3, 3, 3, 3, \
4, 4, 4, 4, \
3, 3, \
4, 4, \
5, 5, 5, 5, \
5, 5]
NOTE: The order of elements yielded by this transformation is
deterministic, as long as `map_func` is a pure function. If
`map_func` contains any stateful operations, the order in which
that state is accessed is undefined.
Args:
map_func: A function mapping a dataset element to a dataset.
cycle_length: (Optional.) The number of input elements that will be
processed concurrently. If not specified, the value will be derived from
the number of available CPU cores. If the `num_parallel_calls` argument
is set to `tf.data.experimental.AUTOTUNE`, the `cycle_length` argument
also identifies the maximum degree of parallelism.
block_length: (Optional.) The number of consecutive elements to produce
from each input element before cycling to another input element.
num_parallel_calls: (Optional.) If specified, the implementation creates a
threadpool, which is used to fetch inputs from cycle elements
asynchronously and in parallel. The default behavior is to fetch inputs
from cycle elements synchronously with no parallelism. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return InterleaveDataset(self, map_func, cycle_length, block_length)
else:
return ParallelInterleaveDataset(self, map_func, cycle_length,
block_length, num_parallel_calls)
def filter(self, predicate):
"""Filters this dataset according to `predicate`.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.filter(lambda x: x < 3)
>>> list(dataset.as_numpy_iterator())
[1, 2]
>>> # `tf.math.equal(x, y)` is required for equality comparison
>>> def filter_fn(x):
... return tf.math.equal(x, 1)
>>> dataset = dataset.filter(filter_fn)
>>> list(dataset.as_numpy_iterator())
[1]
Args:
predicate: A function mapping a dataset element to a boolean.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate)
def apply(self, transformation_func):
"""Applies a transformation function to this dataset.
`apply` enables chaining of custom `Dataset` transformations, which are
represented as functions that take one `Dataset` argument and return a
transformed `Dataset`.
>>> dataset = tf.data.Dataset.range(100)
>>> def dataset_fn(ds):
... return ds.filter(lambda x: x < 5)
>>> dataset = dataset.apply(dataset_fn)
>>> list(dataset.as_numpy_iterator())
[0, 1, 2, 3, 4]
Args:
transformation_func: A function that takes one `Dataset` argument and
returns a `Dataset`.
Returns:
Dataset: The `Dataset` returned by applying `transformation_func` to this
dataset.
"""
dataset = transformation_func(self)
if not isinstance(dataset, DatasetV2):
raise TypeError(
"`transformation_func` must return a Dataset. Got {}.".format(
dataset))
dataset._input_datasets = [self] # pylint: disable=protected-access
return dataset
def window(self, size, shift=None, stride=1, drop_remainder=False):
"""Combines (nests of) input elements into a dataset of (nests of) windows.
A "window" is a finite dataset of flat elements of size `size` (or possibly
fewer if there are not enough input elements to fill the window and
`drop_remainder` evaluates to false).
The `stride` argument determines the stride of the input elements, and the
`shift` argument determines the shift of the window.
>>> dataset = tf.data.Dataset.range(7).window(2)
>>> for window in dataset:
... print(list(window.as_numpy_iterator()))
[0, 1]
[2, 3]
[4, 5]
[6]
>>> dataset = tf.data.Dataset.range(7).window(3, 2, 1, True)
>>> for window in dataset:
... print(list(window.as_numpy_iterator()))
[0, 1, 2]
[2, 3, 4]
[4, 5, 6]
>>> dataset = tf.data.Dataset.range(7).window(3, 1, 2, True)
>>> for window in dataset:
... print(list(window.as_numpy_iterator()))
[0, 2, 4]
[1, 3, 5]
[2, 4, 6]
Note that when the `window` transformation is applied to a dataset of
nested elements, it produces a dataset of nested windows.
>>> nested = ([1, 2, 3, 4], [5, 6, 7, 8])
>>> dataset = tf.data.Dataset.from_tensor_slices(nested).window(2)
>>> for window in dataset:
... def to_numpy(ds):
... return list(ds.as_numpy_iterator())
... print(tuple(to_numpy(component) for component in window))
([1, 2], [5, 6])
([3, 4], [7, 8])
>>> dataset = tf.data.Dataset.from_tensor_slices({'a': [1, 2, 3, 4]})
>>> dataset = dataset.window(2)
>>> for window in dataset:
... def to_numpy(ds):
... return list(ds.as_numpy_iterator())
... print({'a': to_numpy(window['a'])})
{'a': [1, 2]}
{'a': [3, 4]}
Args:
size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
of the input dataset to combine into a window.
shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
forward shift of the sliding window in each iteration. Defaults to
`size`.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
stride of the input elements in the sliding window.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether a window should be dropped in case its size is smaller than
`window_size`.
Returns:
Dataset: A `Dataset` of (nests of) windows -- a finite datasets of flat
elements created from the (nests of) input elements.
"""
if shift is None:
shift = size
return WindowDataset(self, size, shift, stride, drop_remainder)
  def reduce(self, initial_state, reduce_func):
    """Reduces the input dataset to a single element.
    The transformation calls `reduce_func` successively on every element of
    the input dataset until the dataset is exhausted, aggregating information in
    its internal state. The `initial_state` argument is used for the initial
    state and the final state is returned as the result.
    >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1).numpy()
    5
    >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y).numpy()
    10
    Args:
      initial_state: An element representing the initial state of the
        transformation.
      reduce_func: A function that maps `(old_state, input_element)` to
        `new_state`. It must take two arguments and return a new element
        The structure of `new_state` must match the structure of
        `initial_state`.
    Returns:
      A dataset element corresponding to the final state of the transformation.
    """
    with ops.name_scope("initial_state"):
      initial_state = structure.normalize_element(initial_state)
    state_structure = structure.type_spec_from_value(initial_state)
    # Iteratively rerun the reduce function until reaching a fixed point on
    # `state_structure`. Each pass may weaken the state shapes (e.g. a known
    # dimension becoming unknown), so the function is re-traced with the
    # weakened structure until the structure stops changing.
    need_to_rerun = True
    while need_to_rerun:
      # `add_to_graph=False` defers adding the traced function to the graph
      # until the fixed point is reached (only the final trace is added below).
      wrapped_func = StructuredFunctionWrapper(
          reduce_func,
          "reduce()",
          input_structure=(state_structure, self.element_spec),
          add_to_graph=False)
      # Extract and validate class information from the returned values.
      output_classes = wrapped_func.output_classes
      state_classes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
          state_structure)
      for new_state_class, state_class in zip(
          nest.flatten(output_classes), nest.flatten(state_classes)):
        if not issubclass(new_state_class, state_class):
          raise TypeError(
              "The element classes for the new state must match the initial "
              "state. Expected %s; got %s." %
              (state_classes, wrapped_func.output_classes))
      # Extract and validate type information from the returned values.
      output_types = wrapped_func.output_types
      state_types = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
          state_structure)
      for new_state_type, state_type in zip(
          nest.flatten(output_types), nest.flatten(state_types)):
        if new_state_type != state_type:
          raise TypeError(
              "The element types for the new state must match the initial "
              "state. Expected %s; got %s." %
              (state_types, wrapped_func.output_types))
      # Extract shape information from the returned values.
      output_shapes = wrapped_func.output_shapes
      state_shapes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
          state_structure)
      flat_state_shapes = nest.flatten(state_shapes)
      flat_new_state_shapes = nest.flatten(output_shapes)
      # Compute, per component, the most specific shape compatible with both
      # the current state shape and the shape produced by `reduce_func`.
      weakened_state_shapes = [
          original.most_specific_compatible_shape(new)
          for original, new in zip(flat_state_shapes, flat_new_state_shapes)
      ]
      # Rerun only if weakening actually changed some shape.
      need_to_rerun = False
      for original_shape, weakened_shape in zip(flat_state_shapes,
                                                weakened_state_shapes):
        if original_shape.ndims is not None and (
            weakened_shape.ndims is None or
            original_shape.as_list() != weakened_shape.as_list()):
          need_to_rerun = True
          break
      if need_to_rerun:
        # TODO(b/110122868): Support a "most specific compatible structure"
        # method for combining structures, to avoid using legacy structures
        # here.
        state_structure = structure.convert_legacy_structure(
            state_types,
            nest.pack_sequence_as(state_shapes, weakened_state_shapes),
            state_classes)
    # Fixed point reached: register the final traced function in the graph.
    reduce_func = wrapped_func.function
    reduce_func.add_to_graph(ops.get_default_graph())
    dataset = self._apply_options()
    # pylint: disable=protected-access
    return structure.from_compatible_tensor_list(
        state_structure,
        gen_dataset_ops.reduce_dataset(
            dataset._variant_tensor,
            structure.to_tensor_list(state_structure, initial_state),
            reduce_func.captured_inputs,
            f=reduce_func,
            output_shapes=structure.get_flat_tensor_shapes(state_structure),
            output_types=structure.get_flat_tensor_types(state_structure)))
def unbatch(self):
"""Splits elements of a dataset into multiple elements.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
>>> elements = [ [1, 2, 3], [1, 2], [1, 2, 3, 4] ]
>>> dataset = tf.data.Dataset.from_generator(lambda: elements, tf.int64)
>>> dataset = dataset.unbatch()
>>> list(dataset.as_numpy_iterator())
[1, 2, 3, 1, 2, 1, 2, 3, 4]
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
# NOTE(mrry): We must ensure that any non-tensor components in `dataset`
# are normalized to their dense tensor representation, so that the
# non-tensor oblivious unbatching logic will slice them appropriately.
# This leads to a somewhat inefficient re-encoding step for all non-tensor
# components.
#
# TODO(mrry): Consider optimizing this if it turns out to be a bottleneck.
def normalize(arg, *rest):
# pylint: disable=protected-access
if rest:
return structure.to_batched_tensor_list(self.element_spec,
(arg,) + rest)
else:
return structure.to_batched_tensor_list(self.element_spec, arg)
normalized_dataset = self.map(normalize)
# NOTE(mrry): Our `map()` has lost information about the structure of
# non-tensor components, so re-apply the structure of the original dataset.
restructured_dataset = _RestructuredDataset(normalized_dataset,
self.element_spec)
return _UnbatchDataset(restructured_dataset)
def with_options(self, options):
"""Returns a new `tf.data.Dataset` with the given options set.
The options are "global" in the sense they apply to the entire dataset.
If options are set multiple times, they are merged as long as different
options do not use different non-default values.
>>> ds = tf.data.Dataset.range(5)
>>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5),
... cycle_length=3,
... num_parallel_calls=3)
>>> options = tf.data.Options()
>>> # This will make the interleave order non-deterministic.
>>> options.experimental_deterministic = False
>>> ds = ds.with_options(options)
Args:
options: A `tf.data.Options` that identifies the options the use.
Returns:
Dataset: A `Dataset` with the given options.
Raises:
ValueError: when an option is set more than once to a non-default value
"""
return _OptionsDataset(self, options)
@tf_export(v1=["data.Dataset"])
class DatasetV1(DatasetV2):
  """Represents a potentially large set of elements.
  A `Dataset` can be used to represent an input pipeline as a
  collection of elements and a "logical plan" of transformations that act on
  those elements.
  """
  def __init__(self):
    # Subclasses are expected to define `_as_variant_tensor()`; the variant
    # tensor it returns backs the V2 base-class constructor.
    try:
      variant_tensor = self._as_variant_tensor()
    except AttributeError as e:
      # Distinguish "caller used the removed accessor" from "subclass __init__
      # referenced an attribute before calling super().__init__".
      if "_as_variant_tensor" in str(e):
        raise AttributeError("Please use _variant_tensor instead of "
                             "_as_variant_tensor() to obtain the variant "
                             "associated with a dataset")
      raise AttributeError("{}: A likely cause of this error is that the super "
                           "call for this dataset is not the last line of the "
                           "__init__ method. The base class causes the "
                           "_as_variant_tensor call in its constructor and "
                           "if that uses attributes defined in the __init__ "
                           "method, those attrs need to be defined before the "
                           "super call.".format(e))
    super(DatasetV1, self).__init__(variant_tensor)
  @abc.abstractmethod
  def _as_variant_tensor(self):
    """Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.
    Returns:
      A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
    """
    raise NotImplementedError("Dataset._as_variant_tensor")
  @deprecation.deprecated(
      None, "Use `for ... in dataset:` to iterate over a dataset. If using "
      "`tf.estimator`, return the `Dataset` object directly from your input "
      "function. As a last resort, you can use "
      "`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")
  def make_one_shot_iterator(self):
    """Creates an `Iterator` for enumerating the elements of this dataset.
    Note: The returned iterator will be initialized automatically.
    A "one-shot" iterator does not currently support re-initialization.
    Returns:
      An `Iterator` over the elements of this dataset.
    """
    return self._make_one_shot_iterator()
  def _make_one_shot_iterator(self):  # pylint: disable=missing-docstring
    if context.executing_eagerly():
      return iterator_ops.OwnedIterator(self)
    _ensure_same_dataset_graph(self)
    # Now that we create datasets at python object creation time, the capture
    # by value _make_dataset() function would try to capture these variant
    # tensor dataset inputs, which are marked as stateful ops and would throw
    # an error if we try and capture them. We therefore traverse the graph
    # to find all these ops and whitelist them so that the capturing
    # logic instead of throwing an error recreates these ops which is what was
    # happening before.
    all_ds_ops = traverse.obtain_all_variant_tensor_ops(self)
    graph_level_seed, op_level_seed = core_random_seed.get_seed(None)
    # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
    # a 0-argument function.
    @function.Defun(capture_by_value=True, whitelisted_stateful_ops=all_ds_ops)
    def _make_dataset():
      """Factory function for a dataset."""
      # NOTE(mrry): `Defun` does not capture the graph-level seed from the
      # enclosing graph, so if a graph-level seed is present we set the local
      # graph seed based on a combination of the graph- and op-level seeds.
      if graph_level_seed is not None:
        assert op_level_seed is not None
        core_random_seed.set_random_seed(
            (graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))
      dataset = self._apply_options()
      return dataset._variant_tensor  # pylint: disable=protected-access
    try:
      _make_dataset.add_to_graph(ops.get_default_graph())
    except ValueError as err:
      # Translate the low-level capture failure into an actionable message.
      if "Cannot capture a stateful node" in str(err):
        raise ValueError(
            "Failed to create a one-shot iterator for a dataset. "
            "`Dataset.make_one_shot_iterator()` does not support datasets that "
            "capture stateful objects, such as a `Variable` or `LookupTable`. "
            "In these cases, use `Dataset.make_initializable_iterator()`. "
            "(Original error: %s)" % err)
      else:
        six.reraise(ValueError, err)
    # pylint: disable=protected-access
    return iterator_ops.Iterator(
        gen_dataset_ops.one_shot_iterator(
            dataset_factory=_make_dataset, **self._flat_structure), None,
        get_legacy_output_types(self), get_legacy_output_shapes(self),
        get_legacy_output_classes(self))
  @deprecation.deprecated(
      None, "Use `for ... in dataset:` to iterate over a dataset. If using "
      "`tf.estimator`, return the `Dataset` object directly from your input "
      "function. As a last resort, you can use "
      "`tf.compat.v1.data.make_initializable_iterator(dataset)`.")
  def make_initializable_iterator(self, shared_name=None):
    """Creates an `Iterator` for enumerating the elements of this dataset.
    Note: The returned iterator will be in an uninitialized state,
    and you must run the `iterator.initializer` operation before using it:
    ```python
    dataset = ...
    iterator = dataset.make_initializable_iterator()
    # ...
    sess.run(iterator.initializer)
    ```
    Args:
      shared_name: (Optional.) If non-empty, the returned iterator will be
        shared under the given name across multiple sessions that share the same
        devices (e.g. when using a remote server).
    Returns:
      An `Iterator` over the elements of this dataset.
    Raises:
      RuntimeError: If eager execution is enabled.
    """
    return self._make_initializable_iterator(shared_name)
  def _make_initializable_iterator(self, shared_name=None):  # pylint: disable=missing-docstring
    if context.executing_eagerly():
      raise RuntimeError(
          "dataset.make_initializable_iterator is not supported when eager "
          "execution is enabled. Use `for element in dataset` instead.")
    _ensure_same_dataset_graph(self)
    dataset = self._apply_options()
    if shared_name is None:
      shared_name = ""
    iterator_resource = gen_dataset_ops.iterator_v2(
        container="", shared_name=shared_name, **self._flat_structure)
    # Colocate the initializer with the iterator resource so initialization
    # runs on the same device as the iterator.
    with ops.colocate_with(iterator_resource):
      initializer = gen_dataset_ops.make_iterator(
          dataset._variant_tensor,  # pylint: disable=protected-access
          iterator_resource)
    # pylint: disable=protected-access
    return iterator_ops.Iterator(
        iterator_resource, initializer, get_legacy_output_types(dataset),
        get_legacy_output_shapes(dataset), get_legacy_output_classes(dataset))
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_classes(dataset)`.")
  def output_classes(self):
    """Returns the class of each component of an element of this dataset.
    Returns:
      A nested structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
        self.element_spec)
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_shapes(dataset)`.")
  def output_shapes(self):
    """Returns the shape of each component of an element of this dataset.
    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
        self.element_spec)
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_types(dataset)`.")
  def output_types(self):
    """Returns the type of each component of an element of this dataset.
    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
        self.element_spec)
  @property
  def element_spec(self):
    # TODO(b/110122868): Remove this override once all `Dataset` instances
    # implement `element_structure`.
    return structure.convert_legacy_structure(
        self.output_types, self.output_shapes, self.output_classes)
  # The methods below mirror the DatasetV2 API, wrapping each result in
  # `DatasetV1Adapter` so that V1 users keep the legacy iterator and
  # `output_*` surface.
  @staticmethod
  @functools.wraps(DatasetV2.from_tensors)
  def from_tensors(tensors):
    return DatasetV1Adapter(DatasetV2.from_tensors(tensors))
  @staticmethod
  @functools.wraps(DatasetV2.from_tensor_slices)
  def from_tensor_slices(tensors):
    return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))
  @staticmethod
  @deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.")
  def from_sparse_tensor_slices(sparse_tensor):
    """Splits each rank-N `tf.SparseTensor` in this dataset row-wise.
    Args:
      sparse_tensor: A `tf.SparseTensor`.
    Returns:
      Dataset: A `Dataset` of rank-(N-1) sparse tensors.
    """
    return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))
  @staticmethod
  @functools.wraps(DatasetV2.from_generator)
  def from_generator(generator, output_types, output_shapes=None, args=None):
    return DatasetV1Adapter(DatasetV2.from_generator(
        generator, output_types, output_shapes, args))
  @staticmethod
  @functools.wraps(DatasetV2.range)
  def range(*args, **kwargs):
    return DatasetV1Adapter(DatasetV2.range(*args, **kwargs))
  @staticmethod
  @functools.wraps(DatasetV2.zip)
  def zip(datasets):
    return DatasetV1Adapter(DatasetV2.zip(datasets))
  @functools.wraps(DatasetV2.concatenate)
  def concatenate(self, dataset):
    return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))
  @functools.wraps(DatasetV2.prefetch)
  def prefetch(self, buffer_size):
    return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))
  @staticmethod
  @functools.wraps(DatasetV2.list_files)
  def list_files(file_pattern, shuffle=None, seed=None):
    return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))
  @functools.wraps(DatasetV2.repeat)
  def repeat(self, count=None):
    return DatasetV1Adapter(super(DatasetV1, self).repeat(count))
  @functools.wraps(DatasetV2.shuffle)
  def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
    return DatasetV1Adapter(super(DatasetV1, self).shuffle(
        buffer_size, seed, reshuffle_each_iteration))
  @functools.wraps(DatasetV2.cache)
  def cache(self, filename=""):
    return DatasetV1Adapter(super(DatasetV1, self).cache(filename))
  @functools.wraps(DatasetV2.take)
  def take(self, count):
    return DatasetV1Adapter(super(DatasetV1, self).take(count))
  @functools.wraps(DatasetV2.skip)
  def skip(self, count):
    return DatasetV1Adapter(super(DatasetV1, self).skip(count))
  @functools.wraps(DatasetV2.shard)
  def shard(self, num_shards, index):
    return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))
  @functools.wraps(DatasetV2.batch)
  def batch(self, batch_size, drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).batch(
        batch_size, drop_remainder))
  @functools.wraps(DatasetV2.padded_batch)
  def padded_batch(self,
                   batch_size,
                   padded_shapes,
                   padding_values=None,
                   drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).padded_batch(
        batch_size, padded_shapes, padding_values, drop_remainder))
  @functools.wraps(DatasetV2.map)
  def map(self, map_func, num_parallel_calls=None):
    # NOTE: unlike the V2 `map`, cardinality is not preserved here
    # (`preserve_cardinality=False`), matching legacy V1 behavior.
    if num_parallel_calls is None:
      return DatasetV1Adapter(
          MapDataset(self, map_func, preserve_cardinality=False))
    else:
      return DatasetV1Adapter(
          ParallelMapDataset(
              self, map_func, num_parallel_calls, preserve_cardinality=False))
  @deprecation.deprecated(None, "Use `tf.data.Dataset.map()")
  def map_with_legacy_function(self, map_func, num_parallel_calls=None):
    """Maps `map_func` across the elements of this dataset.
    NOTE: This is an escape hatch for existing uses of `map` that do not work
    with V2 functions. New uses are strongly discouraged and existing uses
    should migrate to `map` as this method will be removed in V2.
    Args:
      map_func: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to
        another nested structure of tensors.
      num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
        representing the number elements to process asynchronously in parallel.
        If not specified, elements will be processed sequentially. If the value
        `tf.data.experimental.AUTOTUNE` is used, then the number of parallel
        calls is set dynamically based on available CPU.
    Returns:
      Dataset: A `Dataset`.
    """
    if num_parallel_calls is None:
      return DatasetV1Adapter(
          MapDataset(
              self,
              map_func,
              preserve_cardinality=False,
              use_legacy_function=True))
    else:
      return DatasetV1Adapter(
          ParallelMapDataset(
              self,
              map_func,
              num_parallel_calls,
              preserve_cardinality=False,
              use_legacy_function=True))
  @functools.wraps(DatasetV2.flat_map)
  def flat_map(self, map_func):
    return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))
  @functools.wraps(DatasetV2.interleave)
  def interleave(self,
                 map_func,
                 cycle_length=AUTOTUNE,
                 block_length=1,
                 num_parallel_calls=None):
    return DatasetV1Adapter(super(DatasetV1, self).interleave(
        map_func, cycle_length, block_length, num_parallel_calls))
  @functools.wraps(DatasetV2.filter)
  def filter(self, predicate):
    return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))
  @deprecation.deprecated(None, "Use `tf.data.Dataset.filter()")
  def filter_with_legacy_function(self, predicate):
    """Filters this dataset according to `predicate`.
    NOTE: This is an escape hatch for existing uses of `filter` that do not work
    with V2 functions. New uses are strongly discouraged and existing uses
    should migrate to `filter` as this method will be removed in V2.
    Args:
      predicate: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        scalar `tf.bool` tensor.
    Returns:
      Dataset: The `Dataset` containing the elements of this dataset for which
        `predicate` is `True`.
    """
    # NOTE(review): unlike `filter`, the result here is not wrapped in
    # `DatasetV1Adapter` — presumably intentional for this legacy escape
    # hatch, but worth confirming against callers.
    return FilterDataset(self, predicate, use_legacy_function=True)
  @functools.wraps(DatasetV2.apply)
  def apply(self, transformation_func):
    return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))
  @functools.wraps(DatasetV2.window)
  def window(self, size, shift=None, stride=1, drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).window(
        size, shift, stride, drop_remainder))
  @functools.wraps(DatasetV2.unbatch)
  def unbatch(self):
    return DatasetV1Adapter(super(DatasetV1, self).unbatch())
  @functools.wraps(DatasetV2.with_options)
  def with_options(self, options):
    return DatasetV1Adapter(super(DatasetV1, self).with_options(options))
# Export the API-appropriate alias: `Dataset` is the V2 class when the TF 2.x
# behavior is enabled, otherwise the V1 compatibility class.
Dataset = DatasetV2 if tf2.enabled() else DatasetV1
class DatasetV1Adapter(DatasetV1):
  """Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API."""
  def __init__(self, dataset):
    # Keep a reference to the wrapped V2 dataset; all protocol methods below
    # delegate to it.
    self._dataset = dataset
    super(DatasetV1Adapter, self).__init__()
  def _as_variant_tensor(self):
    # Expose the wrapped dataset's variant tensor (used by the V1 base
    # constructor).
    return self._dataset._variant_tensor  # pylint: disable=protected-access
  def _has_captured_ref(self):
    return self._dataset._has_captured_ref()  # pylint: disable=protected-access
  def _inputs(self):
    return self._dataset._inputs()  # pylint: disable=protected-access
  def _functions(self):
    return self._dataset._functions()  # pylint: disable=protected-access
  def options(self):
    return self._dataset.options()
  @property
  def element_spec(self):
    # Bypass the legacy `output_*`-based override in DatasetV1 and forward the
    # wrapped dataset's spec directly.
    return self._dataset.element_spec  # pylint: disable=protected-access
  def __iter__(self):
    return iter(self._dataset)
def _ensure_same_dataset_graph(dataset):
  """Walks the dataset graph to ensure all datasets come from the same graph.

  Performs a breadth-first traversal over `dataset` and its (transitive)
  input datasets and raises if any of them was created in a graph other
  than the current default graph.

  Args:
    dataset: The `Dataset` whose input pipeline is checked.

  Raises:
    ValueError: If any dataset in the pipeline was created in a different
      graph than the current default graph.
  """
  # pylint: disable=protected-access
  current_graph = ops.get_default_graph()
  bfs_q = Queue.Queue()
  bfs_q.put(dataset)
  # Use a set for O(1) membership tests (the original list made the BFS
  # quadratic). Datasets use default identity-based equality/hashing, so set
  # membership matches the previous list `in` check.
  visited = set()
  while not bfs_q.empty():
    ds = bfs_q.get()
    visited.add(ds)
    ds_graph = ds._graph
    if current_graph != ds_graph:
      raise ValueError(
          "The graph (" + str(current_graph) + ") of the iterator is different "
          "from the graph (" + str(ds_graph) + ") the dataset: " +
          str(ds._variant_tensor) + " was created in. If you are using the "
          "Estimator API, make sure that no part of the dataset returned by "
          "the `input_fn` function is defined outside the `input_fn` function. "
          "Please ensure that all datasets in the pipeline are created in the "
          "same graph as the iterator.")
    for input_ds in ds._inputs():
      if input_ds not in visited:
        bfs_q.put(input_ds)
@tf_export(v1=["data.make_one_shot_iterator"])
def make_one_shot_iterator(dataset):
  """Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
  Note: The returned iterator will be initialized automatically.
  A "one-shot" iterator does not support re-initialization.
  Args:
    dataset: A `tf.data.Dataset`.
  Returns:
    A `tf.compat.v1.data.Iterator` over the elements of this dataset.
  """
  try:
    # Call the defined `_make_one_shot_iterator()` if there is one, because some
    # datasets (e.g. for prefetching) override its behavior.
    return dataset._make_one_shot_iterator()  # pylint: disable=protected-access
  except AttributeError:
    # V2 datasets have no `_make_one_shot_iterator`; wrap them in the V1
    # adapter, which does.
    return DatasetV1Adapter(dataset)._make_one_shot_iterator()  # pylint: disable=protected-access
@tf_export(v1=["data.make_initializable_iterator"])
def make_initializable_iterator(dataset, shared_name=None):
  """Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
  Note: The returned iterator will be in an uninitialized state,
  and you must run the `iterator.initializer` operation before using it:
  ```python
  dataset = ...
  iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
  # ...
  sess.run(iterator.initializer)
  ```
  Args:
    dataset: A `tf.data.Dataset`.
    shared_name: (Optional.) If non-empty, the returned iterator will be shared
      under the given name across multiple sessions that share the same devices
      (e.g. when using a remote server).
  Returns:
    A `tf.compat.v1.data.Iterator` over the elements of `dataset`.
  Raises:
    RuntimeError: If eager execution is enabled.
  """
  try:
    # Call the defined `_make_initializable_iterator()` if there is one, because
    # some datasets (e.g. for prefetching) override its behavior.
    return dataset._make_initializable_iterator(shared_name)  # pylint: disable=protected-access
  except AttributeError:
    # V2 datasets have no `_make_initializable_iterator`; wrap them in the V1
    # adapter, which does.
    return DatasetV1Adapter(dataset)._make_initializable_iterator(shared_name)  # pylint: disable=protected-access
@tf_export("data.experimental.get_structure")
def get_structure(dataset_or_iterator):
  """Returns the type specification of an element of a `Dataset` or `Iterator`.
  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
  Returns:
    A nested structure of `tf.TypeSpec` objects matching the structure of an
    element of `dataset_or_iterator` and specifying the type of individual
    components.
  Raises:
    TypeError: If `dataset_or_iterator` is not a `Dataset` or `Iterator` object.
  """
  try:
    # Both `Dataset` and `Iterator` expose `element_spec`; anything else is
    # rejected below.
    return dataset_or_iterator.element_spec  # pylint: disable=protected-access
  except AttributeError:
    raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator "
                    "object, but got %s." % type(dataset_or_iterator))
@tf_export(v1=["data.get_output_classes"])
def get_legacy_output_classes(dataset_or_iterator):
  """Returns the output classes of a `Dataset` or `Iterator` elements.

  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_classes` property.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A nested structure of Python `type` objects matching the structure of the
    dataset / iterator elements and specifying the class of the individual
    components.
  """
  element_spec = get_structure(dataset_or_iterator)
  # Translate each component's TypeSpec into its legacy output class.
  return nest.map_structure(
      lambda spec: spec._to_legacy_output_classes(),  # pylint: disable=protected-access
      element_spec)
@tf_export(v1=["data.get_output_shapes"])
def get_legacy_output_shapes(dataset_or_iterator):
  """Returns the output shapes of a `Dataset` or `Iterator` elements.

  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_shapes` property.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A nested structure of `tf.TensorShape` objects matching the structure of
    the dataset / iterator elements and specifying the shape of the individual
    components.
  """
  element_spec = get_structure(dataset_or_iterator)
  # Translate each component's TypeSpec into its legacy output shape.
  return nest.map_structure(
      lambda spec: spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
      element_spec)
@tf_export(v1=["data.get_output_types"])
def get_legacy_output_types(dataset_or_iterator):
  """Returns the output types of a `Dataset` or `Iterator` elements.
  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_types` property.
  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
  Returns:
    A nested structure of `tf.DType` objects matching the structure of
    dataset / iterator elements and specifying the type of the individual
    components.
  """
  return nest.map_structure(
      lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
      get_structure(dataset_or_iterator))
@tf_export("data.Options")
class Options(options_lib.OptionsBase):
  """Represents options for tf.data.Dataset.

  An `Options` object can be, for instance, used to control which graph
  optimizations to apply or whether to use performance modeling to dynamically
  tune the parallelism of operations such as `tf.data.Dataset.map` or
  `tf.data.Dataset.interleave`.

  After constructing an `Options` object, use `dataset.with_options(options)` to
  apply the options to a dataset.

  >>> dataset = tf.data.Dataset.range(3)
  >>> options = tf.data.Options()
  >>> # Set options here.
  >>> dataset = dataset.with_options(options)
  """

  experimental_deterministic = options_lib.create_option(
      name="experimental_deterministic",
      ty=bool,
      docstring=
      "Whether the outputs need to be produced in deterministic order. If None,"
      " defaults to True.")

  experimental_distribute = options_lib.create_option(
      name="experimental_distribute",
      ty=distribute_options.DistributeOptions,
      docstring=
      "The distribution strategy options associated with the dataset. See "
      "`tf.data.experimental.DistributeOptions` for more details.",
      default_factory=distribute_options.DistributeOptions)

  experimental_optimization = options_lib.create_option(
      name="experimental_optimization",
      ty=optimization_options.OptimizationOptions,
      docstring=
      "The optimization options associated with the dataset. See "
      "`tf.data.experimental.OptimizationOptions` for more details.",
      default_factory=optimization_options.OptimizationOptions)

  experimental_slack = options_lib.create_option(
      name="experimental_slack",
      ty=bool,
      docstring="Whether to introduce 'slack' in the last `prefetch` of the "
      "input pipeline, if it exists. This may reduce CPU contention with "
      "accelerator host-side activity at the start of a step. The slack "
      "frequency is determined by the number of devices attached to this "
      "input pipeline. If None, defaults to False.")

  experimental_stats = options_lib.create_option(
      name="experimental_stats",
      ty=stats_options.StatsOptions,
      docstring=
      "The statistics options associated with the dataset. See "
      "`tf.data.experimental.StatsOptions` for more details.",
      default_factory=stats_options.StatsOptions)

  experimental_threading = options_lib.create_option(
      name="experimental_threading",
      ty=threading_options.ThreadingOptions,
      docstring=
      "The threading options associated with the dataset. See "
      "`tf.data.experimental.ThreadingOptions` for more details.",
      default_factory=threading_options.ThreadingOptions)

  # Fix: the implicit string concatenation previously joined "...in which we"
  # directly to "completely...", rendering "wecompletely" in the option's
  # docstring; a separating space has been added.
  experimental_external_state_policy = options_lib.create_option(
      name="experimental_external_state_policy",
      ty=distribute_options.ExternalStatePolicy,
      docstring="By default, tf.data will refuse to serialize a dataset or "
      "checkpoint its iterator if the dataset contains a stateful op as the "
      "serialization / checkpointing won't be able to capture its state. "
      "Users can -- at their own risk -- override this restriction by "
      "explicitly specifying that they are fine throwing away the state "
      "in these ops. There are three settings available - IGNORE: in which we "
      "completely ignore any state; WARN: We warn the user that some state "
      "might be thrown away; FAIL: We fail if any state is being captured.",
      default_factory=lambda: distribute_options.ExternalStatePolicy.WARN)

  def _graph_rewrites(self):
    """Produces the list of enabled static graph rewrites."""
    result = []
    if self.experimental_optimization is not None:
      result.extend(self.experimental_optimization._graph_rewrites())  # pylint: disable=protected-access
    else:
      # Apply default options when no explicit optimization options are set.
      result.extend(
          optimization_options.OptimizationOptions()._graph_rewrites())  # pylint: disable=protected-access

    # `is False` is deliberate: None means "unset" and must not enable sloppy.
    if self.experimental_deterministic is False:
      result.append("make_sloppy")
    if self.experimental_stats and self.experimental_stats.latency_all_edges:
      result.append("latency_all_edges")
    if self.experimental_slack:
      result.append("slack")
    if (self.experimental_distribute and
        self.experimental_distribute._make_stateless):  # pylint: disable=protected-access
      result.append("make_stateless")
    return result

  def _graph_rewrite_configs(self):
    """Produces the list of configurations for enabled graph optimizations."""
    result = []
    if self.experimental_optimization:
      result.extend(self.experimental_optimization._graph_rewrite_configs())  # pylint: disable=protected-access

    if self.experimental_slack:
      num_devices = self.experimental_distribute.num_devices
      if num_devices is None:
        num_devices = 1
      result.append("slack:slack_period:%d" % num_devices)
    return result

  def _autotune_settings(self):
    """Returns the autotune settings from the optimization options."""
    if self.experimental_optimization is not None:
      return self.experimental_optimization._autotune_settings()  # pylint: disable=protected-access

    # Return default autotune options.
    return optimization_options.OptimizationOptions()._autotune_settings()  # pylint: disable=protected-access

  def merge(self, options):
    """Merges itself with the given `tf.data.Options`.

    The given `tf.data.Options` can be merged as long as there does not exist an
    attribute that is set to different values in `self` and `options`.

    Args:
      options: a `tf.data.Options` to merge with

    Raises:
      ValueError: if the given `tf.data.Options` cannot be merged

    Returns:
      New `tf.data.Options()` object which is the result of merging self with
      the input `tf.data.Options`.
    """
    return options_lib.merge_options(self, options)
class DatasetSource(DatasetV2):
  """Abstract base class for datasets that consume no other datasets."""

  def _inputs(self):
    # A source dataset has no upstream datasets feeding into it.
    return []
class UnaryDataset(DatasetV2):
  """Abstract base class for datasets that transform a single input dataset."""

  def __init__(self, input_dataset, variant_tensor):
    self._input_dataset = input_dataset
    super(UnaryDataset, self).__init__(variant_tensor)

  def _inputs(self):
    # Exactly one upstream dataset: the one being transformed.
    return [self._input_dataset]
class UnaryUnchangedStructureDataset(UnaryDataset):
  """A unary dataset whose element spec is identical to its input's."""

  def __init__(self, input_dataset, variant_tensor):
    self._input_dataset = input_dataset
    super(UnaryUnchangedStructureDataset, self).__init__(
        input_dataset, variant_tensor)

  @property
  def element_spec(self):
    # The element structure passes through unchanged from the input dataset.
    return self._input_dataset.element_spec
class TensorDataset(DatasetSource):
  """A `Dataset` holding exactly one element."""

  def __init__(self, element):
    """See `Dataset.from_tensors()` for details."""
    # Canonicalize the user value, capture its type spec, and flatten it into
    # the tensor list expected by the `TensorDataset` kernel.
    normalized = structure.normalize_element(element)
    spec = structure.type_spec_from_value(normalized)
    self._structure = spec
    self._tensors = structure.to_tensor_list(spec, normalized)
    variant_tensor = gen_dataset_ops.tensor_dataset(
        self._tensors,
        output_shapes=structure.get_flat_tensor_shapes(spec))
    super(TensorDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class TensorSliceDataset(DatasetSource):
  """A `Dataset` of slices from a dataset element."""

  def __init__(self, element):
    """See `Dataset.from_tensor_slices()` for details."""
    normalized = structure.normalize_element(element)
    batched_spec = structure.type_spec_from_value(normalized)
    self._tensors = structure.to_batched_tensor_list(batched_spec, normalized)

    def _unbatch_spec(component_spec):
      return component_spec._unbatch()  # pylint: disable=protected-access

    # Each element of this dataset is one slice, so its spec is the
    # per-component unbatched spec of the input value.
    self._structure = nest.map_structure(_unbatch_spec, batched_spec)

    # All components must agree on the size of the leading (sliced) dimension.
    leading_dim = tensor_shape.Dimension(
        tensor_shape.dimension_value(self._tensors[0].get_shape()[0]))
    for tensor in self._tensors[1:]:
      leading_dim.assert_is_compatible_with(
          tensor_shape.Dimension(
              tensor_shape.dimension_value(tensor.get_shape()[0])))

    variant_tensor = gen_dataset_ops.tensor_slice_dataset(
        self._tensors,
        output_shapes=structure.get_flat_tensor_shapes(self._structure))
    super(TensorSliceDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class SparseTensorSliceDataset(DatasetSource):
  """A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows.

  Each element is an `(indices, values, dense_shape)` triple of tensors
  describing one slice of the input sparse tensor.
  """

  def __init__(self, sparse_tensor):
    """See `Dataset.from_sparse_tensor_slices()` for details.

    Args:
      sparse_tensor: The `tf.SparseTensor` to slice along its first dimension.

    Raises:
      TypeError: If `sparse_tensor` is not a `tf.SparseTensor`.
    """
    if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
      raise TypeError(
          "`sparse_tensor` must be a `tf.SparseTensor` object. Was {}.".format(
              sparse_tensor))
    self._sparse_tensor = sparse_tensor
    indices_shape = self._sparse_tensor.indices.get_shape()
    shape_shape = self._sparse_tensor.dense_shape.get_shape()
    # Each slice drops the leading (sliced) dimension, so its rank is one less
    # than the input's. Merging the value derived from `indices` with the one
    # derived from `dense_shape` cross-checks the two statically-known ranks.
    rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)
    # Element spec: (indices, values, dense_shape) of a rank-`rank` sparse
    # tensor with a statically unknown number of values per slice.
    self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64),
                       tensor_spec.TensorSpec([None],
                                              self._sparse_tensor.dtype),
                       tensor_spec.TensorSpec([rank], dtypes.int64))
    variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(
        self._sparse_tensor.indices, self._sparse_tensor.values,
        self._sparse_tensor.dense_shape)
    super(SparseTensorSliceDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class _VariantDataset(DatasetV2):
  """A Dataset wrapper around a `tf.variant`-typed function argument."""

  def __init__(self, dataset_variant, structure):
    # The element spec cannot be recovered from the variant tensor alone, so
    # the caller must supply it explicitly.
    self._structure = structure
    super(_VariantDataset, self).__init__(dataset_variant)

  def _inputs(self):
    # The wrapped variant exposes no Python-level input datasets.
    return []

  @property
  def element_spec(self):
    return self._structure
class _NestedVariant(composite_tensor.CompositeTensor):
  """Composite tensor wrapping a non-scalar `tf.variant` of datasets."""

  def __init__(self, variant_tensor, element_spec, dataset_shape):
    self._variant_tensor = variant_tensor
    self._element_spec = element_spec
    self._dataset_shape = dataset_shape

  @property
  def _type_spec(self):
    # Reconstructs the spec from the element spec plus the batch shape.
    return DatasetSpec(self._element_spec, self._dataset_shape)
@tf_export("data.experimental.from_variant")
def from_variant(variant, structure):
  """Constructs a dataset from the given variant and structure.

  Args:
    variant: A scalar `tf.variant` tensor representing a dataset.
    structure: A `tf.data.experimental.Structure` object representing the
      structure of each element in the dataset.

  Returns:
    A `tf.data.Dataset` instance.
  """
  # pylint: disable=protected-access
  dataset = _VariantDataset(variant, structure)
  return dataset
@tf_export("data.experimental.to_variant")
def to_variant(dataset):
  """Returns a variant representing the given dataset.

  Args:
    dataset: A `tf.data.Dataset`.

  Returns:
    A scalar `tf.variant` tensor representing the given dataset.
  """
  # pylint: disable=protected-access
  return dataset._variant_tensor
@tf_export(
    "data.DatasetSpec",
    v1=["data.DatasetSpec", "data.experimental.DatasetStructure"])
class DatasetSpec(type_spec.BatchableTypeSpec):
  """Type specification for `tf.data.Dataset`.

  See `tf.TypeSpec` for more information about TensorFlow type specifications.

  >>> dataset = tf.data.Dataset.range(3)
  >>> tf.data.DatasetSpec.from_value(dataset)
  DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, name=None), TensorShape([]))
  """

  __slots__ = ["_element_spec", "_dataset_shape"]

  def __init__(self, element_spec, dataset_shape=()):
    # `dataset_shape` describes a (possibly batched) collection of datasets;
    # the default scalar shape denotes a single, unbatched dataset.
    self._element_spec = element_spec
    self._dataset_shape = tensor_shape.as_shape(dataset_shape)

  @property
  def value_type(self):
    return _VariantDataset

  def _serialize(self):
    # Ordering must mirror the positional arguments of `__init__`.
    return (self._element_spec, self._dataset_shape)

  @property
  def _component_specs(self):
    # A dataset is represented by a single `tf.variant` tensor.
    return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant)

  def _to_components(self, value):
    return value._variant_tensor  # pylint: disable=protected-access

  def _from_components(self, components):
    # pylint: disable=protected-access
    if self._dataset_shape.ndims == 0:
      # Scalar shape: reconstruct a single dataset from the variant.
      return _VariantDataset(components, self._element_spec)
    else:
      # Batched shape: wrap in a composite tensor carrying the batch shape.
      return _NestedVariant(components, self._element_spec, self._dataset_shape)

  def _to_tensor_list(self, value):
    return [
        ops.convert_to_tensor(
            tf_nest.map_structure(lambda x: x._variant_tensor, value))  # pylint: disable=protected-access
    ]

  @staticmethod
  def from_value(value):
    """Creates a `DatasetSpec` for the given `tf.data.Dataset` value."""
    return DatasetSpec(value.element_spec)  # pylint: disable=protected-access

  def _batch(self, batch_size):
    # Prepends the batch dimension (`batch_size` may be None if unknown).
    return DatasetSpec(
        self._element_spec,
        tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape))

  def _unbatch(self):
    if self._dataset_shape.ndims == 0:
      raise ValueError("Unbatching a dataset is only supported for rank >= 1")
    return DatasetSpec(self._element_spec, self._dataset_shape[1:])

  def _to_batched_tensor_list(self, value):
    if self._dataset_shape.ndims == 0:
      raise ValueError("Unbatching a dataset is only supported for rank >= 1")
    return self._to_tensor_list(value)

  def _to_legacy_output_types(self):
    return self

  def _to_legacy_output_shapes(self):
    return self

  def _to_legacy_output_classes(self):
    return self
class StructuredFunctionWrapper(object):
  """A function wrapper that supports structured arguments and return values."""

  # pylint: disable=protected-access
  def __init__(self,
               func,
               transformation_name,
               dataset=None,
               input_classes=None,
               input_shapes=None,
               input_types=None,
               input_structure=None,
               add_to_graph=True,
               use_legacy_function=False,
               defun_kwargs=None):
    """Creates a new `StructuredFunctionWrapper` for the given function.

    Args:
      func: A function from a nested structure to another nested structure.
      transformation_name: Human-readable name of the transformation in which
        this function is being instantiated, for error messages.
      dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this
        dataset will be assumed as the structure for `func` arguments; otherwise
        `input_classes`, `input_shapes`, and `input_types` must be defined.
      input_classes: (Optional.) A nested structure of `type`. If given, this
        argument defines the Python types for `func` arguments.
      input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If
        given, this argument defines the shapes and structure for `func`
        arguments.
      input_types: (Optional.) A nested structure of `tf.DType`. If given, this
        argument defines the element types and structure for `func` arguments.
      input_structure: (Optional.) A `Structure` object. If given, this argument
        defines the element types and structure for `func` arguments.
      add_to_graph: (Optional.) If `True`, the function will be added to the
        default graph, if it exists.
      use_legacy_function: (Optional.) A boolean that determines whether the
        function be created using `tensorflow.python.eager.function.defun`
        (default behavior) or `tensorflow.python.framework.function.Defun`
        (legacy behavior).
      defun_kwargs: (Optional.) A dictionary mapping string argument names to
        values. If supplied, will be passed to `function` as keyword arguments.

    Raises:
      ValueError: If an invalid combination of `dataset`, `input_classes`,
        `input_shapes`, and `input_types` is passed.
    """
    # Exactly one of {`input_structure`, `dataset`, the full legacy triple of
    # `input_classes`/`input_shapes`/`input_types`} may define the input
    # structure; any other combination is rejected below.
    if input_structure is None:
      if dataset is None:
        if input_classes is None or input_shapes is None or input_types is None:
          raise ValueError("Either `dataset`, `input_structure` or all of "
                           "`input_classes`, `input_shapes`, and `input_types` "
                           "must be specified.")
        self._input_structure = structure.convert_legacy_structure(
            input_types, input_shapes, input_classes)
      else:
        if not (input_classes is None and input_shapes is None and
                input_types is None):
          raise ValueError("Either `dataset`, `input_structure` or all of "
                           "`input_classes`, `input_shapes`, and `input_types` "
                           "must be specified.")
        self._input_structure = dataset.element_spec
    else:
      if not (dataset is None and input_classes is None and input_shapes is None
              and input_types is None):
        raise ValueError("Either `dataset`, `input_structure`, or all of "
                         "`input_classes`, `input_shapes`, and `input_types` "
                         "must be specified.")
      self._input_structure = input_structure
    self._func = func
    # There is no graph to add in eager mode.
    add_to_graph &= not context.executing_eagerly()
    # There are some lifetime issues when a legacy function is not added to a
    # out-living graph. It's already deprecated so de-priotizing the fix.
    add_to_graph |= use_legacy_function
    if defun_kwargs is None:
      defun_kwargs = {}
    # Derive a readable graph-function name from the transformation name
    # (e.g. "Dataset.map()" -> "Dataset_map") plus the user function's name.
    readable_transformation_name = transformation_name.replace(
        ".", "_")[:-2] if len(transformation_name) > 2 else ""
    func_name = "_".join(
        [readable_transformation_name,
         function_utils.get_func_name(func)])
    # Sanitize function name to remove symbols that interfere with graph
    # construction.
    for symbol in ["<", ">", "\\", "'", " "]:
      func_name = func_name.replace(symbol, "")
    ag_ctx = autograph_ctx.control_status_ctx()

    def _warn_if_collections(transformation_name):
      """Prints a warning if the given graph uses common graph collections.

      NOTE(mrry): Currently a warning is only generated for resources. Any
      variables created will be automatically hoisted out to the outermost scope
      using `init_scope()`. Some collections (such as for control-flow contexts)
      are benign and should not generate a warning.

      Args:
        transformation_name: A human-readable name for the transformation.
      """
      warnings.warn("Creating resources inside a function passed to %s "
                    "is not supported. Create each resource outside the "
                    "function, and capture it inside the function to use it." %
                    transformation_name, stacklevel=5)

    def _wrapper_helper(*args):
      """Wrapper for passing nested structures to and from tf.data functions."""
      nested_args = structure.from_compatible_tensor_list(
          self._input_structure, args)
      if not _should_unpack_args(nested_args):
        nested_args = (nested_args,)
      ret = autograph.tf_convert(func, ag_ctx)(*nested_args)
      # If `func` returns a list of tensors, `nest.flatten()` and
      # `ops.convert_to_tensor()` would conspire to attempt to stack
      # those tensors into a single tensor, because the customized
      # version of `nest.flatten()` does not recurse into lists. Since
      # it is more likely that the list arose from returning the
      # result of an operation (such as `tf.numpy_function()`) that returns a
      # list of not-necessarily-stackable tensors, we treat the
      # returned value is a `tuple` instead. A user wishing to pack
      # the return value into a single tensor can use an explicit
      # `tf.stack()` before returning.
      if isinstance(ret, list):
        ret = tuple(ret)
      try:
        # Capture the output structure as a side effect of tracing; it is
        # exposed afterwards via the `output_structure` property.
        self._output_structure = structure.type_spec_from_value(ret)
      except (ValueError, TypeError):
        six.reraise(
            TypeError,
            TypeError("Unsupported return value from function passed to "
                      "%s: %s." % (transformation_name, ret)),
            sys.exc_info()[2])
      return ret

    if use_legacy_function:
      func_name = func_name + "_" + str(ops.uid())

      @function.Defun(
          *structure.get_flat_tensor_types(self._input_structure),
          func_name=func_name,
          **defun_kwargs)
      def wrapper_fn(*args):
        ret = _wrapper_helper(*args)
        # _warn_if_collections(transformation_name, ops.get_default_graph(), 0)
        return structure.to_tensor_list(self._output_structure, ret)

      self._function = wrapper_fn
      resource_tracker = tracking.ResourceTracker()
      with tracking.resource_tracker_scope(resource_tracker):
        if add_to_graph:
          self._function.add_to_graph(ops.get_default_graph())
        else:
          # Use the private method that will execute `wrapper_fn` but delay
          # adding it to the graph in case (e.g.) we need to rerun the function.
          self._function._create_definition_if_needed()
        if resource_tracker.resources:
          _warn_if_collections(transformation_name)
    else:
      defun_kwargs.update({"func_name": func_name})

      # Note: _wrapper_helper will apply autograph based on context.
      @eager_function.defun_with_attributes(
          input_signature=structure.get_flat_tensor_specs(
              self._input_structure),
          autograph=False,
          attributes=defun_kwargs)
      def wrapper_fn(*args):  # pylint: disable=missing-docstring
        ret = _wrapper_helper(*args)
        ret = structure.to_tensor_list(self._output_structure, ret)
        return [ops.convert_to_tensor(t) for t in ret]

      resource_tracker = tracking.ResourceTracker()
      with tracking.resource_tracker_scope(resource_tracker):
        # TODO(b/141462134): Switch to using garbage collection.
        self._function = wrapper_fn.get_concrete_function()
        if add_to_graph:
          self._function.add_to_graph(ops.get_default_graph())
        if resource_tracker.resources:
          _warn_if_collections(transformation_name)

      outer_graph_seed = ops.get_default_graph().seed
      if outer_graph_seed and self._function.graph.seed == outer_graph_seed:
        if self._function.graph._seed_used:
          warnings.warn(
              "Seed %s from outer graph might be getting used by function %s, "
              "if the random op has not been provided any seed. Explicitly set "
              "the seed in the function if this is not the intended behavior."
              %(outer_graph_seed, func_name), stacklevel=4)
  # pylint: enable=protected-access

  @property
  def output_structure(self):
    # Type spec of `func`'s return value; set as a side effect of tracing in
    # `_wrapper_helper` above.
    return self._output_structure

  @property
  def output_classes(self):
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
        self._output_structure)

  @property
  def output_shapes(self):
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
        self._output_structure)

  @property
  def output_types(self):
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
        self._output_structure)

  @property
  def function(self):
    return self._function
class _GeneratorDataset(DatasetSource):
  """A `Dataset` that generates elements by invoking a function."""

  def __init__(self, init_args, init_func, next_func, finalize_func):
    """Constructs a `_GeneratorDataset`.

    Args:
      init_args: A nested structure representing the arguments to `init_func`.
      init_func: A TensorFlow function that will be called on `init_args` each
        time a C++ iterator over this dataset is constructed. Returns a nested
        structure representing the "state" of the dataset.
      next_func: A TensorFlow function that will be called on the result of
        `init_func` to produce each element, and that raises `OutOfRangeError`
        to terminate iteration.
      finalize_func: A TensorFlow function that will be called on the result of
        `init_func` immediately before a C++ iterator over this dataset is
        destroyed. The return value is ignored.
    """
    self._init_args = init_args
    self._init_structure = structure.type_spec_from_value(init_args)
    # `init_func` consumes `init_args`; `next_func` and `finalize_func` both
    # consume the "state" structure that `init_func` returns.
    self._init_func = StructuredFunctionWrapper(
        init_func,
        self._transformation_name(),
        input_structure=self._init_structure)
    self._next_func = StructuredFunctionWrapper(
        next_func,
        self._transformation_name(),
        input_structure=self._init_func.output_structure)
    self._finalize_func = StructuredFunctionWrapper(
        finalize_func,
        self._transformation_name(),
        input_structure=self._init_func.output_structure)
    # The flattened `init_args` plus each function's captured inputs are
    # passed to the kernel so it can invoke the traced functions.
    variant_tensor = gen_dataset_ops.generator_dataset(
        structure.to_tensor_list(self._init_structure, self._init_args) +
        self._init_func.function.captured_inputs,
        self._next_func.function.captured_inputs,
        self._finalize_func.function.captured_inputs,
        init_func=self._init_func.function,
        next_func=self._next_func.function,
        finalize_func=self._finalize_func.function,
        **self._flat_structure)
    super(_GeneratorDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    # Elements are whatever `next_func` returns.
    return self._next_func.output_structure

  def _transformation_name(self):
    return "Dataset.from_generator()"
class ZipDataset(DatasetV2):
  """A `Dataset` that zips its inputs together."""

  def __init__(self, datasets):
    """See `Dataset.zip()` for details."""
    # Validate every leaf of the (possibly nested) argument up front.
    for ds in nest.flatten(datasets):
      if isinstance(ds, DatasetV2):
        continue
      if isinstance(ds, list):
        message = ("The argument to `Dataset.zip()` must be a nested "
                   "structure of `Dataset` objects. Nested structures do not "
                   "support Python lists; please use a tuple instead.")
      else:
        message = ("The argument to `Dataset.zip()` must be a nested "
                   "structure of `Dataset` objects.")
      raise TypeError(message)
    self._datasets = datasets
    flat_datasets = nest.flatten(self._datasets)
    # The element spec mirrors the nesting of the input datasets.
    self._structure = nest.pack_sequence_as(
        self._datasets, [ds.element_spec for ds in flat_datasets])
    variant_tensor = gen_dataset_ops.zip_dataset(
        [ds._variant_tensor for ds in flat_datasets],  # pylint: disable=protected-access
        **self._flat_structure)
    super(ZipDataset, self).__init__(variant_tensor)

  def _inputs(self):
    return nest.flatten(self._datasets)

  @property
  def element_spec(self):
    return self._structure
class ConcatenateDataset(DatasetV2):
  """A `Dataset` that concatenates its input with given dataset."""

  def __init__(self, input_dataset, dataset_to_concatenate):
    """See `Dataset.concatenate()` for details.

    Args:
      input_dataset: The dataset whose elements come first.
      dataset_to_concatenate: The dataset whose elements follow. Must have the
        same element types and classes as `input_dataset`.

    Raises:
      TypeError: If the two datasets' legacy output types or classes differ.
    """
    self._input_dataset = input_dataset
    self._dataset_to_concatenate = dataset_to_concatenate

    # Types and classes must match exactly; shapes only need to be
    # compatible and are relaxed to the most specific common shape below.
    output_types = get_legacy_output_types(input_dataset)
    if output_types != get_legacy_output_types(dataset_to_concatenate):
      raise TypeError(
          "Two datasets to concatenate have different types %s and %s" %
          (output_types, get_legacy_output_types(dataset_to_concatenate)))

    output_classes = get_legacy_output_classes(input_dataset)
    if output_classes != get_legacy_output_classes(dataset_to_concatenate):
      raise TypeError(
          "Two datasets to concatenate have different classes %s and %s" %
          (output_classes, get_legacy_output_classes(dataset_to_concatenate)))

    input_shapes = get_legacy_output_shapes(self._input_dataset)
    output_shapes = nest.pack_sequence_as(input_shapes, [
        ts1.most_specific_compatible_shape(ts2)
        for (ts1, ts2) in zip(
            nest.flatten(input_shapes),
            nest.flatten(get_legacy_output_shapes(
                self._dataset_to_concatenate)))
    ])

    self._structure = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)

    self._input_datasets = [input_dataset, dataset_to_concatenate]
    # pylint: disable=protected-access
    variant_tensor = gen_dataset_ops.concatenate_dataset(
        input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor,
        **self._flat_structure)
    # pylint: enable=protected-access
    super(ConcatenateDataset, self).__init__(variant_tensor)

  def _inputs(self):
    return self._input_datasets

  @property
  def element_spec(self):
    return self._structure
class RepeatDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that repeats its input several times."""

  def __init__(self, input_dataset, count):
    """See `Dataset.repeat()` for details."""
    self._input_dataset = input_dataset
    if count is None:
      # A count of -1 signals the kernel to repeat indefinitely.
      count_tensor = constant_op.constant(-1, dtype=dtypes.int64, name="count")
    else:
      count_tensor = ops.convert_to_tensor(
          count, dtype=dtypes.int64, name="count")
    self._count = count_tensor
    variant_tensor = gen_dataset_ops.repeat_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        count=count_tensor,
        **self._flat_structure)
    super(RepeatDataset, self).__init__(input_dataset, variant_tensor)
class RangeDataset(DatasetSource):
  """A `Dataset` of a step separated range of values."""

  def __init__(self, *args, **kwargs):
    """See `Dataset.range()` for details."""
    self._parse_args(*args, **kwargs)
    self._structure = tensor_spec.TensorSpec([], self._output_type)
    variant_tensor = gen_dataset_ops.range_dataset(
        start=self._start,
        stop=self._stop,
        step=self._step,
        **self._flat_structure)
    super(RangeDataset, self).__init__(variant_tensor)

  def _parse_args(self, *args, **kwargs):
    """Parse arguments according to the same rules as the `range()` builtin."""
    # Mirror `range()`: (stop), (start, stop) or (start, stop, step).
    if len(args) == 1:
      start, stop, step = 0, args[0], 1
    elif len(args) == 2:
      start, stop, step = args[0], args[1], 1
    elif len(args) == 3:
      start, stop, step = args
    else:
      raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))
    self._start = self._build_tensor(start, "start")
    self._stop = self._build_tensor(stop, "stop")
    self._step = self._build_tensor(step, "step")
    self._output_type = kwargs.get("output_type", dtypes.int64)

  def _build_tensor(self, int64_value, name):
    return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)

  @property
  def element_spec(self):
    return self._structure
class _MemoryCacheDeleter(object):
  """An object which cleans up an anonymous memory cache resource.

  An alternative to defining a __del__ method on an object. Even if the parent
  object is part of a reference cycle, the cycle will be collectable.
  """

  def __init__(self, handle, device, deleter):
    self._deleter = deleter
    self._handle = handle
    self._device = device
    # Remember the execution mode at creation time so the resource can be
    # deleted in that same mode.
    self._eager_mode = context.executing_eagerly()

  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      mode_fn = context.eager_mode if self._eager_mode else context.graph_mode
      with mode_fn():
        gen_dataset_ops.delete_memory_cache(
            handle=self._handle, deleter=self._deleter)
class _MemoryCache(object):
  """Represents a memory cache resource."""

  def __init__(self):
    super(_MemoryCache, self).__init__()
    self._device = context.context().device_name
    handle, deleter = gen_dataset_ops.anonymous_memory_cache()
    self._handle = handle
    self._deleter = deleter
    # Tie resource deletion to this object's lifetime without defining a
    # __del__ here, so reference cycles stay collectable.
    self._resource_deleter = _MemoryCacheDeleter(
        handle=handle, device=self._device, deleter=deleter)

  @property
  def handle(self):
    return self._handle
class CacheDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that caches elements of its input."""

  def __init__(self, input_dataset, filename):
    """See `Dataset.cache()` for details."""
    self._input_dataset = input_dataset
    self._filename = ops.convert_to_tensor(
        filename, dtype=dtypes.string, name="filename")
    # TF 2 (eager or inside a traced function) uses the v2 kernel with an
    # explicit anonymous memory-cache resource.
    use_memory_cache = tf2.enabled() and (
        context.executing_eagerly() or
        ops.get_default_graph()._building_function)  # pylint: disable=protected-access
    if use_memory_cache:
      self._cache = _MemoryCache()
      variant_tensor = gen_dataset_ops.cache_dataset_v2(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          filename=self._filename,
          cache=self._cache.handle,
          **self._flat_structure)
    else:
      variant_tensor = gen_dataset_ops.cache_dataset(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          filename=self._filename,
          **self._flat_structure)
    super(CacheDataset, self).__init__(input_dataset, variant_tensor)
class _RandomSeedGeneratorDeleter(object):
  """An object which cleans up an anonymous random seed generator resource.

  An alternative to defining a __del__ method on an object. Even if the parent
  object is part of a reference cycle, the cycle will be collectable.
  """

  def __init__(self, handle, device, deleter):
    self._deleter = deleter
    self._handle = handle
    self._device = device
    # Remember the execution mode at creation time so the resource can be
    # deleted in that same mode.
    self._eager_mode = context.executing_eagerly()

  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      mode_fn = context.eager_mode if self._eager_mode else context.graph_mode
      with mode_fn():
        gen_dataset_ops.delete_random_seed_generator(
            handle=self._handle, deleter=self._deleter)
class _RandomSeedGenerator(object):
  """Represents a random seed generator resource."""

  def __init__(self, seed, seed2):
    super(_RandomSeedGenerator, self).__init__()
    self._device = context.context().device_name
    handle, deleter = gen_dataset_ops.anonymous_random_seed_generator(
        seed=seed, seed2=seed2)
    self._handle = handle
    self._deleter = deleter
    # Tie resource deletion to this object's lifetime without defining a
    # __del__ here, so reference cycles stay collectable.
    self._resource_deleter = _RandomSeedGeneratorDeleter(
        handle=handle, device=self._device, deleter=deleter)

  @property
  def handle(self):
    return self._handle
class ShuffleDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that randomly shuffles the elements of its input."""

  def __init__(self,
               input_dataset,
               buffer_size,
               seed=None,
               reshuffle_each_iteration=None):
    """Randomly shuffles the elements of this dataset.

    Args:
      input_dataset: The input dataset.
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements from this dataset from which the new dataset will sample.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
        seed that will be used to create the distribution. See
        `tf.compat.v1.set_random_seed` for behavior.
      reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
        that the dataset should be pseudorandomly reshuffled each time it is
        iterated over. (Defaults to `True`.)

    Raises:
      ValueError: if invalid arguments are provided.
    """
    self._input_dataset = input_dataset
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")
    self._seed, self._seed2 = random_seed.get_seed(seed)
    self._reshuffle_each_iteration = (
        True if reshuffle_each_iteration is None else reshuffle_each_iteration)
    # TF 2 (eager or inside a traced function) with reshuffling enabled uses
    # the v2 kernel with an explicit anonymous seed-generator resource.
    use_seed_generator = tf2.enabled() and self._reshuffle_each_iteration and (
        context.executing_eagerly() or
        ops.get_default_graph()._building_function)  # pylint: disable=protected-access
    if use_seed_generator:
      self._seed_generator = _RandomSeedGenerator(self._seed, self._seed2)
      variant_tensor = gen_dataset_ops.shuffle_dataset_v2(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          buffer_size=self._buffer_size,
          seed_generator=self._seed_generator.handle,
          **self._flat_structure)
    else:
      variant_tensor = gen_dataset_ops.shuffle_dataset(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          buffer_size=self._buffer_size,
          seed=self._seed,
          seed2=self._seed2,
          reshuffle_each_iteration=self._reshuffle_each_iteration,
          **self._flat_structure)
    super(ShuffleDataset, self).__init__(input_dataset, variant_tensor)
class TakeDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` yielding at most the first `count` elements of its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.take()` for details."""
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
    self._input_dataset = input_dataset
    super(TakeDataset, self).__init__(
        input_dataset,
        gen_dataset_ops.take_dataset(
            input_dataset._variant_tensor,  # pylint: disable=protected-access
            count=self._count,
            **self._flat_structure))
class SkipDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that drops the first `count` elements of its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.skip()` for details."""
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
    self._input_dataset = input_dataset
    super(SkipDataset, self).__init__(
        input_dataset,
        gen_dataset_ops.skip_dataset(
            input_dataset._variant_tensor,  # pylint: disable=protected-access
            count=self._count,
            **self._flat_structure))
class ShardDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that selects one of `num_shards` shards of its input."""

  def __init__(self, input_dataset, num_shards, index):
    """See `Dataset.shard()` for details."""
    self._input_dataset = input_dataset
    self._num_shards = ops.convert_to_tensor(
        num_shards, dtype=dtypes.int64, name="num_shards")
    self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name="index")
    sharded_variant = gen_dataset_ops.shard_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        num_shards=self._num_shards,
        index=self._index,
        **self._flat_structure)
    super(ShardDataset, self).__init__(input_dataset, sharded_variant)
class BatchDataset(UnaryDataset):
  """A `Dataset` that batches contiguous elements from its input."""

  def __init__(self, input_dataset, batch_size, drop_remainder):
    """See `Dataset.batch()` for details."""
    self._input_dataset = input_dataset
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")
    # If `drop_remainder` is statically True and `batch_size` is statically
    # known, the leading (batch) dimension of the output shapes is known too;
    # otherwise it must be left unknown (None).
    constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)
    # pylint: disable=protected-access
    if constant_drop_remainder:
      # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)
      # or `False` (explicitly retaining the remainder).
      # pylint: disable=g-long-lambda
      constant_batch_size = tensor_util.constant_value(self._batch_size)
      self._structure = nest.map_structure(
          lambda component_spec: component_spec._batch(constant_batch_size),
          input_dataset.element_spec)
    else:
      self._structure = nest.map_structure(
          lambda component_spec: component_spec._batch(None),
          input_dataset.element_spec)
    variant_tensor = gen_dataset_ops.batch_dataset_v2(
        input_dataset._variant_tensor,
        batch_size=self._batch_size,
        drop_remainder=self._drop_remainder,
        **self._flat_structure)
    super(BatchDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class _NumpyIterator(object):
  """Iterator over a dataset with elements converted to numpy."""

  def __init__(self, dataset):
    self._iterator = iter(dataset)

  def __iter__(self):
    return self

  def __next__(self):
    # Convert every component of the next element to its numpy value.
    element = next(self._iterator)
    return nest.map_structure(lambda component: component.numpy(), element)

  # Python 2 iterator protocol compatibility.
  next = __next__
class _VariantTracker(tracking.CapturableResource):
  """Allows export of functions capturing a Dataset in SavedModels.

  When saving a SavedModel, `tf.saved_model.save` traverses the object
  graph. Since Datasets reference _VariantTracker objects, that traversal will
  find a _VariantTracker for each Dataset and so know how to save and restore
  functions which reference the Dataset's variant Tensor.
  """

  def __init__(self, variant_tensor, resource_creator):
    """Record that `variant_tensor` is associated with `resource_creator`.

    Args:
      variant_tensor: The variant-dtype Tensor associated with the Dataset. This
        Tensor will be a captured input to functions which use the Dataset, and
        is used by saving code to identify the corresponding _VariantTracker.
      resource_creator: A zero-argument function which creates a new
        variant-dtype Tensor. This function will be included in SavedModels and
        run to re-create the Dataset's variant Tensor on restore.
    """
    # Datasets are placed on the CPU; the tracked resource must match.
    super(_VariantTracker, self).__init__(device="CPU")
    # The existing variant tensor serves as this resource's handle.
    self._resource_handle = variant_tensor
    # Called on SavedModel restore to rebuild the variant tensor.
    self._create_resource = resource_creator
def _is_padded_shape_compatible_with(padded_shape, input_component_shape):
"""Returns `True` if `input_component_shape` can be padded to `padded_shape`.
Args:
padded_shape: A `tf.TensorShape`.
input_component_shape: A `tf.TensorShape`.
Returns:
`True` if `input_component_shape` can be padded to `padded_shape`, otherwise
`False`.
"""
if padded_shape.dims is None or input_component_shape.dims is None:
return True
if len(padded_shape.dims) != len(input_component_shape.dims):
return False
for padded_dim, input_dim in zip(
padded_shape.dims, input_component_shape.dims):
if (padded_dim.value is not None and input_dim.value is not None
and padded_dim.value < input_dim.value):
return False
return True
def _padded_shape_to_tensor(padded_shape, input_component_shape):
  """Converts `padded_shape` to a `tf.Tensor` representing that shape.

  Args:
    padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python
      sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.
    input_component_shape: A `tf.TensorShape`, with which `padded_shape` must
      be compatible.

  Returns:
    A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.

  Raises:
    ValueError: If `padded_shape` is not a shape or not compatible with
      `input_component_shape`.
    TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.
  """
  try:
    # Try to convert the `padded_shape` to a `tf.TensorShape`
    padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
    # We will return the "canonical" tensor representation, which uses
    # `-1` in place of `None`.
    ret = ops.convert_to_tensor(
        [dim if dim is not None else -1
         for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)
  except (TypeError, ValueError):
    # The argument was not trivially convertible to a
    # `tf.TensorShape`, so fall back on the conversion to tensor
    # machinery.
    ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
    # The fallback must still be a rank-1 int64 tensor; `six.reraise`
    # preserves the original traceback while surfacing a clearer message.
    if ret.shape.dims is not None and len(ret.shape.dims) != 1:
      six.reraise(ValueError, ValueError(
          "Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
          "shape was %s." % (padded_shape, ret.shape)), sys.exc_info()[2])
    if ret.dtype != dtypes.int64:
      six.reraise(
          TypeError,
          TypeError(
              "Padded shape %s must be a 1-D tensor of tf.int64 values, but "
              "its element type was %s." % (padded_shape, ret.dtype.name)),
          sys.exc_info()[2])
    padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)

  if not _is_padded_shape_compatible_with(padded_shape_as_shape,
                                          input_component_shape):
    raise ValueError("The padded shape %s is not compatible with the "
                     "corresponding input component shape %s."
                     % (padded_shape_as_shape, input_component_shape))

  return ret
def _padding_value_to_tensor(value, output_type):
  """Converts the padding value to a tensor.

  Args:
    value: The padding value.
    output_type: Its expected dtype.

  Returns:
    A scalar `Tensor`.

  Raises:
    ValueError: if the padding value is not a scalar.
    TypeError: if the padding value's type does not match `output_type`.
  """
  tensor = ops.convert_to_tensor(value, name="padding_value")
  # Padding values must be scalars of exactly the component's dtype.
  if not tensor.shape.is_compatible_with(tensor_shape.TensorShape([])):
    raise ValueError(
        "Padding value should be a scalar, but is not: %s" % tensor)
  if tensor.dtype != output_type:
    raise TypeError("Padding value tensor (%s) does not match output type: %s" %
                    (tensor, output_type))
  return tensor
def _padding_values_or_default(padding_values, input_dataset):
  """Returns padding values with None elements replaced with default values."""

  def make_zero(t):
    # Default padding: empty string for string components, zero of the
    # matching numpy dtype for numeric components. Variant components have
    # no meaningful zero value, so they are rejected.
    if t.base_dtype == dtypes.string:
      return ""
    elif t.base_dtype == dtypes.variant:
      error_msg = ("Unable to create padding for field of type 'variant' "
                   "because t.base_type == dtypes.variant == "
                   "{}.".format(
                       t.base_dtype))
      raise TypeError(error_msg)
    else:
      return np.zeros_like(t.as_numpy_dtype())

  def value_or_default(value, default):
    # Only `None` entries fall back to the default.
    return default if value is None else value

  default_padding = nest.map_structure(make_zero,
                                       get_legacy_output_types(input_dataset))
  return nest.map_structure_up_to(padding_values, value_or_default,
                                  padding_values, default_padding)
class PaddedBatchDataset(UnaryDataset):
  """A `Dataset` that batches and pads contiguous elements from its input."""

  def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,
               drop_remainder):
    """See `Dataset.padded_batch()` for details.

    Args:
      input_dataset: The input dataset.
      batch_size: A `tf.int64` scalar, the number of consecutive elements to
        combine into each batch.
      padded_shapes: A nested structure of shapes (matching the structure of
        the input elements) to which the respective components are padded;
        entries may contain unknown dimensions.
      padding_values: A nested structure of scalar padding values, with `None`
        entries replaced by dtype-dependent defaults.
      drop_remainder: A `tf.bool` scalar, whether the final partial batch
        should be dropped.

    Raises:
      TypeError: if the input contains sparse tensors, or a padding value has
        the wrong dtype.
      ValueError: if a padded shape is not compatible with the corresponding
        input component shape.
    """
    self._input_dataset = input_dataset
    if sparse.any_sparse(get_legacy_output_classes(input_dataset)):
      # TODO(b/63669786): support batching of sparse tensors
      raise TypeError(
          "Batching of padded sparse tensors is not currently supported")
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    padding_values = _padding_values_or_default(padding_values, input_dataset)

    input_shapes = get_legacy_output_shapes(input_dataset)
    flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)

    # Canonicalize every padded shape to a 1-D int64 tensor, validating it
    # against the corresponding input component shape as we go.
    flat_padded_shapes_as_tensors = []
    for input_component_shape, padded_shape in zip(
        nest.flatten(input_shapes), flat_padded_shapes):
      flat_padded_shapes_as_tensors.append(
          _padded_shape_to_tensor(padded_shape, input_component_shape))

    self._padded_shapes = nest.pack_sequence_as(input_shapes,
                                                flat_padded_shapes_as_tensors)

    self._padding_values = nest.map_structure_up_to(
        input_shapes, _padding_value_to_tensor, padding_values,
        get_legacy_output_types(input_dataset))
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")

    def _padded_shape_to_batch_shape(s):
      # The leading (batch) dimension is statically known only when the
      # remainder is statically known to be dropped and batch_size is constant.
      return tensor_shape.TensorShape([
          tensor_util.constant_value(self._batch_size)
          if smart_cond.smart_constant_value(self._drop_remainder) else None
      ]).concatenate(tensor_util.constant_value_as_shape(s))

    output_shapes = nest.map_structure(
        _padded_shape_to_batch_shape, self._padded_shapes)
    self._structure = structure.convert_legacy_structure(
        get_legacy_output_types(self._input_dataset), output_shapes,
        get_legacy_output_classes(self._input_dataset))

    # pylint: disable=protected-access
    # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.
    if smart_cond.smart_constant_value(self._drop_remainder) is False:
      variant_tensor = gen_dataset_ops.padded_batch_dataset(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          batch_size=self._batch_size,
          padded_shapes=[
              ops.convert_to_tensor(s, dtype=dtypes.int64)
              for s in nest.flatten(self._padded_shapes)
          ],
          padding_values=nest.flatten(self._padding_values),
          output_shapes=structure.get_flat_tensor_shapes(self._structure))
    else:
      variant_tensor = gen_dataset_ops.padded_batch_dataset_v2(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          batch_size=self._batch_size,
          padded_shapes=[
              ops.convert_to_tensor(s, dtype=dtypes.int64)
              for s in nest.flatten(self._padded_shapes)
          ],
          padding_values=nest.flatten(self._padding_values),
          drop_remainder=self._drop_remainder,
          output_shapes=structure.get_flat_tensor_shapes(self._structure))
    super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    return self._structure
def _should_unpack_args(args):
"""Returns `True` if `args` should be `*args` when passed to a callable."""
return type(args) is tuple # pylint: disable=unidiomatic-typecheck
class MapDataset(UnaryDataset):
  """A `Dataset` that maps a function over elements in its input."""

  def __init__(self,
               input_dataset,
               map_func,
               use_inter_op_parallelism=True,
               preserve_cardinality=False,
               use_legacy_function=False):
    """See `Dataset.map()` for details."""
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism
    self._preserve_cardinality = preserve_cardinality
    # Trace `map_func` against the input element structure; the wrapper also
    # determines the output element structure.
    self._map_func = StructuredFunctionWrapper(
        map_func,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    variant_tensor = gen_dataset_ops.map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        preserve_cardinality=self._preserve_cardinality,
        **self._flat_structure)
    super(MapDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    # The output structure is defined by the traced map function.
    return self._map_func.output_structure

  def _transformation_name(self):
    return "Dataset.map()"
class ParallelMapDataset(UnaryDataset):
  """A `Dataset` that maps a function over elements in its input in parallel."""

  def __init__(self,
               input_dataset,
               map_func,
               num_parallel_calls,
               use_inter_op_parallelism=True,
               preserve_cardinality=False,
               use_legacy_function=False):
    """See `Dataset.map()` for details."""
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism
    # Trace `map_func` against the input element structure; the wrapper also
    # determines the output element structure.
    self._map_func = StructuredFunctionWrapper(
        map_func,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    # NOTE: int32, unlike most other tf.data knobs; may be AUTOTUNE (-1).
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls")
    self._preserve_cardinality = preserve_cardinality
    variant_tensor = gen_dataset_ops.parallel_map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        num_parallel_calls=self._num_parallel_calls,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        preserve_cardinality=self._preserve_cardinality,
        **self._flat_structure)
    super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    return self._map_func.output_structure

  def _transformation_name(self):
    return "Dataset.map()"
class FlatMapDataset(UnaryDataset):
  """A `Dataset` that maps a function over its input and flattens the result."""

  def __init__(self, input_dataset, map_func):
    """See `Dataset.flat_map()` for details."""
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # `map_func` must return a Dataset; the elements of this transformation
    # are the elements of those returned datasets.
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    variant_tensor = gen_dataset_ops.flat_map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        **self._flat_structure)
    super(FlatMapDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    return self._structure

  def _transformation_name(self):
    return "Dataset.flat_map()"
class InterleaveDataset(UnaryDataset):
  """A `Dataset` that interleaves the result of transformed inputs."""

  def __init__(self, input_dataset, map_func, cycle_length, block_length):
    """See `Dataset.interleave()` for details."""
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # `map_func` must return a Dataset; their elements are interleaved.
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    # `cycle_length` datasets are consumed concurrently, `block_length`
    # consecutive elements are taken from each before moving on.
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
    variant_tensor = gen_dataset_ops.interleave_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,  # pylint: disable=protected-access
        self._cycle_length,
        self._block_length,
        f=self._map_func.function,
        **self._flat_structure)
    super(InterleaveDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    return self._structure

  def _transformation_name(self):
    return "Dataset.interleave()"
class ParallelInterleaveDataset(UnaryDataset):
  """A `Dataset` that maps a function over its input and interleaves the result."""

  def __init__(self, input_dataset, map_func, cycle_length, block_length,
               num_parallel_calls):
    """See `Dataset.interleave()` for details."""
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # `map_func` must return a Dataset; their elements are interleaved.
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
    # NOTE: int64 here, unlike ParallelMapDataset's int32 knob.
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
    variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v2(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,  # pylint: disable=protected-access
        self._cycle_length,
        self._block_length,
        self._num_parallel_calls,
        f=self._map_func.function,
        **self._flat_structure)
    super(ParallelInterleaveDataset, self).__init__(input_dataset,
                                                    variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    return self._structure

  def _transformation_name(self):
    return "Dataset.interleave()"
class FilterDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that filters its input according to a predicate function."""

  def __init__(self, input_dataset, predicate, use_legacy_function=False):
    """See `Dataset.filter()` for details."""
    self._input_dataset = input_dataset
    wrapped_func = StructuredFunctionWrapper(
        predicate,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    # The predicate must produce a scalar boolean per element.
    if not wrapped_func.output_structure.is_compatible_with(
        tensor_spec.TensorSpec([], dtypes.bool)):
      error_msg = ("`predicate` return type must be convertible to a scalar "
                   "boolean tensor. Was {}.").format(
                       wrapped_func.output_structure)
      raise ValueError(error_msg)
    self._predicate = wrapped_func
    variant_tensor = gen_dataset_ops.filter_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        other_arguments=self._predicate.function.captured_inputs,
        predicate=self._predicate.function,
        **self._flat_structure)
    super(FilterDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._predicate]

  def _transformation_name(self):
    return "Dataset.filter()"
class PrefetchDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that asynchronously prefetches its input."""

  def __init__(self, input_dataset, buffer_size, slack_period=None):
    """See `Dataset.prefetch()` for details.

    Args:
      input_dataset: The input dataset.
      buffer_size: See `Dataset.prefetch()` for details.
      slack_period: (Optional.) An integer. If non-zero, determines the number
        of GetNext calls before injecting slack into the execution. This may
        reduce CPU contention at the start of a step. Note that a tensorflow
        user should not have to set this manually; enable this behavior
        automatically via `tf.data.Options.experimental_slack` instead.
        Defaults to None.
    """
    self._input_dataset = input_dataset
    # A buffer size of -1 is the sentinel that enables auto-tuning.
    self._buffer_size = ops.convert_to_tensor(
        -1 if buffer_size is None else buffer_size,
        dtype=dtypes.int64,
        name="buffer_size")
    variant_tensor = gen_dataset_ops.prefetch_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        buffer_size=self._buffer_size,
        slack_period=slack_period,
        **self._flat_structure)
    super(PrefetchDataset, self).__init__(input_dataset, variant_tensor)
class WindowDataset(UnaryDataset):
  """A dataset that creates window datasets from the input elements."""

  def __init__(self, input_dataset, size, shift, stride, drop_remainder):
    """See `window_dataset()` for more details."""
    self._input_dataset = input_dataset
    self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size")
    self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift")
    self._stride = ops.convert_to_tensor(
        stride, dtype=dtypes.int64, name="stride")
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")
    # Each output component is itself a dataset of windows, so the element
    # spec is the input's structure with every component wrapped in a
    # DatasetSpec of the corresponding legacy type/shape/class triple.
    self._structure = nest.pack_sequence_as(
        get_legacy_output_classes(input_dataset), [
            DatasetSpec(  # pylint: disable=g-complex-comprehension
                structure.convert_legacy_structure(
                    output_type, output_shape, output_class))
            for output_class, output_shape, output_type in zip(
                nest.flatten(get_legacy_output_classes(input_dataset)),
                nest.flatten(get_legacy_output_shapes(input_dataset)),
                nest.flatten(get_legacy_output_types(input_dataset)))
        ])
    variant_tensor = gen_dataset_ops.window_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._size,
        self._shift,
        self._stride,
        self._drop_remainder,
        **self._flat_structure)
    super(WindowDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class _OptionsDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` that stores options."""

  def __init__(self, input_dataset, options):
    self._input_dataset = input_dataset
    # Merge the new options on top of any options inherited from the input.
    inherited_options = input_dataset.options()
    if inherited_options:
      self._options = inherited_options.merge(options)
    else:
      self._options = options
    # Identity transformation: reuse the input's variant tensor unchanged.
    variant_tensor = input_dataset._variant_tensor  # pylint: disable=protected-access
    super(_OptionsDataset, self).__init__(input_dataset, variant_tensor)

  def options(self):
    return self._options
class _ModelDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and models performance."""

  def __init__(self, input_dataset, algorithm, cpu_budget):
    self._input_dataset = input_dataset
    super(_ModelDataset, self).__init__(
        input_dataset,
        gen_dataset_ops.model_dataset(
            input_dataset._variant_tensor,  # pylint: disable=protected-access
            algorithm=algorithm.value,
            cpu_budget=cpu_budget,
            **self._flat_structure))
class _OptimizeDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and applies optimizations."""

  def __init__(self, input_dataset, optimizations, optimization_configs=None):
    self._input_dataset = input_dataset
    # Treat None as "no optimizations / no configs" rather than failing.
    optimization_configs = (
        [] if optimization_configs is None else optimization_configs)
    self._optimizations = ops.convert_to_tensor(
        [] if optimizations is None else optimizations,
        dtype=dtypes.string,
        name="optimizations")
    variant_tensor = gen_dataset_ops.optimize_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._optimizations,
        optimization_configs=optimization_configs,
        **self._flat_structure)
    super(_OptimizeDataset, self).__init__(input_dataset, variant_tensor)
class _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and sets a stats aggregator."""

  def __init__(self, input_dataset, aggregator, prefix, counter_prefix):
    self._input_dataset = input_dataset
    # The aggregator resource collects statistics for this pipeline; `prefix`
    # and `counter_prefix` namespace the recorded stats/counters.
    self._stats_aggregator = aggregator
    self._prefix = prefix
    self._counter_prefix = counter_prefix
    variant_tensor = ged_ops.set_stats_aggregator_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._stats_aggregator._resource,  # pylint: disable=protected-access
        self._prefix,
        self._counter_prefix,
        **self._flat_structure)
    super(_SetStatsAggregatorDataset, self).__init__(input_dataset,
                                                     variant_tensor)
class _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, overriding intra-op parallelism."""

  def __init__(self, input_dataset, max_intra_op_parallelism):
    self._input_dataset = input_dataset
    self._max_intra_op_parallelism = ops.convert_to_tensor(
        max_intra_op_parallelism,
        dtype=dtypes.int64,
        name="max_intra_op_parallelism")
    super(_MaxIntraOpParallelismDataset, self).__init__(
        input_dataset,
        ged_ops.max_intra_op_parallelism_dataset(
            input_dataset._variant_tensor,  # pylint: disable=protected-access
            self._max_intra_op_parallelism,
            **self._flat_structure))
class _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, setting a private threadpool."""

  def __init__(self, input_dataset, num_threads):
    self._input_dataset = input_dataset
    self._num_threads = ops.convert_to_tensor(
        num_threads, dtype=dtypes.int64, name="num_threads")
    super(_PrivateThreadPoolDataset, self).__init__(
        input_dataset,
        ged_ops.private_thread_pool_dataset(
            input_dataset._variant_tensor,  # pylint: disable=protected-access
            self._num_threads,
            **self._flat_structure))
class _RestructuredDataset(UnaryDataset):
  """An internal helper for changing the structure and shape of a dataset."""

  def __init__(self, dataset, structure):
    self._input_dataset = dataset
    # Override only the element spec; the underlying variant is unchanged.
    self._structure = structure
    super(_RestructuredDataset, self).__init__(
        dataset,
        dataset._variant_tensor)  # pylint: disable=protected-access

  @property
  def element_spec(self):
    return self._structure
class _UnbatchDataset(UnaryDataset):
  """A dataset that splits the elements of its input into multiple elements."""

  def __init__(self, input_dataset):
    """See `unbatch()` for more details."""
    flat_shapes = input_dataset._flat_shapes  # pylint: disable=protected-access
    if any(s.ndims == 0 for s in flat_shapes):
      raise ValueError("Cannot unbatch an input with scalar components.")
    # Every component must agree on the (possibly unknown) leading batch
    # dimension; merge_with raises ValueError on a mismatch.
    known_batch_dim = tensor_shape.Dimension(None)
    for s in flat_shapes:
      try:
        known_batch_dim = known_batch_dim.merge_with(s[0])
      except ValueError:
        raise ValueError("Cannot unbatch an input whose components have "
                         "different batch sizes.")
    self._input_dataset = input_dataset
    # Each output element spec is the input component spec with its leading
    # batch dimension removed.
    self._structure = nest.map_structure(
        lambda component_spec: component_spec._unbatch(),  # pylint: disable=protected-access
        get_structure(input_dataset))
    variant_tensor = ged_ops.unbatch_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        **self._flat_structure)
    super(_UnbatchDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    return self._structure
| 39.862517 | 116 | 0.688776 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import sys
import threading
import warnings
import weakref
import numpy as np
import six
from six.moves import queue as Queue
from tensorflow.core.framework import graph_pb2
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import options as options_lib
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import sparse
from tensorflow.python.data.util import structure
from tensorflow.python.data.util import traverse
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as core_random_seed
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest as tf_nest
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily to avoid a circular dependency with the eager/autograph
# modules, which themselves import tf.data.
wrap_function = lazy_loader.LazyLoader(
    "wrap_function", globals(),
    "tensorflow.python.eager.wrap_function")
autograph_ctx = lazy_loader.LazyLoader(
    "autograph_ctx", globals(),
    "tensorflow.python.autograph.core.ag_ctx")
autograph = lazy_loader.LazyLoader(
    "autograph", globals(),
    "tensorflow.python.autograph.impl.api")

# Gradients cannot flow through dataset reduction.
ops.NotDifferentiable("ReduceDataset")

# Sentinel value (-1) instructing tf.data tunable parameters (e.g. buffer
# sizes, parallelism) to be auto-tuned at runtime.
AUTOTUNE = -1
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
@tf_export("data.Dataset", v1=[])
@six.add_metaclass(abc.ABCMeta)
class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor):
  def __init__(self, variant_tensor):
    """Creates a DatasetV2 object.

    Args:
      variant_tensor: A DT_VARIANT tensor that represents the dataset.
    """
    self._variant_tensor_attr = variant_tensor
    weak_self = weakref.proxy(self)
    # Track a _VariantTracker so functions capturing this dataset can be saved
    # in a SavedModel. The creator lambda is deferred (tracing only works
    # eagerly) and closes over a weak proxy of the Dataset so the tracker does
    # not create a reference cycle keeping the Dataset alive.
    self._variant_tracker = self._track_trackable(
        _VariantTracker(
            self._variant_tensor,
            lambda: weak_self._trace_variant_creation()()),  # pylint: disable=unnecessary-lambda,protected-access
        name="_variant_tracker")
    self._graph_attr = ops.get_default_graph()
  @property
  def _variant_tensor(self):
    """The DT_VARIANT tensor that represents this dataset."""
    return self._variant_tensor_attr
  @_variant_tensor.setter
  def _variant_tensor(self, _):
    # The variant tensor is fixed at construction time.
    raise ValueError("The _variant_tensor property is read-only")
  @deprecation.deprecated_args(None, "Use external_state_policy instead",
                               "allow_stateful")
  def _as_serialized_graph(
      self,
      allow_stateful=None,
      strip_device_assignment=None,
      external_state_policy=distribute_options.ExternalStatePolicy.WARN):
    """Produces a serialized graph representation of this dataset.

    Args:
      allow_stateful: (Deprecated in favor of `external_state_policy`.)
        Whether stateful ops are allowed in the serialized graph.
      strip_device_assignment: If true, device assignment is stripped from the
        serialized graph.
      external_state_policy: An `ExternalStatePolicy` controlling how input
        pipelines that depend on external state are handled.

    Returns:
      A scalar string tensor containing the serialized graph.
    """
    # The v2 op (with external_state_policy support) is only usable once the
    # forward-compatibility window has passed, or when the caller explicitly
    # provides a policy.
    if compat.forward_compatible(2019, 11, 25) or external_state_policy:
      policy = None
      if external_state_policy:
        policy = external_state_policy.value
      return gen_dataset_ops.dataset_to_graph_v2(
          self._variant_tensor,
          external_state_policy=policy,
          strip_device_assignment=strip_device_assignment)
    if compat.forward_compatible(2019, 11, 16) or strip_device_assignment:
      return gen_dataset_ops.dataset_to_graph(
          self._variant_tensor,
          allow_stateful=allow_stateful,
          strip_device_assignment=strip_device_assignment)
    return gen_dataset_ops.dataset_to_graph(
        self._variant_tensor, allow_stateful=allow_stateful)
  def _trace_variant_creation(self):
    """Traces a function which outputs a variant `tf.Tensor` for this dataset.

    Note that creating this function involves evaluating an op, and is
    currently only supported when executing eagerly.

    Returns:
      A zero-argument `ConcreteFunction` which outputs a variant dataset
      tensor.

    Raises:
      NotImplementedError: If the dataset's variant tensor is not an eager
        tensor (i.e. the dataset was built inside a graph).
      AssertionError: If the serialized graph does not contain exactly one
        `_Retval` output node.
    """
    variant = self._variant_tensor
    if not isinstance(variant, ops.EagerTensor):
      raise NotImplementedError(
          "Can only export Datasets which were created executing eagerly. "
          "Please file a feature request if this is important to you.")
    with context.eager_mode(), ops.device("CPU"):
      # Round-trip through the serialized GraphDef of this dataset.
      graph_def = graph_pb2.GraphDef().FromString(
          self._as_serialized_graph().numpy())  # pylint: disable=protected-access
    # Locate the single _Retval node: its input is the variant-producing op.
    output_node_name = None
    for node in graph_def.node:
      if node.op == "_Retval":
        if output_node_name is not None:
          raise AssertionError(
              "Found multiple return values from the dataset's graph, expected "
              "only one.")
        output_node_name, = node.input
    if output_node_name is None:
      raise AssertionError("Could not find the dataset's output node.")
    # Add functions used in this Dataset to the function's graph, since they
    # need to travel with the wrapped function (e.g. when it is added to a
    # SavedModel).
    variant_function = wrap_function.function_from_graph_def(
        graph_def, inputs=[], outputs=output_node_name + ":0")
    for used_function in self._functions():
      used_function.function.add_to_graph(variant_function.graph)
    return variant_function
@abc.abstractmethod
def _inputs(self):
raise NotImplementedError("Dataset._inputs")
@property
def _graph(self):
return self._graph_attr
@_graph.setter
def _graph(self, _):
raise ValueError("The _graph property is read-only")
def _has_captured_ref(self):
if context.executing_eagerly():
return False
def is_tensor_or_parent_ref(tensor):
if tensor.dtype._is_ref_dtype:
return True
if isinstance(tensor, ops._EagerTensorBase):
return False
return any(is_tensor_or_parent_ref(x) for x in tensor.op.inputs)
for fn in self._functions():
if any(is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs):
return True
return any(
[input_dataset._has_captured_ref() for input_dataset in self._inputs()])
def _functions(self):
return []
def options(self):
options = Options()
for input_dataset in self._inputs():
input_options = input_dataset.options()
if input_options is not None:
options = options.merge(input_options)
return options
  def _apply_options(self):
    """Applies the dataset's options (threading, rewrites, autotune, stats).

    Wraps `self` in the corresponding option-applying dataset transformations,
    in a fixed order, and returns the resulting dataset. `self` is unchanged.

    Returns:
      A `Dataset` with all applicable options applied.
    """
    dataset = self
    options = self.options()
    # (1) Threading options.
    if options.experimental_threading is not None:
      t_options = options.experimental_threading
      if t_options.max_intra_op_parallelism is not None:
        dataset = _MaxIntraOpParallelismDataset(
            dataset, t_options.max_intra_op_parallelism)
      if t_options.private_threadpool_size is not None:
        dataset = _PrivateThreadPoolDataset(dataset,
                                            t_options.private_threadpool_size)
    # (2) Static graph rewrites. Skipped (with a warning) when the pipeline
    # captures reference-typed tensors, which the rewrites cannot handle.
    graph_rewrites = options._graph_rewrites()
    graph_rewrite_configs = options._graph_rewrite_configs()
    if graph_rewrites:
      if self._has_captured_ref():
        warnings.warn(
            "tf.data graph rewrites are not compatible with tf.Variable. "
            "The following rewrites will be disabled: %s. To enable "
            "rewrites, use resource variables instead by calling "
            "`tf.enable_resource_variables()` at the start of the program." %
            ", ".join(graph_rewrites))
      else:
        dataset = _OptimizeDataset(dataset, graph_rewrites,
                                   graph_rewrite_configs)
    # (3) Autotuning.
    autotune, algorithm, cpu_budget = options._autotune_settings()
    if autotune:
      dataset = _ModelDataset(dataset, algorithm, cpu_budget)
    # (4) Stats aggregation.
    if options.experimental_stats and options.experimental_stats.aggregator:
      dataset = _SetStatsAggregatorDataset(
          dataset, options.experimental_stats.aggregator,
          options.experimental_stats.prefix,
          options.experimental_stats.counter_prefix)
    return dataset
  def __iter__(self):
    """Creates an iterator for the elements of this dataset.

    Returns:
      An `OwnedIterator` over the elements of this dataset.

    Raises:
      RuntimeError: If not executing eagerly and not inside a `tf.function`
        being traced.
    """
    if (context.executing_eagerly()
        or ops.get_default_graph()._building_function):  # pylint: disable=protected-access
      return iterator_ops.OwnedIterator(self)
    else:
      raise RuntimeError("__iter__() is only supported inside of tf.function "
                         "or when eager execution is enabled.")
@abc.abstractproperty
def element_spec(self):
raise NotImplementedError("Dataset.element_spec")
def __repr__(self):
output_shapes = nest.map_structure(str, get_legacy_output_shapes(self))
output_shapes = str(output_shapes).replace("'", "")
output_types = nest.map_structure(repr, get_legacy_output_types(self))
output_types = str(output_types).replace("'", "")
return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes,
output_types))
  def as_numpy_iterator(self):
    """Returns an iterator which converts all elements of the dataset to numpy.

    Only supported while executing eagerly, and only for datasets whose
    elements consist solely of dense tensors (every component spec must be a
    `TensorSpec`).

    Returns:
      A `_NumpyIterator` over the elements of the dataset, yielding numpy
      values.

    Raises:
      RuntimeError: If called while tracing a function.
      TypeError: If an element contains a non-`Tensor` component (e.g. sparse
        or ragged).
    """
    if not context.executing_eagerly():
      raise RuntimeError("as_numpy_iterator() is not supported while tracing "
                         "functions")
    for component_spec in nest.flatten(self.element_spec):
      if not isinstance(component_spec, tensor_spec.TensorSpec):
        raise TypeError(
            "Dataset.as_numpy_iterator() does not support datasets containing "
            + str(component_spec.value_type))
    return _NumpyIterator(self)
@property
def _flat_shapes(self):
return structure.get_flat_tensor_shapes(self.element_spec)
@property
def _flat_types(self):
return structure.get_flat_tensor_types(self.element_spec)
@property
def _flat_structure(self):
return {
"output_shapes": self._flat_shapes,
"output_types": self._flat_types,
}
@property
def _type_spec(self):
return DatasetSpec(self.element_spec)
@staticmethod
def from_tensors(tensors):
return TensorDataset(tensors)
@staticmethod
def from_tensor_slices(tensors):
return TensorSliceDataset(tensors)
  class _GeneratorState(object):
    """Stores outstanding iterators created from a Python generator.

    This class keeps track of potentially multiple iterators that may have
    been created from a generator, e.g. in the case that the dataset is
    repeated, or nested within a parallel computation.
    """

    def __init__(self, generator):
      self._generator = generator
      self._lock = threading.Lock()
      self._next_id = 0  # GUARDED_BY(self._lock)
      self._args = {}
      self._iterators = {}

    def get_next_id(self, *args):
      """Reserves a fresh iterator id and stores `args` for its creation."""
      with self._lock:
        ret = self._next_id
        self._next_id += 1
      self._args[ret] = args
      # NOTE: the returned np.array is wrapped so the id can flow through a
      # numpy_function op as an int64 tensor.
      return np.array(ret, dtype=np.int64)

    def get_iterator(self, iterator_id):
      # Lazily instantiate the iterator on first use, consuming the stored
      # args so they are not kept alive afterwards.
      try:
        return self._iterators[iterator_id]
      except KeyError:
        iterator = iter(self._generator(*self._args.pop(iterator_id)))
        self._iterators[iterator_id] = iterator
        return iterator

    def iterator_completed(self, iterator_id):
      # Releases the iterator once the pipeline signals it is exhausted.
      del self._iterators[iterator_id]
@staticmethod
def from_generator(generator, output_types, output_shapes=None, args=None):
if not callable(generator):
raise TypeError("`generator` must be callable.")
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if args is None:
args = ()
else:
args = tuple(ops.convert_n_to_tensor(args, name="args"))
flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]
flattened_shapes = nest.flatten(output_shapes)
generator_state = DatasetV2._GeneratorState(generator)
def get_iterator_id_fn(unused_dummy):
return script_ops.numpy_function(generator_state.get_next_id, args,
dtypes.int64)
def generator_next_fn(iterator_id_t):
def generator_py_func(iterator_id):
values = next(generator_state.get_iterator(iterator_id))
try:
flattened_values = nest.flatten_up_to(output_types, values)
except (TypeError, ValueError):
six.reraise(TypeError, TypeError(
"`generator` yielded an element that did not match the expected "
"structure. The expected structure was %s, but the yielded "
"element was %s." % (output_types, values)), sys.exc_info()[2])
ret_arrays = []
for ret, dtype in zip(flattened_values, flattened_types):
try:
ret_arrays.append(script_ops.FuncRegistry._convert(
ret, dtype=dtype.as_numpy_dtype))
except (TypeError, ValueError):
six.reraise(TypeError, TypeError(
"`generator` yielded an element that could not be converted to "
"the expected type. The expected type was %s, but the yielded "
"element was %s." % (dtype.name, ret)), sys.exc_info()[2])
for (ret_array, expected_dtype, expected_shape) in zip(
ret_arrays, flattened_types, flattened_shapes):
if ret_array.dtype != expected_dtype.as_numpy_dtype:
raise TypeError(
"`generator` yielded an element of type %s where an element "
"of type %s was expected." % (ret_array.dtype,
expected_dtype.as_numpy_dtype))
if not expected_shape.is_compatible_with(ret_array.shape):
raise ValueError(
"`generator` yielded an element of shape %s where an element "
"of shape %s was expected." % (ret_array.shape, expected_shape))
return ret_arrays
flat_values = script_ops.numpy_function(generator_py_func,
[iterator_id_t], flattened_types)
if output_shapes is not None:
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
def finalize_fn(iterator_id_t):
def finalize_py_func(iterator_id):
generator_state.iterator_completed(iterator_id)
return np.array(0, dtype=np.int64)
return script_ops.numpy_function(finalize_py_func, [iterator_id_t],
dtypes.int64)
def flat_map_fn(dummy_arg):
return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,
finalize_fn)
dummy = 0
id_dataset = Dataset.from_tensors(dummy)
return id_dataset.flat_map(flat_map_fn)
@staticmethod
def range(*args, **kwargs):
return RangeDataset(*args, **kwargs)
@staticmethod
def zip(datasets):
return ZipDataset(datasets)
def concatenate(self, dataset):
return ConcatenateDataset(self, dataset)
def prefetch(self, buffer_size):
return PrefetchDataset(self, buffer_size)
  @staticmethod
  def list_files(file_pattern, shuffle=None, seed=None):
    """A dataset of all files matching one or more glob patterns.

    NOTE: The default behavior of this method is to return filenames in a
    non-deterministic random shuffled order. Pass `seed` or `shuffle=False`
    to get results in a deterministic order.

    Args:
      file_pattern: A string, a list of strings, or a `tf.Tensor` of string
        type (scalar or vector), representing the filename glob (i.e. shell
        wildcard) pattern(s) that will be matched.
      shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
        Defaults to `True`.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        random seed that will be used to create the distribution.

    Returns:
      Dataset: A `Dataset` of strings corresponding to file names.
    """
    with ops.name_scope("list_files"):
      if shuffle is None:
        shuffle = True
      file_pattern = ops.convert_to_tensor(
          file_pattern, dtype=dtypes.string, name="file_pattern")
      matching_files = gen_io_ops.matching_files(file_pattern)
      # Raise an exception if `file_pattern` doesn't match any files.
      condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,
                                   name="match_not_empty")
      message = math_ops.add(
          "No files matched pattern: ",
          string_ops.reduce_join(file_pattern, separator=", "), name="message")
      assert_not_empty = control_flow_ops.Assert(
          condition, [message], summarize=1, name="assert_not_empty")
      # The identity op inside the control dependency ensures the assert runs
      # before the matched files are consumed downstream.
      with ops.control_dependencies([assert_not_empty]):
        matching_files = array_ops.identity(matching_files)
      dataset = Dataset.from_tensor_slices(matching_files)
      if shuffle:
        # The shuffle buffer size must be greater than zero, but the list of
        # files might be empty, hence the maximum with 1.
        buffer_size = math_ops.maximum(
            array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)
        dataset = dataset.shuffle(buffer_size, seed=seed)
      return dataset
def repeat(self, count=None):
return RepeatDataset(self, count)
def enumerate(self, start=0):
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
return Dataset.zip((Dataset.range(start, max_value), self))
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)
def cache(self, filename=""):
return CacheDataset(self, filename)
def take(self, count):
return TakeDataset(self, count)
def skip(self, count):
return SkipDataset(self, count)
def shard(self, num_shards, index):
return ShardDataset(self, num_shards, index)
def batch(self, batch_size, drop_remainder=False):
return BatchDataset(self, batch_size, drop_remainder)
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,
drop_remainder)
def map(self, map_func, num_parallel_calls=None):
if num_parallel_calls is None:
return MapDataset(self, map_func, preserve_cardinality=True)
else:
return ParallelMapDataset(
self, map_func, num_parallel_calls, preserve_cardinality=True)
def flat_map(self, map_func):
return FlatMapDataset(self, map_func)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
if num_parallel_calls is None:
return InterleaveDataset(self, map_func, cycle_length, block_length)
else:
return ParallelInterleaveDataset(self, map_func, cycle_length,
block_length, num_parallel_calls)
def filter(self, predicate):
return FilterDataset(self, predicate)
def apply(self, transformation_func):
dataset = transformation_func(self)
if not isinstance(dataset, DatasetV2):
raise TypeError(
"`transformation_func` must return a Dataset. Got {}.".format(
dataset))
dataset._input_datasets = [self]
return dataset
def window(self, size, shift=None, stride=1, drop_remainder=False):
if shift is None:
shift = size
return WindowDataset(self, size, shift, stride, drop_remainder)
def reduce(self, initial_state, reduce_func):
with ops.name_scope("initial_state"):
initial_state = structure.normalize_element(initial_state)
state_structure = structure.type_spec_from_value(initial_state)
need_to_rerun = True
while need_to_rerun:
wrapped_func = StructuredFunctionWrapper(
reduce_func,
"reduce()",
input_structure=(state_structure, self.element_spec),
add_to_graph=False)
output_classes = wrapped_func.output_classes
state_classes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(),
state_structure)
for new_state_class, state_class in zip(
nest.flatten(output_classes), nest.flatten(state_classes)):
if not issubclass(new_state_class, state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." %
(state_classes, wrapped_func.output_classes))
output_types = wrapped_func.output_types
state_types = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(),
state_structure)
for new_state_type, state_type in zip(
nest.flatten(output_types), nest.flatten(state_types)):
if new_state_type != state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." %
(state_types, wrapped_func.output_types))
output_shapes = wrapped_func.output_shapes
state_shapes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(),
state_structure)
flat_state_shapes = nest.flatten(state_shapes)
flat_new_state_shapes = nest.flatten(output_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
state_structure = structure.convert_legacy_structure(
state_types,
nest.pack_sequence_as(state_shapes, weakened_state_shapes),
state_classes)
reduce_func = wrapped_func.function
reduce_func.add_to_graph(ops.get_default_graph())
dataset = self._apply_options()
return structure.from_compatible_tensor_list(
state_structure,
gen_dataset_ops.reduce_dataset(
dataset._variant_tensor,
structure.to_tensor_list(state_structure, initial_state),
reduce_func.captured_inputs,
f=reduce_func,
output_shapes=structure.get_flat_tensor_shapes(state_structure),
output_types=structure.get_flat_tensor_types(state_structure)))
def unbatch(self):
def normalize(arg, *rest):
if rest:
return structure.to_batched_tensor_list(self.element_spec,
(arg,) + rest)
else:
return structure.to_batched_tensor_list(self.element_spec, arg)
normalized_dataset = self.map(normalize)
restructured_dataset = _RestructuredDataset(normalized_dataset,
self.element_spec)
return _UnbatchDataset(restructured_dataset)
def with_options(self, options):
return _OptionsDataset(self, options)
@tf_export(v1=["data.Dataset"])
class DatasetV1(DatasetV2):
  def __init__(self):
    # Subclasses provide the variant via `_as_variant_tensor()`; translate the
    # common failure modes into actionable error messages.
    try:
      variant_tensor = self._as_variant_tensor()
    except AttributeError as e:
      if "_as_variant_tensor" in str(e):
        # The subclass migrated to the V2 protocol and no longer implements
        # `_as_variant_tensor`.
        raise AttributeError("Please use _variant_tensor instead of "
                             "_as_variant_tensor() to obtain the variant "
                             "associated with a dataset")
      # Any other AttributeError most likely means `_as_variant_tensor` read
      # an instance attribute that the subclass sets only after calling
      # `super().__init__()`.
      raise AttributeError("{}: A likely cause of this error is that the super "
                           "call for this dataset is not the last line of the "
                           "__init__ method. The base class causes the "
                           "_as_variant_tensor call in its constructor and "
                           "if that uses attributes defined in the __init__ "
                           "method, those attrs need to be defined before the "
                           "super call.".format(e))
    super(DatasetV1, self).__init__(variant_tensor)
@abc.abstractmethod
def _as_variant_tensor(self):
raise NotImplementedError("Dataset._as_variant_tensor")
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")
def make_one_shot_iterator(self):
return self._make_one_shot_iterator()
  def _make_one_shot_iterator(self):
    """Creates an iterator for elements of this dataset.

    In graph mode the dataset construction is wrapped in a `Defun` so the
    one-shot iterator op can re-create the dataset; stateful dataset ops are
    whitelisted because dataset ops are marked stateful by design.

    Returns:
      An `Iterator` (graph mode) or `OwnedIterator` (eager) over the elements
      of this dataset.

    Raises:
      ValueError: If the dataset captures stateful objects such as a
        `Variable` or `LookupTable`.
    """
    if context.executing_eagerly():
      return iterator_ops.OwnedIterator(self)
    _ensure_same_dataset_graph(self)
    all_ds_ops = traverse.obtain_all_variant_tensor_ops(self)
    graph_level_seed, op_level_seed = core_random_seed.get_seed(None)
    @function.Defun(capture_by_value=True, whitelisted_stateful_ops=all_ds_ops)
    def _make_dataset():
      # Re-seed inside the function body so random ops in the pipeline behave
      # as if they were built in the outer graph. The arithmetic folds both
      # seed levels into one value within the int64 range.
      if graph_level_seed is not None:
        assert op_level_seed is not None
        core_random_seed.set_random_seed(
            (graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))
      dataset = self._apply_options()
      return dataset._variant_tensor  # pylint: disable=protected-access
    try:
      _make_dataset.add_to_graph(ops.get_default_graph())
    except ValueError as err:
      if "Cannot capture a stateful node" in str(err):
        raise ValueError(
            "Failed to create a one-shot iterator for a dataset. "
            "`Dataset.make_one_shot_iterator()` does not support datasets that "
            "capture stateful objects, such as a `Variable` or `LookupTable`. "
            "In these cases, use `Dataset.make_initializable_iterator()`. "
            "(Original error: %s)" % err)
      else:
        # Preserve the original traceback for unrelated ValueErrors.
        six.reraise(ValueError, err)
    return iterator_ops.Iterator(
        gen_dataset_ops.one_shot_iterator(
            dataset_factory=_make_dataset, **self._flat_structure), None,
        get_legacy_output_types(self), get_legacy_output_shapes(self),
        get_legacy_output_classes(self))
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_initializable_iterator(dataset)`.")
def make_initializable_iterator(self, shared_name=None):
return self._make_initializable_iterator(shared_name)
  def _make_initializable_iterator(self, shared_name=None):
    """Creates an iterator that must be explicitly initialized before use.

    Args:
      shared_name: (Optional.) If non-empty, the iterator resource is shared
        under this name across multiple sessions (e.g. on remote devices).

    Returns:
      An `Iterator` over the elements of this dataset, with an `initializer`
      op that the caller must run first.

    Raises:
      RuntimeError: If eager execution is enabled.
    """
    if context.executing_eagerly():
      raise RuntimeError(
          "dataset.make_initializable_iterator is not supported when eager "
          "execution is enabled. Use `for element in dataset` instead.")
    _ensure_same_dataset_graph(self)
    dataset = self._apply_options()
    if shared_name is None:
      shared_name = ""
    iterator_resource = gen_dataset_ops.iterator_v2(
        container="", shared_name=shared_name, **self._flat_structure)
    # Colocate the initializer with the iterator resource so both run on the
    # same device.
    with ops.colocate_with(iterator_resource):
      initializer = gen_dataset_ops.make_iterator(
          dataset._variant_tensor,  # pylint: disable=protected-access
          iterator_resource)
    return iterator_ops.Iterator(
        iterator_resource, initializer, get_legacy_output_types(dataset),
        get_legacy_output_shapes(dataset), get_legacy_output_classes(dataset))
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_classes(dataset)`.")
def output_classes(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(),
self.element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_shapes(dataset)`.")
def output_shapes(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(),
self.element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_types(dataset)`.")
def output_types(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(),
self.element_spec)
@property
def element_spec(self):
return structure.convert_legacy_structure(
self.output_types, self.output_shapes, self.output_classes)
@staticmethod
@functools.wraps(DatasetV2.from_tensors)
def from_tensors(tensors):
return DatasetV1Adapter(DatasetV2.from_tensors(tensors))
@staticmethod
@functools.wraps(DatasetV2.from_tensor_slices)
def from_tensor_slices(tensors):
return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))
@staticmethod
@deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.")
def from_sparse_tensor_slices(sparse_tensor):
return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))
@staticmethod
@functools.wraps(DatasetV2.from_generator)
def from_generator(generator, output_types, output_shapes=None, args=None):
return DatasetV1Adapter(DatasetV2.from_generator(
generator, output_types, output_shapes, args))
@staticmethod
@functools.wraps(DatasetV2.range)
def range(*args, **kwargs):
return DatasetV1Adapter(DatasetV2.range(*args, **kwargs))
@staticmethod
@functools.wraps(DatasetV2.zip)
def zip(datasets):
return DatasetV1Adapter(DatasetV2.zip(datasets))
@functools.wraps(DatasetV2.concatenate)
def concatenate(self, dataset):
return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))
@functools.wraps(DatasetV2.prefetch)
def prefetch(self, buffer_size):
return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))
@staticmethod
@functools.wraps(DatasetV2.list_files)
def list_files(file_pattern, shuffle=None, seed=None):
return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))
@functools.wraps(DatasetV2.repeat)
def repeat(self, count=None):
return DatasetV1Adapter(super(DatasetV1, self).repeat(count))
@functools.wraps(DatasetV2.shuffle)
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
return DatasetV1Adapter(super(DatasetV1, self).shuffle(
buffer_size, seed, reshuffle_each_iteration))
@functools.wraps(DatasetV2.cache)
def cache(self, filename=""):
return DatasetV1Adapter(super(DatasetV1, self).cache(filename))
@functools.wraps(DatasetV2.take)
def take(self, count):
return DatasetV1Adapter(super(DatasetV1, self).take(count))
@functools.wraps(DatasetV2.skip)
def skip(self, count):
return DatasetV1Adapter(super(DatasetV1, self).skip(count))
@functools.wraps(DatasetV2.shard)
def shard(self, num_shards, index):
return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))
@functools.wraps(DatasetV2.batch)
def batch(self, batch_size, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).batch(
batch_size, drop_remainder))
@functools.wraps(DatasetV2.padded_batch)
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).padded_batch(
batch_size, padded_shapes, padding_values, drop_remainder))
@functools.wraps(DatasetV2.map)
def map(self, map_func, num_parallel_calls=None):
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(self, map_func, preserve_cardinality=False))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self, map_func, num_parallel_calls, preserve_cardinality=False))
@deprecation.deprecated(None, "Use `tf.data.Dataset.map()")
def map_with_legacy_function(self, map_func, num_parallel_calls=None):
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(
self,
map_func,
preserve_cardinality=False,
use_legacy_function=True))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self,
map_func,
num_parallel_calls,
preserve_cardinality=False,
use_legacy_function=True))
@functools.wraps(DatasetV2.flat_map)
def flat_map(self, map_func):
return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))
@functools.wraps(DatasetV2.interleave)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
return DatasetV1Adapter(super(DatasetV1, self).interleave(
map_func, cycle_length, block_length, num_parallel_calls))
@functools.wraps(DatasetV2.filter)
def filter(self, predicate):
return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))
@deprecation.deprecated(None, "Use `tf.data.Dataset.filter()")
def filter_with_legacy_function(self, predicate):
return FilterDataset(self, predicate, use_legacy_function=True)
@functools.wraps(DatasetV2.apply)
def apply(self, transformation_func):
return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))
@functools.wraps(DatasetV2.window)
def window(self, size, shift=None, stride=1, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).window(
size, shift, stride, drop_remainder))
@functools.wraps(DatasetV2.unbatch)
def unbatch(self):
return DatasetV1Adapter(super(DatasetV1, self).unbatch())
@functools.wraps(DatasetV2.with_options)
def with_options(self, options):
return DatasetV1Adapter(super(DatasetV1, self).with_options(options))
# `Dataset` aliases the V2 class when TF 2.x behavior is enabled and the V1
# class otherwise, so module-internal references pick the right API surface.
if tf2.enabled():
  Dataset = DatasetV2
else:
  Dataset = DatasetV1
class DatasetV1Adapter(DatasetV1):
  """Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API."""

  def __init__(self, dataset):
    # The wrapped V2 dataset; every method below simply delegates to it.
    self._dataset = dataset
    super(DatasetV1Adapter, self).__init__()

  def _as_variant_tensor(self):
    return self._dataset._variant_tensor  # pylint: disable=protected-access

  def _has_captured_ref(self):
    return self._dataset._has_captured_ref()  # pylint: disable=protected-access

  def _inputs(self):
    return self._dataset._inputs()  # pylint: disable=protected-access

  def _functions(self):
    return self._dataset._functions()  # pylint: disable=protected-access

  def options(self):
    return self._dataset.options()

  @property
  def element_spec(self):
    return self._dataset.element_spec

  def __iter__(self):
    return iter(self._dataset)
def _ensure_same_dataset_graph(dataset):
  """Walks the input pipeline and raises if any dataset in it was created in
  a graph other than the current default graph.

  Args:
    dataset: The dataset whose input pipeline (transitive `_inputs()`) is
      checked.

  Raises:
    ValueError: If a dataset in the pipeline belongs to a different graph.
  """
  current_graph = ops.get_default_graph()
  bfs_q = Queue.Queue()
  bfs_q.put(dataset)  # pylint: disable=protected-access
  # Visited datasets, kept to break cycles; membership is a linear scan, which
  # is fine because input pipelines are short.
  visited = []
  while not bfs_q.empty():
    ds = bfs_q.get()
    visited.append(ds)
    ds_graph = ds._graph  # pylint: disable=protected-access
    if current_graph != ds_graph:
      raise ValueError(
          "The graph (" + str(current_graph) + ") of the iterator is different "
          "from the graph (" + str(ds_graph) + ") the dataset: " +
          str(ds._variant_tensor) + " was created in. If you are using the "  # pylint: disable=protected-access
          "Estimator API, make sure that no part of the dataset returned by "
          "the `input_fn` function is defined outside the `input_fn` function. "
          "Please ensure that all datasets in the pipeline are created in the "
          "same graph as the iterator.")
    for input_ds in ds._inputs():  # pylint: disable=protected-access
      if input_ds not in visited:
        bfs_q.put(input_ds)
@tf_export(v1=["data.make_one_shot_iterator"])
def make_one_shot_iterator(dataset):
  """Creates a `tf.compat.v1.data.Iterator` for enumerating dataset elements.

  Note: the returned iterator is automatically initialized and cannot be
  re-initialized.

  Args:
    dataset: A `tf.data.Dataset`.

  Returns:
    A `tf.compat.v1.data.Iterator` over the elements of `dataset`.
  """
  try:
    # V1 datasets implement the method directly.
    return dataset._make_one_shot_iterator()  # pylint: disable=protected-access
  except AttributeError:
    # V2 datasets don't; wrap in the V1 adapter and retry.
    return DatasetV1Adapter(dataset)._make_one_shot_iterator()  # pylint: disable=protected-access
@tf_export(v1=["data.make_initializable_iterator"])
def make_initializable_iterator(dataset, shared_name=None):
  """Creates a `tf.compat.v1.data.Iterator` that must be initialized by the
  caller (by running its `initializer`) before use.

  Args:
    dataset: A `tf.data.Dataset`.
    shared_name: (Optional.) If non-empty, the iterator resource will be
      shared under the given name across multiple sessions.

  Returns:
    A `tf.compat.v1.data.Iterator` over the elements of `dataset`.
  """
  try:
    # V1 datasets implement the method directly.
    return dataset._make_initializable_iterator(shared_name)  # pylint: disable=protected-access
  except AttributeError:
    # V2 datasets don't; wrap in the V1 adapter and retry.
    return DatasetV1Adapter(dataset)._make_initializable_iterator(shared_name)  # pylint: disable=protected-access
@tf_export("data.experimental.get_structure")
def get_structure(dataset_or_iterator):
  """Returns the type specification of an element of the given dataset or
  iterator.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or an iterator over one.

  Returns:
    The `element_spec` of the argument.

  Raises:
    TypeError: If the argument does not expose `element_spec`.
  """
  try:
    spec = dataset_or_iterator.element_spec
  except AttributeError:
    raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator "
                    "object, but got %s." % type(dataset_or_iterator))
  return spec
@tf_export(v1=["data.get_output_classes"])
def get_legacy_output_classes(dataset_or_iterator):
  """Returns the legacy (pre-`TypeSpec`) output classes of the element
  structure of the given dataset or iterator."""
  spec = get_structure(dataset_or_iterator)
  return nest.map_structure(
      lambda component: component._to_legacy_output_classes(),  # pylint: disable=protected-access
      spec)
@tf_export(v1=["data.get_output_shapes"])
def get_legacy_output_shapes(dataset_or_iterator):
  """Returns the legacy (pre-`TypeSpec`) output shapes of the element
  structure of the given dataset or iterator."""
  spec = get_structure(dataset_or_iterator)
  return nest.map_structure(
      lambda component: component._to_legacy_output_shapes(),  # pylint: disable=protected-access
      spec)
@tf_export(v1=["data.get_output_types"])
def get_legacy_output_types(dataset_or_iterator):
  """Returns the legacy (pre-`TypeSpec`) output types of the element
  structure of the given dataset or iterator."""
  spec = get_structure(dataset_or_iterator)
  return nest.map_structure(
      lambda component: component._to_legacy_output_types(),  # pylint: disable=protected-access
      spec)
@tf_export("data.Options")
class Options(options_lib.OptionsBase):
experimental_deterministic = options_lib.create_option(
name="experimental_deterministic",
ty=bool,
docstring=
"Whether the outputs need to be produced in deterministic order. If None,"
" defaults to True.")
experimental_distribute = options_lib.create_option(
name="experimental_distribute",
ty=distribute_options.DistributeOptions,
docstring=
"The distribution strategy options associated with the dataset. See "
"`tf.data.experimental.DistributeOptions` for more details.",
default_factory=distribute_options.DistributeOptions)
experimental_optimization = options_lib.create_option(
name="experimental_optimization",
ty=optimization_options.OptimizationOptions,
docstring=
"The optimization options associated with the dataset. See "
"`tf.data.experimental.OptimizationOptions` for more details.",
default_factory=optimization_options.OptimizationOptions)
experimental_slack = options_lib.create_option(
name="experimental_slack",
ty=bool,
docstring="Whether to introduce 'slack' in the last `prefetch` of the "
"input pipeline, if it exists. This may reduce CPU contention with "
"accelerator host-side activity at the start of a step. The slack "
"frequency is determined by the number of devices attached to this "
"input pipeline. If None, defaults to False.")
experimental_stats = options_lib.create_option(
name="experimental_stats",
ty=stats_options.StatsOptions,
docstring=
"The statistics options associated with the dataset. See "
"`tf.data.experimental.StatsOptions` for more details.",
default_factory=stats_options.StatsOptions)
experimental_threading = options_lib.create_option(
name="experimental_threading",
ty=threading_options.ThreadingOptions,
docstring=
"The threading options associated with the dataset. See "
"`tf.data.experimental.ThreadingOptions` for more details.",
default_factory=threading_options.ThreadingOptions)
experimental_external_state_policy = options_lib.create_option(
name="experimental_external_state_policy",
ty=distribute_options.ExternalStatePolicy,
docstring="By default, tf.data will refuse to serialize a dataset or "
"checkpoint its iterator if the dataset contains a stateful op as the "
"serialization / checkpointing won't be able to capture its state. "
"Users can -- at their own risk -- override this restriction by "
"explicitly specifying that they are fine throwing away the state "
"in these ops. There are three settings available - IGNORE: in which we"
"completely ignore any state; WARN: We warn the user that some state "
"might be thrown away; FAIL: We fail if any state is being captured.",
default_factory=lambda: distribute_options.ExternalStatePolicy.WARN)
  def _graph_rewrites(self):
    """Produces the list of enabled static graph rewrites.

    Returns:
      A list of rewrite names, combining the optimization options (or their
      defaults) with the rewrites implied by other option fields.
    """
    result = []
    if self.experimental_optimization is not None:
      result.extend(self.experimental_optimization._graph_rewrites())  # pylint: disable=protected-access
    else:
      # Apply default options
      result.extend(
          optimization_options.OptimizationOptions()._graph_rewrites())  # pylint: disable=protected-access
    # `is False` deliberately excludes None: the unset default means
    # "deterministic", so only an explicit False enables sloppiness.
    if self.experimental_deterministic is False:
      result.append("make_sloppy")
    if self.experimental_stats and self.experimental_stats.latency_all_edges:
      result.append("latency_all_edges")
    if self.experimental_slack:
      result.append("slack")
    if (self.experimental_distribute and
        self.experimental_distribute._make_stateless):  # pylint: disable=protected-access
      result.append("make_stateless")
    return result
def _graph_rewrite_configs(self):
result = []
if self.experimental_optimization:
result.extend(self.experimental_optimization._graph_rewrite_configs()) # pylint: disable=protected-access
if self.experimental_slack:
num_devices = self.experimental_distribute.num_devices
if num_devices is None:
num_devices = 1
result.append("slack:slack_period:%d" % num_devices)
return result
  def _autotune_settings(self):
    """Returns the autotuning settings `(autotune, algorithm, cpu_budget)`,
    falling back to the defaults when no optimization options are set."""
    if self.experimental_optimization is not None:
      return self.experimental_optimization._autotune_settings()  # pylint: disable=protected-access
    # Return default autotune options
    return optimization_options.OptimizationOptions()._autotune_settings()  # pylint: disable=protected-access
  def merge(self, options):
    """Merges itself with the given `tf.data.Options`.

    Args:
      options: a `tf.data.Options` to merge with.

    Returns:
      A new `tf.data.Options` object which is the result of merging self with
      the input `tf.data.Options`.
    """
    return options_lib.merge_options(self, options)
class DatasetSource(DatasetV2):
  """Abstract class representing a dataset with no inputs."""

  def _inputs(self):
    # Source datasets are the roots of an input pipeline.
    return []
class UnaryDataset(DatasetV2):
  """Abstract class representing a dataset with one input."""

  def __init__(self, input_dataset, variant_tensor):
    self._input_dataset = input_dataset
    super(UnaryDataset, self).__init__(variant_tensor)

  def _inputs(self):
    return [self._input_dataset]
class UnaryUnchangedStructureDataset(UnaryDataset):
  """Represents a unary dataset with the same input and output structure."""

  def __init__(self, input_dataset, variant_tensor):
    self._input_dataset = input_dataset
    super(UnaryUnchangedStructureDataset, self).__init__(
        input_dataset, variant_tensor)

  @property
  def element_spec(self):
    # Structure-preserving transformations simply forward the input's spec.
    return self._input_dataset.element_spec
class TensorDataset(DatasetSource):
  """A `Dataset` with a single element comprising the given tensors."""

  def __init__(self, element):
    """See `Dataset.from_tensors()` for details."""
    element = structure.normalize_element(element)
    self._structure = structure.type_spec_from_value(element)
    self._tensors = structure.to_tensor_list(self._structure, element)
    variant_tensor = gen_dataset_ops.tensor_dataset(
        self._tensors,
        output_shapes=structure.get_flat_tensor_shapes(self._structure))
    super(TensorDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class TensorSliceDataset(DatasetSource):
  """A `Dataset` of slices from a dataset element."""

  def __init__(self, element):
    """See `Dataset.from_tensor_slices()` for details."""
    element = structure.normalize_element(element)
    batched_spec = structure.type_spec_from_value(element)
    self._tensors = structure.to_batched_tensor_list(batched_spec, element)
    # Element spec is the per-slice (unbatched) version of the input's spec.
    self._structure = nest.map_structure(
        lambda component_spec: component_spec._unbatch(), batched_spec)  # pylint: disable=protected-access
    # All components must agree on the size of the leading (slice) dimension.
    batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value(
        self._tensors[0].get_shape()[0]))
    for t in self._tensors[1:]:
      batch_dim.assert_is_compatible_with(tensor_shape.Dimension(
          tensor_shape.dimension_value(t.get_shape()[0])))
    variant_tensor = gen_dataset_ops.tensor_slice_dataset(
        self._tensors,
        output_shapes=structure.get_flat_tensor_shapes(self._structure))
    super(TensorSliceDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class SparseTensorSliceDataset(DatasetSource):
  """A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows."""

  def __init__(self, sparse_tensor):
    """See `Dataset.from_sparse_tensor_slices()` for details."""
    if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
      raise TypeError(
          "`sparse_tensor` must be a `tf.SparseTensor` object. Was {}.".format(
              sparse_tensor))
    self._sparse_tensor = sparse_tensor

    indices_shape = self._sparse_tensor.indices.get_shape()
    shape_shape = self._sparse_tensor.dense_shape.get_shape()
    # Each element drops the leading (sliced) dimension, hence rank - 1.
    rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)
    # Elements are (indices, values, dense_shape) triples of a sparse row.
    self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64),
                       tensor_spec.TensorSpec([None],
                                              self._sparse_tensor.dtype),
                       tensor_spec.TensorSpec([rank], dtypes.int64))

    variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(
        self._sparse_tensor.indices, self._sparse_tensor.values,
        self._sparse_tensor.dense_shape)
    super(SparseTensorSliceDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class _VariantDataset(DatasetV2):
  """A Dataset wrapper around a `tf.variant`-typed function argument."""

  def __init__(self, dataset_variant, structure):
    self._structure = structure
    super(_VariantDataset, self).__init__(dataset_variant)

  def _inputs(self):
    # The underlying dataset is opaque (a variant tensor), so there are no
    # visible input datasets.
    return []

  @property
  def element_spec(self):
    return self._structure
class _NestedVariant(composite_tensor.CompositeTensor):
  """A composite tensor wrapping a batched dataset variant tensor."""

  def __init__(self, variant_tensor, element_spec, dataset_shape):
    self._variant_tensor = variant_tensor
    self._element_spec = element_spec
    self._dataset_shape = dataset_shape

  @property
  def _type_spec(self):
    return DatasetSpec(self._element_spec, self._dataset_shape)
@tf_export("data.experimental.from_variant")
def from_variant(variant, structure):
  """Constructs a dataset from the given variant and (element) structure.

  Args:
    variant: A scalar `tf.variant` tensor representing a dataset.
    structure: A (nested) `tf.TypeSpec` structure of the dataset's elements.

  Returns:
    A `tf.data.Dataset` instance.
  """
  return _VariantDataset(variant, structure)  # pylint: disable=protected-access
@tf_export("data.experimental.to_variant")
def to_variant(dataset):
  """Returns a variant representing the given dataset.

  Args:
    dataset: A `tf.data.Dataset`.

  Returns:
    A scalar `tf.variant` tensor representing the given dataset.
  """
  return dataset._variant_tensor  # pylint: disable=protected-access
@tf_export(
    "data.DatasetSpec",
    v1=["data.DatasetSpec", "data.experimental.DatasetStructure"])
class DatasetSpec(type_spec.BatchableTypeSpec):
  """Type specification for `tf.data.Dataset`.

  A `DatasetSpec` records the element spec of a dataset together with a
  `dataset_shape` describing how datasets themselves are batched (a scalar
  shape for a single dataset).
  """

  __slots__ = ["_element_spec", "_dataset_shape"]

  def __init__(self, element_spec, dataset_shape=()):
    self._element_spec = element_spec
    self._dataset_shape = tensor_shape.as_shape(dataset_shape)

  @property
  def value_type(self):
    return _VariantDataset

  def _serialize(self):
    return (self._element_spec, self._dataset_shape)

  @property
  def _component_specs(self):
    # A dataset is represented by a single variant-dtype tensor.
    return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant)

  def _to_components(self, value):
    return value._variant_tensor  # pylint: disable=protected-access

  def _from_components(self, components):
    # pylint: disable=protected-access
    if self._dataset_shape.ndims == 0:
      # Scalar dataset shape: a single (unbatched) dataset.
      return _VariantDataset(components, self._element_spec)
    else:
      # Non-scalar dataset shape: a batch of datasets.
      return _NestedVariant(components, self._element_spec, self._dataset_shape)

  def _to_tensor_list(self, value):
    return [
        ops.convert_to_tensor(
            tf_nest.map_structure(lambda x: x._variant_tensor, value))  # pylint: disable=protected-access
    ]

  @staticmethod
  def from_value(value):
    """Creates a `DatasetSpec` for the given `tf.data.Dataset` value."""
    return DatasetSpec(value.element_spec)  # pylint: disable=protected-access

  def _batch(self, batch_size):
    # Prepend the (possibly unknown) batch dimension to the dataset shape.
    return DatasetSpec(
        self._element_spec,
        tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape))

  def _unbatch(self):
    if self._dataset_shape.ndims == 0:
      raise ValueError("Unbatching a dataset is only supported for rank >= 1")
    return DatasetSpec(self._element_spec, self._dataset_shape[1:])

  def _to_batched_tensor_list(self, value):
    if self._dataset_shape.ndims == 0:
      raise ValueError("Unbatching a dataset is only supported for rank >= 1")
    return self._to_tensor_list(value)

  def _to_legacy_output_types(self):
    return self

  def _to_legacy_output_shapes(self):
    return self

  def _to_legacy_output_classes(self):
    return self
class StructuredFunctionWrapper(object):
  """A function wrapper that supports structured arguments and return values."""

  # pylint: disable=protected-access
  def __init__(self,
               func,
               transformation_name,
               dataset=None,
               input_classes=None,
               input_shapes=None,
               input_types=None,
               input_structure=None,
               add_to_graph=True,
               use_legacy_function=False,
               defun_kwargs=None):
    """Creates a new `StructuredFunctionWrapper` for the given function.

    The input structure of `func` must be specified in exactly one of three
    ways: via `dataset`, via `input_structure`, or via the legacy triple
    (`input_classes`, `input_shapes`, `input_types`).

    Args:
      func: A function from a (nested) structure to another (nested) structure.
      transformation_name: Human-readable name of the transformation in which
        this function is being instantiated; used in error/warning messages.
      dataset: (Optional.) A `tf.data.Dataset` whose `element_spec` defines
        the input structure of `func`.
      input_classes: (Optional.) A (nested) structure of `type` (legacy).
      input_shapes: (Optional.) A (nested) structure of `tf.TensorShape`
        (legacy).
      input_types: (Optional.) A (nested) structure of `tf.DType` (legacy).
      input_structure: (Optional.) A (nested) structure of `tf.TypeSpec`
        defining the input structure of `func` directly.
      add_to_graph: (Optional.) If `True`, add the traced function to the
        default graph (forced off in eager mode, forced on for legacy
        functions).
      use_legacy_function: (Optional.) If `True`, trace with
        `function.Defun`; otherwise use `defun_with_attributes`.
      defun_kwargs: (Optional.) A dictionary of keyword arguments forwarded
        to the function-tracing machinery.

    Raises:
      ValueError: If an invalid combination of `dataset`, `input_structure`,
        `input_classes`, `input_shapes`, and `input_types` is passed.
    """
    if input_structure is None:
      if dataset is None:
        if input_classes is None or input_shapes is None or input_types is None:
          raise ValueError("Either `dataset`, `input_structure` or all of "
                           "`input_classes`, `input_shapes`, and `input_types` "
                           "must be specified.")
        self._input_structure = structure.convert_legacy_structure(
            input_types, input_shapes, input_classes)
      else:
        if not (input_classes is None and input_shapes is None and
                input_types is None):
          raise ValueError("Either `dataset`, `input_structure` or all of "
                           "`input_classes`, `input_shapes`, and `input_types` "
                           "must be specified.")
        self._input_structure = dataset.element_spec
    else:
      if not (dataset is None and input_classes is None and input_shapes is None
              and input_types is None):
        raise ValueError("Either `dataset`, `input_structure`, or all of "
                         "`input_classes`, `input_shapes`, and `input_types` "
                         "must be specified.")
      self._input_structure = input_structure

    self._func = func

    # There is no graph to add in eager mode.
    add_to_graph &= not context.executing_eagerly()
    # There are some lifetime issues when a legacy function is not added to a
    # out-living graph. It's already deprecated so de-priotizing the fix.
    add_to_graph |= use_legacy_function

    if defun_kwargs is None:
      defun_kwargs = {}

    # Derive a legal, readable name for the traced function from the
    # transformation name (e.g. "Dataset.map()" -> "Dataset_map") and the
    # name of the user's function.
    readable_transformation_name = transformation_name.replace(
        ".", "_")[:-2] if len(transformation_name) > 2 else ""
    func_name = "_".join(
        [readable_transformation_name,
         function_utils.get_func_name(func)])
    for symbol in ["<", ">", "\\", "'", " "]:
      func_name = func_name.replace(symbol, "")

    ag_ctx = autograph_ctx.control_status_ctx()

    def _warn_if_collections(transformation_name):
      """Warns that resource creation inside `func` is unsupported."""
      warnings.warn("Creating resources inside a function passed to %s "
                    "is not supported. Create each resource outside the "
                    "function, and capture it inside the function to use it." %
                    transformation_name, stacklevel=5)

    def _wrapper_helper(*args):
      """Unpacks flat tensor args, applies `func`, and records its output spec."""
      nested_args = structure.from_compatible_tensor_list(
          self._input_structure, args)
      if not _should_unpack_args(nested_args):
        nested_args = (nested_args,)

      ret = autograph.tf_convert(func, ag_ctx)(*nested_args)
      # If `func` returns a list of tensors, `nest.flatten()` and
      # `ops.convert_to_tensor()` would conspire to attempt to stack
      # those tensors into a single tensor, because the customized
      # version of `nest.flatten()` does not recurse into lists. Since
      # it is more likely that the list arose from returning the
      # result of an operation (such as `tf.numpy_function()`) that returns a
      # list of not-necessarily-stackable tensors, we treat the
      # returned value is a `tuple` instead. A user wishing to pack
      # the return value into a single tensor can use an explicit
      # `tf.stack()` before returning.
      if isinstance(ret, list):
        ret = tuple(ret)

      try:
        self._output_structure = structure.type_spec_from_value(ret)
      except (ValueError, TypeError):
        six.reraise(
            TypeError,
            TypeError("Unsupported return value from function passed to "
                      "%s: %s." % (transformation_name, ret)),
            sys.exc_info()[2])
      return ret

    if use_legacy_function:
      # Legacy path: trace with `function.Defun`. A uid suffix keeps names
      # unique across repeated tracings.
      func_name = func_name + "_" + str(ops.uid())

      @function.Defun(
          *structure.get_flat_tensor_types(self._input_structure),
          func_name=func_name,
          **defun_kwargs)
      def wrapper_fn(*args):
        ret = _wrapper_helper(*args)
        # _warn_if_collections(transformation_name, ops.get_default_graph(), 0)
        return structure.to_tensor_list(self._output_structure, ret)

      self._function = wrapper_fn
      resource_tracker = tracking.ResourceTracker()
      with tracking.resource_tracker_scope(resource_tracker):
        if add_to_graph:
          self._function.add_to_graph(ops.get_default_graph())
        else:
          # Use the private method that will execute `wrapper_fn` but delay
          # adding it to the graph in case (e.g.) we need to rerun the function.
          self._function._create_definition_if_needed()
      if resource_tracker.resources:
        _warn_if_collections(transformation_name)

    else:
      defun_kwargs.update({"func_name": func_name})

      # Note: _wrapper_helper will apply autograph based on context.
      @eager_function.defun_with_attributes(
          input_signature=structure.get_flat_tensor_specs(
              self._input_structure),
          autograph=False,
          attributes=defun_kwargs)
      def wrapper_fn(*args):  # pylint: disable=missing-docstring
        ret = _wrapper_helper(*args)
        ret = structure.to_tensor_list(self._output_structure, ret)
        return [ops.convert_to_tensor(t) for t in ret]

      resource_tracker = tracking.ResourceTracker()
      with tracking.resource_tracker_scope(resource_tracker):
        # TODO(b/141462134): Switch to using garbage collection.
        self._function = wrapper_fn.get_concrete_function()
        if add_to_graph:
          self._function.add_to_graph(ops.get_default_graph())
      if resource_tracker.resources:
        _warn_if_collections(transformation_name)

      outer_graph_seed = ops.get_default_graph().seed
      if outer_graph_seed and self._function.graph.seed == outer_graph_seed:
        if self._function.graph._seed_used:
          warnings.warn(
              "Seed %s from outer graph might be getting used by function %s, "
              "if the random op has not been provided any seed. Explicitly set "
              "the seed in the function if this is not the intended behavior."
              %(outer_graph_seed, func_name), stacklevel=4)
  # pylint: enable=protected-access

  @property
  def output_structure(self):
    # Set as a side effect of tracing `func` (see `_wrapper_helper`).
    return self._output_structure

  @property
  def output_classes(self):
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
        self._output_structure)

  @property
  def output_shapes(self):
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
        self._output_structure)

  @property
  def output_types(self):
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
        self._output_structure)

  @property
  def function(self):
    return self._function
class _GeneratorDataset(DatasetSource):
  """A `Dataset` that generates elements by invoking a function."""

  def __init__(self, init_args, init_func, next_func, finalize_func):
    """Constructs a `_GeneratorDataset`.

    Args:
      init_args: A (nested) structure representing the arguments to
        `init_func`.
      init_func: A function mapping `init_args` to a state that is passed to
        `next_func` and `finalize_func`.
      next_func: A function mapping the state to a dataset element.
      finalize_func: A function mapping the state to cleanup actions when
        iteration ends.
    """
    self._init_args = init_args

    self._init_structure = structure.type_spec_from_value(init_args)

    self._init_func = StructuredFunctionWrapper(
        init_func,
        self._transformation_name(),
        input_structure=self._init_structure)

    # Both `next_func` and `finalize_func` consume the state produced by
    # `init_func`, hence they share its output structure as input.
    self._next_func = StructuredFunctionWrapper(
        next_func,
        self._transformation_name(),
        input_structure=self._init_func.output_structure)

    self._finalize_func = StructuredFunctionWrapper(
        finalize_func,
        self._transformation_name(),
        input_structure=self._init_func.output_structure)
    variant_tensor = gen_dataset_ops.generator_dataset(
        structure.to_tensor_list(self._init_structure, self._init_args) +
        self._init_func.function.captured_inputs,
        self._next_func.function.captured_inputs,
        self._finalize_func.function.captured_inputs,
        init_func=self._init_func.function,
        next_func=self._next_func.function,
        finalize_func=self._finalize_func.function,
        **self._flat_structure)
    super(_GeneratorDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    # Elements are whatever `next_func` produces.
    return self._next_func.output_structure

  def _transformation_name(self):
    return "Dataset.from_generator()"
class ZipDataset(DatasetV2):
  """A `Dataset` that zips its inputs together."""

  def __init__(self, datasets):
    """See `Dataset.zip()` for details."""
    for ds in nest.flatten(datasets):
      if not isinstance(ds, DatasetV2):
        if isinstance(ds, list):
          # Lists are not supported by the customized `nest` used by tf.data;
          # point users to tuples instead.
          message = ("The argument to `Dataset.zip()` must be a nested "
                     "structure of `Dataset` objects. Nested structures do not "
                     "support Python lists; please use a tuple instead.")
        else:
          message = ("The argument to `Dataset.zip()` must be a nested "
                     "structure of `Dataset` objects.")
        raise TypeError(message)
    self._datasets = datasets
    # The element structure mirrors the structure of the input datasets.
    self._structure = nest.pack_sequence_as(
        self._datasets,
        [ds.element_spec for ds in nest.flatten(self._datasets)])
    variant_tensor = gen_dataset_ops.zip_dataset(
        [ds._variant_tensor for ds in nest.flatten(self._datasets)],
        **self._flat_structure)
    super(ZipDataset, self).__init__(variant_tensor)

  def _inputs(self):
    return nest.flatten(self._datasets)

  @property
  def element_spec(self):
    return self._structure
class ConcatenateDataset(DatasetV2):
  """A `Dataset` that concatenates its input with given dataset."""

  def __init__(self, input_dataset, dataset_to_concatenate):
    """See `Dataset.concatenate()` for details."""
    self._input_dataset = input_dataset
    self._dataset_to_concatenate = dataset_to_concatenate

    # Both datasets must agree on element types and classes; only their
    # shapes may differ, in which case a most-specific compatible shape is
    # computed per component.
    output_types = get_legacy_output_types(input_dataset)
    if output_types != get_legacy_output_types(dataset_to_concatenate):
      raise TypeError(
          "Two datasets to concatenate have different types %s and %s" %
          (output_types, get_legacy_output_types(dataset_to_concatenate)))

    output_classes = get_legacy_output_classes(input_dataset)
    if output_classes != get_legacy_output_classes(dataset_to_concatenate):
      raise TypeError(
          "Two datasets to concatenate have different classes %s and %s" %
          (output_classes, get_legacy_output_classes(dataset_to_concatenate)))

    input_shapes = get_legacy_output_shapes(self._input_dataset)
    output_shapes = nest.pack_sequence_as(input_shapes, [
        ts1.most_specific_compatible_shape(ts2)
        for (ts1, ts2) in zip(
            nest.flatten(input_shapes),
            nest.flatten(get_legacy_output_shapes(
                self._dataset_to_concatenate)))
    ])

    self._structure = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)

    self._input_datasets = [input_dataset, dataset_to_concatenate]
    # pylint: disable=protected-access
    variant_tensor = gen_dataset_ops.concatenate_dataset(
        input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor,
        **self._flat_structure)
    # pylint: enable=protected-access
    super(ConcatenateDataset, self).__init__(variant_tensor)

  def _inputs(self):
    return self._input_datasets

  @property
  def element_spec(self):
    return self._structure
class RepeatDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that repeats its input several times."""

  def __init__(self, input_dataset, count):
    """See `Dataset.repeat()` for details."""
    self._input_dataset = input_dataset
    if count is None:
      # `count=None` means repeat indefinitely, encoded as -1 for the op.
      self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
    else:
      self._count = ops.convert_to_tensor(
          count, dtype=dtypes.int64, name="count")
    variant_tensor = gen_dataset_ops.repeat_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        count=self._count,
        **self._flat_structure)
    super(RepeatDataset, self).__init__(input_dataset, variant_tensor)
class RangeDataset(DatasetSource):
  """A `Dataset` of a step-separated range of values."""

  def __init__(self, *args, **kwargs):
    """See `Dataset.range()` for details."""
    self._parse_args(*args, **kwargs)
    self._structure = tensor_spec.TensorSpec([], self._output_type)
    variant_tensor = gen_dataset_ops.range_dataset(
        start=self._start,
        stop=self._stop,
        step=self._step,
        **self._flat_structure)
    super(RangeDataset, self).__init__(variant_tensor)

  def _parse_args(self, *args, **kwargs):
    """Parses (stop,), (start, stop), or (start, stop, step) like `range()`."""
    if len(args) == 1:
      self._start = self._build_tensor(0, "start")
      self._stop = self._build_tensor(args[0], "stop")
      self._step = self._build_tensor(1, "step")
    elif len(args) == 2:
      self._start = self._build_tensor(args[0], "start")
      self._stop = self._build_tensor(args[1], "stop")
      self._step = self._build_tensor(1, "step")
    elif len(args) == 3:
      self._start = self._build_tensor(args[0], "start")
      self._stop = self._build_tensor(args[1], "stop")
      self._step = self._build_tensor(args[2], "step")
    else:
      raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))
    if "output_type" in kwargs:
      self._output_type = kwargs["output_type"]
    else:
      # Elements default to int64.
      self._output_type = dtypes.int64

  def _build_tensor(self, int64_value, name):
    return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)

  @property
  def element_spec(self):
    return self._structure
class _MemoryCacheDeleter(object):
  """An object which cleans up an anonymous memory cache resource.

  An alternative to defining a `__del__` method on an object. Even if the
  parent object is part of a reference cycle, the cycle will be collectable.
  """

  def __init__(self, handle, device, deleter):
    self._deleter = deleter
    self._handle = handle
    self._device = device
    # Remember the mode at creation time so deletion can match it.
    self._eager_mode = context.executing_eagerly()

  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      if self._eager_mode:
        with context.eager_mode():
          gen_dataset_ops.delete_memory_cache(
              handle=self._handle, deleter=self._deleter)
      else:
        with context.graph_mode():
          gen_dataset_ops.delete_memory_cache(
              handle=self._handle, deleter=self._deleter)
class _MemoryCache(object):
  """Represents a memory cache resource with automatic cleanup."""

  def __init__(self):
    super(_MemoryCache, self).__init__()
    self._device = context.context().device_name
    self._handle, self._deleter = (gen_dataset_ops.anonymous_memory_cache())
    # The deleter object releases the resource when this cache is collected.
    self._resource_deleter = _MemoryCacheDeleter(
        handle=self._handle, device=self._device, deleter=self._deleter)

  @property
  def handle(self):
    return self._handle
class CacheDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that caches elements of its input.

  An empty `filename` caches in memory; in TF2 eager mode (or inside a
  function being built) an anonymous memory cache resource is used so that
  the cache's lifetime is tied to this dataset object.
  """

  def __init__(self, input_dataset, filename):
    """See `Dataset.cache()` for details."""
    self._input_dataset = input_dataset
    self._filename = ops.convert_to_tensor(
        filename, dtype=dtypes.string, name="filename")
    if tf2.enabled() and (context.executing_eagerly() or
                          ops.get_default_graph()._building_function):  # pylint: disable=protected-access
      self._cache = _MemoryCache()
      variant_tensor = gen_dataset_ops.cache_dataset_v2(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          filename=self._filename,
          cache=self._cache.handle,
          **self._flat_structure)
    else:
      variant_tensor = gen_dataset_ops.cache_dataset(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          filename=self._filename,
          **self._flat_structure)
    super(CacheDataset, self).__init__(input_dataset, variant_tensor)
class _RandomSeedGeneratorDeleter(object):
  """An object which cleans up an anonymous random seed generator resource.

  An alternative to defining a `__del__` method on an object. Even if the
  parent object is part of a reference cycle, the cycle will be collectable.
  """

  def __init__(self, handle, device, deleter):
    self._deleter = deleter
    self._handle = handle
    self._device = device
    # Remember the mode at creation time so deletion can match it.
    self._eager_mode = context.executing_eagerly()

  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      if self._eager_mode:
        with context.eager_mode():
          gen_dataset_ops.delete_random_seed_generator(
              handle=self._handle, deleter=self._deleter)
      else:
        with context.graph_mode():
          gen_dataset_ops.delete_random_seed_generator(
              handle=self._handle, deleter=self._deleter)
class _RandomSeedGenerator(object):
  """Represents a random seed generator resource with automatic cleanup."""

  def __init__(self, seed, seed2):
    super(_RandomSeedGenerator, self).__init__()
    self._device = context.context().device_name
    self._handle, self._deleter = (
        gen_dataset_ops.anonymous_random_seed_generator(seed=seed, seed2=seed2))
    # The deleter object releases the resource when this object is collected.
    self._resource_deleter = _RandomSeedGeneratorDeleter(
        handle=self._handle, device=self._device, deleter=self._deleter)

  @property
  def handle(self):
    return self._handle
class ShuffleDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that randomly shuffles the elements of its input."""

  def __init__(self,
               input_dataset,
               buffer_size,
               seed=None,
               reshuffle_each_iteration=None):
    """See `Dataset.shuffle()` for details.

    Args:
      input_dataset: The input `Dataset`.
      buffer_size: The number of elements from this dataset from which the
        new dataset will sample.
      seed: (Optional.) Seed for the random number generator.
      reshuffle_each_iteration: (Optional.) If `True` (the default), the
        dataset is pseudorandomly reshuffled each time it is iterated over.
    """
    self._input_dataset = input_dataset
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")
    self._seed, self._seed2 = random_seed.get_seed(seed)
    if reshuffle_each_iteration is None:
      self._reshuffle_each_iteration = True
    else:
      self._reshuffle_each_iteration = reshuffle_each_iteration

    if tf2.enabled() and self._reshuffle_each_iteration and (
        context.executing_eagerly() or
        ops.get_default_graph()._building_function):  # pylint: disable=protected-access
      # TF2 reshuffling path: use an anonymous seed generator resource so
      # each iteration draws a new shuffle order.
      self._seed_generator = _RandomSeedGenerator(self._seed, self._seed2)
      variant_tensor = gen_dataset_ops.shuffle_dataset_v2(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          buffer_size=self._buffer_size,
          seed_generator=self._seed_generator.handle,
          **self._flat_structure)
    else:
      variant_tensor = gen_dataset_ops.shuffle_dataset(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          buffer_size=self._buffer_size,
          seed=self._seed,
          seed2=self._seed2,
          reshuffle_each_iteration=self._reshuffle_each_iteration,
          **self._flat_structure)
    super(ShuffleDataset, self).__init__(input_dataset, variant_tensor)
class TakeDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` containing the first `count` elements from its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.take()` for details."""
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
    variant_tensor = gen_dataset_ops.take_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        count=self._count,
        **self._flat_structure)
    super(TakeDataset, self).__init__(input_dataset, variant_tensor)
class SkipDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` skipping the first `count` elements from its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.skip()` for details."""
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
    variant_tensor = gen_dataset_ops.skip_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        count=self._count,
        **self._flat_structure)
    super(SkipDataset, self).__init__(input_dataset, variant_tensor)
class ShardDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` for sharding its input."""

  def __init__(self, input_dataset, num_shards, index):
    """See `Dataset.shard()` for details."""
    self._input_dataset = input_dataset
    self._num_shards = ops.convert_to_tensor(
        num_shards, dtype=dtypes.int64, name="num_shards")
    self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name="index")
    variant_tensor = gen_dataset_ops.shard_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        num_shards=self._num_shards,
        index=self._index,
        **self._flat_structure)
    super(ShardDataset, self).__init__(input_dataset, variant_tensor)
class BatchDataset(UnaryDataset):
  """A `Dataset` that batches contiguous elements from its input."""

  def __init__(self, input_dataset, batch_size, drop_remainder):
    """See `Dataset.batch()` for details."""
    self._input_dataset = input_dataset
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")

    constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)
    # pylint: disable=protected-access
    if constant_drop_remainder:
      # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)
      # or `False` (explicitly retaining the remainder).
      # pylint: disable=g-long-lambda
      constant_batch_size = tensor_util.constant_value(self._batch_size)
      # When the remainder is statically known to be dropped, the batch
      # dimension of every component can be fixed to the batch size.
      self._structure = nest.map_structure(
          lambda component_spec: component_spec._batch(constant_batch_size),
          input_dataset.element_spec)
    else:
      # Otherwise the last batch may be partial, so the batch dimension
      # remains unknown.
      self._structure = nest.map_structure(
          lambda component_spec: component_spec._batch(None),
          input_dataset.element_spec)
    variant_tensor = gen_dataset_ops.batch_dataset_v2(
        input_dataset._variant_tensor,
        batch_size=self._batch_size,
        drop_remainder=self._drop_remainder,
        **self._flat_structure)
    super(BatchDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    return self._structure
class _NumpyIterator(object):
  """Iterator over a dataset that yields each element's numpy values."""

  def __init__(self, dataset):
    self._iterator = iter(dataset)

  def __iter__(self):
    return self

  def __next__(self):
    # Call `.numpy()` on every component of the next element.
    return nest.map_structure(lambda x: x.numpy(), next(self._iterator))

  # Python 2 compatibility alias.
  next = __next__
class _VariantTracker(tracking.CapturableResource):
  """Allows export of functions capturing a Dataset in SavedModels.

  Args:
    variant_tensor: The dataset's variant tensor, used as the resource handle.
    resource_creator: A zero-argument function expected to recreate the
      variant tensor.
  """

  def __init__(self, variant_tensor, resource_creator):
    super(_VariantTracker, self).__init__(device="CPU")
    self._resource_handle = variant_tensor
    self._create_resource = resource_creator
def _is_padded_shape_compatible_with(padded_shape, input_component_shape):
if padded_shape.dims is None or input_component_shape.dims is None:
return True
if len(padded_shape.dims) != len(input_component_shape.dims):
return False
for padded_dim, input_dim in zip(
padded_shape.dims, input_component_shape.dims):
if (padded_dim.value is not None and input_dim.value is not None
and padded_dim.value < input_dim.value):
return False
return True
def _padded_shape_to_tensor(padded_shape, input_component_shape):
  """Converts `padded_shape` to an `int64` tensor, validating compatibility.

  Args:
    padded_shape: A shape-like object: a `tf.TensorShape`, a Python
      shape-convertible value, or a 1-D `int64` tensor.
    input_component_shape: The `tf.TensorShape` of the corresponding input
      component, used for compatibility checking.

  Returns:
    A 1-D `int64` tensor representing `padded_shape`, with `-1` in place of
    unknown dimensions.

  Raises:
    ValueError: If `padded_shape` is not rank 1, or is incompatible with
      `input_component_shape`.
    TypeError: If `padded_shape` is a tensor of a dtype other than `int64`.
  """
  try:
    # Try to convert the `padded_shape` to a `tf.TensorShape`
    padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
    # We will return the "canonical" tensor representation, which uses
    # `-1` in place of `None`.
    ret = ops.convert_to_tensor(
        [dim if dim is not None else -1
         for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)
  except (TypeError, ValueError):
    # The argument was not trivially convertible to a
    # `tf.TensorShape`, so fall back on the conversion to tensor
    # machinery.
    ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
    if ret.shape.dims is not None and len(ret.shape.dims) != 1:
      six.reraise(ValueError, ValueError(
          "Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
          "shape was %s." % (padded_shape, ret.shape)), sys.exc_info()[2])
    if ret.dtype != dtypes.int64:
      six.reraise(
          TypeError,
          TypeError(
              "Padded shape %s must be a 1-D tensor of tf.int64 values, but "
              "its element type was %s." % (padded_shape, ret.dtype.name)),
          sys.exc_info()[2])
    padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)

  if not _is_padded_shape_compatible_with(padded_shape_as_shape,
                                          input_component_shape):
    raise ValueError("The padded shape %s is not compatible with the "
                     "corresponding input component shape %s."
                     % (padded_shape_as_shape, input_component_shape))

  return ret
def _padding_value_to_tensor(value, output_type):
  """Converts the padding value to a scalar tensor of the given type.

  Args:
    value: The padding value.
    output_type: The expected `tf.DType` of the padding value.

  Returns:
    A scalar tensor with dtype `output_type`.

  Raises:
    ValueError: If `value` is not a scalar.
    TypeError: If `value`'s dtype does not equal `output_type`.
  """
  value = ops.convert_to_tensor(value, name="padding_value")
  if not value.shape.is_compatible_with(tensor_shape.TensorShape([])):
    raise ValueError("Padding value should be a scalar, but is not: %s" % value)
  if value.dtype != output_type:
    raise TypeError("Padding value tensor (%s) does not match output type: %s" %
                    (value, output_type))
  return value
def _padding_values_or_default(padding_values, input_dataset):
  """Returns padding values with `None` entries replaced by type defaults.

  Args:
    padding_values: A (nested) structure of padding values, possibly
      containing `None` entries to be filled in with defaults.
    input_dataset: The dataset whose legacy output types determine the
      default padding value for each component.

  Returns:
    A (nested) structure matching `padding_values` in which every `None` has
    been replaced by the default padding for the corresponding component
    type: the empty string for string components, zeros otherwise.

  Raises:
    TypeError: If a default padding value would be needed for a component of
      type `variant`, for which no default can be constructed.
  """

  def make_zero(t):
    """Returns the default padding value for a component of dtype `t`."""
    if t.base_dtype == dtypes.string:
      return ""
    elif t.base_dtype == dtypes.variant:
      # There is no meaningful "zero" for a variant-typed component; the
      # caller must supply an explicit padding value for it.
      # NOTE: The previous message referenced a nonexistent attribute
      # (`t.base_type`) and redundantly printed the dtype.
      raise TypeError(
          "Unable to create a default padding value for a component of type "
          "'variant'; please provide an explicit padding value for this "
          "component via `padding_values`.")
    else:
      return np.zeros_like(t.as_numpy_dtype())

  def value_or_default(value, default):
    return default if value is None else value

  default_padding = nest.map_structure(make_zero,
                                       get_legacy_output_types(input_dataset))
  return nest.map_structure_up_to(padding_values, value_or_default,
                                  padding_values, default_padding)
class PaddedBatchDataset(UnaryDataset):
  """A `Dataset` that batches and pads contiguous elements from its input."""

  def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,
               drop_remainder):
    """See `Dataset.padded_batch()` for details."""
    self._input_dataset = input_dataset

    if sparse.any_sparse(get_legacy_output_classes(input_dataset)):
      # TODO(b/63669786): support batching of sparse tensors
      raise TypeError(
          "Batching of padded sparse tensors is not currently supported")
    self._input_dataset = input_dataset
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    padding_values = _padding_values_or_default(padding_values, input_dataset)

    input_shapes = get_legacy_output_shapes(input_dataset)
    flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)

    # Convert each padded shape to a canonical int64 tensor, validating it
    # against the corresponding input component shape.
    flat_padded_shapes_as_tensors = []

    for input_component_shape, padded_shape in zip(
        nest.flatten(input_shapes), flat_padded_shapes):
      flat_padded_shapes_as_tensors.append(
          _padded_shape_to_tensor(padded_shape, input_component_shape))

    self._padded_shapes = nest.pack_sequence_as(input_shapes,
                                                flat_padded_shapes_as_tensors)

    self._padding_values = nest.map_structure_up_to(
        input_shapes, _padding_value_to_tensor, padding_values,
        get_legacy_output_types(input_dataset))
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")

    def _padded_shape_to_batch_shape(s):
      # The batch dimension is known statically only when the remainder is
      # statically known to be dropped and the batch size is constant.
      return tensor_shape.TensorShape([
          tensor_util.constant_value(self._batch_size)
          if smart_cond.smart_constant_value(self._drop_remainder) else None
      ]).concatenate(tensor_util.constant_value_as_shape(s))

    output_shapes = nest.map_structure(
        _padded_shape_to_batch_shape, self._padded_shapes)
    self._structure = structure.convert_legacy_structure(
        get_legacy_output_types(self._input_dataset), output_shapes,
        get_legacy_output_classes(self._input_dataset))

    # pylint: disable=protected-access
    # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.
    if smart_cond.smart_constant_value(self._drop_remainder) is False:
      variant_tensor = gen_dataset_ops.padded_batch_dataset(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          batch_size=self._batch_size,
          padded_shapes=[
              ops.convert_to_tensor(s, dtype=dtypes.int64)
              for s in nest.flatten(self._padded_shapes)
          ],
          padding_values=nest.flatten(self._padding_values),
          output_shapes=structure.get_flat_tensor_shapes(self._structure))
    else:
      variant_tensor = gen_dataset_ops.padded_batch_dataset_v2(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          batch_size=self._batch_size,
          padded_shapes=[
              ops.convert_to_tensor(s, dtype=dtypes.int64)
              for s in nest.flatten(self._padded_shapes)
          ],
          padding_values=nest.flatten(self._padding_values),
          drop_remainder=self._drop_remainder,
          output_shapes=structure.get_flat_tensor_shapes(self._structure))
    super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    return self._structure
def _should_unpack_args(args):
return type(args) is tuple # pylint: disable=unidiomatic-typecheck
class MapDataset(UnaryDataset):
  """A `Dataset` that maps a function over elements in its input."""

  def __init__(self,
               input_dataset,
               map_func,
               use_inter_op_parallelism=True,
               preserve_cardinality=False,
               use_legacy_function=False):
    """See `Dataset.map()` for details."""
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism
    self._preserve_cardinality = preserve_cardinality
    self._map_func = StructuredFunctionWrapper(
        map_func,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    variant_tensor = gen_dataset_ops.map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        preserve_cardinality=self._preserve_cardinality,
        **self._flat_structure)
    super(MapDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    # Elements have the structure produced by the map function.
    return self._map_func.output_structure

  def _transformation_name(self):
    return "Dataset.map()"
class ParallelMapDataset(UnaryDataset):
  """A `Dataset` that maps a function over elements in its input in parallel."""

  def __init__(self,
               input_dataset,
               map_func,
               num_parallel_calls,
               use_inter_op_parallelism=True,
               preserve_cardinality=False,
               use_legacy_function=False):
    """See `Dataset.map()` for details."""
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism
    self._map_func = StructuredFunctionWrapper(
        map_func,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls")
    self._preserve_cardinality = preserve_cardinality
    variant_tensor = gen_dataset_ops.parallel_map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        num_parallel_calls=self._num_parallel_calls,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        preserve_cardinality=self._preserve_cardinality,
        **self._flat_structure)
    super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    # Elements have the structure produced by the map function.
    return self._map_func.output_structure

  def _transformation_name(self):
    return "Dataset.map()"
class FlatMapDataset(UnaryDataset):
  """A `Dataset` that maps a function over its input and flattens the result."""

  def __init__(self, input_dataset, map_func):
    """See `Dataset.flat_map()` for details."""
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    # Elements are drawn from the datasets returned by `map_func`, so the
    # element spec is that of those inner datasets.
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    variant_tensor = gen_dataset_ops.flat_map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        **self._flat_structure)
    super(FlatMapDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    return self._structure

  def _transformation_name(self):
    return "Dataset.flat_map()"
class InterleaveDataset(UnaryDataset):
  """A `Dataset` that interleaves elements of the datasets returned by
  `map_func`, cycling over `cycle_length` inputs, `block_length` elements
  at a time (see `Dataset.interleave()`)."""
  def __init__(self, input_dataset, map_func, cycle_length, block_length):
    """See `Dataset.interleave()` for details.

    Raises:
      TypeError: if `map_func` does not return a `Dataset`.
    """
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # `map_func` must itself produce datasets; reject anything else early.
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    # Element spec comes from the inner datasets produced by `map_func`.
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
    variant_tensor = gen_dataset_ops.interleave_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,  # pylint: disable=protected-access
        self._cycle_length,
        self._block_length,
        f=self._map_func.function,
        **self._flat_structure)
    super(InterleaveDataset, self).__init__(input_dataset, variant_tensor)
  def _functions(self):
    """Returns the `StructuredFunctionWrapper`s used by this dataset."""
    return [self._map_func]
  @property
  def element_spec(self):
    return self._structure
  def _transformation_name(self):
    """Returns the user-facing API name for this transformation."""
    return "Dataset.interleave()"
class ParallelInterleaveDataset(UnaryDataset):
  """The parallel variant of `InterleaveDataset`: identical semantics, but
  inner datasets are processed by up to `num_parallel_calls` parallel calls
  (surfaced as `Dataset.interleave(..., num_parallel_calls=...)`)."""
  def __init__(self, input_dataset, map_func, cycle_length, block_length,
               num_parallel_calls):
    """See `Dataset.interleave()` for details.

    Raises:
      TypeError: if `map_func` does not return a `Dataset`.
    """
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # `map_func` must itself produce datasets; reject anything else early.
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    # Element spec comes from the inner datasets produced by `map_func`.
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
    variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v2(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,  # pylint: disable=protected-access
        self._cycle_length,
        self._block_length,
        self._num_parallel_calls,
        f=self._map_func.function,
        **self._flat_structure)
    super(ParallelInterleaveDataset, self).__init__(input_dataset,
                                                    variant_tensor)
  def _functions(self):
    """Returns the `StructuredFunctionWrapper`s used by this dataset."""
    return [self._map_func]
  @property
  def element_spec(self):
    return self._structure
  def _transformation_name(self):
    # Same public API name as the sequential variant; the parallel op is
    # selected by the `num_parallel_calls` argument.
    return "Dataset.interleave()"
class FilterDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that keeps only the input elements for which `predicate`
  evaluates to `True`. The element structure is unchanged."""
  def __init__(self, input_dataset, predicate, use_legacy_function=False):
    """See `Dataset.filter()` for details.

    Raises:
      ValueError: if `predicate` does not return a scalar boolean tensor.
    """
    self._input_dataset = input_dataset
    wrapped_func = StructuredFunctionWrapper(
        predicate,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    # The filter op requires a scalar boolean predicate; validate up front so
    # the error names the offending structure.
    if not wrapped_func.output_structure.is_compatible_with(
        tensor_spec.TensorSpec([], dtypes.bool)):
      error_msg = ("`predicate` return type must be convertible to a scalar "
                   "boolean tensor. Was {}.").format(
                       wrapped_func.output_structure)
      raise ValueError(error_msg)
    self._predicate = wrapped_func
    variant_tensor = gen_dataset_ops.filter_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        other_arguments=self._predicate.function.captured_inputs,
        predicate=self._predicate.function,
        **self._flat_structure)
    super(FilterDataset, self).__init__(input_dataset, variant_tensor)
  def _functions(self):
    """Returns the `StructuredFunctionWrapper`s used by this dataset."""
    return [self._predicate]
  def _transformation_name(self):
    """Returns the user-facing API name for this transformation."""
    return "Dataset.filter()"
class PrefetchDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that asynchronously prefetches elements from its input
  (see `Dataset.prefetch()`). Elements pass through unchanged."""
  def __init__(self, input_dataset, buffer_size, slack_period=None):
    """See `Dataset.prefetch()` for details.

    Args:
      input_dataset: the upstream `Dataset`.
      buffer_size: number of elements to buffer; `None` enables auto-tuning.
      slack_period: forwarded to the prefetch op unchanged.
        NOTE(review): semantics not visible here -- see the op's definition.
    """
    self._input_dataset = input_dataset
    if buffer_size is None:
      buffer_size = -1  # This is the sentinel for auto-tuning.
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")
    variant_tensor = gen_dataset_ops.prefetch_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        buffer_size=self._buffer_size,
        slack_period=slack_period,
        **self._flat_structure)
    super(PrefetchDataset, self).__init__(input_dataset, variant_tensor)
class WindowDataset(UnaryDataset):
  """A `Dataset` of windows over its input (see `Dataset.window()`).

  Each output element is a nest of *datasets* -- one per input component --
  which is why the element spec below wraps every component in `DatasetSpec`.
  """
  def __init__(self, input_dataset, size, shift, stride, drop_remainder):
    """See `Dataset.window()` for details."""
    self._input_dataset = input_dataset
    self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size")
    self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift")
    self._stride = ops.convert_to_tensor(
        stride, dtype=dtypes.int64, name="stride")
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")
    # Build the per-component DatasetSpec structure from the input's legacy
    # (classes, shapes, types) metadata.
    self._structure = nest.pack_sequence_as(
        get_legacy_output_classes(input_dataset), [
            DatasetSpec(  # pylint: disable=g-complex-comprehension
                structure.convert_legacy_structure(
                    output_type, output_shape, output_class))
            for output_class, output_shape, output_type in zip(
                nest.flatten(get_legacy_output_classes(input_dataset)),
                nest.flatten(get_legacy_output_shapes(input_dataset)),
                nest.flatten(get_legacy_output_types(input_dataset)))
        ])
    variant_tensor = gen_dataset_ops.window_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._size,
        self._shift,
        self._stride,
        self._drop_remainder,
        **self._flat_structure)
    super(WindowDataset, self).__init__(input_dataset, variant_tensor)
  @property
  def element_spec(self):
    return self._structure
class _OptionsDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` that attaches `options` to its input.

  If the input already carries options, the new ones are merged into them;
  otherwise the new options are taken as-is.
  """
  def __init__(self, input_dataset, options):
    self._input_dataset = input_dataset
    existing = input_dataset.options()
    self._options = existing.merge(options) if existing else options
    # No new op is created: this dataset reuses the input's variant tensor
    # and only carries the merged options alongside it.
    variant_tensor = input_dataset._variant_tensor  # pylint: disable=protected-access
    super(_OptionsDataset, self).__init__(input_dataset, variant_tensor)
  def options(self):
    """Returns the merged options for this dataset."""
    return self._options
class _ModelDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` that wraps its input in a `model_dataset` op.

  NOTE(review): `algorithm.value` implies `algorithm` is an enum member
  (presumably an autotuning algorithm selector) -- confirm at the call site.
  """
  def __init__(self, input_dataset, algorithm, cpu_budget):
    self._input_dataset = input_dataset
    variant_tensor = gen_dataset_ops.model_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        algorithm=algorithm.value,
        cpu_budget=cpu_budget,
        **self._flat_structure)
    super(_ModelDataset, self).__init__(input_dataset, variant_tensor)
class _OptimizeDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` whose input pipeline is rewritten by the named
  graph optimizations."""
  def __init__(self, input_dataset, optimizations, optimization_configs=None):
    """Wraps `input_dataset` in an `optimize_dataset` op.

    Args:
      input_dataset: the upstream `Dataset`.
      optimizations: list of optimization names (strings); `None` means none.
      optimization_configs: extra configuration strings; `None` means none.
    """
    self._input_dataset = input_dataset
    # Normalize `None` to empty lists so both values can be handed straight
    # to the op.
    if optimizations is None:
      optimizations = []
    if optimization_configs is None:
      optimization_configs = []
    self._optimizations = ops.convert_to_tensor(
        optimizations, dtype=dtypes.string, name="optimizations")
    variant_tensor = gen_dataset_ops.optimize_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._optimizations,
        optimization_configs=optimization_configs,
        **self._flat_structure)
    super(_OptimizeDataset, self).__init__(input_dataset, variant_tensor)
class _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` that associates a stats aggregator (and naming
  prefixes) with its input pipeline."""
  def __init__(self, input_dataset, aggregator, prefix, counter_prefix):
    self._input_dataset = input_dataset
    self._stats_aggregator = aggregator
    self._prefix = prefix
    self._counter_prefix = counter_prefix
    variant_tensor = ged_ops.set_stats_aggregator_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._stats_aggregator._resource,  # pylint: disable=protected-access
        self._prefix,
        self._counter_prefix,
        **self._flat_structure)
    super(_SetStatsAggregatorDataset, self).__init__(input_dataset,
                                                     variant_tensor)
class _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` that caps the intra-op parallelism available to
  the ops in its input pipeline."""
  def __init__(self, input_dataset, max_intra_op_parallelism):
    self._input_dataset = input_dataset
    self._max_intra_op_parallelism = ops.convert_to_tensor(
        max_intra_op_parallelism,
        dtype=dtypes.int64,
        name="max_intra_op_parallelism")
    variant_tensor = ged_ops.max_intra_op_parallelism_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._max_intra_op_parallelism,
        **self._flat_structure)
    super(_MaxIntraOpParallelismDataset, self).__init__(input_dataset,
                                                        variant_tensor)
class _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` that runs its input pipeline on a private thread
  pool of `num_threads` threads."""
  def __init__(self, input_dataset, num_threads):
    self._input_dataset = input_dataset
    self._num_threads = ops.convert_to_tensor(
        num_threads, dtype=dtypes.int64, name="num_threads")
    variant_tensor = ged_ops.private_thread_pool_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._num_threads,
        **self._flat_structure)
    super(_PrivateThreadPoolDataset, self).__init__(input_dataset,
                                                    variant_tensor)
class _RestructuredDataset(UnaryDataset):
  """An identity `Dataset` that merely reports `structure` as its element
  spec; the underlying variant tensor is reused unchanged."""
  def __init__(self, dataset, structure):
    self._input_dataset = dataset
    self._structure = structure
    # No new op: only the advertised element spec changes.
    variant_tensor = self._input_dataset._variant_tensor  # pylint: disable=protected-access
    super(_RestructuredDataset, self).__init__(dataset, variant_tensor)
  @property
  def element_spec(self):
    return self._structure
class _UnbatchDataset(UnaryDataset):
  """A `Dataset` that splits each input element along its first dimension,
  yielding the individual rows as elements.

  Raises:
    ValueError: if any component is scalar (no batch dimension) or the
      components' leading dimensions are statically known to disagree.
  """
  def __init__(self, input_dataset):
    """See `unbatch()` for more details."""
    flat_shapes = input_dataset._flat_shapes  # pylint: disable=protected-access
    # A scalar component has no dimension to unbatch along.
    if any(s.ndims == 0 for s in flat_shapes):
      raise ValueError("Cannot unbatch an input with scalar components.")
    # Merge the leading dimension across all components; merge_with raises
    # if two statically-known batch sizes conflict.
    known_batch_dim = tensor_shape.Dimension(None)
    for s in flat_shapes:
      try:
        known_batch_dim = known_batch_dim.merge_with(s[0])
      except ValueError:
        raise ValueError("Cannot unbatch an input whose components have "
                         "different batch sizes.")
    self._input_dataset = input_dataset
    # Element spec is each component's spec with the leading (batch)
    # dimension removed.
    self._structure = nest.map_structure(
        lambda component_spec: component_spec._unbatch(),  # pylint: disable=protected-access
        get_structure(input_dataset))
    variant_tensor = ged_ops.unbatch_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        **self._flat_structure)
    super(_UnbatchDataset, self).__init__(input_dataset, variant_tensor)
  @property
  def element_spec(self):
    return self._structure
| true | true |
f7feca1319ebf62bf200c532c66dfe16f40e28fc | 1,927 | py | Python | Tutorial code/Week 3_Pandas tutorial.py | Leanneliansong1/inf1340-programmingfordatascience-fa21 | 8944d019f64f9f25e7b10dafbca18250bf12e9ee | [
"MIT"
] | 8 | 2021-09-15T20:29:21.000Z | 2021-12-18T13:01:04.000Z | Tutorial code/Week 3_Pandas tutorial.py | Leanneliansong1/inf1340-programmingfordatascience-fa21 | 8944d019f64f9f25e7b10dafbca18250bf12e9ee | [
"MIT"
] | 8 | 2021-11-21T02:53:43.000Z | 2021-12-18T04:43:57.000Z | Tutorial code/Week 3_Pandas tutorial.py | Leanneliansong1/inf1340-programmingfordatascience-fa21 | 8944d019f64f9f25e7b10dafbca18250bf12e9ee | [
"MIT"
] | 44 | 2021-09-29T02:29:41.000Z | 2021-12-18T04:22:10.000Z | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[3]:
df = pd.read_csv('merc.csv')
# In[4]:
print(df.head(10))
# In[47]:
print(df.head())
# In[48]:
print(df.tail())
# In[49]:
print(df.info())
# In[50]:
#refer to the row index:
print(df.loc[0])
# In[54]:
print(df.loc[[3, 9]])
# In[44]:
print(df.loc[2])
# In[28]:
#use a list of indexes:
print(df.loc[[0, 1]])
# In[29]:
#identifying duplicates
print(df.duplicated())
# In[31]:
#removing duplicates
df.drop_duplicates(inplace = True)
print(df.head())
# In[32]:
#Dealing with null values
#(1) Remove all null values
nona_df = df.dropna()
print(nona_df.to_string())
# In[34]:
#(2) Replace NULL values with the number 200
checknull = df.fillna(200, inplace = True)
print(checknull)
#df.fillna(130, inplace = True)
#print(df)
# In[36]:
# (3) eplace null values Using Mean, Median, or Mode
x = df["price"].mean()
df["price"].fillna(x, inplace = True)
print(df.head())
# In[19]:
data = {
"mileage": [240000, 130000, 20000],
"years": [2003, 2017, 2021]
}
newdf = pd.DataFrame(data, index = ["car1", "car2", "car3"])
print(newdf)
# In[20]:
#refer to the named index:
print(newdf.loc["car2"])
# In[4]:
name_dict = {
'Name': ['a','b','c','d'],
'Score': [90,80,95,20]
}
df = pd.DataFrame(name_dict)
df.to_csv('file_name.csv')
# In[64]:
test1 = [1, 2, 3, 4, 5]
print(type(test1))
# In[65]:
print(test1[-2])
# In[66]:
test2 = (1, 2, 3, 4, 5)
print(type(test2))
# In[67]:
test1[-2] = 6
# In[68]:
print(test1[-2])
# In[69]:
test2[-2] = 6
# In[70]:
test3 = {1, 2, 3, 4, 5}
print(type(test3))
# In[72]:
test4 = {
"Fruit": ["apple", "orange", "watermelon"],
"Weight": [5, 10, 15]
}
# In[73]:
print(test4["Fruit"])
# In[74]:
x = test4.keys()
print(x)
# In[75]:
x = test4.items()
print(x)
# In[ ]:
| 8.526549 | 60 | 0.553191 |
# NOTE(review): this section is a comment-stripped duplicate of the pandas
# tutorial above (an artifact of the dataset dump this file came from).
# Like the original, it reads 'merc.csv' and aborts at `test2[-2] = 6`,
# which raises TypeError because tuples are immutable, so nothing after
# that line executes when run as a script.
import pandas as pd
df = pd.read_csv('merc.csv')
print(df.head(10))
print(df.head())
print(df.tail())
print(df.info())
print(df.loc[0])
print(df.loc[[3, 9]])
print(df.loc[2])
print(df.loc[[0, 1]])
# Duplicate detection / removal.
print(df.duplicated())
df.drop_duplicates(inplace = True)
print(df.head())
# Null handling: drop, then fill with a constant.
nona_df = df.dropna()
print(nona_df.to_string())
# fillna(..., inplace=True) returns None, so this prints None.
checknull = df.fillna(200, inplace = True)
print(checknull)
# Fill nulls with the column mean.
x = df["price"].mean()
df["price"].fillna(x, inplace = True)
print(df.head())
data = {
    "mileage": [240000, 130000, 20000],
    "years": [2003, 2017, 2021]
}
newdf = pd.DataFrame(data, index = ["car1", "car2", "car3"])
print(newdf)
print(newdf.loc["car2"])
name_dict = {
    'Name': ['a','b','c','d'],
    'Score': [90,80,95,20]
}
df = pd.DataFrame(name_dict)
df.to_csv('file_name.csv')
# Core collection types: list (mutable) ...
test1 = [1, 2, 3, 4, 5]
print(type(test1))
print(test1[-2])
# ... tuple (immutable) ...
test2 = (1, 2, 3, 4, 5)
print(type(test2))
test1[-2] = 6
print(test1[-2])
# Deliberate error: tuples reject item assignment (raises TypeError).
test2[-2] = 6
test3 = {1, 2, 3, 4, 5}
print(type(test3))
test4 = {
    "Fruit": ["apple", "orange", "watermelon"],
    "Weight": [5, 10, 15]
}
print(test4["Fruit"])
x = test4.keys()
print(x)
x = test4.items()
print(x)
| true | true |
f7fecb00e6bc3e6e0a15eac5efa706c14db730b6 | 65,651 | py | Python | test/integration/component/test_resource_limits.py | ke4qqq/cloudstack | 7e0f1cf4571f9c3f6c26b8d744d2ed102fa7a511 | [
"Apache-2.0"
] | 1 | 2015-02-06T04:17:11.000Z | 2015-02-06T04:17:11.000Z | test/integration/component/test_resource_limits.py | ke4qqq/cloudstack | 7e0f1cf4571f9c3f6c26b8d744d2ed102fa7a511 | [
"Apache-2.0"
] | 6 | 2020-11-16T20:44:23.000Z | 2022-02-01T01:06:16.000Z | test/integration/component/test_resource_limits.py | ke4qqq/cloudstack | 7e0f1cf4571f9c3f6c26b8d744d2ed102fa7a511 | [
"Apache-2.0"
] | 15 | 2017-01-12T11:17:48.000Z | 2019-04-19T10:09:31.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Resource limits
"""
#Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (VirtualMachine,
Snapshot,
Template,
PublicIPAddress,
Account,
Domain,
Volume,
Network,
DiskOffering,
NetworkOffering,
ServiceOffering,
Configurations)
from marvin.lib.common import (list_volumes,
get_domain,
get_zone,
get_template,
update_resource_limit,
list_configurations,
wait_for_cleanup)
from marvin.lib.utils import cleanup_resources
import time
class Services:
    """Static configuration data consumed by the resource-limit tests."""

    def __init__(self):
        # Provider mapping for the isolated network offering below: every
        # supported service is handled by the virtual router.
        vr_providers = {
            "Dhcp": 'VirtualRouter',
            "Dns": 'VirtualRouter',
            "SourceNat": 'VirtualRouter',
            "PortForwarding": 'VirtualRouter',
            "Vpn": 'VirtualRouter',
            "Firewall": 'VirtualRouter',
            "Lb": 'VirtualRouter',
            "UserData": 'VirtualRouter',
            "StaticNat": 'VirtualRouter',
        }
        self.services = {
            "domain": {"name": "Domain"},
            # Random characters are appended to "username" for uniqueness.
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                "password": "password",
            },
            # Compute offering: 1 vCPU @ 100 MHz, 128 MB RAM.
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
            },
            # 1 GB data-disk offering.
            "disk_offering": {
                "displaytext": "Small",
                "name": "Small",
                "disksize": 1,
            },
            "volume": {"diskname": "TestDiskServ"},
            "server": {
                "displayname": "TestVM",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "template": {
                "displaytext": "Cent OS Template",
                "name": "Cent OS Template",
                "ostype": 'CentOS 5.3 (64-bit)',
                "templatefilter": 'self',
            },
            "network_offering": {
                "name": 'Network offering',
                "displaytext": 'Network offering',
                "guestiptype": 'Isolated',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "serviceProviderList": vr_providers,
            },
            "network": {
                "name": "test network",
                "displaytext": "test network",
            },
            # Cent OS 5.3 (64 bit)
            "ostype": 'CentOS 5.3 (64-bit)',
            # Seconds slept to let asynchronous state changes propagate.
            "sleep": 60,
            "timeout": 10,
        }
class TestResourceLimitsAccount(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """Create the class-level fixtures shared by all tests.

        Looks up the zone, domain and a CentOS template, then creates two
        sibling accounts (limits are applied to account 1; account 2 acts
        as the unrestricted control) plus a service and disk offering.
        """
        cls.testClient = super(TestResourceLimitsAccount, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostype"]
                            )
        # All VMs are deployed into the discovered zone.
        cls.services["server"]["zoneid"] = cls.zone.id
        # Account 1: the account whose resource limits get restricted.
        cls.account_1 = Account.create(
                            cls.api_client,
                            cls.services["account"],
                            domainid=cls.domain.id
                            )
        # Account 2: unrestricted control account in the same domain.
        cls.account_2 = Account.create(
                            cls.api_client,
                            cls.services["account"],
                            domainid=cls.domain.id
                            )
        cls.service_offering = ServiceOffering.create(
                                            cls.api_client,
                                            cls.services["service_offering"]
                                            )
        cls.disk_offering = DiskOffering.create(
                                    cls.api_client,
                                    cls.services["disk_offering"]
                                    )
        # Deleted in reverse-of-dependency order by tearDownClass.
        cls._cleanup = [
                        cls.disk_offering,
                        cls.service_offering,
                        cls.account_1,
                        cls.account_2
                        ]
        return
    @classmethod
    def tearDownClass(cls):
        """Delete the class-level fixtures (accounts and offerings)."""
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        """Create fresh API/DB clients and an empty per-test cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        # Each test appends the resources it creates; tearDown deletes them.
        self.cleanup = []
        return
    def tearDown(self):
        """Dispose the resources this test created and wait for expunge."""
        try:
            # Clean up, terminate the created instance, volumes and snapshots
            cleanup_resources(self.apiclient, self.cleanup)
            # Wait for VMs to expunge so later tests see accurate counts
            wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_01_vm_per_account(self):
        """Test VM limit per account
        """
        # Validate the following
        # 1. Set user_vm=1 limit for account 1.
        # 2. Try to start 2 VMs account 1. Verify start of second VM is denied
        #    for this account.
        # 3. Try to start 2 VMs account 2. Verify 2 VMs are started properly
        self.debug(
            "Updating instance resource limit for account: %s" %
            self.account_1.name)
        # Resource type 0 == user VM instances; cap account 1 at one VM.
        update_resource_limit(
                              self.apiclient,
                              0, # Instance
                              account=self.account_1.name,
                              domainid=self.account_1.domainid,
                              max=1
                              )
        self.debug(
            "Deploying VM instance in account: %s" %
            self.account_1.name)
        virtual_machine = VirtualMachine.create(
                                self.apiclient,
                                self.services["server"],
                                templateid=self.template.id,
                                accountid=self.account_1.name,
                                domainid=self.account_1.domainid,
                                serviceofferingid=self.service_offering.id
                                )
        self.cleanup.append(virtual_machine)
        # Verify VM state
        self.assertEqual(
                            virtual_machine.state,
                            'Running',
                            "Check VM state is Running or not"
                        )
        # Exception should be raised for second instance (account_1)
        with self.assertRaises(Exception):
            VirtualMachine.create(
                                self.apiclient,
                                self.services["server"],
                                templateid=self.template.id,
                                accountid=self.account_1.name,
                                domainid=self.account_1.domainid,
                                serviceofferingid=self.service_offering.id
                                )
        self.debug(
            "Deploying VM instance in account: %s" %
            self.account_2.name)
        # Start 2 instances for account_2 (no limit set, both must succeed)
        virtual_machine_1 = VirtualMachine.create(
                                self.apiclient,
                                self.services["server"],
                                templateid=self.template.id,
                                accountid=self.account_2.name,
                                domainid=self.account_2.domainid,
                                serviceofferingid=self.service_offering.id
                                )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(
                            virtual_machine_1.state,
                            'Running',
                            "Check VM state is Running or not"
                        )
        self.debug(
            "Deploying VM instance in account: %s" %
            self.account_2.name)
        virtual_machine_2 = VirtualMachine.create(
                                self.apiclient,
                                self.services["server"],
                                templateid=self.template.id,
                                accountid=self.account_2.name,
                                domainid=self.account_2.domainid,
                                serviceofferingid=self.service_offering.id
                                )
        self.cleanup.append(virtual_machine_2)
        # Verify VM state
        self.assertEqual(
                            virtual_machine_2.state,
                            'Running',
                            "Check VM state is Running or not"
                        )
        return
    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_02_publicip_per_account(self):
        """Test Public IP limit per account
        """
        # Validate the following
        # 1. Set Public_IP= 2 limit for account 1.
        # 2. start 1 VMs account 1
        # 3. start 1 VMs account 2
        # 4. Acquire 2 IP in account 1. Verify account with limit should be
        #    denied to acquire more than one IP.
        # 5. Acquire 2 IP in account 2. Verify account 2 should be able to
        #    Acquire IP without any warning
        self.debug(
            "Updating public IP resource limit for account: %s" %
            self.account_1.name)
        # Resource type 1 == public IPs; cap account 1 at two.
        # NOTE(review): max=2 but only ONE extra acquire succeeds below --
        # presumably the VM deployment itself consumes one IP (source NAT);
        # confirm against the zone configuration.
        update_resource_limit(
                              self.apiclient,
                              1, # Public Ip
                              account=self.account_1.name,
                              domainid=self.account_1.domainid,
                              max=2
                              )
        self.debug(
            "Deploying VM instance in account: %s" %
            self.account_1.name)
        virtual_machine_1 = VirtualMachine.create(
                                self.apiclient,
                                self.services["server"],
                                templateid=self.template.id,
                                accountid=self.account_1.name,
                                domainid=self.account_1.domainid,
                                serviceofferingid=self.service_offering.id
                                )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(
                            virtual_machine_1.state,
                            'Running',
                            "Check VM state is Running or not"
                        )
        self.debug(
            "Deploying VM instance in account: %s" %
            self.account_2.name)
        # Create VM for second account
        virtual_machine_2 = VirtualMachine.create(
                                self.apiclient,
                                self.services["server"],
                                templateid=self.template.id,
                                accountid=self.account_2.name,
                                domainid=self.account_2.domainid,
                                serviceofferingid=self.service_offering.id
                                )
        self.cleanup.append(virtual_machine_2)
        # Verify VM state
        self.assertEqual(
                            virtual_machine_2.state,
                            'Running',
                            "Check VM state is Running or not"
                        )
        self.debug(
            "Associating public IP for account: %s" %
            virtual_machine_1.account)
        public_ip_1 = PublicIPAddress.create(
                                           self.apiclient,
                                           virtual_machine_1.account,
                                           virtual_machine_1.zoneid,
                                           virtual_machine_1.domainid,
                                           self.services["server"]
                                           )
        self.cleanup.append(public_ip_1)
        # Sleep to ensure that state is reflected across all the calls
        time.sleep(self.services["sleep"])
        # Verify Public IP state
        self.assertEqual(
                            public_ip_1.ipaddress.state in [
                                                 'Allocated',
                                                 'Allocating'
                                                 ],
                            True,
                            "Check Public IP state is allocated or not"
                        )
        # Exception should be raised when exceeding the limit (account_1)
        with self.assertRaises(Exception):
            PublicIPAddress.create(
                                   self.apiclient,
                                   virtual_machine_1.account,
                                   virtual_machine_1.zoneid,
                                   virtual_machine_1.domainid,
                                   self.services["server"]
                                   )
        self.debug(
            "Associating public IP for account: %s" %
            virtual_machine_2.account)
        # Assign Public IP for account 2 (unrestricted, must succeed twice)
        public_ip_3 = PublicIPAddress.create(
                                           self.apiclient,
                                           virtual_machine_2.account,
                                           virtual_machine_2.zoneid,
                                           virtual_machine_2.domainid,
                                           self.services["server"]
                                           )
        self.cleanup.append(public_ip_3)
        # Verify Public IP state
        self.assertEqual(
                            public_ip_3.ipaddress.state in [
                                                 'Allocated',
                                                 'Allocating'
                                                 ],
                            True,
                            "Check Public IP state is allocated or not"
                        )
        self.debug(
            "Associating public IP for account: %s" %
            virtual_machine_2.account)
        public_ip_4 = PublicIPAddress.create(
                                           self.apiclient,
                                           virtual_machine_2.account,
                                           virtual_machine_2.zoneid,
                                           virtual_machine_2.domainid,
                                           self.services["server"]
                                           )
        self.cleanup.append(public_ip_4)
        # Verify Public IP state
        self.assertEqual(
                            public_ip_4.ipaddress.state in [
                                                 'Allocated',
                                                 'Allocating'
                                                 ],
                            True,
                            "Check Public IP state is allocated or not"
                        )
        return
@attr(speed="slow")
@attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
def test_03_snapshots_per_account(self):
"""Test Snapshot limit per account
"""
# Validate the following
# 1. Set snapshot= 2 limit for account 1.
# 2. start 1 VMs account 1
# 3. start 1 VMs account 2
# 4. Create 2 snapshot in account 1. Verify account with limit should
# be denied to create more than one snapshot.
# 5. Create 2 snapshot in account 2. Verify account 2 should be able to
# create snapshots without any warning
self.debug(
"Updating public IP resource limit for account: %s" %
self.account_1.name)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
3, # Snapshot
account=self.account_1.name,
domainid=self.account_1.domainid,
max=1
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_1.name)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_1)
# Verify VM state
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_1.name)
# Create VM for second account
virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_2)
# Verify VM state
self.assertEqual(
virtual_machine_2.state,
'Running',
"Check VM state is Running or not"
)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_1.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for list volume response return valid data"
)
volume = volumes[0]
self.debug("Creating snapshot from volume: %s" % volumes[0].id)
# Create a snapshot from the ROOTDISK (Account 1)
snapshot_1 = Snapshot.create(self.apiclient,
volumes[0].id,
account=self.account_1.name,
domainid=self.account_1.domainid,
)
self.cleanup.append(snapshot_1)
# Verify Snapshot state
self.assertEqual(
snapshot_1.state in [
'BackedUp',
'CreatedOnPrimary',
'Allocated'
],
True,
"Snapshot state is not valid, it is %s" % snapshot_1.state
)
# Exception should be raised for second snapshot (account_1)
with self.assertRaises(Exception):
Snapshot.create(self.apiclient,
volumes[0].id,
account=self.account_1.name,
domainid=self.account_1.domainid,
)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_2.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for list volume response return valid data"
)
volume = volumes[0]
self.debug("Creating snapshot from volume: %s" % volumes[0].id)
# Create a snapshot from the ROOTDISK (Account 2)
snapshot_2 = Snapshot.create(self.apiclient,
volume.id,
account=self.account_2.name,
domainid=self.account_2.domainid,
)
self.cleanup.append(snapshot_2)
# Verify Snapshot state
self.assertEqual(
snapshot_2.state in [
'BackedUp',
'CreatedOnPrimary',
'Allocated'
],
True,
"Snapshot state is not valid, it is %s" % snapshot_2.state
)
self.debug("Creating snapshot from volume: %s" % volumes[0].id)
# Create a second snapshot from the ROOTDISK (Account 2)
snapshot_3 = Snapshot.create(self.apiclient,
volume.id,
account=self.account_2.name,
domainid=self.account_2.domainid,
)
self.cleanup.append(snapshot_3)
# Verify Snapshot state
self.assertEqual(
snapshot_3.state in [
'BackedUp',
'CreatedOnPrimary',
'Allocated'
],
True,
"Snapshot state is not valid, it is %s" % snapshot_3.state
)
return
    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_04_volumes_per_account(self):
        """Test Volumes limit per account
        """
        # Validate the following
        # 1. Set volumes=2 limit for account 1.
        # 2. Start 1 VMs account 1
        # 3. Start 1 VMs account 2
        # 4. Create 2 volumes in account 1. Verify account with limit should be
        #    denied to create more than one volume.
        # 5. Create 2 volumes in account 2. Verify account 2 should be able to
        #    create Volume without any warning
        self.debug(
            "Updating volume resource limit for account: %s" %
            self.account_1.name)
        # Resource type 2 == volumes; cap account 1 at two volumes.
        # NOTE(review): max=2 but only ONE data volume fits -- presumably the
        # VM's ROOT volume counts toward the limit; confirm.
        update_resource_limit(
                              self.apiclient,
                              2, # Volume
                              account=self.account_1.name,
                              domainid=self.account_1.domainid,
                              max=2
                              )
        self.debug(
            "Deploying VM for account: %s" % self.account_1.name)
        virtual_machine_1 = VirtualMachine.create(
                                self.apiclient,
                                self.services["server"],
                                templateid=self.template.id,
                                accountid=self.account_1.name,
                                domainid=self.account_1.domainid,
                                serviceofferingid=self.service_offering.id
                                )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(
                            virtual_machine_1.state,
                            'Running',
                            "Check VM state is Running or not"
                        )
        self.debug(
            "Deploying VM for account: %s" % self.account_2.name)
        # Create VM for second account
        virtual_machine_2 = VirtualMachine.create(
                                self.apiclient,
                                self.services["server"],
                                templateid=self.template.id,
                                accountid=self.account_2.name,
                                domainid=self.account_2.domainid,
                                serviceofferingid=self.service_offering.id
                                )
        self.cleanup.append(virtual_machine_2)
        # Verify VM state
        self.assertEqual(
                            virtual_machine_2.state,
                            'Running',
                            "Check VM state is Running or not"
                        )
        self.debug(
            "Create a data volume for account: %s" % self.account_1.name)
        volume_1 = Volume.create(
                               self.apiclient,
                               self.services["volume"],
                               zoneid=self.zone.id,
                               account=self.account_1.name,
                               domainid=self.account_1.domainid,
                               diskofferingid=self.disk_offering.id
                               )
        self.cleanup.append(volume_1)
        # Verify Volume state
        self.assertEqual(
                            volume_1.state in [
                                                 'Allocated',
                                                 'Ready'
                                                 ],
                            True,
                            "Check Volume state is Ready or not"
                        )
        # Exception should be raised when exceeding the volume limit (account_1)
        with self.assertRaises(Exception):
            Volume.create(
                          self.apiclient,
                          self.services["volume"],
                          zoneid=self.zone.id,
                          account=self.account_1.name,
                          domainid=self.account_1.domainid,
                          diskofferingid=self.disk_offering.id
                          )
        self.debug(
            "Create a data volume for account: %s" % self.account_2.name)
        # Create volume for Account 2 (unrestricted, must succeed twice)
        volume_2 = Volume.create(
                               self.apiclient,
                               self.services["volume"],
                               zoneid=self.zone.id,
                               account=self.account_2.name,
                               domainid=self.account_2.domainid,
                               diskofferingid=self.disk_offering.id
                               )
        self.cleanup.append(volume_2)
        # Verify Volume state
        self.assertEqual(
                            volume_2.state in [
                                                 'Allocated',
                                                 'Ready'
                                                 ],
                            True,
                            "Check Volume state is Ready or not"
                        )
        self.debug(
            "Create a data volume for account: %s" % self.account_2.name)
        # Create a second volume from the ROOTDISK (Account 2)
        volume_3 = Volume.create(
                               self.apiclient,
                               self.services["volume"],
                               zoneid=self.zone.id,
                               account=self.account_2.name,
                               domainid=self.account_2.domainid,
                               diskofferingid=self.disk_offering.id
                               )
        self.cleanup.append(volume_3)
        # Verify Volume state
        self.assertEqual(
                            volume_3.state in [
                                                 'Allocated',
                                                 'Ready'
                                                 ],
                            True,
                            "Check Volume state is Ready or not"
                        )
        return
@attr(tags=["advanced", "advancedns"], required_hardware="false")
def test_05_templates_per_account(self):
"""Test Templates limit per account
"""
# Validate the following
# 1. Set templates=1 limit for account 1.
# 2. Try to create 2 templates in account 1. Verify account with limit
# should be denied to create more than 1 template.
# 3. Try to create 2 templates in account 2. Verify account 2 should be
# able to create template without any error
try:
apiclient_account1 = self.testClient.getUserApiClient(
UserName=self.account_1.name,
DomainName=self.account_1.domain)
apiclient_account2 = self.testClient.getUserApiClient(
UserName=self.account_2.name,
DomainName=self.account_2.domain)
self.debug(
"Updating template resource limit for account: %s" %
self.account_1.name)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
4, # Template
account=self.account_1.name,
domainid=self.account_1.domainid,
max=1
)
self.debug(
"Updating volume resource limit for account: %s" %
self.account_1.name)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_1)
# Verify VM state
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
self.debug(
"Deploying virtual machine for account: %s" %
self.account_2.name)
# Create VM for second account
virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_2)
# Verify VM state
self.assertEqual(
virtual_machine_2.state,
'Running',
"Check VM state is Running or not"
)
virtual_machine_1.stop(self.apiclient)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_1.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for list volume response return valid data"
)
volume = volumes[0]
self.debug(
"Creating template from volume: %s" % volume.id)
# Create a template from the ROOTDISK (Account 1)
template_1 = Template.create(
apiclient_account1,
self.services["template"],
volumeid=volume.id,
account=self.account_1.name,
domainid=self.account_1.domainid,
)
self.cleanup.append(template_1)
# Verify Template state
self.assertEqual(
template_1.isready,
True,
"Check Template is in ready state or not"
)
except Exception as e:
self.fail("Exception occured: %s" % e)
# Exception should be raised for second snapshot (account_1)
with self.assertRaises(Exception):
Template.create(
apiclient_account1,
self.services["template"],
volumeid=volume.id,
account=self.account_1.name,
domainid=self.account_1.domainid,
)
try:
virtual_machine_2.stop(self.apiclient)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_2.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for list volume response return valid data"
)
volume = volumes[0]
self.debug(
"Creating template from volume: %s" % volume.id)
# Create a snapshot from the ROOTDISK (Account 1)
template_2 = Template.create(
apiclient_account2,
self.services["template"],
volumeid=volume.id,
account=self.account_2.name,
domainid=self.account_2.domainid,
)
self.cleanup.append(template_2)
# Verify Template state
self.assertEqual(
template_2.isready,
True,
"Check Template is in ready state or not"
)
self.debug(
"Creating template from volume: %s" % volume.id)
# Create a second volume from the ROOTDISK (Account 2)
template_3 = Template.create(
apiclient_account2,
self.services["template"],
volumeid=volume.id,
account=self.account_2.name,
domainid=self.account_2.domainid,
)
self.cleanup.append(template_3)
# Verify Template state
self.assertEqual(
template_3.isready,
True,
"Check Template is in ready state or not"
)
except Exception as e:
self.fail("Exception occured: %s" % e)
return
class TestResourceLimitsDomain(cloudstackTestCase):
    """Verify enforcement of domain-scoped resource limits: instances,
    public IPs, snapshots, volumes and templates."""

    @classmethod
    def setUpClass(cls):
        """Create a dedicated domain, one account in it, and the shared
        service/disk offerings used by every test in the class."""
        cls.testClient = super(TestResourceLimitsDomain, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = Services().services
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.services["server"]["zoneid"] = cls.zone.id

        # Create Domain and an Account inside it
        cls.domain = Domain.create(
            cls.api_client,
            cls.services["domain"]
        )
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.services["account"] = cls.account.name

        # Create service offering and disk offering
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.services["disk_offering"]
        )
        cls._cleanup = [
            cls.service_offering,
            cls.disk_offering,
            cls.account,
            cls.domain
        ]
        return

    @classmethod
    def tearDownClass(cls):
        """Delete the class-level fixtures (offerings, account, domain)."""
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        """Fetch fresh API/DB clients and reset the per-test cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        """Destroy per-test resources and wait for VMs to be expunged."""
        try:
            # Clean up, terminate the created instance, volumes and snapshots
            cleanup_resources(self.apiclient, self.cleanup)
            # Wait for VMs to expunge
            wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_01_vm_per_domain(self):
        """Test VM limit per domain
        """
        # Validate the following
        # 1. Set max VM per domain to 2
        # 2. Create account and start 2 VMs. Verify VM state is Up and Running
        # 3. Try to create 3rd VM instance. The appropriate error or alert
        #    should be raised
        self.debug(
            "Updating instance resource limits for domain: %s" %
            self.account.domainid)
        # Resource type 0 == user VM instances; cap the domain at two
        update_resource_limit(
            self.apiclient,
            0,  # Instance
            domainid=self.account.domainid,
            max=2
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(
            virtual_machine_1.state,
            'Running',
            "Check VM state is Running or not"
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_2 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_2)
        # Verify VM state
        self.assertEqual(
            virtual_machine_2.state,
            'Running',
            "Check VM state is Running or not"
        )
        # The third deployment must be rejected by the domain limit.
        # BUGFIX: this used accountid=self.account_1.name, but this class
        # defines only self.account — the resulting AttributeError (not the
        # resource limit) used to satisfy assertRaises.
        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.apiclient,
                self.services["server"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id
            )
        return

    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_01_publicip_per_domain(self):
        """Test Public IP limit per domain
        """
        # Validate the following
        # 1. set max no of IPs per domain to 2.
        # 2. Create an account in this domain
        # 3. Create 1 VM in this domain
        # 4. Acquire 1 IP in the domain. IP should be successfully acquired
        # 5. Try to acquire 3rd IP in this domain. It should give the user an
        #    appropriate error and an alert should be generated.
        self.debug(
            "Updating public IP resource limits for domain: %s" %
            self.account.domainid)
        # Resource type 1 == public IP addresses; cap the domain at two
        # (the VM's source NAT IP consumes one of them)
        update_resource_limit(
            self.apiclient,
            1,  # Public Ip
            domainid=self.account.domainid,
            max=2
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(
            virtual_machine_1.state,
            'Running',
            "Check VM state is Running or not"
        )
        self.debug("Associating public IP for account: %s" % self.account.name)
        public_ip_1 = PublicIPAddress.create(
            self.apiclient,
            virtual_machine_1.account,
            virtual_machine_1.zoneid,
            virtual_machine_1.domainid,
            self.services["server"]
        )
        self.cleanup.append(public_ip_1)
        # Verify Public IP state
        self.assertEqual(
            public_ip_1.ipaddress.state in [
                'Allocated',
                'Allocating'
            ],
            True,
            "Check Public IP state is allocated or not"
        )
        # Acquiring a further public IP must be rejected by the limit
        with self.assertRaises(Exception):
            PublicIPAddress.create(
                self.apiclient,
                virtual_machine_1.account,
                virtual_machine_1.zoneid,
                virtual_machine_1.domainid,
                self.services["server"]
            )
        return

    @attr(speed="slow")
    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_03_snapshots_per_domain(self):
        """Test Snapshot limit per domain
        """
        # Validate the following
        # 1. set max no of snapshots per domain to 1.
        # 2. Create an account in this domain
        # 3. Create 1 VM in this domain
        # 4. Create one snapshot in the domain. Snapshot should be successfully
        #    created
        # 5. Try to create another snapshot in this domain. It should give the
        #    user an appropriate error and an alert should be generated.
        self.debug(
            "Updating snapshot resource limits for domain: %s" %
            self.account.domainid)
        # Resource type 3 == snapshots; cap the domain at one
        update_resource_limit(
            self.apiclient,
            3,  # Snapshot
            domainid=self.account.domainid,
            max=1
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(
            virtual_machine_1.state,
            'Running',
            "Check VM state is Running or not"
        )
        # Get the Root disk of VM
        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=virtual_machine_1.id,
            type='ROOT',
            listall=True
        )
        self.assertEqual(
            isinstance(volumes, list),
            True,
            "Check for list volume response return valid data"
        )
        volume = volumes[0]
        self.debug("Creating snapshot from volume: %s" % volumes[0].id)
        # Create a snapshot from the ROOTDISK
        snapshot_1 = Snapshot.create(self.apiclient,
                                     volume.id,
                                     account=self.account.name,
                                     domainid=self.account.domainid,
                                     )
        self.cleanup.append(snapshot_1)
        # Verify Snapshot state
        self.assertEqual(
            snapshot_1.state in [
                'BackedUp',
                'CreatedOnPrimary',
                'Allocated'
            ],
            True,
            "Snapshot state is not valid, it is %s" % snapshot_1.state
        )
        # Exception should be raised for second snapshot
        with self.assertRaises(Exception):
            Snapshot.create(self.apiclient,
                            volume.id,
                            account=self.account.name,
                            domainid=self.account.domainid,
                            )
        return

    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_04_volumes_per_domain(self):
        """Test Volumes limit per domain
        """
        # Validate the following
        # 1. set max no of volume per domain to 1.
        # 2. Create an account in this domain
        # 3. Create 1 VM in this domain
        # 4. Try to Create another VM in the domain. It should give the user an
        #    appropriate error that Volume limit is exhausted and an alert
        #    should be generated.
        self.debug(
            "Updating volume resource limits for domain: %s" %
            self.account.domainid)
        # Resource type 2 == volumes; the VM's ROOT disk uses the single slot
        update_resource_limit(
            self.apiclient,
            2,  # Volume
            domainid=self.account.domainid,
            max=1
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(
            virtual_machine_1.state,
            'Running',
            "Check VM state is Running or not"
        )
        # Exception should be raised for second volume
        with self.assertRaises(Exception):
            Volume.create(
                self.apiclient,
                self.services["volume"],
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
        return

    @attr(tags=["advanced", "advancedns"], required_hardware="false")
    def test_05_templates_per_domain(self):
        """Test Templates limit per domain
        """
        # Validate the following
        # 1. set max no of templates per domain to 2.
        # 2. Create an account in this domain
        # 3. Create 2 templates in this domain. Both template should be in
        #    ready state
        # 4. Try create 3rd template in the domain. It should give the user an
        #    appropriate error and an alert should be generated.
        try:
            userapiclient = self.testClient.getUserApiClient(
                UserName=self.account.name,
                DomainName=self.account.domain)
            # Resource type 2 == volumes; leave head-room for root disks
            update_resource_limit(
                self.apiclient,
                2,  # Volume
                domainid=self.account.domainid,
                max=5
            )
            # Resource type 4 == templates; cap the domain at two
            update_resource_limit(
                self.apiclient,
                4,  # Template
                domainid=self.account.domainid,
                max=2
            )
            virtual_machine_1 = VirtualMachine.create(
                userapiclient,
                self.services["server"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id
            )
            self.cleanup.append(virtual_machine_1)
            # Verify VM state
            self.assertEqual(
                virtual_machine_1.state,
                'Running',
                "Check VM state is Running or not"
            )
            # The VM must be stopped before templating its ROOT volume
            virtual_machine_1.stop(userapiclient)
            # Get the Root disk of VM
            volumes = list_volumes(
                userapiclient,
                virtualmachineid=virtual_machine_1.id,
                type='ROOT',
                listall=True
            )
            self.assertEqual(
                isinstance(volumes, list),
                True,
                "Check for list volume response return valid data"
            )
            volume = volumes[0]
            self.debug("Creating template from volume: %s" % volume.id)
            # Create the first template from the ROOTDISK
            template_1 = Template.create(
                userapiclient,
                self.services["template"],
                volumeid=volume.id,
                account=self.account.name,
                domainid=self.account.domainid,
            )
            self.cleanup.append(template_1)
            # Verify Template state
            self.assertEqual(
                template_1.isready,
                True,
                "Check Template is in ready state or not"
            )
            self.debug("Creating template from volume: %s" % volume.id)
            # Create the second template from the ROOTDISK
            template_2 = Template.create(
                userapiclient,
                self.services["template"],
                volumeid=volume.id,
                account=self.account.name,
                domainid=self.account.domainid,
            )
            self.cleanup.append(template_2)
            # Verify Template state
            self.assertEqual(
                template_2.isready,
                True,
                "Check Template is in ready state or not"
            )
        except Exception as e:
            self.fail("Exception occured: %s" % e)
        # Exception should be raised for the third template
        with self.assertRaises(Exception):
            Template.create(
                userapiclient,
                self.services["template"],
                volumeid=volume.id,
                account=self.account.name,
                domainid=self.account.domainid,
            )
        return
class TestMaxAccountNetworks(cloudstackTestCase):
    """Checks enforcement of the max.account.networks global setting."""

    @classmethod
    def setUpClass(cls):
        """Create the shared offerings used by every test in this class."""
        cls.testClient = super(TestMaxAccountNetworks, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = Services().services
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.domain = get_domain(cls.api_client)
        cls.services['mode'] = cls.zone.networktype
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.services["network_offering"],
            conservemode=True
        )
        # A freshly created network offering starts Disabled; enable it
        cls.network_offering.update(cls.api_client, state='Enabled')
        cls._cleanup = [cls.service_offering, cls.network_offering]

    @classmethod
    def tearDownClass(cls):
        """Delete the offerings created in setUpClass."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as err:
            raise Exception("Warning: Exception during cleanup : %s" % err)

    def setUp(self):
        """Give each test its own admin account and an empty cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []

    def tearDown(self):
        """Delete the per-test account, then clean up remaining resources."""
        try:
            self.account.delete(self.apiclient)
            interval = list_configurations(
                self.apiclient,
                name='account.cleanup.interval'
            )
            # Give the background account-cleanup thread time to finish
            time.sleep(int(interval[0].value) * 2)
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as err:
            raise Exception("Warning: Exception during cleanup : %s" % err)

    @attr(tags=["advanced", "advancedns", "simulator",
                "api", "eip"])
    def test_maxAccountNetworks(self):
        """Test Limit number of guest account specific networks
        """
        # 1. Read max.account.networks from the global configuration.
        # 2. Create exactly that many guest networks in the account.
        # 3. One further network creation must be rejected.
        config = Configurations.list(
            self.apiclient,
            name='max.account.networks',
            listall=True
        )
        self.assertEqual(
            isinstance(config, list),
            True,
            "List configurations should have max.account.networks"
        )
        config_value = int(config[0].value)
        self.debug("max.account.networks: %s" % config_value)
        for _ in range(config_value):
            self.debug("Creating network with network offering: %s" %
                       self.network_offering.id)
            guest_network = Network.create(
                self.apiclient,
                self.services["network"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id
            )
            self.debug("Created network with ID: %s" % guest_network.id)
        self.debug(
            "Creating network in account already having networks : %s" %
            config_value)
        with self.assertRaises(Exception):
            Network.create(
                self.apiclient,
                self.services["network"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id
            )
        self.debug('Create network failed (as expected)')
| 43.913712 | 128 | 0.424974 |
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (VirtualMachine,
Snapshot,
Template,
PublicIPAddress,
Account,
Domain,
Volume,
Network,
DiskOffering,
NetworkOffering,
ServiceOffering,
Configurations)
from marvin.lib.common import (list_volumes,
get_domain,
get_zone,
get_template,
update_resource_limit,
list_configurations,
wait_for_cleanup)
from marvin.lib.utils import cleanup_resources
import time
class Services:
    """Static test data consumed by the resource-limit test classes:
    account credentials, offering specs, VM/template/network parameters."""

    def __init__(self):
        """Build the services dictionary used throughout the test module."""
        self.services = {
            # Name for the dedicated test domain
            "domain": {
                "name": "Domain",
            },
            # Credentials for the throwaway test accounts
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                "password": "password",
            },
            # Smallest practical compute offering to keep tests cheap
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
            },
            "disk_offering": {
                "displaytext": "Small",
                "name": "Small",
                "disksize": 1
            },
            "volume": {
                "diskname": "TestDiskServ",
            },
            # Parameters for deployed test VMs
            "server": {
                "displayname": "TestVM",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "template": {
                "displaytext": "Cent OS Template",
                "name": "Cent OS Template",
                "ostype": 'CentOS 5.3 (64-bit)',
                "templatefilter": 'self',
            },
            # Isolated guest network offering with the full VR service set
            "network_offering": {
                "name": 'Network offering',
                "displaytext": 'Network offering',
                "guestiptype": 'Isolated',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "serviceProviderList": {
                    "Dhcp": 'VirtualRouter',
                    "Dns": 'VirtualRouter',
                    "SourceNat": 'VirtualRouter',
                    "PortForwarding": 'VirtualRouter',
                    "Vpn": 'VirtualRouter',
                    "Firewall": 'VirtualRouter',
                    "Lb": 'VirtualRouter',
                    "UserData": 'VirtualRouter',
                    "StaticNat": 'VirtualRouter',
                },
            },
            "network": {
                "name": "test network",
                "displaytext": "test network"
            },
            # OS type must match an existing built-in template
            "ostype": 'CentOS 5.3 (64-bit)',
            # Generic wait/poll intervals (seconds)
            "sleep": 60,
            "timeout": 10,
        }
class TestResourceLimitsAccount(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestResourceLimitsAccount, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["server"]["zoneid"] = cls.zone.id
cls.account_1 = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.account_2 = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls._cleanup = [
cls.disk_offering,
cls.service_offering,
cls.account_1,
cls.account_2
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
def test_01_vm_per_account(self):
self.debug(
"Updating instance resource limit for account: %s" %
self.account_1.name)
update_resource_limit(
self.apiclient,
0,
account=self.account_1.name,
domainid=self.account_1.domainid,
max=1
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_1.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine)
self.assertEqual(
virtual_machine.state,
'Running',
"Check VM state is Running or not"
)
with self.assertRaises(Exception):
VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_2.name)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_1)
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_2.name)
virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_2)
self.assertEqual(
virtual_machine_2.state,
'Running',
"Check VM state is Running or not"
)
return
@attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
def test_02_publicip_per_account(self):
self.debug(
"Updating public IP resource limit for account: %s" %
self.account_1.name)
update_resource_limit(
self.apiclient,
1,
account=self.account_1.name,
domainid=self.account_1.domainid,
max=2
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_1.name)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_1)
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_2.name)
virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_2)
self.assertEqual(
virtual_machine_2.state,
'Running',
"Check VM state is Running or not"
)
self.debug(
"Associating public IP for account: %s" %
virtual_machine_1.account)
public_ip_1 = PublicIPAddress.create(
self.apiclient,
virtual_machine_1.account,
virtual_machine_1.zoneid,
virtual_machine_1.domainid,
self.services["server"]
)
self.cleanup.append(public_ip_1)
time.sleep(self.services["sleep"])
self.assertEqual(
public_ip_1.ipaddress.state in [
'Allocated',
'Allocating'
],
True,
"Check Public IP state is allocated or not"
)
with self.assertRaises(Exception):
PublicIPAddress.create(
self.apiclient,
virtual_machine_1.account,
virtual_machine_1.zoneid,
virtual_machine_1.domainid,
self.services["server"]
)
self.debug(
"Associating public IP for account: %s" %
virtual_machine_2.account)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
virtual_machine_2.account,
virtual_machine_2.zoneid,
virtual_machine_2.domainid,
self.services["server"]
)
self.cleanup.append(public_ip_3)
self.assertEqual(
public_ip_3.ipaddress.state in [
'Allocated',
'Allocating'
],
True,
"Check Public IP state is allocated or not"
)
self.debug(
"Associating public IP for account: %s" %
virtual_machine_2.account)
public_ip_4 = PublicIPAddress.create(
self.apiclient,
virtual_machine_2.account,
virtual_machine_2.zoneid,
virtual_machine_2.domainid,
self.services["server"]
)
self.cleanup.append(public_ip_4)
self.assertEqual(
public_ip_4.ipaddress.state in [
'Allocated',
'Allocating'
],
True,
"Check Public IP state is allocated or not"
)
return
@attr(speed="slow")
@attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
def test_03_snapshots_per_account(self):
self.debug(
"Updating public IP resource limit for account: %s" %
self.account_1.name)
update_resource_limit(
self.apiclient,
3,
account=self.account_1.name,
domainid=self.account_1.domainid,
max=1
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_1.name)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_1)
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
self.debug(
"Deploying VM instance in account: %s" %
self.account_1.name)
virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_2)
self.assertEqual(
virtual_machine_2.state,
'Running',
"Check VM state is Running or not"
)
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_1.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for list volume response return valid data"
)
volume = volumes[0]
self.debug("Creating snapshot from volume: %s" % volumes[0].id)
snapshot_1 = Snapshot.create(self.apiclient,
volumes[0].id,
account=self.account_1.name,
domainid=self.account_1.domainid,
)
self.cleanup.append(snapshot_1)
self.assertEqual(
snapshot_1.state in [
'BackedUp',
'CreatedOnPrimary',
'Allocated'
],
True,
"Snapshot state is not valid, it is %s" % snapshot_1.state
)
with self.assertRaises(Exception):
Snapshot.create(self.apiclient,
volumes[0].id,
account=self.account_1.name,
domainid=self.account_1.domainid,
)
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_2.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for list volume response return valid data"
)
volume = volumes[0]
self.debug("Creating snapshot from volume: %s" % volumes[0].id)
snapshot_2 = Snapshot.create(self.apiclient,
volume.id,
account=self.account_2.name,
domainid=self.account_2.domainid,
)
self.cleanup.append(snapshot_2)
self.assertEqual(
snapshot_2.state in [
'BackedUp',
'CreatedOnPrimary',
'Allocated'
],
True,
"Snapshot state is not valid, it is %s" % snapshot_2.state
)
self.debug("Creating snapshot from volume: %s" % volumes[0].id)
snapshot_3 = Snapshot.create(self.apiclient,
volume.id,
account=self.account_2.name,
domainid=self.account_2.domainid,
)
self.cleanup.append(snapshot_3)
self.assertEqual(
snapshot_3.state in [
'BackedUp',
'CreatedOnPrimary',
'Allocated'
],
True,
"Snapshot state is not valid, it is %s" % snapshot_3.state
)
return
@attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
def test_04_volumes_per_account(self):
self.debug(
"Updating volume resource limit for account: %s" %
self.account_1.name)
update_resource_limit(
self.apiclient,
2,
account=self.account_1.name,
domainid=self.account_1.domainid,
max=2
)
self.debug(
"Deploying VM for account: %s" % self.account_1.name)
virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_1.name,
domainid=self.account_1.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_1)
self.assertEqual(
virtual_machine_1.state,
'Running',
"Check VM state is Running or not"
)
self.debug(
"Deploying VM for account: %s" % self.account_2.name)
virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account_2.name,
domainid=self.account_2.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(virtual_machine_2)
self.assertEqual(
virtual_machine_2.state,
'Running',
"Check VM state is Running or not"
)
self.debug(
"Create a data volume for account: %s" % self.account_1.name)
volume_1 = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account_1.name,
domainid=self.account_1.domainid,
diskofferingid=self.disk_offering.id
)
self.cleanup.append(volume_1)
self.assertEqual(
volume_1.state in [
'Allocated',
'Ready'
],
True,
"Check Volume state is Ready or not"
)
with self.assertRaises(Exception):
Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account_1.name,
domainid=self.account_1.domainid,
diskofferingid=self.disk_offering.id
)
self.debug(
"Create a data volume for account: %s" % self.account_2.name)
volume_2 = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account_2.name,
domainid=self.account_2.domainid,
diskofferingid=self.disk_offering.id
)
self.cleanup.append(volume_2)
self.assertEqual(
volume_2.state in [
'Allocated',
'Ready'
],
True,
"Check Volume state is Ready or not"
)
self.debug(
"Create a data volume for account: %s" % self.account_2.name)
volume_3 = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account_2.name,
domainid=self.account_2.domainid,
diskofferingid=self.disk_offering.id
)
self.cleanup.append(volume_3)
self.assertEqual(
volume_3.state in [
'Allocated',
'Ready'
],
True,
"Check Volume state is Ready or not"
)
return
    @attr(tags=["advanced", "advancedns"], required_hardware="false")
    def test_05_templates_per_account(self):
        """Validate the per-account template resource limit.

        Steps:
        1. Cap account_1's template limit (resource type 4) at 1.
        2. Deploy a VM for each account and create one template from
           account_1's ROOT volume (allowed).
        3. A second template for account_1 must fail (limit reached).
        4. account_2, which has no cap, can create two templates.
        """
        try:
            # User-level API clients so the limit is enforced against the
            # accounts themselves, not the admin session.
            apiclient_account1 = self.testClient.getUserApiClient(
                UserName=self.account_1.name,
                DomainName=self.account_1.domain)
            apiclient_account2 = self.testClient.getUserApiClient(
                UserName=self.account_2.name,
                DomainName=self.account_2.domain)
            self.debug(
                "Updating template resource limit for account: %s" %
                self.account_1.name)
            # Resource type 4 == templates; allow at most one.
            update_resource_limit(
                self.apiclient,
                4,
                account=self.account_1.name,
                domainid=self.account_1.domainid,
                max=1
            )
            # NOTE(review): the debug message below says "volume" but a VM
            # is deployed next -- message looks stale; confirm intent.
            self.debug(
                "Updating volume resource limit for account: %s" %
                self.account_1.name)
            virtual_machine_1 = VirtualMachine.create(
                self.apiclient,
                self.services["server"],
                templateid=self.template.id,
                accountid=self.account_1.name,
                domainid=self.account_1.domainid,
                serviceofferingid=self.service_offering.id
            )
            self.cleanup.append(virtual_machine_1)
            self.assertEqual(
                virtual_machine_1.state,
                'Running',
                "Check VM state is Running or not"
            )
            self.debug(
                "Deploying virtual machine for account: %s" %
                self.account_2.name)
            virtual_machine_2 = VirtualMachine.create(
                self.apiclient,
                self.services["server"],
                templateid=self.template.id,
                accountid=self.account_2.name,
                domainid=self.account_2.domainid,
                serviceofferingid=self.service_offering.id
            )
            self.cleanup.append(virtual_machine_2)
            self.assertEqual(
                virtual_machine_2.state,
                'Running',
                "Check VM state is Running or not"
            )
            # Templates are created from a stopped VM's ROOT volume.
            virtual_machine_1.stop(self.apiclient)
            volumes = list_volumes(
                self.apiclient,
                virtualmachineid=virtual_machine_1.id,
                type='ROOT',
                listall=True
            )
            self.assertEqual(
                isinstance(volumes, list),
                True,
                "Check for list volume response return valid data"
            )
            volume = volumes[0]
            self.debug(
                "Creating template from volume: %s" % volume.id)
            # First template for account_1: within the limit of 1.
            template_1 = Template.create(
                apiclient_account1,
                self.services["template"],
                volumeid=volume.id,
                account=self.account_1.name,
                domainid=self.account_1.domainid,
            )
            self.cleanup.append(template_1)
            self.assertEqual(
                template_1.isready,
                True,
                "Check Template is in ready state or not"
            )
        except Exception as e:
            self.fail("Exception occured: %s" % e)
        # Second template for account_1 must be rejected by the limit.
        with self.assertRaises(Exception):
            Template.create(
                apiclient_account1,
                self.services["template"],
                volumeid=volume.id,
                account=self.account_1.name,
                domainid=self.account_1.domainid,
            )
        try:
            # account_2 has no template cap: two creations should succeed.
            virtual_machine_2.stop(self.apiclient)
            volumes = list_volumes(
                self.apiclient,
                virtualmachineid=virtual_machine_2.id,
                type='ROOT',
                listall=True
            )
            self.assertEqual(
                isinstance(volumes, list),
                True,
                "Check for list volume response return valid data"
            )
            volume = volumes[0]
            self.debug(
                "Creating template from volume: %s" % volume.id)
            template_2 = Template.create(
                apiclient_account2,
                self.services["template"],
                volumeid=volume.id,
                account=self.account_2.name,
                domainid=self.account_2.domainid,
            )
            self.cleanup.append(template_2)
            self.assertEqual(
                template_2.isready,
                True,
                "Check Template is in ready state or not"
            )
            self.debug(
                "Creating template from volume: %s" % volume.id)
            template_3 = Template.create(
                apiclient_account2,
                self.services["template"],
                volumeid=volume.id,
                account=self.account_2.name,
                domainid=self.account_2.domainid,
            )
            self.cleanup.append(template_3)
            self.assertEqual(
                template_3.isready,
                True,
                "Check Template is in ready state or not"
            )
        except Exception as e:
            self.fail("Exception occured: %s" % e)
        return
class TestResourceLimitsDomain(cloudstackTestCase):
    """Tests for domain-level resource limits.

    Each test caps one resource type for ``self.account``'s domain via
    ``update_resource_limit`` and then verifies that creating one more
    resource than allowed raises an exception.
    """

    @classmethod
    def setUpClass(cls):
        """Create one-time fixtures: domain, account, and offerings."""
        cls.testClient = super(TestResourceLimitsDomain, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = Services().services
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.services["server"]["zoneid"] = cls.zone.id
        # Dedicated domain so limit changes cannot leak into other tests.
        cls.domain = Domain.create(
            cls.api_client,
            cls.services["domain"]
        )
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.services["account"] = cls.account.name
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.services["disk_offering"]
        )
        cls._cleanup = [
            cls.service_offering,
            cls.disk_offering,
            cls.account,
            cls.domain
        ]
        return

    @classmethod
    def tearDownClass(cls):
        """Tear down the class-level fixtures."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        """Fresh API/DB clients and an empty cleanup list per test."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        """Clean up per-test resources and wait for expunge to run."""
        try:
            cleanup_resources(self.apiclient, self.cleanup)
            wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_01_vm_per_domain(self):
        """Cap instances (resource type 0) at 2; the third VM must fail."""
        self.debug(
            "Updating instance resource limits for domain: %s" %
            self.account.domainid)
        update_resource_limit(
            self.apiclient,
            0,
            domainid=self.account.domainid,
            max=2
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_1)
        self.assertEqual(
            virtual_machine_1.state,
            'Running',
            "Check VM state is Running or not"
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_2 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_2)
        self.assertEqual(
            virtual_machine_2.state,
            'Running',
            "Check VM state is Running or not"
        )
        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.apiclient,
                self.services["server"],
                templateid=self.template.id,
                # Fixed: was ``self.account_1.name`` -- this class has no
                # ``account_1`` attribute, so assertRaises was satisfied by
                # an AttributeError instead of the resource-limit failure.
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id
            )
        return

    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_01_publicip_per_domain(self):
        """Cap public IPs (resource type 1) at 2 for the domain.

        One IP is consumed implicitly by the network; one explicit
        association succeeds and a second must fail.
        """
        self.debug(
            "Updating public IP resource limits for domain: %s" %
            self.account.domainid)
        update_resource_limit(
            self.apiclient,
            1,
            domainid=self.account.domainid,
            max=2
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_1)
        self.assertEqual(
            virtual_machine_1.state,
            'Running',
            "Check VM state is Running or not"
        )
        self.debug("Associating public IP for account: %s" % self.account.name)
        public_ip_1 = PublicIPAddress.create(
            self.apiclient,
            virtual_machine_1.account,
            virtual_machine_1.zoneid,
            virtual_machine_1.domainid,
            self.services["server"]
        )
        self.cleanup.append(public_ip_1)
        self.assertEqual(
            public_ip_1.ipaddress.state in [
                'Allocated',
                'Allocating'
            ],
            True,
            "Check Public IP state is allocated or not"
        )
        with self.assertRaises(Exception):
            PublicIPAddress.create(
                self.apiclient,
                virtual_machine_1.account,
                virtual_machine_1.zoneid,
                virtual_machine_1.domainid,
                self.services["server"]
            )
        return

    @attr(speed="slow")
    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_03_snapshots_per_domain(self):
        """Cap snapshots (resource type 3) at 1; the second must fail."""
        self.debug(
            "Updating snapshot resource limits for domain: %s" %
            self.account.domainid)
        update_resource_limit(
            self.apiclient,
            3,
            domainid=self.account.domainid,
            max=1
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_1)
        self.assertEqual(
            virtual_machine_1.state,
            'Running',
            "Check VM state is Running or not"
        )
        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=virtual_machine_1.id,
            type='ROOT',
            listall=True
        )
        self.assertEqual(
            isinstance(volumes, list),
            True,
            "Check for list volume response return valid data"
        )
        volume = volumes[0]
        self.debug("Creating snapshot from volume: %s" % volumes[0].id)
        snapshot_1 = Snapshot.create(self.apiclient,
                                     volume.id,
                                     account=self.account.name,
                                     domainid=self.account.domainid,
                                     )
        self.cleanup.append(snapshot_1)
        self.assertEqual(
            snapshot_1.state in [
                'BackedUp',
                'CreatedOnPrimary',
                'Allocated'
            ],
            True,
            "Snapshot state is not valid, it is %s" % snapshot_1.state
        )
        with self.assertRaises(Exception):
            Snapshot.create(self.apiclient,
                            volume.id,
                            account=self.account.name,
                            domainid=self.account.domainid,
                            )
        return

    @attr(tags=["advanced", "advancedns", "simulator"], required_hardware="false")
    def test_04_volumes_per_domain(self):
        """Cap volumes (resource type 2) at 1.

        The VM's ROOT volume consumes the quota, so creating an extra
        data volume must fail.
        """
        self.debug(
            "Updating volume resource limits for domain: %s" %
            self.account.domainid)
        update_resource_limit(
            self.apiclient,
            2,
            domainid=self.account.domainid,
            max=1
        )
        self.debug("Deploying VM for account: %s" % self.account.name)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(virtual_machine_1)
        self.assertEqual(
            virtual_machine_1.state,
            'Running',
            "Check VM state is Running or not"
        )
        with self.assertRaises(Exception):
            Volume.create(
                self.apiclient,
                self.services["volume"],
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
        return

    @attr(tags=["advanced", "advancedns"], required_hardware="false")
    def test_05_templates_per_domain(self):
        """Cap templates (resource type 4) at 2; the third must fail."""
        try:
            # User-level client so limits apply to the account's domain.
            userapiclient = self.testClient.getUserApiClient(
                UserName=self.account.name,
                DomainName=self.account.domain)
            # Volumes (type 2) capped at 5 so VM/template creation is not
            # blocked by the volume limit while templates are exercised.
            update_resource_limit(
                self.apiclient,
                2,
                domainid=self.account.domainid,
                max=5
            )
            update_resource_limit(
                self.apiclient,
                4,
                domainid=self.account.domainid,
                max=2
            )
            virtual_machine_1 = VirtualMachine.create(
                userapiclient,
                self.services["server"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id
            )
            self.cleanup.append(virtual_machine_1)
            self.assertEqual(
                virtual_machine_1.state,
                'Running',
                "Check VM state is Running or not"
            )
            # Templates are created from a stopped VM's ROOT volume.
            virtual_machine_1.stop(userapiclient)
            volumes = list_volumes(
                userapiclient,
                virtualmachineid=virtual_machine_1.id,
                type='ROOT',
                listall=True
            )
            self.assertEqual(
                isinstance(volumes, list),
                True,
                "Check for list volume response return valid data"
            )
            volume = volumes[0]
            self.debug("Creating template from volume: %s" % volume.id)
            template_1 = Template.create(
                userapiclient,
                self.services["template"],
                volumeid=volume.id,
                account=self.account.name,
                domainid=self.account.domainid,
            )
            self.cleanup.append(template_1)
            self.assertEqual(
                template_1.isready,
                True,
                "Check Template is in ready state or not"
            )
            self.debug("Creating template from volume: %s" % volume.id)
            template_2 = Template.create(
                userapiclient,
                self.services["template"],
                volumeid=volume.id,
                account=self.account.name,
                domainid=self.account.domainid,
            )
            self.cleanup.append(template_2)
            self.assertEqual(
                template_2.isready,
                True,
                "Check Template is in ready state or not"
            )
        except Exception as e:
            self.fail("Exception occured: %s" % e)
        # Third template exceeds the domain limit of 2 and must fail.
        with self.assertRaises(Exception):
            Template.create(
                userapiclient,
                self.services["template"],
                volumeid=volume.id,
                account=self.account.name,
                domainid=self.account.domainid,
            )
        return
class TestMaxAccountNetworks(cloudstackTestCase):
    """Test enforcement of the global ``max.account.networks`` setting."""

    @classmethod
    def setUpClass(cls):
        """Create shared fixtures: zone/template lookups and offerings."""
        cls.testClient = super(TestMaxAccountNetworks, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = Services().services
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.domain = get_domain(cls.api_client)
        cls.services['mode'] = cls.zone.networktype
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.services["network_offering"],
            conservemode=True
        )
        # Enable the offering before it can be used to create networks.
        cls.network_offering.update(cls.api_client, state='Enabled')
        cls._cleanup = [
            cls.service_offering,
            cls.network_offering
        ]
        return

    @classmethod
    def tearDownClass(cls):
        """Tear down the class-level fixtures."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        """Create a fresh admin account for each test."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        return

    def tearDown(self):
        """Delete the test account and wait for its resources to expunge."""
        try:
            self.account.delete(self.apiclient)
            interval = list_configurations(
                self.apiclient,
                name='account.cleanup.interval'
            )
            # Sleep for twice the cleanup interval so the account's
            # networks are actually removed before the next test.
            time.sleep(int(interval[0].value) * 2)
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "simulator",
                "api", "eip"])
    def test_maxAccountNetworks(self):
        """Create networks up to max.account.networks; one more must fail."""
        config = Configurations.list(
            self.apiclient,
            name='max.account.networks',
            listall=True
        )
        self.assertEqual(
            isinstance(config, list),
            True,
            "List configurations should have max.account.networks"
        )
        config_value = int(config[0].value)
        self.debug("max.account.networks: %s" % config_value)
        # Fill the quota exactly; each creation should succeed.
        for ctr in range(config_value):
            self.debug("Creating network with network offering: %s" %
                       self.network_offering.id)
            network = Network.create(
                self.apiclient,
                self.services["network"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id
            )
            self.debug("Created network with ID: %s" % network.id)
        self.debug(
            "Creating network in account already having networks : %s" %
            config_value)
        # One network beyond the quota must be rejected.
        with self.assertRaises(Exception):
            Network.create(
                self.apiclient,
                self.services["network"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id
            )
            self.debug('Create network failed (as expected)')
        return
| true | true |
f7fecb5dd903ced38507ad507a79c61876dcae62 | 288 | py | Python | server/forms/complete_task.py | andrewdstrain/Tasky | 605483a709e9d6e964d5e84d6e571d143315b933 | [
"BSD-2-Clause"
] | null | null | null | server/forms/complete_task.py | andrewdstrain/Tasky | 605483a709e9d6e964d5e84d6e571d143315b933 | [
"BSD-2-Clause"
] | null | null | null | server/forms/complete_task.py | andrewdstrain/Tasky | 605483a709e9d6e964d5e84d6e571d143315b933 | [
"BSD-2-Clause"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField
class CompleteTaskForm(FlaskForm):
    """Flask-WTF form for marking an existing task as complete.

    ``task_id.choices`` are presumably populated by the rendering view
    (not set here); ``coerce=int`` converts the submitted option value
    back to an integer id.
    """

    # Drop-down of tasks; the selected value is the task's integer id.
    task_id = SelectField('Select Task That Is Complete: ', coerce=int)
    submit = SubmitField("Mark Task Complete")
| 26.181818 | 71 | 0.725694 | from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField
class CompleteTaskForm(FlaskForm):
    """Flask-WTF form for marking an existing task as complete."""

    # Drop-down of tasks; ``coerce=int`` turns the submitted option
    # value back into an integer id.
    task_id = SelectField('Select Task That Is Complete: ', coerce=int)
    submit = SubmitField("Mark Task Complete")
| true | true |
f7fecbcb14a461a742829a7c14154173528c47e1 | 741 | py | Python | Day-109/subtract_prod_sum.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | [
"MIT"
] | 1 | 2020-06-15T05:59:01.000Z | 2020-06-15T05:59:01.000Z | Day-109/subtract_prod_sum.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | [
"MIT"
] | null | null | null | Day-109/subtract_prod_sum.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | [
"MIT"
] | 7 | 2020-01-24T23:03:58.000Z | 2021-05-31T01:00:27.000Z |
"""
* Initial submssion
class Solution:
def subtractProductAndSum(self, n: int) -> int:
num_list = []
for num in str(n):
num_list.append(int(num))
sum_num = sum(num_list)
prod_num = 1
for i in num_list:
prod_num = prod_num * i
print(prod_num - sum_num)
return prod_num - sum_num
"""
class Solution:
    """LeetCode 1281: difference of digit product and digit sum."""

    def subtractProductAndSum(self, n: int) -> int:
        """Return the product of n's decimal digits minus their sum."""
        digits = list(map(int, str(n)))
        total = 0
        product = 1
        # Accumulate both aggregates in a single pass over the digits.
        for digit in digits:
            total += digit
            product *= digit
        return product - total
# Ad-hoc smoke calls; the return values are discarded.
Test = Solution()
Test.subtractProductAndSum(124)
Test.subtractProductAndSum(234)
Test.subtractProductAndSum(590)
| 23.903226 | 51 | 0.59919 |
class Solution:
    """LeetCode 1281: difference of digit product and digit sum."""

    def subtractProductAndSum(self, n: int) -> int:
        """Return the product of n's decimal digits minus their sum."""
        # Split n into its decimal digits via its string form.
        num_list = [int(num) for num in str(n)]
        sum_num = sum(num_list)
        prod_num = 1
        for i in num_list:
            prod_num *= i
        return prod_num - sum_num
# Ad-hoc smoke calls; the return values are discarded.
Test = Solution()
Test.subtractProductAndSum(124)
Test.subtractProductAndSum(234)
Test.subtractProductAndSum(590)
| true | true |
f7fecc38168cd8e8e5783799cf074aa8927d8b9f | 16,670 | py | Python | otl.py | allenjhuang/otl_timecard | 474e26e6ee561d795245462f640504bc112f3e00 | [
"MIT"
] | null | null | null | otl.py | allenjhuang/otl_timecard | 474e26e6ee561d795245462f640504bc112f3e00 | [
"MIT"
] | null | null | null | otl.py | allenjhuang/otl_timecard | 474e26e6ee561d795245462f640504bc112f3e00 | [
"MIT"
] | null | null | null | from __future__ import annotations
import constants
import selenium_extras.additional_expected_conditions as AdditionalEC
from selenium_extras.additional_exceptions import (
IncorrectLoginDetails, MaxTriesReached, SubtaskNotFound
)
from selenium_extras.wrapper import Browser
from utils import log_wrap
import csv
from datetime import datetime
import logging
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
import time
from typing import Any, Iterator, List, Optional
class OracleTimeAndLabor(Browser):
    """Creates a new hourly timecard using a csv file as reference.

    Parameters
    ----------
    browser : str
        Valid options are: "chrome", "edge", "firefox", "ie"
    driver_path : str, optional
        File path to webdriver. Will look in PATH if not set.
    default_wait_time : int, optional
        Amount of time in seconds to wait when locating elements before
        timing out.
    sso_username : str, optional
        Oracle SSO username automatically filled in if provided
    sso_password : str, optional
        Oracle SSO password automatically filled in if provided

    Attributes
    ----------
    driver : selenium.webdriver object
        The webdriver object will differ depending on the browser used.
    driver_default_wait : selenium.webdriver.support.ui.WebDriverWait
        Webdriver wait with the default_wait_time amount as its timeout.
    """

    def __init__(
        self,
        browser: str,
        driver_path: Optional[str] = None,
        default_wait_time: int = 60,
        sso_username: Optional[str] = None,
        sso_password: Optional[str] = None
    ) -> None:
        super().__init__(browser, driver_path, default_wait_time)
        self._default_wait_time: int = default_wait_time
        # SSO credentials are typed in automatically when provided;
        # otherwise the user is prompted to log in manually.
        self._sso_username: Optional[str] = sso_username
        self._sso_password: Optional[str] = sso_password

    @log_wrap(before_msg="Opening the Oracle E-Business Suite website")
    def open_oracle_ebusiness_suite(
        self,
        current_try: int = 1,
        max_tries: int = constants.max_tries['open_oracle_ebusiness_suite']
    ) -> None:
        """Opens the Oracle E-Business Suite website.

        Navigates to the E-Business URL; if redirected to Single Sign On,
        logs in and retries (recursively) up to max_tries times.

        Raises
        ------
        MaxTriesReached
            If the site still is not reached after max_tries attempts.
        """
        ebusiness_url: str = constants.urls['oracle']['ebusiness']
        sso_url: str = constants.urls['oracle']['single_sign_on']
        self.driver.get(ebusiness_url)
        expected_urls: List[str] = [
            ebusiness_url,
            sso_url
        ]
        self.driver_default_wait.until(AdditionalEC.url_is_one_of(
            expected_urls
        ))
        if self.driver.current_url == ebusiness_url:
            pass  # Goal of this function reached.
        elif self.driver.current_url == sso_url:
            self._login_oracle_sso(self._sso_username, self._sso_password)
            ebusiness_no_query_parameters_url: str = \
                constants.urls['oracle']['ebusiness_no_query_parameters']
            sso_hiccup_url: str = \
                constants.urls['oracle']['single_sign_on_hiccup']
            expected_urls = [
                ebusiness_url,
                ebusiness_no_query_parameters_url,
                sso_url,
                sso_hiccup_url
            ]
            self.driver_default_wait.until(
                AdditionalEC.any_of(
                    AdditionalEC.url_is_one_of(expected_urls),
                    EC.url_contains(ebusiness_no_query_parameters_url)
                )
            )
            if (
                self.driver.current_url == ebusiness_url
                or self.driver.current_url == ebusiness_no_query_parameters_url
                or ebusiness_no_query_parameters_url in self.driver.current_url
            ):
                pass  # Goal of this function reached.
            elif current_try < max_tries:
                # Retry (still on the SSO or hiccup page).
                self.open_oracle_ebusiness_suite(current_try+1)
            elif current_try >= max_tries:
                raise MaxTriesReached(
                    "Too many attempts to open the Oracle E-Business Suite "
                    "have been made."
                )

    @log_wrap(before_msg="Navigating to recent timecards")
    def navigate_to_recent_timecards(self) -> None:
        """Navigates to Recent Timecards."""
        overtime_eligible_otl_link: Any = self.get_element_by_link_text(
            "US OTL - Emps Eligible for Overtime (Project Accounting)"
        )
        overtime_eligible_otl_link.click()
        recent_timecards_link: Any = self.get_element_by_link_text(
            "Recent Timecards"
        )
        recent_timecards_link.click()
        # The timecards page is served under one of two URL prefixes.
        self.driver_default_wait.until(AdditionalEC.any_of(
            EC.url_contains(constants.urls['oracle']['timecards_partial']),
            EC.url_contains(constants.urls['oracle']['timecards_alt_partial'])
        ))

    @log_wrap(before_msg="Creating a new timecard")
    def create_new_timecard(self) -> None:
        """Creates a new timecard."""
        create_timecard_button: Any = self.get_element_by_id(
            "Hxccreatetcbutton"
        )
        create_timecard_button.click()
        self.driver_default_wait.until(AdditionalEC.any_of(
            EC.url_contains(constants.urls['oracle']['timecards_partial']),
            EC.url_contains(constants.urls['oracle']['timecards_alt_partial'])
        ))

    @log_wrap(
        before_msg="Begin filling out timecard",
        after_msg="Finished filling out timecard"
    )
    def fill_in_timecard_details(self, timecard_path: str) -> None:
        """Fills out the timecard with details from the csv file.

        Only csv rows that contain at least one time entry are written
        to the site; extra HTML rows are requested as needed.
        """
        with open(timecard_path) as timecard_file:
            csv_reader: Iterator[List[str]] = csv.reader(timecard_file)
            # Discard the first row since it should only contain the header.
            next(csv_reader)
            # This is the timecard site's table tbody XPath.
            html_table_tbody_xpath: str = \
                constants.timecard['html']['table_tbody_xpath']
            # HTML rows advance only for csv rows that are actually used,
            # so the html and csv row counters are tracked separately.
            html_row_num: int = 0
            for csv_row_num, csv_row in enumerate(
                csv_reader
            ):  # type: int, List[str]
                current_html_row_inputs_list: List[Any] = \
                    self._get_list_of_html_inputs(
                        self._get_html_row_xpath(
                            html_row_num, html_table_tbody_xpath
                        )
                    )
                if self._is_time_entered(csv_row):
                    # No inputs found means the row doesn't exist yet.
                    if len(current_html_row_inputs_list) == 0:
                        self._add_html_row(
                            html_table_tbody_xpath, html_row_num
                        )
                        current_html_row_inputs_list = \
                            self._get_list_of_html_inputs(
                                self._get_html_row_xpath(
                                    html_row_num, html_table_tbody_xpath
                                )
                            )
                    self._fill_html_row(
                        html_inputs_list=current_html_row_inputs_list,
                        csv_row_data=self._row_data_generator(csv_row)
                    )
                    html_row_num += 1

    @log_wrap(before_msg="Logging into Oracle Single Sign On")
    def _login_oracle_sso(
        self, username: Optional[str] = None, password: Optional[str] = None
    ) -> None:
        """Logs into Oracle Single Sign On.

        If either credential is missing, waits for the user to log in
        manually within the default wait time.

        Raises
        ------
        IncorrectLoginDetails
            If the browser is still on the SSO page after the attempt.
        """
        if username is not None:
            username_input: Any = self.get_element_by_id("sso_username")
            username_input.send_keys(username)
        if password is not None:
            password_input: Any = self.get_element_by_id("ssopassword")
            password_input.send_keys(password + Keys.RETURN)
        if username is None or password is None:
            logging.info(
                "Please type in the login details and continue within "
                f"{self._default_wait_time} seconds."
            )
        self.driver_default_wait.until(EC.url_changes(
            constants.urls['oracle']['single_sign_on']
        ))
        # The SSO flow sometimes bounces through a "hiccup" page first.
        if (
            self.driver.current_url
            == constants.urls['oracle']['single_sign_on_hiccup']
        ):
            self.driver_default_wait.until(EC.url_changes(
                constants.urls['oracle']['single_sign_on_hiccup']
            ))
        # Landing back on the SSO page means the credentials were rejected.
        if (
            self.driver.current_url
            == constants.urls['oracle']['single_sign_on']
        ):
            raise IncorrectLoginDetails(
                "Invalid login. Please check your username and password."
            )

    def _get_html_row_xpath(
        self, html_row_num: int, html_table_tbody_xpath: str
    ) -> str:
        """Gets the timecard website's XPath for the HTML rows"""
        # XPath is 1-indexed and the first row is the header, so we start at
        # the 2nd tr.
        return f"{html_table_tbody_xpath}/tr[{html_row_num + 2}]"

    def _get_list_of_html_inputs(
        self, current_html_row_xpath: str
    ) -> List[Any]:
        """Gets a list of inputs from the provided xpath."""
        return self.get_elements_by_xpath(current_html_row_xpath + "//input")

    def _row_data_generator(self, csv_row: List[str]) -> Iterator[str]:
        """Iterates through the data within a row."""
        row_len: int = len(csv_row)
        current_col: int = 0
        while current_col < row_len:
            yield csv_row[current_col]
            current_col += 1

    def _is_time_entered(self, csv_row: List[str]) -> bool:
        """Checks if there are any time entries within a csv row."""
        row_len: int = len(csv_row)
        is_time_entered: bool = False
        # Only the columns after the project/task prefix hold times.
        for i in range(constants.timecard['num_cols_before_time'], row_len):
            if csv_row[i] not in (None, ""):
                is_time_entered = True
                break
        return is_time_entered

    @log_wrap(before_msg="Filling out HTML row")
    def _fill_html_row(
        self, html_inputs_list: List[Any], csv_row_data: Iterator[str]
    ) -> None:
        """Fills a row on the timecard website with data from the csv.

        Retries each input until the site's javascript accepts the value
        or the default wait time is exceeded (raises TimeoutError).
        """
        for html_input_num, html_input in enumerate(
            html_inputs_list
        ):  # type: int, Any
            # Get the data from the csv.
            cell_data: Optional[str] = None
            if (html_input_num < constants.timecard['num_cols_before_time']):
                # The html and csv cols are still matching here.
                cell_data = next(csv_row_data)
            # The total csv columns and total html inputs don't match, so we
            # have to ignore the html input for hours.
            elif self._is_on_hours_html_input(html_input_num) is False:
                data_to_be_checked: str = next(csv_row_data)
                if data_to_be_checked in (None, ""):
                    continue  # Empty cell, so continue
                else:  # Not an empty cell
                    parsed_data: Optional[datetime] = self._parse_time(
                        data_to_be_checked
                    )
                    if parsed_data is not None:
                        cell_data = self._convert_into_time_format(parsed_data)
                    else:  # Could not parse data
                        continue
            else:  # self._is_on_hours_html_input(html_input_num) is True
                continue
            # Fill input with cell_data.
            is_entry_success: bool = False
            for current_wait_time in range(
                0,
                self._default_wait_time,
                constants.timecard['sleep_time']['wait_for_data_entry']
            ):
                # Ensures the keys are sent, even with the website's heavy
                # javascript validation.
                html_input.clear()
                html_input.send_keys(cell_data)
                # Mandatory sleep after inputting data for the Project field,
                # else there may be pop-ups when filling in the Task field.
                if html_input_num == 0:
                    # Trigger javascript by clicking away from current input.
                    html_inputs_list[1].click()
                    time.sleep(
                        constants.timecard['sleep_time']['after_project_field']
                    )
                if (
                    html_input.get_attribute("value") != cell_data
                ):
                    time.sleep(
                        constants.timecard['sleep_time']['wait_for_data_entry']
                    )
                else:  # Input value matches the cell_data
                    is_entry_success = True
                    break
            if is_entry_success is False:
                raise TimeoutError(
                    "Default wait time exceeded for data entry."
                )

    def _is_on_hours_html_input(self, html_input_num: int) -> bool:
        """Checks if the html_input[html_input_num] is an input for hours."""
        # Every third input is the input for hours.
        return (
            (html_input_num - constants.timecard['num_cols_before_time'])
            % 3 == 2
        )

    def _parse_time(self, data: str) -> Optional[datetime]:
        """Converts string into a datetime object.

        Returns None if the string matches none of the accepted formats.
        """
        parsed_data: Optional[datetime] = None
        # Accept these formats.
        for time_format in ["%H:%M", "%I:%M:%S %p", "%I:%M %p", "%X"]:
            try:
                parsed_data = datetime.strptime(
                    data, time_format
                )
                break
            except (TypeError, ValueError):
                pass
        return parsed_data

    def _convert_into_time_format(self, data: datetime) -> str:
        """Converts datetime object to the website's accepted time format."""
        return data.strftime("%H:%M")

    @log_wrap(before_msg="Adding HTML row")
    def _add_html_row(
        self, html_table_tbody_xpath: str, current_html_row_num: int
    ) -> None:
        """Requests additional rows for input on the timecard website.

        Polls until the new row's inputs appear; clicks the button once
        more halfway through the wait, and raises TimeoutError if the
        row never shows up.
        """
        add_row_button: Any = self.get_element_by_xpath(
            html_table_tbody_xpath
            + "//button[contains(., 'Add Another Row')]"
        )
        # Wait a bit before clicking in case other things are still loading.
        time.sleep(
            constants.timecard['sleep_time']['before_adding_html_row']
        )
        add_row_button.click()
        # Wait until a new HTML row is added.
        current_add_row_wait_time: int = 0
        add_row_button_click_counter: int = 1
        while len(
            self._get_list_of_html_inputs(self._get_html_row_xpath(
                current_html_row_num, html_table_tbody_xpath
            ))
        ) == 0:
            # A validation error dialog can block the new row from
            # appearing; surface it instead of timing out.
            self._raise_error_if_invalid_subtask()
            time.sleep(
                constants.timecard['sleep_time']['after_adding_html_row']
            )
            current_add_row_wait_time += \
                constants.timecard['sleep_time']['after_adding_html_row']
            # Click the button once more halfway through waiting just in case.
            if (
                add_row_button_click_counter == 1
                and current_add_row_wait_time > self._default_wait_time * 0.5
            ):
                add_row_button.click()
                add_row_button_click_counter += 1
            if current_add_row_wait_time > self._default_wait_time:
                raise TimeoutError(
                    "Default wait time exceeded for adding HTML row."
                )

    def _raise_error_if_invalid_subtask(self) -> None:
        """Raises SubtaskNotFound if the page shows a Task validation error.

        Detects the combination of an Error heading, a Task link, and a
        "Select a valid value." message on the current page.
        """
        if (
            len(
                self.get_elements_by_xpath(
                    "//h1[contains(text(), 'Error')]"
                )
            ) > 0
            and len(self.get_elements_by_link_text("Task")) > 0
            and len(
                self.get_elements_by_xpath(
                    "//div[contains(text(), 'Select a valid value.')]"
                )
            ) > 0
        ):
            raise SubtaskNotFound(
                "Please check if the offending subtask exists."
            )
| 42.74359 | 80 | 0.566407 | from __future__ import annotations
import constants
import selenium_extras.additional_expected_conditions as AdditionalEC
from selenium_extras.additional_exceptions import (
IncorrectLoginDetails, MaxTriesReached, SubtaskNotFound
)
from selenium_extras.wrapper import Browser
from utils import log_wrap
import csv
from datetime import datetime
import logging
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
import time
from typing import Any, Iterator, List, Optional
class OracleTimeAndLabor(Browser):
    """Automates the Oracle Time and Labor (OTL) web UI via Selenium.

    Drives a browser through Oracle E-Business Suite single sign-on,
    navigation to the recent-timecards page, timecard creation, and
    filling in a timecard from a csv file.  Site URLs, retry counts and
    sleep durations come from the module-level ``constants`` object.
    """
    def __init__(
        self,
        browser: str,
        driver_path: Optional[str] = None,
        default_wait_time: int = 60,
        sso_username: Optional[str] = None,
        sso_password: Optional[str] = None
    ) -> None:
        """Create the browser session and remember SSO credentials.

        :param browser: browser name, forwarded to the ``Browser`` base.
        :param driver_path: optional path to the webdriver executable.
        :param default_wait_time: seconds used for explicit waits and as
            the upper bound for the data-entry retry loops.
        :param sso_username: Oracle SSO username; if ``None`` the user is
            prompted to log in manually.
        :param sso_password: Oracle SSO password; if ``None`` the user is
            prompted to log in manually.
        """
        super().__init__(browser, driver_path, default_wait_time)
        self._default_wait_time: int = default_wait_time
        self._sso_username: Optional[str] = sso_username
        self._sso_password: Optional[str] = sso_password
    @log_wrap(before_msg="Opening the Oracle E-Business Suite website")
    def open_oracle_ebusiness_suite(
        self,
        current_try: int = 1,
        max_tries: int = constants.max_tries['open_oracle_ebusiness_suite']
    ) -> None:
        """Open the E-Business Suite home page, logging in via SSO if asked.

        Navigates to the E-Business URL and waits until the browser lands
        either there or on the single sign-on page; in the latter case it
        performs the SSO login.  Retries the whole sequence (recursively)
        up to ``max_tries`` times.

        :raises MaxTriesReached: if the home page is not reached after
            ``max_tries`` attempts.
        """
        ebusiness_url: str = constants.urls['oracle']['ebusiness']
        sso_url: str = constants.urls['oracle']['single_sign_on']
        self.driver.get(ebusiness_url)
        expected_urls: List[str] = [
            ebusiness_url,
            sso_url
        ]
        self.driver_default_wait.until(AdditionalEC.url_is_one_of(
            expected_urls
        ))
        if self.driver.current_url == ebusiness_url:
            pass
        elif self.driver.current_url == sso_url:
            self._login_oracle_sso(self._sso_username, self._sso_password)
        # After login the site may redirect to a query-parameter-free
        # variant of the home page, or bounce through an SSO "hiccup" URL.
        ebusiness_no_query_parameters_url: str = \
            constants.urls['oracle']['ebusiness_no_query_parameters']
        sso_hiccup_url: str = \
            constants.urls['oracle']['single_sign_on_hiccup']
        expected_urls = [
            ebusiness_url,
            ebusiness_no_query_parameters_url,
            sso_url,
            sso_hiccup_url
        ]
        self.driver_default_wait.until(
            AdditionalEC.any_of(
                AdditionalEC.url_is_one_of(expected_urls),
                EC.url_contains(ebusiness_no_query_parameters_url)
            )
        )
        if (
            self.driver.current_url == ebusiness_url
            or self.driver.current_url == ebusiness_no_query_parameters_url
            or ebusiness_no_query_parameters_url in self.driver.current_url
        ):
            pass
        elif current_try < max_tries:
            # Not on the home page yet: start the whole attempt over.
            self.open_oracle_ebusiness_suite(current_try+1)
        elif current_try >= max_tries:
            raise MaxTriesReached(
                "Too many attempts to open the Oracle E-Business Suite "
                "have been made."
            )
    @log_wrap(before_msg="Navigating to recent timecards")
    def navigate_to_recent_timecards(self) -> None:
        """Click through the OTL links to reach the Recent Timecards page."""
        overtime_eligible_otl_link: Any = self.get_element_by_link_text(
            "US OTL - Emps Eligible for Overtime (Project Accounting)"
        )
        overtime_eligible_otl_link.click()
        recent_timecards_link: Any = self.get_element_by_link_text(
            "Recent Timecards"
        )
        recent_timecards_link.click()
        # Two URL variants exist for the timecards page; accept either.
        self.driver_default_wait.until(AdditionalEC.any_of(
            EC.url_contains(constants.urls['oracle']['timecards_partial']),
            EC.url_contains(constants.urls['oracle']['timecards_alt_partial'])
        ))
    @log_wrap(before_msg="Creating a new timecard")
    def create_new_timecard(self) -> None:
        """Press the "Create Timecard" button and wait for the form page."""
        create_timecard_button: Any = self.get_element_by_id(
            "Hxccreatetcbutton"
        )
        create_timecard_button.click()
        self.driver_default_wait.until(AdditionalEC.any_of(
            EC.url_contains(constants.urls['oracle']['timecards_partial']),
            EC.url_contains(constants.urls['oracle']['timecards_alt_partial'])
        ))
    @log_wrap(
        before_msg="Begin filling out timecard",
        after_msg="Finished filling out timecard"
    )
    def fill_in_timecard_details(self, timecard_path: str) -> None:
        """Fill the timecard form from the csv file at ``timecard_path``.

        The csv header row is skipped.  Rows with no time entered (per
        ``_is_time_entered``) are ignored; for each remaining row, an
        HTML row is added to the form if needed and then populated.

        :param timecard_path: path to the csv file describing the timecard.
        """
        with open(timecard_path) as timecard_file:
            csv_reader: Iterator[List[str]] = csv.reader(timecard_file)
            next(csv_reader)  # Skip the csv header row.
            html_table_tbody_xpath: str = \
                constants.timecard['html']['table_tbody_xpath']
            # HTML rows are counted separately from csv rows because
            # csv rows without time entries do not produce an HTML row.
            html_row_num: int = 0
            for csv_row_num, csv_row in enumerate(
                csv_reader
            ): # type: int, List[str]
                current_html_row_inputs_list: List[Any] = \
                    self._get_list_of_html_inputs(
                        self._get_html_row_xpath(
                            html_row_num, html_table_tbody_xpath
                        )
                    )
                if self._is_time_entered(csv_row):
                    if len(current_html_row_inputs_list) == 0:
                        # The form has no row at this index yet; add one
                        # and re-fetch its inputs.
                        self._add_html_row(
                            html_table_tbody_xpath, html_row_num
                        )
                        current_html_row_inputs_list = \
                            self._get_list_of_html_inputs(
                                self._get_html_row_xpath(
                                    html_row_num, html_table_tbody_xpath
                                )
                            )
                    self._fill_html_row(
                        html_inputs_list=current_html_row_inputs_list,
                        csv_row_data=self._row_data_generator(csv_row)
                    )
                    html_row_num += 1
    @log_wrap(before_msg="Logging into Oracle Single Sign On")
    def _login_oracle_sso(
        self, username: Optional[str] = None, password: Optional[str] = None
    ) -> None:
        """Log in on the Oracle SSO page, or wait for a manual login.

        If either credential is ``None`` the user is asked (via logging)
        to complete the login themselves within the default wait time.

        :raises IncorrectLoginDetails: if the browser ends up back on the
            SSO page after the login attempt.
        """
        if username is not None:
            username_input: Any = self.get_element_by_id("sso_username")
            username_input.send_keys(username)
        if password is not None:
            password_input: Any = self.get_element_by_id("ssopassword")
            # RETURN submits the form along with the password.
            password_input.send_keys(password + Keys.RETURN)
        if username is None or password is None:
            logging.info(
                "Please type in the login details and continue within "
                f"{self._default_wait_time} seconds."
            )
        self.driver_default_wait.until(EC.url_changes(
            constants.urls['oracle']['single_sign_on']
        ))
        if (
            self.driver.current_url
            == constants.urls['oracle']['single_sign_on_hiccup']
        ):
            # Transient intermediate page; wait for it to redirect away.
            self.driver_default_wait.until(EC.url_changes(
                constants.urls['oracle']['single_sign_on_hiccup']
            ))
        if (
            self.driver.current_url
            == constants.urls['oracle']['single_sign_on']
        ):
            # Landing back on the SSO page means the login was rejected.
            raise IncorrectLoginDetails(
                "Invalid login. Please check your username and password."
            )
    def _get_html_row_xpath(
        self, html_row_num: int, html_table_tbody_xpath: str
    ) -> str:
        """Return the XPath of the ``tr`` for 0-based row ``html_row_num``."""
        # XPath is 1-indexed and the first row is the header, so we start at
        # the 2nd tr.
        return f"{html_table_tbody_xpath}/tr[{html_row_num + 2}]"
    def _get_list_of_html_inputs(
        self, current_html_row_xpath: str
    ) -> List[Any]:
        """Return all ``input`` elements under the given row XPath."""
        return self.get_elements_by_xpath(current_html_row_xpath + "//input")
    def _row_data_generator(self, csv_row: List[str]) -> Iterator[str]:
        """Yield the cells of ``csv_row`` one at a time, left to right."""
        row_len: int = len(csv_row)
        current_col: int = 0
        while current_col < row_len:
            yield csv_row[current_col]
            current_col += 1
    def _is_time_entered(self, csv_row: List[str]) -> bool:
        """Return True if any cell after the leading columns is non-empty."""
        row_len: int = len(csv_row)
        is_time_entered: bool = False
        for i in range(constants.timecard['num_cols_before_time'], row_len):
            if csv_row[i] not in (None, ""):
                is_time_entered = True
                break
        return is_time_entered
    @log_wrap(before_msg="Filling out HTML row")
    def _fill_html_row(
        self, html_inputs_list: List[Any], csv_row_data: Iterator[str]
    ) -> None:
        """Type one csv row's worth of data into one HTML row's inputs.

        The first ``num_cols_before_time`` inputs receive the csv values
        verbatim; subsequent non-hours inputs receive parsed, reformatted
        times; hours inputs are skipped entirely.

        :param html_inputs_list: the ``input`` elements of the target row.
        :param csv_row_data: generator yielding the csv cells in order.
        :raises TimeoutError: if a value cannot be made to stick within
            the default wait time.
        """
        for html_input_num, html_input in enumerate(
            html_inputs_list
        ): # type: int, Any
            # Get the data from the csv.
            cell_data: Optional[str] = None
            if (html_input_num < constants.timecard['num_cols_before_time']):
                # The html and csv cols are still matching here.
                cell_data = next(csv_row_data)
            # The total csv columns and total html inputs don't match, so
            # past this point only the non-hours inputs (presumably the
            # start/stop time fields) receive csv data; the hours inputs
            # are skipped in the final else branch.
            elif self._is_on_hours_html_input(html_input_num) is False:
                data_to_be_checked: str = next(csv_row_data)
                if data_to_be_checked in (None, ""):
                    continue
                else:
                    parsed_data: Optional[datetime] = self._parse_time(
                        data_to_be_checked
                    )
                    if parsed_data is not None:
                        cell_data = self._convert_into_time_format(parsed_data)
                    else:
                        continue
            else:
                continue
            is_entry_success: bool = False
            for current_wait_time in range(
                0,
                self._default_wait_time,
                constants.timecard['sleep_time']['wait_for_data_entry']
            ):
                # Keep re-entering the value until the input actually
                # holds it; the page's javascript validation can
                # interfere with what was typed.
                html_input.clear()
                html_input.send_keys(cell_data)
                # Mandatory sleep after inputting data for the Project field,
                # else there may be pop-ups when filling in the Task field.
                if html_input_num == 0:
                    # Trigger javascript by clicking away from current input.
                    html_inputs_list[1].click()
                    time.sleep(
                        constants.timecard['sleep_time']['after_project_field']
                    )
                if (
                    html_input.get_attribute("value") != cell_data
                ):
                    time.sleep(
                        constants.timecard['sleep_time']['wait_for_data_entry']
                    )
                else: # Input value matches the cell_data
                    is_entry_success = True
                    break
            if is_entry_success is False:
                raise TimeoutError(
                    "Default wait time exceeded for data entry."
                )
    def _is_on_hours_html_input(self, html_input_num: int) -> bool:
        """Return True if input index ``html_input_num`` is an hours field."""
        # Every third input is the input for hours.
        return (
            (html_input_num - constants.timecard['num_cols_before_time'])
            % 3 == 2
        )
    def _parse_time(self, data: str) -> Optional[datetime]:
        """Parse ``data`` as a time of day; return ``None`` if no format fits."""
        parsed_data: Optional[datetime] = None
        # Accept these formats.
        for time_format in ["%H:%M", "%I:%M:%S %p", "%I:%M %p", "%X"]:
            try:
                parsed_data = datetime.strptime(
                    data, time_format
                )
                break
            except (TypeError, ValueError):
                pass
        return parsed_data
    def _convert_into_time_format(self, data: datetime) -> str:
        """Format ``data`` as 24-hour ``HH:MM``, the form's expected format."""
        return data.strftime("%H:%M")
    @log_wrap(before_msg="Adding HTML row")
    def _add_html_row(
        self, html_table_tbody_xpath: str, current_html_row_num: int
    ) -> None:
        """Click "Add Another Row" and wait until the new row appears.

        Clicks the button a second time halfway through the wait in case
        the first click did not register.

        :raises TimeoutError: if no row appears within the default wait
            time.
        """
        add_row_button: Any = self.get_element_by_xpath(
            html_table_tbody_xpath
            + "//button[contains(., 'Add Another Row')]"
        )
        # Wait a bit before clicking in case other things are still loading.
        time.sleep(
            constants.timecard['sleep_time']['before_adding_html_row']
        )
        add_row_button.click()
        # Wait until a new HTML row is added.
        current_add_row_wait_time: int = 0
        add_row_button_click_counter: int = 1
        while len(
            self._get_list_of_html_inputs(self._get_html_row_xpath(
                current_html_row_num, html_table_tbody_xpath
            ))
        ) == 0:
            self._raise_error_if_invalid_subtask()
            time.sleep(
                constants.timecard['sleep_time']['after_adding_html_row']
            )
            current_add_row_wait_time += \
                constants.timecard['sleep_time']['after_adding_html_row']
            # Click the button once more halfway through waiting just in case.
            if (
                add_row_button_click_counter == 1
                and current_add_row_wait_time > self._default_wait_time * 0.5
            ):
                add_row_button.click()
                add_row_button_click_counter += 1
            if current_add_row_wait_time > self._default_wait_time:
                raise TimeoutError(
                    "Default wait time exceeded for adding HTML row."
                )
    def _raise_error_if_invalid_subtask(self) -> None:
        """Raise ``SubtaskNotFound`` if the page shows the invalid-task error.

        Detects the combination of an "Error" heading, a "Task" link and
        a "Select a valid value." message on the current page.
        """
        if (
            len(
                self.get_elements_by_xpath(
                    "//h1[contains(text(), 'Error')]"
                )
            ) > 0
            and len(self.get_elements_by_link_text("Task")) > 0
            and len(
                self.get_elements_by_xpath(
                    "//div[contains(text(), 'Select a valid value.')]"
                )
            ) > 0
        ):
            raise SubtaskNotFound(
                "Please check if the offending subtask exists."
            )
| true | true |
f7fecd139876716900add92c19f29a1441b59c06 | 67,206 | py | Python | tests/test_wrappers.py | saleweaver/python-amazon-mws | a6d58fbcb0543b48f462184f86671fcafc55b194 | [
"Unlicense"
] | null | null | null | tests/test_wrappers.py | saleweaver/python-amazon-mws | a6d58fbcb0543b48f462184f86671fcafc55b194 | [
"Unlicense"
] | null | null | null | tests/test_wrappers.py | saleweaver/python-amazon-mws | a6d58fbcb0543b48f462184f86671fcafc55b194 | [
"Unlicense"
] | null | null | null | import pytest
import xmltodict
from mws.mws import DataWrapper, DictWrapper, MWSError
def test_content_md5_comparison():
    """DataWrapper accepts data whose Content-MD5 header matches the body.

    No exception means the MD5 check passed.
    """
    data = b'abc\tdef'
    # Base64-encoded MD5 digest of ``data``.  Named ``content_md5``
    # instead of ``hash`` to avoid shadowing the builtin.
    content_md5 = 'Zj+Bh1BJ8HzBb9ToK28qFQ=='
    DataWrapper(data, {'content-md5': content_md5})
def test_content_md5_check_raises_exception_if_fails():
    """DataWrapper raises MWSError when the Content-MD5 header is wrong."""
    data = b'abc\tdef'
    # Deliberately incorrect digest.  Named ``bad_md5`` instead of
    # ``hash`` to avoid shadowing the builtin.
    bad_md5 = 'notthehash'
    with pytest.raises(MWSError):
        DataWrapper(data, {'content-md5': bad_md5})
def test_decode_byte_xml():
xml = b'<?xml version="1.0"?><ListMatchingProductsResponse xmlns="http://mws.amazonservices.com/schema/Products/2011-10-01"><ListMatchingProductsResult><Products xmlns:ns2="http://mws.amazonservices.com/schema/Products/2011-10-01/default.xsd"><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8891808660</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Creator Role="Autore">Mizielinska, Aleksandra</ns2:Creator><ns2:Creator Role="Autore">Mizielinski, Daniel</ns2:Creator><ns2:Creator Role="Traduttore">Parisi, V.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">14.80312</ns2:Height><ns2:Length Units="inches">10.86612</ns2:Length><ns2:Width Units="inches">1.06299</ns2:Width><ns2:Weight Units="pounds">3.17</ns2:Weight></ns2:ItemDimensions><ns2:Label>Mondadori Electa</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>25.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Mondadori Electa</ns2:Manufacturer><ns2:NumberOfPages>144</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.8661417314</ns2:Height><ns2:Length Units="inches">14.9606299060</ns2:Length><ns2:Width Units="inches">11.0236220360</ns2:Width><ns2:Weight Units="pounds">3.1746565728</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2016-10-25</ns2:PublicationDate><ns2:Publisher>Mondadori Electa</ns2:Publisher><ns2:ReleaseDate>2016-10-25</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61K2xircqJL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width 
Units="pixels">55</ns2:Width></ns2:SmallImage><ns2:Studio>Mondadori Electa</ns2:Studio><ns2:Title>Mappe. Un atlante per viaggiare tra terra, mari e culture del mondo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>2843</Rank></SalesRank><SalesRank><ProductCategoryId>13064701031</ProductCategoryId><Rank>2</Rank></SalesRank><SalesRank><ProductCategoryId>13077570031</ProductCategoryId><Rank>2</Rank></SalesRank><SalesRank><ProductCategoryId>13064711031</ProductCategoryId><Rank>15</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8858014308</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>Passioni</ns2:Brand><ns2:Creator Role="Autore">Brotton, Jerry</ns2:Creator><ns2:Creator Role="Traduttore">Fontebuoni, A.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">10.31494</ns2:Height><ns2:Length Units="inches">12.20470</ns2:Length><ns2:Width Units="inches">0.86614</ns2:Width></ns2:ItemDimensions><ns2:Label>Gribaudo</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>24.90</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Gribaudo</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>256</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.1023622036</ns2:Height><ns2:Length Units="inches">12.1653543183</ns2:Length><ns2:Width Units="inches">10.2362204620</ns2:Width><ns2:Weight 
Units="pounds">3.4392112872</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2015-11-05</ns2:PublicationDate><ns2:Publisher>Gribaudo</ns2:Publisher><ns2:ReleaseDate>2015-11-05</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61UmGSV5reL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">62</ns2:Width></ns2:SmallImage><ns2:Studio>Gribaudo</ns2:Studio><ns2:Title>Le grandi mappe. Oltre 60 capolavori raccontano l\'evoluzione dell\'uomo, la sua storia e la sua cultura. Ediz. illustrata</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>23519</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>40</Rank></SalesRank><SalesRank><ProductCategoryId>508856031</ProductCategoryId><Rank>452</Rank></SalesRank><SalesRank><ProductCategoryId>508758031</ProductCategoryId><Rank>3211</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8807890283</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Brand>UNIVERSALE ECONOMICA. SAGGI</ns2:Brand><ns2:Creator Role="Autore">Brotton, Jerry</ns2:Creator><ns2:Creator Role="Traduttore">Sala, V. 
B.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">5.47243</ns2:Height><ns2:Length Units="inches">8.77951</ns2:Length><ns2:Width Units="inches">1.49606</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Feltrinelli</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>19.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Feltrinelli</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>526</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.59842519522</ns2:Height><ns2:Length Units="inches">8.7007873927</ns2:Length><ns2:Width Units="inches">5.49999999439</ns2:Width><ns2:Weight Units="pounds">1.6755131912</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2017-11-23</ns2:PublicationDate><ns2:Publisher>Feltrinelli</ns2:Publisher><ns2:ReleaseDate>2017-11-23</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61jo5I7vBjL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">48</ns2:Width></ns2:SmallImage><ns2:Studio>Feltrinelli</ns2:Studio><ns2:Title>La storia del mondo in dodici mappe</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>18925</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>34</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8811149843</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina 
rigida</ns2:Binding><ns2:Brand>SAGGI</ns2:Brand><ns2:Creator Role="Autore">Wilford, John Noble</ns2:Creator><ns2:Creator Role="Traduttore">Gianna Lonza</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.97636</ns2:Height><ns2:Length Units="inches">6.69290</ns2:Length><ns2:Width Units="inches">1.41732</ns2:Width></ns2:ItemDimensions><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>30.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>478</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4960629906</ns2:Height><ns2:Length Units="inches">8.7401574714</ns2:Length><ns2:Width Units="inches">6.2992125920</ns2:Width><ns2:Weight Units="pounds">1.4991433816</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2018-11-22</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2018-11-22</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61KTEY8nMgL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">54</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>I signori delle mappe. 
La storia avventurosa dell\'invenzione della cartografia</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>23845</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>41</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>B084FZWQHD</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Creator Role="Autore">Frasante, Marco</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.5</ns2:Height><ns2:Length Units="inches">5.5</ns2:Length><ns2:Width Units="inches">0.2</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Independently published</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Sconosciuto</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>12.69</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Independently published</ns2:Manufacturer><ns2:NumberOfPages>87</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.2</ns2:Height><ns2:Length Units="inches">8.5</ns2:Length><ns2:Width Units="inches">5.5</ns2:Width><ns2:Weight Units="pounds">0.37</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-02-09</ns2:PublicationDate><ns2:Publisher>Independently published</ns2:Publisher><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51Na9vFKvgL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width 
Units="pixels">49</ns2:Width></ns2:SmallImage><ns2:Studio>Independently published</ns2:Studio><ns2:Title>Mappe Mentali e Mappe Concettuali: La Guida Pi\xc3\xb9 Completa Per Memorizzare e Apprendere Qualsiasi Cosa In Modo Semplice e Veloce</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>2663</Rank></SalesRank><SalesRank><ProductCategoryId>508885031</ProductCategoryId><Rank>192</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>881160771X</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>SAGGI</ns2:Brand><ns2:Creator Role="Autore">Marshall, Tim</ns2:Creator><ns2:Creator Role="Illustratore">Easton, G.</ns2:Creator><ns2:Creator Role="Illustratore">Smith, J.</ns2:Creator><ns2:Creator Role="Illustratore">Hawkins, E.</ns2:Creator><ns2:Creator Role="Illustratore">Crane, P.</ns2:Creator><ns2:Creator Role="Traduttore">Caraffini, S.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">12.40155</ns2:Height><ns2:Length Units="inches">10.03935</ns2:Length><ns2:Width Units="inches">0.59055</ns2:Width></ns2:ItemDimensions><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>20.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>80</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.5511811018</ns2:Height><ns2:Length Units="inches">12.2047243970</ns2:Length><ns2:Width Units="inches">9.8425196750</ns2:Width><ns2:Weight 
Units="pounds">1.6975594174</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-02-13</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2020-02-13</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/514B0NG7gvL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">57</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>Le 12 mappe che spiegano il mondo ai ragazzi</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>24610</Rank></SalesRank><SalesRank><ProductCategoryId>13064684031</ProductCategoryId><Rank>7</Rank></SalesRank><SalesRank><ProductCategoryId>13064569031</ProductCategoryId><Rank>35</Rank></SalesRank><SalesRank><ProductCategoryId>13077656031</ProductCategoryId><Rank>35</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>881167378X</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Creator Role="Autore">Marshall, Tim</ns2:Creator><ns2:Creator Role="Traduttore">Merlini, R.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.77951</ns2:Height><ns2:Length Units="inches">5.66928</ns2:Length><ns2:Width Units="inches">1.37795</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua 
originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>19.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>313</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4960629906</ns2:Height><ns2:Length Units="inches">8.5826771566</ns2:Length><ns2:Width Units="inches">5.7086614115</ns2:Width><ns2:Weight Units="pounds">1.1904962148</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2017-06-08</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2017-06-08</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51DYJDPBKmL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">50</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>Le 10 mappe che spiegano il mondo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>35380</Rank></SalesRank><SalesRank><ProductCategoryId>508819031</ProductCategoryId><Rank>197</Rank></SalesRank><SalesRank><ProductCategoryId>508812031</ProductCategoryId><Rank>678</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8804712279</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>LE SCIE. 
NUOVA SERIE STRANIERI</ns2:Brand><ns2:Creator Role="Autore">Moller, Violet</ns2:Creator><ns2:Creator Role="Traduttore">Vanni, L.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">9.44880</ns2:Height><ns2:Length Units="inches">6.69290</ns2:Length><ns2:Width Units="inches">1.18110</ns2:Width></ns2:ItemDimensions><ns2:Label>Mondadori</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>22.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Mondadori</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>325</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4173228332</ns2:Height><ns2:Length Units="inches">9.4488188880</ns2:Length><ns2:Width Units="inches">6.4960629855</ns2:Width><ns2:Weight Units="pounds">1.543235834</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2019-05-28</ns2:PublicationDate><ns2:Publisher>Mondadori</ns2:Publisher><ns2:ReleaseDate>2019-05-28</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51ntMunIvhL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">50</ns2:Width></ns2:SmallImage><ns2:Studio>Mondadori</ns2:Studio><ns2:Title>La mappa dei libri perduti. 
Come la conoscenza antica \xc3\xa8 stata perduta e ritrovata: una storia in sette citt\xc3\xa0</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>63759</Rank></SalesRank><SalesRank><ProductCategoryId>508810031</ProductCategoryId><Rank>692</Rank></SalesRank><SalesRank><ProductCategoryId>508879031</ProductCategoryId><Rank>15957</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>B008RJFRTK</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>App</ns2:Binding><ns2:Brand>MY.COM</ns2:Brand><ns2:HardwarePlatform>Android</ns2:HardwarePlatform><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>MY.COM</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>arabic</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>chinese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>czech</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>dutch</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>english</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>french</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>german</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>japanese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>korean</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>polish</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>portuguese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>russian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:
Language><ns2:Language><ns2:Name>spanish</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>vietnamese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>0.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>MY.COM</ns2:Manufacturer><ns2:OperatingSystem>Android</ns2:OperatingSystem><ns2:PartNumber>com.mapswithme.maps.pro</ns2:PartNumber><ns2:ProductGroup>Mobile Application</ns2:ProductGroup><ns2:ProductTypeName>MOBILE_APPLICATION</ns2:ProductTypeName><ns2:Publisher>MY.COM</ns2:Publisher><ns2:ReleaseDate>2016-01-14</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61KevuswqEL._SL75_.png</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">75</ns2:Width></ns2:SmallImage><ns2:Studio>MY.COM</ns2:Studio><ns2:Title>MAPS.ME \xe2\x80\x94 Mappe Offline</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings/></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8832706571</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Creator Role="Autore">aa.vv.</ns2:Creator><ns2:Genre>Diritto</ns2:Genre><ns2:Label>Neldiritto Editore</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>90.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Neldiritto Editore</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>1200</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">3.0708661386</ns2:Height><ns2:Length Units="inches">9.8425196750</ns2:Length><ns2:Width Units="inches">6.7716535364</ns2:Width><ns2:Weight 
Units="pounds">5.291094288000000881849048</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-01-24</ns2:PublicationDate><ns2:Publisher>Neldiritto Editore</ns2:Publisher><ns2:ReleaseDate>2020-01-24</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/41HeNbq4xKL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">53</ns2:Width></ns2:SmallImage><ns2:Studio>Neldiritto Editore</ns2:Studio><ns2:Title>Concorso Magistratura 2020: Mappe e schemi di Diritto civile-Diritto penale-Diritto amministrativo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>62044</Rank></SalesRank><SalesRank><ProductCategoryId>1346646031</ProductCategoryId><Rank>617</Rank></SalesRank><SalesRank><ProductCategoryId>1346648031</ProductCategoryId><Rank>754</Rank></SalesRank></SalesRankings></Product></Products></ListMatchingProductsResult><ResponseMetadata><RequestId>d384713e-7c79-4a6d-81cd-d0aa68c7b409</RequestId></ResponseMetadata></ListMatchingProductsResponse>'
expected_json = {"ListMatchingProductsResult": {"Products": {"Product": [{"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8891808660"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Creator": [{"value": "Mizielinska, Aleksandra", "Role": {"value": "Autore"}}, {"value": "Mizielinski, Daniel", "Role": {"value": "Autore"}}, {"value": "Parisi, V.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "14.80312", "Units": {"value": "inches"}}, "Length": {"value": "10.86612", "Units": {"value": "inches"}}, "Width": {"value": "1.06299", "Units": {"value": "inches"}}, "Weight": {"value": "3.17", "Units": {"value": "pounds"}}}, "Label": {"value": "Mondadori Electa"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}]}, "ListPrice": {"Amount": {"value": "25.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Mondadori Electa"}, "NumberOfPages": {"value": "144"}, "PackageDimensions": {"Height": {"value": "0.8661417314", "Units": {"value": "inches"}}, "Length": {"value": "14.9606299060", "Units": {"value": "inches"}}, "Width": {"value": "11.0236220360", "Units": {"value": "inches"}}, "Weight": {"value": "3.1746565728", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2016-10-25"}, "Publisher": {"value": "Mondadori Electa"}, "ReleaseDate": {"value": "2016-10-25"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61K2xircqJL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "55", "Units": {"value": "pixels"}}}, "Studio": {"value": "Mondadori Electa"}, "Title": {"value": "Mappe. 
Un atlante per viaggiare tra terra, mari e culture del mondo"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "2843"}}, {"ProductCategoryId": {"value": "13064701031"}, "Rank": {"value": "2"}}, {"ProductCategoryId": {"value": "13077570031"}, "Rank": {"value": "2"}}, {"ProductCategoryId": {"value": "13064711031"}, "Rank": {"value": "15"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8858014308"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Brand": {"value": "Passioni"}, "Creator": [{"value": "Brotton, Jerry", "Role": {"value": "Autore"}}, {"value": "Fontebuoni, A.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "10.31494", "Units": {"value": "inches"}}, "Length": {"value": "12.20470", "Units": {"value": "inches"}}, "Width": {"value": "0.86614", "Units": {"value": "inches"}}}, "Label": {"value": "Gribaudo"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}]}, "ListPrice": {"Amount": {"value": "24.90"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Gribaudo"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "256"}, "PackageDimensions": {"Height": {"value": "1.1023622036", "Units": {"value": "inches"}}, "Length": {"value": "12.1653543183", "Units": {"value": "inches"}}, "Width": {"value": "10.2362204620", "Units": {"value": "inches"}}, "Weight": {"value": "3.4392112872", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2015-11-05"}, "Publisher": {"value": "Gribaudo"}, "ReleaseDate": {"value": "2015-11-05"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61UmGSV5reL._SL75_.jpg"}, 
"Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "62", "Units": {"value": "pixels"}}}, "Studio": {"value": "Gribaudo"}, "Title": {"value": "Le grandi mappe. Oltre 60 capolavori raccontano l'evoluzione dell'uomo, la sua storia e la sua cultura. Ediz. illustrata"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "23519"}}, {"ProductCategoryId": {"value": "508875031"}, "Rank": {"value": "40"}}, {"ProductCategoryId": {"value": "508856031"}, "Rank": {"value": "452"}}, {"ProductCategoryId": {"value": "508758031"}, "Rank": {"value": "3211"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8807890283"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina flessibile"}, "Brand": {"value": "UNIVERSALE ECONOMICA. SAGGI"}, "Creator": [{"value": "Brotton, Jerry", "Role": {"value": "Autore"}}, {"value": "Sala, V. 
B.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "5.47243", "Units": {"value": "inches"}}, "Length": {"value": "8.77951", "Units": {"value": "inches"}}, "Width": {"value": "1.49606", "Units": {"value": "inches"}}}, "IsAdultProduct": {"value": "false"}, "Label": {"value": "Feltrinelli"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}]}, "ListPrice": {"Amount": {"value": "19.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Feltrinelli"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "526"}, "PackageDimensions": {"Height": {"value": "1.59842519522", "Units": {"value": "inches"}}, "Length": {"value": "8.7007873927", "Units": {"value": "inches"}}, "Width": {"value": "5.49999999439", "Units": {"value": "inches"}}, "Weight": {"value": "1.6755131912", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2017-11-23"}, "Publisher": {"value": "Feltrinelli"}, "ReleaseDate": {"value": "2017-11-23"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61jo5I7vBjL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "48", "Units": {"value": "pixels"}}}, "Studio": {"value": "Feltrinelli"}, "Title": {"value": "La storia del mondo in dodici mappe"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "18925"}}, {"ProductCategoryId": {"value": "508875031"}, "Rank": {"value": "34"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8811149843"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Brand": {"value": "SAGGI"}, "Creator": [{"value": "Wilford, John Noble", "Role": 
{"value": "Autore"}}, {"value": "Gianna Lonza", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "8.97636", "Units": {"value": "inches"}}, "Length": {"value": "6.69290", "Units": {"value": "inches"}}, "Width": {"value": "1.41732", "Units": {"value": "inches"}}}, "Label": {"value": "Garzanti"}, "Languages": {"Language": {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}}, "ListPrice": {"Amount": {"value": "30.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Garzanti"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "478"}, "PackageDimensions": {"Height": {"value": "1.4960629906", "Units": {"value": "inches"}}, "Length": {"value": "8.7401574714", "Units": {"value": "inches"}}, "Width": {"value": "6.2992125920", "Units": {"value": "inches"}}, "Weight": {"value": "1.4991433816", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2018-11-22"}, "Publisher": {"value": "Garzanti"}, "ReleaseDate": {"value": "2018-11-22"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61KTEY8nMgL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "54", "Units": {"value": "pixels"}}}, "Studio": {"value": "Garzanti"}, "Title": {"value": "I signori delle mappe. 
La storia avventurosa dell'invenzione della cartografia"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "23845"}}, {"ProductCategoryId": {"value": "508875031"}, "Rank": {"value": "41"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "B084FZWQHD"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina flessibile"}, "Creator": {"value": "Frasante, Marco", "Role": {"value": "Autore"}}, "ItemDimensions": {"Height": {"value": "8.5", "Units": {"value": "inches"}}, "Length": {"value": "5.5", "Units": {"value": "inches"}}, "Width": {"value": "0.2", "Units": {"value": "inches"}}}, "IsAdultProduct": {"value": "false"}, "Label": {"value": "Independently published"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}, {"Name": {"value": "italian"}, "Type": {"value": "Sconosciuto"}}]}, "ListPrice": {"Amount": {"value": "12.69"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Independently published"}, "NumberOfPages": {"value": "87"}, "PackageDimensions": {"Height": {"value": "0.2", "Units": {"value": "inches"}}, "Length": {"value": "8.5", "Units": {"value": "inches"}}, "Width": {"value": "5.5", "Units": {"value": "inches"}}, "Weight": {"value": "0.37", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2020-02-09"}, "Publisher": {"value": "Independently published"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/51Na9vFKvgL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "49", "Units": {"value": "pixels"}}}, "Studio": {"value": "Independently published"}, "Title": {"value": "Mappe Mentali e Mappe Concettuali: La 
Guida Pi\u00f9 Completa Per Memorizzare e Apprendere Qualsiasi Cosa In Modo Semplice e Veloce"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "2663"}}, {"ProductCategoryId": {"value": "508885031"}, "Rank": {"value": "192"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "881160771X"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Brand": {"value": "SAGGI"}, "Creator": [{"value": "Marshall, Tim", "Role": {"value": "Autore"}}, {"value": "Easton, G.", "Role": {"value": "Illustratore"}}, {"value": "Smith, J.", "Role": {"value": "Illustratore"}}, {"value": "Hawkins, E.", "Role": {"value": "Illustratore"}}, {"value": "Crane, P.", "Role": {"value": "Illustratore"}}, {"value": "Caraffini, S.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "12.40155", "Units": {"value": "inches"}}, "Length": {"value": "10.03935", "Units": {"value": "inches"}}, "Width": {"value": "0.59055", "Units": {"value": "inches"}}}, "Label": {"value": "Garzanti"}, "Languages": {"Language": {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}}, "ListPrice": {"Amount": {"value": "20.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Garzanti"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "80"}, "PackageDimensions": {"Height": {"value": "0.5511811018", "Units": {"value": "inches"}}, "Length": {"value": "12.2047243970", "Units": {"value": "inches"}}, "Width": {"value": "9.8425196750", "Units": {"value": "inches"}}, "Weight": {"value": "1.6975594174", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2020-02-13"}, "Publisher": {"value": "Garzanti"}, "ReleaseDate": {"value": "2020-02-13"}, "SmallImage": {"URL": {"value": 
"http://ecx.images-amazon.com/images/I/514B0NG7gvL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "57", "Units": {"value": "pixels"}}}, "Studio": {"value": "Garzanti"}, "Title": {"value": "Le 12 mappe che spiegano il mondo ai ragazzi"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "24610"}}, {"ProductCategoryId": {"value": "13064684031"}, "Rank": {"value": "7"}}, {"ProductCategoryId": {"value": "13064569031"}, "Rank": {"value": "35"}}, {"ProductCategoryId": {"value": "13077656031"}, "Rank": {"value": "35"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "881167378X"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Creator": [{"value": "Marshall, Tim", "Role": {"value": "Autore"}}, {"value": "Merlini, R.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "8.77951", "Units": {"value": "inches"}}, "Length": {"value": "5.66928", "Units": {"value": "inches"}}, "Width": {"value": "1.37795", "Units": {"value": "inches"}}}, "IsAdultProduct": {"value": "false"}, "Label": {"value": "Garzanti"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}]}, "ListPrice": {"Amount": {"value": "19.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Garzanti"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "313"}, "PackageDimensions": {"Height": {"value": "1.4960629906", "Units": {"value": "inches"}}, "Length": {"value": "8.5826771566", "Units": {"value": "inches"}}, "Width": {"value": "5.7086614115", "Units": {"value": "inches"}}, "Weight": {"value": "1.1904962148", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, 
"PublicationDate": {"value": "2017-06-08"}, "Publisher": {"value": "Garzanti"}, "ReleaseDate": {"value": "2017-06-08"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/51DYJDPBKmL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "50", "Units": {"value": "pixels"}}}, "Studio": {"value": "Garzanti"}, "Title": {"value": "Le 10 mappe che spiegano il mondo"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "35380"}}, {"ProductCategoryId": {"value": "508819031"}, "Rank": {"value": "197"}}, {"ProductCategoryId": {"value": "508812031"}, "Rank": {"value": "678"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8804712279"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Brand": {"value": "LE SCIE. NUOVA SERIE STRANIERI"}, "Creator": [{"value": "Moller, Violet", "Role": {"value": "Autore"}}, {"value": "Vanni, L.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "9.44880", "Units": {"value": "inches"}}, "Length": {"value": "6.69290", "Units": {"value": "inches"}}, "Width": {"value": "1.18110", "Units": {"value": "inches"}}}, "Label": {"value": "Mondadori"}, "Languages": {"Language": {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}}, "ListPrice": {"Amount": {"value": "22.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Mondadori"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "325"}, "PackageDimensions": {"Height": {"value": "1.4173228332", "Units": {"value": "inches"}}, "Length": {"value": "9.4488188880", "Units": {"value": "inches"}}, "Width": {"value": "6.4960629855", "Units": {"value": "inches"}}, "Weight": {"value": "1.543235834", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": 
"ABIS_BOOK"}, "PublicationDate": {"value": "2019-05-28"}, "Publisher": {"value": "Mondadori"}, "ReleaseDate": {"value": "2019-05-28"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/51ntMunIvhL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "50", "Units": {"value": "pixels"}}}, "Studio": {"value": "Mondadori"}, "Title": {"value": "La mappa dei libri perduti. Come la conoscenza antica \u00e8 stata perduta e ritrovata: una storia in sette citt\u00e0"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "63759"}}, {"ProductCategoryId": {"value": "508810031"}, "Rank": {"value": "692"}}, {"ProductCategoryId": {"value": "508879031"}, "Rank": {"value": "15957"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "B008RJFRTK"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "App"}, "Brand": {"value": "MY.COM"}, "HardwarePlatform": {"value": "Android"}, "IsAdultProduct": {"value": "false"}, "Label": {"value": "MY.COM"}, "Languages": {"Language": [{"Name": {"value": "arabic"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "chinese"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "czech"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "dutch"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "english"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "french"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "german"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "japanese"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "korean"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "polish"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "portuguese"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": 
"russian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "spanish"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "vietnamese"}, "Type": {"value": "Pubblicato"}}]}, "ListPrice": {"Amount": {"value": "0.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "MY.COM"}, "OperatingSystem": {"value": "Android"}, "PartNumber": {"value": "com.mapswithme.maps.pro"}, "ProductGroup": {"value": "Mobile Application"}, "ProductTypeName": {"value": "MOBILE_APPLICATION"}, "Publisher": {"value": "MY.COM"}, "ReleaseDate": {"value": "2016-01-14"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61KevuswqEL._SL75_.png"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "75", "Units": {"value": "pixels"}}}, "Studio": {"value": "MY.COM"}, "Title": {"value": "MAPS.ME \u2014 Mappe Offline"}}}, "Relationships": {}, "SalesRankings": {}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8832706571"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina flessibile"}, "Creator": {"value": "aa.vv.", "Role": {"value": "Autore"}}, "Genre": {"value": "Diritto"}, "Label": {"value": "Neldiritto Editore"}, "Languages": {"Language": {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}}, "ListPrice": {"Amount": {"value": "90.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Neldiritto Editore"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "1200"}, "PackageDimensions": {"Height": {"value": "3.0708661386", "Units": {"value": "inches"}}, "Length": {"value": "9.8425196750", "Units": {"value": "inches"}}, "Width": {"value": "6.7716535364", "Units": {"value": "inches"}}, "Weight": {"value": "5.291094288000000881849048", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2020-01-24"}, "Publisher": 
{"value": "Neldiritto Editore"}, "ReleaseDate": {"value": "2020-01-24"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/41HeNbq4xKL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "53", "Units": {"value": "pixels"}}}, "Studio": {"value": "Neldiritto Editore"}, "Title": {"value": "Concorso Magistratura 2020: Mappe e schemi di Diritto civile-Diritto penale-Diritto amministrativo"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "62044"}}, {"ProductCategoryId": {"value": "1346646031"}, "Rank": {"value": "617"}}, {"ProductCategoryId": {"value": "1346648031"}, "Rank": {"value": "754"}}]}}]}}, "ResponseMetadata": {"RequestId": {"value": "d384713e-7c79-4a6d-81cd-d0aa68c7b409"}}}
parsed_json = DictWrapper(xml).parsed
assert parsed_json == expected_json
def test_decode_xml_custom_parser_xmltodict():
xml = '<?xml version="1.0"?><ListMatchingProductsResponse xmlns="http://mws.amazonservices.com/schema/Products/2011-10-01"><ListMatchingProductsResult><Products xmlns:ns2="http://mws.amazonservices.com/schema/Products/2011-10-01/default.xsd"><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8891808660</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Creator Role="Autore">Mizielinska, Aleksandra</ns2:Creator><ns2:Creator Role="Autore">Mizielinski, Daniel</ns2:Creator><ns2:Creator Role="Traduttore">Parisi, V.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">14.80312</ns2:Height><ns2:Length Units="inches">10.86612</ns2:Length><ns2:Width Units="inches">1.06299</ns2:Width><ns2:Weight Units="pounds">3.17</ns2:Weight></ns2:ItemDimensions><ns2:Label>Mondadori Electa</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>25.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Mondadori Electa</ns2:Manufacturer><ns2:NumberOfPages>144</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.8661417314</ns2:Height><ns2:Length Units="inches">14.9606299060</ns2:Length><ns2:Width Units="inches">11.0236220360</ns2:Width><ns2:Weight Units="pounds">3.1746565728</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2016-10-25</ns2:PublicationDate><ns2:Publisher>Mondadori Electa</ns2:Publisher><ns2:ReleaseDate>2016-10-25</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61K2xircqJL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width 
Units="pixels">55</ns2:Width></ns2:SmallImage><ns2:Studio>Mondadori Electa</ns2:Studio><ns2:Title>Mappe. Un atlante per viaggiare tra terra, mari e culture del mondo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>2843</Rank></SalesRank><SalesRank><ProductCategoryId>13064701031</ProductCategoryId><Rank>2</Rank></SalesRank><SalesRank><ProductCategoryId>13077570031</ProductCategoryId><Rank>2</Rank></SalesRank><SalesRank><ProductCategoryId>13064711031</ProductCategoryId><Rank>15</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8858014308</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>Passioni</ns2:Brand><ns2:Creator Role="Autore">Brotton, Jerry</ns2:Creator><ns2:Creator Role="Traduttore">Fontebuoni, A.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">10.31494</ns2:Height><ns2:Length Units="inches">12.20470</ns2:Length><ns2:Width Units="inches">0.86614</ns2:Width></ns2:ItemDimensions><ns2:Label>Gribaudo</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>24.90</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Gribaudo</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>256</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.1023622036</ns2:Height><ns2:Length Units="inches">12.1653543183</ns2:Length><ns2:Width Units="inches">10.2362204620</ns2:Width><ns2:Weight 
Units="pounds">3.4392112872</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2015-11-05</ns2:PublicationDate><ns2:Publisher>Gribaudo</ns2:Publisher><ns2:ReleaseDate>2015-11-05</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61UmGSV5reL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">62</ns2:Width></ns2:SmallImage><ns2:Studio>Gribaudo</ns2:Studio><ns2:Title>Le grandi mappe. Oltre 60 capolavori raccontano l\'evoluzione dell\'uomo, la sua storia e la sua cultura. Ediz. illustrata</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>23519</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>40</Rank></SalesRank><SalesRank><ProductCategoryId>508856031</ProductCategoryId><Rank>452</Rank></SalesRank><SalesRank><ProductCategoryId>508758031</ProductCategoryId><Rank>3211</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8807890283</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Brand>UNIVERSALE ECONOMICA. SAGGI</ns2:Brand><ns2:Creator Role="Autore">Brotton, Jerry</ns2:Creator><ns2:Creator Role="Traduttore">Sala, V. 
B.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">5.47243</ns2:Height><ns2:Length Units="inches">8.77951</ns2:Length><ns2:Width Units="inches">1.49606</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Feltrinelli</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>19.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Feltrinelli</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>526</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.59842519522</ns2:Height><ns2:Length Units="inches">8.7007873927</ns2:Length><ns2:Width Units="inches">5.49999999439</ns2:Width><ns2:Weight Units="pounds">1.6755131912</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2017-11-23</ns2:PublicationDate><ns2:Publisher>Feltrinelli</ns2:Publisher><ns2:ReleaseDate>2017-11-23</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61jo5I7vBjL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">48</ns2:Width></ns2:SmallImage><ns2:Studio>Feltrinelli</ns2:Studio><ns2:Title>La storia del mondo in dodici mappe</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>18925</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>34</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8811149843</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina 
rigida</ns2:Binding><ns2:Brand>SAGGI</ns2:Brand><ns2:Creator Role="Autore">Wilford, John Noble</ns2:Creator><ns2:Creator Role="Traduttore">Gianna Lonza</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.97636</ns2:Height><ns2:Length Units="inches">6.69290</ns2:Length><ns2:Width Units="inches">1.41732</ns2:Width></ns2:ItemDimensions><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>30.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>478</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4960629906</ns2:Height><ns2:Length Units="inches">8.7401574714</ns2:Length><ns2:Width Units="inches">6.2992125920</ns2:Width><ns2:Weight Units="pounds">1.4991433816</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2018-11-22</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2018-11-22</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61KTEY8nMgL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">54</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>I signori delle mappe. 
La storia avventurosa dell\'invenzione della cartografia</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>23845</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>41</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>B084FZWQHD</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Creator Role="Autore">Frasante, Marco</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.5</ns2:Height><ns2:Length Units="inches">5.5</ns2:Length><ns2:Width Units="inches">0.2</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Independently published</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Sconosciuto</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>12.69</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Independently published</ns2:Manufacturer><ns2:NumberOfPages>87</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.2</ns2:Height><ns2:Length Units="inches">8.5</ns2:Length><ns2:Width Units="inches">5.5</ns2:Width><ns2:Weight Units="pounds">0.37</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-02-09</ns2:PublicationDate><ns2:Publisher>Independently published</ns2:Publisher><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51Na9vFKvgL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width 
Units="pixels">49</ns2:Width></ns2:SmallImage><ns2:Studio>Independently published</ns2:Studio><ns2:Title>Mappe Mentali e Mappe Concettuali: La Guida Pi\xc3\xb9 Completa Per Memorizzare e Apprendere Qualsiasi Cosa In Modo Semplice e Veloce</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>2663</Rank></SalesRank><SalesRank><ProductCategoryId>508885031</ProductCategoryId><Rank>192</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>881160771X</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>SAGGI</ns2:Brand><ns2:Creator Role="Autore">Marshall, Tim</ns2:Creator><ns2:Creator Role="Illustratore">Easton, G.</ns2:Creator><ns2:Creator Role="Illustratore">Smith, J.</ns2:Creator><ns2:Creator Role="Illustratore">Hawkins, E.</ns2:Creator><ns2:Creator Role="Illustratore">Crane, P.</ns2:Creator><ns2:Creator Role="Traduttore">Caraffini, S.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">12.40155</ns2:Height><ns2:Length Units="inches">10.03935</ns2:Length><ns2:Width Units="inches">0.59055</ns2:Width></ns2:ItemDimensions><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>20.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>80</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.5511811018</ns2:Height><ns2:Length Units="inches">12.2047243970</ns2:Length><ns2:Width Units="inches">9.8425196750</ns2:Width><ns2:Weight 
Units="pounds">1.6975594174</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-02-13</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2020-02-13</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/514B0NG7gvL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">57</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>Le 12 mappe che spiegano il mondo ai ragazzi</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>24610</Rank></SalesRank><SalesRank><ProductCategoryId>13064684031</ProductCategoryId><Rank>7</Rank></SalesRank><SalesRank><ProductCategoryId>13064569031</ProductCategoryId><Rank>35</Rank></SalesRank><SalesRank><ProductCategoryId>13077656031</ProductCategoryId><Rank>35</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>881167378X</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Creator Role="Autore">Marshall, Tim</ns2:Creator><ns2:Creator Role="Traduttore">Merlini, R.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.77951</ns2:Height><ns2:Length Units="inches">5.66928</ns2:Length><ns2:Width Units="inches">1.37795</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua 
originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>19.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>313</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4960629906</ns2:Height><ns2:Length Units="inches">8.5826771566</ns2:Length><ns2:Width Units="inches">5.7086614115</ns2:Width><ns2:Weight Units="pounds">1.1904962148</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2017-06-08</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2017-06-08</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51DYJDPBKmL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">50</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>Le 10 mappe che spiegano il mondo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>35380</Rank></SalesRank><SalesRank><ProductCategoryId>508819031</ProductCategoryId><Rank>197</Rank></SalesRank><SalesRank><ProductCategoryId>508812031</ProductCategoryId><Rank>678</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8804712279</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>LE SCIE. 
NUOVA SERIE STRANIERI</ns2:Brand><ns2:Creator Role="Autore">Moller, Violet</ns2:Creator><ns2:Creator Role="Traduttore">Vanni, L.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">9.44880</ns2:Height><ns2:Length Units="inches">6.69290</ns2:Length><ns2:Width Units="inches">1.18110</ns2:Width></ns2:ItemDimensions><ns2:Label>Mondadori</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>22.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Mondadori</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>325</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4173228332</ns2:Height><ns2:Length Units="inches">9.4488188880</ns2:Length><ns2:Width Units="inches">6.4960629855</ns2:Width><ns2:Weight Units="pounds">1.543235834</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2019-05-28</ns2:PublicationDate><ns2:Publisher>Mondadori</ns2:Publisher><ns2:ReleaseDate>2019-05-28</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51ntMunIvhL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">50</ns2:Width></ns2:SmallImage><ns2:Studio>Mondadori</ns2:Studio><ns2:Title>La mappa dei libri perduti. 
Come la conoscenza antica \xc3\xa8 stata perduta e ritrovata: una storia in sette citt\xc3\xa0</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>63759</Rank></SalesRank><SalesRank><ProductCategoryId>508810031</ProductCategoryId><Rank>692</Rank></SalesRank><SalesRank><ProductCategoryId>508879031</ProductCategoryId><Rank>15957</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>B008RJFRTK</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>App</ns2:Binding><ns2:Brand>MY.COM</ns2:Brand><ns2:HardwarePlatform>Android</ns2:HardwarePlatform><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>MY.COM</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>arabic</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>chinese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>czech</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>dutch</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>english</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>french</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>german</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>japanese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>korean</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>polish</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>portuguese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>russian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:
Language><ns2:Language><ns2:Name>spanish</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>vietnamese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>0.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>MY.COM</ns2:Manufacturer><ns2:OperatingSystem>Android</ns2:OperatingSystem><ns2:PartNumber>com.mapswithme.maps.pro</ns2:PartNumber><ns2:ProductGroup>Mobile Application</ns2:ProductGroup><ns2:ProductTypeName>MOBILE_APPLICATION</ns2:ProductTypeName><ns2:Publisher>MY.COM</ns2:Publisher><ns2:ReleaseDate>2016-01-14</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61KevuswqEL._SL75_.png</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">75</ns2:Width></ns2:SmallImage><ns2:Studio>MY.COM</ns2:Studio><ns2:Title>MAPS.ME \xe2\x80\x94 Mappe Offline</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings/></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8832706571</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Creator Role="Autore">aa.vv.</ns2:Creator><ns2:Genre>Diritto</ns2:Genre><ns2:Label>Neldiritto Editore</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>90.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Neldiritto Editore</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>1200</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">3.0708661386</ns2:Height><ns2:Length Units="inches">9.8425196750</ns2:Length><ns2:Width Units="inches">6.7716535364</ns2:Width><ns2:Weight 
Units="pounds">5.291094288000000881849048</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-01-24</ns2:PublicationDate><ns2:Publisher>Neldiritto Editore</ns2:Publisher><ns2:ReleaseDate>2020-01-24</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/41HeNbq4xKL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">53</ns2:Width></ns2:SmallImage><ns2:Studio>Neldiritto Editore</ns2:Studio><ns2:Title>Concorso Magistratura 2020: Mappe e schemi di Diritto civile-Diritto penale-Diritto amministrativo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>62044</Rank></SalesRank><SalesRank><ProductCategoryId>1346646031</ProductCategoryId><Rank>617</Rank></SalesRank><SalesRank><ProductCategoryId>1346648031</ProductCategoryId><Rank>754</Rank></SalesRank></SalesRankings></Product></Products></ListMatchingProductsResult><ResponseMetadata><RequestId>d384713e-7c79-4a6d-81cd-d0aa68c7b409</RequestId></ResponseMetadata></ListMatchingProductsResponse>'
parsed_json = DictWrapper(xml, xml_parser=xmltodict.parse).parsed
print(parsed_json)
| 2,100.1875 | 22,724 | 0.733789 | import pytest
import xmltodict
from mws.mws import DataWrapper, DictWrapper, MWSError
def test_content_md5_comparison():
    """DataWrapper accepts a body whose Content-MD5 header matches its digest.

    The header value is the base64-encoded MD5 digest of ``data``; a matching
    digest must construct without raising (raising would fail this test).
    """
    data = b'abc\tdef'
    # Renamed from `hash` so the builtin `hash` is not shadowed.
    content_md5 = 'Zj+Bh1BJ8HzBb9ToK28qFQ=='
    DataWrapper(data, {'content-md5': content_md5})
def test_content_md5_check_raises_exception_if_fails():
    """DataWrapper raises MWSError when the Content-MD5 header does not match the body."""
    data = b'abc\tdef'
    # Renamed from `hash` so the builtin `hash` is not shadowed.
    bad_md5 = 'notthehash'
    with pytest.raises(MWSError):
        DataWrapper(data, {'content-md5': bad_md5})
def test_decode_byte_xml():
xml = b'<?xml version="1.0"?><ListMatchingProductsResponse xmlns="http://mws.amazonservices.com/schema/Products/2011-10-01"><ListMatchingProductsResult><Products xmlns:ns2="http://mws.amazonservices.com/schema/Products/2011-10-01/default.xsd"><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8891808660</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Creator Role="Autore">Mizielinska, Aleksandra</ns2:Creator><ns2:Creator Role="Autore">Mizielinski, Daniel</ns2:Creator><ns2:Creator Role="Traduttore">Parisi, V.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">14.80312</ns2:Height><ns2:Length Units="inches">10.86612</ns2:Length><ns2:Width Units="inches">1.06299</ns2:Width><ns2:Weight Units="pounds">3.17</ns2:Weight></ns2:ItemDimensions><ns2:Label>Mondadori Electa</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>25.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Mondadori Electa</ns2:Manufacturer><ns2:NumberOfPages>144</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.8661417314</ns2:Height><ns2:Length Units="inches">14.9606299060</ns2:Length><ns2:Width Units="inches">11.0236220360</ns2:Width><ns2:Weight Units="pounds">3.1746565728</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2016-10-25</ns2:PublicationDate><ns2:Publisher>Mondadori Electa</ns2:Publisher><ns2:ReleaseDate>2016-10-25</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61K2xircqJL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width 
Units="pixels">55</ns2:Width></ns2:SmallImage><ns2:Studio>Mondadori Electa</ns2:Studio><ns2:Title>Mappe. Un atlante per viaggiare tra terra, mari e culture del mondo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>2843</Rank></SalesRank><SalesRank><ProductCategoryId>13064701031</ProductCategoryId><Rank>2</Rank></SalesRank><SalesRank><ProductCategoryId>13077570031</ProductCategoryId><Rank>2</Rank></SalesRank><SalesRank><ProductCategoryId>13064711031</ProductCategoryId><Rank>15</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8858014308</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>Passioni</ns2:Brand><ns2:Creator Role="Autore">Brotton, Jerry</ns2:Creator><ns2:Creator Role="Traduttore">Fontebuoni, A.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">10.31494</ns2:Height><ns2:Length Units="inches">12.20470</ns2:Length><ns2:Width Units="inches">0.86614</ns2:Width></ns2:ItemDimensions><ns2:Label>Gribaudo</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>24.90</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Gribaudo</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>256</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.1023622036</ns2:Height><ns2:Length Units="inches">12.1653543183</ns2:Length><ns2:Width Units="inches">10.2362204620</ns2:Width><ns2:Weight 
Units="pounds">3.4392112872</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2015-11-05</ns2:PublicationDate><ns2:Publisher>Gribaudo</ns2:Publisher><ns2:ReleaseDate>2015-11-05</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61UmGSV5reL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">62</ns2:Width></ns2:SmallImage><ns2:Studio>Gribaudo</ns2:Studio><ns2:Title>Le grandi mappe. Oltre 60 capolavori raccontano l\'evoluzione dell\'uomo, la sua storia e la sua cultura. Ediz. illustrata</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>23519</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>40</Rank></SalesRank><SalesRank><ProductCategoryId>508856031</ProductCategoryId><Rank>452</Rank></SalesRank><SalesRank><ProductCategoryId>508758031</ProductCategoryId><Rank>3211</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8807890283</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Brand>UNIVERSALE ECONOMICA. SAGGI</ns2:Brand><ns2:Creator Role="Autore">Brotton, Jerry</ns2:Creator><ns2:Creator Role="Traduttore">Sala, V. 
B.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">5.47243</ns2:Height><ns2:Length Units="inches">8.77951</ns2:Length><ns2:Width Units="inches">1.49606</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Feltrinelli</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>19.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Feltrinelli</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>526</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.59842519522</ns2:Height><ns2:Length Units="inches">8.7007873927</ns2:Length><ns2:Width Units="inches">5.49999999439</ns2:Width><ns2:Weight Units="pounds">1.6755131912</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2017-11-23</ns2:PublicationDate><ns2:Publisher>Feltrinelli</ns2:Publisher><ns2:ReleaseDate>2017-11-23</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61jo5I7vBjL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">48</ns2:Width></ns2:SmallImage><ns2:Studio>Feltrinelli</ns2:Studio><ns2:Title>La storia del mondo in dodici mappe</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>18925</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>34</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8811149843</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina 
rigida</ns2:Binding><ns2:Brand>SAGGI</ns2:Brand><ns2:Creator Role="Autore">Wilford, John Noble</ns2:Creator><ns2:Creator Role="Traduttore">Gianna Lonza</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.97636</ns2:Height><ns2:Length Units="inches">6.69290</ns2:Length><ns2:Width Units="inches">1.41732</ns2:Width></ns2:ItemDimensions><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>30.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>478</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4960629906</ns2:Height><ns2:Length Units="inches">8.7401574714</ns2:Length><ns2:Width Units="inches">6.2992125920</ns2:Width><ns2:Weight Units="pounds">1.4991433816</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2018-11-22</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2018-11-22</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61KTEY8nMgL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">54</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>I signori delle mappe. 
La storia avventurosa dell\'invenzione della cartografia</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>23845</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>41</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>B084FZWQHD</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Creator Role="Autore">Frasante, Marco</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.5</ns2:Height><ns2:Length Units="inches">5.5</ns2:Length><ns2:Width Units="inches">0.2</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Independently published</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Sconosciuto</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>12.69</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Independently published</ns2:Manufacturer><ns2:NumberOfPages>87</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.2</ns2:Height><ns2:Length Units="inches">8.5</ns2:Length><ns2:Width Units="inches">5.5</ns2:Width><ns2:Weight Units="pounds">0.37</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-02-09</ns2:PublicationDate><ns2:Publisher>Independently published</ns2:Publisher><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51Na9vFKvgL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width 
Units="pixels">49</ns2:Width></ns2:SmallImage><ns2:Studio>Independently published</ns2:Studio><ns2:Title>Mappe Mentali e Mappe Concettuali: La Guida Pi\xc3\xb9 Completa Per Memorizzare e Apprendere Qualsiasi Cosa In Modo Semplice e Veloce</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>2663</Rank></SalesRank><SalesRank><ProductCategoryId>508885031</ProductCategoryId><Rank>192</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>881160771X</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>SAGGI</ns2:Brand><ns2:Creator Role="Autore">Marshall, Tim</ns2:Creator><ns2:Creator Role="Illustratore">Easton, G.</ns2:Creator><ns2:Creator Role="Illustratore">Smith, J.</ns2:Creator><ns2:Creator Role="Illustratore">Hawkins, E.</ns2:Creator><ns2:Creator Role="Illustratore">Crane, P.</ns2:Creator><ns2:Creator Role="Traduttore">Caraffini, S.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">12.40155</ns2:Height><ns2:Length Units="inches">10.03935</ns2:Length><ns2:Width Units="inches">0.59055</ns2:Width></ns2:ItemDimensions><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>20.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>80</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.5511811018</ns2:Height><ns2:Length Units="inches">12.2047243970</ns2:Length><ns2:Width Units="inches">9.8425196750</ns2:Width><ns2:Weight 
Units="pounds">1.6975594174</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-02-13</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2020-02-13</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/514B0NG7gvL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">57</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>Le 12 mappe che spiegano il mondo ai ragazzi</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>24610</Rank></SalesRank><SalesRank><ProductCategoryId>13064684031</ProductCategoryId><Rank>7</Rank></SalesRank><SalesRank><ProductCategoryId>13064569031</ProductCategoryId><Rank>35</Rank></SalesRank><SalesRank><ProductCategoryId>13077656031</ProductCategoryId><Rank>35</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>881167378X</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Creator Role="Autore">Marshall, Tim</ns2:Creator><ns2:Creator Role="Traduttore">Merlini, R.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.77951</ns2:Height><ns2:Length Units="inches">5.66928</ns2:Length><ns2:Width Units="inches">1.37795</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua 
originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>19.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>313</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4960629906</ns2:Height><ns2:Length Units="inches">8.5826771566</ns2:Length><ns2:Width Units="inches">5.7086614115</ns2:Width><ns2:Weight Units="pounds">1.1904962148</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2017-06-08</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2017-06-08</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51DYJDPBKmL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">50</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>Le 10 mappe che spiegano il mondo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>35380</Rank></SalesRank><SalesRank><ProductCategoryId>508819031</ProductCategoryId><Rank>197</Rank></SalesRank><SalesRank><ProductCategoryId>508812031</ProductCategoryId><Rank>678</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8804712279</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>LE SCIE. 
NUOVA SERIE STRANIERI</ns2:Brand><ns2:Creator Role="Autore">Moller, Violet</ns2:Creator><ns2:Creator Role="Traduttore">Vanni, L.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">9.44880</ns2:Height><ns2:Length Units="inches">6.69290</ns2:Length><ns2:Width Units="inches">1.18110</ns2:Width></ns2:ItemDimensions><ns2:Label>Mondadori</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>22.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Mondadori</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>325</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4173228332</ns2:Height><ns2:Length Units="inches">9.4488188880</ns2:Length><ns2:Width Units="inches">6.4960629855</ns2:Width><ns2:Weight Units="pounds">1.543235834</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2019-05-28</ns2:PublicationDate><ns2:Publisher>Mondadori</ns2:Publisher><ns2:ReleaseDate>2019-05-28</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51ntMunIvhL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">50</ns2:Width></ns2:SmallImage><ns2:Studio>Mondadori</ns2:Studio><ns2:Title>La mappa dei libri perduti. 
Come la conoscenza antica \xc3\xa8 stata perduta e ritrovata: una storia in sette citt\xc3\xa0</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>63759</Rank></SalesRank><SalesRank><ProductCategoryId>508810031</ProductCategoryId><Rank>692</Rank></SalesRank><SalesRank><ProductCategoryId>508879031</ProductCategoryId><Rank>15957</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>B008RJFRTK</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>App</ns2:Binding><ns2:Brand>MY.COM</ns2:Brand><ns2:HardwarePlatform>Android</ns2:HardwarePlatform><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>MY.COM</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>arabic</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>chinese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>czech</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>dutch</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>english</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>french</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>german</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>japanese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>korean</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>polish</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>portuguese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>russian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:
Language><ns2:Language><ns2:Name>spanish</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>vietnamese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>0.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>MY.COM</ns2:Manufacturer><ns2:OperatingSystem>Android</ns2:OperatingSystem><ns2:PartNumber>com.mapswithme.maps.pro</ns2:PartNumber><ns2:ProductGroup>Mobile Application</ns2:ProductGroup><ns2:ProductTypeName>MOBILE_APPLICATION</ns2:ProductTypeName><ns2:Publisher>MY.COM</ns2:Publisher><ns2:ReleaseDate>2016-01-14</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61KevuswqEL._SL75_.png</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">75</ns2:Width></ns2:SmallImage><ns2:Studio>MY.COM</ns2:Studio><ns2:Title>MAPS.ME \xe2\x80\x94 Mappe Offline</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings/></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8832706571</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Creator Role="Autore">aa.vv.</ns2:Creator><ns2:Genre>Diritto</ns2:Genre><ns2:Label>Neldiritto Editore</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>90.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Neldiritto Editore</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>1200</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">3.0708661386</ns2:Height><ns2:Length Units="inches">9.8425196750</ns2:Length><ns2:Width Units="inches">6.7716535364</ns2:Width><ns2:Weight 
Units="pounds">5.291094288000000881849048</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-01-24</ns2:PublicationDate><ns2:Publisher>Neldiritto Editore</ns2:Publisher><ns2:ReleaseDate>2020-01-24</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/41HeNbq4xKL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">53</ns2:Width></ns2:SmallImage><ns2:Studio>Neldiritto Editore</ns2:Studio><ns2:Title>Concorso Magistratura 2020: Mappe e schemi di Diritto civile-Diritto penale-Diritto amministrativo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>62044</Rank></SalesRank><SalesRank><ProductCategoryId>1346646031</ProductCategoryId><Rank>617</Rank></SalesRank><SalesRank><ProductCategoryId>1346648031</ProductCategoryId><Rank>754</Rank></SalesRank></SalesRankings></Product></Products></ListMatchingProductsResult><ResponseMetadata><RequestId>d384713e-7c79-4a6d-81cd-d0aa68c7b409</RequestId></ResponseMetadata></ListMatchingProductsResponse>'
expected_json = {"ListMatchingProductsResult": {"Products": {"Product": [{"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8891808660"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Creator": [{"value": "Mizielinska, Aleksandra", "Role": {"value": "Autore"}}, {"value": "Mizielinski, Daniel", "Role": {"value": "Autore"}}, {"value": "Parisi, V.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "14.80312", "Units": {"value": "inches"}}, "Length": {"value": "10.86612", "Units": {"value": "inches"}}, "Width": {"value": "1.06299", "Units": {"value": "inches"}}, "Weight": {"value": "3.17", "Units": {"value": "pounds"}}}, "Label": {"value": "Mondadori Electa"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}]}, "ListPrice": {"Amount": {"value": "25.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Mondadori Electa"}, "NumberOfPages": {"value": "144"}, "PackageDimensions": {"Height": {"value": "0.8661417314", "Units": {"value": "inches"}}, "Length": {"value": "14.9606299060", "Units": {"value": "inches"}}, "Width": {"value": "11.0236220360", "Units": {"value": "inches"}}, "Weight": {"value": "3.1746565728", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2016-10-25"}, "Publisher": {"value": "Mondadori Electa"}, "ReleaseDate": {"value": "2016-10-25"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61K2xircqJL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "55", "Units": {"value": "pixels"}}}, "Studio": {"value": "Mondadori Electa"}, "Title": {"value": "Mappe. 
Un atlante per viaggiare tra terra, mari e culture del mondo"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "2843"}}, {"ProductCategoryId": {"value": "13064701031"}, "Rank": {"value": "2"}}, {"ProductCategoryId": {"value": "13077570031"}, "Rank": {"value": "2"}}, {"ProductCategoryId": {"value": "13064711031"}, "Rank": {"value": "15"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8858014308"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Brand": {"value": "Passioni"}, "Creator": [{"value": "Brotton, Jerry", "Role": {"value": "Autore"}}, {"value": "Fontebuoni, A.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "10.31494", "Units": {"value": "inches"}}, "Length": {"value": "12.20470", "Units": {"value": "inches"}}, "Width": {"value": "0.86614", "Units": {"value": "inches"}}}, "Label": {"value": "Gribaudo"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}]}, "ListPrice": {"Amount": {"value": "24.90"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Gribaudo"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "256"}, "PackageDimensions": {"Height": {"value": "1.1023622036", "Units": {"value": "inches"}}, "Length": {"value": "12.1653543183", "Units": {"value": "inches"}}, "Width": {"value": "10.2362204620", "Units": {"value": "inches"}}, "Weight": {"value": "3.4392112872", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2015-11-05"}, "Publisher": {"value": "Gribaudo"}, "ReleaseDate": {"value": "2015-11-05"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61UmGSV5reL._SL75_.jpg"}, 
"Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "62", "Units": {"value": "pixels"}}}, "Studio": {"value": "Gribaudo"}, "Title": {"value": "Le grandi mappe. Oltre 60 capolavori raccontano l'evoluzione dell'uomo, la sua storia e la sua cultura. Ediz. illustrata"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "23519"}}, {"ProductCategoryId": {"value": "508875031"}, "Rank": {"value": "40"}}, {"ProductCategoryId": {"value": "508856031"}, "Rank": {"value": "452"}}, {"ProductCategoryId": {"value": "508758031"}, "Rank": {"value": "3211"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8807890283"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina flessibile"}, "Brand": {"value": "UNIVERSALE ECONOMICA. SAGGI"}, "Creator": [{"value": "Brotton, Jerry", "Role": {"value": "Autore"}}, {"value": "Sala, V. 
B.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "5.47243", "Units": {"value": "inches"}}, "Length": {"value": "8.77951", "Units": {"value": "inches"}}, "Width": {"value": "1.49606", "Units": {"value": "inches"}}}, "IsAdultProduct": {"value": "false"}, "Label": {"value": "Feltrinelli"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}]}, "ListPrice": {"Amount": {"value": "19.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Feltrinelli"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "526"}, "PackageDimensions": {"Height": {"value": "1.59842519522", "Units": {"value": "inches"}}, "Length": {"value": "8.7007873927", "Units": {"value": "inches"}}, "Width": {"value": "5.49999999439", "Units": {"value": "inches"}}, "Weight": {"value": "1.6755131912", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2017-11-23"}, "Publisher": {"value": "Feltrinelli"}, "ReleaseDate": {"value": "2017-11-23"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61jo5I7vBjL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "48", "Units": {"value": "pixels"}}}, "Studio": {"value": "Feltrinelli"}, "Title": {"value": "La storia del mondo in dodici mappe"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "18925"}}, {"ProductCategoryId": {"value": "508875031"}, "Rank": {"value": "34"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8811149843"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Brand": {"value": "SAGGI"}, "Creator": [{"value": "Wilford, John Noble", "Role": 
{"value": "Autore"}}, {"value": "Gianna Lonza", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "8.97636", "Units": {"value": "inches"}}, "Length": {"value": "6.69290", "Units": {"value": "inches"}}, "Width": {"value": "1.41732", "Units": {"value": "inches"}}}, "Label": {"value": "Garzanti"}, "Languages": {"Language": {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}}, "ListPrice": {"Amount": {"value": "30.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Garzanti"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "478"}, "PackageDimensions": {"Height": {"value": "1.4960629906", "Units": {"value": "inches"}}, "Length": {"value": "8.7401574714", "Units": {"value": "inches"}}, "Width": {"value": "6.2992125920", "Units": {"value": "inches"}}, "Weight": {"value": "1.4991433816", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2018-11-22"}, "Publisher": {"value": "Garzanti"}, "ReleaseDate": {"value": "2018-11-22"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61KTEY8nMgL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "54", "Units": {"value": "pixels"}}}, "Studio": {"value": "Garzanti"}, "Title": {"value": "I signori delle mappe. 
La storia avventurosa dell'invenzione della cartografia"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "23845"}}, {"ProductCategoryId": {"value": "508875031"}, "Rank": {"value": "41"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "B084FZWQHD"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina flessibile"}, "Creator": {"value": "Frasante, Marco", "Role": {"value": "Autore"}}, "ItemDimensions": {"Height": {"value": "8.5", "Units": {"value": "inches"}}, "Length": {"value": "5.5", "Units": {"value": "inches"}}, "Width": {"value": "0.2", "Units": {"value": "inches"}}}, "IsAdultProduct": {"value": "false"}, "Label": {"value": "Independently published"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}, {"Name": {"value": "italian"}, "Type": {"value": "Sconosciuto"}}]}, "ListPrice": {"Amount": {"value": "12.69"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Independently published"}, "NumberOfPages": {"value": "87"}, "PackageDimensions": {"Height": {"value": "0.2", "Units": {"value": "inches"}}, "Length": {"value": "8.5", "Units": {"value": "inches"}}, "Width": {"value": "5.5", "Units": {"value": "inches"}}, "Weight": {"value": "0.37", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2020-02-09"}, "Publisher": {"value": "Independently published"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/51Na9vFKvgL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "49", "Units": {"value": "pixels"}}}, "Studio": {"value": "Independently published"}, "Title": {"value": "Mappe Mentali e Mappe Concettuali: La 
Guida Pi\u00f9 Completa Per Memorizzare e Apprendere Qualsiasi Cosa In Modo Semplice e Veloce"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "2663"}}, {"ProductCategoryId": {"value": "508885031"}, "Rank": {"value": "192"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "881160771X"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Brand": {"value": "SAGGI"}, "Creator": [{"value": "Marshall, Tim", "Role": {"value": "Autore"}}, {"value": "Easton, G.", "Role": {"value": "Illustratore"}}, {"value": "Smith, J.", "Role": {"value": "Illustratore"}}, {"value": "Hawkins, E.", "Role": {"value": "Illustratore"}}, {"value": "Crane, P.", "Role": {"value": "Illustratore"}}, {"value": "Caraffini, S.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "12.40155", "Units": {"value": "inches"}}, "Length": {"value": "10.03935", "Units": {"value": "inches"}}, "Width": {"value": "0.59055", "Units": {"value": "inches"}}}, "Label": {"value": "Garzanti"}, "Languages": {"Language": {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}}, "ListPrice": {"Amount": {"value": "20.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Garzanti"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "80"}, "PackageDimensions": {"Height": {"value": "0.5511811018", "Units": {"value": "inches"}}, "Length": {"value": "12.2047243970", "Units": {"value": "inches"}}, "Width": {"value": "9.8425196750", "Units": {"value": "inches"}}, "Weight": {"value": "1.6975594174", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2020-02-13"}, "Publisher": {"value": "Garzanti"}, "ReleaseDate": {"value": "2020-02-13"}, "SmallImage": {"URL": {"value": 
"http://ecx.images-amazon.com/images/I/514B0NG7gvL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "57", "Units": {"value": "pixels"}}}, "Studio": {"value": "Garzanti"}, "Title": {"value": "Le 12 mappe che spiegano il mondo ai ragazzi"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "24610"}}, {"ProductCategoryId": {"value": "13064684031"}, "Rank": {"value": "7"}}, {"ProductCategoryId": {"value": "13064569031"}, "Rank": {"value": "35"}}, {"ProductCategoryId": {"value": "13077656031"}, "Rank": {"value": "35"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "881167378X"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Creator": [{"value": "Marshall, Tim", "Role": {"value": "Autore"}}, {"value": "Merlini, R.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "8.77951", "Units": {"value": "inches"}}, "Length": {"value": "5.66928", "Units": {"value": "inches"}}, "Width": {"value": "1.37795", "Units": {"value": "inches"}}}, "IsAdultProduct": {"value": "false"}, "Label": {"value": "Garzanti"}, "Languages": {"Language": [{"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Lingua originale"}}]}, "ListPrice": {"Amount": {"value": "19.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Garzanti"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "313"}, "PackageDimensions": {"Height": {"value": "1.4960629906", "Units": {"value": "inches"}}, "Length": {"value": "8.5826771566", "Units": {"value": "inches"}}, "Width": {"value": "5.7086614115", "Units": {"value": "inches"}}, "Weight": {"value": "1.1904962148", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, 
"PublicationDate": {"value": "2017-06-08"}, "Publisher": {"value": "Garzanti"}, "ReleaseDate": {"value": "2017-06-08"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/51DYJDPBKmL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "50", "Units": {"value": "pixels"}}}, "Studio": {"value": "Garzanti"}, "Title": {"value": "Le 10 mappe che spiegano il mondo"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "35380"}}, {"ProductCategoryId": {"value": "508819031"}, "Rank": {"value": "197"}}, {"ProductCategoryId": {"value": "508812031"}, "Rank": {"value": "678"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8804712279"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina rigida"}, "Brand": {"value": "LE SCIE. NUOVA SERIE STRANIERI"}, "Creator": [{"value": "Moller, Violet", "Role": {"value": "Autore"}}, {"value": "Vanni, L.", "Role": {"value": "Traduttore"}}], "ItemDimensions": {"Height": {"value": "9.44880", "Units": {"value": "inches"}}, "Length": {"value": "6.69290", "Units": {"value": "inches"}}, "Width": {"value": "1.18110", "Units": {"value": "inches"}}}, "Label": {"value": "Mondadori"}, "Languages": {"Language": {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}}, "ListPrice": {"Amount": {"value": "22.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Mondadori"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "325"}, "PackageDimensions": {"Height": {"value": "1.4173228332", "Units": {"value": "inches"}}, "Length": {"value": "9.4488188880", "Units": {"value": "inches"}}, "Width": {"value": "6.4960629855", "Units": {"value": "inches"}}, "Weight": {"value": "1.543235834", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": 
"ABIS_BOOK"}, "PublicationDate": {"value": "2019-05-28"}, "Publisher": {"value": "Mondadori"}, "ReleaseDate": {"value": "2019-05-28"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/51ntMunIvhL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "50", "Units": {"value": "pixels"}}}, "Studio": {"value": "Mondadori"}, "Title": {"value": "La mappa dei libri perduti. Come la conoscenza antica \u00e8 stata perduta e ritrovata: una storia in sette citt\u00e0"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "63759"}}, {"ProductCategoryId": {"value": "508810031"}, "Rank": {"value": "692"}}, {"ProductCategoryId": {"value": "508879031"}, "Rank": {"value": "15957"}}]}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "B008RJFRTK"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "App"}, "Brand": {"value": "MY.COM"}, "HardwarePlatform": {"value": "Android"}, "IsAdultProduct": {"value": "false"}, "Label": {"value": "MY.COM"}, "Languages": {"Language": [{"Name": {"value": "arabic"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "chinese"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "czech"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "dutch"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "english"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "french"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "german"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "japanese"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "korean"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "polish"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "portuguese"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": 
"russian"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "spanish"}, "Type": {"value": "Pubblicato"}}, {"Name": {"value": "vietnamese"}, "Type": {"value": "Pubblicato"}}]}, "ListPrice": {"Amount": {"value": "0.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "MY.COM"}, "OperatingSystem": {"value": "Android"}, "PartNumber": {"value": "com.mapswithme.maps.pro"}, "ProductGroup": {"value": "Mobile Application"}, "ProductTypeName": {"value": "MOBILE_APPLICATION"}, "Publisher": {"value": "MY.COM"}, "ReleaseDate": {"value": "2016-01-14"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/61KevuswqEL._SL75_.png"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "75", "Units": {"value": "pixels"}}}, "Studio": {"value": "MY.COM"}, "Title": {"value": "MAPS.ME \u2014 Mappe Offline"}}}, "Relationships": {}, "SalesRankings": {}}, {"Identifiers": {"MarketplaceASIN": {"MarketplaceId": {"value": "APJ6JRA9NG5V4"}, "ASIN": {"value": "8832706571"}}}, "AttributeSets": {"ItemAttributes": {"lang": {"value": "it-IT"}, "Binding": {"value": "Copertina flessibile"}, "Creator": {"value": "aa.vv.", "Role": {"value": "Autore"}}, "Genre": {"value": "Diritto"}, "Label": {"value": "Neldiritto Editore"}, "Languages": {"Language": {"Name": {"value": "italian"}, "Type": {"value": "Pubblicato"}}}, "ListPrice": {"Amount": {"value": "90.00"}, "CurrencyCode": {"value": "EUR"}}, "Manufacturer": {"value": "Neldiritto Editore"}, "NumberOfItems": {"value": "1"}, "NumberOfPages": {"value": "1200"}, "PackageDimensions": {"Height": {"value": "3.0708661386", "Units": {"value": "inches"}}, "Length": {"value": "9.8425196750", "Units": {"value": "inches"}}, "Width": {"value": "6.7716535364", "Units": {"value": "inches"}}, "Weight": {"value": "5.291094288000000881849048", "Units": {"value": "pounds"}}}, "ProductGroup": {"value": "Libro"}, "ProductTypeName": {"value": "ABIS_BOOK"}, "PublicationDate": {"value": "2020-01-24"}, "Publisher": 
{"value": "Neldiritto Editore"}, "ReleaseDate": {"value": "2020-01-24"}, "SmallImage": {"URL": {"value": "http://ecx.images-amazon.com/images/I/41HeNbq4xKL._SL75_.jpg"}, "Height": {"value": "75", "Units": {"value": "pixels"}}, "Width": {"value": "53", "Units": {"value": "pixels"}}}, "Studio": {"value": "Neldiritto Editore"}, "Title": {"value": "Concorso Magistratura 2020: Mappe e schemi di Diritto civile-Diritto penale-Diritto amministrativo"}}}, "Relationships": {}, "SalesRankings": {"SalesRank": [{"ProductCategoryId": {"value": "book_display_on_website"}, "Rank": {"value": "62044"}}, {"ProductCategoryId": {"value": "1346646031"}, "Rank": {"value": "617"}}, {"ProductCategoryId": {"value": "1346648031"}, "Rank": {"value": "754"}}]}}]}}, "ResponseMetadata": {"RequestId": {"value": "d384713e-7c79-4a6d-81cd-d0aa68c7b409"}}}
parsed_json = DictWrapper(xml).parsed
assert parsed_json == expected_json
def test_decode_xml_custom_parser_xmltodict():
xml = '<?xml version="1.0"?><ListMatchingProductsResponse xmlns="http://mws.amazonservices.com/schema/Products/2011-10-01"><ListMatchingProductsResult><Products xmlns:ns2="http://mws.amazonservices.com/schema/Products/2011-10-01/default.xsd"><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8891808660</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Creator Role="Autore">Mizielinska, Aleksandra</ns2:Creator><ns2:Creator Role="Autore">Mizielinski, Daniel</ns2:Creator><ns2:Creator Role="Traduttore">Parisi, V.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">14.80312</ns2:Height><ns2:Length Units="inches">10.86612</ns2:Length><ns2:Width Units="inches">1.06299</ns2:Width><ns2:Weight Units="pounds">3.17</ns2:Weight></ns2:ItemDimensions><ns2:Label>Mondadori Electa</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>25.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Mondadori Electa</ns2:Manufacturer><ns2:NumberOfPages>144</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.8661417314</ns2:Height><ns2:Length Units="inches">14.9606299060</ns2:Length><ns2:Width Units="inches">11.0236220360</ns2:Width><ns2:Weight Units="pounds">3.1746565728</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2016-10-25</ns2:PublicationDate><ns2:Publisher>Mondadori Electa</ns2:Publisher><ns2:ReleaseDate>2016-10-25</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61K2xircqJL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width 
Units="pixels">55</ns2:Width></ns2:SmallImage><ns2:Studio>Mondadori Electa</ns2:Studio><ns2:Title>Mappe. Un atlante per viaggiare tra terra, mari e culture del mondo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>2843</Rank></SalesRank><SalesRank><ProductCategoryId>13064701031</ProductCategoryId><Rank>2</Rank></SalesRank><SalesRank><ProductCategoryId>13077570031</ProductCategoryId><Rank>2</Rank></SalesRank><SalesRank><ProductCategoryId>13064711031</ProductCategoryId><Rank>15</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8858014308</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>Passioni</ns2:Brand><ns2:Creator Role="Autore">Brotton, Jerry</ns2:Creator><ns2:Creator Role="Traduttore">Fontebuoni, A.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">10.31494</ns2:Height><ns2:Length Units="inches">12.20470</ns2:Length><ns2:Width Units="inches">0.86614</ns2:Width></ns2:ItemDimensions><ns2:Label>Gribaudo</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>24.90</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Gribaudo</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>256</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.1023622036</ns2:Height><ns2:Length Units="inches">12.1653543183</ns2:Length><ns2:Width Units="inches">10.2362204620</ns2:Width><ns2:Weight 
Units="pounds">3.4392112872</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2015-11-05</ns2:PublicationDate><ns2:Publisher>Gribaudo</ns2:Publisher><ns2:ReleaseDate>2015-11-05</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61UmGSV5reL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">62</ns2:Width></ns2:SmallImage><ns2:Studio>Gribaudo</ns2:Studio><ns2:Title>Le grandi mappe. Oltre 60 capolavori raccontano l\'evoluzione dell\'uomo, la sua storia e la sua cultura. Ediz. illustrata</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>23519</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>40</Rank></SalesRank><SalesRank><ProductCategoryId>508856031</ProductCategoryId><Rank>452</Rank></SalesRank><SalesRank><ProductCategoryId>508758031</ProductCategoryId><Rank>3211</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8807890283</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Brand>UNIVERSALE ECONOMICA. SAGGI</ns2:Brand><ns2:Creator Role="Autore">Brotton, Jerry</ns2:Creator><ns2:Creator Role="Traduttore">Sala, V. 
B.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">5.47243</ns2:Height><ns2:Length Units="inches">8.77951</ns2:Length><ns2:Width Units="inches">1.49606</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Feltrinelli</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>19.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Feltrinelli</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>526</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.59842519522</ns2:Height><ns2:Length Units="inches">8.7007873927</ns2:Length><ns2:Width Units="inches">5.49999999439</ns2:Width><ns2:Weight Units="pounds">1.6755131912</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2017-11-23</ns2:PublicationDate><ns2:Publisher>Feltrinelli</ns2:Publisher><ns2:ReleaseDate>2017-11-23</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61jo5I7vBjL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">48</ns2:Width></ns2:SmallImage><ns2:Studio>Feltrinelli</ns2:Studio><ns2:Title>La storia del mondo in dodici mappe</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>18925</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>34</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8811149843</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina 
rigida</ns2:Binding><ns2:Brand>SAGGI</ns2:Brand><ns2:Creator Role="Autore">Wilford, John Noble</ns2:Creator><ns2:Creator Role="Traduttore">Gianna Lonza</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.97636</ns2:Height><ns2:Length Units="inches">6.69290</ns2:Length><ns2:Width Units="inches">1.41732</ns2:Width></ns2:ItemDimensions><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>30.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>478</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4960629906</ns2:Height><ns2:Length Units="inches">8.7401574714</ns2:Length><ns2:Width Units="inches">6.2992125920</ns2:Width><ns2:Weight Units="pounds">1.4991433816</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2018-11-22</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2018-11-22</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61KTEY8nMgL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">54</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>I signori delle mappe. 
La storia avventurosa dell\'invenzione della cartografia</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>23845</Rank></SalesRank><SalesRank><ProductCategoryId>508875031</ProductCategoryId><Rank>41</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>B084FZWQHD</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Creator Role="Autore">Frasante, Marco</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.5</ns2:Height><ns2:Length Units="inches">5.5</ns2:Length><ns2:Width Units="inches">0.2</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Independently published</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua originale</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Sconosciuto</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>12.69</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Independently published</ns2:Manufacturer><ns2:NumberOfPages>87</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.2</ns2:Height><ns2:Length Units="inches">8.5</ns2:Length><ns2:Width Units="inches">5.5</ns2:Width><ns2:Weight Units="pounds">0.37</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-02-09</ns2:PublicationDate><ns2:Publisher>Independently published</ns2:Publisher><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51Na9vFKvgL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width 
Units="pixels">49</ns2:Width></ns2:SmallImage><ns2:Studio>Independently published</ns2:Studio><ns2:Title>Mappe Mentali e Mappe Concettuali: La Guida Pi\xc3\xb9 Completa Per Memorizzare e Apprendere Qualsiasi Cosa In Modo Semplice e Veloce</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>2663</Rank></SalesRank><SalesRank><ProductCategoryId>508885031</ProductCategoryId><Rank>192</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>881160771X</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>SAGGI</ns2:Brand><ns2:Creator Role="Autore">Marshall, Tim</ns2:Creator><ns2:Creator Role="Illustratore">Easton, G.</ns2:Creator><ns2:Creator Role="Illustratore">Smith, J.</ns2:Creator><ns2:Creator Role="Illustratore">Hawkins, E.</ns2:Creator><ns2:Creator Role="Illustratore">Crane, P.</ns2:Creator><ns2:Creator Role="Traduttore">Caraffini, S.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">12.40155</ns2:Height><ns2:Length Units="inches">10.03935</ns2:Length><ns2:Width Units="inches">0.59055</ns2:Width></ns2:ItemDimensions><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>20.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>80</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">0.5511811018</ns2:Height><ns2:Length Units="inches">12.2047243970</ns2:Length><ns2:Width Units="inches">9.8425196750</ns2:Width><ns2:Weight 
Units="pounds">1.6975594174</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-02-13</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2020-02-13</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/514B0NG7gvL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">57</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>Le 12 mappe che spiegano il mondo ai ragazzi</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>24610</Rank></SalesRank><SalesRank><ProductCategoryId>13064684031</ProductCategoryId><Rank>7</Rank></SalesRank><SalesRank><ProductCategoryId>13064569031</ProductCategoryId><Rank>35</Rank></SalesRank><SalesRank><ProductCategoryId>13077656031</ProductCategoryId><Rank>35</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>881167378X</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Creator Role="Autore">Marshall, Tim</ns2:Creator><ns2:Creator Role="Traduttore">Merlini, R.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">8.77951</ns2:Height><ns2:Length Units="inches">5.66928</ns2:Length><ns2:Width Units="inches">1.37795</ns2:Width></ns2:ItemDimensions><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>Garzanti</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Lingua 
originale</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>19.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Garzanti</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>313</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4960629906</ns2:Height><ns2:Length Units="inches">8.5826771566</ns2:Length><ns2:Width Units="inches">5.7086614115</ns2:Width><ns2:Weight Units="pounds">1.1904962148</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2017-06-08</ns2:PublicationDate><ns2:Publisher>Garzanti</ns2:Publisher><ns2:ReleaseDate>2017-06-08</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51DYJDPBKmL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">50</ns2:Width></ns2:SmallImage><ns2:Studio>Garzanti</ns2:Studio><ns2:Title>Le 10 mappe che spiegano il mondo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>35380</Rank></SalesRank><SalesRank><ProductCategoryId>508819031</ProductCategoryId><Rank>197</Rank></SalesRank><SalesRank><ProductCategoryId>508812031</ProductCategoryId><Rank>678</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8804712279</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina rigida</ns2:Binding><ns2:Brand>LE SCIE. 
NUOVA SERIE STRANIERI</ns2:Brand><ns2:Creator Role="Autore">Moller, Violet</ns2:Creator><ns2:Creator Role="Traduttore">Vanni, L.</ns2:Creator><ns2:ItemDimensions><ns2:Height Units="inches">9.44880</ns2:Height><ns2:Length Units="inches">6.69290</ns2:Length><ns2:Width Units="inches">1.18110</ns2:Width></ns2:ItemDimensions><ns2:Label>Mondadori</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>22.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Mondadori</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>325</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">1.4173228332</ns2:Height><ns2:Length Units="inches">9.4488188880</ns2:Length><ns2:Width Units="inches">6.4960629855</ns2:Width><ns2:Weight Units="pounds">1.543235834</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2019-05-28</ns2:PublicationDate><ns2:Publisher>Mondadori</ns2:Publisher><ns2:ReleaseDate>2019-05-28</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/51ntMunIvhL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">50</ns2:Width></ns2:SmallImage><ns2:Studio>Mondadori</ns2:Studio><ns2:Title>La mappa dei libri perduti. 
Come la conoscenza antica \xc3\xa8 stata perduta e ritrovata: una storia in sette citt\xc3\xa0</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>63759</Rank></SalesRank><SalesRank><ProductCategoryId>508810031</ProductCategoryId><Rank>692</Rank></SalesRank><SalesRank><ProductCategoryId>508879031</ProductCategoryId><Rank>15957</Rank></SalesRank></SalesRankings></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>B008RJFRTK</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>App</ns2:Binding><ns2:Brand>MY.COM</ns2:Brand><ns2:HardwarePlatform>Android</ns2:HardwarePlatform><ns2:IsAdultProduct>false</ns2:IsAdultProduct><ns2:Label>MY.COM</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>arabic</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>chinese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>czech</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>dutch</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>english</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>french</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>german</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>japanese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>korean</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>polish</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>portuguese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>russian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:
Language><ns2:Language><ns2:Name>spanish</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language><ns2:Language><ns2:Name>vietnamese</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>0.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>MY.COM</ns2:Manufacturer><ns2:OperatingSystem>Android</ns2:OperatingSystem><ns2:PartNumber>com.mapswithme.maps.pro</ns2:PartNumber><ns2:ProductGroup>Mobile Application</ns2:ProductGroup><ns2:ProductTypeName>MOBILE_APPLICATION</ns2:ProductTypeName><ns2:Publisher>MY.COM</ns2:Publisher><ns2:ReleaseDate>2016-01-14</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/61KevuswqEL._SL75_.png</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">75</ns2:Width></ns2:SmallImage><ns2:Studio>MY.COM</ns2:Studio><ns2:Title>MAPS.ME \xe2\x80\x94 Mappe Offline</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings/></Product><Product><Identifiers><MarketplaceASIN><MarketplaceId>APJ6JRA9NG5V4</MarketplaceId><ASIN>8832706571</ASIN></MarketplaceASIN></Identifiers><AttributeSets><ns2:ItemAttributes xml:lang="it-IT"><ns2:Binding>Copertina flessibile</ns2:Binding><ns2:Creator Role="Autore">aa.vv.</ns2:Creator><ns2:Genre>Diritto</ns2:Genre><ns2:Label>Neldiritto Editore</ns2:Label><ns2:Languages><ns2:Language><ns2:Name>italian</ns2:Name><ns2:Type>Pubblicato</ns2:Type></ns2:Language></ns2:Languages><ns2:ListPrice><ns2:Amount>90.00</ns2:Amount><ns2:CurrencyCode>EUR</ns2:CurrencyCode></ns2:ListPrice><ns2:Manufacturer>Neldiritto Editore</ns2:Manufacturer><ns2:NumberOfItems>1</ns2:NumberOfItems><ns2:NumberOfPages>1200</ns2:NumberOfPages><ns2:PackageDimensions><ns2:Height Units="inches">3.0708661386</ns2:Height><ns2:Length Units="inches">9.8425196750</ns2:Length><ns2:Width Units="inches">6.7716535364</ns2:Width><ns2:Weight 
Units="pounds">5.291094288000000881849048</ns2:Weight></ns2:PackageDimensions><ns2:ProductGroup>Libro</ns2:ProductGroup><ns2:ProductTypeName>ABIS_BOOK</ns2:ProductTypeName><ns2:PublicationDate>2020-01-24</ns2:PublicationDate><ns2:Publisher>Neldiritto Editore</ns2:Publisher><ns2:ReleaseDate>2020-01-24</ns2:ReleaseDate><ns2:SmallImage><ns2:URL>http://ecx.images-amazon.com/images/I/41HeNbq4xKL._SL75_.jpg</ns2:URL><ns2:Height Units="pixels">75</ns2:Height><ns2:Width Units="pixels">53</ns2:Width></ns2:SmallImage><ns2:Studio>Neldiritto Editore</ns2:Studio><ns2:Title>Concorso Magistratura 2020: Mappe e schemi di Diritto civile-Diritto penale-Diritto amministrativo</ns2:Title></ns2:ItemAttributes></AttributeSets><Relationships/><SalesRankings><SalesRank><ProductCategoryId>book_display_on_website</ProductCategoryId><Rank>62044</Rank></SalesRank><SalesRank><ProductCategoryId>1346646031</ProductCategoryId><Rank>617</Rank></SalesRank><SalesRank><ProductCategoryId>1346648031</ProductCategoryId><Rank>754</Rank></SalesRank></SalesRankings></Product></Products></ListMatchingProductsResult><ResponseMetadata><RequestId>d384713e-7c79-4a6d-81cd-d0aa68c7b409</RequestId></ResponseMetadata></ListMatchingProductsResponse>'
parsed_json = DictWrapper(xml, xml_parser=xmltodict.parse).parsed
print(parsed_json)
| true | true |
f7fecd44c586cbe798fb78a4150b7d767a8f21e1 | 44 | py | Python | golf/__init__.py | pincoin/withthai-chatbot | 7ce71c84fcc38c40ecb7df283d30e5ec2315ecd2 | [
"MIT"
] | 1 | 2020-09-20T10:09:02.000Z | 2020-09-20T10:09:02.000Z | golf/__init__.py | pincoin/withthai-chatbot | 7ce71c84fcc38c40ecb7df283d30e5ec2315ecd2 | [
"MIT"
] | 7 | 2021-04-08T19:39:58.000Z | 2022-03-12T00:48:17.000Z | golf/__init__.py | pincoin/withthai | 73d7c6aedf79883c3bd44d7944746b53f8eb110e | [
"MIT"
] | null | null | null | default_app_config = 'golf.apps.GolfConfig'
| 22 | 43 | 0.818182 | default_app_config = 'golf.apps.GolfConfig'
| true | true |
f7fecd9cf3017c7f85b5204165d11ff0f1364596 | 1,506 | py | Python | tests/util/test_stringutils.py | phiberoptick/synapse | c1ddbbde4fb948cf740d4c59869157943d3711c6 | [
"Apache-2.0"
] | null | null | null | tests/util/test_stringutils.py | phiberoptick/synapse | c1ddbbde4fb948cf740d4c59869157943d3711c6 | [
"Apache-2.0"
] | 1 | 2021-05-13T17:27:16.000Z | 2021-05-19T14:38:32.000Z | tests/util/test_stringutils.py | phiberoptick/synapse | c1ddbbde4fb948cf740d4c59869157943d3711c6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.errors import SynapseError
from synapse.util.stringutils import assert_valid_client_secret
from .. import unittest
class StringUtilsTestCase(unittest.TestCase):
    def test_client_secret_regex(self):
        """Ensure that client_secret does not contain illegal characters"""
        valid_secrets = (
            "abcde12345",
            "ABCabc123",
            "_--something==_",
            "...--==-18913",
            "8Dj2odd-e9asd.cd==_--ddas-secret-",
        )
        invalid_secrets = (
            "--+-/secret",
            "\\dx--dsa288",
            "",
            "AAS//",
            "asdj**",
            ">X><Z<!!-)))",
            "a@b.com",
        )

        # Well-formed secrets must be accepted without raising.
        for secret in valid_secrets:
            assert_valid_client_secret(secret)

        # Malformed secrets must be rejected with a SynapseError.
        for secret in invalid_secrets:
            with self.assertRaises(SynapseError):
                assert_valid_client_secret(secret)
| 31.375 | 75 | 0.624834 |
from synapse.api.errors import SynapseError
from synapse.util.stringutils import assert_valid_client_secret
from .. import unittest
class StringUtilsTestCase(unittest.TestCase):
def test_client_secret_regex(self):
good = [
"abcde12345",
"ABCabc123",
"_--something==_",
"...--==-18913",
"8Dj2odd-e9asd.cd==_--ddas-secret-",
]
bad = [
"--+-/secret",
"\\dx--dsa288",
"",
"AAS//",
"asdj**",
">X><Z<!!-)))",
"a@b.com",
]
for client_secret in good:
assert_valid_client_secret(client_secret)
for client_secret in bad:
with self.assertRaises(SynapseError):
assert_valid_client_secret(client_secret)
| true | true |
f7fecfc4aed022686aed6218eed432ea2542554b | 2,216 | py | Python | tests_sql/test_sql_interestingness.py | cclauss/lux | bcb9b3d56a3fc560a5d86ae5aa747c58935241c4 | [
"Apache-2.0"
] | 3,731 | 2020-01-16T01:06:30.000Z | 2022-03-31T21:01:04.000Z | tests_sql/test_sql_interestingness.py | cclauss/lux | bcb9b3d56a3fc560a5d86ae5aa747c58935241c4 | [
"Apache-2.0"
] | 393 | 2020-01-29T04:52:27.000Z | 2022-03-31T20:02:19.000Z | tests_sql/test_sql_interestingness.py | cclauss/lux | bcb9b3d56a3fc560a5d86ae5aa747c58935241c4 | [
"Apache-2.0"
] | 304 | 2020-01-29T03:00:50.000Z | 2022-03-25T22:31:20.000Z | # Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .context import lux
import pytest
import pandas as pd
import numpy as np
import psycopg2
from lux.interestingness.interestingness import interestingness
def test_interestingness_1_0_1(global_var):
    """Intent with one filtered attribute (origin=USA) and one quantitative
    attribute: the current vis scores 0 while the top Filter recommendation
    scores above 0."""
    table = lux.LuxSQLTable()
    table.set_SQL_table("cars")
    intent = [
        lux.Clause(attribute="origin", filter_op="=", value="USA"),
        lux.Clause(attribute="cylinders"),
    ]
    table.set_intent(intent)
    table._repr_html_()
    top_filter_score = table.recommendation["Filter"][0].score
    assert table.current_vis[0].score == 0
    assert top_filter_score > 0
    table.clear_intent()
def test_interestingness_0_1_1(global_var):
    """Wildcard filter ("origin = ?") plus one quantitative attribute:
    the materialized current vis must have a computable interestingness
    score, and the inferred wildcard binding must be "USA"."""
    tbl = lux.LuxSQLTable()
    tbl.set_SQL_table("cars")
    tbl.set_intent(
        [
            lux.Clause(attribute="origin", filter_op="=", value="?"),
            lux.Clause(attribute="milespergal"),
        ]
    )
    tbl._repr_html_()
    current_vis = tbl.recommendation["Current Vis"][0]
    # PEP 8 (E711): compare against None with `is not`, not `!=`.
    assert interestingness(current_vis, tbl) is not None
    # Third inferred-intent clause carries the resolved wildcard value
    # (presumably the top-ranked binding — confirm ordering guarantee).
    assert str(current_vis._inferred_intent[2].value) == "USA"
    tbl.clear_intent()
def test_interestingness_1_1_1(global_var):
    """One quantitative attribute plus one filter: the top Enhance and
    top Filter recommendations must both have a computable
    interestingness score."""
    tbl = lux.LuxSQLTable()
    tbl.set_SQL_table("cars")
    tbl.set_intent(
        [
            lux.Clause(attribute="horsepower"),
            lux.Clause(attribute="origin", filter_op="=", value="USA", bin_size=20),
        ]
    )
    tbl._repr_html_()
    # PEP 8 (E711): compare against None with `is not`, not `!=`.
    assert interestingness(tbl.recommendation["Enhance"][0], tbl) is not None
    # check for top recommended Filter graph score is not none
    assert interestingness(tbl.recommendation["Filter"][0], tbl) is not None
    tbl.clear_intent()
| 30.777778 | 87 | 0.679603 |
from .context import lux
import pytest
import pandas as pd
import numpy as np
import psycopg2
from lux.interestingness.interestingness import interestingness
def test_interestingness_1_0_1(global_var):
tbl = lux.LuxSQLTable()
tbl.set_SQL_table("cars")
tbl.set_intent(
[
lux.Clause(attribute="origin", filter_op="=", value="USA"),
lux.Clause(attribute="cylinders"),
]
)
tbl._repr_html_()
filter_score = tbl.recommendation["Filter"][0].score
assert tbl.current_vis[0].score == 0
assert filter_score > 0
tbl.clear_intent()
def test_interestingness_0_1_1(global_var):
tbl = lux.LuxSQLTable()
tbl.set_SQL_table("cars")
tbl.set_intent(
[
lux.Clause(attribute="origin", filter_op="=", value="?"),
lux.Clause(attribute="milespergal"),
]
)
tbl._repr_html_()
assert interestingness(tbl.recommendation["Current Vis"][0], tbl) != None
assert str(tbl.recommendation["Current Vis"][0]._inferred_intent[2].value) == "USA"
tbl.clear_intent()
def test_interestingness_1_1_1(global_var):
tbl = lux.LuxSQLTable()
tbl.set_SQL_table("cars")
tbl.set_intent(
[
lux.Clause(attribute="horsepower"),
lux.Clause(attribute="origin", filter_op="=", value="USA", bin_size=20),
]
)
tbl._repr_html_()
assert interestingness(tbl.recommendation["Enhance"][0], tbl) != None
assert interestingness(tbl.recommendation["Filter"][0], tbl) != None
tbl.clear_intent()
| true | true |
f7fecffa8949305853621ce7e29517a3bd41f663 | 2,101 | py | Python | src/datashare/setup.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | 1 | 2022-02-18T00:16:47.000Z | 2022-02-18T00:16:47.000Z | src/datashare/setup.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | null | null | null | src/datashare/setup.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages

# bdist_wheel support is optional: fall back gracefully when the Azure
# wheel helper is not installed.
try:
    from azure_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")

# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
VERSION = '0.1.1'

# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'License :: OSI Approved :: MIT License',
]

# TODO: Add any additional SDK dependencies here
DEPENDENCIES = []


def _read(path):
    # Read a UTF-8 text file in full (used for the long description).
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()


README = _read('README.md')
HISTORY = _read('HISTORY.rst')

setup(
    name='datashare',
    version=VERSION,
    description='Microsoft Azure Command-Line Tools DataShareManagementClient Extension',
    # TODO: Update author and email, if applicable
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli-extensions/tree/main/src/datashare',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    install_requires=DEPENDENCIES,
    package_data={'azext_datashare': ['azext_metadata.json']},
)
| 36.224138 | 95 | 0.625892 |
from codecs import open
from setuptools import setup, find_packages
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
VERSION = '0.1.1'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='datashare',
version=VERSION,
description='Microsoft Azure Command-Line Tools DataShareManagementClient Extension',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli-extensions/tree/main/src/datashare',
long_description=README + '\n\n' + HISTORY,
license='MIT',
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=DEPENDENCIES,
package_data={'azext_datashare': ['azext_metadata.json']},
)
| true | true |
f7fed01b22cb381dde6d5baead29e224e7de51d3 | 1,200 | py | Python | crash_course/ch09/exec/ice_cream_stand.py | dantin/python-by-example | 5769c7a332ebd60fd54e477b6813f2f2a0f3f37f | [
"BSD-3-Clause"
] | null | null | null | crash_course/ch09/exec/ice_cream_stand.py | dantin/python-by-example | 5769c7a332ebd60fd54e477b6813f2f2a0f3f37f | [
"BSD-3-Clause"
] | null | null | null | crash_course/ch09/exec/ice_cream_stand.py | dantin/python-by-example | 5769c7a332ebd60fd54e477b6813f2f2a0f3f37f | [
"BSD-3-Clause"
] | null | null | null |
class Restaurant():
    """Model a basic restaurant with a name and a cuisine type."""

    def __init__(self, name, cuisine_type):
        """Store the restaurant's name and the kind of food it serves."""
        self.restaurant_name = name
        self.cuisine_type = cuisine_type

    def describe_restaurant(self):
        """Announce the restaurant and the cuisine it serves."""
        summary = '{} is a {} restaurant.'.format(
            self.restaurant_name.title(), self.cuisine_type)
        print(summary)

    def open_restaurant(self):
        """Announce that the restaurant is open for business."""
        print('{} is open.'.format(self.restaurant_name.title()))


class IceCreamStand(Restaurant):
    """A restaurant that also tracks the ice cream flavors it sells."""

    def __init__(self, name, cuisine_type, *flavors):
        """Create the stand and record any number of flavors."""
        super().__init__(name, cuisine_type)
        self.flavors = list(flavors)

    def display_flavors(self):
        """Describe the stand, then list every available flavor."""
        self.describe_restaurant()
        print('It has the following flavors:')
        for flavor_name in self.flavors:
            print(' - ' + flavor_name)


restaurant = IceCreamStand('Ice Goods', 'ice cream', 'chocolate', 'vanilla', 'mint')
restaurant.display_flavors()
| 30.769231 | 84 | 0.635833 |
class Restaurant():
def __init__(self, name, cuisine_type):
self.restaurant_name = name
self.cuisine_type = cuisine_type
def describe_restaurant(self):
print(self.restaurant_name.title() + ' is a ' + self.cuisine_type +
' restaurant.')
def open_restaurant(self):
print(self.restaurant_name.title() + ' is open.')
class IceCreamStand(Restaurant):
def __init__(self, name, cuisine_type, *flavors):
super().__init__(name, cuisine_type)
self.flavors = [x for x in flavors]
def display_flavors(self):
self.describe_restaurant()
print('It has the following flavors:')
for flavor in self.flavors:
print(' - ' + flavor)
restaurant = IceCreamStand('Ice Goods', 'ice cream', 'chocolate', 'vanilla', 'mint')
restaurant.display_flavors()
| true | true |
f7fed066f2b8742d63342321e424b30c75c45e03 | 3,841 | py | Python | src/OTLMOW/PostenMapping/Model/Post060352280.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/PostenMapping/Model/Post060352280.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/PostenMapping/Model/Post060352280.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060352280(StandaardPost):
    """Standaardpost 0603.52280: paving of baked clay street bricks
    (dikformaat, height ~80 mm, standard quality class B), measured in m2.

    Auto-generated mapping from this standaardpost number onto OTL
    attributes; per the generator note above, extend rather than edit.
    """
    def __init__(self):
        super().__init__(
            nummer='0603.52280',
            beschrijving='Bestrating van gebakken straatstenen, standaardkwaliteitsklasse B volgens 6-3.6, dikformaat, hoogte ca. 80 mm',
            meetstaateenheid='M2',
            # Each StandaardPostMapping pins one attribute of the
            # BestratingVanGebakkenStraatsteen OTL type: element format,
            # layer role, quality class, thickness and surface area.
            mappings=[StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen.formaatVanBestratingselement',
                dotnotatie='formaatVanBestratingselement',
                defaultWaarde='dikformaat-(ca.-200-x-ca.-65-mm)',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.52280')
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
                dotnotatie='laagRol',
                defaultWaarde='straatlaag',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.52280')
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen.standaardkwaliteitsklasse',
                dotnotatie='standaardkwaliteitsklasse',
                defaultWaarde='b',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.52280')
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
                dotnotatie='dikte',
                defaultWaarde='8',
                range='',
                usagenote='cm^^cdt:ucumunit',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.52280')
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
                dotnotatie='oppervlakte',
                defaultWaarde='',
                range='',
                usagenote='m2^^cdt:ucumunit',
                # Surface area is the one attribute that appears in the
                # meetstaat and must always be filled in.
                isMeetstaatAttr=1,
                isAltijdInTeVullen=1,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.52280')])
| 49.24359 | 148 | 0.587607 |
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
class Post060352280(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.52280',
beschrijving='Bestrating van gebakken straatstenen, standaardkwaliteitsklasse B volgens 6-3.6, dikformaat, hoogte ca. 80 mm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen.formaatVanBestratingselement',
dotnotatie='formaatVanBestratingselement',
defaultWaarde='dikformaat-(ca.-200-x-ca.-65-mm)',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52280')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotatie='laagRol',
defaultWaarde='straatlaag',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52280')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen.standaardkwaliteitsklasse',
dotnotatie='standaardkwaliteitsklasse',
defaultWaarde='b',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52280')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotatie='dikte',
defaultWaarde='8',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52280')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotatie='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52280')])
| true | true |
f7fed0ca952176ae8d9122861f76d3cb01ccc893 | 3,520 | py | Python | interactive.py | Zakobian/WD_gas_disk_imaging | b8bda209e541b442f44fdb6109de8f2f72ec38cf | [
"MIT"
] | null | null | null | interactive.py | Zakobian/WD_gas_disk_imaging | b8bda209e541b442f44fdb6109de8f2f72ec38cf | [
"MIT"
] | null | null | null | interactive.py | Zakobian/WD_gas_disk_imaging | b8bda209e541b442f44fdb6109de8f2f72ec38cf | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from plot_one import plot_me_one
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.integrate import simps
# Physical constants, SI units.
M_sun=1.989*10**30;  # solar mass [kg]
R_sun=696340*10**3;  # solar radius [m]
M=0.62*M_sun  # stellar mass, 0.62 M_sun (white dwarf per project context -- confirm)
r_star=0.0151*R_sun  # stellar radius, 0.0151 R_sun
G=6.67408*10**(-11);  # gravitational constant [m^3 kg^-1 s^-2]
####
####
#### This code will create the sandbox and allow user to play around with densities. To begin one needs a density to start with.
#### You can generate one by running one of the other programs.
#### The controls are:
####
#### c - switches between drawing circles and drawing by hand. Circles are drawn between inner and outer radius
#### B - sets color/density to 0
#### b - decreases current color/density by 1
#### w - increases current color/density by 1
#### space - Plot the emission lines from current density (the main loop binds ord(' '), not backspace)
#### Esc - close
####
img=np.load("density_todraw.npy")
# variables: mutable UI state shared with the mouse callback below.
ix = -1  # last mouse x position
iy = -1  # last mouse y position
drawing = False  # True while a stroke (or the second circle click) is pending
size=img.shape[0]  # pixel size; the callback centres circles at size//2, so a square image is assumed -- confirm
color=1  # current density value painted onto img
circle=True  # True: circle/annulus mode, False: freehand mode
consts={'e':0.0,  # presumably orbital eccentricity -- confirm against plot_me_one
'b':0.0,  # emissivity parameter, driven by the 'Emissivity(b)' trackbar
'view_angle':np.pi/2,
'inclination_angle':np.pi/5,
'r_max':550*r_star,  # outer disk radius passed to the line-profile model
'r_min':r_star  # inner disk radius
}
def on_change(val):
    """Trackbar callback: map the slider position [0, 200] onto the
    emissivity parameter b in [-4, 4] and store it in ``consts``."""
    b_value = 4 * (val - 100) / 100
    consts['b'] = b_value
    print(b_value)
def draw_rectangle_with_drag(event, x, y, flags, param):
    """OpenCV mouse callback that paints density onto the global ``img``.

    Two modes, chosen by the global ``circle`` flag (toggled with 'c'):
      * circle mode: the first click records an inner radius measured from
        the image centre, the second click an outer radius, and the annulus
        between them is filled with the current ``color``;
      * freehand mode: click-and-drag draws a thick line under the mouse.
    """
    global ix, iy,ir, drawing, img
    if event == cv2.EVENT_LBUTTONDOWN and circle:
        if not drawing:
            # First click: remember the point and its distance from the
            # image centre (inner radius of the annulus).
            ix = x
            iy = y
            ir = np.sqrt((ix-size//2)**2+(iy-size//2)**2)
        if drawing:
            # Second click: its radius is the outer edge.  Draw a ring whose
            # centre radius is the mean of the two and whose thickness spans
            # the gap between them.
            r = np.sqrt((x-size//2)**2+(y-size//2)**2)
            print(r,ir)
            cv2.circle(img, (size//2, size//2), ((r+ir)/2).astype(int), color=color, thickness=np.abs((r-ir)/2).astype(int))
            print('drawn 1')
        print(x,y)
        # Alternate between "awaiting first click" and "awaiting second click".
        drawing = not drawing
    if event == cv2.EVENT_LBUTTONDOWN and not circle:
        # Freehand mode: start a stroke at the click position.
        drawing = True
        ix=x
        iy=y
    elif event == cv2.EVENT_MOUSEMOVE and not circle:
        if drawing == True:
            # Extend the stroke with a thick (50 px) line segment.
            cv2.line(img,(ix,iy),(x,y),color,50)
            ix=x
            iy=y
    elif event == cv2.EVENT_LBUTTONUP and not circle:
        if(drawing):
            # Close out the stroke with a final segment to the release point.
            cv2.line(img,(ix,iy),(x,y),color,50)
        drawing = False
# Build the OpenCV window: the trackbar drives consts['b'] via on_change and
# mouse events go to the drawing callback.
cv2.namedWindow(winname = "Density of gas")
cv2.createTrackbar('Emissivity(b)', "Density of gas", 100, 200, on_change)
cv2.setMouseCallback("Density of gas",
                     draw_rectangle_with_drag)
# Interactive matplotlib figure that will receive the computed line profiles.
fig_hist = plt.figure(1)
ax_hist = fig_hist.add_subplot(1, 1, 1)
plt.ion()
plt.xlabel("Velocity/Wavelength")
plt.ylabel("Flux")
# Overlay the observed Si II profiles from each instrument, normalised so the
# continuum-subtracted flux (y - 1) integrates to one.
inst_names=['Xshooter','MIKE2']
for j,inst_name in enumerate(inst_names):
    x,y=np.loadtxt('data/SiII'+'_'+inst_name+'.csv', delimiter=',', unpack=True)
    area = simps((y-1),x)
    y=(y-1)/area
    ax_hist.plot(x,y, linewidth=1,label=inst_name)
# Main event loop: redraw the density image (~30 fps) and react to key presses.
while True:
    # imgC = cv2.applyColorMap(img, cv2.COLORMAP_JET)
    # Rescale into [0, 1] for display; a fully-zero image would divide by zero.
    if img.max()!=0: cv2.imshow("Density of gas", img/img.max())
    else: cv2.imshow("Density of gas", img)
    k = cv2.waitKey(33)
    if k == 27:
        # Esc quits.
        break
    elif k== ord(' '):
        # Space: compute and plot the emission lines for the current density.
        print('Plotting')
        plot_me_one(img,ax_hist,consts)
        plt.show()
        plt.pause(0.001)
    elif k== ord('B'):
        # Capital B resets the painted density value to zero.
        color=0
        print('Density now: '+str(color))
    elif k== ord('b'):
        color-=1
        print('Density now: '+str(color))
    elif k== ord('w'):
        color+=1
        print('Density now: '+str(color))
    elif k== ord('c'):
        # Toggle between circle and freehand drawing; abort any pending stroke.
        circle = not circle
        drawing=False
        if(circle):
            print('Now in circle mode')
        else:
            print('Now in drawing mode')
cv2.destroyAllWindows()
| 25.142857 | 128 | 0.601705 | import cv2
import numpy as np
from plot_one import plot_me_one
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.integrate import simps
M_sun=1.989*10**30;
R_sun=696340*10**3;
M=0.62*M_sun
r_star=0.0151*R_sun
G=6.67408*10**(-11);
1)
ax_hist = fig_hist.add_subplot(1, 1, 1)
plt.ion()
plt.xlabel("Velocity/Wavelength")
plt.ylabel("Flux")
inst_names=['Xshooter','MIKE2']
for j,inst_name in enumerate(inst_names):
x,y=np.loadtxt('data/SiII'+'_'+inst_name+'.csv', delimiter=',', unpack=True)
area = simps((y-1),x)
y=(y-1)/area
ax_hist.plot(x,y, linewidth=1,label=inst_name)
while True:
if img.max()!=0: cv2.imshow("Density of gas", img/img.max())
else: cv2.imshow("Density of gas", img)
k = cv2.waitKey(33)
if k == 27:
break
elif k== ord(' '):
print('Plotting')
plot_me_one(img,ax_hist,consts)
plt.show()
plt.pause(0.001)
elif k== ord('B'):
color=0
print('Density now: '+str(color))
elif k== ord('b'):
color-=1
print('Density now: '+str(color))
elif k== ord('w'):
color+=1
print('Density now: '+str(color))
elif k== ord('c'):
circle = not circle
drawing=False
if(circle):
print('Now in circle mode')
else:
print('Now in drawing mode')
cv2.destroyAllWindows()
| true | true |
f7fed22af91d208a084c6f4b88b64460b550416e | 1,744 | py | Python | HeavyIonsAnalysis/Configuration/python/Skims_HI_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | HeavyIonsAnalysis/Configuration/python/Skims_HI_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | HeavyIonsAnalysis/Configuration/python/Skims_HI_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContentHeavyIons_cff import FEVTEventContent
# Start from the full heavy-ion FEVT event content and drop bookkeeping
# products that should not be persisted in the skims.
skimContent = FEVTEventContent.clone()
skimContent.outputCommands.append("drop *_MEtoEDMConverter_*_*")
skimContent.outputCommands.append("drop *_*_*_SKIM")
#####################
# Di-jet skim stream.
from HeavyIonsAnalysis.Configuration.HI_DiJetSkim_cff import *
diJetSkimPath = cms.Path( diJetSkimSequence )
SKIMStreamDiJet = cms.FilteredStream(
    responsible = 'HI PAG',
    name = 'DiJet',
    paths = (diJetSkimPath),
    content = skimContent.outputCommands,
    selectEvents = cms.untracked.PSet(),
    dataTier = cms.untracked.string('RAW-RECO')
    )
#####################
# Photon skim stream.
from HeavyIonsAnalysis.Configuration.HI_PhotonSkim_cff import *
photonSkimPath = cms.Path( photonSkimSequence )
SKIMStreamPhoton = cms.FilteredStream(
    responsible = 'HI PAG',
    name = 'Photon',
    paths = (photonSkimPath),
    content = skimContent.outputCommands,
    selectEvents = cms.untracked.PSet(),
    dataTier = cms.untracked.string('RAW-RECO')
    )
# Z->ee skim stream; zEESkimSequence is presumably provided by the
# HI_PhotonSkim_cff wildcard import above -- confirm.
zEESkimPath = cms.Path( zEESkimSequence )
SKIMStreamZEE = cms.FilteredStream(
    responsible = 'HI PAG',
    name = 'ZEE',
    paths = (zEESkimPath),
    content = skimContent.outputCommands,
    selectEvents = cms.untracked.PSet(),
    dataTier = cms.untracked.string('RAW-RECO')
    )
#####################
# Z->mumu skim stream.
from HeavyIonsAnalysis.Configuration.HI_ZMMSkim_cff import *
zMMSkimPath = cms.Path( zMMSkimSequence )
SKIMStreamZMM = cms.FilteredStream(
    responsible = 'HI PAG',
    name = 'ZMM',
    paths = (zMMSkimPath),
    content = skimContent.outputCommands,
    selectEvents = cms.untracked.PSet(),
    dataTier = cms.untracked.string('RAW-RECO')
    )
#####################
| 29.066667 | 81 | 0.694381 | import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContentHeavyIons_cff import FEVTEventContent
skimContent = FEVTEventContent.clone()
skimContent.outputCommands.append("drop *_MEtoEDMConverter_*_*")
skimContent.outputCommands.append("drop *_*_*_SKIM")
diJetSkimPath),
content = skimContent.outputCommands,
selectEvents = cms.untracked.PSet(),
dataTier = cms.untracked.string('RAW-RECO')
)
s = (photonSkimPath),
content = skimContent.outputCommands,
selectEvents = cms.untracked.PSet(),
dataTier = cms.untracked.string('RAW-RECO')
)
zEESkimPath = cms.Path( zEESkimSequence )
SKIMStreamZEE = cms.FilteredStream(
responsible = 'HI PAG',
name = 'ZEE',
paths = (zEESkimPath),
content = skimContent.outputCommands,
selectEvents = cms.untracked.PSet(),
dataTier = cms.untracked.string('RAW-RECO')
)
h),
content = skimContent.outputCommands,
selectEvents = cms.untracked.PSet(),
dataTier = cms.untracked.string('RAW-RECO')
)
| true | true |
f7fed37f0ac950cb449ff8c1e416ea2aa1b2b483 | 21,267 | py | Python | src/plus_train.py | Lorraine333/joint-order-cbow | ed22c7b037a8d016a8982c8341bc02edc5054fc3 | [
"Apache-2.0"
] | null | null | null | src/plus_train.py | Lorraine333/joint-order-cbow | ed22c7b037a8d016a8982c8341bc02edc5054fc3 | [
"Apache-2.0"
] | null | null | null | src/plus_train.py | Lorraine333/joint-order-cbow | ed22c7b037a8d016a8982c8341bc02edc5054fc3 | [
"Apache-2.0"
] | null | null | null | """Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License."""
"""Trains and Evaluates the network."""
from __future__ import division
from __future__ import print_function
# from tensorflow.python.client import timeline
import time
import random
import tensorflow as tf
import plus_input_data as input_data
import numpy as np
from plus_model import tf_model
import plus_eval_model
import map_eval
import plus_eval_model as eval_model
from collections import defaultdict
import pickle
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
# --- optimisation / training schedule ---
flags.DEFINE_float('learning_rate', 1e-2, 'Initial learning rate.')
flags.DEFINE_float('cbow_learning_rate',1e-3, 'cbow learning rate')
flags.DEFINE_float('regularization', 0.0, 'l2 regularization parameters')
flags.DEFINE_boolean('save', False, 'Save the model')
flags.DEFINE_boolean('update_embed', True, 'Update the embeddings')
flags.DEFINE_integer('max_steps', 10000, 'Number of steps to run trainer.')
flags.DEFINE_integer('batch_size', 800, 'Batch size. Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', '../TransE_Order/data', 'Directory to put the data.')
flags.DEFINE_integer('cbow_step', 1000, 'Number of steps to run cbow trainer.')
flags.DEFINE_integer('embed_dim', 50, 'word embedding dimension')
flags.DEFINE_float('eps', 0., 'hierarchical error threshold')
flags.DEFINE_float('margin', 5, 'hinge loss margin')
flags.DEFINE_boolean('overfit', False, 'Over fit the dev data to check model')
flags.DEFINE_float('lower_scale', 0, 'lower initialize value for embeddings')
flags.DEFINE_float('higher_scale', 0.1, 'higher initialize value for embeddings')
flags.DEFINE_boolean('kb_only', True, 'whether to train kb only')
# flags.DEFINE_boolean('special_neg_sample', False, 'Whether to find negative examples from the not relation')
flags.DEFINE_integer('print_every',100,'Every 20 step, print out the evaluation results')
flags.DEFINE_float('alpha',0.01, 'regularization on error Function')
# --- evaluation / output ---
flags.DEFINE_boolean('rel_acc', True, 'check the different relation accurancy for test dataset')
flags.DEFINE_boolean('error_analysis', True, 'do error analysis for evaluation data')
flags.DEFINE_string('params_file', './params/','file to save parameters')
flags.DEFINE_string('error_file','./error_analysis/','dictionary to save error analysis result')
# NOTE(review): the flag name 'ouput_file' is misspelled, but run_training()
# reads FLAGS.ouput_file under this exact name, so renaming it would break
# existing command lines; kept as-is.
flags.DEFINE_string('ouput_file', './result/', 'print the result to this file')
flags.DEFINE_string('train', 'new_isa', 'training on both noisa and isa relatiosn')
flags.DEFINE_string('test','new_isa', 'test on isa relations')
flags.DEFINE_string('eval', 'acc', 'evaluate on MAP')
flags.DEFINE_integer('rel_size', 35, 'relation_size')
# lstm parameters
flags.DEFINE_integer('hidden_dim',100, 'lstm hidden layer dimension')
flags.DEFINE_boolean('peephole',True, 'whether to use peephole in lstm layer')
flags.DEFINE_string('tuple_model', 'ave', 'how to compose term vector, can choose from ave or lstm')
# word2vec parameters
flags.DEFINE_float('epsilon', 1e-6, 'epsilon for optimizor')
flags.DEFINE_float('beta1',1.0, 'weight on order_embedding loss')
flags.DEFINE_float('beta2',1.0, 'Weight on word2vec loss')
flags.DEFINE_string("word2vec_train_data", 'text8', "Training text file.")
flags.DEFINE_integer('word2vec_batch_size', 256, 'Batch size. Must divide evenly into the dataset sizes.') #256 #512
flags.DEFINE_integer('data_shard_rows', 256*600, 'num text "lines" for training in one shard') #256*600
flags.DEFINE_integer('data_shard_cols', 100, 'num tokens per text line') #100
flags.DEFINE_integer('vocab_size', 80000, 'vocab_size')
flags.DEFINE_float('num_neg_samples', 30, 'num_neg_samples')
flags.DEFINE_integer("window_size", 5, "The number of words to predict to the left and right ")
# nearest neighbor parameters
flags.DEFINE_integer('knn', 10, 'how many neighbors want to check')
# big wiki (alternative corpus settings, kept for reference)
# flags.DEFINE_string("word2vec_train_data", '../acl_cbow/data/binary-wackypedia-1-4-ukwac-', "Training text file.")
# flags.DEFINE_integer('word2vec_batch_size', 512, 'Batch size. Must divide evenly into the dataset sizes.') #256 #512
# flags.DEFINE_integer('data_shard_rows', 512*600, 'num text "lines" for training in one shard') #256*600
# flags.DEFINE_integer('data_shard_cols', 200, 'num tokens per text line') #100
def placeholder_inputs(batch_size):
    """Create every tf.placeholder the joint order-embedding/word2vec model feeds.

    Args:
        batch_size: unused; kept so existing call sites keep working.  All
            placeholders below use a dynamic (None) batch dimension instead.

    Returns:
        Dict mapping names to placeholders:
          * t1_/t2_{idx,msk,length}_placeholder: token ids (batch x tokens),
            per-token masks (batch x tokens x 1) and term lengths (batch x 1)
            for the two terms of a triple.
          * rel_placeholder / rel_msk_placeholder: relation ids and per-example
            relation mask.
          * label_placeholder: per-example float label.
          * row_indices / real_batch_size / data_shard: word2vec text-shard
            inputs sized by the word2vec FLAGS.
    """
    placeholder = {}
    # Both terms of a pair share the same placeholder layout, so build them in
    # a loop instead of duplicating the three definitions per term.
    for term in ('t1', 't2'):
        placeholder[term + '_idx_placeholder'] = tf.placeholder(tf.int32, shape=(None, None))
        placeholder[term + '_msk_placeholder'] = tf.placeholder(tf.int32, shape=(None, None, 1))
        placeholder[term + '_length_placeholder'] = tf.placeholder(tf.int32, shape=(None, 1))
    # Relation id and mask for each example.
    placeholder['rel_placeholder'] = tf.placeholder(tf.int32, shape=[None])
    placeholder['rel_msk_placeholder'] = tf.placeholder(tf.float32, shape=[None, 1])
    # Per-example label.
    placeholder['label_placeholder'] = tf.placeholder(tf.float32, shape=[None])
    # word2vec input: which rows of the current text shard form this batch.
    placeholder['row_indices'] = tf.placeholder(tf.int32, shape=[FLAGS.word2vec_batch_size])
    placeholder['real_batch_size'] = tf.placeholder(tf.int32, shape=[])
    placeholder['data_shard'] = tf.placeholder(tf.int32, shape=[FLAGS.data_shard_rows, FLAGS.data_shard_cols])
    return placeholder
def fill_feed_dict(data_set, placeholder, row_indices, rel):
    """Build the feed_dict for one joint training step.

    Pulls the next KB triple batch from ``data_set``, pads/masks the two
    terms, and binds the word2vec shard rows selected by ``row_indices``.

    Args:
        data_set: dataset object exposing next_batch(batch_size) ->
            (relation ids, term-1 token lists, term-2 token lists, labels).
        placeholder: placeholder dict produced by placeholder_inputs().
        row_indices: indices of the text-shard rows used for the word2vec loss.
        rel: relation vocabulary handed to input_data.rel_msk().

    Returns:
        feed_dict mapping placeholders to this batch's values.
    """
    r_idx, t1_idx, t2_idx, labels = data_set.next_batch(FLAGS.batch_size)
    # Pad the variable-length token lists into dense arrays plus masks/lengths.
    t1x, t1mask, t1length = input_data.prepare_data(t1_idx)
    t2x, t2mask, t2length = input_data.prepare_data(t2_idx)
    relmsk = input_data.rel_msk(r_idx, rel)
    feed_dict = {
        placeholder['t1_idx_placeholder']: t1x,
        placeholder['t1_msk_placeholder']: t1mask,
        placeholder['t1_length_placeholder']: t1length,
        placeholder['t2_idx_placeholder']: t2x,
        placeholder['t2_msk_placeholder']: t2mask,
        placeholder['t2_length_placeholder']: t2length,
        placeholder['rel_placeholder']: r_idx,
        placeholder['label_placeholder']: labels,
        placeholder['rel_msk_placeholder']: relmsk,
        placeholder['row_indices']: row_indices,
        placeholder['real_batch_size']: len(row_indices),
    }
    return feed_dict
def partial_word2vec_fill_feed_dict(placeholder, data_shared):
    """Feed dict that binds only the word2vec text shard.

    Used when just the shard contents need to be supplied, without any of
    the KB-batch inputs.
    """
    return {placeholder['data_shard']: data_shared}
def run_training():
    """Train the joint KB (order-embedding style) + CBOW word2vec model.

    Builds the TF1 graph (tuple/relation embeddings, KB loss, CBOW loss and
    the hierarchical error used for scoring), then runs the training
    schedule — KB-only, CBOW warm-up, or both objectives per step — and
    periodically evaluates with MAP or accuracy, optionally pickling the
    best-so-far embeddings.
    """
    # Score histories, appended every FLAGS.print_every steps.
    accu_list = []        # devtest scores
    train_accu_list = []  # training scores
    test_accu_list = []   # test scores
    curr_best = 0
    # Output / error-analysis / params file names encode every hyperparameter
    # so runs with different settings never overwrite each other.
    outfile = open(FLAGS.ouput_file+'newtask_abs_l1_learning_rate_'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_eval'+str(FLAGS.eval)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)+'.txt', 'wt')
    error_file_name = FLAGS.error_file+'newtask_abs_l1_learning_rate'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_eval'+str(FLAGS.eval)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)
    fname = FLAGS.params_file+'newtask_abs_l1_learning_rate'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_eval'+str(FLAGS.eval)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)+'.pkl'
    data_sets = input_data.read_data_sets(FLAGS, outfile)
    if FLAGS.overfit:
        # Sanity-check mode: train on the small dev split to verify the
        # model can overfit it.
        train_data = data_sets.dev
    else:
        train_data = data_sets.train
    with tf.Graph().as_default():
        placeholder = placeholder_inputs(FLAGS.batch_size)
        print('Build Model...', file = outfile)
        model = tf_model(data_sets.words, data_sets.We, data_sets.rel, data_sets.Rel, placeholder, FLAGS)
        print('Build Loss Function...', file = outfile)
        kb_loss = model.kbc_loss()
        cbow_loss = model.cbow_loss()
        print('Build Encode Function...', file = outfile)
        # Term (tuple) encoder: word-average ('ave') or LSTM composition.
        if FLAGS.tuple_model == 'ave':
            embed_t1 = model.tuple_embedding(placeholder['t1_idx_placeholder'], placeholder['t1_msk_placeholder'], placeholder['t1_length_placeholder'], model.getWe())
            embed_t2 = model.tuple_embedding(placeholder['t2_idx_placeholder'], placeholder['t2_msk_placeholder'], placeholder['t2_length_placeholder'], model.getWe())
        elif FLAGS.tuple_model == 'lstm':
            # Both terms share the same LSTM weights (same scope, reuse=True).
            with tf.variable_scope('term_embed', reuse = True):
                embed_t1 = model.tuple_lstm_embedding(placeholder['t1_idx_placeholder'], placeholder['t1_msk_placeholder'], placeholder['t1_length_placeholder'], model.getWe())
            with tf.variable_scope('term_embed', reuse = True):
                embed_t2 = model.tuple_lstm_embedding(placeholder['t2_idx_placeholder'], placeholder['t2_msk_placeholder'], placeholder['t2_length_placeholder'], model.getWe())
        else:
            print('Sorry, currently only support lstm terms and average terms')
        embed_r = model.rel_embedding(model.getRel(), placeholder['rel_placeholder'], placeholder['rel_msk_placeholder'])
        print('Build Hierarchical Error Function...', file = outfile)
        h_error = model.hierarchical_error(embed_t1, embed_r, embed_t2, FLAGS.eps, FLAGS.batch_size, FLAGS.embed_dim)
        nh_error = model.neg_hier_error()
        print('Build Training Function...', file = outfile)
        kb_train_op = model.training(kb_loss, FLAGS.epsilon, FLAGS.learning_rate)
        cbow_train_op = model.training(cbow_loss, FLAGS.epsilon, FLAGS.cbow_learning_rate)
        # The word2vec text is kept on-device as one fixed-size shard; each
        # step selects rows by index instead of feeding raw text.
        data_shared = np.asarray(data_sets.word2vec_data[:FLAGS.data_shard_rows*FLAGS.data_shard_cols]).reshape((FLAGS.data_shard_rows, FLAGS.data_shard_cols))
        model_we = model.getWe()
        model_rel = model.getRel()
        summary_op = tf.summary.merge_all()
        saver = tf.train.Saver()
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        # Upload the data shard into its graph variable once, up front.
        partial_feed_dict = partial_word2vec_fill_feed_dict(placeholder, data_shared)
        sess.run(model.assign_data_shard_var, feed_dict = partial_feed_dict)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=sess.graph)
        perm = np.arange(FLAGS.data_shard_rows)
        word2vec_idx = 0
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            # Reshuffle the shard rows once a full pass has been consumed.
            if (word2vec_idx + FLAGS.word2vec_batch_size) > FLAGS.data_shard_rows:
                random.shuffle(perm)
                word2vec_idx = 0
            row_indices = perm[word2vec_idx:word2vec_idx+FLAGS.word2vec_batch_size]
            word2vec_idx += FLAGS.word2vec_batch_size
            feed_dict = fill_feed_dict(train_data, placeholder, row_indices, data_sets.rel)
            # Training schedule: KB only, CBOW-only warm-up, then both.
            if(FLAGS.kb_only):
                t1 = time.time()
                _, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict)
            elif(step<FLAGS.cbow_step):
                t1 = time.time()
                _, cbow_loss_value = sess.run([cbow_train_op,cbow_loss], feed_dict=feed_dict)
            else:
                t1 = time.time()
                _, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict)
                t2 = time.time()
                _, cbow_loss_value = sess.run([cbow_train_op,cbow_loss], feed_dict=feed_dict)
            he_error = sess.run(h_error, feed_dict=feed_dict)
            duration = time.time() - start_time
            # Periodic logging / evaluation / checkpointing.
            if (step%(FLAGS.print_every) == 0):
                embed = sess.run(model_we,feed_dict=feed_dict)
                print(step, file = outfile)
                print('*'*80, file = outfile)
                if(FLAGS.kb_only):
                    print('Epoch %d: kb_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, kb_loss_value, duration), file = outfile)
                elif(step<FLAGS.cbow_step):
                    print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration), file = outfile)
                    print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration))
                else:
                    print('Epoch %d: kb_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, kb_loss_value, duration), file = outfile)
                    print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration), file = outfile)
                if FLAGS.eval == 'map':
                    print('MAP Evaluation......', file = outfile)
                    train_map = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.train_test, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)
                    train_accu_list.append(train_map)
                    print('Training MAP:%.5f' %train_map, file = outfile)
                    dev_map = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.dev, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)
                    print('Dev MAP:%.5f' %dev_map, file = outfile)
                    accuracy = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.devtest, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)
                    accu_list.append(accuracy)
                    print('Devtest MAP:%.5f' %accuracy, file = outfile)
                    print('', file = outfile)
                if FLAGS.eval == 'acc':
                    train_acc = eval_model.do_train_eval(sess, h_error, nh_error, placeholder, train_data, train_data.epochs_completed, data_sets.train_neg, curr_best, FLAGS, error_file_name, outfile, data_sets.rel, data_sets.words)
                    train_accu_list.append(train_acc)
                    dev2_acc, test_acc, wrong_indices, wrong_preds = eval_model.do_eval(sess, h_error, placeholder, data_sets.dev, data_sets.devtest, data_sets.test, train_data.epochs_completed,curr_best, FLAGS, error_file_name, outfile, data_sets.rel, data_sets.words)
                    accu_list.append(dev2_acc)
                    test_accu_list.append(test_acc)
                    eval_model.knn(data_sets.nn_data, data_sets.words, embed, FLAGS.knn, outfile)
                    print("Accuracy for Devtest: %.5f" % dev2_acc, file = outfile)
                    print("Accuracy for Test: %.5f" %test_acc, file = outfile)
                    print ('', file = outfile)
                    # NOTE(review): curr_best is never updated after saving,
                    # so it stays 0 and every checkpoint with dev2_acc > 0
                    # re-saves the model — likely a bug; confirm intent.
                    if FLAGS.save and dev2_acc > curr_best:
                        print('saving model')
                        f = open(fname,'wb')
                        save_model = {}
                        save_model['embeddings'] = sess.run(model_we, feed_dict=feed_dict)
                        save_model['rel'] = sess.run(model_rel, feed_dict = feed_dict)
                        pickle.dump(save_model, f, protocol=pickle.HIGHEST_PROTOCOL)
                        f.close()
        # Final summary: best devtest checkpoint and its matching test score.
        print('Average of Top 10 Training Score', np.mean(sorted(train_accu_list, reverse = True)[:10]), file = outfile)
        opt_idx = np.argmax(np.asarray(accu_list))
        print('Epoch', opt_idx, file = outfile)
        print('Best Dev2 Score: %.5f' %accu_list[opt_idx], file = outfile)
        print('Best Test Score: %.5f' %test_accu_list[opt_idx], file = outfile)
def main(_):
    """Entry point invoked by tf.app.run(); the parsed argv is ignored."""
    run_training()


if __name__ == '__main__':
    # tf.app.run() parses the flags defined above, then calls main().
    tf.app.run()
| 59.405028 | 583 | 0.723421 |
from __future__ import division
from __future__ import print_function
import time
import random
import tensorflow as tf
import plus_input_data as input_data
import numpy as np
from plus_model import tf_model
import plus_eval_model
import map_eval
import plus_eval_model as eval_model
from collections import defaultdict
import pickle
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 1e-2, 'Initial learning rate.')
flags.DEFINE_float('cbow_learning_rate',1e-3, 'cbow learning rate')
flags.DEFINE_float('regularization', 0.0, 'l2 regularization parameters')
flags.DEFINE_boolean('save', False, 'Save the model')
flags.DEFINE_boolean('update_embed', True, 'Update the embeddings')
flags.DEFINE_integer('max_steps', 10000, 'Number of steps to run trainer.')
flags.DEFINE_integer('batch_size', 800, 'Batch size. Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', '../TransE_Order/data', 'Directory to put the data.')
flags.DEFINE_integer('cbow_step', 1000, 'Number of steps to run cbow trainer.')
flags.DEFINE_integer('embed_dim', 50, 'word embedding dimension')
flags.DEFINE_float('eps', 0., 'hierarchical error threshold')
flags.DEFINE_float('margin', 5, 'hinge loss margin')
flags.DEFINE_boolean('overfit', False, 'Over fit the dev data to check model')
flags.DEFINE_float('lower_scale', 0, 'lower initialize value for embeddings')
flags.DEFINE_float('higher_scale', 0.1, 'higher initialize value for embeddings')
flags.DEFINE_boolean('kb_only', True, 'whether to train kb only')
flags.DEFINE_integer('print_every',100,'Every 20 step, print out the evaluation results')
flags.DEFINE_float('alpha',0.01, 'regularization on error Function')
flags.DEFINE_boolean('rel_acc', True, 'check the different relation accurancy for test dataset')
flags.DEFINE_boolean('error_analysis', True, 'do error analysis for evaluation data')
flags.DEFINE_string('params_file', './params/','file to save parameters')
flags.DEFINE_string('error_file','./error_analysis/','dictionary to save error analysis result')
flags.DEFINE_string('ouput_file', './result/', 'print the result to this file')
flags.DEFINE_string('train', 'new_isa', 'training on both noisa and isa relatiosn')
flags.DEFINE_string('test','new_isa', 'test on isa relations')
flags.DEFINE_string('eval', 'acc', 'evaluate on MAP')
flags.DEFINE_integer('rel_size', 35, 'relation_size')
flags.DEFINE_integer('hidden_dim',100, 'lstm hidden layer dimension')
flags.DEFINE_boolean('peephole',True, 'whether to use peephole in lstm layer')
flags.DEFINE_string('tuple_model', 'ave', 'how to compose term vector, can choose from ave or lstm')
flags.DEFINE_float('epsilon', 1e-6, 'epsilon for optimizor')
flags.DEFINE_float('beta1',1.0, 'weight on order_embedding loss')
flags.DEFINE_float('beta2',1.0, 'Weight on word2vec loss')
flags.DEFINE_string("word2vec_train_data", 'text8', "Training text file.")
flags.DEFINE_integer('word2vec_batch_size', 256, 'Batch size. Must divide evenly into the dataset sizes.') gs.DEFINE_integer('data_shard_rows', 256*600, 'num text "lines" for training in one shard')
flags.DEFINE_integer('data_shard_cols', 100, 'num tokens per text line')
flags.DEFINE_integer('vocab_size', 80000, 'vocab_size')
flags.DEFINE_float('num_neg_samples', 30, 'num_neg_samples')
flags.DEFINE_integer("window_size", 5, "The number of words to predict to the left and right ")
flags.DEFINE_integer('knn', 10, 'how many neighbors want to check')
s(batch_size):
placeholder = {}
placeholder['t1_idx_placeholder'] = tf.placeholder(tf.int32, shape=(None, None))
placeholder['t1_msk_placeholder'] = tf.placeholder(tf.int32, shape=(None, None, 1))
placeholder['t1_length_placeholder'] = tf.placeholder(tf.int32, shape=(None, 1))
placeholder['t2_idx_placeholder'] = tf.placeholder(tf.int32, shape=(None, None))
placeholder['t2_msk_placeholder'] = tf.placeholder(tf.int32, shape=(None, None, 1))
placeholder['t2_length_placeholder'] = tf.placeholder(tf.int32, shape=(None, 1))
placeholder['rel_placeholder'] = tf.placeholder(tf.int32, shape=[None])
placeholder['rel_msk_placeholder'] = tf.placeholder(tf.float32, shape=[None, 1])
placeholder['label_placeholder'] = tf.placeholder(tf.float32, shape=[None])
placeholder['row_indices'] = tf.placeholder(tf.int32, shape = [FLAGS.word2vec_batch_size])
placeholder['real_batch_size'] = tf.placeholder(tf.int32, shape = [])
placeholder['data_shard'] = tf.placeholder(tf.int32, shape=[FLAGS.data_shard_rows, FLAGS.data_shard_cols])
return placeholder
def fill_feed_dict(data_set, placeholder, row_indices, rel):
r_idx, t1_idx, t2_idx, labels = data_set.next_batch(FLAGS.batch_size)
t1x, t1mask, t1length= input_data.prepare_data(t1_idx)
t2x, t2mask, t2length = input_data.prepare_data(t2_idx)
relmsk = input_data.rel_msk(r_idx, rel)
feed_dict = {
placeholder['t1_idx_placeholder']: t1x,
placeholder['t1_msk_placeholder']: t1mask,
placeholder['t1_length_placeholder']: t1length,
placeholder['t2_idx_placeholder']: t2x,
placeholder['t2_msk_placeholder']: t2mask,
placeholder['t2_length_placeholder']: t2length,
placeholder['rel_placeholder']: r_idx,
placeholder['label_placeholder']: labels,
placeholder['rel_msk_placeholder']: relmsk,
placeholder['row_indices']: row_indices,
placeholder['real_batch_size']: len(row_indices),
}
return feed_dict
def partial_word2vec_fill_feed_dict(placeholder, data_shared):
feed_dict = {
placeholder['data_shard']: data_shared,
}
return feed_dict
def run_training():
accu_list = []
train_accu_list = []
test_accu_list = []
curr_best = 0
outfile = open(FLAGS.ouput_file+'newtask_abs_l1_learning_rate_'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_eval'+str(FLAGS.eval)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)+'.txt', 'wt')
error_file_name = FLAGS.error_file+'newtask_abs_l1_learning_rate'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_eval'+str(FLAGS.eval)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)
fname = FLAGS.params_file+'newtask_abs_l1_learning_rate'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_eval'+str(FLAGS.eval)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)+'.pkl'
data_sets = input_data.read_data_sets(FLAGS, outfile)
if FLAGS.overfit:
train_data = data_sets.dev
else:
train_data = data_sets.train
with tf.Graph().as_default():
placeholder = placeholder_inputs(FLAGS.batch_size)
print('Build Model...', file = outfile)
model = tf_model(data_sets.words, data_sets.We, data_sets.rel, data_sets.Rel, placeholder, FLAGS)
print('Build Loss Function...', file = outfile)
kb_loss = model.kbc_loss()
cbow_loss = model.cbow_loss()
print('Build Encode Function...', file = outfile)
if FLAGS.tuple_model == 'ave':
embed_t1 = model.tuple_embedding(placeholder['t1_idx_placeholder'], placeholder['t1_msk_placeholder'], placeholder['t1_length_placeholder'], model.getWe())
embed_t2 = model.tuple_embedding(placeholder['t2_idx_placeholder'], placeholder['t2_msk_placeholder'], placeholder['t2_length_placeholder'], model.getWe())
elif FLAGS.tuple_model == 'lstm':
with tf.variable_scope('term_embed', reuse = True):
embed_t1 = model.tuple_lstm_embedding(placeholder['t1_idx_placeholder'], placeholder['t1_msk_placeholder'], placeholder['t1_length_placeholder'], model.getWe())
with tf.variable_scope('term_embed', reuse = True):
embed_t2 = model.tuple_lstm_embedding(placeholder['t2_idx_placeholder'], placeholder['t2_msk_placeholder'], placeholder['t2_length_placeholder'], model.getWe())
else:
print('Sorry, currently only support lstm terms and average terms')
embed_r = model.rel_embedding(model.getRel(), placeholder['rel_placeholder'], placeholder['rel_msk_placeholder'])
print('Build Hierarchical Error Function...', file = outfile)
h_error = model.hierarchical_error(embed_t1, embed_r, embed_t2, FLAGS.eps, FLAGS.batch_size, FLAGS.embed_dim)
nh_error = model.neg_hier_error()
print('Build Training Function...', file = outfile)
kb_train_op = model.training(kb_loss, FLAGS.epsilon, FLAGS.learning_rate)
cbow_train_op = model.training(cbow_loss, FLAGS.epsilon, FLAGS.cbow_learning_rate)
data_shared = np.asarray(data_sets.word2vec_data[:FLAGS.data_shard_rows*FLAGS.data_shard_cols]).reshape((FLAGS.data_shard_rows, FLAGS.data_shard_cols))
model_we = model.getWe()
model_rel = model.getRel()
summary_op = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
partial_feed_dict = partial_word2vec_fill_feed_dict(placeholder, data_shared)
sess.run(model.assign_data_shard_var, feed_dict = partial_feed_dict)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=sess.graph)
perm = np.arange(FLAGS.data_shard_rows)
word2vec_idx = 0
for step in range(FLAGS.max_steps):
start_time = time.time()
if (word2vec_idx + FLAGS.word2vec_batch_size) > FLAGS.data_shard_rows:
random.shuffle(perm)
word2vec_idx = 0
row_indices = perm[word2vec_idx:word2vec_idx+FLAGS.word2vec_batch_size]
word2vec_idx += FLAGS.word2vec_batch_size
feed_dict = fill_feed_dict(train_data, placeholder, row_indices, data_sets.rel)
if(FLAGS.kb_only):
t1 = time.time()
_, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict)
elif(step<FLAGS.cbow_step):
t1 = time.time()
_, cbow_loss_value = sess.run([cbow_train_op,cbow_loss], feed_dict=feed_dict)
else:
t1 = time.time()
_, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict)
t2 = time.time()
_, cbow_loss_value = sess.run([cbow_train_op,cbow_loss], feed_dict=feed_dict)
he_error = sess.run(h_error, feed_dict=feed_dict)
duration = time.time() - start_time
if (step%(FLAGS.print_every) == 0):
embed = sess.run(model_we,feed_dict=feed_dict)
print(step, file = outfile)
print('*'*80, file = outfile)
if(FLAGS.kb_only):
print('Epoch %d: kb_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, kb_loss_value, duration), file = outfile)
elif(step<FLAGS.cbow_step):
print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration), file = outfile)
print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration))
else:
print('Epoch %d: kb_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, kb_loss_value, duration), file = outfile)
print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration), file = outfile)
if FLAGS.eval == 'map':
print('MAP Evaluation......', file = outfile)
train_map = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.train_test, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)
train_accu_list.append(train_map)
print('Training MAP:%.5f' %train_map, file = outfile)
dev_map = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.dev, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)
print('Dev MAP:%.5f' %dev_map, file = outfile)
accuracy = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.devtest, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)
accu_list.append(accuracy)
print('Devtest MAP:%.5f' %accuracy, file = outfile)
print('', file = outfile)
if FLAGS.eval == 'acc':
train_acc = eval_model.do_train_eval(sess, h_error, nh_error, placeholder, train_data, train_data.epochs_completed, data_sets.train_neg, curr_best, FLAGS, error_file_name, outfile, data_sets.rel, data_sets.words)
train_accu_list.append(train_acc)
dev2_acc, test_acc, wrong_indices, wrong_preds = eval_model.do_eval(sess, h_error, placeholder, data_sets.dev, data_sets.devtest, data_sets.test, train_data.epochs_completed,curr_best, FLAGS, error_file_name, outfile, data_sets.rel, data_sets.words)
accu_list.append(dev2_acc)
test_accu_list.append(test_acc)
eval_model.knn(data_sets.nn_data, data_sets.words, embed, FLAGS.knn, outfile)
print("Accuracy for Devtest: %.5f" % dev2_acc, file = outfile)
print("Accuracy for Test: %.5f" %test_acc, file = outfile)
print ('', file = outfile)
if FLAGS.save and dev2_acc > curr_best:
print('saving model')
f = open(fname,'wb')
save_model = {}
save_model['embeddings'] = sess.run(model_we, feed_dict=feed_dict)
save_model['rel'] = sess.run(model_rel, feed_dict = feed_dict)
pickle.dump(save_model, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
Training Score', np.mean(sorted(train_accu_list, reverse = True)[:10]), file = outfile)
opt_idx = np.argmax(np.asarray(accu_list))
print('Epoch', opt_idx, file = outfile)
print('Best Dev2 Score: %.5f' %accu_list[opt_idx], file = outfile)
print('Best Test Score: %.5f' %test_accu_list[opt_idx], file = outfile)
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
| true | true |
f7fed59fc952b05ba0983864f343b9b6ad086682 | 3,040 | py | Python | mysite/mysite/settings.py | Akinwale7/Complete-Blog-Application-with-Search-Engine-Built-With-Django | c0095c44549b39c1fce292a2bd84fb042c967558 | [
"MIT"
] | 26 | 2019-10-04T20:37:43.000Z | 2021-11-15T19:54:29.000Z | mysite/mysite/settings.py | Akinwale7/Complete-Blog-Application-with-Search-Engine-Built-With-Django | c0095c44549b39c1fce292a2bd84fb042c967558 | [
"MIT"
] | 1 | 2022-01-14T11:29:11.000Z | 2022-01-14T11:29:11.000Z | mysite/mysite/settings.py | Akinwale7/Complete-Blog-Application-with-Search-Engine-Built-With-Django | c0095c44549b39c1fce292a2bd84fb042c967558 | [
"MIT"
] | 29 | 2019-05-19T11:43:02.000Z | 2021-11-16T13:05:30.000Z | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't$6p8^=z+%8_zm+qb%s8&!!sh@j%)lg4byd@nc8(s5#ozoz&-l'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Required by django.contrib.sites (and hence the sitemaps framework below).
SITE_ID = 1


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',                      # the blog application itself
    'taggit',                    # tagging for blog posts
    'django.contrib.sites',
    'django.contrib.sitemaps',
    'haystack',                  # search engine; backend in HAYSTACK_CONNECTIONS
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,  # look for templates inside each app's templates/ dir
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'

# SMTP settings for the post-sharing emails; disabled (console/dummy default
# backend is used while these stay commented out).
#EMAIL_HOST = 'smtp.gmail.com'
#EMAIL_HOST_USER = 'my_account@gmail.com'
#EMAIL_HOST_PASSWORD = ''
#EMAIL_PORT = 587
#EMAIL_USE_TLS = True

# Haystack full-text search backed by a local Solr core named "blog".
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
        'URL': 'http://127.0.0.1:8983/solr/blog'
    },
}
| 24.516129 | 71 | 0.689474 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 't$6p8^=z+%8_zm+qb%s8&!!sh@j%)lg4byd@nc8(s5#ozoz&-l'
DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'taggit',
'django.contrib.sites',
'django.contrib.sitemaps',
'haystack',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
#EMAIL_HOST = 'smtp.gmail.com'
#EMAIL_HOST_USER = 'my_account@gmail.com'
#EMAIL_HOST_PASSWORD = ''
#EMAIL_PORT = 587
#EMAIL_USE_TLS = True
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://127.0.0.1:8983/solr/blog'
},
}
| true | true |
f7fed5a8343a644ce42cf08be59a8acb56b644f0 | 29,286 | py | Python | mne/beamformer/tests/test_dics.py | mdovgialo/mne-python | 8ccc3999da6c15efa03840230c13aeb7bab5618d | [
"BSD-3-Clause"
] | 1 | 2021-06-07T09:40:55.000Z | 2021-06-07T09:40:55.000Z | mne/beamformer/tests/test_dics.py | mdovgialo/mne-python | 8ccc3999da6c15efa03840230c13aeb7bab5618d | [
"BSD-3-Clause"
] | 2 | 2020-09-18T00:09:20.000Z | 2020-11-10T17:47:10.000Z | mne/beamformer/tests/test_dics.py | mdovgialo/mne-python | 8ccc3999da6c15efa03840230c13aeb7bab5618d | [
"BSD-3-Clause"
] | 1 | 2021-01-07T23:08:52.000Z | 2021-01-07T23:08:52.000Z | # Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Britta Westner <britta.wstnr@gmail.com>
#
# License: BSD 3 clause
import copy as cp
import os.path as op
import pytest
from numpy.testing import assert_array_equal, assert_allclose
import numpy as np
import mne
from mne.datasets import testing
from mne.beamformer import (make_dics, apply_dics, apply_dics_epochs,
apply_dics_csd, read_beamformer, Beamformer)
from mne.beamformer._compute_beamformer import _prepare_beamformer_input
from mne.beamformer._dics import _prepare_noise_csd
from mne.time_frequency import csd_morlet
from mne.utils import object_diff, requires_h5py, catch_logging
from mne.proj import compute_proj_evoked, make_projector
from mne.surface import _compute_nearest
from mne.beamformer.tests.test_lcmv import _assert_weight_norm
from mne.time_frequency import CrossSpectralDensity
from mne.time_frequency.csd import _sym_mat_to_vector
# Locations of the files from the mne-testing-data dataset used below.
# ``download=False`` means the tests are skipped if the data is absent.
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_trunc_raw-eve.fif')
subjects_dir = op.join(data_path, 'subjects')
@pytest.fixture(scope='module', params=[testing._pytest_param()])
def _load_forward():
    """Read the forward operators shared by all DICS tests.

    Returns a tuple ``(free, surface-oriented, fixed, volume)`` of forward
    solutions; the first three are derived from the same MEG-only operator.
    """
    # The volume operator is independent of the surface ones.
    vol = mne.read_forward_solution(fname_fwd_vol)
    # MEG-only, free-orientation operator as the common starting point.
    free = mne.read_forward_solution(fname_fwd)
    free = mne.pick_types_forward(free, meg=True, eeg=False)
    free = mne.convert_forward_solution(free, surf_ori=False)
    # Derive the surface-oriented and fixed-orientation variants from it.
    surf = mne.convert_forward_solution(free, surf_ori=True,
                                        use_cps=False)
    fixed = mne.convert_forward_solution(free, force_fixed=True,
                                         use_cps=False)
    return free, surf, fixed, vol
def _simulate_data(fwd, idx):  # Somewhere on the frontal lobe by default
    """Simulate an oscillator on the cortex.

    A single-source 20 Hz sinusoid is placed at vertex ``idx`` of the first
    source space of ``fwd`` and projected to (decimated) sensor space.

    Returns ``(epochs, evoked, csd, source_vertno, label, vertices,
    source_ind)`` where ``source_ind`` indexes the true source within
    ``vertices`` (label vertices restricted to the source space).
    """
    source_vertno = fwd['src'][0]['vertno'][idx]
    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data
    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )
    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])
    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)
    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise
    # Define a single epoch (weird baseline but shouldn't matter)
    epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0,
                        tmax=raw.times[-1], baseline=(0., 0.), preload=True)
    evoked = epochs.average()
    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=5)
    # Find the (unique) anatomical label that contains the simulated source.
    labels = mne.read_labels_from_annot(
        'sample', hemi='lh', subjects_dir=subjects_dir)
    label = [
        label for label in labels if np.in1d(source_vertno, label.vertices)[0]]
    assert len(label) == 1
    label = label[0]
    # Restrict the label vertices to those present in the source space and
    # locate the true source within that restricted list.
    vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    assert vertices[source_ind] == source_vertno
    return epochs, evoked, csd, source_vertno, label, vertices, source_ind
# Source-vertex indices exercised by the tests; the 100/233 cases only run
# when slow tests are enabled (pytest.mark.slowtest).
idx_param = pytest.mark.parametrize('idx', [
    0,
    pytest.param(100, marks=pytest.mark.slowtest),
    200,
    pytest.param(233, marks=pytest.mark.slowtest),
])
def _rand_csd(rng, info):
    """Build a random Hermitian matrix scaled like an ad-hoc covariance.

    The diagonal is pinned to the ad-hoc covariance values so that the
    channel scalings match real MEG data.
    """
    diag = mne.make_ad_hoc_cov(info).data
    n_ch = diag.size
    # Random complex matrix; A @ A^H gives a Hermitian PSD correlation
    # structure.
    mat = rng.randn(n_ch, n_ch) + 1j * rng.randn(n_ch, n_ch)
    mat = mat @ mat.conj().T
    # Apply the per-channel scalings on both axes, then fix the diagonal.
    mat = mat * diag * diag[:, np.newaxis]
    mat.flat[::n_ch + 1] = diag
    return mat
def _make_rand_csd(info, csd):
    """Make a random full-rank noise CSD matching the channels of ``csd``.

    Returns ``(noise_csd, rank)``; the assert guarantees the random matrix
    is full rank (same rank as the number of channels).
    """
    rng = np.random.RandomState(0)
    data = _rand_csd(rng, info)
    # now we need to have the same null space as the data csd
    s, u = np.linalg.eigh(csd.get_data(csd.frequencies[0]))
    mask = np.abs(s) >= s[-1] * 1e-7
    rank = mask.sum()
    assert rank == len(data) == len(info['ch_names'])
    noise_csd = CrossSpectralDensity(
        _sym_mat_to_vector(data), info['ch_names'], 0., csd.n_fft)
    return noise_csd, rank
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_h5py
@idx_param
@pytest.mark.parametrize('whiten', [
    pytest.param(False, marks=pytest.mark.slowtest),
    True,
])
def test_make_dics(tmpdir, _load_forward, idx, whiten):
    """Test making DICS beamformer filters.

    Checks parameter validation, the shape/metadata of the returned
    Beamformer, weight normalization, orientation picking, real-valued
    filters, and HDF5 round-tripping.
    """
    # We only test proper handling of parameters here. Testing the results is
    # done in test_apply_dics_timeseries and test_apply_dics_csd.
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, _, csd, _, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    with pytest.raises(ValueError, match='several sensor types'):
        make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None)
    if whiten:
        # Whitening path: a full-rank random noise CSD over all channels.
        noise_csd, rank = _make_rand_csd(epochs.info, csd)
        assert rank == len(epochs.info['ch_names']) == 62
    else:
        noise_csd = None
        epochs.pick_types(meg='grad')
    with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"):
        make_dics(epochs.info, fwd_fixed, csd, pick_ori="notexistent",
                  noise_csd=noise_csd)
    with pytest.raises(ValueError, match='rank, if str'):
        make_dics(epochs.info, fwd_fixed, csd, rank='foo', noise_csd=noise_csd)
    with pytest.raises(TypeError, match='rank must be'):
        make_dics(epochs.info, fwd_fixed, csd, rank=1., noise_csd=noise_csd)
    # Test if fixed forward operator is detected when picking normal
    # orientation
    with pytest.raises(ValueError, match='forward operator with free ori'):
        make_dics(epochs.info, fwd_fixed, csd, pick_ori="normal",
                  noise_csd=noise_csd)
    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    with pytest.raises(ValueError, match='oriented in surface coordinates'):
        make_dics(epochs.info, fwd_free, csd, pick_ori="normal",
                  noise_csd=noise_csd)
    # Test if volume forward operator is detected when picking normal
    # orientation
    with pytest.raises(ValueError, match='oriented in surface coordinates'):
        make_dics(epochs.info, fwd_vol, csd, pick_ori="normal",
                  noise_csd=noise_csd)
    # Test invalid combinations of parameters
    with pytest.raises(ValueError, match='reduce_rank cannot be used with'):
        make_dics(epochs.info, fwd_free, csd, inversion='single',
                  reduce_rank=True, noise_csd=noise_csd)
    # TODO: Restore this?
    # with pytest.raises(ValueError, match='not stable with depth'):
    #     make_dics(epochs.info, fwd_free, csd, weight_norm='unit-noise-gain',
    #               inversion='single', depth=None)
    # Sanity checks on the returned filters
    n_freq = len(csd.frequencies)
    vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
    n_verts = len(vertices)
    n_orient = 3
    n_channels = len(epochs.ch_names)
    # Test return values
    weight_norm = 'unit-noise-gain'
    inversion = 'single'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
                        weight_norm=weight_norm, depth=None, real_filter=False,
                        noise_csd=noise_csd, inversion=inversion)
    assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
    assert np.iscomplexobj(filters['weights'])
    assert filters['csd'].ch_names == epochs.ch_names
    assert isinstance(filters['csd'], CrossSpectralDensity)
    assert filters['ch_names'] == epochs.ch_names
    assert_array_equal(filters['proj'], np.eye(n_channels))
    assert_array_equal(filters['vertices'][0], vertices)
    assert_array_equal(filters['vertices'][1], [])  # Label was on the LH
    assert filters['subject'] == fwd_free['src']._subject
    assert filters['pick_ori'] is None
    assert filters['is_free_ori']
    assert filters['inversion'] == inversion
    assert filters['weight_norm'] == weight_norm
    assert 'DICS' in repr(filters)
    assert 'subject "sample"' in repr(filters)
    assert str(len(vertices)) in repr(filters)
    assert str(n_channels) in repr(filters)
    assert 'rank' not in repr(filters)
    # Recompute the (whitened) gain matrix to check weight normalization
    # against it directly.
    _, noise_cov = _prepare_noise_csd(csd, noise_csd, real_filter=False)
    _, _, _, _, G, _, _, _ = _prepare_beamformer_input(
        epochs.info, fwd_surf, label, 'vector', combine_xyz=False, exp=None,
        noise_cov=noise_cov)
    G.shape = (n_channels, n_verts, n_orient)
    G = G.transpose(1, 2, 0).conj()  # verts, orient, ch
    _assert_weight_norm(filters, G)
    inversion = 'matrix'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
                        weight_norm=weight_norm, depth=None,
                        noise_csd=noise_csd, inversion=inversion)
    _assert_weight_norm(filters, G)
    weight_norm = 'unit-noise-gain-invariant'
    inversion = 'single'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
                        weight_norm=weight_norm, depth=None,
                        noise_csd=noise_csd, inversion=inversion)
    _assert_weight_norm(filters, G)
    # Test picking orientations. Also test weight norming under these different
    # conditions.
    weight_norm = 'unit-noise-gain'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='normal', weight_norm=weight_norm,
                        depth=None, noise_csd=noise_csd, inversion=inversion)
    n_orient = 1
    assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
    assert not filters['is_free_ori']
    _assert_weight_norm(filters, G)
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='max-power', weight_norm=weight_norm,
                        depth=None, noise_csd=noise_csd, inversion=inversion)
    n_orient = 1
    assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
    assert not filters['is_free_ori']
    _assert_weight_norm(filters, G)
    # From here on, only work on a single frequency
    csd = csd[0]
    # Test using a real-valued filter
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='normal', real_filter=True,
                        noise_csd=noise_csd)
    assert not np.iscomplexobj(filters['weights'])
    # Test forward normalization. When inversion='single', the power of a
    # unit-noise CSD should be 1, even without weight normalization.
    if not whiten:
        csd_noise = csd.copy()
        inds = np.triu_indices(csd.n_channels)
        # Using [:, :] syntax for in-place broadcasting
        csd_noise._data[:, :] = np.eye(csd.n_channels)[inds][:, np.newaxis]
        filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label,
                            weight_norm=None, depth=1., noise_csd=noise_csd,
                            inversion='single')
        w = filters['weights'][0][:3]
        assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-6,
                        atol=0)
    # Test turning off both forward and weight normalization
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        weight_norm=None, depth=None, noise_csd=noise_csd)
    w = filters['weights'][0][:3]
    assert not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0,
                           rtol=1e-2, atol=0)
    # Test neural-activity-index weight normalization. It should be a scaled
    # version of the unit-noise-gain beamformer.
    filters_nai = make_dics(
        epochs.info, fwd_surf, csd, label=label, pick_ori='max-power',
        weight_norm='nai', depth=None, noise_csd=noise_csd)
    w_nai = filters_nai['weights'][0]
    filters_ung = make_dics(
        epochs.info, fwd_surf, csd, label=label, pick_ori='max-power',
        weight_norm='unit-noise-gain', depth=None, noise_csd=noise_csd)
    w_ung = filters_ung['weights'][0]
    assert_allclose(np.corrcoef(np.abs(w_nai).ravel(),
                                np.abs(w_ung).ravel()), 1, atol=1e-7)
    # Test whether spatial filter contains src_type
    assert 'src_type' in filters
    # Round-trip the filters through HDF5 and check nothing changed.
    fname = op.join(str(tmpdir), 'filters-dics.h5')
    filters.save(fname)
    filters_read = read_beamformer(fname)
    assert isinstance(filters, Beamformer)
    assert isinstance(filters_read, Beamformer)
    for key in ['tmin', 'tmax']:  # deal with strictness of object_diff
        setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key)))
    assert object_diff(filters, filters_read) == ''
def _fwd_dist(power, fwd, vertices, source_ind, tidx=1):
idx = np.argmax(power.data[:, tidx])
rr_got = fwd['src'][0]['rr'][vertices[idx]]
rr_want = fwd['src'][0]['rr'][vertices[source_ind]]
return np.linalg.norm(rr_got - rr_want)
@idx_param
@pytest.mark.parametrize('inversion, weight_norm', [
    ('single', None),
    ('matrix', 'unit-noise-gain'),
])
def test_apply_dics_csd(_load_forward, idx, inversion, weight_norm):
    """Test applying a DICS beamformer to a CSD matrix.

    Verifies that source power computed from the CSD localizes the
    simulated 20 Hz source exactly, for free, surface-oriented, and
    fixed forward operators.
    """
    fwd_free, fwd_surf, fwd_fixed, _ = _load_forward
    epochs, _, csd, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    reg = 1  # Lots of regularization for our toy dataset
    with pytest.raises(ValueError, match='several sensor types'):
        make_dics(epochs.info, fwd_free, csd)
    epochs.pick_types(meg='grad')
    # Try different types of forward models
    assert label.hemi == 'lh'
    for fwd in [fwd_free, fwd_surf, fwd_fixed]:
        filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg,
                            inversion=inversion, weight_norm=weight_norm)
        power, f = apply_dics_csd(csd, filters)
        assert f == [10, 20]
        # Did we find the true source at 20 Hz?
        dist = _fwd_dist(power, fwd_free, vertices, source_ind)
        assert dist == 0.
        # Is the signal stronger at 20 Hz than 10?
        assert power.data[source_ind, 1] > power.data[source_ind, 0]
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power'])
@pytest.mark.parametrize('inversion', ['single', 'matrix'])
@idx_param
def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx):
    """Test picking different orientations and inversion modes.

    Checks localization accuracy, the unit-noise-gain property (power of a
    unit-noise CSD), and forward (depth) normalization as an alternative to
    weight normalization.
    """
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, _, csd, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    epochs.pick_types(meg='grad')
    # 'matrix' inversion needs more regularization on this toy data.
    reg_ = 5 if inversion == 'matrix' else 1
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        reg=reg_, pick_ori=pick_ori,
                        inversion=inversion, depth=None,
                        weight_norm='unit-noise-gain')
    power, f = apply_dics_csd(csd, filters)
    assert f == [10, 20]
    dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
    # This is 0. for unit-noise-gain-invariant:
    assert dist <= (0.02 if inversion == 'matrix' else 0.)
    assert power.data[source_ind, 1] > power.data[source_ind, 0]
    # Test unit-noise-gain weighting: noise power should be flat
    # (3 for free orientation, 1 otherwise).
    csd_noise = csd.copy()
    inds = np.triu_indices(csd.n_channels)
    csd_noise._data[...] = np.eye(csd.n_channels)[inds][:, np.newaxis]
    noise_power, f = apply_dics_csd(csd_noise, filters)
    want_norm = 3 if pick_ori is None else 1.
    assert_allclose(noise_power.data, want_norm, atol=1e-7)
    # Test filter with forward normalization instead of weight
    # normalization
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        reg=reg_, pick_ori=pick_ori,
                        inversion=inversion, weight_norm=None,
                        depth=1.)
    power, f = apply_dics_csd(csd, filters)
    assert f == [10, 20]
    dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
    # Per-source-index tolerances for the less accurate 'matrix' inversion.
    mat_tol = {0: 0.055, 100: 0.20, 200: 0.015, 233: 0.035}[idx]
    max_ = (mat_tol if inversion == 'matrix' else 0.)
    assert 0 <= dist <= max_
    assert power.data[source_ind, 1] > power.data[source_ind, 0]
def _nearest_vol_ind(fwd_vol, fwd, vertices, source_ind):
    """Index of the volume source nearest to a surface source position."""
    return _compute_nearest(
        fwd_vol['source_rr'],
        fwd['src'][0]['rr'][vertices][source_ind][np.newaxis])[0]
@idx_param
def test_real(_load_forward, idx):
    """Test using a real-valued filter.

    Also covers rank reduction, volume source spaces, and the warning for
    filters missing ``src_type``.
    """
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, _, csd, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    epochs.pick_types(meg='grad')
    reg = 1  # Lots of regularization for our toy dataset
    filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg,
                             real_filter=True, inversion='single')
    # Also test here that no warnings are thrown - implemented to check
    # whether the "src should not be None" warning occurs:
    with pytest.warns(None) as w:
        power, f = apply_dics_csd(csd, filters_real)
    assert len(w) == 0
    assert f == [10, 20]
    dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
    assert dist == 0
    assert power.data[source_ind, 1] > power.data[source_ind, 0]
    # Test rank reduction
    filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5,
                             pick_ori='max-power', inversion='matrix',
                             reduce_rank=True)
    power, f = apply_dics_csd(csd, filters_real)
    assert f == [10, 20]
    dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
    assert dist == 0
    assert power.data[source_ind, 1] > power.data[source_ind, 0]
    # Test computing source power on a volume source space
    filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg,
                            inversion='single')
    power, f = apply_dics_csd(csd, filters_vol)
    vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind)
    assert f == [10, 20]
    dist = _fwd_dist(
        power, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind)
    # Volume grids are coarser, so allow a small distance for some indices.
    vol_tols = {100: 0.008, 200: 0.008}
    assert dist <= vol_tols.get(idx, 0.)
    assert power.data[vol_source_ind, 1] > power.data[vol_source_ind, 0]
    # check whether a filters object without src_type throws expected warning
    del filters_vol['src_type']  # emulate 0.16 behaviour to cause warning
    with pytest.warns(RuntimeWarning, match='spatial filter does not contain '
                      'src_type'):
        apply_dics_csd(csd, filters_vol)
@pytest.mark.filterwarnings("ignore:The use of several sensor types with the"
                            ":RuntimeWarning")
@idx_param
def test_apply_dics_timeseries(_load_forward, idx):
    """Test DICS applied to timeseries data.

    Covers applying filters to evoked data and epochs, channel mismatch
    detection, SSP projection handling, generator output, and volume
    source spaces.
    """
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, evoked, csd, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    reg = 5  # Lots of regularization for our toy dataset
    with pytest.raises(ValueError, match='several sensor types'):
        make_dics(evoked.info, fwd_surf, csd)
    evoked.pick_types(meg='grad')
    multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label,
                                 reg=reg)
    # Sanity checks on the resulting STC after applying DICS on evoked
    stcs = apply_dics(evoked, multiple_filters)
    assert isinstance(stcs, list)
    assert len(stcs) == len(multiple_filters['weights'])
    assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0])
    assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1])
    assert_allclose(stcs[0].times, evoked.times)
    # Applying filters for multiple frequencies on epoch data should fail
    with pytest.raises(ValueError, match='computed for a single frequency'):
        apply_dics_epochs(epochs, multiple_filters)
    # From now on, only apply filters with a single frequency (20 Hz).
    csd20 = csd.pick_frequency(20)
    filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg,
                        inversion='single')
    # Sanity checks on the resulting STC after applying DICS on epochs.
    # Also test here that no warnings are thrown - implemented to check
    # whether the "src should not be None" warning occurs
    with pytest.warns(None) as w:
        stcs = apply_dics_epochs(epochs, filters)
    assert len(w) == 0
    assert isinstance(stcs, list)
    assert len(stcs) == 1
    assert_array_equal(stcs[0].vertices[0], filters['vertices'][0])
    assert_array_equal(stcs[0].vertices[1], filters['vertices'][1])
    assert_allclose(stcs[0].times, epochs.times)
    # Did we find the source?
    stc = (stcs[0] ** 2).mean()
    dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0)
    assert dist == 0
    # Apply filters to evoked
    stc = apply_dics(evoked, filters)
    stc = (stc ** 2).mean()
    dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0)
    assert dist == 0
    # Test if wrong channel selection is detected in application of filter
    evoked_ch = cp.deepcopy(evoked)
    evoked_ch.pick_channels(evoked_ch.ch_names[:-1])
    with pytest.raises(ValueError, match='MEG 2633 which is not present'):
        apply_dics(evoked_ch, filters)
    # Test whether projections are applied, by adding a custom projection
    filters_noproj = make_dics(evoked.info, fwd_surf, csd20, label=label)
    stc_noproj = apply_dics(evoked, filters_noproj)
    evoked_proj = evoked.copy()
    p = compute_proj_evoked(evoked_proj, n_grad=1, n_mag=0, n_eeg=0)
    proj_matrix = make_projector(p, evoked_proj.ch_names)[0]
    evoked_proj.info['projs'] += p
    filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label)
    assert_array_equal(filters_proj['proj'], proj_matrix)
    stc_proj = apply_dics(evoked_proj, filters_proj)
    assert np.any(np.not_equal(stc_noproj.data, stc_proj.data))
    # Test detecting incompatible projections
    filters_proj['proj'] = filters_proj['proj'][:-1, :-1]
    with pytest.raises(ValueError, match='operands could not be broadcast'):
        apply_dics(evoked_proj, filters_proj)
    # Test returning a generator
    stcs = apply_dics_epochs(epochs, filters, return_generator=False)
    stcs_gen = apply_dics_epochs(epochs, filters, return_generator=True)
    assert_array_equal(stcs[0].data, next(stcs_gen).data)
    # Test computing timecourses on a volume source space
    filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg,
                            inversion='single')
    stc = apply_dics(evoked, filters_vol)
    stc = (stc ** 2).mean()
    assert stc.data.shape[1] == 1
    vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind)
    dist = _fwd_dist(stc, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind,
                     tidx=0)
    # Volume grids are coarser, so allow a small distance for some indices.
    vol_tols = {100: 0.008, 200: 0.015}
    vol_tol = vol_tols.get(idx, 0.)
    assert dist <= vol_tol
    # check whether a filters object without src_type throws expected warning
    del filters_vol['src_type']  # emulate 0.16 behaviour to cause warning
    with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'):
        apply_dics_epochs(epochs, filters_vol)
def _cov_as_csd(cov, info):
    """Convert a covariance matrix into a CrossSpectralDensity object.

    A small random complex perturbation is added so the result exercises
    the complex code paths of the DICS machinery.
    """
    rng = np.random.RandomState(0)
    assert cov['data'].ndim == 2
    assert len(cov['data']) == len(cov['names'])
    # we need to make this have at least some complex structure
    data = cov['data'] + 1e-1 * _rand_csd(rng, info)
    assert data.dtype == np.complex128
    return CrossSpectralDensity(_sym_mat_to_vector(data), cov['names'], 0., 16)
# Just test free ori here (assume fixed is same as LCMV if these are)
# Changes here should be synced with test_lcmv.py
@pytest.mark.slowtest
@pytest.mark.parametrize(
    'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, real_filter', [
        (0.05, None, 'unit-noise-gain-invariant', False, None, 26, 28, False),
        (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, False),
        (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, True),
        (0.05, None, 'unit-noise-gain', False, None, 13, 14, False),
        (0.05, None, 'unit-noise-gain', True, None, 35, 37, False),
        (0.05, None, 'nai', True, None, 35, 37, False),
        (0.05, None, None, True, None, 12, 14, False),
        (0.05, None, None, True, 0.8, 39, 43, False),
        (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20,
         False),
        (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, False),
        (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, True),
        (0.05, 'max-power', 'nai', True, None, 21, 24, False),
        (0.05, 'max-power', None, True, None, 7, 10, False),
        (0.05, 'max-power', None, True, 0.8, 15, 18, False),
        # skip most no-reg tests, assume others are equal to LCMV if these are
        (0.00, None, None, True, None, 21, 32, False),
        (0.00, 'max-power', None, True, None, 13, 19, False),
    ])
def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm,
                                use_cov, depth, lower, upper, real_filter):
    """Test localization bias for free-orientation DICS.

    ``lower``/``upper`` bound the expected percentage of unbiased sources
    for each parameter combination.
    """
    evoked, fwd, noise_cov, data_cov, want = bias_params_free
    noise_csd = _cov_as_csd(noise_cov, evoked.info)
    data_csd = _cov_as_csd(data_cov, evoked.info)
    del noise_cov, data_cov
    if not use_cov:
        # Without whitening, restrict to a single channel type.
        evoked.pick_types(meg='grad')
        noise_csd = None
    loc = apply_dics(evoked, make_dics(
        evoked.info, fwd, data_csd, reg, noise_csd, pick_ori=pick_ori,
        weight_norm=weight_norm, depth=depth, real_filter=real_filter)).data
    loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc)
    # Compute the percentage of sources for which there is no loc bias:
    perc = (want == np.argmax(loc, axis=0)).mean() * 100
    assert lower <= perc <= upper
@testing.requires_testing_data
@idx_param
@pytest.mark.parametrize('whiten', (False, True))
def test_make_dics_rank(_load_forward, idx, whiten):
    """Test making DICS beamformer filters with rank param.

    Checks that the estimated rank is logged, that an explicitly reduced
    rank still gives highly correlated (but not identical) source
    estimates, and that a rank-deficient noise CSD raises an error.
    """
    _, fwd_surf, fwd_fixed, _ = _load_forward
    epochs, _, csd, _, label, _, _ = _simulate_data(fwd_fixed, idx)
    if whiten:
        noise_csd, want_rank = _make_rand_csd(epochs.info, csd)
        kind = 'mag + grad'
    else:
        noise_csd = None
        epochs.pick_types(meg='grad')
        want_rank = len(epochs.ch_names)
        assert want_rank == 41
        kind = 'grad'
    with catch_logging() as log:
        filters = make_dics(
            epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
            verbose=True)
    log = log.getvalue()
    assert f'Estimated rank ({kind}): {want_rank}' in log, log
    stc, _ = apply_dics_csd(csd, filters)
    other_rank = want_rank - 1  # shouldn't make a huge difference
    use_rank = dict(meg=other_rank)
    if not whiten:
        # XXX it's a bug that our rank functions don't treat "meg"
        # properly here...
        use_rank['grad'] = use_rank.pop('meg')
    with catch_logging() as log:
        filters_2 = make_dics(
            epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
            rank=use_rank, verbose=True)
    log = log.getvalue()
    assert f'Computing rank from covariance with rank={use_rank}' in log, log
    stc_2, _ = apply_dics_csd(csd, filters_2)
    corr = np.corrcoef(stc_2.data.ravel(), stc.data.ravel())[0, 1]
    assert 0.8 < corr < 0.99999
    # degenerate conditions
    if whiten:
        # make rank deficient by zeroing out the first channel's row and
        # column (was ``data[:0] = 0``, an empty-slice no-op typo)
        data = noise_csd.get_data(0.)
        data[0] = data[:, 0] = 0
        noise_csd._data[:, 0] = _sym_mat_to_vector(data)
        with pytest.raises(ValueError, match='meg data rank.*the noise rank'):
            filters = make_dics(
                epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
                verbose=True)
| 43.258493 | 79 | 0.661818 |
import copy as cp
import os.path as op
import pytest
from numpy.testing import assert_array_equal, assert_allclose
import numpy as np
import mne
from mne.datasets import testing
from mne.beamformer import (make_dics, apply_dics, apply_dics_epochs,
apply_dics_csd, read_beamformer, Beamformer)
from mne.beamformer._compute_beamformer import _prepare_beamformer_input
from mne.beamformer._dics import _prepare_noise_csd
from mne.time_frequency import csd_morlet
from mne.utils import object_diff, requires_h5py, catch_logging
from mne.proj import compute_proj_evoked, make_projector
from mne.surface import _compute_nearest
from mne.beamformer.tests.test_lcmv import _assert_weight_norm
from mne.time_frequency import CrossSpectralDensity
from mne.time_frequency.csd import _sym_mat_to_vector
# Locations of the files from the mne-testing-data dataset used below.
# ``download=False`` means the tests are skipped if the data is absent.
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_trunc_raw-eve.fif')
subjects_dir = op.join(data_path, 'subjects')
@pytest.fixture(scope='module', params=[testing._pytest_param()])
def _load_forward():
    """Load the free, surface-oriented, fixed, and volume forward models."""
    fwd_free = mne.read_forward_solution(fname_fwd)
    fwd_free = mne.pick_types_forward(fwd_free, meg=True, eeg=False)
    fwd_free = mne.convert_forward_solution(fwd_free, surf_ori=False)
    fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True,
                                            use_cps=False)
    fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True,
                                             use_cps=False)
    fwd_vol = mne.read_forward_solution(fname_fwd_vol)
    return fwd_free, fwd_surf, fwd_fixed, fwd_vol
def _simulate_data(fwd, idx):
    """Simulate a 20 Hz oscillator at source-space vertex ``idx``.

    Returns ``(epochs, evoked, csd, source_vertno, label, vertices,
    source_ind)``.
    """
    source_vertno = fwd['src'][0]['vertno'][idx]
    sfreq = 50.  # Hz
    times = np.arange(10 * sfreq) / sfreq  # 10 s of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # louder in the first half
    signal *= 1e-9  # scale to MEG-like amplitudes
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )
    # Sensor info merged with the forward's positions, decimated 5x for speed.
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])
    # Project the source signal to sensor space and add a little noise.
    raw = mne.apply_forward_raw(fwd, stc, info)
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise
    # A single epoch spanning the whole recording.
    epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0,
                        tmax=raw.times[-1], baseline=(0., 0.), preload=True)
    evoked = epochs.average()
    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=5)
    # Find the (unique) anatomical label containing the simulated source.
    labels = mne.read_labels_from_annot(
        'sample', hemi='lh', subjects_dir=subjects_dir)
    label = [
        label for label in labels if np.in1d(source_vertno, label.vertices)[0]]
    assert len(label) == 1
    label = label[0]
    vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    assert vertices[source_ind] == source_vertno
    return epochs, evoked, csd, source_vertno, label, vertices, source_ind
# Source-vertex indices exercised by the tests; the 100/233 cases only run
# when slow tests are enabled (pytest.mark.slowtest).
idx_param = pytest.mark.parametrize('idx', [
    0,
    pytest.param(100, marks=pytest.mark.slowtest),
    200,
    pytest.param(233, marks=pytest.mark.slowtest),
])
def _rand_csd(rng, info):
    """Make a random Hermitian matrix scaled like an ad-hoc covariance."""
    scales = mne.make_ad_hoc_cov(info).data
    n = scales.size
    # Some random complex correlation structure (with channel scalings)
    data = rng.randn(n, n) + 1j * rng.randn(n, n)
    data = data @ data.conj().T
    data *= scales
    data *= scales[:, np.newaxis]
    # Pin the diagonal to the ad-hoc covariance values.
    data.flat[::n + 1] = scales
    return data
def _make_rand_csd(info, csd):
    """Make a random full-rank noise CSD; returns ``(noise_csd, rank)``."""
    rng = np.random.RandomState(0)
    data = _rand_csd(rng, info)
    # now we need to have the same null space as the data csd
    s, u = np.linalg.eigh(csd.get_data(csd.frequencies[0]))
    mask = np.abs(s) >= s[-1] * 1e-7
    rank = mask.sum()
    assert rank == len(data) == len(info['ch_names'])
    noise_csd = CrossSpectralDensity(
        _sym_mat_to_vector(data), info['ch_names'], 0., csd.n_fft)
    return noise_csd, rank
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_h5py
@idx_param
@pytest.mark.parametrize('whiten', [
    pytest.param(False, marks=pytest.mark.slowtest),
    True,
])
def test_make_dics(tmpdir, _load_forward, idx, whiten):
    """Test making DICS beamformer filters (parameter handling + I/O)."""
    # We only test proper handling of parameters here. Testing the results is
    # done in test_apply_dics_timeseries and test_apply_dics_csd.
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, _, csd, _, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    with pytest.raises(ValueError, match='several sensor types'):
        make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None)
    if whiten:
        noise_csd, rank = _make_rand_csd(epochs.info, csd)
        assert rank == len(epochs.info['ch_names']) == 62
    else:
        noise_csd = None
        epochs.pick_types(meg='grad')
    with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"):
        make_dics(epochs.info, fwd_fixed, csd, pick_ori="notexistent",
                  noise_csd=noise_csd)
    with pytest.raises(ValueError, match='rank, if str'):
        make_dics(epochs.info, fwd_fixed, csd, rank='foo', noise_csd=noise_csd)
    with pytest.raises(TypeError, match='rank must be'):
        make_dics(epochs.info, fwd_fixed, csd, rank=1., noise_csd=noise_csd)
    # Test if fixed forward operator is detected when picking normal
    # orientation
    with pytest.raises(ValueError, match='forward operator with free ori'):
        make_dics(epochs.info, fwd_fixed, csd, pick_ori="normal",
                  noise_csd=noise_csd)
    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    with pytest.raises(ValueError, match='oriented in surface coordinates'):
        make_dics(epochs.info, fwd_free, csd, pick_ori="normal",
                  noise_csd=noise_csd)
    # Test if volume forward operator is detected when picking normal
    # orientation
    with pytest.raises(ValueError, match='oriented in surface coordinates'):
        make_dics(epochs.info, fwd_vol, csd, pick_ori="normal",
                  noise_csd=noise_csd)
    # Test invalid combinations of parameters
    with pytest.raises(ValueError, match='reduce_rank cannot be used with'):
        make_dics(epochs.info, fwd_free, csd, inversion='single',
                  reduce_rank=True, noise_csd=noise_csd)
    # TODO: Restore this?
    # with pytest.raises(ValueError, match='not stable with depth'):
    #     make_dics(epochs.info, fwd_free, csd, weight_norm='unit-noise-gain',
    #               inversion='single', depth=None)
    # Sanity checks on the returned filters
    n_freq = len(csd.frequencies)
    vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
    n_verts = len(vertices)
    n_orient = 3
    n_channels = len(epochs.ch_names)
    # Test return values
    weight_norm = 'unit-noise-gain'
    inversion = 'single'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
                        weight_norm=weight_norm, depth=None, real_filter=False,
                        noise_csd=noise_csd, inversion=inversion)
    assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
    assert np.iscomplexobj(filters['weights'])
    assert filters['csd'].ch_names == epochs.ch_names
    assert isinstance(filters['csd'], CrossSpectralDensity)
    assert filters['ch_names'] == epochs.ch_names
    assert_array_equal(filters['proj'], np.eye(n_channels))
    assert_array_equal(filters['vertices'][0], vertices)
    assert_array_equal(filters['vertices'][1], [])  # Label was on the LH
    assert filters['subject'] == fwd_free['src']._subject
    assert filters['pick_ori'] is None
    assert filters['is_free_ori']
    assert filters['inversion'] == inversion
    assert filters['weight_norm'] == weight_norm
    assert 'DICS' in repr(filters)
    assert 'subject "sample"' in repr(filters)
    assert str(len(vertices)) in repr(filters)
    assert str(n_channels) in repr(filters)
    assert 'rank' not in repr(filters)
    # Check that the computed weights are properly normalized against the
    # (whitened) leadfield G under each weight_norm / inversion combination.
    _, noise_cov = _prepare_noise_csd(csd, noise_csd, real_filter=False)
    _, _, _, _, G, _, _, _ = _prepare_beamformer_input(
        epochs.info, fwd_surf, label, 'vector', combine_xyz=False, exp=None,
        noise_cov=noise_cov)
    G.shape = (n_channels, n_verts, n_orient)
    G = G.transpose(1, 2, 0).conj()  # verts, orient, ch
    _assert_weight_norm(filters, G)
    inversion = 'matrix'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
                        weight_norm=weight_norm, depth=None,
                        noise_csd=noise_csd, inversion=inversion)
    _assert_weight_norm(filters, G)
    weight_norm = 'unit-noise-gain-invariant'
    inversion = 'single'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
                        weight_norm=weight_norm, depth=None,
                        noise_csd=noise_csd, inversion=inversion)
    _assert_weight_norm(filters, G)
    # Test picking orientations. Also test weight norming under these different
    # conditions.
    weight_norm = 'unit-noise-gain'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='normal', weight_norm=weight_norm,
                        depth=None, noise_csd=noise_csd, inversion=inversion)
    n_orient = 1
    assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
    assert not filters['is_free_ori']
    _assert_weight_norm(filters, G)
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='max-power', weight_norm=weight_norm,
                        depth=None, noise_csd=noise_csd, inversion=inversion)
    n_orient = 1
    assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
    assert not filters['is_free_ori']
    _assert_weight_norm(filters, G)
    # From here on, only work on a single frequency
    csd = csd[0]
    # Test using a real-valued filter
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='normal', real_filter=True,
                        noise_csd=noise_csd)
    assert not np.iscomplexobj(filters['weights'])
    # Test forward normalization. When inversion='single', the power of a
    # unit-noise CSD should be 1, even without weight normalization.
    if not whiten:
        csd_noise = csd.copy()
        inds = np.triu_indices(csd.n_channels)
        # Using [:, :] syntax for in-place broadcasting
        csd_noise._data[:, :] = np.eye(csd.n_channels)[inds][:, np.newaxis]
        filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label,
                            weight_norm=None, depth=1., noise_csd=noise_csd,
                            inversion='single')
        w = filters['weights'][0][:3]
        assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-6,
                        atol=0)
    # Test turning off both forward and weight normalization
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        weight_norm=None, depth=None, noise_csd=noise_csd)
    w = filters['weights'][0][:3]
    assert not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0,
                           rtol=1e-2, atol=0)
    # Test neural-activity-index weight normalization. It should be a scaled
    # version of the unit-noise-gain beamformer.
    filters_nai = make_dics(
        epochs.info, fwd_surf, csd, label=label, pick_ori='max-power',
        weight_norm='nai', depth=None, noise_csd=noise_csd)
    w_nai = filters_nai['weights'][0]
    filters_ung = make_dics(
        epochs.info, fwd_surf, csd, label=label, pick_ori='max-power',
        weight_norm='unit-noise-gain', depth=None, noise_csd=noise_csd)
    w_ung = filters_ung['weights'][0]
    assert_allclose(np.corrcoef(np.abs(w_nai).ravel(),
                                np.abs(w_ung).ravel()), 1, atol=1e-7)
    # Test whether spatial filter contains src_type
    assert 'src_type' in filters
    # Test HDF5 I/O round-trip of the filters object
    fname = op.join(str(tmpdir), 'filters-dics.h5')
    filters.save(fname)
    filters_read = read_beamformer(fname)
    assert isinstance(filters, Beamformer)
    assert isinstance(filters_read, Beamformer)
    for key in ['tmin', 'tmax']:  # deal with strictness of object_diff
        setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key)))
    assert object_diff(filters, filters_read) == ''
def _fwd_dist(power, fwd, vertices, source_ind, tidx=1):
idx = np.argmax(power.data[:, tidx])
rr_got = fwd['src'][0]['rr'][vertices[idx]]
rr_want = fwd['src'][0]['rr'][vertices[source_ind]]
return np.linalg.norm(rr_got - rr_want)
@idx_param
@pytest.mark.parametrize('inversion, weight_norm', [
    ('single', None),
    ('matrix', 'unit-noise-gain'),
])
def test_apply_dics_csd(_load_forward, idx, inversion, weight_norm):
    """Test applying a DICS beamformer to a CSD matrix."""
    fwd_free, fwd_surf, fwd_fixed, _ = _load_forward
    epochs, _, csd, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    reg = 1  # Lots of regularization for our toy dataset
    with pytest.raises(ValueError, match='several sensor types'):
        make_dics(epochs.info, fwd_free, csd)
    epochs.pick_types(meg='grad')
    # Try different types of forward models
    assert label.hemi == 'lh'
    for fwd in [fwd_free, fwd_surf, fwd_fixed]:
        filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg,
                            inversion=inversion, weight_norm=weight_norm)
        power, f = apply_dics_csd(csd, filters)
        assert f == [10, 20]
        # Did we find the true source at 20 Hz?
        dist = _fwd_dist(power, fwd_free, vertices, source_ind)
        assert dist == 0.
        # Is the signal stronger at 20 Hz than 10?
        assert power.data[source_ind, 1] > power.data[source_ind, 0]
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power'])
@pytest.mark.parametrize('inversion', ['single', 'matrix'])
@idx_param
def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx):
    """Test orientation picking and inversion modes with apply_dics_csd."""
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, _, csd, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    epochs.pick_types(meg='grad')
    # 'matrix' inversion needs more regularization on this toy dataset
    reg_ = 5 if inversion == 'matrix' else 1
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        reg=reg_, pick_ori=pick_ori,
                        inversion=inversion, depth=None,
                        weight_norm='unit-noise-gain')
    power, f = apply_dics_csd(csd, filters)
    assert f == [10, 20]
    dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
    # This is 0. for unit-noise-gain-invariant:
    assert dist <= (0.02 if inversion == 'matrix' else 0.)
    assert power.data[source_ind, 1] > power.data[source_ind, 0]
    # Test unit-noise-gain weighting
    csd_noise = csd.copy()
    inds = np.triu_indices(csd.n_channels)
    csd_noise._data[...] = np.eye(csd.n_channels)[inds][:, np.newaxis]
    noise_power, f = apply_dics_csd(csd_noise, filters)
    want_norm = 3 if pick_ori is None else 1.
    assert_allclose(noise_power.data, want_norm, atol=1e-7)
    # Test filter with forward normalization instead of weight
    # normalization
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        reg=reg_, pick_ori=pick_ori,
                        inversion=inversion, weight_norm=None,
                        depth=1.)
    power, f = apply_dics_csd(csd, filters)
    assert f == [10, 20]
    dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
    # Localization tolerance depends on which source vertex was simulated
    mat_tol = {0: 0.055, 100: 0.20, 200: 0.015, 233: 0.035}[idx]
    max_ = (mat_tol if inversion == 'matrix' else 0.)
    assert 0 <= dist <= max_
    assert power.data[source_ind, 1] > power.data[source_ind, 0]
def _nearest_vol_ind(fwd_vol, fwd, vertices, source_ind):
    """Index of the volume source closest to the true (surface) source."""
    want_rr = fwd['src'][0]['rr'][vertices][source_ind]
    return _compute_nearest(fwd_vol['source_rr'], want_rr[np.newaxis])[0]
@idx_param
def test_real(_load_forward, idx):
    """Test using a real-valued filter (plus rank reduction and volume src)."""
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, _, csd, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    epochs.pick_types(meg='grad')
    reg = 1  # Lots of regularization for our toy dataset
    filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg,
                             real_filter=True, inversion='single')
    # Also test here that no warnings are thrown - implemented to check whether
    # src should not be None warning occurs:
    # NOTE(review): pytest.warns(None) is deprecated in pytest >= 7 — confirm
    # the pinned pytest version before upgrading.
    with pytest.warns(None) as w:
        power, f = apply_dics_csd(csd, filters_real)
    assert len(w) == 0
    assert f == [10, 20]
    dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
    assert dist == 0
    assert power.data[source_ind, 1] > power.data[source_ind, 0]
    # Test rank reduction
    filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5,
                             pick_ori='max-power', inversion='matrix',
                             reduce_rank=True)
    power, f = apply_dics_csd(csd, filters_real)
    assert f == [10, 20]
    dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
    assert dist == 0
    assert power.data[source_ind, 1] > power.data[source_ind, 0]
    # Test computing source power on a volume source space
    filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg,
                            inversion='single')
    power, f = apply_dics_csd(csd, filters_vol)
    vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind)
    assert f == [10, 20]
    dist = _fwd_dist(
        power, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind)
    vol_tols = {100: 0.008, 200: 0.008}
    assert dist <= vol_tols.get(idx, 0.)
    assert power.data[vol_source_ind, 1] > power.data[vol_source_ind, 0]
    # check whether a filters object without src_type throws expected warning
    del filters_vol['src_type']  # emulate 0.16 behaviour to cause warning
    with pytest.warns(RuntimeWarning, match='spatial filter does not contain '
                      'src_type'):
        apply_dics_csd(csd, filters_vol)
@pytest.mark.filterwarnings("ignore:The use of several sensor types with the"
                            ":RuntimeWarning")
@idx_param
def test_apply_dics_timeseries(_load_forward, idx):
    """Test DICS applied to evoked and epoch time series."""
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, evoked, csd, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    reg = 5  # Lots of regularization for our toy dataset
    with pytest.raises(ValueError, match='several sensor types'):
        make_dics(evoked.info, fwd_surf, csd)
    evoked.pick_types(meg='grad')
    multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label,
                                 reg=reg)
    # Sanity checks on the resulting STC after applying DICS on evoked
    stcs = apply_dics(evoked, multiple_filters)
    assert isinstance(stcs, list)
    assert len(stcs) == len(multiple_filters['weights'])
    assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0])
    assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1])
    assert_allclose(stcs[0].times, evoked.times)
    # Applying filters for multiple frequencies on epoch data should fail
    with pytest.raises(ValueError, match='computed for a single frequency'):
        apply_dics_epochs(epochs, multiple_filters)
    # From now on, only apply filters with a single frequency (20 Hz).
    csd20 = csd.pick_frequency(20)
    filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg,
                        inversion='single')
    # Sanity checks on the resulting STC after applying DICS on epochs.
    # Also test here that no warnings are thrown - implemented to check whether
    # src should not be None warning occurs
    with pytest.warns(None) as w:
        stcs = apply_dics_epochs(epochs, filters)
    assert len(w) == 0
    assert isinstance(stcs, list)
    assert len(stcs) == 1
    assert_array_equal(stcs[0].vertices[0], filters['vertices'][0])
    assert_array_equal(stcs[0].vertices[1], filters['vertices'][1])
    assert_allclose(stcs[0].times, epochs.times)
    # Did we find the source?
    stc = (stcs[0] ** 2).mean()
    dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0)
    assert dist == 0
    # Apply filters to evoked
    stc = apply_dics(evoked, filters)
    stc = (stc ** 2).mean()
    dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0)
    assert dist == 0
    # Test if wrong channel selection is detected in application of filter
    evoked_ch = cp.deepcopy(evoked)
    evoked_ch.pick_channels(evoked_ch.ch_names[:-1])
    with pytest.raises(ValueError, match='MEG 2633 which is not present'):
        apply_dics(evoked_ch, filters)
    # Test whether projections are applied, by adding a custom projection
    filters_noproj = make_dics(evoked.info, fwd_surf, csd20, label=label)
    stc_noproj = apply_dics(evoked, filters_noproj)
    evoked_proj = evoked.copy()
    p = compute_proj_evoked(evoked_proj, n_grad=1, n_mag=0, n_eeg=0)
    proj_matrix = make_projector(p, evoked_proj.ch_names)[0]
    evoked_proj.info['projs'] += p
    filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label)
    assert_array_equal(filters_proj['proj'], proj_matrix)
    stc_proj = apply_dics(evoked_proj, filters_proj)
    assert np.any(np.not_equal(stc_noproj.data, stc_proj.data))
    # Test detecting incompatible projections
    filters_proj['proj'] = filters_proj['proj'][:-1, :-1]
    with pytest.raises(ValueError, match='operands could not be broadcast'):
        apply_dics(evoked_proj, filters_proj)
    # Test returning a generator
    stcs = apply_dics_epochs(epochs, filters, return_generator=False)
    stcs_gen = apply_dics_epochs(epochs, filters, return_generator=True)
    assert_array_equal(stcs[0].data, next(stcs_gen).data)
    # Test computing timecourses on a volume source space
    filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg,
                            inversion='single')
    stc = apply_dics(evoked, filters_vol)
    stc = (stc ** 2).mean()
    assert stc.data.shape[1] == 1
    vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind)
    dist = _fwd_dist(stc, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind,
                     tidx=0)
    vol_tols = {100: 0.008, 200: 0.015}
    vol_tol = vol_tols.get(idx, 0.)
    assert dist <= vol_tol
    # check whether a filters object without src_type throws expected warning
    del filters_vol['src_type']  # emulate 0.16 behaviour to cause warning
    with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'):
        apply_dics_epochs(epochs, filters_vol)
def _cov_as_csd(cov, info):
    """Convert a covariance dict into a CrossSpectralDensity.

    A small random Hermitian perturbation is added so the result has genuine
    complex structure (asserted below).
    """
    assert cov['data'].ndim == 2
    assert len(cov['data']) == len(cov['names'])
    data = cov['data'] + 1e-1 * _rand_csd(np.random.RandomState(0), info)
    assert data.dtype == np.complex128
    return CrossSpectralDensity(_sym_mat_to_vector(data), cov['names'], 0., 16)
# Just test free ori here (assume fixed is same as LCMV if these are)
# Changes here should be synced with test_lcmv.py
@pytest.mark.slowtest
@pytest.mark.parametrize(
    'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, real_filter', [
        (0.05, None, 'unit-noise-gain-invariant', False, None, 26, 28, False),
        (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, False),
        (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, True),
        (0.05, None, 'unit-noise-gain', False, None, 13, 14, False),
        (0.05, None, 'unit-noise-gain', True, None, 35, 37, False),
        (0.05, None, 'nai', True, None, 35, 37, False),
        (0.05, None, None, True, None, 12, 14, False),
        (0.05, None, None, True, 0.8, 39, 43, False),
        (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20,
         False),
        (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, False),
        (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, True),
        (0.05, 'max-power', 'nai', True, None, 21, 24, False),
        (0.05, 'max-power', None, True, None, 7, 10, False),
        (0.05, 'max-power', None, True, 0.8, 15, 18, False),
        # skip most no-reg tests, assume others are equal to LCMV if these are
        (0.00, None, None, True, None, 21, 32, False),
        (0.00, 'max-power', None, True, None, 13, 19, False),
    ])
def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm,
                                use_cov, depth, lower, upper, real_filter):
    """Test localization bias for free-orientation DICS."""
    evoked, fwd, noise_cov, data_cov, want = bias_params_free
    noise_csd = _cov_as_csd(noise_cov, evoked.info)
    data_csd = _cov_as_csd(data_cov, evoked.info)
    del noise_cov, data_cov
    if not use_cov:
        evoked.pick_types(meg='grad')
        noise_csd = None
    loc = apply_dics(evoked, make_dics(
        evoked.info, fwd, data_csd, reg, noise_csd, pick_ori=pick_ori,
        weight_norm=weight_norm, depth=depth, real_filter=real_filter)).data
    loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc)
    # Compute the percentage of sources for which there is no loc bias:
    perc = (want == np.argmax(loc, axis=0)).mean() * 100
    assert lower <= perc <= upper
@testing.requires_testing_data
@idx_param
@pytest.mark.parametrize('whiten', (False, True))
def test_make_dics_rank(_load_forward, idx, whiten):
    """Test rank handling (estimation, explicit rank dict, rank mismatch)."""
    _, fwd_surf, fwd_fixed, _ = _load_forward
    epochs, _, csd, _, label, _, _ = _simulate_data(fwd_fixed, idx)
    if whiten:
        noise_csd, want_rank = _make_rand_csd(epochs.info, csd)
        kind = 'mag + grad'
    else:
        noise_csd = None
        epochs.pick_types(meg='grad')
        want_rank = len(epochs.ch_names)
        assert want_rank == 41
        kind = 'grad'
    with catch_logging() as log:
        filters = make_dics(
            epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
            verbose=True)
    log = log.getvalue()
    assert f'Estimated rank ({kind}): {want_rank}' in log, log
    stc, _ = apply_dics_csd(csd, filters)
    other_rank = want_rank - 1  # shouldn't make a huge difference
    use_rank = dict(meg=other_rank)
    if not whiten:
        use_rank['grad'] = use_rank.pop('meg')
    with catch_logging() as log:
        filters_2 = make_dics(
            epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
            rank=use_rank, verbose=True)
    log = log.getvalue()
    assert f'Computing rank from covariance with rank={use_rank}' in log, log
    stc_2, _ = apply_dics_csd(csd, filters_2)
    corr = np.corrcoef(stc_2.data.ravel(), stc.data.ravel())[0, 1]
    assert 0.8 < corr < 0.99999
    if whiten:
        # Make the noise CSD rank-deficient so make_dics raises.
        data = noise_csd.get_data(0.)
        # NOTE(review): ``data[:0] = 0`` assigns to an *empty* slice (a
        # no-op); this looks like a typo for ``data[:, 0] = 0`` (zero the
        # first column as well as the first row) — confirm against upstream.
        data[0] = data[:0] = 0
        noise_csd._data[:, 0] = _sym_mat_to_vector(data)
        with pytest.raises(ValueError, match='meg data rank.*the noise rank'):
            filters = make_dics(
                epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
                verbose=True)
| true | true |
f7fed5e0b420fc6c1292823998d4805ba4e2c1cd | 398 | py | Python | lm/utils/registry.py | akashmehra/nlp | 0b2a0690a631d2614d3b124e502e5e651816c645 | [
"MIT"
] | null | null | null | lm/utils/registry.py | akashmehra/nlp | 0b2a0690a631d2614d3b124e502e5e651816c645 | [
"MIT"
] | null | null | null | lm/utils/registry.py | akashmehra/nlp | 0b2a0690a631d2614d3b124e502e5e651816c645 | [
"MIT"
] | null | null | null | from typing import Callable, Any
# Global mapping of registered results, keyed by class name.
opts = {}


class Registry(object):
    """Decorator factory that records the result of each wrapped call.

    The wrapped callable's return value is stored in the module-level
    ``opts`` dict under the class name of the call's first positional
    argument (typically ``self``), and is also returned to the caller.
    """

    def __init__(self, key: str):
        self.key = key

    def __call__(self, func: Callable[[], Any]) -> Callable[[], Any]:
        def wrapper(*args, **kwargs) -> Any:
            owner = args[0].__class__.__name__
            result = func(*args, **kwargs)
            opts[owner] = result
            return result
        return wrapper
| 26.533333 | 69 | 0.535176 | from typing import Callable, Any
opts = {}
class Registry(object):
def __init__(self, key: str):
self.key = key
def __call__(self, func: Callable[[], Any]) -> Callable[[], Any]:
def wrapper(*args, **kwargs) -> Any:
key = args[0].__class__.__name__
x = func(*args, **kwargs)
opts[key] = x
return x
return wrapper
| true | true |
f7fed6d8c611fec64d022efbfcf9bf5d02d6db47 | 6,917 | py | Python | detectron/datasets/dataset_catalog.py | Jiayuan-Gu/Detectron | 0375b05e867eae12be0ca8c420c9efad687689bf | [
"Apache-2.0"
] | 2 | 2018-09-05T13:54:26.000Z | 2019-09-26T13:36:32.000Z | detectron/datasets/dataset_catalog.py | Jiayuan-Gu/Detectron | 0375b05e867eae12be0ca8c420c9efad687689bf | [
"Apache-2.0"
] | null | null | null | detectron/datasets/dataset_catalog.py | Jiayuan-Gu/Detectron | 0375b05e867eae12be0ca8c420c9efad687689bf | [
"Apache-2.0"
] | 3 | 2018-07-26T08:43:06.000Z | 2020-06-06T17:48:17.000Z | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Collection of available datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
# Path to data dir (relative to this module)
_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
# Required dataset entry keys: every catalog entry must provide these
_IM_DIR = 'image_directory'
_ANN_FN = 'annotation_file'
# Optional dataset entry keys
_IM_PREFIX = 'image_prefix'       # prefix prepended to image file names
_DEVKIT_DIR = 'devkit_directory'  # PASCAL VOC devkit location
_RAW_DIR = 'raw_dir'              # raw data location (used for Cityscapes)
# Available datasets: maps dataset name -> entry dict keyed by the
# _IM_DIR/_ANN_FN (required) and _IM_PREFIX/_DEVKIT_DIR/_RAW_DIR (optional)
# constants defined above.
_DATASETS = {
    'cityscapes_fine_instanceonly_seg_train': {
        _IM_DIR:
            _DATA_DIR + '/cityscapes/images',
        _ANN_FN:
            _DATA_DIR + '/cityscapes/annotations/instancesonly_gtFine_train.json',
        _RAW_DIR:
            _DATA_DIR + '/cityscapes/raw'
    },
    'cityscapes_fine_instanceonly_seg_val': {
        _IM_DIR:
            _DATA_DIR + '/cityscapes/images',
        # use filtered validation as there is an issue converting contours
        _ANN_FN:
            _DATA_DIR + '/cityscapes/annotations/instancesonly_filtered_gtFine_val.json',
        _RAW_DIR:
            _DATA_DIR + '/cityscapes/raw'
    },
    'cityscapes_fine_instanceonly_seg_test': {
        _IM_DIR:
            _DATA_DIR + '/cityscapes/images',
        _ANN_FN:
            _DATA_DIR + '/cityscapes/annotations/instancesonly_gtFine_test.json',
        _RAW_DIR:
            _DATA_DIR + '/cityscapes/raw'
    },
    'coco_2014_train': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_train2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_train2014.json'
    },
    'coco_2014_val': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_val2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_val2014.json'
    },
    'coco_2014_minival': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_val2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_minival2014.json'
    },
    'coco_2014_valminusminival': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_val2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_valminusminival2014.json'
    },
    'coco_2015_test': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_test2015',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test2015.json'
    },
    'coco_2015_test-dev': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_test2015',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test-dev2015.json'
    },
    'coco_2017_test': {  # 2017 test uses 2015 test images
        _IM_DIR:
            _DATA_DIR + '/coco/coco_test2015',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test2017.json',
        _IM_PREFIX:
            'COCO_test2015_'
    },
    'coco_2017_test-dev': {  # 2017 test-dev uses 2015 test images
        _IM_DIR:
            _DATA_DIR + '/coco/coco_test2015',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test-dev2017.json',
        _IM_PREFIX:
            'COCO_test2015_'
    },
    'coco_stuff_train': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_train2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/coco_stuff_train.json'
    },
    'coco_stuff_val': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_val2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/coco_stuff_val.json'
    },
    'keypoints_coco_2014_train': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_train2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/person_keypoints_train2014.json'
    },
    'keypoints_coco_2014_val': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_val2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/person_keypoints_val2014.json'
    },
    'keypoints_coco_2014_minival': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_val2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/person_keypoints_minival2014.json'
    },
    'keypoints_coco_2014_valminusminival': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_val2014',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/person_keypoints_valminusminival2014.json'
    },
    'keypoints_coco_2015_test': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_test2015',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test2015.json'
    },
    'keypoints_coco_2015_test-dev': {
        _IM_DIR:
            _DATA_DIR + '/coco/coco_test2015',
        _ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test-dev2015.json'
    },
    'voc_2007_trainval': {
        _IM_DIR:
            _DATA_DIR + '/VOC2007/JPEGImages',
        _ANN_FN:
            _DATA_DIR + '/VOC2007/annotations/voc_2007_trainval.json',
        _DEVKIT_DIR:
            _DATA_DIR + '/VOC2007/VOCdevkit2007'
    },
    'voc_2007_test': {
        _IM_DIR:
            _DATA_DIR + '/VOC2007/JPEGImages',
        _ANN_FN:
            _DATA_DIR + '/VOC2007/annotations/voc_2007_test.json',
        _DEVKIT_DIR:
            _DATA_DIR + '/VOC2007/VOCdevkit2007'
    },
    'voc_2012_trainval': {
        _IM_DIR:
            _DATA_DIR + '/VOC2012/JPEGImages',
        _ANN_FN:
            _DATA_DIR + '/VOC2012/annotations/voc_2012_trainval.json',
        _DEVKIT_DIR:
            _DATA_DIR + '/VOC2012/VOCdevkit2012'
    }
}
def datasets():
    """Retrieve the names of all available datasets (a dict key view)."""
    return _DATASETS.keys()
def contains(name):
    """Determine if the dataset ``name`` is in the catalog."""
    # Membership testing works directly on the dict; the extra .keys() call
    # was redundant.
    return name in _DATASETS
def get_im_dir(name):
    """Retrieve the image directory for the dataset ``name`` (required key)."""
    return _DATASETS[name][_IM_DIR]
def get_ann_fn(name):
    """Retrieve the annotation file for the dataset ``name`` (required key)."""
    return _DATASETS[name][_ANN_FN]
def get_im_prefix(name):
    """Retrieve the image filename prefix for the dataset ('' if unset)."""
    # dict.get avoids the double lookup of the `x if k in d else ''` form.
    return _DATASETS[name].get(_IM_PREFIX, '')
def get_devkit_dir(name):
    """Retrieve the devkit dir for the dataset (VOC datasets only)."""
    return _DATASETS[name][_DEVKIT_DIR]
def get_raw_dir(name):
    """Retrieve the raw dir for the dataset (Cityscapes datasets only)."""
    return _DATASETS[name][_RAW_DIR]
| 30.742222 | 89 | 0.619488 |
'coco_stuff_val': {
_IM_DIR:
_DATA_DIR + '/coco/coco_val2014',
_ANN_FN:
_DATA_DIR + '/coco/annotations/coco_stuff_val.json'
},
'keypoints_coco_2014_train': {
_IM_DIR:
_DATA_DIR + '/coco/coco_train2014',
_ANN_FN:
_DATA_DIR + '/coco/annotations/person_keypoints_train2014.json'
},
'keypoints_coco_2014_val': {
_IM_DIR:
_DATA_DIR + '/coco/coco_val2014',
_ANN_FN:
_DATA_DIR + '/coco/annotations/person_keypoints_val2014.json'
},
'keypoints_coco_2014_minival': {
_IM_DIR:
_DATA_DIR + '/coco/coco_val2014',
_ANN_FN:
_DATA_DIR + '/coco/annotations/person_keypoints_minival2014.json'
},
'keypoints_coco_2014_valminusminival': {
_IM_DIR:
_DATA_DIR + '/coco/coco_val2014',
_ANN_FN:
_DATA_DIR + '/coco/annotations/person_keypoints_valminusminival2014.json'
},
'keypoints_coco_2015_test': {
_IM_DIR:
_DATA_DIR + '/coco/coco_test2015',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test2015.json'
},
'keypoints_coco_2015_test-dev': {
_IM_DIR:
_DATA_DIR + '/coco/coco_test2015',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test-dev2015.json'
},
'voc_2007_trainval': {
_IM_DIR:
_DATA_DIR + '/VOC2007/JPEGImages',
_ANN_FN:
_DATA_DIR + '/VOC2007/annotations/voc_2007_trainval.json',
_DEVKIT_DIR:
_DATA_DIR + '/VOC2007/VOCdevkit2007'
},
'voc_2007_test': {
_IM_DIR:
_DATA_DIR + '/VOC2007/JPEGImages',
_ANN_FN:
_DATA_DIR + '/VOC2007/annotations/voc_2007_test.json',
_DEVKIT_DIR:
_DATA_DIR + '/VOC2007/VOCdevkit2007'
},
'voc_2012_trainval': {
_IM_DIR:
_DATA_DIR + '/VOC2012/JPEGImages',
_ANN_FN:
_DATA_DIR + '/VOC2012/annotations/voc_2012_trainval.json',
_DEVKIT_DIR:
_DATA_DIR + '/VOC2012/VOCdevkit2012'
}
}
def datasets():
return _DATASETS.keys()
def contains(name):
return name in _DATASETS.keys()
def get_im_dir(name):
return _DATASETS[name][_IM_DIR]
def get_ann_fn(name):
return _DATASETS[name][_ANN_FN]
def get_im_prefix(name):
return _DATASETS[name][_IM_PREFIX] if _IM_PREFIX in _DATASETS[name] else ''
def get_devkit_dir(name):
return _DATASETS[name][_DEVKIT_DIR]
def get_raw_dir(name):
return _DATASETS[name][_RAW_DIR]
| true | true |
f7fed76c2431a92eb39d4b1b3c549d2debee531f | 799 | py | Python | hearthstone/migrations/0008_auto_20190111_1448.py | Robilol/django-hearthstone | 49f2eab62e4cddca27af3cbdb7b0bcf01d73d719 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | hearthstone/migrations/0008_auto_20190111_1448.py | Robilol/django-hearthstone | 49f2eab62e4cddca27af3cbdb7b0bcf01d73d719 | [
"PSF-2.0",
"BSD-3-Clause"
] | 13 | 2018-11-11T17:05:08.000Z | 2019-01-22T17:25:18.000Z | hearthstone/migrations/0008_auto_20190111_1448.py | Robilol/django-hearthstone | 49f2eab62e4cddca27af3cbdb7b0bcf01d73d719 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.dev20190111110410 on 2019-01-11 14:48
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the FK attributes on the Exchange model to drop the redundant
    # "_id" suffix (Django itself appends "_id" to a ForeignKey's DB column).

    dependencies = [
        ('hearthstone', '0007_exchange'),
    ]
    operations = [
        migrations.RenameField(
            model_name='exchange',
            old_name='card1_id',
            new_name='card1',
        ),
        migrations.RenameField(
            model_name='exchange',
            old_name='card2_id',
            new_name='card2',
        ),
        migrations.RenameField(
            model_name='exchange',
            old_name='user1_id',
            new_name='user1',
        ),
        migrations.RenameField(
            model_name='exchange',
            old_name='user2_id',
            new_name='user2',
        ),
    ]
| 23.5 | 63 | 0.536921 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hearthstone', '0007_exchange'),
]
operations = [
migrations.RenameField(
model_name='exchange',
old_name='card1_id',
new_name='card1',
),
migrations.RenameField(
model_name='exchange',
old_name='card2_id',
new_name='card2',
),
migrations.RenameField(
model_name='exchange',
old_name='user1_id',
new_name='user1',
),
migrations.RenameField(
model_name='exchange',
old_name='user2_id',
new_name='user2',
),
]
| true | true |
f7fed76e38956c868956936e9c92633f79a9f6e3 | 1,478 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/apigateway/models/BindJdsfGroup.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/apigateway/models/BindJdsfGroup.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/apigateway/models/BindJdsfGroup.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class BindJdsfGroup(object):
    """Request payload describing the binding of an API group to a JDSF
    (micro-service) gateway.

    Required:
        environment -- environment information.
        jdsfId      -- micro-service gateway ID.

    Optional:
        jdsfName         -- micro-service gateway name.
        jdsfRegistryName -- micro-service registry ID.
        jdsfParam        -- ``IP:port`` of the VPC endpoint (VPC type only).
        jdsfRegion       -- micro-service gateway region ID.
        groupType        -- ``api_group`` (default) or ``jdsf_group``.
    """

    def __init__(self, environment, jdsfId, jdsfName=None, jdsfRegistryName=None, jdsfParam=None, jdsfRegion=None, groupType=None):
        # Every constructor argument is stored verbatim under an attribute
        # of the same name; no validation or transformation is performed.
        self.environment, self.jdsfId = environment, jdsfId
        self.jdsfName, self.jdsfRegistryName = jdsfName, jdsfRegistryName
        self.jdsfParam, self.jdsfRegion = jdsfParam, jdsfRegion
        self.groupType = groupType
| 36.95 | 131 | 0.710419 |
class BindJdsfGroup(object):
    """Parameter object for binding an API group to a JDSF (micro-service)
    gateway; only ``environment`` and ``jdsfId`` are required."""

    def __init__(self, environment, jdsfId, jdsfName=None, jdsfRegistryName=None, jdsfParam=None, jdsfRegion=None, groupType=None):
        """
        :param environment: environment information (required)
        :param jdsfId: micro-service gateway ID (required)
        :param jdsfName: (Optional) micro-service gateway name
        :param jdsfRegistryName: (Optional) micro-service registry ID
        :param jdsfParam: (Optional) ``IP:port`` for VPC-type gateways
        :param jdsfRegion: (Optional) micro-service gateway region ID
        :param groupType: (Optional) ``api_group`` (default) or ``jdsf_group``
        """
        self.jdsfName = jdsfName
        self.jdsfRegistryName = jdsfRegistryName
        self.environment = environment
        self.jdsfId = jdsfId
        self.jdsfParam = jdsfParam
        self.jdsfRegion = jdsfRegion
        self.groupType = groupType
| true | true |
f7fed7b3b050c2189dd0024e52e4d3aa35ac6f30 | 630 | py | Python | saleor/account/migrations/0051_alter_customerevent_user.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 15,337 | 2015-01-12T02:11:52.000Z | 2021-10-05T19:19:29.000Z | saleor/account/migrations/0051_alter_customerevent_user.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 7,486 | 2015-02-11T10:52:13.000Z | 2021-10-06T09:37:15.000Z | saleor/account/migrations/0051_alter_customerevent_user.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | [
"CC-BY-4.0"
] | 5,864 | 2015-01-16T14:52:54.000Z | 2021-10-05T23:01:15.000Z | # Generated by Django 3.2.2 on 2021-07-01 08:45
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redeclare ``CustomerEvent.user`` as a nullable ForeignKey to the
    configured user model (CASCADE on delete, reverse accessor ``events``)."""
    dependencies = [
        ("account", "0050_auto_20210506_1058"),
    ]
    operations = [
        migrations.AlterField(
            model_name="customerevent",
            name="user",
            field=models.ForeignKey(
                # Nullable: an event may exist without an attached user.
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="events",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
    ]
| 24.230769 | 60 | 0.587302 |
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``customerevent.user`` to a nullable ForeignKey (CASCADE on
    delete, reverse name ``events``) pointing at the configured user model."""
    dependencies = [
        ("account", "0050_auto_20210506_1058"),
    ]
    operations = [
        migrations.AlterField(
            model_name="customerevent",
            name="user",
            field=models.ForeignKey(
                # Nullable so the event can outlive explicit user attribution.
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="events",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
    ]
| true | true |
f7fed994bc43d09b40e7eb4b7623f8e869e6f1e5 | 841 | bzl | Python | src/test/java/com/google/devtools/build/skydoc/testdata/filter_rules_test/input.bzl | xylocarp-whelky/bazel | 4ba404f7ed0473df3f0effa016c107ef677464f6 | [
"Apache-2.0"
] | 5 | 2019-01-10T09:41:11.000Z | 2020-07-15T12:02:22.000Z | src/test/java/com/google/devtools/build/skydoc/testdata/filter_rules_test/input.bzl | installation00/bazel | 6f38f345a1bd278a71170c5d80aba3928afdc6ec | [
"Apache-2.0"
] | 1 | 2019-09-26T16:39:28.000Z | 2019-09-26T16:39:28.000Z | src/test/java/com/google/devtools/build/skydoc/testdata/filter_rules_test/input.bzl | installation00/bazel | 6f38f345a1bd278a71170c5d80aba3928afdc6ec | [
"Apache-2.0"
] | 3 | 2019-05-05T01:52:36.000Z | 2020-11-04T03:16:14.000Z | load(
":testdata/filter_rules_test/dep.bzl",
"my_rule_impl",
dep_rule = "my_rule",
)
# Trivial shared implementation: returns an empty provider struct, since
# this file is documentation-extraction test data and is never executed
# as a real build rule.
# NOTE(review): this definition shadows the `my_rule_impl` loaded from
# dep.bzl above -- presumably deliberate fixture setup; confirm.
def my_rule_impl(ctx):
    return struct()

# Documented rule with two attributes (one file label, one string dict).
my_rule = rule(
    implementation = my_rule_impl,
    doc = "This is my rule. It does stuff.",
    attrs = {
        "first": attr.label(
            mandatory = True,
            doc = "first my_rule doc string",
            allow_single_file = True,
        ),
        "second": attr.string_dict(mandatory = True),
    },
)

# Second rule sharing the same trivial implementation.
other_rule = rule(
    implementation = my_rule_impl,
    doc = "This is another rule.",
    attrs = {
        "test": attr.string_dict(mandatory = True),
    },
)

# Re-export of the rule loaded from dep.bzl under a new name.
whitelisted_dep_rule = dep_rule

# Third rule, structurally identical to other_rule.
yet_another_rule = rule(
    implementation = my_rule_impl,
    doc = "This is yet another rule",
    attrs = {
        "test": attr.string_dict(mandatory = True),
    },
)
| 21.025 | 53 | 0.590963 | load(
":testdata/filter_rules_test/dep.bzl",
"my_rule_impl",
dep_rule = "my_rule",
)
def my_rule_impl(ctx):
return struct()
my_rule = rule(
implementation = my_rule_impl,
doc = "This is my rule. It does stuff.",
attrs = {
"first": attr.label(
mandatory = True,
doc = "first my_rule doc string",
allow_single_file = True,
),
"second": attr.string_dict(mandatory = True),
},
)
other_rule = rule(
implementation = my_rule_impl,
doc = "This is another rule.",
attrs = {
"test": attr.string_dict(mandatory = True),
},
)
whitelisted_dep_rule = dep_rule
yet_another_rule = rule(
implementation = my_rule_impl,
doc = "This is yet another rule",
attrs = {
"test": attr.string_dict(mandatory = True),
},
)
| true | true |
f7feda1a571ad4902a85db29d67310b1eee98a37 | 6,086 | py | Python | sympy/printing/tests/test_tree.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | 2 | 2019-05-18T22:36:49.000Z | 2019-05-24T05:56:16.000Z | sympy/printing/tests/test_tree.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | 2 | 2020-08-18T15:21:59.000Z | 2020-08-18T19:35:29.000Z | sympy/printing/tests/test_tree.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | 3 | 2021-02-16T16:40:49.000Z | 2022-03-07T18:28:41.000Z | from sympy.printing.tree import tree
from sympy.testing.pytest import XFAIL
# Remove this flag after making _assumptions cache deterministic.
@XFAIL
def test_print_tree_MatAdd():
    """Full tree printout of ``A + B`` (MatAdd), assumptions included.

    XFAIL: the ``_assumptions`` cache is not deterministic (see the note
    above the decorator), so the exact text below does not match reliably.
    """
    from sympy.matrices.expressions import MatrixSymbol
    A = MatrixSymbol('A', 3, 3)
    B = MatrixSymbol('B', 3, 3)
    # One entry per output line; "+-" and "| " prefixes encode tree depth.
    test_str = [
        'MatAdd: A + B\n',
        'algebraic: False\n',
        'commutative: False\n',
        'complex: False\n',
        'composite: False\n',
        'even: False\n',
        'extended_negative: False\n',
        'extended_nonnegative: False\n',
        'extended_nonpositive: False\n',
        'extended_nonzero: False\n',
        'extended_positive: False\n',
        'extended_real: False\n',
        'imaginary: False\n',
        'integer: False\n',
        'irrational: False\n',
        'negative: False\n',
        'noninteger: False\n',
        'nonnegative: False\n',
        'nonpositive: False\n',
        'nonzero: False\n',
        'odd: False\n',
        'positive: False\n',
        'prime: False\n',
        'rational: False\n',
        'real: False\n',
        'transcendental: False\n',
        'zero: False\n',
        '+-MatrixSymbol: A\n',
        '| algebraic: False\n',
        '| commutative: False\n',
        '| complex: False\n',
        '| composite: False\n',
        '| even: False\n',
        '| extended_negative: False\n',
        '| extended_nonnegative: False\n',
        '| extended_nonpositive: False\n',
        '| extended_nonzero: False\n',
        '| extended_positive: False\n',
        '| extended_real: False\n',
        '| imaginary: False\n',
        '| integer: False\n',
        '| irrational: False\n',
        '| negative: False\n',
        '| noninteger: False\n',
        '| nonnegative: False\n',
        '| nonpositive: False\n',
        '| nonzero: False\n',
        '| odd: False\n',
        '| positive: False\n',
        '| prime: False\n',
        '| rational: False\n',
        '| real: False\n',
        '| transcendental: False\n',
        '| zero: False\n',
        '| +-Symbol: A\n',
        '| | commutative: True\n',
        '| +-Integer: 3\n',
        '| | algebraic: True\n',
        '| | commutative: True\n',
        '| | complex: True\n',
        '| | extended_negative: False\n',
        '| | extended_nonnegative: True\n',
        '| | extended_real: True\n',
        '| | finite: True\n',
        '| | hermitian: True\n',
        '| | imaginary: False\n',
        '| | infinite: False\n',
        '| | integer: True\n',
        '| | irrational: False\n',
        '| | negative: False\n',
        '| | noninteger: False\n',
        '| | nonnegative: True\n',
        '| | rational: True\n',
        '| | real: True\n',
        '| | transcendental: False\n',
        '| +-Integer: 3\n',
        '| algebraic: True\n',
        '| commutative: True\n',
        '| complex: True\n',
        '| extended_negative: False\n',
        '| extended_nonnegative: True\n',
        '| extended_real: True\n',
        '| finite: True\n',
        '| hermitian: True\n',
        '| imaginary: False\n',
        '| infinite: False\n',
        '| integer: True\n',
        '| irrational: False\n',
        '| negative: False\n',
        '| noninteger: False\n',
        '| nonnegative: True\n',
        '| rational: True\n',
        '| real: True\n',
        '| transcendental: False\n',
        '+-MatrixSymbol: B\n',
        ' algebraic: False\n',
        ' commutative: False\n',
        ' complex: False\n',
        ' composite: False\n',
        ' even: False\n',
        ' extended_negative: False\n',
        ' extended_nonnegative: False\n',
        ' extended_nonpositive: False\n',
        ' extended_nonzero: False\n',
        ' extended_positive: False\n',
        ' extended_real: False\n',
        ' imaginary: False\n',
        ' integer: False\n',
        ' irrational: False\n',
        ' negative: False\n',
        ' noninteger: False\n',
        ' nonnegative: False\n',
        ' nonpositive: False\n',
        ' nonzero: False\n',
        ' odd: False\n',
        ' positive: False\n',
        ' prime: False\n',
        ' rational: False\n',
        ' real: False\n',
        ' transcendental: False\n',
        ' zero: False\n',
        ' +-Symbol: B\n',
        ' | commutative: True\n',
        ' +-Integer: 3\n',
        ' | algebraic: True\n',
        ' | commutative: True\n',
        ' | complex: True\n',
        ' | extended_negative: False\n',
        ' | extended_nonnegative: True\n',
        ' | extended_real: True\n',
        ' | finite: True\n',
        ' | hermitian: True\n',
        ' | imaginary: False\n',
        ' | infinite: False\n',
        ' | integer: True\n',
        ' | irrational: False\n',
        ' | negative: False\n',
        ' | noninteger: False\n',
        ' | nonnegative: True\n',
        ' | rational: True\n',
        ' | real: True\n',
        ' | transcendental: False\n',
        ' +-Integer: 3\n',
        ' algebraic: True\n',
        ' commutative: True\n',
        ' complex: True\n',
        ' extended_negative: False\n',
        ' extended_nonnegative: True\n',
        ' extended_real: True\n',
        ' finite: True\n',
        ' hermitian: True\n',
        ' imaginary: False\n',
        ' infinite: False\n',
        ' integer: True\n',
        ' irrational: False\n',
        ' negative: False\n',
        ' noninteger: False\n',
        ' nonnegative: True\n',
        ' rational: True\n',
        ' real: True\n',
        ' transcendental: False\n'
    ]
    assert tree(A + B) == "".join(test_str)
def test_print_tree_MatAdd_noassumptions():
    """With ``assumptions=False`` the tree contains only node labels, so
    the output is deterministic and this test is not XFAILed."""
    from sympy.matrices.expressions import MatrixSymbol
    A = MatrixSymbol('A', 3, 3)
    B = MatrixSymbol('B', 3, 3)
    test_str = \
"""MatAdd: A + B
+-MatrixSymbol: A
| +-Symbol: A
| +-Integer: 3
| +-Integer: 3
+-MatrixSymbol: B
  +-Symbol: B
  +-Integer: 3
  +-Integer: 3
"""
    assert tree(A + B, assumptions=False) == test_str
| 30.893401 | 65 | 0.490799 | from sympy.printing.tree import tree
from sympy.testing.pytest import XFAIL
@XFAIL
def test_print_tree_MatAdd():
from sympy.matrices.expressions import MatrixSymbol
A = MatrixSymbol('A', 3, 3)
B = MatrixSymbol('B', 3, 3)
test_str = [
'MatAdd: A + B\n',
'algebraic: False\n',
'commutative: False\n',
'complex: False\n',
'composite: False\n',
'even: False\n',
'extended_negative: False\n',
'extended_nonnegative: False\n',
'extended_nonpositive: False\n',
'extended_nonzero: False\n',
'extended_positive: False\n',
'extended_real: False\n',
'imaginary: False\n',
'integer: False\n',
'irrational: False\n',
'negative: False\n',
'noninteger: False\n',
'nonnegative: False\n',
'nonpositive: False\n',
'nonzero: False\n',
'odd: False\n',
'positive: False\n',
'prime: False\n',
'rational: False\n',
'real: False\n',
'transcendental: False\n',
'zero: False\n',
'+-MatrixSymbol: A\n',
'| algebraic: False\n',
'| commutative: False\n',
'| complex: False\n',
'| composite: False\n',
'| even: False\n',
'| extended_negative: False\n',
'| extended_nonnegative: False\n',
'| extended_nonpositive: False\n',
'| extended_nonzero: False\n',
'| extended_positive: False\n',
'| extended_real: False\n',
'| imaginary: False\n',
'| integer: False\n',
'| irrational: False\n',
'| negative: False\n',
'| noninteger: False\n',
'| nonnegative: False\n',
'| nonpositive: False\n',
'| nonzero: False\n',
'| odd: False\n',
'| positive: False\n',
'| prime: False\n',
'| rational: False\n',
'| real: False\n',
'| transcendental: False\n',
'| zero: False\n',
'| +-Symbol: A\n',
'| | commutative: True\n',
'| +-Integer: 3\n',
'| | algebraic: True\n',
'| | commutative: True\n',
'| | complex: True\n',
'| | extended_negative: False\n',
'| | extended_nonnegative: True\n',
'| | extended_real: True\n',
'| | finite: True\n',
'| | hermitian: True\n',
'| | imaginary: False\n',
'| | infinite: False\n',
'| | integer: True\n',
'| | irrational: False\n',
'| | negative: False\n',
'| | noninteger: False\n',
'| | nonnegative: True\n',
'| | rational: True\n',
'| | real: True\n',
'| | transcendental: False\n',
'| +-Integer: 3\n',
'| algebraic: True\n',
'| commutative: True\n',
'| complex: True\n',
'| extended_negative: False\n',
'| extended_nonnegative: True\n',
'| extended_real: True\n',
'| finite: True\n',
'| hermitian: True\n',
'| imaginary: False\n',
'| infinite: False\n',
'| integer: True\n',
'| irrational: False\n',
'| negative: False\n',
'| noninteger: False\n',
'| nonnegative: True\n',
'| rational: True\n',
'| real: True\n',
'| transcendental: False\n',
'+-MatrixSymbol: B\n',
' algebraic: False\n',
' commutative: False\n',
' complex: False\n',
' composite: False\n',
' even: False\n',
' extended_negative: False\n',
' extended_nonnegative: False\n',
' extended_nonpositive: False\n',
' extended_nonzero: False\n',
' extended_positive: False\n',
' extended_real: False\n',
' imaginary: False\n',
' integer: False\n',
' irrational: False\n',
' negative: False\n',
' noninteger: False\n',
' nonnegative: False\n',
' nonpositive: False\n',
' nonzero: False\n',
' odd: False\n',
' positive: False\n',
' prime: False\n',
' rational: False\n',
' real: False\n',
' transcendental: False\n',
' zero: False\n',
' +-Symbol: B\n',
' | commutative: True\n',
' +-Integer: 3\n',
' | algebraic: True\n',
' | commutative: True\n',
' | complex: True\n',
' | extended_negative: False\n',
' | extended_nonnegative: True\n',
' | extended_real: True\n',
' | finite: True\n',
' | hermitian: True\n',
' | imaginary: False\n',
' | infinite: False\n',
' | integer: True\n',
' | irrational: False\n',
' | negative: False\n',
' | noninteger: False\n',
' | nonnegative: True\n',
' | rational: True\n',
' | real: True\n',
' | transcendental: False\n',
' +-Integer: 3\n',
' algebraic: True\n',
' commutative: True\n',
' complex: True\n',
' extended_negative: False\n',
' extended_nonnegative: True\n',
' extended_real: True\n',
' finite: True\n',
' hermitian: True\n',
' imaginary: False\n',
' infinite: False\n',
' integer: True\n',
' irrational: False\n',
' negative: False\n',
' noninteger: False\n',
' nonnegative: True\n',
' rational: True\n',
' real: True\n',
' transcendental: False\n'
]
assert tree(A + B) == "".join(test_str)
def test_print_tree_MatAdd_noassumptions():
from sympy.matrices.expressions import MatrixSymbol
A = MatrixSymbol('A', 3, 3)
B = MatrixSymbol('B', 3, 3)
test_str = \
"""MatAdd: A + B
+-MatrixSymbol: A
| +-Symbol: A
| +-Integer: 3
| +-Integer: 3
+-MatrixSymbol: B
+-Symbol: B
+-Integer: 3
+-Integer: 3
"""
assert tree(A + B, assumptions=False) == test_str
| true | true |
f7feda89047c4629e8c00b7c3cfff82f31a45883 | 8,828 | py | Python | multiprocessing_parse.py | qftphys/Software-for-visualising-magnetic-layers | 7e4c5680b8e87aa677bdf4c912cbccdcb11b09a3 | [
"MIT"
] | null | null | null | multiprocessing_parse.py | qftphys/Software-for-visualising-magnetic-layers | 7e4c5680b8e87aa677bdf4c912cbccdcb11b09a3 | [
"MIT"
] | null | null | null | multiprocessing_parse.py | qftphys/Software-for-visualising-magnetic-layers | 7e4c5680b8e87aa677bdf4c912cbccdcb11b09a3 | [
"MIT"
] | null | null | null | import numpy as np
import os
import glob
from multiprocessing import Pool
from cython_modules.cython_parse import *
from binaryornot.check import is_binary
import re
def asynchronous_pool_order(func, args, object_list, timeout=20):
    """Map *func* over *object_list* using a process pool, preserving order.

    Each element of *object_list* becomes the first positional argument of
    *func*, followed by the extra positional arguments in *args*.

    Fix: the previous revision created a ``Pool()`` that was never closed or
    joined, leaking a full set of idle worker processes on every call.  The
    pool is now managed with a context manager so its workers are terminated
    once all results have been collected.

    :param func: picklable callable executed in the worker processes
    :param args: tuple of extra positional arguments appended after the item
    :param object_list: sequence of items to process
    :param timeout: seconds to wait for each individual result
    :return: list of results, in the same order as *object_list*
    :raises multiprocessing.TimeoutError: if any result exceeds *timeout*
    """
    with Pool() as pool:
        multiple_results = [pool.apply_async(func, (item, *args))
                            for item in object_list]
        # Results must be fetched inside the with-block: leaving it
        # terminates the workers.
        return [result.get(timeout=timeout) for result in multiple_results]
class MultiprocessingParse:
    """Static helpers that load OOMMF output: ``.omf``/``.ovf`` vector files
    (text or binary encoding) and the accompanying ``.odt`` plot table.
    Heavy per-file parsing is fanned out over a process pool via
    :func:`asynchronous_pool_order`."""

    @staticmethod
    def compose_trigger_list(files, plot_data):
        """Match ``.omf`` filenames against the rows of the ``.odt`` table.

        Each magnetization file embeds its iteration number in the filename
        (``...Oxs_<Driver>-Magnetization-NN-<iteration>.omf``).  The numbers
        are extracted and looked up in the ``<Driver>::Iteration`` column.

        :param files: list of ``.omf`` file paths
        :param plot_data: pandas DataFrame parsed from the ``.odt`` file
        :return: index of *plot_data* rows corresponding to *files*
        """
        # TODO: find the other OOMMF driver names and support them too.
        driver_class = 'MinDriver'
        match_string = '(^.*)(Oxs_' + driver_class + \
                        '-Magnetization-)([0-9]{2})(-)(.*)(.omf)'
        regex = re.compile(match_string)
        st = []
        # Probe the first file: if it does not match the MinDriver pattern,
        # assume the dataset was produced by a TimeDriver instead.
        filename = files[0]
        column_name = None
        try:
            m = regex.search(os.path.basename(filename))
            if m is None:
                raise AttributeError
            column_name = driver_class + '::Iteration'
        except AttributeError:
            driver_class = 'TimeDriver'
            match_string = '(^.*)(Oxs_' + driver_class + \
                            '-Magnetization-)([0-9]{2})(-)(.*)(.omf)'
            column_name = driver_class + '::Iteration'
            regex = re.compile(match_string)
        for filename in files:
            m = regex.search(os.path.basename(filename))
            if m is not None:
                # groups()[4] is the fifth capture group: the iteration id.
                st.append(int(m.groups()[4]))
            else:
                print(filename)
        trigger_list = plot_data.index[plot_data[column_name].isin(st)]
        try:
            assert len(files) == len(trigger_list)
        except AssertionError:
            # Duplicates appeared: keep the first occurrence, drop the rest.
            unique_stages = plot_data[column_name][~plot_data[column_name].duplicated(keep='first')]
            trigger_list = unique_stages.index[unique_stages.isin(st)]
        return trigger_list

    @staticmethod
    def guess_file_type(directory):
        """Detect whether *directory* holds ``.omf`` or ``.ovf`` files.

        :param directory: directory to inspect
        :return: (sorted list of matching file paths,
                  (vector extension, plot-file pattern) tuple)
        :raises ValueError: if neither supported file type is present
        """
        supported_extensions = [('.omf', '*.odt'), ('.ovf', '.notsuppyet')]
        voted_extension = None
        files_in_directory = os.listdir(directory)
        # Decide which extension this directory uses; .omf and .ovf are not
        # expected to be mixed, and .odt files are ignored at this stage.
        for filename in files_in_directory:
            for file_ext in supported_extensions:
                if filename.endswith(file_ext[0]):
                    voted_extension = file_ext
                    break
            if voted_extension is not None:
                break
        # The loop may finish without a vote: nothing loadable was found.
        if voted_extension is None:
            raise ValueError("Invalid Directory")
        print("SUPPORTED EXTENSION DETECTED {}".format(voted_extension))
        # NOTE: endswith() with the tuple also tests the plot-file pattern
        # ('*.odt'), which can never match a real filename, so effectively
        # only the vector extension filters here.
        files_in_directory = [os.path.join(directory, filename)
                              for filename in files_in_directory
                              if filename.endswith(voted_extension)]
        files_in_directory = sorted(files_in_directory)
        return files_in_directory, voted_extension

    @staticmethod
    def readFile(path):
        """Load one selected file.

        :param path: path to the file to load
        :return: depends on filetype:
                 if ``.odt`` -- ``(odt_data, stages)``;
                 if ``.omf`` or ``.ovf`` -- ``(rawVectorData, header)``
        :raises ValueError: for an unsupported extension
        :raises RuntimeError: if the file encoding cannot be detected
        """
        if ".odt" in path:
            odt_data, stages = getPlotData(path)
            return odt_data, stages
        elif ".omf" in path or ".ovf" in path:
            rawVectorData = None
            if is_binary(path):
                headers, rawVectorData = MultiprocessingParse.readBinary([path])
                header = headers[0]
            elif not is_binary(path):
                rawVectorData = MultiprocessingParse.readText([path])
                header = getFileHeader(path)
            else:
                raise RuntimeError("multiprocessing_parse.py readFile:" +\
                                    " Can't detect encoding!")
            return rawVectorData, header
        else:
            raise ValueError("Invalid file! Must have .odt, .omf " + \
                                "or .ovf extension!")

    @staticmethod
    def readFolder(directory, multipleFileHeaders=False):
        """Dump a process-ready dataset from *directory*.

        :param directory: directory with .omf/.ovf files and optional .odt
        :param multipleFileHeaders: currently unused; kept for backward
            compatibility of the call signature
        :return: (rawVectorData, header, plot_data, stages, trigger_list)
        :raises ValueError: on plot-file conflicts, missing vector files,
            or an .odt with fewer stages than there are vector files
        """
        files_in_directory, ext = MultiprocessingParse.guess_file_type(
            directory)
        ext_files = glob.glob(os.path.join(directory, '*' + ext[0]))
        test_file = os.path.join(directory, ext_files[0])
        stages = len(ext_files)
        # Look for the single .odt plot file matching this extension pair.
        plot_file = glob.glob(os.path.join(directory, ext[1]))
        if len(plot_file) > 1:
            raise ValueError("plot file extension conflict (too many)")
            # TODO: surface this through an error window in the GUI
        elif not plot_file:
            # No plot table: proceed with vector data only.
            plot_data = None
            plot_file = None
        trigger_list = None
        if plot_file is not None:
            plot_data, stages0 = getPlotData(plot_file[0])
            print(stages0, stages)
            if stages0 != stages:
                if stages0 > stages:
                    # More .odt rows than vector files: keep only the rows
                    # that correspond to an actually saved stage.
                    trigger_list = MultiprocessingParse.\
                                    compose_trigger_list(ext_files,
                                                            plot_data)
                    stages = len(trigger_list)
                    print(trigger_list)
                    print("TRIGGER LIST : {}, {}".format(stages,
                                                        len(trigger_list)))
                elif stages0 < stages:
                    raise ValueError("Odt cannot have fewer stages that files")
        else:
            plot_data = None
        # NOTE: is_binary on one probe file decides the codec for the whole
        # directory; mixed text/binary directories are not supported.
        if not is_binary(test_file):
            rawVectorData = MultiprocessingParse.readText(files_in_directory)
            file_for_header = glob.glob(os.path.join(directory, '*' + ext[0]))
            # virtually any file will do as the header source
            if not file_for_header:
                raise ValueError("no .omf or .ovf file has been found")
            header = getFileHeader(file_for_header[0])
        else:
            headers, rawVectorData = MultiprocessingParse.readBinary(
                files_in_directory)
            header = headers[0]
            if not header:
                raise ValueError("no .omf or .ovf file has been found")
        return rawVectorData, header, plot_data, stages, trigger_list

    @staticmethod
    def readBinary(files_in_directory):
        """Parse binary ``.omf``/``.ovf`` files in parallel.

        Fix: removed an unused ``Pool()`` that was created here on every
        call and never closed, leaking worker processes;
        :func:`asynchronous_pool_order` manages its own pool.

        :param files_in_directory: list of binary file paths
        :return: (array of headers, float32 ndarray of raw vectors)
        :raises TypeError: if parsing produced no vectors
        """
        output = asynchronous_pool_order(binary_format_reader, (),
                                            files_in_directory)
        output = np.array(output)
        headers = output[:, 0]
        rawVectorData = output[:, 1]
        # Collapse the object-dtype column into a contiguous float32 array.
        rawVectorData = np.array([x for x in rawVectorData], dtype=np.float32)
        if rawVectorData is None or headers is None:
            raise TypeError("\nNo vectors created")
        assert rawVectorData.dtype == np.float32
        return headers, rawVectorData

    @staticmethod
    def readText(files_in_directory):
        """Parse text ``.omf``/``.ovf`` files in parallel.

        Fix: removed an unused ``Pool()`` (worker-process leak) and a dead
        ``rawVectorData = []`` initialisation that was overwritten
        immediately afterwards.

        :param files_in_directory: list of text file paths
        :return: float32 ndarray of raw vectors
        :raises TypeError: if parsing produced no vectors
        """
        rawVectorData = asynchronous_pool_order(getRawVectors, (),
                                                files_in_directory,
                                                timeout=20)
        if not rawVectorData:
            raise TypeError("\nNo vectors created")
        rawVectorData = np.array(rawVectorData, dtype=np.float32)
        assert rawVectorData.dtype == np.float32
        return rawVectorData
| 40.87037 | 100 | 0.559243 | import numpy as np
import os
import glob
from multiprocessing import Pool
from cython_modules.cython_parse import *
from binaryornot.check import is_binary
import re
def asynchronous_pool_order(func, args, object_list, timeout=20):
pool = Pool()
output_list = []
multiple_results = [pool.apply_async(func, (object_list[i], *args))
for i in range(len(object_list))]
for result in multiple_results:
output_list.append(result.get(timeout=timeout))
return output_list
class MultiprocessingParse:
@staticmethod
def compose_trigger_list(files, plot_data):
driver_class = 'MinDriver'
match_string = '(^.*)(Oxs_' + driver_class + \
'-Magnetization-)([0-9]{2})(-)(.*)(.omf)'
regex = re.compile(match_string)
st = []
filename = files[0]
column_name = None
try:
m = regex.search(os.path.basename(filename))
if m is None:
raise AttributeError
column_name = driver_class +'::Iteration'
except AttributeError:
driver_class = 'TimeDriver'
match_string = '(^.*)(Oxs_' + driver_class + \
'-Magnetization-)([0-9]{2})(-)(.*)(.omf)'
column_name = driver_class +'::Iteration'
regex = re.compile(match_string)
for filename in files:
m = regex.search(os.path.basename(filename))
if m is not None:
st.append(int(m.groups()[4]))
else:
print(filename)
trigger_list = plot_data.index[plot_data[column_name].isin(st)]
try:
assert len(files) == len(trigger_list)
except AssertionError:
unique_stages = plot_data[column_name][~plot_data[column_name].duplicated(keep='first')]
trigger_list = unique_stages.index[unique_stages.isin(st)]
return trigger_list
@staticmethod
def guess_file_type(directory):
supported_extensions = [('.omf', '*.odt'), ('.ovf', '.notsuppyet')]
voted_extension = None
files_in_directory = os.listdir(directory)
for filename in files_in_directory:
for file_ext in supported_extensions:
if filename.endswith(file_ext[0]):
voted_extension = file_ext
break
if voted_extension is not None:
break
if voted_extension is None:
raise ValueError("Invalid Directory")
print("SUPPORTED EXTENSION DETECTED {}".format(voted_extension))
files_in_directory = [os.path.join(directory, filename)
for filename in files_in_directory
if filename.endswith(voted_extension)]
files_in_directory = sorted(files_in_directory)
return files_in_directory, voted_extension
@staticmethod
def readFile(path):
if ".odt" in path:
odt_data, stages = getPlotData(path)
return odt_data, stages
elif ".omf" in path or ".ovf" in path:
rawVectorData = None
if is_binary(path):
headers, rawVectorData = MultiprocessingParse.readBinary([path])
header = headers[0]
elif not is_binary(path):
rawVectorData = MultiprocessingParse.readText([path])
header = getFileHeader(path)
else:
raise RuntimeError("multiprocessing_parse.py readFile:" +\
" Can't detect encoding!")
return rawVectorData, header
else:
raise ValueError("Invalid file! Must have .odt, .omf " + \
"or .ovf extension!")
@staticmethod
def readFolder(directory, multipleFileHeaders=False):
files_in_directory, ext = MultiprocessingParse.guess_file_type(
directory)
ext_files = glob.glob(os.path.join(directory, '*' + ext[0]))
test_file = os.path.join(directory, ext_files[0])
stages = len(ext_files)
plot_file = glob.glob(os.path.join(directory, ext[1]))
# look for .odt or .txt in current directory
if len(plot_file) > 1:
raise ValueError("plot file extension conflict (too many)")
#TODO error window
elif not plot_file or plot_file is None:
plot_data = None
plot_file = None
# NOTE: this should recognize both .omf and .ovf files
trigger_list = None
if plot_file is not None:
plot_data, stages0 = getPlotData(plot_file[0])
print(stages0, stages)
if stages0 != stages:
if stages0 > stages:
trigger_list = MultiprocessingParse.\
compose_trigger_list(ext_files,
plot_data)
stages = len(trigger_list)
print(trigger_list)
print("TRIGGER LIST : {}, {}".format(stages,
len(trigger_list)))
elif stages0 < stages:
raise ValueError("Odt cannot have fewer stages that files")
else:
plot_data = None
if not is_binary(test_file):
rawVectorData = MultiprocessingParse.readText(files_in_directory)
file_for_header = glob.glob(os.path.join(directory, '*' + ext[0]))
# virtually any will do
if not file_for_header:
raise ValueError("no .omf or .ovf file has been found")
header = getFileHeader(file_for_header[0])
else:
headers, rawVectorData = MultiprocessingParse.readBinary(
files_in_directory)
header = headers[0]
if not header:
raise ValueError("no .omf or .ovf file has been found")
return rawVectorData, header, plot_data, stages, trigger_list
@staticmethod
def readBinary(files_in_directory):
text_pool = Pool()
output = asynchronous_pool_order(binary_format_reader, (),
files_in_directory)
output = np.array(output)
headers = output[:, 0]
rawVectorData = output[:, 1]
# test this solution, turn dtype object to float64
rawVectorData = np.array([x for x in rawVectorData], dtype=np.float32)
if rawVectorData is None or headers is None:
raise TypeError("\nNo vectors created")
assert rawVectorData.dtype == np.float32
return headers, rawVectorData
@staticmethod
def readText(files_in_directory):
# use multiprocessing
text_pool = Pool()
rawVectorData = []
rawVectorData = asynchronous_pool_order(getRawVectors, (),
files_in_directory,
timeout=20)
if not rawVectorData:
raise TypeError("\nNo vectors created")
rawVectorData = np.array(rawVectorData, dtype=np.float32)
assert rawVectorData.dtype == np.float32
return rawVectorData
| true | true |
f7fedb7734ebdc600d584a84f3541e266bfa503c | 7,928 | py | Python | tests/sentry/api/endpoints/test_project_release_files.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/endpoints/test_project_release_files.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | 8 | 2019-12-28T23:49:55.000Z | 2022-03-02T04:34:18.000Z | tests/sentry/api/endpoints/test_project_release_files.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import six
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from sentry.models import File, Release, ReleaseFile
from sentry.testutils import APITestCase
class ReleaseFilesListTest(APITestCase):
    def test_simple(self):
        """GET on the project release-files endpoint lists the files
        attached to the release."""
        project = self.create_project(name='foo')
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        # One file attached to the release; the response should contain
        # exactly this entry.
        releasefile = ReleaseFile.objects.create(
            organization_id=project.organization_id,
            release=release,
            file=File.objects.create(
                name='application.js',
                type='release.file',
            ),
            name='http://example.com/application.js'
        )
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        response = self.client.get(url)
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]['id'] == six.text_type(releasefile.id)
class ReleaseFileCreateTest(APITestCase):
def test_simple(self):
project = self.create_project(name='foo')
release = Release.objects.create(
organization_id=project.organization_id,
version='1',
)
release.add_project(project)
url = reverse('sentry-api-0-project-release-files', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
'version': release.version,
})
self.login_as(user=self.user)
response = self.client.post(url, {
'name': 'http://example.com/application.js',
'header': 'X-SourceMap: http://example.com',
'file': SimpleUploadedFile('application.js', b'function() { }',
content_type='application/javascript'),
}, format='multipart')
assert response.status_code == 201, response.content
releasefile = ReleaseFile.objects.get(release=release)
assert releasefile.name == 'http://example.com/application.js'
assert releasefile.ident == ReleaseFile.get_ident('http://example.com/application.js')
assert releasefile.file.headers == {
'Content-Type': 'application/javascript',
'X-SourceMap': 'http://example.com',
}
def test_no_file(self):
project = self.create_project(name='foo')
release = Release.objects.create(
organization_id=project.organization_id,
version='1',
)
release.add_project(project)
url = reverse('sentry-api-0-project-release-files', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
'version': release.version,
})
self.login_as(user=self.user)
response = self.client.post(url, {
'header': 'X-SourceMap: http://example.com',
}, format='multipart')
assert response.status_code == 400, response.content
def test_missing_name(self):
project = self.create_project(name='foo')
release = Release.objects.create(
organization_id=project.organization_id,
version='1',
)
release.add_project(project)
url = reverse('sentry-api-0-project-release-files', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
'version': release.version,
})
self.login_as(user=self.user)
response = self.client.post(url, {
'header': 'X-SourceMap: http://example.com',
'file': SimpleUploadedFile('', b'function() { }',
content_type='application/javascript'),
}, format='multipart')
assert response.status_code == 400, response.content
def test_invalid_name(self):
project = self.create_project(name='foo')
release = Release.objects.create(
organization_id=project.organization_id,
version='1',
)
release.add_project(project)
url = reverse('sentry-api-0-project-release-files', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
'version': release.version,
})
self.login_as(user=self.user)
response = self.client.post(url, {
'name': 'http://exa\tmple.com/applic\nati\ron.js\n',
'header': 'X-SourceMap: http://example.com/test.map.js',
'file': SimpleUploadedFile('application.js', b'function() { }',
content_type='application/javascript'),
}, format='multipart')
assert response.status_code == 400, response.content
def test_bad_headers(self):
project = self.create_project(name='foo')
release = Release.objects.create(
organization_id=project.organization_id,
version='1',
)
release.add_project(project)
url = reverse('sentry-api-0-project-release-files', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
'version': release.version,
})
self.login_as(user=self.user)
response = self.client.post(url, {
'name': 'http://example.com/application.js',
'header': 'lol',
'file': SimpleUploadedFile('application.js', b'function() { }',
content_type='application/javascript'),
}, format='multipart')
assert response.status_code == 400, response.content
response = self.client.post(url, {
'name': 'http://example.com/application.js',
'header': 'X-SourceMap: http://example.com/\r\n\ntest.map.js\n',
'file': SimpleUploadedFile('application.js', b'function() { }',
content_type='application/javascript'),
}, format='multipart')
assert response.status_code == 400, response.content
    def test_duplicate_file(self):
        """Uploading the same artifact for the same release twice yields 409."""
        project = self.create_project(name='foo')
        # NOTE(review): unlike the sibling tests, this one also passes
        # project_id to Release.objects.create() — confirm it is intended.
        release = Release.objects.create(
            project_id=project.id,
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        data = {
            'name': 'http://example.com/application.js',
            'header': 'X-SourceMap: http://example.com',
            'file': SimpleUploadedFile('application.js', b'function() { }',
                                       content_type='application/javascript'),
        }
        # First upload succeeds and records the file with its headers.
        response = self.client.post(url, data, format='multipart')
        assert response.status_code == 201, response.content
        releasefile = ReleaseFile.objects.get(release=release)
        assert releasefile.name == 'http://example.com/application.js'
        assert releasefile.file.headers == {
            'Content-Type': 'application/javascript',
            'X-SourceMap': 'http://example.com',
        }
        # Now upload it again!
        response = self.client.post(url, data, format='multipart')
        assert response.status_code == 409, response.content
| 34.320346 | 94 | 0.589934 | from __future__ import absolute_import
import six
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from sentry.models import File, Release, ReleaseFile
from sentry.testutils import APITestCase
class ReleaseFilesListTest(APITestCase):
    """Tests for GET on the project release-files endpoint."""

    def test_simple(self):
        """Listing returns exactly the one file attached to the release."""
        project = self.create_project(name='foo')
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        releasefile = ReleaseFile.objects.create(
            organization_id=project.organization_id,
            release=release,
            file=File.objects.create(
                name='application.js',
                type='release.file',
            ),
            name='http://example.com/application.js'
        )
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        response = self.client.get(url)
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]['id'] == six.text_type(releasefile.id)
class ReleaseFileCreateTest(APITestCase):
    """Tests for POST (artifact upload) on the project release-files endpoint."""

    def test_simple(self):
        """A valid upload returns 201 and stores name, ident and headers."""
        project = self.create_project(name='foo')
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        response = self.client.post(url, {
            'name': 'http://example.com/application.js',
            'header': 'X-SourceMap: http://example.com',
            'file': SimpleUploadedFile('application.js', b'function() { }',
                                       content_type='application/javascript'),
        }, format='multipart')
        assert response.status_code == 201, response.content
        releasefile = ReleaseFile.objects.get(release=release)
        assert releasefile.name == 'http://example.com/application.js'
        assert releasefile.ident == ReleaseFile.get_ident('http://example.com/application.js')
        assert releasefile.file.headers == {
            'Content-Type': 'application/javascript',
            'X-SourceMap': 'http://example.com',
        }

    def test_no_file(self):
        """A POST without a file part is rejected with 400."""
        project = self.create_project(name='foo')
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        response = self.client.post(url, {
            'header': 'X-SourceMap: http://example.com',
        }, format='multipart')
        assert response.status_code == 400, response.content

    def test_missing_name(self):
        """An upload whose file has an empty filename is rejected with 400."""
        project = self.create_project(name='foo')
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        response = self.client.post(url, {
            'header': 'X-SourceMap: http://example.com',
            'file': SimpleUploadedFile('', b'function() { }',
                                       content_type='application/javascript'),
        }, format='multipart')
        assert response.status_code == 400, response.content

    def test_invalid_name(self):
        """A name containing control characters is rejected with 400."""
        project = self.create_project(name='foo')
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        response = self.client.post(url, {
            'name': 'http://exa\tmple.com/applic\nati\ron.js\n',
            'header': 'X-SourceMap: http://example.com/test.map.js',
            'file': SimpleUploadedFile('application.js', b'function() { }',
                                       content_type='application/javascript'),
        }, format='multipart')
        assert response.status_code == 400, response.content

    def test_bad_headers(self):
        """Malformed header values (no colon, CR/LF injection) are rejected."""
        project = self.create_project(name='foo')
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        response = self.client.post(url, {
            'name': 'http://example.com/application.js',
            'header': 'lol',
            'file': SimpleUploadedFile('application.js', b'function() { }',
                                       content_type='application/javascript'),
        }, format='multipart')
        assert response.status_code == 400, response.content
        response = self.client.post(url, {
            'name': 'http://example.com/application.js',
            'header': 'X-SourceMap: http://example.com/\r\n\ntest.map.js\n',
            'file': SimpleUploadedFile('application.js', b'function() { }',
                                       content_type='application/javascript'),
        }, format='multipart')
        assert response.status_code == 400, response.content

    def test_duplicate_file(self):
        """Uploading the same artifact twice yields 409 on the second POST."""
        project = self.create_project(name='foo')
        # NOTE(review): this test also passes project_id to
        # Release.objects.create(), unlike the siblings — confirm intended.
        release = Release.objects.create(
            project_id=project.id,
            organization_id=project.organization_id,
            version='1',
        )
        release.add_project(project)
        url = reverse('sentry-api-0-project-release-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'version': release.version,
        })
        self.login_as(user=self.user)
        data = {
            'name': 'http://example.com/application.js',
            'header': 'X-SourceMap: http://example.com',
            'file': SimpleUploadedFile('application.js', b'function() { }',
                                       content_type='application/javascript'),
        }
        # First upload succeeds and records the file with its headers.
        response = self.client.post(url, data, format='multipart')
        assert response.status_code == 201, response.content
        releasefile = ReleaseFile.objects.get(release=release)
        assert releasefile.name == 'http://example.com/application.js'
        assert releasefile.file.headers == {
            'Content-Type': 'application/javascript',
            'X-SourceMap': 'http://example.com',
        }
        # Upload the identical payload again: must conflict.
        response = self.client.post(url, data, format='multipart')
        assert response.status_code == 409, response.content
| true | true |
f7fedc2e0417ea5154371b501db465be1fe72b36 | 355 | py | Python | yt/frontends/tipsy/setup.py | danielgrassinger/yt_new_frontend | 5f91d2fb8721c4c5da0af543a6256ed979cd9fc9 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/frontends/tipsy/setup.py | danielgrassinger/yt_new_frontend | 5f91d2fb8721c4c5da0af543a6256ed979cd9fc9 | [
"BSD-3-Clause-Clear"
] | 1 | 2016-04-05T22:30:14.000Z | 2016-04-05T22:30:14.000Z | yt/frontends/tipsy/setup.py | danielgrassinger/yt_new_frontend | 5f91d2fb8721c4c5da0af543a6256ed979cd9fc9 | [
"BSD-3-Clause-Clear"
] | 1 | 2020-12-05T05:51:09.000Z | 2020-12-05T05:51:09.000Z | #!/usr/bin/env python
import setuptools
import os
import sys
import os.path
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the ``tipsy`` frontend subpackage.

    NOTE(review): numpy.distutils is deprecated and removed for
    Python >= 3.12 — confirm the build still targets an older toolchain.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('tipsy', parent_package, top_path)
    config.make_config_py()  # installs __config__.py
    #config.make_svn_version_py()
    return config
| 25.357143 | 61 | 0.76338 |
import setuptools
import os
import sys
import os.path
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the ``tipsy`` frontend subpackage."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration('tipsy', parent_package, top_path)
    config.make_config_py()  # installs __config__.py
    return config
| true | true |
f7fee44aa20c72aaa998e09bd0f3f7f79a17d4bc | 2,965 | py | Python | main.py | limdool/03-Text-adventure-1 | 5c8098b7e5dd33412b3aa2b1f9b573028cb60725 | [
"MIT"
] | 1 | 2019-09-18T16:20:33.000Z | 2019-09-18T16:20:33.000Z | main.py | limdool/03-Text-adventure-1 | 5c8098b7e5dd33412b3aa2b1f9b573028cb60725 | [
"MIT"
] | null | null | null | main.py | limdool/03-Text-adventure-1 | 5c8098b7e5dd33412b3aa2b1f9b573028cb60725 | [
"MIT"
] | 1 | 2019-09-28T16:03:18.000Z | 2019-09-28T16:03:18.000Z | import sys, logging, json, os
#check to make sure we are running the right version of Python
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
#turn on logging, in case we have to leave ourselves debugging messages
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
def render(game, current):
    '''Print the name and description of the player's current room.

    ``game`` is the mapping of room ids to room definitions (``main`` passes
    the "rooms" section of the game data); ``current`` is the id of the
    player's room.  Returns True so callers can tell rendering completed.
    '''
    # Use the data that was passed in instead of re-reading
    # THE_UNREAL_ESTATE.json on every call (the original reloaded the whole
    # file here and ignored the argument).
    room = game[current]
    print("\n")
    print(room["name"])
    print(room["desc"])
    return True
def check_input(game, current, verbs):
    '''Prompt the player for a direction and normalize it.

    Returns the canonical verb (e.g. "NORTH") for recognized input, or an
    empty string for empty/unrecognized input.  ``game`` and ``current`` are
    accepted for interface compatibility with the game loop.
    '''
    user_input = input("Would you like to go NORTH, SOUTH, EAST, or WEST? ").strip().upper()
    # Normalize against the verb table that was passed in rather than
    # re-reading the JSON file (the original reloaded it on every call).
    if len(user_input):
        user_input = normalizedVerb(user_input, verbs)
    return user_input
def update(user_input, game, current, verb):
    '''Resolve the player's move and return the new room id.

    ``game`` is the mapping of room ids to room definitions.  If
    ``user_input`` matches a usable exit of the current room, the exit's
    target room id is returned; otherwise the player stays in ``current``.
    '''
    # Walk the exits of the current room using the data passed in (the
    # original re-read the JSON file here on every call and also contained a
    # no-op expression statement).
    for room_exit in game[current]["exits"]:
        if user_input == room_exit["verb"] and room_exit['target'] != 'NoExit':
            return room_exit['target']
    # No exit matched: print the feedback exactly once.  The original's
    # else-branch inside the loop printed this once per non-matching exit.
    if user_input != "QUIT".upper():
        print("\nYou shake your head and try again...")
    else:
        print("\n\n\nY O U ' L L B E B A C K . . . \n\n")
    return current
def normalizedVerb(user_input, verbs):
    '''Map a raw input token to its canonical verb.

    ``verbs`` is a list of ``{'v': alias, 'map': canonical}`` entries;
    returns the canonical verb for the first matching alias, or "" when
    nothing matches.
    '''
    # Use the verb table passed by the caller instead of re-reading the
    # JSON file (the original reloaded it and ignored the parameter).
    for entry in verbs:
        if user_input == entry['v']:
            return entry['map']
    return ""
def main():
    '''Run the game loop: render, read input, update, until QUIT.'''
    # Load the game data once up front; the helpers receive the parsed
    # structure instead of the file being re-read three times per frame as
    # in the original.
    with open('THE_UNREAL_ESTATE.json') as json_file:
        game = json.load(json_file)
    current = "START"
    quit = False
    while not quit:
        # Render the world first
        render(game['rooms'], current)
        # Check for player input
        user_input = check_input(game["rooms"], current, game["verbs"])
        # Update the state of the world
        current = update(user_input, game["rooms"], current, game["verbs"])
        if user_input == "QUIT".upper():
            quit = True
    # Returning the render function preserves the original's (odd) contract.
    return render


if __name__ == '__main__':
    main()
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
def render(game,current):
    """Print the name and description of the player's current room.

    NOTE(review): the ``game`` argument is immediately shadowed — the JSON
    file is re-read on every call; presumably the passed-in data should be
    used instead.
    """
    with open('THE_UNREAL_ESTATE.json') as json_file:
        game = json.load(json_file)
    r = game["rooms"]
    c = r[current]
    print("\n")
    print(c["name"])
    print(c["desc"])
    return True
def check_input(game, current, verbs):
    """Prompt the player for a direction and return the normalized verb.

    Returns "" for empty or unrecognized input.
    NOTE(review): the ``verbs`` parameter is ignored — the verb table is
    re-read from the JSON file instead; confirm which is intended.
    """
    user_input = input("Would you like to go NORTH, SOUTH, EAST, or WEST? ").strip().upper()
    with open('THE_UNREAL_ESTATE.json') as json_file:
        game = json.load(json_file)
    if (len(user_input)):
        user_input = normalizedVerb(user_input,game["verbs"])
    return user_input
def update(user_input, game, current, verb):
    """Resolve the player's move; return the target room id or ``current``.

    NOTE(review): the else-branch is attached to the ``if`` inside the
    loop, so the feedback message is printed once per non-matching exit
    rather than once per move; likewise the file is re-read instead of
    using the ``game`` argument.
    """
    with open('THE_UNREAL_ESTATE.json') as json_file:
        game = json.load(json_file)
    # No-op expression statement (has no effect).
    game["rooms"][current]["exits"]
    for e in game["rooms"][current]["exits"]:
        if user_input == e["verb"] and e['target'] !='NoExit':
            return e['target']
        else:
            if user_input != "QUIT".upper():
                print("\nYou shake your head and try again...")
            else:
                print("\n\n\nY O U ' L L B E B A C K . . . \n\n")
    return current
def normalizedVerb(user_input, verbs):
    """Map a raw input token to its canonical verb, or "" if unknown.

    NOTE(review): the ``verbs`` parameter is ignored; the table is re-read
    from the JSON file on every call.
    """
    with open('THE_UNREAL_ESTATE.json') as json_file:
        game = json.load(json_file)
    for v in game["verbs"]:
        if user_input == v['v']:
            return v['map']
    return ""
def main():
    """Run the game loop (render, read input, update) until QUIT.

    NOTE(review): the JSON game file is reloaded three times per loop
    iteration; loading once before the loop would suffice.
    """
    game = {}
    with open('THE_UNREAL_ESTATE.json') as json_file:
        game = json.load(json_file)
    current = "START"
    quit=False
    while not quit:
        #Render the world first
        with open('THE_UNREAL_ESTATE.json') as json_file:
            game = json.load(json_file)
        render(game['rooms'],current)
        #Check for player input
        with open('THE_UNREAL_ESTATE.json') as json_file:
            game = json.load(json_file)
        user_input = check_input(game["rooms"], current,game["verbs"])
        #Update the state of the world
        with open('THE_UNREAL_ESTATE.json') as json_file:
            game = json.load(json_file)
        current = update(user_input, game["rooms"], current, game["verbs"])
        if user_input == "QUIT".upper():
            quit = True
    # Returns the render function object (unused by the caller).
    return render
if __name__ == '__main__':
    main()
f7fee4a50c1d51bdfe2aadc905d93a2a83afc3cb | 1,035 | py | Python | src/sagemaker/tensorflow/__init__.py | mufaddal-rohawala/sagemaker-python-sdk | 72c12bb1481c368f799b17c38d07c5fc34864093 | [
"Apache-2.0"
] | null | null | null | src/sagemaker/tensorflow/__init__.py | mufaddal-rohawala/sagemaker-python-sdk | 72c12bb1481c368f799b17c38d07c5fc34864093 | [
"Apache-2.0"
] | 20 | 2021-09-17T20:50:11.000Z | 2021-12-09T00:29:02.000Z | src/sagemaker/tensorflow/__init__.py | mufaddal-rohawala/sagemaker-python-sdk | 72c12bb1481c368f799b17c38d07c5fc34864093 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Classes for using TensorFlow and TensorFlow Serving with Amazon SageMaker."""
from __future__ import absolute_import
from sagemaker.tensorflow.estimator import TensorFlow # noqa: F401 (imported but unused)
from sagemaker.tensorflow.model import TensorFlowModel, TensorFlowPredictor # noqa: F401
from sagemaker.tensorflow.processing import TensorFlowProcessor # noqa: F401
from sagemaker.tensorflow.training_compiler.config import TrainingCompilerConfig # noqa: F401
| 49.285714 | 94 | 0.789372 |
from __future__ import absolute_import
from sagemaker.tensorflow.estimator import TensorFlow
from sagemaker.tensorflow.model import TensorFlowModel, TensorFlowPredictor
from sagemaker.tensorflow.processing import TensorFlowProcessor
from sagemaker.tensorflow.training_compiler.config import TrainingCompilerConfig
| true | true |
f7fee58d9e4b265f09b550c78c78d8b82b7d691b | 9,947 | py | Python | dhnx/plotting.py | rbv83/DHNx | e236d720c7ec3c0f400648b96141454557d35476 | [
"MIT"
] | 14 | 2020-06-25T14:03:21.000Z | 2021-11-25T12:53:08.000Z | dhnx/plotting.py | rbv83/DHNx | e236d720c7ec3c0f400648b96141454557d35476 | [
"MIT"
] | 51 | 2020-02-19T14:42:38.000Z | 2022-03-23T08:30:31.000Z | dhnx/plotting.py | oemof-heat/district_heating_simulation | edb5c9be17f74d7f200c1eb6a17000a26633bdc3 | [
"MIT"
] | 3 | 2020-10-23T15:54:11.000Z | 2022-02-28T12:53:09.000Z | # -*- coding: utf-8
"""
This module is designed to hold functions for visualization.
This file is part of project dhnx (). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location:
SPDX-License-Identifier: MIT
"""
import logging
from collections import namedtuple
import folium as fol
import matplotlib.collections as collections
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from folium.features import DivIcon
logger = logging.getLogger()
logger.setLevel(logging.INFO)
cartopy_installed = True
try:
from cartopy import crs as ccrs
from cartopy.io.img_tiles import Stamen
except ImportError:
logging.info("Cartopy is not installed. Background maps will not be drawn.")
cartopy_installed = False
class InteractiveMap():
    r"""
    An interactive map of a network.ThermalNetwork.

    Draws the network's nodes (consumers, producers, forks) and its pipes
    on a zoomable folium/Leaflet map via :meth:`draw`.
    """

    def __init__(self, thermal_network):
        # All node components flattened into one DataFrame (see
        # collect_node_data); assumes it provides 'lat', 'lon' and
        # 'component_type' columns — TODO confirm against the data model.
        self.node_data = self.collect_node_data(thermal_network)
        self.edge_data = thermal_network.components.pipes
        # Uniform line weight; draw() scales each edge relative to the mean.
        self.edge_data['value'] = 1
        self.node_id = self.node_data.index
        self.lat = self.node_data['lat']
        self.lon = self.node_data['lon']
        self.component_type = self.node_data['component_type']
        self._add_colors()

    @staticmethod
    def collect_node_data(thermal_network):
        """Concatenate consumers, producers and forks into one DataFrame,
        prefixing each index with its component list name (e.g. 'forks-0')."""
        node_data = {
            list_name: thermal_network.components[list_name].copy() for list_name in [
                'consumers',
                'producers',
                'forks'
            ]
        }

        for k, v in node_data.items():
            v.index = [k + '-' + str(id) for id in v.index]

        return pd.concat(node_data.values())

    def _add_colors(self):
        """Assign a 'node_color' column derived from 'component_type'.

        NOTE(review): the keys 'producer'/'consumer'/'split' must match the
        values found in 'component_type'; 'split' looks inconsistent with
        the 'forks' component list — confirm.
        """
        color = {'producer': '#ff0000',
                 'consumer': '#00ff00',
                 'split': '#000000'}

        self.node_data = (
            self.node_data
            .assign(node_color=self.node_data['component_type'])
            .replace({'node_color': color}))

        return self.node_data['node_color']

    @staticmethod
    def _get_bearing(p1, p2):
        '''
        Returns compass bearing from p1 to p2

        Parameters
        p1 : namedtuple with lat lon
        p2 : namedtuple with lat lon

        Return
        compass bearing of type float (degrees in [0, 360))
        '''
        y = p2[0] - p1[0]
        x = p2[1] - p1[1]
        bearing = np.arctan2(x, y) / np.pi * 180
        # adjusting for compass bearing
        if bearing < 0:
            return bearing + 360
        return bearing

    def _get_arrows(self, locations, color='black', size=8, n_arrows=3):
        '''
        Get a list of correctly placed and rotated
        arrows/markers to be plotted

        Parameters
        locations : list of lists of lat lons that represent the
                    start and end of the line.
                    eg [[41.1132, -96.1993],[41.3810, -95.8021]]
        color : default is 'black'
        size : default is 8
        n_arrows : number of arrows to create.  default is 3

        Return
        list of arrows/markers
        '''
        Point = namedtuple('Point', field_names=['lat', 'lon'])

        # creating point from our Point named tuple
        p1 = Point(locations[0][0], locations[0][1])
        p2 = Point(locations[1][0], locations[1][1])

        # getting the rotation needed for our marker.
        # Subtracting 90 to account for the marker's orientation
        # of due East(get_bearing returns North)
        rotation = self._get_bearing(p1, p2) - 90

        # get an evenly space list of lats and lons for our arrows
        # note that I'm discarding the first and last for aesthetics
        # as I'm using markers to denote the start and end
        arrow_lats = np.linspace(p1.lat, p2.lat, n_arrows + 2)[1:n_arrows + 1]
        arrow_lons = np.linspace(p1.lon, p2.lon, n_arrows + 2)[1:n_arrows + 1]

        arrows = []

        # creating each "arrow" and appending them to our arrows list
        for points in zip(arrow_lats, arrow_lons):
            arrows.append(
                fol.RegularPolygonMarker(
                    location=points,
                    color=color, number_of_sides=3,
                    radius=size, rotation=rotation, fill=True))

        return arrows

    def draw(self):
        """Build and return a folium.Map with all nodes, labels and pipes."""
        # create map centered on the mean node position
        m = fol.Map(location=[self.lat.mean(), self.lon.mean()],
                    zoom_start=14)

        for i in range(0, len(self.node_data)):
            # draw nodes
            fol.CircleMarker([self.lat[i], self.lon[i]],
                             # popup=data['node_id'][i],
                             color=self.node_data['node_color'][i],
                             fill_color=self.node_data['node_color'][i],
                             radius=20).add_to(m)

            # draw node ids
            fol.Marker(
                [self.lat[i], self.lon[i]],
                icon=DivIcon(
                    icon_size=(-35, 75),
                    icon_anchor=(0, 0),
                    html='<div style="font-size: 16pt">%s</div>'
                         % self.node_data.index[i]
                )
            ).add_to(m)

        for i in range(0, len(self.edge_data)):
            # linewidth settings: scale each edge relative to the mean value
            lw_avg = self.edge_data['value'].mean()
            lw = self.edge_data['value'][i] / lw_avg

            fol.PolyLine(locations=[[self.lat[self.edge_data['from_node'][i]],
                                     self.lon[self.edge_data['from_node'][i]]],
                                    [self.lat[self.edge_data['to_node'][i]],
                                     self.lon[self.edge_data['to_node'][i]]]],
                         color='orange',
                         weight=lw * 3).add_to(m)

            # direction arrows along each pipe (from_node -> to_node)
            arrows = self._get_arrows(
                locations=[[self.lat[self.edge_data['from_node'][i]],
                            self.lon[self.edge_data['from_node'][i]]],
                           [self.lat[self.edge_data['to_node'][i]],
                            self.lon[self.edge_data['to_node'][i]]]],
                color='orange', n_arrows=3)
            for arrow in arrows:
                arrow.add_to(m)

        return m
class StaticMap():
    r"""
    A static map of a network.ThermalNetwork.

    Renders the network graph with matplotlib; optionally draws a Stamen
    background map when cartopy is available.
    """

    def __init__(self, thermal_network, figsize=(5, 5), node_size=3,
                 edge_width=3, node_color='r', edge_color='g'):
        self.graph = thermal_network.to_nx_graph()
        self.figsize = figsize
        self.node_size = node_size
        self.edge_width = edge_width
        self.node_color = node_color
        self.edge_color = edge_color
        # node id -> (lon, lat) array, taken from the graph's node attributes
        self.positions = {node_id: np.array([data['lon'], data['lat']])
                          for node_id, data in self.graph.nodes(data=True)}
        self.extent = self._get_extent()

    def _get_extent(self):
        """Return [lon_min, lon_max, lat_min, lat_max] padded by 10 %."""
        lon = [pos[0] for pos in self.positions.values()]
        lat = [pos[1] for pos in self.positions.values()]
        extent = np.array([np.min(lon), np.max(lon), np.min(lat), np.max(lat)])
        delta = [extent[1] - extent[0], extent[3] - extent[2]]
        extent = extent.astype(float)
        extent += 0.1 * np.array([-delta[0], delta[0], -delta[1], delta[1]])
        return extent

    def draw(self, bgcolor='w', no_axis=False, background_map=False,
             use_geom=False, edge_color='b', edge_linewidth=2,
             edge_alpha=1, node_size=40, node_color='r', node_alpha=1,
             edgecolor='r', node_zorder=1):
        """
        Draw the network and return ``(fig, ax)``.

        This function has been adapted from osmnx plots.plot_graph() function.
        """
        if background_map:
            # Silently fall back to a plain plot when cartopy is missing.
            if not cartopy_installed:
                logging.warning('To draw background map, cartopy must be installed.')
                background_map = False

        if background_map:
            imagery = Stamen(style='toner-lite')
            zoom_level = 15
            fig, ax = plt.subplots(
                figsize=self.figsize,
                subplot_kw={'projection': imagery.crs}
            )
            ax.set_extent(self.extent, crs=ccrs.Geodetic())
            ax.add_image(imagery, zoom_level, alpha=1, interpolation='bilinear')
        else:
            fig, ax = plt.subplots(figsize=self.figsize, facecolor=bgcolor)

        lines = []
        for u, v, data in self.graph.edges(data=True):
            if 'geometry' in data and use_geom:
                # if it has a geometry attribute (a list of line segments), add them
                # to the list of lines to plot
                xs, ys = data['geometry'].xy
                lines.append(list(zip(xs, ys)))
            else:
                # if it doesn't have a geometry attribute, the edge is a straight
                # line from node to node
                x1 = self.graph.nodes[u]['lon']
                y1 = self.graph.nodes[u]['lat']
                x2 = self.graph.nodes[v]['lon']
                y2 = self.graph.nodes[v]['lat']
                line = [(x1, y1), (x2, y2)]
                lines.append(line)

        # add the lines to the axis as a linecollection
        lc = collections.LineCollection(lines,
                                        colors=edge_color,
                                        linewidths=edge_linewidth,
                                        alpha=edge_alpha,
                                        zorder=2)
        ax.add_collection(lc)

        node_Xs = [float(x) for _, x in self.graph.nodes(data='lon')]
        node_Ys = [float(y) for _, y in self.graph.nodes(data='lat')]
        ax.scatter(node_Xs,
                   node_Ys,
                   s=node_size,
                   c=node_color,
                   alpha=node_alpha,
                   edgecolor=edgecolor,
                   zorder=node_zorder)
        if no_axis:
            ax = plt.gca()
            ax.set_axis_off()

        return fig, ax
| 34.658537 | 86 | 0.542877 |
import logging
from collections import namedtuple
import folium as fol
import matplotlib.collections as collections
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from folium.features import DivIcon
logger = logging.getLogger()
logger.setLevel(logging.INFO)
cartopy_installed = True
try:
from cartopy import crs as ccrs
from cartopy.io.img_tiles import Stamen
except ImportError:
logging.info("Cartopy is not installed. Background maps will not be drawn.")
cartopy_installed = False
class InteractiveMap():
    """An interactive folium/Leaflet map of a network.ThermalNetwork."""

    def __init__(self, thermal_network):
        # Flattened node table; assumed to carry 'lat', 'lon' and
        # 'component_type' columns — TODO confirm against the data model.
        self.node_data = self.collect_node_data(thermal_network)
        self.edge_data = thermal_network.components.pipes
        # Uniform line weight; draw() scales edges relative to the mean.
        self.edge_data['value'] = 1
        self.node_id = self.node_data.index
        self.lat = self.node_data['lat']
        self.lon = self.node_data['lon']
        self.component_type = self.node_data['component_type']
        self._add_colors()

    @staticmethod
    def collect_node_data(thermal_network):
        """Concatenate consumers, producers and forks into one DataFrame,
        prefixing each index with its component list name."""
        node_data = {
            list_name: thermal_network.components[list_name].copy() for list_name in [
                'consumers',
                'producers',
                'forks'
            ]
        }
        for k, v in node_data.items():
            v.index = [k + '-' + str(id) for id in v.index]
        return pd.concat(node_data.values())

    def _add_colors(self):
        """Derive a 'node_color' column from 'component_type'.

        NOTE(review): 'split' looks inconsistent with the 'forks'
        component list — confirm the expected type values.
        """
        color = {'producer': '#ff0000',
                 'consumer': '#00ff00',
                 'split': '#000000'}
        self.node_data = (
            self.node_data
            .assign(node_color=self.node_data['component_type'])
            .replace({'node_color': color}))
        return self.node_data['node_color']

    @staticmethod
    def _get_bearing(p1, p2):
        """Return the compass bearing (degrees in [0, 360)) from p1 to p2."""
        y = p2[0] - p1[0]
        x = p2[1] - p1[1]
        bearing = np.arctan2(x, y) / np.pi * 180
        # shift negative angles into the compass range
        if bearing < 0:
            return bearing + 360
        return bearing

    def _get_arrows(self, locations, color='black', size=8, n_arrows=3):
        """Return ``n_arrows`` rotated triangle markers placed evenly along
        the segment ``locations = [[lat1, lon1], [lat2, lon2]]``."""
        Point = namedtuple('Point', field_names=['lat', 'lon'])
        p1 = Point(locations[0][0], locations[0][1])
        p2 = Point(locations[1][0], locations[1][1])
        # of due East(get_bearing returns North)
        rotation = self._get_bearing(p1, p2) - 90
        # get an evenly space list of lats and lons for our arrows
        # note that I'm discarding the first and last for aesthetics
        arrow_lats = np.linspace(p1.lat, p2.lat, n_arrows + 2)[1:n_arrows + 1]
        arrow_lons = np.linspace(p1.lon, p2.lon, n_arrows + 2)[1:n_arrows + 1]
        arrows = []
        # creating each "arrow" and appending them to our arrows list
        for points in zip(arrow_lats, arrow_lons):
            arrows.append(
                fol.RegularPolygonMarker(
                    location=points,
                    color=color, number_of_sides=3,
                    radius=size, rotation=rotation, fill=True))
        return arrows

    def draw(self):
        """Build and return a folium.Map with all nodes, labels and pipes."""
        # create map
        m = fol.Map(location=[self.lat.mean(), self.lon.mean()],
                    zoom_start=14)
        for i in range(0, len(self.node_data)):
            # draw nodes
            fol.CircleMarker([self.lat[i], self.lon[i]],
                             # popup=data['node_id'][i],
                             color=self.node_data['node_color'][i],
                             fill_color=self.node_data['node_color'][i],
                             radius=20).add_to(m)
            # draw node ids
            fol.Marker(
                [self.lat[i], self.lon[i]],
                icon=DivIcon(
                    icon_size=(-35, 75),
                    icon_anchor=(0, 0),
                    html='<div style="font-size: 16pt">%s</div>'
                         % self.node_data.index[i]
                )
            ).add_to(m)
        for i in range(0, len(self.edge_data)):
            # linewidth settings
            lw_avg = self.edge_data['value'].mean()
            lw = self.edge_data['value'][i] / lw_avg
            fol.PolyLine(locations=[[self.lat[self.edge_data['from_node'][i]],
                                     self.lon[self.edge_data['from_node'][i]]],
                                    [self.lat[self.edge_data['to_node'][i]],
                                     self.lon[self.edge_data['to_node'][i]]]],
                         color='orange',
                         weight=lw * 3).add_to(m)
            # direction arrows along each pipe (from_node -> to_node)
            arrows = self._get_arrows(
                locations=[[self.lat[self.edge_data['from_node'][i]],
                            self.lon[self.edge_data['from_node'][i]]],
                           [self.lat[self.edge_data['to_node'][i]],
                            self.lon[self.edge_data['to_node'][i]]]],
                color='orange', n_arrows=3)
            for arrow in arrows:
                arrow.add_to(m)
        return m
class StaticMap():
    """A static matplotlib map of a network.ThermalNetwork."""

    def __init__(self, thermal_network, figsize=(5, 5), node_size=3,
                 edge_width=3, node_color='r', edge_color='g'):
        self.graph = thermal_network.to_nx_graph()
        self.figsize = figsize
        self.node_size = node_size
        self.edge_width = edge_width
        self.node_color = node_color
        self.edge_color = edge_color
        # node id -> (lon, lat) array from the graph's node attributes
        self.positions = {node_id: np.array([data['lon'], data['lat']])
                          for node_id, data in self.graph.nodes(data=True)}
        self.extent = self._get_extent()

    def _get_extent(self):
        """Return [lon_min, lon_max, lat_min, lat_max] padded by 10 %."""
        lon = [pos[0] for pos in self.positions.values()]
        lat = [pos[1] for pos in self.positions.values()]
        extent = np.array([np.min(lon), np.max(lon), np.min(lat), np.max(lat)])
        delta = [extent[1] - extent[0], extent[3] - extent[2]]
        extent = extent.astype(float)
        extent += 0.1 * np.array([-delta[0], delta[0], -delta[1], delta[1]])
        return extent

    def draw(self, bgcolor='w', no_axis=False, background_map=False,
             use_geom=False, edge_color='b', edge_linewidth=2,
             edge_alpha=1, node_size=40, node_color='r', node_alpha=1,
             edgecolor='r', node_zorder=1):
        """Draw the network (adapted from osmnx ``plots.plot_graph``) and
        return ``(fig, ax)``."""
        if background_map:
            # Fall back to a plain plot when cartopy is unavailable.
            if not cartopy_installed:
                logging.warning('To draw background map, cartopy must be installed.')
                background_map = False
        if background_map:
            imagery = Stamen(style='toner-lite')
            zoom_level = 15
            fig, ax = plt.subplots(
                figsize=self.figsize,
                subplot_kw={'projection': imagery.crs}
            )
            ax.set_extent(self.extent, crs=ccrs.Geodetic())
            ax.add_image(imagery, zoom_level, alpha=1, interpolation='bilinear')
        else:
            fig, ax = plt.subplots(figsize=self.figsize, facecolor=bgcolor)
        lines = []
        for u, v, data in self.graph.edges(data=True):
            if 'geometry' in data and use_geom:
                # if it has a geometry attribute (a list of line segments), add them
                # to the list of lines to plot
                xs, ys = data['geometry'].xy
                lines.append(list(zip(xs, ys)))
            else:
                # if it doesn't have a geometry attribute, the edge is a straight
                # line between its endpoint nodes
                x1 = self.graph.nodes[u]['lon']
                y1 = self.graph.nodes[u]['lat']
                x2 = self.graph.nodes[v]['lon']
                y2 = self.graph.nodes[v]['lat']
                line = [(x1, y1), (x2, y2)]
                lines.append(line)
        # add all edges to the axis as one LineCollection
        lc = collections.LineCollection(lines,
                                        colors=edge_color,
                                        linewidths=edge_linewidth,
                                        alpha=edge_alpha,
                                        zorder=2)
        ax.add_collection(lc)
        node_Xs = [float(x) for _, x in self.graph.nodes(data='lon')]
        node_Ys = [float(y) for _, y in self.graph.nodes(data='lat')]
        ax.scatter(node_Xs,
                   node_Ys,
                   s=node_size,
                   c=node_color,
                   alpha=node_alpha,
                   edgecolor=edgecolor,
                   zorder=node_zorder)
        if no_axis:
            ax = plt.gca()
            ax.set_axis_off()
        return fig, ax
| true | true |
f7fee5bdabe6f581fa5d488875d2a2c44514f0e6 | 4,353 | py | Python | httpie/config.py | HenryGessau/httpie | 85ba9ad8eaa718d7f9dbcb7129168d6a877f3d30 | [
"BSD-3-Clause"
] | 2 | 2022-01-31T18:18:58.000Z | 2022-01-31T18:26:35.000Z | httpie/config.py | isidentical/httpie | 85ba9ad8eaa718d7f9dbcb7129168d6a877f3d30 | [
"BSD-3-Clause"
] | 2 | 2022-03-05T19:16:08.000Z | 2022-03-05T19:16:09.000Z | httpie/config.py | isidentical/httpie | 85ba9ad8eaa718d7f9dbcb7129168d6a877f3d30 | [
"BSD-3-Clause"
] | null | null | null | import json
import os
from pathlib import Path
from typing import Any, Dict, Union
from . import __version__
from .compat import is_windows
from .encoding import UTF8
ENV_XDG_CONFIG_HOME = 'XDG_CONFIG_HOME'
ENV_HTTPIE_CONFIG_DIR = 'HTTPIE_CONFIG_DIR'
DEFAULT_CONFIG_DIRNAME = 'httpie'
DEFAULT_RELATIVE_XDG_CONFIG_HOME = Path('.config')
DEFAULT_RELATIVE_LEGACY_CONFIG_DIR = Path('.httpie')
DEFAULT_WINDOWS_CONFIG_DIR = Path(
os.path.expandvars('%APPDATA%')) / DEFAULT_CONFIG_DIRNAME
def get_default_config_dir() -> Path:
    """
    Return the path to the httpie configuration directory.

    The directory (and its ancestors) may not exist yet; only the legacy
    ``~/.httpie`` location, when returned, is guaranteed to exist.

    Lookup order: the HTTPIE_CONFIG_DIR environment variable, the Windows
    %APPDATA% location, an existing legacy ``~/.httpie`` directory, and
    finally the XDG base directory.

    XDG Base Directory Specification support:
        <https://wiki.archlinux.org/index.php/XDG_Base_Directory>
        $XDG_CONFIG_HOME is supported; $XDG_CONFIG_DIRS is not
    """
    # An explicit override via the environment variable wins outright.
    explicit = os.environ.get(ENV_HTTPIE_CONFIG_DIR)
    if explicit:
        return Path(explicit)

    # On Windows the config lives under %APPDATA%.
    if is_windows:
        return DEFAULT_WINDOWS_CONFIG_DIR

    home = Path.home()

    # An existing legacy ~/.httpie directory takes precedence over XDG.
    legacy_dir = home / DEFAULT_RELATIVE_LEGACY_CONFIG_DIR
    if legacy_dir.exists():
        return legacy_dir

    # Fall back to the XDG base directory (explicit env var, else ~/.config).
    xdg_home = os.environ.get(ENV_XDG_CONFIG_HOME,
                              home / DEFAULT_RELATIVE_XDG_CONFIG_HOME)
    return Path(xdg_home) / DEFAULT_CONFIG_DIRNAME
DEFAULT_CONFIG_DIR = get_default_config_dir()
class ConfigFileError(Exception):
    """Raised when a config file cannot be read or contains invalid JSON."""
    pass
def read_raw_config(config_type: str, path: Path) -> Dict[str, Any]:
    """Load and JSON-decode the file at ``path``.

    Returns the decoded data, or ``None`` when the file does not exist.
    Raises ConfigFileError when the file is unreadable or not valid JSON.
    """
    try:
        raw = path.read_text(encoding=UTF8)
    except FileNotFoundError:
        # A missing file is not an error — the caller treats None as "new".
        return None
    except OSError as err:
        raise ConfigFileError(f'cannot read {config_type} file: {err}')
    try:
        return json.loads(raw)
    except ValueError as err:
        raise ConfigFileError(
            f'invalid {config_type} file: {err} [{path}]'
        )
class BaseConfigDict(dict):
    """A dict persisted as pretty-printed JSON at ``self.path``.

    Subclasses may set ``name``/``helpurl``/``about`` and override the
    ``pre_process_data``/``post_process_data`` hooks to transform data
    on load/save.
    """
    # Optional class-level metadata; helpurl/about are written into the
    # file's __meta__ section by save().
    name = None
    helpurl = None
    about = None
    def __init__(self, path: Path):
        """*path* is the JSON file backing this dict."""
        super().__init__()
        self.path = path
    def ensure_directory(self):
        """Create the parent directory (owner-only, 0o700) if needed."""
        self.path.parent.mkdir(mode=0o700, parents=True, exist_ok=True)
    def is_new(self) -> bool:
        """Return True when the backing file does not exist yet."""
        return not self.path.exists()
    def pre_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Hook for processing the incoming config data."""
        return data
    def post_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Hook for processing the outgoing config data."""
        return data
    def load(self):
        """Populate self from the backing file (no-op when it's missing)."""
        config_type = type(self).__name__.lower()
        data = read_raw_config(config_type, self.path)
        if data is not None:
            data = self.pre_process_data(data)
            self.update(data)
    def save(self, *, bump_version: bool = False):
        """Write self to ``self.path`` as sorted, indented JSON.

        Refreshes the ``__meta__`` section first; the recorded httpie
        version is updated only when *bump_version* is true or no
        version has been recorded yet.
        """
        self.setdefault('__meta__', {})
        if bump_version or 'httpie' not in self['__meta__']:
            self['__meta__']['httpie'] = __version__
        if self.helpurl:
            self['__meta__']['help'] = self.helpurl
        if self.about:
            self['__meta__']['about'] = self.about
        self.ensure_directory()
        json_string = json.dumps(
            obj=self.post_process_data(self),
            indent=4,
            sort_keys=True,
            ensure_ascii=True,
        )
        self.path.write_text(json_string + '\n', encoding=UTF8)
    @property
    def version(self):
        # Version of httpie that last saved the file; falls back to the
        # current version when no __meta__ section exists.
        return self.get(
            '__meta__', {}
        ).get('httpie', __version__)
class Config(BaseConfigDict):
    """User configuration backed by ``config.json`` in the config directory."""

    FILENAME = 'config.json'
    DEFAULTS = {
        'default_options': []
    }

    def __init__(self, directory: Union[str, Path] = DEFAULT_CONFIG_DIR):
        self.directory = Path(directory)
        super().__init__(path=self.directory / self.FILENAME)
        self.update(self.DEFAULTS)

    @property
    def default_options(self) -> list:
        """Extra CLI options applied to every invocation."""
        return self['default_options']

    @property
    def plugins_dir(self) -> Path:
        """Resolved plugins directory (configurable via 'plugins_dir')."""
        configured = self.get('plugins_dir', self.directory / 'plugins')
        return Path(configured).resolve()
| 28.083871 | 82 | 0.644153 | import json
import os
from pathlib import Path
from typing import Any, Dict, Union
from . import __version__
from .compat import is_windows
from .encoding import UTF8
ENV_XDG_CONFIG_HOME = 'XDG_CONFIG_HOME'
ENV_HTTPIE_CONFIG_DIR = 'HTTPIE_CONFIG_DIR'
DEFAULT_CONFIG_DIRNAME = 'httpie'
DEFAULT_RELATIVE_XDG_CONFIG_HOME = Path('.config')
DEFAULT_RELATIVE_LEGACY_CONFIG_DIR = Path('.httpie')
DEFAULT_WINDOWS_CONFIG_DIR = Path(
os.path.expandvars('%APPDATA%')) / DEFAULT_CONFIG_DIRNAME
def get_default_config_dir() -> Path:
env_config_dir = os.environ.get(ENV_HTTPIE_CONFIG_DIR)
if env_config_dir:
return Path(env_config_dir)
if is_windows:
return DEFAULT_WINDOWS_CONFIG_DIR
home_dir = Path.home()
legacy_config_dir = home_dir / DEFAULT_RELATIVE_LEGACY_CONFIG_DIR
if legacy_config_dir.exists():
return legacy_config_dir
xdg_config_home_dir = os.environ.get(
ENV_XDG_CONFIG_HOME,
home_dir / DEFAULT_RELATIVE_XDG_CONFIG_HOME
)
return Path(xdg_config_home_dir) / DEFAULT_CONFIG_DIRNAME
DEFAULT_CONFIG_DIR = get_default_config_dir()
class ConfigFileError(Exception):
pass
def read_raw_config(config_type: str, path: Path) -> Dict[str, Any]:
try:
with path.open(encoding=UTF8) as f:
try:
return json.load(f)
except ValueError as e:
raise ConfigFileError(
f'invalid {config_type} file: {e} [{path}]'
)
except FileNotFoundError:
pass
except OSError as e:
raise ConfigFileError(f'cannot read {config_type} file: {e}')
class BaseConfigDict(dict):
name = None
helpurl = None
about = None
def __init__(self, path: Path):
super().__init__()
self.path = path
def ensure_directory(self):
self.path.parent.mkdir(mode=0o700, parents=True, exist_ok=True)
def is_new(self) -> bool:
return not self.path.exists()
def pre_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
return data
def post_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
return data
def load(self):
config_type = type(self).__name__.lower()
data = read_raw_config(config_type, self.path)
if data is not None:
data = self.pre_process_data(data)
self.update(data)
def save(self, *, bump_version: bool = False):
self.setdefault('__meta__', {})
if bump_version or 'httpie' not in self['__meta__']:
self['__meta__']['httpie'] = __version__
if self.helpurl:
self['__meta__']['help'] = self.helpurl
if self.about:
self['__meta__']['about'] = self.about
self.ensure_directory()
json_string = json.dumps(
obj=self.post_process_data(self),
indent=4,
sort_keys=True,
ensure_ascii=True,
)
self.path.write_text(json_string + '\n', encoding=UTF8)
@property
def version(self):
return self.get(
'__meta__', {}
).get('httpie', __version__)
class Config(BaseConfigDict):
FILENAME = 'config.json'
DEFAULTS = {
'default_options': []
}
def __init__(self, directory: Union[str, Path] = DEFAULT_CONFIG_DIR):
self.directory = Path(directory)
super().__init__(path=self.directory / self.FILENAME)
self.update(self.DEFAULTS)
@property
def default_options(self) -> list:
return self['default_options']
@property
def plugins_dir(self) -> Path:
return Path(self.get('plugins_dir', self.directory / 'plugins')).resolve()
| true | true |
f7fee6a666686928ba0dfb3d210196a36c39857e | 816 | py | Python | service/kafka.py | tbicr/sites-availability-checker | d1c72c5b9ef226d76d5925c6a880919dcb137094 | [
"MIT"
] | null | null | null | service/kafka.py | tbicr/sites-availability-checker | d1c72c5b9ef226d76d5925c6a880919dcb137094 | [
"MIT"
] | null | null | null | service/kafka.py | tbicr/sites-availability-checker | d1c72c5b9ef226d76d5925c6a880919dcb137094 | [
"MIT"
] | null | null | null | import aiokafka
from aiokafka.helpers import create_ssl_context
from service import config
from service.entities import Event
async def kafka_producer_factory(config):
    """Create, start, and return an ``aiokafka.AIOKafkaProducer``.

    *config* is a dict of AIOKafkaProducer keyword arguments.  It is
    expected to contain an ``ssl_context`` key (KeyError if absent):
    when truthy, it is taken as kwargs for
    ``aiokafka.helpers.create_ssl_context`` and replaced with the built
    SSL context in a shallow copy, leaving the caller's dict untouched.
    NOTE(review): the parameter shadows the ``service.config`` module
    imported at the top of this file.
    """
    if config["ssl_context"]:
        # Substitute the ssl kwargs with a real SSLContext in a copy.
        config = dict(config, ssl_context=create_ssl_context(**config["ssl_context"]))
    producer = aiokafka.AIOKafkaProducer(**config)
    await producer.start()
    return producer
async def kafka_consumer_factory(topic, config):
    """Create, start, and return an ``aiokafka.AIOKafkaConsumer`` for *topic*.

    Same ``ssl_context`` handling as ``kafka_producer_factory``: the
    (required) ``ssl_context`` entry, when truthy, is treated as kwargs
    for ``create_ssl_context`` and substituted in a shallow copy of
    *config*.
    """
    if config["ssl_context"]:
        config = dict(config, ssl_context=create_ssl_context(**config["ssl_context"]))
    consumer = aiokafka.AIOKafkaConsumer(topic, **config)
    await consumer.start()
    return consumer
async def put_results_to_kafka(producer: aiokafka.AIOKafkaProducer, event: Event):
    """Serialize *event* and publish it to ``config.KAFKA_TOPIC``.

    Awaits broker acknowledgement (``send_and_wait``).  ``config`` here
    is the ``service.config`` module, not the factories' parameter.
    """
    await producer.send_and_wait(config.KAFKA_TOPIC, event.serialize())
| 31.384615 | 86 | 0.761029 | import aiokafka
from aiokafka.helpers import create_ssl_context
from service import config
from service.entities import Event
async def kafka_producer_factory(config):
if config["ssl_context"]:
config = dict(config, ssl_context=create_ssl_context(**config["ssl_context"]))
producer = aiokafka.AIOKafkaProducer(**config)
await producer.start()
return producer
async def kafka_consumer_factory(topic, config):
if config["ssl_context"]:
config = dict(config, ssl_context=create_ssl_context(**config["ssl_context"]))
consumer = aiokafka.AIOKafkaConsumer(topic, **config)
await consumer.start()
return consumer
async def put_results_to_kafka(producer: aiokafka.AIOKafkaProducer, event: Event):
await producer.send_and_wait(config.KAFKA_TOPIC, event.serialize())
| true | true |
f7fee83ddebacf020bc16659880094a458ff4356 | 10,124 | py | Python | scripts/build_signal_pages.py | malariagen/agam-selection-atlas | 6150b47b4ec15558193912ef2612aaae48567c58 | [
"MIT"
] | 3 | 2018-03-02T14:53:48.000Z | 2019-12-25T23:46:59.000Z | scripts/build_signal_pages.py | malariagen/agam-selection-atlas | 6150b47b4ec15558193912ef2612aaae48567c58 | [
"MIT"
] | 23 | 2017-12-19T23:47:53.000Z | 2020-09-01T20:48:56.000Z | scripts/build_signal_pages.py | malariagen/agam-selection-atlas | 6150b47b4ec15558193912ef2612aaae48567c58 | [
"MIT"
] | 1 | 2019-08-22T21:40:19.000Z | 2019-08-22T21:40:19.000Z | # -*- coding: utf-8 -*-
from setup import *
def plot_signal_location(report, plot_width=900, plot_height=200):
    """Plot the signal's statistic values with the fitted peak model.

    Positions come from ``report['pos']`` (bp, plotted in Mbp).  Shaded
    spans mark the peak (blue), the focus (red) and a +/-10 kbp window
    around the epicenter (red).  Returns a bokeh figure restricted to
    x-axis pan/zoom.
    """
    fig = bplt.figure(title='Signal location', plot_width=plot_width,
                      plot_height=plot_height,
                      tools='xpan,xzoom_in,xzoom_out,xwheel_zoom,reset',
                      toolbar_location='above', active_drag='xpan',
                      active_scroll='xwheel_zoom')
    # convert bp to Mbp for plotting
    x = np.array(report['pos']) / 1e6
    y_data = np.array(report['values'])
    y_fit = np.array(report['best_fit'])
    peak_span = bmod.BoxAnnotation(left=report['peak_start']/1e6,
                                   right=report['peak_end']/1e6,
                                   level='underlay',
                                   fill_color='blue', fill_alpha=.1)
    focus_span = bmod.BoxAnnotation(left=report['focus_start']/1e6,
                                    right=report['focus_end']/1e6,
                                    level='underlay',
                                    fill_color='red', fill_alpha=.3)
    # +/-10 kbp window around the epicenter
    epicenter_span = bmod.BoxAnnotation(left=(report['epicenter'] - 10000)/1e6,
                                        right=(report['epicenter'] + 10000)/1e6,
                                        level='underlay', fill_color='red',
                                        fill_alpha=.3)
    fig.add_layout(peak_span)
    fig.add_layout(focus_span)
    fig.add_layout(epicenter_span)
    # fitted model as a dashed line, raw data as circles
    fig.line(x, y_fit, line_color='black', line_dash='dashed')
    fig.circle(x, y_data, alpha=1)
    # bound the range to prevent zooming out too far
    fig.x_range = bmod.Range1d(x[0], x[-1], bounds=(x[0], x[-1]))
    fig.xaxis.axis_label = \
        'Chromosome {} position (Mbp)'.format(report['chromosome'])
    fig.yaxis.axis_label = report['statistic']['id']
    return fig
def plot_genes(genes, chrom, start, end, fig=None, offset=0, x_range=None,
               plot_width=900, plot_height=100):
    """Plot a gene track for *chrom* between *start* and *end* (bp).

    When *fig* is None a new bokeh figure with hover/tap tools is
    created; otherwise genes are drawn onto the given figure (used by
    the recursive arm calls).  For joined chromosomes '2' and '3' the
    function recurses once per arm, shifting the L arm right by the R
    arm length via *offset*.  Returns the bokeh figure.
    """
    # setup figure (only on the initial, non-recursive call)
    if fig is None:
        hover = bmod.HoverTool(
            tooltips="<p>@label<br/>@seqid:@start{,}-@end{,}</p>")
        fig = bplt.figure(title='Genes', plot_width=plot_width,
                          plot_height=plot_height, x_range=x_range,
                          tools='xpan,xzoom_in,xzoom_out,xwheel_zoom,'
                                'reset,tap'.split() + [hover],
                          toolbar_location='above', active_drag='xpan',
                          active_scroll='xwheel_zoom')
        fig.xaxis.axis_label = 'Chromosome {} position (Mbp)'.format(chrom)
        # tapping a gene glyph opens its gene page
        url = '../../../../../gene/@id.html'
        taptool = fig.select(type=bmod.TapTool)
        taptool.callback = bmod.OpenURL(url=url)
    # handle joined chromosomes by recursing once per arm
    # TODO (thumps desk) there must be a better way!
    if chrom in '23':
        # plot R arm (on the left)
        rarm = '{}R'.format(chrom)
        rarm_len = len(genome[rarm])
        if start < rarm_len:
            rarm_start = start
            rarm_end = min(rarm_len, end)
            plot_genes(genes, rarm, rarm_start, rarm_end, fig=fig)
        # plot L arm (on the right), shifted by the R arm length
        larm = '{}L'.format(chrom)
        if end > rarm_len:
            larm_start = max(0, start - rarm_len)
            larm_end = end - rarm_len
            plot_genes(genes, larm, larm_start, larm_end, fig=fig,
                       offset=rarm_len)
        return fig
    # from here assume single arm
    seqid = chrom
    # genes overlapping the requested window
    df = genes[(genes.seqid == seqid) &
               (genes.end >= start) &
               (genes.start <= end)]
    # hover label: ID, optional name, optional trimmed description
    labels = [('{}'.format(gene.ID) +
               (' ({})'.format(gene.Name) if gene.Name else '') +
               (' - {}'.format(gene.description.split('[Source:')[0])
                if gene.description else ''))
              for _, gene in df.iterrows()]
    # forward-strand genes on the upper row, reverse-strand on the lower
    bottom = np.zeros(len(df))
    bottom[df.strand == '+'] = 1
    source = bmod.ColumnDataSource(data={
        'seqid': df.seqid,
        'start': df.start,
        'end': df.end,
        'left': (df.start + offset) / 1e6,
        'right': (df.end + offset) / 1e6,
        'bottom': bottom,
        'top': bottom + .8,
        'id': df.ID,
        'name': df.Name,
        'description': df.description,
        'label': labels,
    })
    fig.quad(bottom='bottom', top='top', left='left', right='right',
             source=source, line_width=0)
    fig.y_range = bmod.Range1d(-.5, 2.3)
    yticks = [0.4, 1.4]
    yticklabels = ['reverse', 'forward']
    fig.yaxis.ticker = yticks
    fig.yaxis.major_label_overrides = {k: v for k, v in zip(yticks, yticklabels)}
    fig.ygrid.visible = False
    return fig
def fig_signal_location(report, genes):
    """Stack the signal plot over a linked gene track in one grid figure."""
    signal_fig = plot_signal_location(report)
    signal_fig.xaxis.visible = False
    positions = report['pos']
    gene_fig = plot_genes(genes, report['chromosome'],
                          positions[0], positions[-1],
                          x_range=signal_fig.x_range)
    return blay.gridplot([[signal_fig], [gene_fig]], toolbar_location='above')
def build_signal_outputs(path, template, genes, signals, ir_candidates):
    """Render the report page and location plot for one selection signal.

    Reads the YAML report at *path*, augments it with overlapping and
    adjacent genes, related signals, and known loci, then writes an
    ``index.rst`` page and a ``peak_location.html`` bokeh plot under the
    mirrored ``docs/`` output directory.
    """
    # load the basic signal report
    with open(path, mode='rb') as report_file:
        # safe_load: report.yml holds plain data, and yaml.load() without
        # an explicit Loader is deprecated and unsafe on untrusted input
        report = yaml.safe_load(report_file)
    # figure out what chromosome arm
    chromosome = report['chromosome']
    epicenter = report['epicenter']
    epicenter_seqid, epicenter_coord = split_arms(chromosome, epicenter)
    # obtain focus
    focus_start = report['focus_start']
    focus_start_seqid, focus_start_coord = split_arms(chromosome, focus_start)
    focus_end = report['focus_end']
    focus_end_seqid, focus_end_coord = split_arms(chromosome, focus_end)
    # crude way to deal with rare case where focus spans centromere
    # TODO handle whole chromosomes
    if focus_start_seqid != epicenter_seqid:
        focus_start_coord = 1
    if focus_end_seqid != epicenter_seqid:
        focus_end_coord = len(genome[epicenter_seqid])
    report['min_flank_delta_aic'] = min(report['delta_aic_left'], report['delta_aic_right'])
    # augment report with gene information
    overlapping_genes = genes[(
        (genes.seqid == epicenter_seqid) &
        (genes.start <= focus_end_coord) &
        (genes.end >= focus_start_coord)
    )]
    report['overlapping_genes'] = [
        {'id': gene.ID,
         'name': gene.Name,
         'description': gene.description.split('[Source:')[0].strip()}
        for _, gene in overlapping_genes.iterrows()
    ]
    # genes within 50 kbp of the focus but not overlapping it
    adjacent_genes = genes[(
        (genes.seqid == epicenter_seqid) &
        ((genes.end < focus_start_coord) | (genes.start > focus_end_coord)) &
        (genes.start <= (focus_end_coord + 50000)) &
        (genes.end >= (focus_start_coord - 50000))
    )]
    report['adjacent_genes'] = [
        {'id': gene.ID,
         'name': gene.Name,
         'description': gene.description.split('[Source:')[0].strip()}
        for _, gene in adjacent_genes.iterrows()
    ]
    # augment report with related signals information
    # TODO this doesn't properly handle overlapping signals spanning a
    # centromere
    overlapping_signals = signals[(
        (signals.epicenter_seqid == epicenter_seqid) &
        (signals.focus_start_coord <= focus_end_coord) &
        (signals.focus_end_coord >= focus_start_coord) &
        # don't include self
        ((signals.pop_key != report['pop_key']) |
         (signals.statistic != report['statistic']['id']))
    )]
    report['overlapping_signals'] = overlapping_signals.to_dict(orient='records')
    overlapping_loci = [locus for locus in known_loci
                        if (locus['seqid'] == epicenter_seqid and
                            locus['start_coord'] <= focus_end_coord and
                            locus['end_coord'] >= focus_start_coord)]
    overlapping_loci_names = set([locus['short_name'] for locus in overlapping_loci])
    adjacent_loci = [locus for locus in known_loci
                     if (locus['seqid'] == epicenter_seqid and
                         locus['start_coord'] <= (focus_end_coord + 50000) and
                         locus['end_coord'] >= (focus_start_coord - 50000) and
                         locus['short_name'] not in overlapping_loci_names)]
    report['overlapping_loci'] = overlapping_loci
    report['adjacent_loci'] = adjacent_loci
    report['ir_candidates'] = ir_candidates
    # render the report page under docs/, mirroring the data path
    out_dir = os.path.join(
        'docs',
        os.path.dirname(path)[len('docs/_static/data/'):]
    )
    os.makedirs(out_dir, exist_ok=True)
    page_path = os.path.join(out_dir, 'index.rst')
    print('rendering', page_path)
    with open(page_path, mode='w') as page_file:
        print(template.render(**report), file=page_file)
    # render a bokeh signal plot
    fig = fig_signal_location(report, genes)
    script, div = bemb.components(fig)
    plot_path = os.path.join(out_dir, 'peak_location.html')
    print('rendering', plot_path)
    with open(plot_path, mode='w') as plot_file:
        print('<div class="bokeh-figure peak-location">', file=plot_file)
        print(script, file=plot_file)
        print(div, file=plot_file)
        print('</div>', file=plot_file)
def main():
    """Render a signal page and plot for every report.yml under docs/."""
    # setup jinja
    loader = jinja2.FileSystemLoader('templates')
    env = jinja2.Environment(loader=loader)
    template = env.get_template('signal.rst')
    # setup signals
    signals = pd.read_csv('docs/_static/data/signals.csv')
    # setup IR candidates: slug -> set of values from the first CSV column
    ir_candidates = {
        slug: (
            etl
            .fromtsv('docs/_static/data/ir-candidate-genes/{}.csv'.format(slug))
            .values(0).set()
        )
        for slug in ['metabolic', 'target_site', 'behavioural', 'cuticular']
    }
    # iterate over signal reports
    for path in sorted(glob('docs/_static/data/signal/*/*/*/*/report.yml')):
        build_signal_outputs(path, template, genes, signals, ir_candidates)
if __name__ == '__main__':
    main()
| 38.641221 | 92 | 0.584552 |
from setup import *
def plot_signal_location(report, plot_width=900, plot_height=200):
fig = bplt.figure(title='Signal location', plot_width=plot_width,
plot_height=plot_height,
tools='xpan,xzoom_in,xzoom_out,xwheel_zoom,reset',
toolbar_location='above', active_drag='xpan',
active_scroll='xwheel_zoom')
x = np.array(report['pos']) / 1e6
y_data = np.array(report['values'])
y_fit = np.array(report['best_fit'])
peak_span = bmod.BoxAnnotation(left=report['peak_start']/1e6,
right=report['peak_end']/1e6,
level='underlay',
fill_color='blue', fill_alpha=.1)
focus_span = bmod.BoxAnnotation(left=report['focus_start']/1e6,
right=report['focus_end']/1e6,
level='underlay',
fill_color='red', fill_alpha=.3)
epicenter_span = bmod.BoxAnnotation(left=(report['epicenter'] - 10000)/1e6,
right=(report['epicenter'] + 10000)/1e6,
level='underlay', fill_color='red',
fill_alpha=.3)
fig.add_layout(peak_span)
fig.add_layout(focus_span)
fig.add_layout(epicenter_span)
fig.line(x, y_fit, line_color='black', line_dash='dashed')
fig.circle(x, y_data, alpha=1)
fig.x_range = bmod.Range1d(x[0], x[-1], bounds=(x[0], x[-1]))
fig.xaxis.axis_label = \
'Chromosome {} position (Mbp)'.format(report['chromosome'])
fig.yaxis.axis_label = report['statistic']['id']
return fig
def plot_genes(genes, chrom, start, end, fig=None, offset=0, x_range=None,
plot_width=900, plot_height=100):
if fig is None:
hover = bmod.HoverTool(
tooltips="<p>@label<br/>@seqid:@start{,}-@end{,}</p>")
fig = bplt.figure(title='Genes', plot_width=plot_width,
plot_height=plot_height, x_range=x_range,
tools='xpan,xzoom_in,xzoom_out,xwheel_zoom,'
'reset,tap'.split() + [hover],
toolbar_location='above', active_drag='xpan',
active_scroll='xwheel_zoom')
fig.xaxis.axis_label = 'Chromosome {} position (Mbp)'.format(chrom)
url = '../../../../../gene/@id.html'
taptool = fig.select(type=bmod.TapTool)
taptool.callback = bmod.OpenURL(url=url)
if chrom in '23':
rarm = '{}R'.format(chrom)
rarm_len = len(genome[rarm])
if start < rarm_len:
rarm_start = start
rarm_end = min(rarm_len, end)
plot_genes(genes, rarm, rarm_start, rarm_end, fig=fig)
larm = '{}L'.format(chrom)
if end > rarm_len:
larm_start = max(0, start - rarm_len)
larm_end = end - rarm_len
plot_genes(genes, larm, larm_start, larm_end, fig=fig,
offset=rarm_len)
return fig
seqid = chrom
df = genes[(genes.seqid == seqid) &
(genes.end >= start) &
(genes.start <= end)]
labels = [('{}'.format(gene.ID) +
(' ({})'.format(gene.Name) if gene.Name else '') +
(' - {}'.format(gene.description.split('[Source:')[0])
if gene.description else ''))
for _, gene in df.iterrows()]
bottom = np.zeros(len(df))
bottom[df.strand == '+'] = 1
source = bmod.ColumnDataSource(data={
'seqid': df.seqid,
'start': df.start,
'end': df.end,
'left': (df.start + offset) / 1e6,
'right': (df.end + offset) / 1e6,
'bottom': bottom,
'top': bottom + .8,
'id': df.ID,
'name': df.Name,
'description': df.description,
'label': labels,
})
fig.quad(bottom='bottom', top='top', left='left', right='right',
source=source, line_width=0)
fig.y_range = bmod.Range1d(-.5, 2.3)
yticks = [0.4, 1.4]
yticklabels = ['reverse', 'forward']
fig.yaxis.ticker = yticks
fig.yaxis.major_label_overrides = {k: v for k, v in zip(yticks, yticklabels)}
fig.ygrid.visible = False
return fig
def fig_signal_location(report, genes):
fig1 = plot_signal_location(report)
chrom = report['chromosome']
start = report['pos'][0]
end = report['pos'][-1]
fig1.xaxis.visible = False
fig2 = plot_genes(genes, chrom, start, end, x_range=fig1.x_range)
gfig = blay.gridplot([[fig1], [fig2]], toolbar_location='above')
return gfig
def build_signal_outputs(path, template, genes, signals, ir_candidates):
with open(path, mode='rb') as report_file:
report = yaml.load(report_file)
chromosome = report['chromosome']
epicenter = report['epicenter']
epicenter_seqid, epicenter_coord = split_arms(chromosome, epicenter)
focus_start = report['focus_start']
focus_start_seqid, focus_start_coord = split_arms(chromosome, focus_start)
focus_end = report['focus_end']
focus_end_seqid, focus_end_coord = split_arms(chromosome, focus_end)
if focus_start_seqid != epicenter_seqid:
focus_start_coord = 1
if focus_end_seqid != epicenter_seqid:
focus_end_coord = len(genome[epicenter_seqid])
report['min_flank_delta_aic'] = min(report['delta_aic_left'], report['delta_aic_right'])
overlapping_genes = genes[(
(genes.seqid == epicenter_seqid) &
(genes.start <= focus_end_coord) &
(genes.end >= focus_start_coord)
)]
report['overlapping_genes'] = [
{'id': gene.ID,
'name': gene.Name,
'description': gene.description.split('[Source:')[0].strip()}
for _, gene in overlapping_genes.iterrows()
]
adjacent_genes = genes[(
(genes.seqid == epicenter_seqid) &
((genes.end < focus_start_coord) | (genes.start > focus_end_coord)) &
(genes.start <= (focus_end_coord + 50000)) &
(genes.end >= (focus_start_coord - 50000))
)]
report['adjacent_genes'] = [
{'id': gene.ID,
'name': gene.Name,
'description': gene.description.split('[Source:')[0].strip()}
for _, gene in adjacent_genes.iterrows()
]
# centromere
overlapping_signals = signals[(
(signals.epicenter_seqid == epicenter_seqid) &
(signals.focus_start_coord <= focus_end_coord) &
(signals.focus_end_coord >= focus_start_coord) &
# don't include self
((signals.pop_key != report['pop_key']) |
(signals.statistic != report['statistic']['id']))
)]
report['overlapping_signals'] = overlapping_signals.to_dict(orient='records')
overlapping_loci = [locus for locus in known_loci
if (locus['seqid'] == epicenter_seqid and
locus['start_coord'] <= focus_end_coord and
locus['end_coord'] >= focus_start_coord)]
overlapping_loci_names = set([locus['short_name'] for locus in overlapping_loci])
adjacent_loci = [locus for locus in known_loci
if (locus['seqid'] == epicenter_seqid and
locus['start_coord'] <= (focus_end_coord + 50000) and
locus['end_coord'] >= (focus_start_coord - 50000) and
locus['short_name'] not in overlapping_loci_names)]
report['overlapping_loci'] = overlapping_loci
report['adjacent_loci'] = adjacent_loci
report['ir_candidates'] = ir_candidates
out_dir = os.path.join(
'docs',
os.path.dirname(path)[len('docs/_static/data/'):]
)
os.makedirs(out_dir, exist_ok=True)
page_path = os.path.join(out_dir, 'index.rst')
print('rendering', page_path)
with open(page_path, mode='w') as page_file:
print(template.render(**report), file=page_file)
fig = fig_signal_location(report, genes)
script, div = bemb.components(fig)
plot_path = os.path.join(out_dir, 'peak_location.html')
print('rendering', plot_path)
with open(plot_path, mode='w') as plot_file:
print('<div class="bokeh-figure peak-location">', file=plot_file)
print(script, file=plot_file)
print(div, file=plot_file)
print('</div>', file=plot_file)
def main():
loader = jinja2.FileSystemLoader('templates')
env = jinja2.Environment(loader=loader)
template = env.get_template('signal.rst')
signals = pd.read_csv('docs/_static/data/signals.csv')
ir_candidates = {
slug: (
etl
.fromtsv('docs/_static/data/ir-candidate-genes/{}.csv'.format(slug))
.values(0).set()
)
for slug in ['metabolic', 'target_site', 'behavioural', 'cuticular']
}
for path in sorted(glob('docs/_static/data/signal/*/*/*/*/report.yml')):
build_signal_outputs(path, template, genes, signals, ir_candidates)
if __name__ == '__main__':
main()
| true | true |
f7fee843617de4c7be474b3092ae4adad1257c81 | 208 | py | Python | test.py | j0nm1/pipeline-python-demo-issue | 3706c043950d2e9570140f83cc4b05e9c9b6a29f | [
"MIT"
] | null | null | null | test.py | j0nm1/pipeline-python-demo-issue | 3706c043950d2e9570140f83cc4b05e9c9b6a29f | [
"MIT"
] | 6 | 2020-05-30T13:55:57.000Z | 2021-10-20T06:11:52.000Z | test.py | j0nm1/pipeline-python-demo-issue | 3706c043950d2e9570140f83cc4b05e9c9b6a29f | [
"MIT"
] | 1 | 2020-06-26T10:49:51.000Z | 2020-06-26T10:49:51.000Z | from unittest import TestCase
from pet import Pet
class PetTestCase(TestCase):
    """Unit tests for the ``Pet`` class."""
    def setUp(self):
        # fresh Pet instance for each test method
        self.pet = Pet("Test")
    def test_name(self):
        # the constructor argument is stored as the pet's name
        self.assertEqual(self.pet.name, "Test")
| 17.333333 | 47 | 0.663462 | from unittest import TestCase
from pet import Pet
class PetTestCase(TestCase):
def setUp(self):
self.pet = Pet("Test")
def test_name(self):
self.assertEqual(self.pet.name, "Test")
| true | true |
f7fee8725e952074176c0e9062ca9ee462550c9e | 586 | py | Python | simulation_ws/build/deepracer_msgs/catkin_generated/pkg.develspace.context.pc.py | we437b/dr_logger | fa47ca74dc46b8ddd596b7255b23f24034db45ac | [
"MIT"
] | null | null | null | simulation_ws/build/deepracer_msgs/catkin_generated/pkg.develspace.context.pc.py | we437b/dr_logger | fa47ca74dc46b8ddd596b7255b23f24034db45ac | [
"MIT"
] | null | null | null | simulation_ws/build/deepracer_msgs/catkin_generated/pkg.develspace.context.pc.py | we437b/dr_logger | fa47ca74dc46b8ddd596b7255b23f24034db45ac | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/inspacehj/dr_logger/simulation_ws/build/deepracer_msgs/devel/include".split(';') if "/home/inspacehj/dr_logger/simulation_ws/build/deepracer_msgs/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "deepracer_msgs"
PROJECT_SPACE_DIR = "/home/inspacehj/dr_logger/simulation_ws/build/deepracer_msgs/devel"
PROJECT_VERSION = "1.0.0"
| 65.111111 | 215 | 0.778157 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/inspacehj/dr_logger/simulation_ws/build/deepracer_msgs/devel/include".split(';') if "/home/inspacehj/dr_logger/simulation_ws/build/deepracer_msgs/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "deepracer_msgs"
PROJECT_SPACE_DIR = "/home/inspacehj/dr_logger/simulation_ws/build/deepracer_msgs/devel"
PROJECT_VERSION = "1.0.0"
| true | true |
f7fee96a3b931fbe9af77c83bdfe78a8174a72c9 | 6,582 | py | Python | tests/h/services/user_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | 2,103 | 2015-01-07T12:47:49.000Z | 2022-03-29T02:38:25.000Z | tests/h/services/user_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | 4,322 | 2015-01-04T17:18:01.000Z | 2022-03-31T17:06:02.000Z | tests/h/services/user_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | 389 | 2015-01-24T04:10:02.000Z | 2022-03-28T08:00:16.000Z | import pytest
from h.models import User
from h.services.user import UserNotActivated, UserService, user_service_factory
@pytest.mark.usefixtures("users")
class TestUserService:
def test_fetch_retrieves_user_by_userid(self, svc):
result = svc.fetch("acct:jacqui@foo.com")
assert isinstance(result, User)
def test_fetch_retrieves_user_by_username_and_authority(self, svc):
result = svc.fetch("jacqui", "foo.com")
assert isinstance(result, User)
def test_fetch_caches_fetched_users(self, db_session, svc, users):
jacqui, _, _, _ = users
svc.fetch("acct:jacqui@foo.com")
db_session.delete(jacqui)
db_session.flush()
user = svc.fetch("acct:jacqui@foo.com")
assert user is not None
assert user.username == "jacqui"
def test_fetch_all_retrieves_users_by_userid(self, svc):
result = svc.fetch_all(["acct:jacqui@foo.com", "acct:steve@example.com"])
assert len(result) == 2
assert isinstance(result[0], User)
assert isinstance(result[1], User)
def test_fetch_all_caches_fetched_users(self, db_session, svc, users):
jacqui, _, _, _ = users
svc.fetch_all(["acct:jacqui@foo.com"])
db_session.delete(jacqui)
db_session.flush()
result = svc.fetch_all(["acct:jacqui@foo.com"])
assert len(result) == 1
assert result[0].username == "jacqui"
def test_fetch_by_identity_finds_by_provider_info(self, svc, users):
_, _, _, freddo = users
assert svc.fetch_by_identity("provider_a", "123") is freddo
assert svc.fetch_by_identity("provider_b", "456") is freddo
def test_fetch_by_identity_returns_none_if_no_match(self, svc):
assert svc.fetch_by_identity("nonsense", "abc") is None
def test_fetch_for_login_by_username(self, svc, users):
_, steve, _, _ = users
assert svc.fetch_for_login("steve") is steve
def test_fetch_for_login_by_email(self, svc, users):
_, steve, _, _ = users
assert svc.fetch_for_login("steve@steveo.com") is steve
assert svc.fetch_for_login("StEvE@steveo.COM") is steve
def test_fetch_for_login_by_username_wrong_authority(self, svc):
assert svc.fetch_for_login("jacqui") is None
def test_fetch_for_login_by_email_wrong_authority(self, svc):
assert svc.fetch_for_login("jacqui@jj.com") is None
def test_fetch_for_login_by_username_not_activated(self, svc):
with pytest.raises(UserNotActivated):
svc.fetch_for_login("mirthe")
def test_fetch_for_login_by_email_not_activated(self, svc):
with pytest.raises(UserNotActivated):
svc.fetch_for_login("mirthe@deboer.com")
def test_update_preferences_tutorial_enable(self, svc, factories):
user = factories.User.build(sidebar_tutorial_dismissed=True)
svc.update_preferences(user, show_sidebar_tutorial=True)
assert not user.sidebar_tutorial_dismissed
def test_update_preferences_tutorial_disable(self, svc, factories):
user = factories.User.build(sidebar_tutorial_dismissed=False)
svc.update_preferences(user, show_sidebar_tutorial=False)
assert user.sidebar_tutorial_dismissed is True
def test_update_preferences_raises_for_unsupported_keys(self, svc, factories):
user = factories.User.build()
with pytest.raises(TypeError) as exc:
svc.update_preferences(user, foo="bar", baz="qux")
assert "keys baz, foo are not allowed" in str(exc.value)
def test_sets_up_cache_clearing_on_transaction_end(self, patch, db_session):
decorator = patch("h.services.user.on_transaction_end")
UserService(default_authority="example.com", session=db_session)
decorator.assert_called_once_with(db_session)
def test_clears_cache_on_transaction_end(self, patch, db_session, users):
funcs = {}
# We need to capture the inline `clear_cache` function so we can
# call it manually later
def on_transaction_end_decorator(session): # pylint:disable=unused-argument
def on_transaction_end(func):
funcs["clear_cache"] = func
return on_transaction_end
decorator = patch("h.services.user.on_transaction_end")
decorator.side_effect = on_transaction_end_decorator
jacqui, _, _, _ = users
svc = UserService(default_authority="example.com", session=db_session)
svc.fetch("acct:jacqui@foo.com")
db_session.delete(jacqui)
funcs["clear_cache"]()
user = svc.fetch("acct:jacqui@foo.com")
assert user is None
@pytest.fixture
def svc(self, db_session):
return UserService(default_authority="example.com", session=db_session)
@pytest.fixture
def users(self, db_session, factories):
user_with_identities = factories.User(
username="frederick", email="freddo@example.com", authority="example.com"
)
user_with_identities.identities = [
factories.UserIdentity(
provider="provider_a",
provider_unique_id="123",
user=user_with_identities,
),
factories.UserIdentity(
provider="provider_b",
provider_unique_id="456",
user=user_with_identities,
),
]
users = [
factories.User(
username="jacqui", email="jacqui@jj.com", authority="foo.com"
),
factories.User(
username="steve", email="steve@steveo.com", authority="example.com"
),
factories.User(
username="mirthe",
email="mirthe@deboer.com",
authority="example.com",
inactive=True,
),
user_with_identities,
]
db_session.flush()
return users
class TestUserServiceFactory:
    """Tests for ``h.services.user.user_service_factory``."""
    def test_returns_user_service(self, pyramid_request):
        svc = user_service_factory(None, pyramid_request)
        assert isinstance(svc, UserService)
    def test_provides_request_default_authority_as_default_authority(
        self, pyramid_request
    ):
        svc = user_service_factory(None, pyramid_request)
        assert svc.default_authority == pyramid_request.default_authority
    def test_provides_request_db_as_session(self, pyramid_request):
        svc = user_service_factory(None, pyramid_request)
        assert svc.session == pyramid_request.db
| 34.642105 | 85 | 0.665299 | import pytest
from h.models import User
from h.services.user import UserNotActivated, UserService, user_service_factory
@pytest.mark.usefixtures("users")
class TestUserService:
    """Unit tests for the UserService fetch/login/preferences API."""

    def test_fetch_retrieves_user_by_userid(self, svc):
        result = svc.fetch("acct:jacqui@foo.com")
        assert isinstance(result, User)

    def test_fetch_retrieves_user_by_username_and_authority(self, svc):
        result = svc.fetch("jacqui", "foo.com")
        assert isinstance(result, User)

    def test_fetch_caches_fetched_users(self, db_session, svc, users):
        jacqui, _, _, _ = users
        svc.fetch("acct:jacqui@foo.com")
        db_session.delete(jacqui)
        db_session.flush()
        # The row is gone from the DB, so a non-None result here proves the
        # service answered from its internal cache.
        user = svc.fetch("acct:jacqui@foo.com")
        assert user is not None
        assert user.username == "jacqui"

    def test_fetch_all_retrieves_users_by_userid(self, svc):
        result = svc.fetch_all(["acct:jacqui@foo.com", "acct:steve@example.com"])
        assert len(result) == 2
        assert isinstance(result[0], User)
        assert isinstance(result[1], User)

    def test_fetch_all_caches_fetched_users(self, db_session, svc, users):
        jacqui, _, _, _ = users
        svc.fetch_all(["acct:jacqui@foo.com"])
        db_session.delete(jacqui)
        db_session.flush()
        # Same caching contract as fetch(): the deleted user is still served.
        result = svc.fetch_all(["acct:jacqui@foo.com"])
        assert len(result) == 1
        assert result[0].username == "jacqui"

    def test_fetch_by_identity_finds_by_provider_info(self, svc, users):
        _, _, _, freddo = users
        assert svc.fetch_by_identity("provider_a", "123") is freddo
        assert svc.fetch_by_identity("provider_b", "456") is freddo

    def test_fetch_by_identity_returns_none_if_no_match(self, svc):
        assert svc.fetch_by_identity("nonsense", "abc") is None

    def test_fetch_for_login_by_username(self, svc, users):
        _, steve, _, _ = users
        assert svc.fetch_for_login("steve") is steve

    def test_fetch_for_login_by_email(self, svc, users):
        _, steve, _, _ = users
        # The second assertion pins case-insensitive email lookup.
        assert svc.fetch_for_login("steve@steveo.com") is steve
        assert svc.fetch_for_login("StEvE@steveo.COM") is steve

    def test_fetch_for_login_by_username_wrong_authority(self, svc):
        # jacqui belongs to "foo.com", not the service's default authority.
        assert svc.fetch_for_login("jacqui") is None

    def test_fetch_for_login_by_email_wrong_authority(self, svc):
        assert svc.fetch_for_login("jacqui@jj.com") is None

    def test_fetch_for_login_by_username_not_activated(self, svc):
        # mirthe is created inactive by the users fixture.
        with pytest.raises(UserNotActivated):
            svc.fetch_for_login("mirthe")

    def test_fetch_for_login_by_email_not_activated(self, svc):
        with pytest.raises(UserNotActivated):
            svc.fetch_for_login("mirthe@deboer.com")

    def test_update_preferences_tutorial_enable(self, svc, factories):
        user = factories.User.build(sidebar_tutorial_dismissed=True)
        svc.update_preferences(user, show_sidebar_tutorial=True)
        assert not user.sidebar_tutorial_dismissed

    def test_update_preferences_tutorial_disable(self, svc, factories):
        user = factories.User.build(sidebar_tutorial_dismissed=False)
        svc.update_preferences(user, show_sidebar_tutorial=False)
        assert user.sidebar_tutorial_dismissed is True

    def test_update_preferences_raises_for_unsupported_keys(self, svc, factories):
        user = factories.User.build()
        with pytest.raises(TypeError) as exc:
            svc.update_preferences(user, foo="bar", baz="qux")
        assert "keys baz, foo are not allowed" in str(exc.value)

    def test_sets_up_cache_clearing_on_transaction_end(self, patch, db_session):
        decorator = patch("h.services.user.on_transaction_end")
        UserService(default_authority="example.com", session=db_session)
        decorator.assert_called_once_with(db_session)

    def test_clears_cache_on_transaction_end(self, patch, db_session, users):
        funcs = {}

        # Capture the function the service registers for transaction end so
        # the test can invoke it by hand below.
        def on_transaction_end_decorator(session):
            def on_transaction_end(func):
                funcs["clear_cache"] = func

            return on_transaction_end

        decorator = patch("h.services.user.on_transaction_end")
        decorator.side_effect = on_transaction_end_decorator
        jacqui, _, _, _ = users
        svc = UserService(default_authority="example.com", session=db_session)
        svc.fetch("acct:jacqui@foo.com")
        db_session.delete(jacqui)
        # Simulate the transaction ending: the cache must be dropped, so the
        # deleted user can no longer be fetched.
        funcs["clear_cache"]()
        user = svc.fetch("acct:jacqui@foo.com")
        assert user is None

    @pytest.fixture
    def svc(self, db_session):
        # Service under test, bound to the test DB session.
        return UserService(default_authority="example.com", session=db_session)

    @pytest.fixture
    def users(self, db_session, factories):
        # Order matters: tests unpack this as (jacqui, steve, mirthe, frederick).
        user_with_identities = factories.User(
            username="frederick", email="freddo@example.com", authority="example.com"
        )
        user_with_identities.identities = [
            factories.UserIdentity(
                provider="provider_a",
                provider_unique_id="123",
                user=user_with_identities,
            ),
            factories.UserIdentity(
                provider="provider_b",
                provider_unique_id="456",
                user=user_with_identities,
            ),
        ]
        users = [
            factories.User(
                username="jacqui", email="jacqui@jj.com", authority="foo.com"
            ),
            factories.User(
                username="steve", email="steve@steveo.com", authority="example.com"
            ),
            # Inactive user for the UserNotActivated tests.
            factories.User(
                username="mirthe",
                email="mirthe@deboer.com",
                authority="example.com",
                inactive=True,
            ),
            user_with_identities,
        ]
        db_session.flush()
        return users
class TestUserServiceFactory:
    """The factory must build a UserService wired from the request."""

    def test_returns_user_service(self, pyramid_request):
        result = user_service_factory(None, pyramid_request)
        assert isinstance(result, UserService)

    def test_provides_request_default_authority_as_default_authority(
        self, pyramid_request
    ):
        result = user_service_factory(None, pyramid_request)
        assert result.default_authority == pyramid_request.default_authority

    def test_provides_request_db_as_session(self, pyramid_request):
        result = user_service_factory(None, pyramid_request)
        assert result.session == pyramid_request.db
| true | true |
f7fee991e3b32f7c2dcb42a61ae593b8d2d82fe8 | 2,384 | py | Python | boundary/sampling/normal.py | shiyuchengTJU/CISA | 81c43d2ee2ba1224e307baf636f1c87bfddd2ffa | [
"MIT"
] | null | null | null | boundary/sampling/normal.py | shiyuchengTJU/CISA | 81c43d2ee2ba1224e307baf636f1c87bfddd2ffa | [
"MIT"
] | null | null | null | boundary/sampling/normal.py | shiyuchengTJU/CISA | 81c43d2ee2ba1224e307baf636f1c87bfddd2ffa | [
"MIT"
] | null | null | null | """
Noise generation from Normal distributions.
"""
import numpy as np
def sample_hypersphere(n_samples, sample_shape, radius, l_norm=2, mode='sphere', sample_gen=None, seed=None):
    """
    Uniformly sample the surface (or volume) of a hypersphere.

    Uniform picking: draw an n-dimensional normal distribution and normalize it
    to the desired radius. See http://mathworld.wolfram.com/HyperspherePointPicking.html

    WARNING: this is probably not correct for other norms!! We should check it out
    carefully if we don't use L2.

    :param n_samples: number of image samples to generate.
    :param sample_shape: shape of a single image sample.
    :param radius: radius(=eps) of the hypersphere.
    :param l_norm: L-norm.
    :param mode: if 'sphere', then samples the surface of the eps-sphere. If 'ball',
                 then samples the volume of the eps-ball (uniform w.r.t. L2).
    :param sample_gen: If provided, retrieves random numbers from this generator.
    :param seed: seed for the random generator. Cannot be used with the sample generator.
    :raises ValueError: if `mode` is neither 'sphere' nor 'ball'.
    :return: Batch of image samples, shape: (n_samples,) + sample_shape
    """
    # Validate the mode up front, before any random numbers are consumed.
    if mode not in ('sphere', 'ball'):
        raise ValueError("Unknown sampling mode.")

    n_dims = int(np.prod(sample_shape))

    if sample_gen is not None:
        assert seed is None, "Can't provide individual seeds if using the multi-threaded generator."
        assert sample_shape == sample_gen.shape
        # Get precalculated samples from the generator.
        gauss = np.empty(shape=(n_samples, n_dims), dtype=np.float64)
        for i in range(n_samples):
            gauss[i] = sample_gen.get_normal().reshape(-1)
    else:
        if seed is not None:
            np.random.seed(seed)
        gauss = np.random.normal(size=(n_samples, n_dims))

    # Normalize each row to lie on the unit sphere of the chosen norm.
    norm = np.linalg.norm(gauss, ord=l_norm, axis=1)
    perturbation = gauss / norm[:, np.newaxis]

    if mode == 'sphere':
        perturbation *= radius
    else:  # mode == 'ball'
        # BUGFIX: scale the radius by U**(1/d) so the points are uniform in the
        # *volume* of the ball. Plain `uniform(0, radius)` (the previous code)
        # concentrates mass near the centre. Exact for the L2 ball.
        u = np.random.uniform(low=0.0, high=1.0, size=(n_samples, 1))
        perturbation *= radius * u ** (1.0 / n_dims)

    return np.reshape(perturbation, (n_samples,) + sample_shape)
import numpy as np
def sample_hypersphere(n_samples, sample_shape, radius, l_norm=2, mode='sphere', sample_gen=None, seed=None):
if sample_gen is not None:
assert seed is None, "Can't provide individual seeds if using the multi-threaded generator."
assert sample_shape == sample_gen.shape
# Get precalculated samples from the generator
gauss = np.empty(shape=(n_samples, np.prod(sample_shape)), dtype=np.float64)
for i in range(n_samples):
gauss[i] = sample_gen.get_normal().reshape(-1)
else:
if seed is not None:
np.random.seed(seed)
gauss = np.random.normal(size=(n_samples, np.prod(sample_shape)))
# Norm to
norm = np.linalg.norm(gauss, ord=l_norm, axis=1)
perturbation = (gauss / norm[:, np.newaxis])
# Sphere: sample only the surface of the hypersphere.
# Ball: sample inside the sphere. Note: this is probably not uniform.
if mode == 'sphere':
perturbation *= radius
elif mode == 'ball':
perturbation *= np.random.uniform(low=0.0, high=radius, size=(n_samples, 1))
else:
raise ValueError("Unknown sampling mode.")
perturbation = np.reshape(perturbation, (n_samples,) + sample_shape)
return perturbation | true | true |
f7feeb89e2e53161f7912de60416b46973da8272 | 3,406 | py | Python | src/gtk3/headerbar/MainWindow.py | alexandrebarbaruiva/gui-python-gtk | 7b8e8ab05645271ae55e1e2165eefc9c8f5b0250 | [
"MIT"
] | 42 | 2020-05-09T16:23:23.000Z | 2022-03-28T13:05:32.000Z | src/gtk3/headerbar/MainWindow.py | alexandrebarbaruiva/gui-python-gtk | 7b8e8ab05645271ae55e1e2165eefc9c8f5b0250 | [
"MIT"
] | 2 | 2020-05-27T19:23:54.000Z | 2022-03-08T01:42:59.000Z | src/gtk3/headerbar/MainWindow.py | alexandrebarbaruiva/gui-python-gtk | 7b8e8ab05645271ae55e1e2165eefc9c8f5b0250 | [
"MIT"
] | 8 | 2020-05-09T16:23:28.000Z | 2022-03-31T22:44:45.000Z | # -*- coding: utf-8 -*-
"""Gtk.HeaderBar()."""
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gio, Gtk
class MainWindow(Gtk.ApplicationWindow):
    """Demo window showing how to configure a Gtk.HeaderBar."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.set_title(title='Gtk.HeaderBar')
        self.set_default_size(width=1366 / 2, height=768 / 2)
        self.set_position(position=Gtk.WindowPosition.CENTER)
        self.set_default_icon_from_file(filename='../../assets/icons/icon.png')
        self.set_border_width(border_width=12)

        headerbar = Gtk.HeaderBar.new()
        # Title shown on the bar; it overrides the main window's title.
        headerbar.set_title(title='Gtk.HeaderBar')
        # Subtitle for the HeaderBar.
        headerbar.set_subtitle(subtitle='Gtk.HeaderBar')
        # Show the minimize/maximize/close buttons (False by default).
        headerbar.set_show_close_button(setting=True)
        # Install the HeaderBar as the main window's title bar.
        self.set_titlebar(titlebar=headerbar)

        button_mail = Gtk.Button.new_from_icon_name(
            icon_name='mail-send-receive-symbolic',
            size=Gtk.IconSize.BUTTON
        )
        button_mail.connect('clicked', self._send_mail)
        headerbar.pack_end(child=button_mail)

        # Horizontal box so the two arrow buttons stay nicely grouped.
        hbox = Gtk.Box.new(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
        # Add the box at the start (left side) of the HeaderBar.
        headerbar.pack_start(child=hbox)

        button_left_arrow = Gtk.Button.new_from_icon_name(
            icon_name='go-previous-symbolic',
            size=Gtk.IconSize.BUTTON
        )
        button_left_arrow.connect('clicked', self._left_arrow)
        hbox.add(widget=button_left_arrow)

        button_right_arrow = Gtk.Button.new_from_icon_name(
            icon_name='go-previous-symbolic-rtl',
            size=Gtk.IconSize.BUTTON
        )
        button_right_arrow.connect('clicked', self._right_arrow)
        hbox.add(widget=button_right_arrow)

        # TextView added just so the window body isn't empty.
        text_view = Gtk.TextView.new()
        self.add(widget=text_view)

        self.show_all()

    @staticmethod
    def _send_mail(widget):
        print('Você clicou no botão que tem o icone de enviar/receber email')

    @staticmethod
    def _left_arrow(widget):
        print('Você clicou no botão que tem uma seta para a esquerda')

    @staticmethod
    def _right_arrow(widget):
        print('Você clicou no botão que tem uma seta para a direita')
class Application(Gtk.Application):
    """Gtk application wiring for the HeaderBar demo window."""

    def __init__(self):
        super().__init__(
            application_id='br.natorsc.Exemplo',
            flags=Gio.ApplicationFlags.FLAGS_NONE,
        )

    def do_startup(self):
        Gtk.Application.do_startup(self)

    def do_activate(self):
        # Reuse the existing window on re-activation, otherwise create one.
        window = self.props.active_window
        if not window:
            window = MainWindow(application=self)
        window.present()

    def do_shutdown(self):
        Gtk.Application.do_shutdown(self)


if __name__ == '__main__':
    import sys

    app = Application()
    app.run(sys.argv)
| 32.438095 | 79 | 0.660599 |
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gio, Gtk
class MainWindow(Gtk.ApplicationWindow):
    """Application window demonstrating a Gtk.HeaderBar with custom buttons."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.set_title(title='Gtk.HeaderBar')
        self.set_default_size(width=1366 / 2, height=768 / 2)
        self.set_position(position=Gtk.WindowPosition.CENTER)
        self.set_default_icon_from_file(filename='../../assets/icons/icon.png')
        self.set_border_width(border_width=12)

        headerbar = Gtk.HeaderBar.new()
        # The bar's own title takes precedence over the window title.
        headerbar.set_title(title='Gtk.HeaderBar')
        headerbar.set_subtitle(subtitle='Gtk.HeaderBar')
        # Without this the window-control buttons are hidden.
        headerbar.set_show_close_button(setting=True)
        self.set_titlebar(titlebar=headerbar)

        # Mail button packed at the end (right side) of the bar.
        button_mail = Gtk.Button.new_from_icon_name(
            icon_name='mail-send-receive-symbolic',
            size=Gtk.IconSize.BUTTON
        )
        button_mail.connect('clicked', self._send_mail)
        headerbar.pack_end(child=button_mail)

        # Box grouping the two navigation arrows at the start of the bar.
        hbox = Gtk.Box.new(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
        headerbar.pack_start(child=hbox)

        button_left_arrow = Gtk.Button.new_from_icon_name(
            icon_name='go-previous-symbolic',
            size=Gtk.IconSize.BUTTON
        )
        button_left_arrow.connect('clicked', self._left_arrow)
        hbox.add(widget=button_left_arrow)

        button_right_arrow = Gtk.Button.new_from_icon_name(
            icon_name='go-previous-symbolic-rtl',
            size=Gtk.IconSize.BUTTON
        )
        button_right_arrow.connect('clicked', self._right_arrow)
        hbox.add(widget=button_right_arrow)

        # Placeholder content for the window body.
        text_view = Gtk.TextView.new()
        self.add(widget=text_view)

        self.show_all()

    @staticmethod
    def _send_mail(widget):
        print('Você clicou no botão que tem o icone de enviar/receber email')

    @staticmethod
    def _left_arrow(widget):
        print('Você clicou no botão que tem uma seta para a esquerda')

    @staticmethod
    def _right_arrow(widget):
        print('Você clicou no botão que tem uma seta para a direita')
class Application(Gtk.Application):
    """Application shell: creates/presents the MainWindow on activation."""

    def __init__(self):
        super().__init__(
            application_id='br.natorsc.Exemplo',
            flags=Gio.ApplicationFlags.FLAGS_NONE,
        )

    def do_startup(self):
        Gtk.Application.do_startup(self)

    def do_activate(self):
        active = self.props.active_window
        if not active:
            active = MainWindow(application=self)
        active.present()

    def do_shutdown(self):
        Gtk.Application.do_shutdown(self)


if __name__ == '__main__':
    import sys

    Application().run(sys.argv)
| true | true |
f7feec529c6ae142cb1e6f18d183549241187c20 | 10,691 | py | Python | tests/optim/test_oss.py | joshim5/fairscale | 1c2a6f6b46646866f3e86d628b8a4ca437f68215 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/optim/test_oss.py | joshim5/fairscale | 1c2a6f6b46646866f3e86d628b8a4ca437f68215 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/optim/test_oss.py | joshim5/fairscale | 1c2a6f6b46646866f3e86d628b8a4ca437f68215 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import os
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import fairscale.optim as optim
# Skip GPU-only tests when no CUDA device is present.
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

# NCCL only works across GPUs; fall back to Gloo/CPU otherwise.
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO  # type: ignore
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")


def setup_module(module):
    """Create a single-process default group so in-process tests can use dist."""
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "29500"
    dist.init_process_group(backend=BACKEND, rank=0, world_size=1)


def dist_init(rank, world_size):
    """Process-group init for spawned workers.

    Uses a different port (29501) than setup_module so the spawned group
    does not collide with the module-level one.
    """
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "29501"
    dist.init_process_group(backend=BACKEND, rank=rank, world_size=world_size)


def test_create():
    # Smoke test: constructing the sharded optimizer must not raise.
    params = [torch.rand(1)]
    o = optim.OSS(params, lr=0.01)
def test_state_dict():
    """state_dict() must be PyTorch-compliant and round-trip via load_state_dict()."""
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], lr=0.1, momentum=0.9)
    x.backward()
    o.step()
    # SGD with lr=0.1 and grad 1.0: x -> 0.9, momentum buffer -> 1.0.
    assert x == torch.tensor([0.9], device=DEVICE)
    assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)

    o.zero_grad()
    o.consolidate_state_dict()  # Sync state dict in between replicas - even if there are none
    state_dict = o.state_dict()

    # Check that the state dict is pytorch-compliant key wise
    assert "param_groups" in state_dict.keys()
    assert "state" in state_dict.keys()

    # Check that the pulled state is what we expect, and that we have all the expected keys
    assert state_dict["param_groups"][0]["lr"] == 0.1
    assert state_dict["param_groups"][0]["momentum"] == 0.9
    assert not state_dict["param_groups"][0]["nesterov"]
    assert state_dict["param_groups"][0]["weight_decay"] == 0.0
    assert state_dict["param_groups"][0]["dampening"] == 0.0

    # Check that the pulled state and the .param_groups attribute are in sync
    for k in state_dict["param_groups"][0].keys():
        if k != "params":
            assert state_dict["param_groups"][0][k] == o.param_groups[0][k]

    # Check that it's correctly loaded
    o = optim.OSS([x], lr=0.01)
    o.load_state_dict(state_dict)
    # Check that state is correct and on proper device
    assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)

    # We should now be using a lr of 0.1, both within the optimizer
    # and as exposed by the .param_groups attribute
    assert o.param_groups[0]["lr"] == 0.1
    x.backward()
    o.step()
    # Second momentum step: buffer 0.9*1.0 + 1.0 = 1.9, x -> 0.9 - 0.1*1.9 = 0.71.
    assert x == torch.tensor([0.71], device=DEVICE)
    assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.9], device=DEVICE)

    # Check that the exposed param_groups are on the proper device
    assert o.param_groups[0]["params"][0].device == x.device
def test_lr_scheduler():
    """OSS must follow the exact same trajectory as vanilla SGD under a scheduler."""
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], lr=0.01)
    o2 = torch.optim.SGD([x2], lr=0.01)
    s = torch.optim.lr_scheduler.StepLR(o, 1)
    s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
    for _ in range(5):
        x.backward()
        o.zero_grad()
        o.step()
        s.step()
        x2.backward()
        o2.zero_grad()
        o2.step()
        s2.step()
    # Same updates, same LR decay => identical parameters.
    assert x == x2


def test_step_with_kwargs():
    """Extra keyword args passed to OSS.step must reach the wrapped optimizer."""

    class SGDWithStepKWArg(torch.optim.SGD):
        def step(self, closure=None, kwarg=[]):
            super().step()
            kwarg.append(5)

    kwarg = []
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)
    x.backward()
    o.step(0, kwarg=kwarg)
    # The sentinel shows the kwarg made it through the OSS wrapper.
    assert kwarg == [5]
    assert x == torch.tensor([0.9], device=DEVICE)


def test_step_without_closure():
    """OSS.step must also work when the wrapped optimizer's step takes no closure."""

    class SGDWithoutClosure(torch.optim.SGD):
        def step(self):
            return super().step()

    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], SGDWithoutClosure, lr=0.1)
    x.backward()
    o.step()
    assert x == torch.tensor([0.9], device=DEVICE)
def test_local_state_dict():
    """A local (per-rank) state dict can be saved and loaded into a fresh OSS."""
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], lr=0.1)
    local_state_dict = o.local_state_dict()
    o = optim.OSS([x], lr=0.01)
    o.load_local_state_dict(local_state_dict)
    # We should now be using a lr of 0.1.
    assert o.optim.param_groups[0]["lr"] == 0.1
    assert o.param_groups[0]["lr"] == 0.1
    x.backward()
    o.step()
    assert x == torch.tensor([0.9], device=DEVICE)


def test_implicit_local_state_dict():
    """state_dict()/load_state_dict() round-trip the same local state implicitly."""
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], lr=0.1)
    local_state_dict = o.state_dict()
    o = optim.OSS([x], lr=0.01)
    o.load_state_dict(local_state_dict)
    # We should now be using a lr of 0.1.
    assert o.optim.param_groups[0]["lr"] == 0.1
    assert o.param_groups[0]["lr"] == 0.1
    x.backward()
    o.step()
    assert x == torch.tensor([0.9], device=DEVICE)
def run_test_add_param_group(rank, world_size):
    """Worker body: adding a param group must re-balance the shards."""
    dist_init(rank, world_size)
    params = []
    for size in [4, 5, 2, 6, 4]:
        params.append(torch.rand(size, 1))
    o = optim.OSS(params, lr=0.1)
    assert len(o.param_groups) == 1
    o.add_param_group({"params": [torch.rand(3, 1)]})
    assert len(o.param_groups) == 2
    # Verify that added group is added to the correct partition making all have 8 elements.
    assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == 8
    assert len(o.optim.param_groups) == 2


def test_add_param_group():
    world_size = 3
    mp.spawn(run_test_add_param_group, args=(world_size,), nprocs=world_size, join=True)


def run_test_zero_grad(rank, world_size):
    """Worker body: zero_grad must clear the gradients of all wrapped params."""
    dist_init(rank, world_size)
    x = torch.rand(1)
    m = torch.nn.Linear(1, 1)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    # Gradients exist after backward()...
    assert m.weight.grad
    assert m.bias.grad
    o.zero_grad()
    # ...and are gone after zero_grad().
    assert not m.weight.grad
    assert not m.bias.grad


def test_zero_grad():
    world_size = 2
    mp.spawn(run_test_zero_grad, args=(world_size,), nprocs=world_size, join=True)
def run_test_step(rank, world_size):
    """Worker body: an all-reduced SGD step must produce the expected weights."""
    dist_init(rank, world_size)
    x = torch.tensor([float(rank + 1)], device=rank)
    m = torch.nn.Linear(1, 1)
    m.weight.data = torch.tensor([[1.0]])
    m.bias.data = torch.tensor([2.0])
    m.to(rank)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    # Average the gradients across ranks by hand before stepping.
    for p in m.parameters():
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data /= world_size
    o.step()
    assert m.weight == torch.tensor([[0.75]], device=rank)
    assert m.bias == torch.tensor([1.85], device=rank)


@skip_if_no_cuda
def test_step():
    world_size = min(2, torch.cuda.device_count())
    mp.spawn(run_test_step, args=(world_size,), nprocs=world_size, join=True)


def run_test_step_with_closure(rank, world_size, optimizer=None):
    """Worker body: OSS.step(closure=...) must return the closure's loss."""
    dist_init(rank, world_size)
    x_val = rank + 1
    weight = 1.0
    bias = 2.0
    error = 1.0
    # Target is the model's current prediction plus a known error, so the
    # L1 loss evaluates to exactly `error`.
    target = torch.tensor([x_val * weight + bias + error], device=rank)
    loss_fn = torch.nn.L1Loss()
    x = torch.tensor([float(x_val)], device=rank)
    m = torch.nn.Linear(1, 1)
    m.weight.data = torch.tensor([[weight]])
    m.bias.data = torch.tensor([bias])
    m.to(rank)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    for p in m.parameters():
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data /= world_size

    def closure():
        o.zero_grad()
        output = m(x)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    loss = o.step(closure=closure)
    assert loss == torch.tensor(error, device=rank)
    assert m.weight == torch.tensor([[1.1]], device=rank)
    assert m.bias == torch.tensor([2.1], device=rank)


@skip_if_no_cuda
def test_step_with_closure():
    world_size = min(2, torch.cuda.device_count())
    mp.spawn(run_test_step_with_closure, args=(world_size,), nprocs=world_size, join=True)
def run_test_sharding(rank, world_size):
    """Worker body: with 3 ranks, 24 total elements shard into 8 per rank."""
    dist_init(rank, world_size)
    params = []
    for size in [5, 4, 2, 6, 4, 3]:
        params.append(torch.rand(size, 1))
    o = optim.OSS(params, lr=0.1)
    assert sum([x.numel() for x in o.optim.param_groups[0]["params"]]) == 8


def test_sharding():
    world_size = 3
    mp.spawn(run_test_sharding, args=(world_size,), nprocs=world_size, join=True)


def run_test_collect_shards(rank, world_size, reference_rank):
    """Worker body: consolidate the sharded state on one rank and reload it."""
    dist_init(rank, world_size)
    device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE

    # Run a dummy step so that the optimizer state dict exists
    batch, input_width, hidden, target_width = 3, 20, 10, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)
    model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
    model.to(device)
    loss_fn = torch.nn.L1Loss()
    loss_fn.to(device)
    # With SGD, Momentum is required to get a state to shard
    optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)

    def closure():
        optimizer.zero_grad()
        output = model(inputs)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    _ = optimizer.step(closure=closure)

    # Update the optimizer state on the reference rank
    optimizer.consolidate_state_dict(recipient_rank=reference_rank)

    # Fetch the state on the reference rank
    # - check that it has the correct size
    # - load it again
    if rank == reference_rank:
        optimizer_state_dict = optimizer.state_dict()
        assert len(optimizer_state_dict["state"]) == world_size
    else:
        optimizer_state_dict = {}

    # Broadcast the consolidated state to every rank before reloading.
    optimizer_state_dict = optim.utils.broadcast_object(
        optimizer_state_dict, src_rank=reference_rank, group=dist.group.WORLD, dist_device=device
    )

    # Load the optimizer state dict
    optimizer.load_state_dict(optimizer_state_dict)


def test_collect_shards():
    world_size = 3
    if torch.cuda.is_available():
        world_size = min(world_size, torch.cuda.device_count())
    reference_rank = 0
    mp.spawn(
        run_test_collect_shards, args=(world_size, reference_rank), nprocs=world_size, join=True,
    )
| 31.724036 | 108 | 0.659994 |
import os
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import fairscale.optim as optim
# Skip GPU-only tests when no CUDA device is available.
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
# NCCL requires GPUs; otherwise use the Gloo CPU backend.
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")


def setup_module(module):
    """Initialize a 1-process default group for the in-process tests."""
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "29500"
    dist.init_process_group(backend=BACKEND, rank=0, world_size=1)


def dist_init(rank, world_size):
    # Spawned workers use port 29501 so they don't collide with the
    # module-level group on 29500.
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "29501"
    dist.init_process_group(backend=BACKEND, rank=rank, world_size=world_size)


def test_create():
    # Smoke test: OSS construction must not raise.
    params = [torch.rand(1)]
    o = optim.OSS(params, lr=0.01)
def test_state_dict():
    """Round-trip OSS state through state_dict()/load_state_dict()."""
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], lr=0.1, momentum=0.9)
    x.backward()
    o.step()
    # First SGD step with grad 1.0: x -> 0.9, momentum buffer -> 1.0.
    assert x == torch.tensor([0.9], device=DEVICE)
    assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
    o.zero_grad()
    o.consolidate_state_dict()  # sync state between replicas, even if there are none
    state_dict = o.state_dict()
    # PyTorch-compliant top-level keys.
    assert "param_groups" in state_dict.keys()
    assert "state" in state_dict.keys()
    # Hyper-parameters survive the export.
    assert state_dict["param_groups"][0]["lr"] == 0.1
    assert state_dict["param_groups"][0]["momentum"] == 0.9
    assert not state_dict["param_groups"][0]["nesterov"]
    assert state_dict["param_groups"][0]["weight_decay"] == 0.0
    assert state_dict["param_groups"][0]["dampening"] == 0.0
    # Exported groups and the live .param_groups attribute agree.
    for k in state_dict["param_groups"][0].keys():
        if k != "params":
            assert state_dict["param_groups"][0][k] == o.param_groups[0][k]
    # Reload into a fresh optimizer created with a different lr.
    o = optim.OSS([x], lr=0.01)
    o.load_state_dict(state_dict)
    # Check that state is correct and on proper device
    assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
    # The loaded lr (0.1) overrides the constructor's 0.01.
    assert o.param_groups[0]["lr"] == 0.1
    x.backward()
    o.step()
    # Second momentum step: buffer -> 1.9, x -> 0.9 - 0.1*1.9 = 0.71.
    assert x == torch.tensor([0.71], device=DEVICE)
    assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.9], device=DEVICE)
    # Check that the exposed param_groups are on the proper device
    assert o.param_groups[0]["params"][0].device == x.device


def test_lr_scheduler():
    """OSS and plain SGD must stay in lockstep under the same StepLR schedule."""
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], lr=0.01)
    o2 = torch.optim.SGD([x2], lr=0.01)
    s = torch.optim.lr_scheduler.StepLR(o, 1)
    s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
    for _ in range(5):
        x.backward()
        o.zero_grad()
        o.step()
        s.step()
        x2.backward()
        o2.zero_grad()
        o2.step()
        s2.step()
    assert x == x2


def test_step_with_kwargs():
    """Keyword args given to OSS.step must be forwarded to the inner optimizer."""

    class SGDWithStepKWArg(torch.optim.SGD):
        def step(self, closure=None, kwarg=[]):
            super().step()
            kwarg.append(5)

    kwarg = []
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)
    x.backward()
    o.step(0, kwarg=kwarg)
    # The sentinel proves the kwarg reached the subclassed step().
    assert kwarg == [5]
    assert x == torch.tensor([0.9], device=DEVICE)


def test_step_without_closure():
    """OSS.step must work with inner optimizers whose step takes no closure."""

    class SGDWithoutClosure(torch.optim.SGD):
        def step(self):
            return super().step()

    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], SGDWithoutClosure, lr=0.1)
    x.backward()
    o.step()
    assert x == torch.tensor([0.9], device=DEVICE)
def test_local_state_dict():
    """The per-rank local state dict can be saved and reloaded."""
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], lr=0.1)
    local_state_dict = o.local_state_dict()
    o = optim.OSS([x], lr=0.01)
    o.load_local_state_dict(local_state_dict)
    # We should now be using a lr of 0.1.
    assert o.optim.param_groups[0]["lr"] == 0.1
    assert o.param_groups[0]["lr"] == 0.1
    x.backward()
    o.step()
    assert x == torch.tensor([0.9], device=DEVICE)


def test_implicit_local_state_dict():
    """state_dict()/load_state_dict() implicitly round-trip the local shard."""
    x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
    o = optim.OSS([x], lr=0.1)
    local_state_dict = o.state_dict()
    o = optim.OSS([x], lr=0.01)
    o.load_state_dict(local_state_dict)
    # We should now be using a lr of 0.1.
    assert o.optim.param_groups[0]["lr"] == 0.1
    assert o.param_groups[0]["lr"] == 0.1
    x.backward()
    o.step()
    assert x == torch.tensor([0.9], device=DEVICE)


def run_test_add_param_group(rank, world_size):
    """Worker body: added groups must land on the right partition."""
    dist_init(rank, world_size)
    params = []
    for size in [4, 5, 2, 6, 4]:
        params.append(torch.rand(size, 1))
    o = optim.OSS(params, lr=0.1)
    assert len(o.param_groups) == 1
    o.add_param_group({"params": [torch.rand(3, 1)]})
    assert len(o.param_groups) == 2
    # Verify that added group is added to the correct partition making all have 8 elements.
    assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == 8
    assert len(o.optim.param_groups) == 2


def test_add_param_group():
    world_size = 3
    mp.spawn(run_test_add_param_group, args=(world_size,), nprocs=world_size, join=True)


def run_test_zero_grad(rank, world_size):
    """Worker body: zero_grad must clear all wrapped gradients."""
    dist_init(rank, world_size)
    x = torch.rand(1)
    m = torch.nn.Linear(1, 1)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    # Gradients present after backward()...
    assert m.weight.grad
    assert m.bias.grad
    o.zero_grad()
    # ...cleared after zero_grad().
    assert not m.weight.grad
    assert not m.bias.grad


def test_zero_grad():
    world_size = 2
    mp.spawn(run_test_zero_grad, args=(world_size,), nprocs=world_size, join=True)
def run_test_step(rank, world_size):
    """Worker body: hand-averaged gradients plus OSS.step give known weights."""
    dist_init(rank, world_size)
    x = torch.tensor([float(rank + 1)], device=rank)
    m = torch.nn.Linear(1, 1)
    m.weight.data = torch.tensor([[1.0]])
    m.bias.data = torch.tensor([2.0])
    m.to(rank)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    # Average gradients across ranks manually before stepping.
    for p in m.parameters():
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data /= world_size
    o.step()
    assert m.weight == torch.tensor([[0.75]], device=rank)
    assert m.bias == torch.tensor([1.85], device=rank)


@skip_if_no_cuda
def test_step():
    world_size = min(2, torch.cuda.device_count())
    mp.spawn(run_test_step, args=(world_size,), nprocs=world_size, join=True)


def run_test_step_with_closure(rank, world_size, optimizer=None):
    """Worker body: step(closure=...) must evaluate the closure and return its loss."""
    dist_init(rank, world_size)
    x_val = rank + 1
    weight = 1.0
    bias = 2.0
    error = 1.0
    # Target = model output + known error, so L1 loss is exactly `error`.
    target = torch.tensor([x_val * weight + bias + error], device=rank)
    loss_fn = torch.nn.L1Loss()
    x = torch.tensor([float(x_val)], device=rank)
    m = torch.nn.Linear(1, 1)
    m.weight.data = torch.tensor([[weight]])
    m.bias.data = torch.tensor([bias])
    m.to(rank)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    for p in m.parameters():
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data /= world_size

    def closure():
        o.zero_grad()
        output = m(x)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    loss = o.step(closure=closure)
    assert loss == torch.tensor(error, device=rank)
    assert m.weight == torch.tensor([[1.1]], device=rank)
    assert m.bias == torch.tensor([2.1], device=rank)


@skip_if_no_cuda
def test_step_with_closure():
    world_size = min(2, torch.cuda.device_count())
    mp.spawn(run_test_step_with_closure, args=(world_size,), nprocs=world_size, join=True)
def run_test_sharding(rank, world_size):
    """Worker body: 24 total elements over 3 ranks shard to 8 per rank."""
    dist_init(rank, world_size)
    params = []
    for size in [5, 4, 2, 6, 4, 3]:
        params.append(torch.rand(size, 1))
    o = optim.OSS(params, lr=0.1)
    assert sum([x.numel() for x in o.optim.param_groups[0]["params"]]) == 8


def test_sharding():
    world_size = 3
    mp.spawn(run_test_sharding, args=(world_size,), nprocs=world_size, join=True)


def run_test_collect_shards(rank, world_size, reference_rank):
    """Worker body: consolidate sharded state on a reference rank and reload it."""
    dist_init(rank, world_size)
    device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE

    # Run a dummy step so that the optimizer state dict exists
    batch, input_width, hidden, target_width = 3, 20, 10, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)
    model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
    model.to(device)
    loss_fn = torch.nn.L1Loss()
    loss_fn.to(device)
    # With SGD, Momentum is required to get a state to shard
    optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)

    def closure():
        optimizer.zero_grad()
        output = model(inputs)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    _ = optimizer.step(closure=closure)

    # Update the optimizer state on the reference rank
    optimizer.consolidate_state_dict(recipient_rank=reference_rank)

    # Fetch the state on the reference rank
    # - check that it has the correct size
    # - load it again
    if rank == reference_rank:
        optimizer_state_dict = optimizer.state_dict()
        assert len(optimizer_state_dict["state"]) == world_size
    else:
        optimizer_state_dict = {}

    # Broadcast the consolidated state to all ranks before reloading.
    optimizer_state_dict = optim.utils.broadcast_object(
        optimizer_state_dict, src_rank=reference_rank, group=dist.group.WORLD, dist_device=device
    )

    # Load the optimizer state dict
    optimizer.load_state_dict(optimizer_state_dict)


def test_collect_shards():
    world_size = 3
    if torch.cuda.is_available():
        world_size = min(world_size, torch.cuda.device_count())
    reference_rank = 0
    mp.spawn(
        run_test_collect_shards, args=(world_size, reference_rank), nprocs=world_size, join=True,
    )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.