repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
AOSPA-L/android_external_skia | experimental/benchtools/rebase.py | 65 | 7840 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""rebase.py: standalone script to batch update bench expectations.
Requires gsutil to access gs://chromium-skia-gm and Rietveld credentials.
Usage:
Copy script to a separate dir outside Skia repo. The script will create a
skia dir on the first run to host the repo, and will create/delete
temp dirs as needed.
./rebase.py --githash <githash prefix to use for getting bench data>
"""
import argparse
import filecmp
import os
import re
import shutil
import subprocess
import time
import urllib2
# googlesource url that has most recent Skia git hash info.
SKIA_GIT_HEAD_URL = 'https://skia.googlesource.com/skia/+log/HEAD'
# Google Storage bench file prefix.
GS_PREFIX = 'gs://chromium-skia-gm/perfdata'
# List of Perf platforms we want to process. Populate from expectations/bench.
PLATFORMS = []
# Regular expression for matching githash data.
HA_RE = '<a href="/skia/\+/([0-9a-f]+)">'
HA_RE_COMPILED = re.compile(HA_RE)
def get_git_hashes():
  """Return recent Skia git hashes, newest first.

  Scrapes the googlesource HEAD log page and extracts the full hashes
  with HA_RE_COMPILED. Network errors from urllib2 propagate up.
  """
  print 'Getting recent git hashes...'
  hashes = HA_RE_COMPILED.findall(
      urllib2.urlopen(SKIA_GIT_HEAD_URL).read())
  return hashes
def filter_file(f):
  """Return True when *f* names an msaa/record bench file to be skipped.

  Note: a marker at position 0 does NOT match (find() > 0), matching the
  original behavior.
  """
  return any(f.find(marker) > 0 for marker in ('_msaa', '_record'))
def clean_dir(d):
  """Recreate *d* as an empty directory, removing any previous contents."""
  if not os.path.exists(d):
    os.makedirs(d)
    return
  # Wipe the old tree first, then make a fresh, empty directory.
  shutil.rmtree(d)
  os.makedirs(d)
def get_gs_filelist(p, h):
  """List Google Storage skp bench files for platform *p* at githash *h*.

  Runs 'gsutil ls' under GS_PREFIX and returns the matching URIs with
  msaa/record files filtered out; returns [] when nothing matched.
  """
  print 'Looking up for the closest bench files in Google Storage...'
  proc = subprocess.Popen(['gsutil', 'ls',
      '/'.join([GS_PREFIX, p, 'bench_' + h + '_data_skp_*'])],
      stdout=subprocess.PIPE)
  out, err = proc.communicate()
  # NOTE(review): stderr is not piped, so err is always None here; this
  # effectively only checks for empty output -- confirm intent.
  if err or not out:
    return []
  return [i for i in out.strip().split('\n') if not filter_file(i)]
def download_gs_files(p, h, gs_dir):
  """Download raw bench files for platform *p* at hash *h* into gs_dir/p.

  Filtered (msaa/record) files are deleted after download. Returns True
  iff at least one usable file remains.
  """
  print 'Downloading raw bench files from Google Storage...'
  proc = subprocess.Popen(['gsutil', 'cp',
      '/'.join([GS_PREFIX, p, 'bench_' + h + '_data_skp_*']),
      '%s/%s' % (gs_dir, p)],
      stdout=subprocess.PIPE)
  out, err = proc.communicate()
  # NOTE(review): stderr is not piped, so err is always None; this branch
  # appears unreachable -- confirm.
  if err:
    clean_dir(gs_dir)
    return False
  files = 0
  for f in os.listdir(os.path.join(gs_dir, p)):
    if filter_file(f):
      os.remove(os.path.join(gs_dir, p, f))
    else:
      files += 1
  if files:
    return True
  return False
def calc_expectations(p, h, gs_dir, exp_dir, repo_dir):
  """Generate a bench expectations file for platform *p* at hash *h*.

  Runs gen_bench_expectations.py to write the file into exp_dir. Returns
  True only when generation succeeded AND the result differs from the
  expectations file already checked into the repo (i.e. an update is
  actually needed).
  """
  exp_filename = 'bench_expectations_%s.txt' % p
  proc = subprocess.Popen(['python', 'skia/bench/gen_bench_expectations.py',
      '-r', h, '-b', p, '-d', os.path.join(gs_dir, p), '-o',
      os.path.join(exp_dir, exp_filename)],
      stdout=subprocess.PIPE)
  out, err = proc.communicate()
  if err:
    print 'ERR_CALCULATING_EXPECTATIONS: ' + err
    return False
  print 'CALCULATED_EXPECTATIONS: ' + out
  repo_file = os.path.join(repo_dir, 'expectations', 'bench', exp_filename)
  # Identical content means there is nothing to commit for this platform.
  if (os.path.isfile(repo_file) and
      filecmp.cmp(repo_file, os.path.join(exp_dir, exp_filename))):
    print 'NO CHANGE ON %s' % repo_file
    return False
  return True
def checkout_or_update_skia(repo_dir):
  """Pull the Skia repo in repo_dir, cloning from scratch if pull fails.

  Returns True on success, False when the fresh clone fails.
  """
  status = True
  old_cwd = os.getcwd()
  os.chdir(repo_dir)
  print 'CHECK SKIA REPO...'
  # 'git pull' returns nonzero when repo_dir is not a checkout yet; in
  # that case wipe the directory and clone.
  if subprocess.call(['git', 'pull'],
                     stderr=subprocess.PIPE):
    print 'Checking out Skia from git, please be patient...'
    os.chdir(old_cwd)
    clean_dir(repo_dir)
    os.chdir(repo_dir)
    if subprocess.call(['git', 'clone', '-q', '--depth=50', '--single-branch',
                        'https://skia.googlesource.com/skia.git', '.']):
      status = False
  subprocess.call(['git', 'checkout', 'master'])
  subprocess.call(['git', 'pull'])
  os.chdir(old_cwd)
  return status
def git_commit_expectations(repo_dir, exp_dir, update_li, h, commit):
  """Commit and upload updated expectation files on a throwaway branch.

  Copies the files named in update_li from exp_dir into the repo's
  expectations/bench dir, commits, uploads via 'git cl' (optionally to
  the commit queue when *commit* is True), then deletes the temporary
  branch. Returns True iff every command succeeded.
  """
  commit_msg = """manual bench rebase after %s
TBR=robertphillips@google.com
Bypassing trybots:
NOTRY=true""" % h
  old_cwd = os.getcwd()
  os.chdir(repo_dir)
  upload = ['git', 'cl', 'upload', '-f', '--bypass-hooks',
            '--bypass-watchlists', '-m', commit_msg]
  # The branch name reuses the unique timestamped exp dir name.
  branch = exp_dir.split('/')[-1]
  if commit:
    upload.append('--use-commit-queue')
  cmds = ([['git', 'checkout', 'master'],
           ['git', 'pull'],
           ['git', 'checkout', '-b', branch, '-t', 'origin/master']] +
          [['cp', '%s/%s' % (exp_dir, f), 'expectations/bench'] for f in
           update_li] +
          [['git', 'add'] + ['expectations/bench/%s' % i for i in update_li],
           ['git', 'commit', '-m', commit_msg],
           upload,
           ['git', 'checkout', 'master'],
           ['git', 'branch', '-D', branch],
          ])
  status = True
  for cmd in cmds:
    print 'Running ' + ' '.join(cmd)
    if subprocess.call(cmd):
      print 'FAILED. Please check if skia git repo is present.'
      # Best-effort return to master before bailing out.
      subprocess.call(['git', 'checkout', 'master'])
      status = False
      break
  os.chdir(old_cwd)
  return status
def delete_dirs(li):
  """Recursively delete every directory named in list *li*."""
  for d in li:
    print 'Deleting directory %s' % d
    shutil.rmtree(d)
def main():
  """Drive the whole rebaseline flow.

  Self-updates from the checked-out repo, discovers platforms from the
  expectations dir, finds the closest bench data at or before --githash
  for each platform, regenerates expectations, and uploads a CL.
  """
  d = os.path.dirname(os.path.abspath(__file__))
  os.chdir(d)
  # 'git rev-parse' succeeding (exit 0) means we are inside a git repo,
  # which this script must not be run from.
  if not subprocess.call(['git', 'rev-parse'], stderr=subprocess.PIPE):
    print 'Please copy script to a separate dir outside git repos to use.'
    return
  parser = argparse.ArgumentParser()
  parser.add_argument('--githash',
                      help='Githash prefix (7+ chars) to rebaseline to.')
  parser.add_argument('--commit', action='store_true',
                      help='Whether to commit changes automatically.')
  args = parser.parse_args()
  repo_dir = os.path.join(d, 'skia')
  if not os.path.exists(repo_dir):
    os.makedirs(repo_dir)
  if not checkout_or_update_skia(repo_dir):
    print 'ERROR setting up Skia repo at %s' % repo_dir
    return 1
  # Keep this script in sync with the copy in the repo; if it changed,
  # replace ourselves and ask the user to rerun.
  file_in_repo = os.path.join(d, 'skia/experimental/benchtools/rebase.py')
  if not filecmp.cmp(__file__, file_in_repo):
    shutil.copy(file_in_repo, __file__)
    print 'Updated this script from repo; please run again.'
    return
  # Platform names are derived from the expectation file names.
  for item in os.listdir(os.path.join(d, 'skia/expectations/bench')):
    PLATFORMS.append(
        item.replace('bench_expectations_', '').replace('.txt', ''))
  if not args.githash or len(args.githash) < 7:
    raise Exception('Please provide --githash with a longer prefix (7+).')
  commit = False
  if args.commit:
    commit = True
  rebase_hash = args.githash[:7]
  hashes = get_git_hashes()
  short_hashes = [h[:7] for h in hashes]
  if rebase_hash not in short_hashes:
    raise Exception('Provided --githash not found in recent history!')
  # Keep only hashes at or after the requested one (list is newest-first).
  hashes = hashes[:short_hashes.index(rebase_hash) + 1]
  update_li = []
  ts_str = '%s' % time.time()
  gs_dir = os.path.join(d, 'gs' + ts_str)
  exp_dir = os.path.join(d, 'exp' + ts_str)
  clean_dir(gs_dir)
  clean_dir(exp_dir)
  for p in PLATFORMS:
    clean_dir(os.path.join(gs_dir, p))
    hash_to_use = ''
    # Walk from oldest candidate to newest until data downloads.
    for h in reversed(hashes):
      li = get_gs_filelist(p, h)
      if not len(li):  # no data
        continue
      if download_gs_files(p, h, gs_dir):
        print 'Copied %s/%s' % (p, h)
        hash_to_use = h
        break
      else:
        print 'DOWNLOAD BENCH FAILED %s/%s' % (p, h)
        break
    if hash_to_use:
      # h still holds the hash whose data was downloaded (== hash_to_use).
      if calc_expectations(p, h, gs_dir, exp_dir, repo_dir):
        update_li.append('bench_expectations_%s.txt' % p)
  if not update_li:
    print 'No bench data to update after %s!' % args.githash
  elif not git_commit_expectations(
      repo_dir, exp_dir, update_li, args.githash[:7], commit):
    print 'ERROR uploading expectations using git.'
  elif not commit:
    print 'CL created. Please take a look at the link above.'
  else:
    print 'New bench baselines should be in CQ now.'
  delete_dirs([gs_dir, exp_dir])
if __name__ == "__main__":
main()
| bsd-3-clause |
thiagopena/djangoSIGE | djangosige/tests/cadastro/test_views.py | 1 | 12459 | # -*- coding: utf-8 -*-
from djangosige.tests.test_case import BaseTestCase, replace_none_values_in_dictionary
from djangosige.apps.cadastro.models import Produto, Unidade, Marca, Categoria, Transportadora, Fornecedor, Cliente, Empresa
from django.urls import reverse
CADASTRO_MODELS = (
Empresa,
Cliente,
Fornecedor,
Transportadora,
Produto,
Categoria,
Unidade,
Marca,
)
PESSOA_MODELS = (
Empresa,
Cliente,
Fornecedor,
Transportadora,
)
INLINE_FORMSET_DATA = {
'endereco_form-0-tipo_endereco': 'UNI',
'endereco_form-0-logradouro': 'Logradouro Cliente',
'endereco_form-0-numero': '123',
'endereco_form-0-bairro': 'Bairro Cliente',
'endereco_form-0-complemento': '',
'endereco_form-0-pais': 'Brasil',
'endereco_form-0-cpais': '1058',
'endereco_form-0-municipio': 'Municipio',
'endereco_form-0-cmun': '12345',
'endereco_form-0-cep': '1234567',
'endereco_form-0-uf': 'MG',
'endereco_form-TOTAL_FORMS': 1,
'endereco_form-INITIAL_FORMS': 0,
'telefone_form-TOTAL_FORMS': 1,
'telefone_form-INITIAL_FORMS': 0,
'email_form-TOTAL_FORMS': 1,
'email_form-INITIAL_FORMS': 0,
'site_form-TOTAL_FORMS': 1,
'site_form-INITIAL_FORMS': 0,
'banco_form-TOTAL_FORMS': 1,
'banco_form-INITIAL_FORMS': 0,
'documento_form-TOTAL_FORMS': 1,
'documento_form-INITIAL_FORMS': 0,
}
class CadastroAdicionarViewsTestCase(BaseTestCase):
    """Exercises the 'add' (create) views of the cadastro app."""

    def test_add_views_get_request(self):
        """GET every add view: page renders and permission is enforced."""
        for model in CADASTRO_MODELS:
            model_name = model.__name__.lower()
            url = reverse('cadastro:add{}view'.format(model_name))
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            # Check that the matching 'add_<model>' permission is required.
            permission_codename = 'add_' + str(model_name)
            self.check_user_get_permission(
                url, permission_codename=permission_codename)

    def test_add_pessoa_post_request(self):
        """POST valid then invalid data to each pessoa-type add view."""
        for model in PESSOA_MODELS:
            model_name = model.__name__.lower()
            url = reverse('cadastro:add{}view'.format(model_name))
            pessoa_data = {
                '{}_form-nome_razao_social'.format(model_name): 'Razao Social Qualquer',
                '{}_form-tipo_pessoa'.format(model_name): 'PJ',
                '{}_form-inscricao_municipal'.format(model_name): '',
                '{}_form-informacoes_adicionais'.format(model_name): '',
            }
            # Some models require extra fields / inline formsets.
            if model_name == 'cliente':
                pessoa_data['cliente_form-limite_de_credito'] = '0.00'
                pessoa_data['cliente_form-indicador_ie'] = '1'
                pessoa_data['cliente_form-id_estrangeiro'] = ''
            elif model_name == 'transportadora':
                pessoa_data['veiculo_form-TOTAL_FORMS'] = 1
                pessoa_data['veiculo_form-INITIAL_FORMS'] = 0
                pessoa_data['veiculo_form-0-descricao'] = 'Veiculo1'
                pessoa_data['veiculo_form-0-placa'] = 'XXXXXXXX'
                pessoa_data['veiculo_form-0-uf'] = 'SP'
            pessoa_data.update(INLINE_FORMSET_DATA)
            response = self.client.post(url, pessoa_data, follow=True)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'cadastro/pessoa_list.html')
            # Blank required name must make the form invalid.
            pessoa_data['{}_form-nome_razao_social'.format(model_name)] = ''
            response = self.client.post(url, pessoa_data, follow=True)
            self.assertFormError(
                response, 'form', 'nome_razao_social', 'Este campo é obrigatório.')

    def test_add_produto_post_request(self):
        """POST valid then invalid data to the product add view."""
        url = reverse('cadastro:addprodutoview')
        produto_data = {
            'codigo': '000000000000010',
            'descricao': 'Produto Teste',
            'origem': '0',
            'venda': '100,00',
            'custo': '50,00',
            'estoque_minimo': '100,00',
            'estoque_atual': '500,00',
            'ncm': '02109100[EX:01]',
            'fornecedor': '2',  # Id Fornecedor1
            'local_dest': '1',  # Id Estoque Padrao
        }
        response = self.client.post(url, produto_data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'cadastro/produto/produto_list.html')
        # Blank required code must make the form invalid.
        produto_data['codigo'] = ''
        response = self.client.post(url, produto_data, follow=True)
        self.assertFormError(response, 'form', 'codigo',
                             'Este campo é obrigatório.')

    def test_add_categoria_post_request(self):
        """POST valid then invalid data to the category add view."""
        url = reverse('cadastro:addcategoriaview')
        data = {
            'categoria_desc': 'Categoria Teste',
        }
        response = self.client.post(url, data, follow=True)
        self.assertEqual(response.status_code, 200)
        # Blank required description must make the form invalid.
        data['categoria_desc'] = ''
        response = self.client.post(url, data, follow=True)
        self.assertFormError(
            response, 'form', 'categoria_desc', 'Este campo é obrigatório.')

    def test_add_marca_post_request(self):
        """POST valid then invalid data to the brand add view."""
        url = reverse('cadastro:addmarcaview')
        data = {
            'marca_desc': 'Marca Teste',
        }
        response = self.client.post(url, data, follow=True)
        self.assertEqual(response.status_code, 200)
        # Blank required description must make the form invalid.
        data['marca_desc'] = ''
        response = self.client.post(url, data, follow=True)
        self.assertFormError(response, 'form', 'marca_desc',
                             'Este campo é obrigatório.')

    def test_add_unidade_post_request(self):
        """POST valid then invalid data to the unit add view."""
        url = reverse('cadastro:addunidadeview')
        data = {
            'sigla_unidade': 'UNT',
            'unidade_desc': 'Unidade Teste',
        }
        response = self.client.post(url, data, follow=True)
        self.assertEqual(response.status_code, 200)
        # Blank required abbreviation must make the form invalid.
        data['sigla_unidade'] = ''
        response = self.client.post(url, data, follow=True)
        self.assertFormError(
            response, 'form', 'sigla_unidade', 'Este campo é obrigatório.')
class CadastroListarViewsTestCase(BaseTestCase):
    """Exercises the list views, including deletion through them."""

    def test_list_views_deletar_objeto(self):
        """Create then delete an object through each model's list view."""
        for model in CADASTRO_MODELS:
            model_name = model.__name__.lower()
            # 'fornecedor' pluralizes irregularly ('fornecedores').
            if model_name == 'fornecedor':
                url = reverse('cadastro:listafornecedoresview')
            else:
                url = reverse('cadastro:lista{}sview'.format(model_name))
            obj = model.objects.create()
            self.check_list_view_delete(url=url, deleted_object=obj)
        # Also cover the low-stock products list view.
        url = reverse('cadastro:listaprodutosbaixoestoqueview')
        obj = Produto.objects.create()
        self.check_list_view_delete(url=url, deleted_object=obj)
class CadastroEditarViewsTestCase(BaseTestCase):
    """Exercises the edit views: load initial data, post it back edited."""

    def test_edit_pessoa_get_post_request(self):
        """Round-trip each pessoa-type edit form, then check validation."""
        for model in PESSOA_MODELS:
            # Fetch an arbitrary existing object.
            model_name = model.__name__.lower()
            obj = model.objects.order_by('pk').last()
            url = reverse('cadastro:editar{}view'.format(model_name),
                          kwargs={'pk': obj.pk})
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            data = response.context['form'].initial
            if model_name == 'cliente':
                # Rename the field to its prefixed form key before posting.
                data['{}-limite_de_credito'.format(response.context['form'].prefix)] = data[
                    'limite_de_credito']
                del data['limite_de_credito']
            elif model_name == 'transportadora':
                data['veiculo_form-TOTAL_FORMS'] = 1
                data['veiculo_form-INITIAL_FORMS'] = 0
                data['veiculo_form-0-descricao'] = 'Veiculo1'
                data['veiculo_form-0-placa'] = 'XXXXXXXX'
                data['veiculo_form-0-uf'] = 'SP'
            # Edit one field so the post actually changes the object.
            data['informacoes_adicionais'] = 'Objeto editado.'
            data.update(INLINE_FORMSET_DATA)
            response = self.client.post(url, data, follow=True)
            self.assertEqual(response.status_code, 200)
            # Blank required name must make the form invalid.
            data[
                '{}_form-nome_razao_social'.format(response.context['form'].prefix)] = ''
            response = self.client.post(url, data, follow=True)
            self.assertFormError(
                response, 'form', 'nome_razao_social', 'Este campo é obrigatório.')

    def test_edit_produto_get_post_request(self):
        """Round-trip the product edit form."""
        # Fetch an arbitrary existing object.
        obj = Produto.objects.order_by('pk').last()
        url = reverse('cadastro:editarprodutoview',
                      kwargs={'pk': obj.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = response.context['form'].initial
        replace_none_values_in_dictionary(data)
        data['inf_adicionais'] = 'Produto editado.'
        response = self.client.post(url, data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'cadastro/produto/produto_list.html')

    def test_edit_categoria_get_post_request(self):
        """Round-trip the category edit form, then check validation."""
        # Fetch an arbitrary existing object.
        obj = Categoria.objects.order_by('pk').last()
        url = reverse('cadastro:editarcategoriaview',
                      kwargs={'pk': obj.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = response.context['form'].initial
        data['categoria_desc'] = 'Categoria Editada'
        response = self.client.post(url, data, follow=True)
        self.assertEqual(response.status_code, 200)
        # Blank required description must make the form invalid.
        data['categoria_desc'] = ''
        response = self.client.post(url, data, follow=True)
        self.assertFormError(
            response, 'form', 'categoria_desc', 'Este campo é obrigatório.')

    def test_edit_marca_get_post_request(self):
        """Round-trip the brand edit form."""
        # Fetch an arbitrary existing object.
        obj = Marca.objects.order_by('pk').last()
        url = reverse('cadastro:editarmarcaview',
                      kwargs={'pk': obj.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = response.context['form'].initial
        data['marca_desc'] = 'Marca Editada'
        response = self.client.post(url, data, follow=True)
        self.assertEqual(response.status_code, 200)

    def test_edit_unidade_get_post_request(self):
        """Round-trip the unit edit form."""
        # Fetch an arbitrary existing object.
        obj = Unidade.objects.order_by('pk').last()
        url = reverse('cadastro:editarunidadeview',
                      kwargs={'pk': obj.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = response.context['form'].initial
        data['unidade_desc'] = 'Unidade Editada'
        response = self.client.post(url, data, follow=True)
        self.assertEqual(response.status_code, 200)
class CadastroAjaxRequestViewsTestCase(BaseTestCase):
    """Exercises the AJAX 'info' endpoints that serialize one object."""

    def test_info_cliente_post_request(self):
        """Fetch an arbitrary existing object and request its JSON info."""
        obj = Cliente.objects.order_by('pk').last()
        obj_pk = obj.pk
        url = reverse('cadastro:infocliente')
        data = {'pessoaId': obj_pk}
        self.check_json_response(url, data, obj_pk, model='cadastro.cliente')

    def test_info_transportadora_post_request(self):
        """Fetch an arbitrary existing object and request its JSON info."""
        obj = Transportadora.objects.order_by('pk').last()
        obj_pk = obj.pk
        url = reverse('cadastro:infotransportadora')
        data = {'transportadoraId': obj_pk}
        self.check_json_response(
            url, data, obj_pk, model='cadastro.transportadora')

    def test_info_fornecedor_post_request(self):
        """Fetch an arbitrary existing object and request its JSON info."""
        obj = Fornecedor.objects.order_by('pk').last()
        obj_pk = obj.pk
        url = reverse('cadastro:infofornecedor')
        data = {'pessoaId': obj_pk}
        self.check_json_response(
            url, data, obj_pk, model='cadastro.fornecedor')

    def test_info_produto_post_request(self):
        """Fetch an arbitrary existing object and request its JSON info."""
        obj = Produto.objects.order_by('pk').last()
        obj_pk = obj.pk
        url = reverse('cadastro:infoproduto')
        data = {'produtoId': obj_pk}
        self.check_json_response(url, data, obj_pk, model='cadastro.produto')
| mit |
bxshi/gem5 | src/arch/x86/isa/insts/simd64/integer/data_reordering/pack_with_saturation.py | 91 | 3257 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PACKSSDW_MMX_MMX {
pack mmx, mmx, mmxm, ext=Signed, srcSize=4, destSize=2
};
def macroop PACKSSDW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
pack mmx, mmx, ufp1, ext=Signed, srcSize=4, destSize=2
};
def macroop PACKSSDW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
pack mmx, mmx, ufp1, ext=Signed, srcSize=4, destSize=2
};
def macroop PACKSSWB_MMX_MMX {
pack mmx, mmx, mmxm, ext=Signed, srcSize=2, destSize=1
};
def macroop PACKSSWB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
pack mmx, mmx, ufp1, ext=Signed, srcSize=2, destSize=1
};
def macroop PACKSSWB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
pack mmx, mmx, ufp1, ext=Signed, srcSize=2, destSize=1
};
def macroop PACKUSWB_MMX_MMX {
pack mmx, mmx, mmxm, ext=0, srcSize=2, destSize=1
};
def macroop PACKUSWB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
pack mmx, mmx, ufp1, ext=0, srcSize=2, destSize=1
};
def macroop PACKUSWB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
pack mmx, mmx, ufp1, ext=0, srcSize=2, destSize=1
};
'''
| bsd-3-clause |
nugget/home-assistant | homeassistant/components/sensor/shodan.py | 3 | 2760 | """Sensor for displaying the number of result on Shodan.io."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['shodan==1.11.1']
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Shodan"
CONF_QUERY = 'query'
DEFAULT_NAME = 'Shodan Sensor'
ICON = 'mdi:tooltip-text'
SCAN_INTERVAL = timedelta(minutes=15)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_QUERY): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Shodan sensor from the validated platform config.

    Returns False when the initial API probe fails, so misconfiguration
    (bad key, no network) is surfaced at setup time.
    """
    import shodan

    api_key = config.get(CONF_API_KEY)
    name = config.get(CONF_NAME)
    query = config.get(CONF_QUERY)

    data = ShodanData(shodan.Shodan(api_key), query)
    try:
        # Probe the API once before registering the entity.
        data.update()
    except shodan.exception.APIError as error:
        _LOGGER.warning("Unable to connect to Shodan.io: %s", error)
        return False

    add_entities([ShodanSensor(data, name)], True)
class ShodanSensor(Entity):
    """Representation of the Shodan sensor."""

    def __init__(self, data, name):
        """Initialize the Shodan sensor."""
        self.data = data          # ShodanData holder shared with updates
        self._name = name
        self._state = None        # result count, set on first update()
        self._unit_of_measurement = 'Hits'

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor (number of query hits)."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }

    def update(self):
        """Get the latest data and updates the states."""
        self.data.update()
        # 'total' is the match count returned by the Shodan count API.
        self._state = self.data.details['total']
class ShodanData:
    """Fetch and cache the latest result summary from Shodan.io."""

    def __init__(self, api, query):
        """Remember the Shodan client and the search query to run."""
        self.details = None       # last count() response; None until updated
        self._client = api
        self._search = query

    def update(self):
        """Query shodan.io and store the fresh result summary."""
        self.details = self._client.count(self._search)
| apache-2.0 |
kirca/odoo | addons/product/report/__init__.py | 452 | 1080 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_pricelist
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bomeba/bomeba0 | bomeba0/scaffold/gen_templates_gl.py | 2 | 2639 | """Use PyMOl to create templates for bomeba"""
import numpy as np
np.set_printoptions(precision=2)
import __main__
__main__.pymol_argv = ['pymol','-qck']
import pymol
from pymol import cmd, stored
pymol.finish_launching()
import openbabel as ob
# set hydrogen names to PDB compliant
cmd.set('pdb_reformat_names_mode', 2)
#cmd.set('retain_order', 1)
sel = 'all'
#aa = ['BDP', 'NGA']
#aa = ['AFL', 'NAG', 'MAG']
aa = ['B6D', 'A2G', 'BGC']
def minimize(selection='all', forcefield='GAFF', method='cg',
             nsteps= 2000, conv=1E-8, cutoff=False, cut_vdw=6.0, cut_elec=8.0):
    """Energy-minimize a PyMOL selection in place using OpenBabel.

    The selection is exported as PDB text, minimized with the given
    forcefield ('cg' = conjugate gradients, otherwise steepest descent),
    and read back into PyMOL under the same object name. Returns the
    final forcefield energy.
    """
    pdb_string = cmd.get_pdbstr(selection)
    name = cmd.get_legal_name(selection)
    obconversion = ob.OBConversion()
    obconversion.SetInAndOutFormats('pdb', 'pdb')
    mol = ob.OBMol()
    obconversion.ReadString(mol, pdb_string)
    ff = ob.OBForceField.FindForceField(forcefield)
    ff.Setup(mol)
    if cutoff == True:
        ff.EnableCutOff(True)
        ff.SetVDWCutOff(cut_vdw)
        ff.SetElectrostaticCutOff(cut_elec)
    if method == 'cg':
        ff.ConjugateGradients(nsteps, conv)
    else:
        ff.SteepestDescent(nsteps, conv)
    ff.GetCoordinates(mol)
    nrg = ff.Energy()
    pdb_string = obconversion.WriteString(mol)
    # Replace the original object with the minimized coordinates.
    cmd.delete(name)
    if name == 'all':
        # 'all' is a reserved selection keyword in PyMOL; rename slightly.
        name = 'all_'
    cmd.read_pdbstr(pdb_string, name)
    return nrg
#aa = ['W']
# For each residue template: load, minimize, then extract coordinates,
# atom names and bonds, and print a Python AA_info snippet for bomeba.
for res_name in aa:
    ## Get coordinates and offset
    cmd.load('templates/glycan/{}.pdb'.format(res_name))
    stored.IDs = []
    cmd.iterate('all','stored.IDs.append((ID))')
    # Re-base atom IDs so they start at -1 relative offsets.
    cmd.alter('all', 'ID = ID - stored.IDs[0] - 1')
    #cmd.fab(res_name)
    nrg = minimize(selection=sel, forcefield='GAFF', method='cg', nsteps=2000)
    #print(nrg)
    xyz = cmd.get_coords(sel)
    offset = len(xyz)
    ## get atom names
    stored.atom_names = []
    cmd.iterate(sel, 'stored.atom_names.append(name)')
    ## get bonds
    stored.bonds = []
    model = cmd.get_model(sel)
    for at in model.atom:
        cmd.iterate('neighbor ID %s' % at.id,
                    'stored.bonds.append((%s-1, ID-1))' % at.id)
    # Deduplicate undirected bonds (each pair is seen from both ends).
    bonds = list(set([tuple(sorted(i)) for i in stored.bonds]))
    bonds.sort()
    bb = []
    sc = []
    ## small check before returning the results
    if len(stored.atom_names) == offset:
        res = """{}_info = AA_info(coords=np.{},
atom_names = {},
bb = {},
sc = {},
bonds = {},
offset = {})\n""".format(res_name, repr(xyz), stored.atom_names, bb, sc, bonds, offset)
        print(res)
    else:
        print('Something funny is going on here!')
    cmd.delete('all')
| apache-2.0 |
Yuriy-Leonov/nova | nova/version.py | 13 | 2478 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
NOVA_PACKAGE = None # OS distro package version suffix
loaded = False
version_info = pbr.version.VersionInfo('nova')
version_string = version_info.version_string
def _load_config():
    """Load optional vendor/product/package overrides from a 'release' file.

    Populates the NOVA_VENDOR/NOVA_PRODUCT/NOVA_PACKAGE module globals.
    Runs at most once (guarded by 'loaded'); missing file or options
    leave the defaults in place.
    """
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    import ConfigParser

    from oslo.config import cfg

    from nova.openstack.common import log as logging

    global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
    if loaded:
        return
    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        # Use a distinct name so we don't shadow the oslo.config 'cfg'
        # module imported above.
        parser = ConfigParser.RawConfigParser()
        parser.read(cfgfile)

        # Only override options actually present in the file. The previous
        # unconditional get() calls raised NoOptionError for any missing
        # option and aborted the whole load, defeating the has_option
        # guards below.
        if parser.has_option("Nova", "vendor"):
            NOVA_VENDOR = parser.get("Nova", "vendor")

        if parser.has_option("Nova", "product"):
            NOVA_PRODUCT = parser.get("Nova", "product")

        if parser.has_option("Nova", "package"):
            NOVA_PACKAGE = parser.get("Nova", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error("Failed to load %(cfgfile)s: %(ex)s",
                  {'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
    """Return the vendor name, honoring any 'release' file override."""
    _load_config()

    return NOVA_VENDOR


def product_string():
    """Return the product name, honoring any 'release' file override."""
    _load_config()

    return NOVA_PRODUCT


def package_string():
    """Return the distro package suffix, or None when not packaged."""
    _load_config()

    return NOVA_PACKAGE


def version_string_with_package():
    """Return the version string, with '-<package>' appended when set."""
    if package_string() is None:
        return version_info.version_string()
    else:
        return "%s-%s" % (version_info.version_string(), package_string())
| apache-2.0 |
disigma/depot_tools | drover.py | 29 | 20560 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import optparse
import os
import re
import sys
import urlparse
import breakpad # pylint: disable=W0611
import gclient_utils
import subprocess2
USAGE = """
WARNING: Please use this tool in an empty directory
(or at least one that you don't mind clobbering.)
REQUIRES: SVN 1.5+
NOTE: NO NEED TO CHECKOUT ANYTHING IN ADVANCE OF USING THIS TOOL.
Valid parameters:
[Merge from trunk to branch]
--merge <revision> --branch <branch_num>
Example: %(app)s --merge 12345 --branch 187
[Merge from trunk to local copy]
--merge <revision> --local
Example: %(app)s --merge 12345 --local
[Merge from branch to branch]
--merge <revision> --sbranch <branch_num> --branch <branch_num>
Example: %(app)s --merge 12345 --sbranch 248 --branch 249
[Revert from trunk]
--revert <revision>
Example: %(app)s --revert 12345
[Revert from branch]
--revert <revision> --branch <branch_num>
Example: %(app)s --revert 12345 --branch 187
"""
export_map_ = None
files_info_ = None
delete_map_ = None
file_pattern_ = r"[ ]+([MADUC])[ ]+/((?:trunk|branches/.*?)/src(.*)/(.*))"
depot_tools_dir_ = os.path.dirname(os.path.abspath(__file__))
def runGcl(subcommand):
  """Run the gcl tool with *subcommand*; returns the os.system exit status.

  Prefers the gcl living next to this script, falling back to the one on
  PATH.
  """
  gcl_path = os.path.join(depot_tools_dir_, "gcl")
  if not os.path.exists(gcl_path):
    print "WARNING: gcl not found beside drover.py. Using system gcl instead..."
    gcl_path = 'gcl'
  command = "%s %s" % (gcl_path, subcommand)
  return os.system(command)
def gclUpload(revision, author):
  """Upload change *revision* for review with *author* as reviewer."""
  upload_args = ('upload %s --send_mail --no_presubmit --reviewers=%s'
                 % (revision, author))
  return runGcl(upload_args)
def getSVNInfo(url, revision):
  """Return the 'svn info' fields for url@revision as a dict.

  Keys and values are the stripped halves of each 'Key: Value' output
  line; returns {} when svn produced no parseable output.
  """
  info = {}
  svn_info = subprocess2.capture(
      ['svn', 'info', '--non-interactive', '%s@%s' % (url, revision)],
      stderr=subprocess2.VOID).splitlines()
  for line in svn_info:
    match = re.search(r"(.*?):(.*)", line)
    if match:
      info[match.group(1).strip()] = match.group(2).strip()
  return info
def isSVNDirty():
  """Return True when 'svn status' shows local modifications.

  Lines whose status column is 'X' (externals) or '?' (unversioned) are
  ignored; any other non-empty status line counts as dirty.
  """
  svn_status = subprocess2.check_output(['svn', 'status']).splitlines()
  for line in svn_status:
    match = re.search(r"^[^X?]", line)
    if match:
      return True
  return False
def getAuthor(url, revision):
  """Return the last-changed author of url@revision, or None."""
  info = getSVNInfo(url, revision)
  if (info.has_key("Last Changed Author")):
    return info["Last Changed Author"]
  return None
def isSVNFile(url, revision):
  """Return True when url@revision is a file node in SVN."""
  info = getSVNInfo(url, revision)
  if (info.has_key("Node Kind")):
    if (info["Node Kind"] == "file"):
      return True
  return False
def isSVNDirectory(url, revision):
  """Return True when url@revision is a directory node in SVN."""
  info = getSVNInfo(url, revision)
  if (info.has_key("Node Kind")):
    if (info["Node Kind"] == "directory"):
      return True
  return False
def inCheckoutRoot(path):
  """Return True when *path* is the top directory of its SVN checkout.

  True when the parent directory reports a different repository root
  (or none at all).
  """
  info = getSVNInfo(path, "HEAD")
  if (not info.has_key("Repository Root")):
    return False
  repo_root = info["Repository Root"]
  info = getSVNInfo(os.path.dirname(os.path.abspath(path)), "HEAD")
  if (info.get("Repository Root", None) != repo_root):
    return True
  return False
def getRevisionLog(url, revision):
  """Return the commit log message of url@revision as a string."""
  svn_log = subprocess2.check_output(
      ['svn', 'log', url, '-r', str(revision)],
      universal_newlines=True).splitlines(True)
  # Don't include the header lines and the trailing "---..." line.
  return ''.join(svn_log[3:-1])
def getSVNVersionInfo():
  """Parse `svn --version` output.

  Returns a dict with 'version' (string) and integer 'major'/'minor'/
  'patch' keys, or None if no version line could be found.
  """
  svn_info = subprocess2.check_output(['svn', '--version']).splitlines()
  info = {}
  for line in svn_info:
    match = re.search(r"svn, version ((\d+)\.(\d+)\.(\d+))", line)
    if match:
      info['version'] = match.group(1)
      info['major'] = int(match.group(2))
      info['minor'] = int(match.group(3))
      info['patch'] = int(match.group(4))
      # First matching line wins; the rest of the output is boilerplate.
      return info
  return None
def isMinimumSVNVersion(major, minor, patch=0):
  """Return True if the installed SVN is at least version major.minor.patch."""
  return _isMinimumSVNVersion(getSVNVersionInfo(), major, minor, patch)
def _isMinimumSVNVersion(version, major, minor, patch=0):
"""Test for minimum SVN version, internal method"""
if not version:
return False
if (version['major'] > major):
return True
elif (version['major'] < major):
return False
if (version['minor'] > minor):
return True
elif (version['minor'] < minor):
return False
if (version['patch'] >= patch):
return True
else:
return False
def checkoutRevision(url, revision, branch_url, revert=False, pop=True):
  """Sparse-checkout of *branch_url* containing only paths touched by *revision*.

  Creates an empty-depth checkout, then `svn update --depth empty` each
  ancestor directory and finally `svn up` the individual files.  With
  revert=True, files added in the revision are included too (they need to
  exist locally so the reverse-merge can delete them).  *pop* drops the
  leading path component ("trunk" / "branches/NNN") from file paths.
  """
  files_info = getFileInfo(url, revision)
  paths = getBestMergePaths2(files_info, revision)
  export_map = getBestExportPathsMap2(files_info, revision)
  command = 'svn checkout -N ' + branch_url
  print command
  os.system(command)
  # cd into the directory svn checkout just created (last URL component).
  match = re.search(r"^[a-z]+://.*/(.*)", branch_url)
  if match:
    os.chdir(match.group(1))
  # This line is extremely important due to the way svn behaves in the
  # set-depths action. If parents aren't handled before children, the child
  # directories get clobbered and the merge step fails.
  paths.sort()
  # Checkout the directories that already exist
  for path in paths:
    if (export_map.has_key(path) and not revert):
      print "Exclude new directory " + path
      continue
    subpaths = path.split('/')
    #In the normal case, where no url override is specified and it's just
    # chromium source, it's necessary to remove the 'trunk' from the filepath,
    # since in the checkout we include 'trunk' or 'branch/\d+'.
    #
    # However, when a url is specified we want to preserve that because it's
    # a part of the filepath and necessary for path operations on svn (because
    # frankly, we are checking out the correct top level, and not hacking it).
    if pop:
      subpaths.pop(0)
    base = ''
    for subpath in subpaths:
      base += '/' + subpath
      # This logic ensures that you don't empty out any directories
      if not os.path.exists("." + base):
        command = ('svn update --depth empty ' + "." + base)
        print command
        os.system(command)
  if (revert):
    files = getAllFilesInRevision(files_info)
  else:
    files = getExistingFilesInRevision(files_info)
  for f in files:
    # Prevent the tool from clobbering the src directory
    if (f == ""):
      continue
    command = ('svn up ".' + f + '"')
    print command
    os.system(command)
def mergeRevision(url, revision):
  """svn-merge the changes of *revision* into the current working copy.

  Directories newly added by the revision are skipped here; those are
  handled by exportRevision() instead.
  """
  paths = getBestMergePaths(url, revision)
  export_map = getBestExportPathsMap(url, revision)
  for path in paths:
    if export_map.has_key(path):
      continue
    command = ('svn merge -N -r ' + str(revision-1) + ":" + str(revision) + " ")
    command += " --ignore-ancestry "
    command += " -x --ignore-eol-style "
    command += url + path + "@" + str(revision) + " ." + path
    print command
    os.system(command)
def exportRevision(url, revision):
  """Export and `svn add` directories newly added in *revision*, parents first."""
  paths = getBestExportPathsMap(url, revision).keys()
  paths.sort()
  for path in paths:
    command = ('svn export -N ' + url + path + "@" + str(revision) + " ." +
               path)
    print command
    os.system(command)
    command = 'svn add .' + path
    print command
    os.system(command)
def deleteRevision(url, revision):
  """`svn delete` directories removed by *revision*, deepest children first."""
  paths = getBestDeletePathsMap(url, revision).keys()
  paths.sort()
  # Reverse order so children are deleted before their parents.
  paths.reverse()
  for path in paths:
    command = "svn delete ." + path
    print command
    os.system(command)
def revertExportRevision(url, revision):
  """Undo exportRevision(): delete directories *revision* had added."""
  paths = getBestExportPathsMap(url, revision).keys()
  paths.sort()
  paths.reverse()
  for path in paths:
    command = "svn delete ." + path
    print command
    os.system(command)
def revertRevision(url, revision):
  """Reverse-merge (-c -revision) *revision* into the working copy."""
  command = ('svn merge --ignore-ancestry -c -%d %s .' % (revision, url))
  print command
  os.system(command)
def getFileInfo(url, revision):
  """Parse `svn log -v` into [[action, full_path, dir, name], ...] rows.

  The result is memoized in the module-global files_info_, so this is only
  valid for a single revision per process run.
  """
  global files_info_
  if (files_info_ != None):
    return files_info_
  svn_log = subprocess2.check_output(
      ['svn', 'log', url, '-r', str(revision), '-v']).splitlines()
  info = []
  for line in svn_log:
    # A workaround to dump the (from .*) stuff, regex not so friendly in the 2nd
    # pass...
    match = re.search(r"(.*) \(from.*\)", line)
    if match:
      line = match.group(1)
    # file_pattern_ is a module-global regex with 4 capture groups.
    match = re.search(file_pattern_, line)
    if match:
      info.append([match.group(1).strip(), match.group(2).strip(),
                   match.group(3).strip(),match.group(4).strip()])
  files_info_ = info
  return info
def getBestMergePaths(url, revision):
  """Return the unique directory paths touched by url@revision.

  (The old docstring was copy-pasted from getRevisionLog and was wrong.)
  """
  return getBestMergePaths2(getFileInfo(url, revision), revision)
def getBestMergePaths2(files_info, revision):
  """Return the unique directory paths (column 2) from getFileInfo() rows.

  *revision* is unused but kept for signature parity with the other
  getBest*2 helpers.  The result order is unspecified (set-derived).
  (The previous docstring was copy-pasted from an unrelated function.)
  """
  return list(set([f[2] for f in files_info]))
def getBestExportPathsMap(url, revision):
  """Memoizing wrapper around getBestExportPathsMap2 for url@revision."""
  return getBestExportPathsMap2(getFileInfo(url, revision), revision)
def getBestExportPathsMap2(files_info, revision):
  """Return {"dir/name": ""} for every *directory* added ('A') in the revision.

  Cached in the module-global export_map_, so only valid for one revision
  per process run.  Only directories need an explicit export + add; added
  files are handled by the merge step.
  """
  global export_map_
  if export_map_:
    return export_map_
  result = {}
  for file_info in files_info:
    if (file_info[0] == "A"):
      if(isSVNDirectory("svn://svn.chromium.org/chrome/" + file_info[1],
                        revision)):
        result[file_info[2] + "/" + file_info[3]] = ""
  export_map_ = result
  return result
def getBestDeletePathsMap(url, revision):
  """Memoizing wrapper around getBestDeletePathsMap2 for url@revision."""
  return getBestDeletePathsMap2(getFileInfo(url, revision), revision)
def getBestDeletePathsMap2(files_info, revision):
  """Return {"dir/name": ""} for every *directory* deleted ('D') in the revision.

  Cached in the module-global delete_map_, so only valid for one revision
  per process run.  File deletions are handled by the merge step.
  """
  global delete_map_
  if delete_map_:
    return delete_map_
  result = {}
  for file_info in files_info:
    if (file_info[0] == "D"):
      if(isSVNDirectory("svn://svn.chromium.org/chrome/" + file_info[1],
                        revision)):
        result[file_info[2] + "/" + file_info[3]] = ""
  delete_map_ = result
  return result
def getExistingFilesInRevision(files_info):
  """Return "dir/name" for every entry that already existed before the revision.

  Entries with action 'A' (added) are excluded; those need special
  treatment (either a merge or an export + add).
  """
  existing = []
  for action, _, directory, name in files_info:
    if action != 'A':
      existing.append('%s/%s' % (directory, name))
  return existing
def getAllFilesInRevision(files_info):
  """Return "dir/name" for every entry in the revision, including added ('A') ones.

  Unlike getExistingFilesInRevision() nothing is filtered out.  (The
  previous docstring was copy-pasted from that function and was wrong.)
  """
  return ['%s/%s' % (f[2], f[3]) for f in files_info]
def getSVNAuthInfo(folder=None):
  """Fetch SVN authorization info from the subversion auth folder.

  Returns {auth_file_name: {key: value}}; an empty dict when the folder is
  missing or unreadable.
  """
  if not folder:
    if sys.platform == 'win32':
      # Fixed: the old value '%%APPDATA%...' never expanded, because '%%'
      # is the escape for a literal '%' in ntpath.expandvars().
      folder = '%APPDATA%\\Subversion\\auth'
    else:
      folder = '~/.subversion/auth'
  folder = os.path.expandvars(os.path.expanduser(folder))
  svn_simple_folder = os.path.join(folder, 'svn.simple')
  results = {}
  try:
    for auth_file in os.listdir(svn_simple_folder):
      # Read the SVN auth file, convert it into a dictionary, and store it.
      auth_fh = open(os.path.join(svn_simple_folder, auth_file))
      try:
        contents = auth_fh.read()
      finally:
        # The old code never closed the file handle.
        auth_fh.close()
      results[auth_file] = dict(
          re.findall(r'K [0-9]+\n(.*)\nV [0-9]+\n(.*)\n', contents))
  except Exception as _:
    # Best effort: a missing/unreadable auth folder simply yields {}.
    pass
  return results
def getCurrentSVNUsers(url):
  """Guess the current SVN usernames for *url*.

  Scans the SVN authorization folder for realm strings containing the
  URL's host; for google.com users the chromium.org alias is added too.
  """
  netloc = urlparse.urlparse(url)[1]
  auth_infos = getSVNAuthInfo()
  results = []
  # items() instead of the Python-2-only iteritems(); works on both.
  for _, auth_info in auth_infos.items():
    if ('svn:realmstring' in auth_info
        and netloc in auth_info['svn:realmstring']):
      username = auth_info['username']
      results.append(username)
      if 'google.com' in username:
        results.append(username.replace('google.com', 'chromium.org'))
  return results
def prompt(question):
  """Ask a yes/no *question* on stdin until an answer starting y/n is given."""
  while True:
    print question + " [y|n]:",
    answer = sys.stdin.readline()
    if answer.lower().startswith('n'):
      return False
    elif answer.lower().startswith('y'):
      return True
def text_prompt(question, default):
print question + " [" + default + "]:"
answer = sys.stdin.readline()
if answer.strip() == "":
return default
return answer
def drover(options, args):
  """Perform one merge or revert of a single SVN revision.

  Builds a minimal sparse working copy, runs the svn merge/revert, writes
  a changelist description file, creates a gcl changelist, and optionally
  uploads and commits it.  Returns a process exit status (0 = success).
  """
  revision = options.revert or options.merge
  # Initialize some variables used below. They can be overwritten by
  # the drover.properties file.
  BASE_URL = "svn://svn.chromium.org/chrome"
  REVERT_ALT_URLS = ['svn://svn.chromium.org/blink',
                     'svn://svn.chromium.org/chrome-internal',
                     'svn://svn.chromium.org/native_client']
  TRUNK_URL = BASE_URL + "/trunk/src"
  BRANCH_URL = BASE_URL + "/branches/$branch/src"
  SKIP_CHECK_WORKING = True
  PROMPT_FOR_AUTHOR = False
  NO_ALT_URLS = options.no_alt_urls
  DEFAULT_WORKING = "drover_" + str(revision)
  if options.branch:
    DEFAULT_WORKING += ("_" + options.branch)
  if not isMinimumSVNVersion(1, 5):
    print "You need to use at least SVN version 1.5.x"
    return 1
  # Override the default properties if there is a drover.properties file.
  global file_pattern_
  if os.path.exists("drover.properties"):
    print 'Using options from %s' % os.path.join(
        os.getcwd(), 'drover.properties')
    FILE_PATTERN = file_pattern_
    # NOTE(review): exec of a local properties file; trusted-input only.
    f = open("drover.properties")
    exec(f)
    f.close()
    if FILE_PATTERN:
      file_pattern_ = FILE_PATTERN
    NO_ALT_URLS = True
  # Pick the source URL for the operation.
  if options.revert and options.branch:
    print 'Note: --branch is usually not needed for reverts.'
    url = BRANCH_URL.replace("$branch", options.branch)
  elif options.merge and options.sbranch:
    url = BRANCH_URL.replace("$branch", options.sbranch)
  elif options.revert:
    url = options.url or BASE_URL
    file_pattern_ = r"[ ]+([MADUC])[ ]+((/.*)/(.*))"
  else:
    url = TRUNK_URL
  working = options.workdir or DEFAULT_WORKING
  if options.local:
    working = os.getcwd()
    if not inCheckoutRoot(working):
      print "'%s' appears not to be the root of a working copy" % working
      return 1
    if (isSVNDirty() and not
        prompt("Working copy contains uncommitted files. Continue?")):
      return 1
  # Heuristic: the revision may live in an alternate repo; guess by
  # checking which repo has a commit for it newer than ~6 months.
  if options.revert and not NO_ALT_URLS and not options.url:
    for cur_url in [url] + REVERT_ALT_URLS:
      try:
        commit_date_str = getSVNInfo(
            cur_url, options.revert).get('Last Changed Date', 'x').split()[0]
        commit_date = datetime.datetime.strptime(commit_date_str, '%Y-%m-%d')
        if (datetime.datetime.now() - commit_date).days < 180:
          if cur_url != url:
            print 'Guessing svn repo: %s.' % cur_url,
            print 'Use --no-alt-urls to disable heuristic.'
          url = cur_url
          break
      except ValueError:
        pass
  command = 'svn log ' + url + " -r "+str(revision) + " -v"
  os.system(command)
  if not (options.revertbot or prompt("Is this the correct revision?")):
    return 0
  if (os.path.exists(working)) and not options.local:
    if not (options.revertbot or SKIP_CHECK_WORKING or
        prompt("Working directory: '%s' already exists, clobber?" % working)):
      return 0
    gclient_utils.rmtree(working)
  if not options.local:
    os.makedirs(working)
    os.chdir(working)
  if options.merge:
    action = "Merge"
    if not options.local:
      branch_url = BRANCH_URL.replace("$branch", options.branch)
      # Checkout everything but stuff that got added into a new dir
      checkoutRevision(url, revision, branch_url)
    # Merge everything that changed
    mergeRevision(url, revision)
    # "Export" files that were added from the source and add them to branch
    exportRevision(url, revision)
    # Delete directories that were deleted (file deletes are handled in the
    # merge).
    deleteRevision(url, revision)
  elif options.revert:
    action = "Revert"
    pop_em = not options.url
    checkoutRevision(url, revision, url, True, pop_em)
    revertRevision(url, revision)
    revertExportRevision(url, revision)
  # Check the base url so we actually find the author who made the change
  if options.auditor:
    author = options.auditor
  else:
    author = getAuthor(url, revision)
    if not author:
      author = getAuthor(TRUNK_URL, revision)
  # Check that the author of the CL is different than the user making
  # the revert.  If they're the same, then we'll want to prompt the user
  # for a different reviewer to TBR.
  current_users = getCurrentSVNUsers(BASE_URL)
  is_self_revert = options.revert and author in current_users
  # Build the changelist description file: title, quoted log, TBR line.
  filename = str(revision)+".txt"
  out = open(filename,"w")
  drover_title = '%s %s' % (action, revision)
  revision_log = getRevisionLog(url, revision).splitlines()
  if revision_log:
    commit_title = revision_log[0]
    # Limit title to 68 chars so git log --oneline is <80 chars.
    max_commit_title = 68 - (len(drover_title) + 3)
    if len(commit_title) > max_commit_title:
      commit_title = commit_title[:max_commit_title-3] + '...'
    drover_title += ' "%s"' % commit_title
  out.write(drover_title + '\n\n')
  for line in revision_log:
    out.write('> %s\n' % line)
  if author:
    out.write("\nTBR=" + author)
  out.close()
  change_cmd = 'change ' + str(revision) + " " + filename
  if options.revertbot:
    # Non-interactive editor so `gcl change` does not block the bot.
    if sys.platform == 'win32':
      os.environ['SVN_EDITOR'] = 'cmd.exe /c exit'
    else:
      os.environ['SVN_EDITOR'] = 'true'
  runGcl(change_cmd)
  os.unlink(filename)
  if options.local:
    return 0
  print author
  print revision
  print ("gcl upload " + str(revision) +
         " --send_mail --no_presubmit --reviewers=" + author)
  if options.revertbot or prompt("Would you like to upload?"):
    if PROMPT_FOR_AUTHOR or is_self_revert:
      author = text_prompt("Enter new author or press enter to accept default",
                           author)
    if options.revertbot and options.revertbot_reviewers:
      author += ","
      author += options.revertbot_reviewers
    gclUpload(revision, author)
  else:
    print "Deleting the changelist."
    print "gcl delete " + str(revision)
    runGcl("delete " + str(revision))
    return 0
  # We commit if the reverbot is set to commit automatically, or if this is
  # not the revertbot and the user agrees.
  if options.revertbot_commit or (not options.revertbot and
                                  prompt("Would you like to commit?")):
    print "gcl commit " + str(revision) + " --no_presubmit --force"
    return runGcl("commit " + str(revision) + " --no_presubmit --force")
  else:
    return 0
def main():
  """Parse command-line options, validate combinations, dispatch to drover()."""
  option_parser = optparse.OptionParser(usage=USAGE % {"app": sys.argv[0]})
  option_parser.add_option('-m', '--merge', type="int",
                           help='Revision to merge from trunk to branch')
  option_parser.add_option('-b', '--branch',
                           help='Branch to revert or merge from')
  option_parser.add_option('-l', '--local', action='store_true',
                           help='Local working copy to merge to')
  option_parser.add_option('-s', '--sbranch',
                           help='Source branch for merge')
  option_parser.add_option('-r', '--revert', type="int",
                           help='Revision to revert')
  option_parser.add_option('-w', '--workdir',
                           help='subdir to use for the revert')
  option_parser.add_option('-u', '--url',
                           help='svn url to use for the revert')
  option_parser.add_option('-a', '--auditor',
                           help='overrides the author for reviewer')
  option_parser.add_option('--revertbot', action='store_true',
                           default=False)
  option_parser.add_option('--no-alt-urls', action='store_true',
                           help='Disable heuristics used to determine svn url')
  option_parser.add_option('--revertbot-commit', action='store_true',
                           default=False)
  option_parser.add_option('--revertbot-reviewers')
  options, args = option_parser.parse_args()
  # option_parser.error() exits the process, so the `return 1` lines below
  # are defensive only.
  if not options.merge and not options.revert:
    option_parser.error("You need at least --merge or --revert")
    return 1
  if options.merge and not (options.branch or options.local):
    option_parser.error("--merge requires --branch or --local")
    return 1
  if options.local and (options.revert or options.branch):
    option_parser.error("--local cannot be used with --revert or --branch")
    return 1
  return drover(options, args)
if __name__ == "__main__":
  try:
    sys.exit(main())
  except KeyboardInterrupt:
    # Exit quietly with status 1 on Ctrl-C instead of dumping a traceback.
    sys.stderr.write('interrupted\n')
    sys.exit(1)
| bsd-3-clause |
primiano/blink-gitcs | Source/devtools/scripts/generate_devtools_grd.py | 15 | 5844 | #!/usr/bin/env python
#
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Creates a grd file for packaging the inspector files."""
from __future__ import with_statement
from os import path
import errno
import os
import shutil
import sys
from xml.dom import minidom
kDevToolsResourcePrefix = 'IDR_DEVTOOLS_'
kGrdTemplate = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="0" current_release="1">
<outputs>
<output filename="grit/devtools_resources.h" type="rc_header">
<emit emit_type='prepend'></emit>
</output>
<output filename="grit/devtools_resources_map.cc" type="resource_file_map_source" />
<output filename="grit/devtools_resources_map.h" type="resource_map_header" />
<output filename="devtools_resources.pak" type="data_package" />
</outputs>
<release seq="1">
<includes></includes>
</release>
</grit>
'''
class ParsedArgs:
  """Value object bundling the four argument groups accepted by this script."""

  def __init__(self, source_files, relative_path_dirs, image_dirs, output_filename):
    """Store the parsed command-line pieces as plain attributes."""
    self.output_filename = output_filename
    self.image_dirs = image_dirs
    self.relative_path_dirs = relative_path_dirs
    self.source_files = source_files
def parse_args(argv):
  """Split argv into source files, relative-path dirs, image dirs and output.

  The argument list is positional, delimited by the --static_files_list,
  --relative_path_dirs, --images and --output markers; the static files
  list file contributes one extra source file per line.
  """
  idx_static = argv.index('--static_files_list')
  idx_rel = argv.index('--relative_path_dirs')
  idx_img = argv.index('--images')
  idx_out = argv.index('--output')
  sources = list(argv[:idx_static])
  with open(argv[idx_static + 1], 'r') as listing:
    sources.extend(line.rstrip('\n') for line in listing.readlines())
  return ParsedArgs(sources,
                    argv[idx_rel + 1:idx_img],
                    argv[idx_img + 1:idx_out],
                    argv[idx_out + 1])
def make_name_from_filename(filename):
  """Derive an upper-case grit resource constant name from a file path.

  Path separators, dashes and dots all become underscores.
  """
  name = filename
  for separator in ('/', '\\', '-', '.'):
    name = name.replace(separator, '_')
  return name.upper()
def add_file_to_grd(grd_doc, relative_filename):
  """Append a BINDATA <include> node for *relative_filename* to the grd DOM."""
  includes_node = grd_doc.getElementsByTagName('includes')[0]
  # Text node keeps the generated XML roughly indented/readable.
  includes_node.appendChild(grd_doc.createTextNode('\n '))
  new_include_node = grd_doc.createElement('include')
  new_include_node.setAttribute('name', make_name_from_filename(relative_filename))
  new_include_node.setAttribute('file', relative_filename)
  new_include_node.setAttribute('type', 'BINDATA')
  includes_node.appendChild(new_include_node)
def build_relative_filename(relative_path_dirs, filename):
  """Return *filename* relative to the first matching prefix directory.

  Falls back to the bare basename when no directory in
  *relative_path_dirs* is a prefix of *filename*.
  """
  for relative_path_dir in relative_path_dirs:
    # startswith() states the intent directly; the old code searched the
    # whole string with find() and then tested that the match was at 0.
    if filename.startswith(relative_path_dir):
      return filename[len(relative_path_dir) + 1:]
  return path.basename(filename)
def main(argv):
  """Build the devtools .grd: copy resources next to it and list them in XML."""
  parsed_args = parse_args(argv[1:])
  doc = minidom.parseString(kGrdTemplate)
  output_directory = path.dirname(parsed_args.output_filename)
  try:
    os.makedirs(path.join(output_directory, 'Images'))
  except OSError, e:
    # Ignore "already exists"; re-raise everything else (Python 2 syntax).
    if e.errno != errno.EEXIST:
      raise e
  written_filenames = set()
  for filename in parsed_args.source_files:
    relative_filename = build_relative_filename(parsed_args.relative_path_dirs, filename)
    # Avoid writing duplicate relative filenames.
    if relative_filename in written_filenames:
      continue
    written_filenames.add(relative_filename)
    target_dir = path.join(output_directory, path.dirname(relative_filename))
    if not path.exists(target_dir):
      os.makedirs(target_dir)
    shutil.copy(filename, target_dir)
    add_file_to_grd(doc, relative_filename)
  for dirname in parsed_args.image_dirs:
    for filename in os.listdir(dirname):
      # Only .png/.gif image resources are packaged from the image dirs.
      if not filename.endswith('.png') and not filename.endswith('.gif'):
        continue
      shutil.copy(path.join(dirname, filename),
                  path.join(output_directory, 'Images'))
      add_file_to_grd(doc, path.join('Images', filename))
  with open(parsed_args.output_filename, 'w') as output_file:
    output_file.write(doc.toxml(encoding='UTF-8'))
| bsd-3-clause |
nextgis/NextGIS_QGIS_open | python/ext-libs/pygments/scanner.py | 365 | 3114 | # -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
    """
    Raised when a match function is called after the scanner
    has already reached the end of the text.
    """
class Scanner(object):
    """
    Simple scanner.

    All method patterns are regular expression strings (not
    compiled expressions!)
    """

    def __init__(self, text, flags=0):
        """
        :param text: The text which should be scanned
        :param flags: default regular expression flags
        """
        self.data = text
        self.data_length = len(text)
        self.start_pos = 0
        self.pos = 0
        self.flags = flags
        self.last = None
        self.match = None
        self._re_cache = {}

    @property
    def eos(self):
        """`True` if the scanner reached the end of text."""
        # @property replaces the old `eos = property(eos, eos.__doc__)`,
        # which mistakenly passed the docstring as the property's *setter*.
        return self.pos >= self.data_length

    def _compiled(self, pattern):
        """Return the compiled form of *pattern*, cached per instance."""
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        return self._re_cache[pattern]

    def check(self, pattern):
        """
        Apply `pattern` on the current position and return
        the match object. (Doesn't touch pos). Use this for
        lookahead.  Raises `EndOfText` when already at the end.
        """
        if self.eos:
            raise EndOfText()
        return self._compiled(pattern).match(self.data, self.pos)

    def test(self, pattern):
        """Apply a pattern on the current position and check
        if it matches. Doesn't touch pos."""
        return self.check(pattern) is not None

    def scan(self, pattern):
        """
        Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
        indicates if the pattern matched. The matched value is
        stored on the instance as ``match``, the last value is
        stored as ``last``. ``start_pos`` is the position of the
        pointer before the pattern was matched, ``pos`` is the
        end position.
        """
        if self.eos:
            raise EndOfText()
        self.last = self.match
        m = self._compiled(pattern).match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos = m.start()
        self.pos = m.end()
        self.match = m.group()
        return True

    def get_char(self):
        """Scan exactly one char."""
        self.scan('.')

    def __repr__(self):
        return '<%s %d/%d>' % (
            self.__class__.__name__,
            self.pos,
            self.data_length
        )
| gpl-2.0 |
kustodian/ansible | test/units/modules/network/cumulus/test_nclu.py | 132 | 9512 | # -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os.path
import unittest
from ansible.modules.network.cumulus import nclu
class FakeModule(object):
    """Fake NCLU module used to exercise the ansible module's logic.

    There are two test flavours: fake and real.  Real tests only run when
    NCLU is installed (e.g. a Cumulus VX VM); they guard against regressions
    between NCLU versions.  Fake tests, driven by this class, check the
    ansible module's logic against the canned output below, which mimics the
    text that is screen-scraped.  If real NCLU output ever diverges, the
    real tests catch it.

    To prepare a VX:
        sudo apt-get update
        sudo apt-get install python-setuptools git gcc python-dev libssl-dev
        sudo easy_install pip
        sudo pip install ansible nose coverage
        # git the module and cd to the directory
        nosetests --with-coverage --cover-package=nclu --cover-erase --cover-branches

    A failing real test signals a risk of a version split: changing the
    module could break older NCLU versions if not done carefully.
    """

    def __init__(self, **kwargs):
        self.reset()

    def exit_json(self, **kwargs):
        self.exit_code = kwargs

    def fail_json(self, **kwargs):
        self.fail_code = kwargs

    def run_command(self, command):
        """Record *command* and answer the way the `net` CLI would."""
        self.command_history.append(command)
        if command == "/usr/bin/net pending":
            return (0, self.pending, "")
        if command == "/usr/bin/net abort":
            self.pending = ""
            return (0, "", "")
        if command.startswith("/usr/bin/net commit"):
            if not self.pending:
                return (0, "commit ignored...there were no pending changes", "")
            self.last_commit = self.pending
            self.pending = ""
            return (0, "", "")
        if command == "/usr/bin/net show commit last":
            return (0, self.last_commit, "")
        # Any other command is staged; a registered mock decides the reply.
        self.pending += command
        return self.mocks.get(command, (0, "", ""))

    def mock_output(self, command, _rc, output, _err):
        """Register a canned (rc, output, err) reply for *command*."""
        self.mocks[command] = (_rc, output, _err)

    def reset(self):
        """Clear all recorded state, as after a fresh construction."""
        self.params = {}
        self.exit_code = {}
        self.fail_code = {}
        self.command_history = []
        self.mocks = {}
        self.pending = ""
        self.last_commit = ""
def skipUnlessNcluInstalled(original_function):
    """Decorator: run the test only when the NCLU `net` binary is present.

    Fixed: unittest.skip(...) returns a *decorator*, so it must be applied
    to the function; the old code returned the decorator itself, which
    silently turned the test into a no-op instead of reporting a skip.
    """
    if os.path.isfile('/usr/bin/net'):
        return original_function
    return unittest.skip('only run if nclu is installed')(original_function)
class TestNclu(unittest.TestCase):
    """Fake-mode tests for nclu.run_nclu()/command_helper().

    Each test inspects FakeModule.command_history to verify exactly which
    `net` commands the ansible module issued, plus pending/commit state.
    """

    def test_command_helper(self):
        module = FakeModule()
        module.mock_output("/usr/bin/net add int swp1", 0, "", "")
        result = nclu.command_helper(module, 'add int swp1', 'error out')
        self.assertEqual(module.command_history[-1], "/usr/bin/net add int swp1")
        self.assertEqual(result, "")

    def test_command_helper_error_code(self):
        # Non-zero rc must surface the caller-supplied error message.
        module = FakeModule()
        module.mock_output("/usr/bin/net fake fail command", 1, "", "")
        result = nclu.command_helper(module, 'fake fail command', 'error out')
        self.assertEqual(module.fail_code, {'msg': "error out"})

    def test_command_helper_error_msg(self):
        # "ERROR:" in the output counts as failure even with rc == 0.
        module = FakeModule()
        module.mock_output("/usr/bin/net fake fail command", 0,
                           "ERROR: Command not found", "")
        result = nclu.command_helper(module, 'fake fail command', 'error out')
        self.assertEqual(module.fail_code, {'msg': "error out"})

    def test_command_helper_no_error_msg(self):
        # Without a caller message, the raw NCLU error text is reported.
        module = FakeModule()
        module.mock_output("/usr/bin/net fake fail command", 0,
                           "ERROR: Command not found", "")
        result = nclu.command_helper(module, 'fake fail command')
        self.assertEqual(module.fail_code, {'msg': "ERROR: Command not found"})

    def test_empty_run(self):
        # No commands -> only the two pending checks, nothing changed.
        module = FakeModule()
        changed, output = nclu.run_nclu(module, None, None, False, False, False, "")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                 '/usr/bin/net pending'])
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, False)

    def test_command_list(self):
        module = FakeModule()
        changed, output = nclu.run_nclu(module, ['add int swp1', 'add int swp2'],
                                        None, False, False, False, "")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                 '/usr/bin/net add int swp1',
                                                 '/usr/bin/net add int swp2',
                                                 '/usr/bin/net pending'])
        # Without commit=True the changes stay pending.
        self.assertNotEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, True)

    def test_command_list_commit(self):
        module = FakeModule()
        changed, output = nclu.run_nclu(module,
                                        ['add int swp1', 'add int swp2'],
                                        None, True, False, False, "committed")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                 '/usr/bin/net add int swp1',
                                                 '/usr/bin/net add int swp2',
                                                 '/usr/bin/net pending',
                                                 "/usr/bin/net commit description 'committed'",
                                                 '/usr/bin/net show commit last'])
        self.assertEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, True)

    def test_command_atomic(self):
        # atomic=True aborts any stale pending changes before starting.
        module = FakeModule()
        changed, output = nclu.run_nclu(module,
                                        ['add int swp1', 'add int swp2'],
                                        None, False, True, False, "atomically")
        self.assertEqual(module.command_history, ['/usr/bin/net abort',
                                                 '/usr/bin/net pending',
                                                 '/usr/bin/net add int swp1',
                                                 '/usr/bin/net add int swp2',
                                                 '/usr/bin/net pending',
                                                 "/usr/bin/net commit description 'atomically'",
                                                 '/usr/bin/net show commit last'])
        self.assertEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, True)

    def test_command_abort_first(self):
        module = FakeModule()
        module.pending = "dirty"
        nclu.run_nclu(module, None, None, False, False, True, "")
        self.assertEqual(len(module.pending), 0)

    def test_command_template_commit(self):
        # Templates are newline-separated, whitespace-padded command lists.
        module = FakeModule()
        changed, output = nclu.run_nclu(module, None,
                                        "    add int swp1\n    add int swp2",
                                        True, False, False, "committed")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                 '/usr/bin/net add int swp1',
                                                 '/usr/bin/net add int swp2',
                                                 '/usr/bin/net pending',
                                                 "/usr/bin/net commit description 'committed'",
                                                 '/usr/bin/net show commit last'])
        self.assertEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, True)

    def test_commit_ignored(self):
        # A commit with nothing pending is aborted and reports unchanged.
        module = FakeModule()
        changed, output = nclu.run_nclu(module, None, None, True, False, False, "ignore me")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                 '/usr/bin/net pending',
                                                 "/usr/bin/net commit description 'ignore me'",
                                                 '/usr/bin/net abort'])
        self.assertEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, False)
| gpl-3.0 |
cloudera/hue | desktop/core/ext-py/python-ldap-2.3.13/Tests/Lib/ldap/schema/test_tokenizer.py | 40 | 1392 | import ldap.schema
from ldap.schema.tokenizer import split_tokens,extract_tokens
# (input string, expected token list) pairs exercising whitespace handling,
# quoting, parentheses and '$'-separated lists in LDAP schema definitions.
testcases_split_tokens = (
  (" BLUBBER DI BLUBB ", ["BLUBBER", "DI", "BLUBB"]),
  ("BLUBBER DI BLUBB",["BLUBBER","DI","BLUBB"]),
  ("BLUBBER DI BLUBB ",["BLUBBER","DI","BLUBB"]),
  ("BLUBBER DI 'BLUBB' ",["BLUBBER","DI","BLUBB"]),
  ("BLUBBER ( DI ) 'BLUBB' ",["BLUBBER","(","DI",")","BLUBB"]),
  ("BLUBBER(DI)",["BLUBBER","(","DI",")"]),
  ("BLUBBER ( DI)",["BLUBBER","(","DI",")"]),
  ("BLUBBER ''",["BLUBBER",""]),
  ("( BLUBBER (DI 'BLUBB'))",["(","BLUBBER","(","DI","BLUBB",")",")"]),
  ("BLUBB (DA$BLAH)",['BLUBB',"(","DA","BLAH",")"]),
  ("BLUBB ( DA $ BLAH )",['BLUBB',"(","DA","BLAH",")"]),
  ("BLUBB (DA$ BLAH)",['BLUBB',"(","DA","BLAH",")"]),
  ("BLUBB (DA $BLAH)",['BLUBB',"(","DA","BLAH",")"]),
  ("BLUBB 'DA$BLAH'",['BLUBB',"DA$BLAH"]),
  ("BLUBB DI 'BLU B B ER' DA 'BLAH' ",['BLUBB','DI','BLU B B ER','DA','BLAH']),
  ("BLUBB DI 'BLU B B ER' DA 'BLAH' LABER",['BLUBB','DI','BLU B B ER','DA','BLAH','LABER']),
  ("BLUBBER DI 'BLU'BB ER' DA 'BLAH' ", ["BLUBBER", "DI", "BLU'BB ER", "DA", "BLAH"]), # for Oracle
  ("BLUBB DI 'BLU B B ER'MUST 'BLAH' ",['BLUBB','DI','BLU B B ER','MUST','BLAH']) # for Oracle
)
# Report-only check: compare split_tokens() output with the expectation and
# print mismatches.  NOTE(review): no assertion is raised, so a mismatch
# does not fail any test runner; also the fully-qualified
# ldap.schema.tokenizer.split_tokens is used even though split_tokens was
# imported directly above (and extract_tokens is imported but unused).
for t,r in testcases_split_tokens:
  l = ldap.schema.tokenizer.split_tokens(t,{'MUST':None})
  if l!=r:
    print 'String:',repr(t)
    print '=>',l
    print 'differs from',r
| apache-2.0 |
nwjs/chromium.src | tools/perf/contrib/cluster_telemetry/screenshot_unittest.py | 9 | 2916 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import decorators
from telemetry.testing import legacy_page_test_case
from telemetry.util import image_util
from contrib.cluster_telemetry import screenshot
class ScreenshotUnitTest(legacy_page_test_case.LegacyPageTestCase):
  """Unit tests for the Cluster Telemetry screenshot page test."""
  # This test should only run on linux, but it got disabled on Linux as
  # well because of crbug.com/1023255. Replace the following with
  # @decorators.Enabled('linux') once this is fixed on linux.
  @decorators.Disabled('all')
  def testScreenshot(self):
    """Runs the Screenshot page test and verifies a readable PNG is produced.

    The test page is expected to contain at least one pixel of the marker
    color (217, 115, 43) so we can confirm real page content was captured.
    """
    # Screenshots for Cluster Telemetry purposes currently only supported on
    # Linux platform.
    screenshot_test = screenshot.Screenshot(self.options.output_dir)
    self.RunPageTest(screenshot_test, 'file://screenshot_test.html')
    filepath = os.path.join(self.options.output_dir, 'screenshot_test.png')
    self.assertTrue(os.path.exists(filepath))
    self.assertTrue(os.path.isfile(filepath))
    self.assertTrue(os.access(filepath, os.R_OK))
    image = image_util.FromPngFile(filepath)
    screenshot_pixels = image_util.Pixels(image)
    special_colored_pixel = bytearray([217, 115, 43])
    self.assertTrue(special_colored_pixel in screenshot_pixels)
  @decorators.Enabled('linux')
  def testIsScreenshotWithinDynamicContentThreshold(self):
    """Checks dynamic-content detection on a 2x1 two-screenshot pair.

    Pixels that differ between base and next screenshot are marked in
    content_pixels with the (0, 255, 255) sentinel; the boolean result
    reports whether the changed fraction stays under the given threshold.
    """
    # TODO(lchoi): This unit test fails on Windows due to an apparent platform
    # dependent image decoding behavior that will need to be investigated in the
    # future if Cluster Telemetry ever becomes compatible with Windows.
    width = 2
    height = 1
    num_total_pixels = width * height
    content_pixels = bytearray([0, 0, 0, 128, 128, 128])
    base_screenshot = image_util.FromRGBPixels(width, height, content_pixels)
    next_pixels = bytearray([1, 1, 1, 128, 128, 128])
    next_screenshot = image_util.FromRGBPixels(width, height, next_pixels)
    expected_pixels = bytearray([0, 255, 255, 128, 128, 128])
    # One of two pixels changed: within a 0.51 threshold.
    self.assertTrue(screenshot.IsScreenshotWithinDynamicContentThreshold(
        base_screenshot, next_screenshot, content_pixels,
        num_total_pixels, 0.51))
    self.assertTrue(expected_pixels == content_pixels)
    next_pixels = bytearray([0, 0, 0, 1, 1, 1])
    next_screenshot = image_util.FromRGBPixels(2, 1, next_pixels)
    expected_pixels = bytearray([0, 255, 255, 0, 255, 255])
    self.assertTrue(screenshot.IsScreenshotWithinDynamicContentThreshold(
        base_screenshot, next_screenshot, content_pixels,
        num_total_pixels, 0.51))
    self.assertTrue(expected_pixels == content_pixels)
    # Both pixels now flagged as dynamic: exceeds a 0.49 threshold.
    self.assertFalse(screenshot.IsScreenshotWithinDynamicContentThreshold(
        base_screenshot, next_screenshot, content_pixels,
        num_total_pixels, 0.49))
| bsd-3-clause |
payamsm/django-allauth | docs/conf.py | 30 | 7794 | # -*- coding: utf-8 -*-
#
# django-allauth documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 6 22:58:42 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-allauth'
copyright = u'2015, Raymond Penners'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE: keep `version` and `release` in sync when bumping the package version.
version = '0.23.0'
# The full version, including alpha/beta/rc tags.
release = '0.23.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-allauthdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'django-allauth.tex', u'django-allauth Documentation',
   u'Raymond Penners', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-allauth', u'django-allauth Documentation',
     [u'Raymond Penners'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): the description below is the sphinx-quickstart placeholder
# ("One line description of project.") — consider filling it in.
texinfo_documents = [
  ('index', 'django-allauth', u'django-allauth Documentation',
   u'Raymond Penners', 'django-allauth', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
msmolens/VTK | ThirdParty/Twisted/twisted/internet/_signals.py | 62 | 2718 | # -*- test-case-name: twisted.internet.test.test_sigchld -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module is used to integrate child process termination into a
reactor event loop. This is a challenging feature to provide because
most platforms indicate process termination via SIGCHLD and do not
provide a way to wait for that signal and arbitrary I/O events at the
same time. The naive implementation involves installing a Python
SIGCHLD handler; unfortunately this leads to other syscalls being
interrupted (whenever SIGCHLD is received) and failing with EINTR
(which almost no one is prepared to handle). This interruption can be
disabled via siginterrupt(2) (or one of the equivalent mechanisms);
however, if the SIGCHLD is delivered by the platform to a non-main
thread (not a common occurrence, but difficult to prove impossible),
the main thread (waiting on select() or another event notification
API) may not wake up leading to an arbitrary delay before the child
termination is noticed.
The basic solution to all these issues involves enabling SA_RESTART
(ie, disabling system call interruption) and registering a C signal
handler which writes a byte to a pipe. The other end of the pipe is
registered with the event loop, allowing it to wake up shortly after
SIGCHLD is received. See L{twisted.internet.posixbase._SIGCHLDWaker}
for the implementation of the event loop side of this solution. The
use of a pipe this way is known as the U{self-pipe
trick<http://cr.yp.to/docs/selfpipe.html>}.
From Python version 2.6, C{signal.siginterrupt} and C{signal.set_wakeup_fd}
provide the necessary C signal handler which writes to the pipe to be
registered with C{SA_RESTART}.
"""
from __future__ import division, absolute_import
import signal
def installHandler(fd):
    """
    Install a signal handler which will write a byte to C{fd} when
    I{SIGCHLD} is received.

    This is implemented by installing a SIGCHLD handler that does nothing,
    setting the I{SIGCHLD} handler as not allowed to interrupt system calls,
    and using L{signal.set_wakeup_fd} to do the actual writing.

    @param fd: The file descriptor to which to write when I{SIGCHLD} is
        received.  Pass C{-1} to uninstall and restore the default
        disposition instead.
    @type fd: C{int}

    @return: The previous wakeup file descriptor (from
        L{signal.set_wakeup_fd}), or C{None} when uninstalling.
    """
    if fd == -1:
        # Uninstall: restore the default SIGCHLD disposition.
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        return None
    # The Python-level handler does nothing; the byte is written by the
    # C-level machinery behind set_wakeup_fd.
    def _ignoreChld(*unused):
        pass
    signal.signal(signal.SIGCHLD, _ignoreChld)
    signal.siginterrupt(signal.SIGCHLD, False)
    return signal.set_wakeup_fd(fd)
def isDefaultHandler():
    """
    Determine whether the I{SIGCHLD} handler is the default or not.

    @return: C{True} if the current disposition is C{SIG_DFL},
        C{False} otherwise.
    """
    current = signal.getsignal(signal.SIGCHLD)
    return current == signal.SIG_DFL
| bsd-3-clause |
ihorlaitan/poet | common/modules/exec.py | 3 | 3197 | import module
import config as CFG
import re
import zlib
# Validates a full `exec' command line: optional -o flag with optional
# filename, followed by one or more double-quoted commands.  server_exec()
# appends a trailing space before matching, hence the trailing \s+ here.
REGEX = re.compile('^exec(\s+-o(\s+[\w.]+)?)?\s+(("[^"]+")\s+)+$')
# Command names registered with the module dispatch decorators below.
EXEC = 'exec'
RECON = 'recon'
# Help text shown for `exec -h' / `recon -h'; the archive directory is
# taken from the shared config module.
USAGE = """Execute commands on target.
usage: exec [-o [filename]] "cmd1" ["cmd2" "cmd3" ...]
\nExecute given commands and optionally log to file with optional filename.
\noptions:
-h\t\tshow help
-o filename\twrite results to file in {}/'.""".format(CFG.ARCHIVE_DIR)
@module.server_handler(EXEC)
def server_exec(server, argv):
    """Server-side `exec' handler: validate the command line, parse it
    with preprocess(), and forward the result to server.generic().

    Prints USAGE and returns on malformed input or -h/--help.
    """
    # extra space is for regex
    if len(argv) < 2 or argv[1] in ('-h', '--help') or not REGEX.match(' '.join(argv) + ' '):
        print USAGE
        return
    try:
        preproc = preprocess(argv)
    except Exception:
        # preprocess() raises on a bare "exec -o" with nothing to run
        print USAGE
        return
    server.generic(*preproc)
@module.client_handler(EXEC)
def client_exec(client, inp):
    """Handle server `exec' command.

    Strip the leading `exec' token, run the remaining quoted commands via
    execute(), and send the formatted report back over the client socket.
    """
    tokens = inp.split()
    exec_str = ' '.join(tokens[1:])
    client.s.send(execute(client, exec_str))
@module.server_handler(RECON)
def server_recon(server, argv):
    """Server-side `recon' handler: dispatch to server.generic() with
    optional -o [filename] logging flags; print USAGE otherwise.
    """
    if '-h' in argv or '--help' in argv:
        print USAGE
        return
    argc = len(argv)
    if argc == 1:
        # plain "recon": no logging
        server.generic(RECON)
    elif '-o' in argv:
        if argc == 2:
            # "recon -o": log with default filename
            server.generic(RECON, True)
        elif argc == 3:
            # NOTE(review): assumes the form "recon -o <filename>", i.e.
            # argv[2] is the filename — confirm '-o' cannot appear last here.
            server.generic(RECON, True, argv[2])
        else:
            print USAGE
    else:
        print USAGE
@module.client_handler(RECON)
def client_recon(client, inp):
    """Run a fixed set of recon commands on the target and send the
    zlib-compressed report back over the client socket.
    """
    # Fall back to `ip addr` when `which ifconfig` reports no match.
    if 'no' in client.cmd_exec('which ifconfig'):
        ipcmd = 'ip addr'
    else:
        ipcmd = 'ifconfig'
    recon_cmds = '"whoami" "id" "uname -a" "lsb_release -a" "{}" "w" "who -a"'.format(ipcmd)
    report = execute(client, recon_cmds)
    client.s.send(zlib.compress(report))
def execute(client, exec_str):
    """Run each quoted command in exec_str on the client and return a
    single report string with a '=' banner before each command's output.
    """
    sections = []
    for cmd in parse_exec_cmds(exec_str):
        result = client.cmd_exec(cmd)
        sections.append('='*20 + '\n\n$ {}\n{}\n'.format(cmd, result))
    return ''.join(sections)
def preprocess(argv):
    """Parse a posh `exec' argument list into generic() parameters.

    Mutates argv in place when the -o option is present: the option and
    its optional filename are removed before the remainder is joined.

    Args:
        argv: raw `exec' command line as a list of tokens.

    Returns:
        Tuple (command_string, write_flag, write_file) suitable for
        expansion into self.generic() parameters.

    Raises:
        Exception: if the line is just "exec -o" with nothing to run.
    """
    write_flag = (argv[1] == '-o')
    write_file = None
    if write_flag:
        if len(argv) == 2:
            # bare "exec -o" -- nothing to execute
            raise Exception
        # An unquoted token right after -o is the output filename.
        if '"' not in argv[2]:
            write_file = argv[2]
            del argv[2]
        del argv[1]
    return ' '.join(argv), write_flag, write_file
def parse_exec_cmds(inp):
    """Split a server `exec' string of quoted commands into a list.

    Converts a space-delimited string of double-quoted commands, for
    example '"ls -l" "cat /etc/passwd"', into ['ls -l', 'cat /etc/passwd'].
    The server-side regex guarantees the quote layout, so the '" "'
    boundary between adjacent commands is always well formed.

    Returns:
        List of commands to execute, quotes stripped.
    """
    cmds = []
    rest = inp
    while rest.count('"') > 2:
        # Index just past the closing quote of the first command.
        boundary = rest.find('" "') + 2
        cmds.append(rest[1:boundary - 2])
        rest = rest[boundary:]
    # Single remaining quoted command.
    cmds.append(rest[1:-1])
    return cmds
| mit |
MattDerry/pendulum_3d | src/pendulum_3d_simulator_corr_inv.py | 1 | 30386 | #!/usr/bin/env python
"""
Matt Derry
Summer 2014
Simulate spherical inverted pendulum and publish results so that
ROS can be visualization.
"""
## define all imports:
import roslib
roslib.load_manifest('pendulum_3d')
import rospy
import tf
from sensor_msgs.msg import JointState as JS
from pendulum_3d.msg import *
import std_srvs.srv as SS
import geometry_msgs.msg as GM
from visualization_msgs.msg import Marker
from sensor_msgs.msg import Joy
import trep
from trep import discopt
from math import pi, fmod, atan2, sin, exp
import numpy as np
import scipy.signal as sp
import matplotlib as mpl
from dynamic_reconfigure.server import Server as DynamicReconfigureServer
from pendulum_3d.cfg import PendulumConfig
import csv
import sys
import time
#####################
# Experiment Settings
#####################
USER_NUMBER = 1
MAX_TRIALS = 10
STARTING_TRUST = 0.3
# Torque scale applied to joystick axis deflection in joy_cb().
MAX_TORQUE = 100.0
INPUT_DEVICE = "balance_board" # "joystick"
# Blending factor for the per-trial trust update in User.update_trust().
ALPHA = 0.05
EXPONENT = -0.0001
# Cost clamp bounds used when scoring a trial.
MAX_COST = 200000
MIN_COST = 50000
####################
# GLOBAL VARIABLES #
####################
# NOTE: Python 2 file — the trailing dots force float division.
DT = 1/50.
tf_freq = 30.
TIMESTEP = 20 # in ms
EXCEPTION_COUNT_MAX = 5
DISTURBANCE_MAGNITUDE = 2.0
# Pendulum params:
MAX_LINKS = 2
# Weight limits:
MAX_WEIGHT = 1000000000
MIN_WEIGHT = 0.0001
KIN_WEIGHT = 1.0
# Trust limits:
MAX_TRUST = 1.0
MIN_TRUST = 0.0
CURRENT_TRUST = 0.0
# Saturation:
MAX_VELOCITY = 0.25/(TIMESTEP/1000.0)
SATURATION_TORQUE = 1000.0
MIN_SCALE = 0.1
MAX_SCALE = 2.0
# Simulator modes used by PendulumSimulator.state.
STATE_INTERACTIVE = 0
STATE_CONTROLLED_INTERACTIVE = 1
# Reference configuration angles (radians): upright target and the
# commanded "dip" excursion used between the transition timesteps.
QBAR_END = pi
QBAR_DIP = pi/8
QBAR_DIP1 = QBAR_DIP
QBAR_DIP2 = QBAR_DIP
TS_TRANSITION_1 = 200
TS_TRANSITION_2 = 400
CURRENT_TRIAL_NUMBER = 0
###########################
# MISCELLANEOUS FUNCTIONS #
###########################
# map to 0 to 2pi
def normalize_angle_positive(angle):
    """Map an angle in radians onto [0, 2*pi)."""
    two_pi = 2.0*pi
    wrapped = fmod(angle, two_pi)
    return fmod(wrapped + two_pi, two_pi)
# map to -pi to +pi
def normalize_angle(angle):
    """Map an angle in radians onto (-pi, pi]."""
    wrapped = normalize_angle_positive(angle)
    if wrapped > pi:
        return wrapped - 2.0*pi
    return wrapped
# utility function for getting minimum angular distance between two angles in
# radians. Answer will be -pi <= result <= pi, adding result to 'before' will
# always be an angle equivalent to 'after'
def shortest_angular_distance(before, after):
    """Return the minimal signed rotation taking `before` to `after`.

    The result lies in [-pi, pi]; adding it to `before` yields an angle
    equivalent to `after`.
    """
    delta = normalize_angle_positive(after) - normalize_angle_positive(before)
    diff = normalize_angle_positive(delta)
    if diff > pi:
        diff = -(2.0*pi - diff)
    return normalize_angle(diff)
class User:
    """Per-subject experiment state: a running trust estimate plus CSV logging.

    Construction writes a parameter CSV and opens a per-user trust log under
    /home/mderry/cps_data/user_<id>/.  update_trust() blends each task's raw
    trust into the running estimate with weight `alpha` and appends one log
    row per trial; the log file is closed after the final trial.
    """
    def __init__(self, user_number, max_number_of_trials, starting_trust, trial_alpha, input_device):
        self.user_id = user_number
        self.max_num_trials = max_number_of_trials
        self.current_trial_num = 0
        self.starting_trust = starting_trust
        self.input_device = input_device
        self.current_trust = starting_trust
        self.alpha = trial_alpha
        param_file_name = '/home/mderry/cps_data/user_' + str(user_number) + '/user_' + str(user_number) + '_params.csv'
        # Remember the log path on the instance so write_to_log() can reopen
        # the correct file later.
        self.log_file_name = '/home/mderry/cps_data/user_' + str(user_number) + '/user_' + str(user_number) + '_trust_log.csv'
        with open(param_file_name, 'wb') as csvfile:
            paramwriter = csv.writer(csvfile, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
            paramwriter.writerow(['User Number', 'Input Device', 'Max_Num_Trials', 'Starting Trust', 'Alpha'])
            paramwriter.writerow([str(user_number), input_device, str(max_number_of_trials), str(starting_trust), str(trial_alpha)])
        self.trust_log_file = open(self.log_file_name, 'wb')
        self.log_writer = csv.writer(self.trust_log_file, delimiter=',', quotechar="'", quoting=csv.QUOTE_NONE)
        self.log_writer.writerow(['Trial Number', 'Current Trust', 'Raw Task Trust', 'New Trust', 'Raw Task Cost'])
        self.write_to_log(0, starting_trust, 0, starting_trust, 0) # no single task trust yet, so just set to zero for now
    def update_trust(self, new_trust, new_cost):
        """Blend new_trust into the running estimate (weight alpha) and log the trial."""
        self.current_trial_num = self.current_trial_num + 1
        adjusted_trust = (1-self.alpha) * self.current_trust + self.alpha * new_trust
        self.write_to_log(self.current_trial_num, self.current_trust, new_trust, adjusted_trust, new_cost)
        self.current_trust = adjusted_trust
        if self.current_trial_num == self.max_num_trials:
            # Final trial: flush and release the log file.
            self.trust_log_file.close()
    def get_current_trust(self):
        """Return the current blended trust estimate."""
        return self.current_trust
    def set_alpha(self, new_alpha):
        """Change the blending weight used by update_trust()."""
        self.alpha = new_alpha
    def write_to_log(self, t_num, current_trust, single_task_trust, adjusted_trust, single_task_cost):
        """Append one row to this user's trust log, reopening the file if it was closed."""
        if self.trust_log_file.closed:
            # BUGFIX: reopen this instance's own log (self.log_file_name).
            # The previous code rebuilt the path from the global USER_NUMBER,
            # so a User constructed with a different user_number appended its
            # rows to the wrong user's log file.
            self.trust_log_file = open(self.log_file_name, 'ab')
            self.log_writer = csv.writer(self.trust_log_file, delimiter=',', quotechar="'", quoting=csv.QUOTE_NONE)
            self.log_writer.writerow([str(t_num), str(current_trust), str(single_task_trust), str(adjusted_trust), str(single_task_cost)])
            self.trust_log_file.close()
        else:
            self.log_writer.writerow([str(t_num), str(current_trust), str(single_task_trust), str(adjusted_trust), str(single_task_cost)])
# trep system generator:
class BalanceBoard(trep.System):
    """trep model of the spherical inverted pendulum.

    Builds a chain of num_links links, each with an RX and RY rotational
    joint, adds gravity and damping, and attaches configuration torques
    to the first link's two base joints (the actuated inputs).
    """
    def __init__(self, num_links, link_length, link_mass, damping):
        # Model parameters; each link is a point mass at the end of a
        # massless rod of length link_length.
        super(BalanceBoard, self).__init__()
        self.num_links = num_links
        self.link_length = link_length
        self.link_mass = link_mass
        rospy.loginfo("Build balance board")
        bboard = trep.Frame(self.world_frame, trep.TX, 0)
        self._add_link(bboard, 1.0)
        trep.potentials.Gravity(self, (0, 0.0, -9.8))
        # NOTE(review): the `damping` parameter is ignored here — the damping
        # coefficient is hard-coded to 1.0.  Confirm whether it should be
        # trep.forces.Damping(self, damping).
        trep.forces.Damping(self, 1.0)
        # Actuation: torques on the first link's two base angles only.
        trep.forces.ConfigForce(self, 'link-1-base_x', 'torque1_x')
        trep.forces.ConfigForce(self, 'link-1-base_y', 'torque1_y')
        rospy.loginfo("Configuration Variables: %d (Kinematic: %d, Dynamic: %d), Inputs: %d, Constraints: %d", self.nQ, self.nQk, self.nQd, self.nu, self.nc)
    def _add_link(self, parent, link):
        # Recursively append link frames until num_links links exist.
        # `link` is the 1-based index of the link being added.
        if link == self.num_links+1:
            return
        base1 = trep.Frame(parent, trep.RX, 'link-%d-base_x' % link,
                           'link-%d-base_x' % link)
        base2 = trep.Frame(base1, trep.RY, 'link-%d-base_y' % link,
                           'link-%d-base_y' % link)
        end = trep.Frame(base2, trep.TZ, self.link_length,
                         mass=self.link_mass, name=('link-%d' % link))
        self._add_link(end, link+1)
def create_systems(max_links, link_length, link_mass, frequency, amplitude, damping):
    """
    Build and return the BalanceBoard pendulum system.

    Note: the link count is hard-coded to 2; max_links, frequency and
    amplitude are accepted for interface compatibility but unused here.
    """
    num_links = 2
    rospy.loginfo("Creating %d link balance board" % num_links)
    return BalanceBoard(num_links, float(link_length), float(link_mass), float(damping))
class PendulumSimulator:
    def __init__(self, syst, trust, trial_num):
        """
        Wire up the simulator: build the variational integrator for the
        given trep system, compute the stabilizing feedback controllers,
        and register all ROS publishers, subscribers and timers.

        :param syst: trep.System (the balance-board pendulum) to simulate
        :param trust: blending weight between user and controller input
            (stored on self; presumably consumed by the integration step —
            confirm in update_interactive/downstream code)
        :param trial_num: trial index used to name per-trial output files
        """
        rospy.loginfo("Creating pendulum simulator")
        self.links_bool = rospy.get_param('links', False)
        self.create_timer_marker()
        # first, let's just define the initial configuration of the system:
        self.sys = syst
        self.mvi = trep.MidpointVI(self.sys)
        # self.start_interactive()
        self.start_controlled_interactive()
        self.trial_num = trial_num
        self.finished = False
        # fill out a message, and store it in the class so that I can update it
        # whenever necessary
        # Maps URDF joint names -> trep configuration variable names.
        self.mappings = {
            'link1_rx_link': 'link-1-base_x',
            'link1_ry_link': 'link-1-base_y',
            'link2_rx_link': 'link-2-base_x',
            'link2_ry_link': 'link-2-base_y',
            }
        self.js = JS()
        self.names = [x for x in self.mappings.keys()]
        self.js.name = self.names
        self.js.header.frame_id = 'base'
        self.update_values()
        self.count_exceptions = 0
        # Latest user torque commands, updated by joy_cb().
        self.user_ux = 0.0
        self.user_uy = 0.0
        self.reset_button_prev = 0
        self.reset_button = 0
        self.trust = trust
        self.max_torque = MAX_TORQUE # rospy.get_param('~max_torque', MAX_TORQUE)
        self.state = STATE_CONTROLLED_INTERACTIVE # rospy.get_param('~state', STATE_CONTROLLED_INTERACTIVE)
        self.create_task_marker()
        self.create_target_marker()
        self.create_score_msg()
        # define tf broadcaster and listener
        self.br = tf.TransformBroadcaster()
        self.listener = tf.TransformListener()
        # define a publisher for the joint states
        self.joint_pub = rospy.Publisher("joint_states", JS, queue_size=2)
        # define a timer for publishing the frames and tf's
        self.tf_timer = rospy.Timer(rospy.Duration(1.0/tf_freq), self.send_joint_states)
        # define a timer for integrating the state of the VI
        # rospy.Subscriber("/board_joy", Joy, self.joy_cb)
        rospy.Subscriber("/joy", Joy, self.joy_cb)
        self.timer_pub = rospy.Publisher("/task_timer", Marker, queue_size=1)
        self.task_pub = rospy.Publisher("/task_direction", Marker, queue_size=2)
        self.target_pub = rospy.Publisher("/task_target", Marker, queue_size=2)
        self.score_pub = rospy.Publisher("/score", Score, queue_size=2)
        # Give the publishers a moment to connect before the first publish.
        time.sleep(0.5)
        self.timer_marker.header.stamp = rospy.Time.now()
        self.timer_marker.text = "START"
        self.timer_pub.publish(self.timer_marker)
        self.task_pub.publish(self.task_marker)
        self.target_marker.header.stamp = rospy.Time.now()
        self.target_pub.publish(self.target_marker)
        # Main integration loop runs at 1/DT Hz.
        self.timer = rospy.Timer(rospy.Duration(DT), self.update_interactive)
        rospy.loginfo("Starting integration...")
def create_task_marker(self):
self.task_marker = Marker()
self.task_marker.header.frame_id = 'base'
self.task_marker.header.stamp = rospy.Time.now()
#self.task_marker.lifetime = 4.0
self.task_marker.id = 1
self.task_marker.type = 0
self.task_marker.action = 0
self.task_marker.pose.position.x = 0.0
self.task_marker.pose.position.y = 0.0
self.task_marker.pose.position.z = 0.0
# rospy.loginfo("task heading: %f", atan2(QBAR_DIP1, QBAR_DIP2))
self.task_marker.pose.orientation = GM.Quaternion(*(tf.transformations.quaternion_from_euler(atan2(QBAR_DIP2, -1*QBAR_DIP1), 0, 0, 'rzyx')))
self.task_marker.color.r = 1.0
self.task_marker.color.g = 1.0
self.task_marker.color.b = 0.0
self.task_marker.color.a = 1.0
self.task_marker.scale.z = 0.01
self.task_marker.scale.x = 0.750
self.task_marker.scale.y = 0.15
###########################
# Timer Marker
###########################
def create_timer_marker(self):
self.timer_marker = Marker()
self.timer_marker.header.frame_id = 'base'
self.timer_marker.header.stamp = rospy.Time.now()
self.timer_marker.id = 1
self.timer_marker.type = 9
self.timer_marker.action = 0
self.timer_marker.pose.position.x = 0
self.timer_marker.pose.position.y = 2.0
self.timer_marker.pose.position.z = 0
self.timer_marker.text = "0"
self.timer_marker.color.r = 1.0
self.timer_marker.color.g = 1.0
self.timer_marker.color.b = 0.0
self.timer_marker.color.a = 1.0
self.timer_marker.scale.z = 0.5
def create_target_marker(self):
self.target_marker = Marker()
self.target_marker.header.frame_id = 'base'
self.target_marker.header.stamp = rospy.Time.now()
self.target_marker.id = 1
self.target_marker.type = 2
self.target_marker.action = 0
self.target_marker.pose.position.x = 0.0
self.target_marker.pose.position.y = 0.0
self.target_marker.pose.position.z = -2.0
self.target_marker.text = "0"
self.target_marker.color.r = 0.0
self.target_marker.color.g = 0.0
self.target_marker.color.b = 1.0
self.target_marker.color.a = 0.5
self.target_marker.scale.x = 0.35
self.target_marker.scale.y = 0.35
self.target_marker.scale.z = 0.35
    def create_score_msg(self):
        # Allocate the Score message whose fields are filled in by
        # calc_correlations() and published on /score.
        self.score = Score()
def joy_cb(self, data):
self.joy = data
self.user_ux = self.max_torque * -data.axes[0]
self.user_uy = self.max_torque * -data.axes[1]
self.reset_button_prev = self.reset_button
self.reset_button = data.buttons[0]
if self.reset_button_prev and not self.reset_button:
self.reset()
    def start_controlled_interactive(self):
        """
        Prepare the controlled-interactive trial: build the discrete
        system over a 600-step horizon, construct the reference
        trajectories (upright hold, and a "dip" excursion between steps
        200 and 400), compute LQR feedback gains for both references,
        allocate the logging arrays, and initialize the integrator at
        the upright configuration.
        """
        self.state = STATE_CONTROLLED_INTERACTIVE
        self.simulation_failed = False
        # calculate linearization, and feedback controller:
        tvec = np.arange(0, 600.0*TIMESTEP/1000.0, TIMESTEP/1000.0)
        self.dsys = trep.discopt.DSystem(self.mvi, tvec)
        link1_rx_index = self.dsys.system.get_config('link-1-base_x').index
        link1_ry_index = self.dsys.system.get_config('link-1-base_y').index
        link2_rx_index = self.dsys.system.get_config('link-2-base_x').index
        link2_ry_index = self.dsys.system.get_config('link-2-base_y').index
        # Reference 1: hold the upright (QBAR_END) configuration throughout.
        qd_up = np.zeros((len(tvec), self.mvi.system.nQ))
        qd_up[:] = [0]*self.mvi.system.nQ
        qd_up[:,link1_rx_index] = QBAR_END
        #qd_up[:,link1_ry_index] = QBAR_END
        # Reference 2: same, but with a dip excursion spliced in below.
        qd_dip = np.zeros((len(tvec), self.mvi.system.nQ))
        qd_dip[:] = [0]*self.mvi.system.nQ
        qd_dip[:,link1_rx_index] = QBAR_END
        #qd_dip[:,link1_ry_index] = QBAR_END
        # Combined reference used for scoring (self.Xd / self.Ud below).
        self.qd_comb = np.zeros((len(tvec), self.mvi.system.nQ))
        self.qd_comb[:] = [0]*self.mvi.system.nQ
        self.qd_comb[:,link1_rx_index] = QBAR_END
        #self.qd_comb[:,link1_ry_index] = QBAR_END
        # Between timesteps 200 and 400 the reference dips by QBAR_DIP.
        for i, t in enumerate(tvec):
            if i > 200 and i < 400:
                qd_dip[i, link1_rx_index] = normalize_angle(QBAR_END + QBAR_DIP) # Set desired configuration trajectory
                qd_dip[i, link1_ry_index] = -1*normalize_angle(QBAR_DIP)
                qd_dip[i, link2_rx_index] = -1*QBAR_DIP
                qd_dip[i, link2_ry_index] = QBAR_DIP
                self.qd_comb[i, link1_rx_index] = normalize_angle(QBAR_END + QBAR_DIP) # Set desired configuration trajectory
                self.qd_comb[i, link1_ry_index] = -1*normalize_angle(QBAR_DIP)
                self.qd_comb[i, link2_rx_index] = -1*QBAR_DIP
                self.qd_comb[i, link2_ry_index] = QBAR_DIP
        # Single-state targets for the dip and upright configurations.
        qd_state_dip = np.zeros((1, self.mvi.system.nQ))
        qd_state_dip[0, link1_rx_index] = normalize_angle(QBAR_END + QBAR_DIP)
        qd_state_dip[0, link1_ry_index] = -1*normalize_angle(QBAR_DIP)
        qd_state_dip[0, link2_rx_index] = -1*QBAR_DIP
        qd_state_dip[0, link2_ry_index] = QBAR_DIP
        self.xBar_dip = self.dsys.build_state(qd_state_dip)
        qd_state_end = np.zeros((1, self.mvi.system.nQ))
        qd_state_end[0, link1_rx_index] = normalize_angle(QBAR_END)
        qd_state_end[0, link1_ry_index] = 0.0
        qd_state_end[0, link2_rx_index] = 0.0
        qd_state_end[0, link2_ry_index] = 0.0
        self.xBar_end = self.dsys.build_state(qd_state_end)
        (Xd_up, Ud_up) = self.dsys.build_trajectory(qd_up) # Set desired state and input trajectory
        (Xd_dip, Ud_dip) = self.dsys.build_trajectory(qd_dip)
        (Xd_comb, Ud_comb) = self.dsys.build_trajectory(self.qd_comb)
        self.Xd = Xd_comb
        self.Ud = Ud_comb
        # rospy.loginfo("X shape")
        # rospy.loginfo(Xd_up.shape)
        # rospy.loginfo("U shape")
        # rospy.loginfo(Ud_up.shape)
        # Per-trial logs, filled in as the simulation advances.
        self.state_trajectory = np.zeros(Xd_up.shape)
        self.user_input_trajectory = np.zeros(Ud_up.shape)
        self.controller_input_trajectory = np.zeros(Ud_up.shape)
        self.combined_input_trajectory = np.zeros(Ud_up.shape)
        # rospy.loginfo("Xd: %d", len(Xd_up))
        # LQR weights: configuration/momentum/velocity blocks of the state.
        dyn_weight = 50
        kin_weight = 25
        mom_weight = 1
        vel_weight = kin_weight*TIMESTEP/1000.0
        self.Q = np.diag(np.hstack(([dyn_weight]*self.sys.nQd,
                                    [kin_weight]*self.sys.nQk,
                                    [mom_weight]*self.sys.nQd,
                                    [vel_weight]*self.sys.nQk)))
        self.R = 0.000001*np.identity(2)
        # rospy.loginfo("Q shape: %s, R shape %s", self.Q.shape, self.R.shape)
        self.Qk = lambda k: self.Q
        self.Rk = lambda k: self.R
        # Time-varying feedback gains for the upright and dip references.
        (Kstab_up, A_up, B_up) = self.dsys.calc_feedback_controller(Xd_up, Ud_up, self.Qk, self.Rk, return_linearization=True)
        (Kstab_dip, A_dip, B_dip) = self.dsys.calc_feedback_controller(Xd_dip, Ud_dip, self.Qk, self.Rk, return_linearization=True)
        self.Kstab_up = Kstab_up
        self.Kstab_dip = Kstab_dip
        self.k = 1
        self.state_trajectory[0] = self.xBar_end
        # set initial system state
        q0 = (QBAR_END, 0.0, 0.0, 0.0)
        q1 = (QBAR_END, 0.0, 0.0, 0.0)
        self.mvi.initialize_from_configs(0.0, q0, DT, q1)
        self.disp_q = self.mvi.q2
        self.disp_qd = np.hstack((self.mvi.q2[0:self.sys.nQd], [0]*self.sys.nQk))
def calc_correlations(self):
self.state_trajectory
self.combined_input_trajectory
self.user_input_trajectory
self.controller_input_trajectory
self.cmd_x_corr = np.correlate(self.user_input_trajectory[:,0], self.controller_input_trajectory[:,0], 'full')
self.cmd_y_corr = np.correlate(self.user_input_trajectory[:,1], self.controller_input_trajectory[:,1], 'full')
max_x_idx = np.argmax(self.cmd_x_corr)
max_y_idx = np.argmax(self.cmd_y_corr)
max_idx = 0
max_corr = 0
for i in range(len(self.cmd_x_corr)):
if self.cmd_x_corr[i] + self.cmd_y_corr[i] > max_corr:
max_idx = i
max_corr = self.cmd_x_corr[i] + self.cmd_y_corr[i]
self.score.cmd_corr = max_corr
self.score.cmd_offset = max_idx - (len(self.user_input_trajectory[:,0]))
self.score.cmd_x_corr = self.cmd_x_corr[max_x_idx]
self.score.cmd_x_corr_offset = max_x_idx - (len(self.user_input_trajectory[:,0]))
self.score.cmd_y_corr = self.cmd_y_corr[max_y_idx]
self.score.cmd_y_corr_offset = max_y_idx - (len(self.user_input_trajectory[:,0]))
rospy.loginfo("x cmd correlation: %f, index offset: %d", self.cmd_x_corr[max_x_idx], max_x_idx - (len(self.user_input_trajectory[:,0])))
rospy.loginfo("y cmd correlation: %f, index offset: %d", self.cmd_y_corr[max_y_idx], max_y_idx - (len(self.user_input_trajectory[:,0])))
rospy.loginfo("cmd correlation: %f, index offset: %d", max_corr, max_idx - (len(self.user_input_trajectory[:,0])))
self.state_x1_corr = np.correlate(self.state_trajectory[:,0], self.Xd[:,0], 'full')
self.state_y1_corr = np.correlate(self.state_trajectory[:,1], self.Xd[:,1], 'full')
self.state_x2_corr = np.correlate(self.state_trajectory[:,2], self.Xd[:,2], 'full')
self.state_y2_corr = np.correlate(self.state_trajectory[:,3], self.Xd[:,3], 'full')
max_idx = 0
max_corr = 0
for i in range(len(self.state_x1_corr)):
if self.state_x1_corr[i] + self.state_y1_corr[i] + self.state_x2_corr[i] + self.state_x2_corr[i]> max_corr:
max_idx = i
max_corr = self.state_x1_corr[i] + self.state_y1_corr[i] + self.state_x2_corr[i] + self.state_x2_corr[i]
self.score.state_corr = max_corr
self.score.state_offset = max_idx - (len(self.state_trajectory[:,0]))
rospy.loginfo("state correlation: %f, index offset: %d", self.score.state_corr, self.score.state_offset)
    def update_interactive(self, event):
        # Timer callback driving the interactive trial: one integrator step per
        # tick, 600 steps total.  Handles (in order) end-of-trial bookkeeping,
        # the on-screen countdown/target markers, shared-control blending of
        # user and stabilizing-controller torques, and the variational
        # integrator step.  (600 steps at 50 ticks/second => a 12 s trial;
        # assumes TIMESTEP is in milliseconds -- TODO confirm.)
        if self.simulation_failed or self.k == 600:
            # Trial over (completed or failed): stop the timers, pad and save
            # the trajectories, compute the trial cost and trust score, then
            # publish the score and mark the trial finished.
            self.timer.shutdown()
            self.tf_timer.shutdown()
            self.tcost = 0
            if self.k == 600:
                self.timer_marker.header.stamp = rospy.Time.now()
                self.timer_marker.text = "STOP"
                self.timer_pub.publish(self.timer_marker)
            # Pad the trajectories so the costs are high, but not so high in the case of stabilization failure
            if self.k < 599:
                last_state = self.state_trajectory[self.k-1]
                last_combined_command = self.combined_input_trajectory[self.k-1]
                last_user_command = self.user_input_trajectory[self.k-1]
                last_controller_command = self.controller_input_trajectory[self.k-1]
                for i in range(self.k, 600):
                    # NOTE(review): the state is padded at index i but the
                    # input trajectories at i-1 -- looks like an off-by-one
                    # (or the input arrays are one element shorter); confirm
                    # against the trajectory array allocation.
                    self.state_trajectory[i] = last_state
                    self.combined_input_trajectory[i-1] = last_combined_command
                    self.user_input_trajectory[i-1] = last_user_command
                    self.controller_input_trajectory[i-1] = last_controller_command
            # Save combined / user / controller trajectories to .mat files.
            filestr = '/home/mderry/cps_data/user_' + str(USER_NUMBER) + '/user_' + str(USER_NUMBER) + '_trial_' + str(self.trial_num) + '_combined_trajectory.mat'
            self.dsys.save_state_trajectory(filestr, self.state_trajectory, self.combined_input_trajectory)
            filestr = '/home/mderry/cps_data/user_' + str(USER_NUMBER) + '/user_' + str(USER_NUMBER) + '_trial_' + str(self.trial_num) + '_user_trajectory.mat'
            self.dsys.save_state_trajectory(filestr, self.state_trajectory, self.user_input_trajectory)
            filestr = '/home/mderry/cps_data/user_' + str(USER_NUMBER) + '/user_' + str(USER_NUMBER) + '_trial_' + str(self.trial_num) + '_controller_trajectory.mat'
            self.dsys.save_state_trajectory(filestr, self.state_trajectory, self.controller_input_trajectory)
            # Score the trial: discrete LQ cost of the combined trajectory
            # against the reference (Xd, Ud).
            dcost = discopt.DCost(self.Xd, self.Ud, self.Q, self.R)
            optimizer = discopt.DOptimizer(self.dsys, dcost)
            self.tcost = optimizer.calc_cost(self.state_trajectory, self.combined_input_trajectory)
            rospy.loginfo("calc_cost: %f", self.tcost)
            #if tcost > MAX_COST:
            #    tcost = MAX_COST
            if self.tcost < MIN_COST:
                self.tcost = MIN_COST
            self.score.cost = self.tcost
            # self.task_trust = 1.0 - ((optimizer.calc_cost(self.state_trajectory, self.combined_input_trajectory) - MIN_COST)/(MAX_COST-MIN_COST))
            # Map the cost onto a trust value, clamped to [0, 1].  EXPONENT is
            # presumably negative so higher cost => lower trust -- TODO confirm.
            self.task_trust = exp(EXPONENT * (self.tcost - MIN_COST))
            if self.task_trust > 1.0:
                self.task_trust = 1.0
            elif self.task_trust < 0.0:
                self.task_trust = 0.0
            self.calc_correlations()
            self.score_pub.publish(self.score)
            self.finished = True
            return
        # Start from the raw user command; may be blended with the
        # stabilizing controller below.
        u = self.mvi.u1
        u[0] = self.user_ux
        u[1] = self.user_uy
        if self.k % 50 == 0:
            # Once per second: update the countdown text and the target marker
            # for the current phase (k=200 DIP, k=400 RETURN, k=600 STOP).
            rospy.loginfo("Clock: %d", self.k/50)
            self.timer_marker.header.stamp = rospy.Time.now()
            self.target_marker.header.stamp = rospy.Time.now()
            if self.k == 200:
                self.timer_marker.text = "DIP"
                self.target_marker.pose.position.x = -1*sin(QBAR_DIP1) + 0.05
                self.target_marker.pose.position.y = sin(QBAR_DIP2) - 0.05
                self.target_marker.pose.position.z = -1.95
            elif self.k == 400:
                self.timer_marker.text = "RETURN"
                self.target_marker.pose.position.x = 0.0
                self.target_marker.pose.position.y = 0.0
                self.target_marker.pose.position.z = -2.0
            elif self.k == 600:
                self.timer_marker.text = "STOP"
            elif self.k > 400:
                self.timer_marker.text = str(12-(self.k/50))
            elif self.k > 200:
                self.timer_marker.text = str(8-(self.k/50))
            elif self.k < 200:
                self.timer_marker.text = str(4-(self.k/50))
            self.timer_pub.publish(self.timer_marker)
            self.target_pub.publish(self.target_marker)
        ## if we are in interactive+control mode, let's run that controller:
        if self.state == STATE_CONTROLLED_INTERACTIVE:
            # get state stuff
            qtmp = self.mvi.q2
            ptmp = self.mvi.p2
            # wrap angle of pendulum:
            for q in self.sys.dyn_configs:
                q0 = qtmp[self.sys.get_config(q).index]
                qtmp[self.sys.get_config(q).index] = normalize_angle(q0)
            X = np.hstack((qtmp, ptmp))
            #print "State:"
            #print X
            if not self.in_basin_of_attraction(X):
                self.simulation_failed = True
                rospy.loginfo("Outside basin of attraction")
            self.state_trajectory[self.k] = X
            # calculate feedback law: LQR about the "dip" equilibrium during
            # the dip phase (200 < k < 400), upright equilibrium otherwise.
            if self.k > 200 and self.k < 400:
                xTilde = X - self.xBar_dip
                xTilde[0] = normalize_angle(xTilde[0])
                xTilde[1] = normalize_angle(xTilde[1])
                xTilde[2] = normalize_angle(xTilde[2])
                xTilde[3] = normalize_angle(xTilde[3])
                u_cont = -np.dot(self.Kstab_dip[0], xTilde)
                #u_cont = u_cont + (0.75 * 9.81 * 2.0 * sin(QBAR_DIP))
                if self.k == 201:
                    rospy.loginfo("DIP!")
                    # rospy.loginfo(xTilde)
            else:
                xTilde = X - self.xBar_end
                xTilde[0] = normalize_angle(xTilde[0])
                xTilde[1] = normalize_angle(xTilde[1])
                xTilde[2] = normalize_angle(xTilde[2])
                xTilde[3] = normalize_angle(xTilde[3])
                u_cont = -np.dot(self.Kstab_up[0], xTilde)
                if self.k == 400:
                    rospy.loginfo("POP!")
                    # rospy.loginfo(xTilde)
            #print "Error:"
            #print xTilde
            # Saturate the controller torque (upper bound only; negative
            # torques are not clamped here -- presumably intentional, confirm).
            if u_cont[0] > SATURATION_TORQUE:
                u_cont[0] = SATURATION_TORQUE
            if u_cont[1] > SATURATION_TORQUE:
                u_cont[1] = SATURATION_TORQUE
            #self.user_ux = 0.0
            #self.user_uy = 0.0
            # blend user and feedback control input
            u[0] = self.trust*self.user_ux + (1-self.trust)*u_cont[0]
            u[1] = self.trust*self.user_uy + (1-self.trust)*u_cont[1]
            self.controller_input_trajectory[self.k-1] = u_cont
            self.user_input_trajectory[self.k-1, 0] = self.user_ux
            self.user_input_trajectory[self.k-1, 1] = self.user_uy
            self.combined_input_trajectory[self.k-1] = u
            # rospy.loginfo("[before] u: (%f, %f), user: (%f, %f), controller: (%f, %f)", u[0], u[1], self.user_ux, self.user_uy, u_cont[0], u_cont[1])
        try:
            # Advance the variational integrator one step with the blended input.
            self.k += 1
            t2 = self.k*TIMESTEP/1000.0
            self.mvi.step(t2, u1=u)
        except trep.ConvergenceError:
            self.simulation_failed = True
            rospy.logwarn("No solution to DEL equations!")
            return
        self.disp_q = self.mvi.q2
        self.disp_qd = np.hstack((self.mvi.q2[0:self.sys.nQd], [0]*self.sys.nQk))
        self.update_values()
def update_values(self):
"""
Just fill in all of position array stuff for the JS message
"""
pos = [self.sys.get_config(self.mappings[n]).q for n in self.names]
self.js.position = pos
def send_joint_states(self, event):
tnow = rospy.Time.now()
# first send the transform:
quat = tuple(tf.transformations.quaternion_from_euler(0, 0, 0, 'rzyx'))
point = tuple((0, 0, 0))
self.br.sendTransform(point, quat,
tnow,
'base', 'world')
self.js.header.stamp = tnow
self.joint_pub.publish(self.js)
# send transforms
quat = tuple(tf.transformations.quaternion_from_euler(self.sys.get_config('link-1-base_x').q, 0, 0, 'sxyz'))
point = tuple((0, 0, 0))
self.br.sendTransform(point, quat, tnow, 'link1_rx_link', 'base')
quat = tuple(tf.transformations.quaternion_from_euler(0, self.sys.get_config('link-1-base_y').q, 0, 'sxyz'))
point = tuple((0, 0, 0))
self.br.sendTransform(point, quat, tnow, 'link1_ry_link', 'link1_rx_link')
quat = tuple(tf.transformations.quaternion_from_euler(self.sys.get_config('link-2-base_x').q, 0, 0, 'sxyz'))
point = tuple((0, 0, 0.5))
self.br.sendTransform(point, quat, tnow, 'link2_rx_link', 'link1')
quat = tuple(tf.transformations.quaternion_from_euler(0, self.sys.get_config('link-2-base_y').q, 0, 'sxyz'))
point = tuple((0, 0, 0))
self.br.sendTransform(point, quat, tnow, 'link2_ry_link', 'link2_rx_link')
    def in_basin_of_attraction(self, state):
        # Intended to report whether the first two (wrapped) angles are within
        # the controller's basin of attraction, but the failure branch is
        # disabled: the original `return False` is commented out, so this
        # currently returns True for every state and the threshold test has
        # no effect.
        # NOTE(review): confirm whether the basin check should be re-enabled;
        # update_interactive() marks the simulation failed when this is False.
        BASIN_THRESHOLD = 2*pi/3
        if abs(state[0]) + abs(state[1]) > BASIN_THRESHOLD:
            # return False
            return True
        else:
            return True
    def output_log(self):
        # Placeholder: only sets a sentinel attribute and never writes a log.
        # Presumably intended to dump trial data eventually -- TODO confirm.
        self.file = 2
def main():
    """Run a full experimental session.

    Initializes the ROS node, then creates one PendulumSimulator per trial,
    waits for it to finish, and feeds the resulting task trust and cost back
    into the User model until the user's trial budget is exhausted.
    """
    rospy.init_node('pendulum_simulator') # , log_level=rospy.INFO)
    # Make sure the 'links' parameter exists on the parameter server.
    links_bool = rospy.get_param('links', False)
    if not rospy.has_param('links'):
        rospy.set_param('links', links_bool)
    trial_counter = 1
    try:
        system = create_systems(2, link_length='1.0', link_mass='2.0', frequency='0.5', amplitude='0.5', damping='0.1')
        user = User(USER_NUMBER, MAX_TRIALS, STARTING_TRUST, ALPHA, INPUT_DEVICE)
        loop_rate = rospy.Rate(100)
        while user.current_trial_num < user.max_num_trials:
            rospy.loginfo("Trial number: %d", trial_counter)
            simulator = PendulumSimulator(system, user.current_trust, user.current_trial_num)
            # Spin until the simulator's timer callback marks the trial done.
            while not simulator.finished:
                loop_rate.sleep()
            rospy.loginfo("current trust: %f", user.current_trust)
            user.update_trust(simulator.task_trust, simulator.tcost)
            rospy.loginfo("new current trust: %f, task trust: %f", user.current_trust, simulator.task_trust)
            del simulator
            trial_counter += 1
            time.sleep(2)
    except rospy.ROSInterruptException:
        pass
    rospy.loginfo('Session Complete')
if __name__ == '__main__':
main()
| gpl-2.0 |
MatthewDaws/TileMapBase | tests/test_ordnancesurvey.py | 1 | 14708 | import pytest
import unittest.mock as mock
import tilemapbase.ordnancesurvey as ons
import os, re
def test_project():
    # Known lon/lat points map to OS national grid (easting, northing).
    cases = [
        ((-1.55532, 53.80474), (429383.15535285, 434363.0962841)),
        ((-5.71808, 50.06942), (134041.0757941, 25435.9074222)),
        ((-3.02516, 58.64389), (340594.489913, 973345.118179)),
    ]
    for (lon, lat), expected in cases:
        assert ons.project(lon, lat) == pytest.approx(expected)
def test_to_latlon():
    # Inverse of project(): grid coordinates back to (longitude, latitude).
    cases = [
        ((429383.15535285, 434363.0962841), (-1.55532, 53.80474)),
        ((134041.0757941, 25435.9074222), (-5.71808, 50.06942)),
    ]
    for (x, y), lonlat in cases:
        assert ons.to_lonlat(x, y) == pytest.approx(lonlat)
def test_to_os_national_grid():
    # lon/lat -> (grid reference string, fractional x offset, fractional y
    # offset) within the metre square.
    cases = [
        ((-1.55532, 53.80474), "SE 29383 34363", 0.15221664, 0.096820819),
        ((-5.71808, 50.06942), "SW 34041 25435", 0.073081686, 0.907697877),
        ((-3.02516, 58.64389), "ND 40594 73345", 0.48627711, 0.118587372),
    ]
    for (lon, lat), grid_ref, xfrac, yfrac in cases:
        assert ons.to_os_national_grid(lon, lat) == (grid_ref,
            pytest.approx(xfrac), pytest.approx(yfrac))
    # Points outside the national grid raise.
    with pytest.raises(ValueError):
        print(ons.to_os_national_grid(-10, 10))
def test_os_national_grid_to_coords():
    # Grid reference strings decode to absolute easting/northing in metres.
    cases = [
        ("SE 29383 34363", (429383, 434363)),
        ("SW 34041 25435", (134041, 25435)),
        ("ND 40594 73345", (340594, 973345)),
    ]
    for grid_ref, coords in cases:
        assert ons.os_national_grid_to_coords(grid_ref) == coords
    # Malformed references raise.
    with pytest.raises(ValueError):
        ons.os_national_grid_to_coords("IXJ23678412 123 12")
def test_init():
    # init() scans the data directory and fills the module-level lookup
    # tables, keyed by grid-square code (or filename for miniscale).
    ons.init(os.path.join("tests", "test_os_map_data"))
    base = os.path.abspath(os.path.join("tests", "test_os_map_data", "data"))
    assert ons._lookup["openmap_local"] == {
        "AH" : os.path.join(base, "one"),
        "AA" : os.path.join(base, "two") }
    assert ons._lookup["vectormap_district"] == {
        "BG" : os.path.join(base, "one") }
    mini = os.path.abspath(os.path.join("tests", "test_os_map_data", "mini"))
    assert ons._lookup["miniscale"] == {"MiniScale_one.tif" : mini,
        "MiniScale_two.tif" : mini}
def test__separate_init():
    # _separate_init invokes the callback once per file matching the regular
    # expression, passing (filename, containing directory).
    base = os.path.abspath(os.path.join("tests", "test_os_map_data", "data"))
    callback = mock.Mock()
    ons._separate_init(re.compile(r"^[A-Za-z]{2}\d\d\.tif$"),
        os.path.join("tests", "test_os_map_data"),
        callback)
    assert callback.call_args_list == [mock.call("BG76.tif", os.path.join(base, "one"))]
@pytest.fixture
def omll():
    # Patch the module lookup so OpenMapLocal resolves grid square "SE" to a
    # fake directory; no real map data is needed.
    files = {"openmap_local" : {"SE" : "se_dir"}}
    with mock.patch("tilemapbase.ordnancesurvey._lookup", new=files):
        yield None
@pytest.fixture
def image_mock():
    # Replace PIL's Image module inside tilemapbase so no files are opened;
    # yields the mock for call assertions.
    with mock.patch("tilemapbase.ordnancesurvey._Image") as i:
        yield i
def test_OpenMapLocal(omll, image_mock):
    # 5km quadrant tiles: grid reference -> <square><10km digits><quadrant>.tif
    source = ons.OpenMapLocal()
    for grid_ref, tile_file in [
            ("SE 12345 54321", "SE15SW.tif"),
            ("SE 16345 54321", "SE15SE.tif"),
            ("SE 22345 55321", "SE25NW.tif"),
            ("SE 15345 75321", "SE17NE.tif")]:
        source(grid_ref)
        image_mock.open.assert_called_with(os.path.join("se_dir", tile_file))
    # Unknown grid square -> TileNotFoundError; malformed reference -> ValueError.
    with pytest.raises(ons.TileNotFoundError):
        source("SF 1456 12653")
    with pytest.raises(ValueError):
        source("SF 145612653")
    assert source.tilesize == 5000
    assert source.size_in_meters == 5000
@pytest.fixture
def vmd():
    # Point the vectormap_district lookup for square "SE" at a fake directory.
    files = {"vectormap_district" : {"SE" : "se_dir"}}
    with mock.patch("tilemapbase.ordnancesurvey._lookup", new=files):
        yield None
def test_VectorMapDistrict(vmd, image_mock):
    # 10km tiles: references within the same 10km square open the same file.
    source = ons.VectorMapDistrict()
    for grid_ref, tile_file in [
            ("SE 12345 54321", "SE15.tif"),
            ("SE 16345 54321", "SE15.tif"),
            ("SE 22345 55321", "SE25.tif"),
            ("SE 15345 75321", "SE17.tif")]:
        source(grid_ref)
        image_mock.open.assert_called_with(os.path.join("se_dir", tile_file))
    # Unknown grid square -> TileNotFoundError; malformed reference -> ValueError.
    with pytest.raises(ons.TileNotFoundError):
        source("SF 1456 12653")
    with pytest.raises(ValueError):
        source("SF 145612653")
    assert source.tilesize == 4000
    assert source.size_in_meters == 10000
@pytest.fixture
def tfk():
    # Fake 25k raster lookup: grid square "SE" -> "se_dir".
    files = {"25k_raster" : {"SE" : "se_dir"}}
    with mock.patch("tilemapbase.ordnancesurvey._lookup", new=files):
        yield None
def test_TwentyFiveRaster(tfk, image_mock):
    # 25k raster uses lower-case 10km tile filenames.
    source = ons.TwentyFiveRaster()
    for grid_ref, tile_file in [
            ("SE 12345 54321", "se15.tif"),
            ("SE 16345 54321", "se15.tif"),
            ("SE 22345 55321", "se25.tif"),
            ("SE 15345 75321", "se17.tif")]:
        source(grid_ref)
        image_mock.open.assert_called_with(os.path.join("se_dir", tile_file))
    # Unknown grid square -> TileNotFoundError; malformed reference -> ValueError.
    with pytest.raises(ons.TileNotFoundError):
        source("SF 1456 12653")
    with pytest.raises(ValueError):
        source("SF 145612653")
    assert source.tilesize == 4000
    assert source.size_in_meters == 10000
@pytest.fixture
def tfs():
    # 250k raster uses a single directory rather than a per-square mapping.
    files = {"250k_raster" : "250kdir"}
    with mock.patch("tilemapbase.ordnancesurvey._lookup", new=files):
        yield None
def test_TwoFiftyScale(tfs, image_mock):
    # 250k tiles: 4000px covering a whole 100km grid square, one file per square.
    ts = ons.TwoFiftyScale()
    assert ts.tilesize == 4000
    assert ts.size_in_meters == 100000
    ts("SE 1234 43231")
    image_mock.open.assert_called_with(os.path.join("250kdir", "SE.tif"))
def mock_dir_entry(name):
    """Build a fake os.DirEntry-like object representing a plain file."""
    entry = mock.Mock()
    entry.is_dir.return_value = False
    entry.is_file.return_value = True
    # `name` must be assigned after construction: Mock(name=...) would set the
    # mock's own repr name rather than the attribute.
    entry.name = name
    return entry
def test_MasterMap_init():
    # MasterMap.init scans the directory, keeps files matching the tile name
    # pattern and exposes the discovered (square, x, y) tuples.
    with mock.patch("os.path.abspath") as abspath_mock:
        abspath_mock.return_value = "spam"
        with mock.patch("os.scandir") as scandir_mock:
            scandir_mock.return_value = [mock_dir_entry("eggs"),
                mock_dir_entry("se3214.tif"), mock_dir_entry("sD1234.tif"),
                mock_dir_entry("sa6543.png")]
            ons.MasterMap.init("test")
    abspath_mock.assert_called_with("test")
    scandir_mock.assert_called_with("spam")
    assert set(ons.MasterMap.found_tiles()) == {("se", "32", "14"),
        ("sD", "12", "34"), ("sa", "65", "43")}
    # Fix: the original line was a bare `==` comparison whose result was
    # silently discarded -- it never asserted anything.
    assert ons._lookup["MasterMap"] == {"spam" : ["se3214.tif", "sD1234.tif", "sa6543.png"]}
@pytest.fixture
def mm_dir():
    # MasterMap lookup maps each directory to the list of tile files inside it.
    files = {"MasterMap" : {"spam" : ["sA1342.png"]}}
    with mock.patch("tilemapbase.ordnancesurvey._lookup", new=files):
        yield None
def test_MasterMap(mm_dir, image_mock):
    # Tiles resolve through the per-directory file lists; unknown squares raise.
    source = ons.MasterMap()
    with pytest.raises(ons.TileNotFoundError):
        source("SD 12345 65432")
    tile = source("SA 13876 42649")
    assert tile == image_mock.open.return_value
    image_mock.open.assert_called_with(os.path.join("spam", "sA1342.png"))
    assert source.tilesize == 3200
    assert source.size_in_meters == 1000
    # MasterMap's tilesize is writable.
    source.tilesize = 1243
    assert source.tilesize == 1243
@pytest.fixture
def mini():
    # Two candidate MiniScale images living in different directories.
    files = {"miniscale" : {"mini_one.tif" : "dirone", "mini_two.tif" : "dirtwo"}}
    with mock.patch("tilemapbase.ordnancesurvey._lookup", new=files):
        yield None
def test_MiniScale(mini, image_mock):
    # MiniScale serves 1000px-per-100km tiles by cropping one large image
    # selected via the `filename` property.
    ts = ons.MiniScale()
    assert ts.tilesize == 1000
    assert ts.size_in_meters == 100000
    assert set(ts.filenames) == {"mini_one.tif", "mini_two.tif"}
    ts.filename = "mini_two.tif"
    assert ts.filename == "mini_two.tif"
    tile = ts("SE 1234 43231")
    image_mock.open.assert_called_with(os.path.join("dirtwo", "mini_two.tif"))
    image = image_mock.open.return_value
    # The SE square maps to crop box (4000,8000)-(5000,9000) in the image.
    image.crop.assert_called_with((4000,8000,5000,9000))
    assert tile is image.crop.return_value
    assert ts.bounding_box == (0, 0, 700000, 1300000)
def test_MiniScale_cache(mini, image_mock):
    # The source image is opened once and cached until the filename changes.
    ts = ons.MiniScale()
    ts.filename = "mini_two.tif"
    # Fix: the return value was bound to an unused local; call for effect only.
    ts("SE 1234 43231")
    image_mock.reset_mock()
    # Second request hits the cache: no new open() call.
    ts("SD 1 2")
    assert image_mock.open.call_args_list == []
    # Switching files invalidates the cache.
    ts.filename = "mini_one.tif"
    ts("SD 1 2")
    image_mock.open.assert_called_with(os.path.join("dirone", "mini_one.tif"))
@pytest.fixture
def overview():
    # Overview images share the miniscale-style filename -> directory mapping.
    files = {"overview" : {"mini_one.tif" : "dirone", "mini_two.tif" : "dirtwo"}}
    with mock.patch("tilemapbase.ordnancesurvey._lookup", new=files):
        yield None
def test_OverView(overview, image_mock):
    # OverView crops 100px tiles covering 50km; images are cached per filename.
    ts = ons.OverView()
    assert ts.tilesize == 100
    assert ts.size_in_meters == 50000
    ts.filename = "mini_one.tif"
    tile = ts("SD 1 2")
    assert image_mock.open.call_args_list == [mock.call(os.path.join("dirone", "mini_one.tif"))]
    assert tile == image_mock.open.return_value.crop.return_value
    # Should cache...
    image_mock.reset_mock()
    ts("SD 1 2")
    assert image_mock.open.call_args_list == []
    # Halving the tile size halves the ground coverage.
    ts.tilesize = 50
    assert ts.size_in_meters == 25000
    # tilesize must be a positive integer.
    with pytest.raises(ValueError):
        ts.tilesize = 5.5
    with pytest.raises(ValueError):
        ts.tilesize = 0
def test_Extent_construct():
    # Extents can be built from a centre point (grid coords, lon/lat or a grid
    # reference string) or from corner lon/lat pairs.
    ex = ons.Extent.from_centre(1000, 0, 1000, 4000)
    assert ex.xrange == (500, 1500)
    assert ex.yrange == (-2000, 2000)
    # aspect (width/height) fixes the height from the width.
    ex = ons.Extent.from_centre(1000, 0, 1000, aspect=2.0)
    assert ex.xrange == (500, 1500)
    assert ex.yrange == (-250, 250)
    ex = ons.Extent.from_centre_lonlat(-1.55532, 53.80474, 2000)
    assert ex.xrange == pytest.approx((428383.15535285, 430383.15535285))
    assert ex.yrange == pytest.approx((433363.0962841, 435363.0962841))
    ex = ons.Extent.from_lonlat(-5.71808, -1.55532, 53.80474, 50.06942)
    assert ex.xrange == pytest.approx((134041.075794, 429383.15535285))
    assert ex.yrange == pytest.approx((25435.907422, 434363.0962841))
    ex = ons.Extent.from_centre_grid("ND 40594 73345", ysize=2000, aspect=0.5)
    assert ex.xrange == (340094, 341094)
    assert ex.yrange == (972345, 974345)
def test_Extent_mutations():
    # The with_*/to_* helpers return new Extent objects derived from `ex`.
    # Base extent: 1000 wide x 5000 tall, centre (1500, 6500).
    ex = ons.Extent(1000, 2000, 4000, 9000)
    ex1 = ex.with_centre(10000, 20000)
    assert ex1.xrange == (10000-500, 10000+500)
    assert ex1.yrange == (20000-2500, 20000+2500)
    ex2 = ex.with_centre_lonlat(-3.02516, 58.64389)
    assert ex2.xrange == pytest.approx((340094.489913, 341094.489913))
    assert ex2.yrange == pytest.approx((973345.118179-2500, 973345.118179+2500))
    # to_aspect shrinks one dimension symmetrically about the centre.
    ex3 = ex.to_aspect(2.0)
    assert ex3.xrange == (1000, 2000)
    assert ex3.yrange == (6500-250, 6500+250)
    ex3 = ex.to_aspect(0.5)
    assert ex3.xrange == (1000, 2000)
    assert ex3.yrange == (6500-1000, 6500+1000)
    ex3 = ex.to_aspect(0.1)
    assert ex3.xrange == (1250, 1750)
    assert ex3.yrange == (4000, 9000)
    ex4 = ex.with_absolute_translation(100, 200)
    assert ex4.xrange == (1100, 2100)
    assert ex4.yrange == (4200, 9200)
    # Relative translation is in units of the extent's own width/height.
    ex5 = ex.with_translation(0.5, 1)
    assert ex5.xrange == (1500, 2500)
    assert ex5.yrange == (9000, 14000)
    # Scaling by 0.5 doubles the window about its centre.
    ex6 = ex.with_scaling(0.5)
    assert ex6.xrange == (1500 - 1000, 2500)
    assert ex6.yrange == (6500 - 5000, 6500 + 5000)
@pytest.fixture
def source():
    # Fake tile source: 1000m ground squares rendered at 2000px.
    s = mock.Mock()
    s.size_in_meters = 1000
    s.tilesize = 2000
    return s
def test_TileScalar(source, image_mock):
    # TileScalar rescales tiles from the wrapped source to a new pixel size
    # while keeping the ground coverage unchanged.
    ts = ons.TileScalar(source, 500)
    assert ts.tilesize == 500
    assert ts.size_in_meters == 1000
    assert ts.bounding_box == source.bounding_box
    tile = ts("SN 1234 54321")
    source.assert_called_with("SN 1234 54321")
    # The tile is converted to RGB then resized with Lanczos resampling.
    assert tile == source.return_value.convert.return_value.resize.return_value
    source.return_value.convert.assert_called_with("RGB")
    source.return_value.convert.return_value.resize.assert_called_with((500, 500), image_mock.LANCZOS)
def test_Plotter_plotlq(source):
    # plotlq draws each covering tile with its own ax.imshow call, forwarding
    # extra keyword arguments.
    ex = ons.Extent(1100, 1900, 4200, 5500)
    plotter = ons.Plotter(ex, source)
    ax = mock.Mock()
    plotter.plotlq(ax, bob="fish")
    # The extent spans two 1000m tiles vertically.
    assert source.call_args_list == [
        mock.call("SV 1000 4000"), mock.call("SV 1000 5000") ]
    assert ax.imshow.call_args_list == [
        mock.call(source.return_value, interpolation="lanczos", extent=(1000, 2000, 4000, 5000), bob="fish"),
        mock.call(source.return_value, interpolation="lanczos", extent=(1000, 2000, 5000, 6000), bob="fish")
    ]
def test_Plotter_as_one_image(source, image_mock):
    # as_one_image fetches each covering tile and pastes them into one RGB image.
    ex = ons.Extent(1100, 1900, 4200, 5500)
    plotter = ons.Plotter(ex, source)
    # Fix: removed an unused `ax = mock.Mock()` local left over from the
    # plotlq tests -- as_one_image takes no axes object.
    out = plotter.as_one_image()
    assert source.call_args_list == [
        mock.call("SV 1000 4000"), mock.call("SV 1000 5000") ]
    image_mock.new.assert_called_with("RGB", (2000, 4000))
    im = image_mock.new.return_value
    assert out is im
    # Tiles are pasted bottom-to-top (pixel y grows downwards).
    assert im.paste.call_args_list == [
        mock.call(source.return_value, (0, 2000)),
        mock.call(source.return_value, (0, 0))
    ]
def test_Plotter_plot(source, image_mock):
    # plot composes all tiles into one image and issues a single imshow call
    # covering the whole (tile-aligned) extent.
    ex = ons.Extent(1100, 1900, 4200, 5500)
    plotter = ons.Plotter(ex, source)
    ax = mock.Mock()
    plotter.plot(ax, bob="fish")
    assert source.call_args_list == [
        mock.call("SV 1000 4000"), mock.call("SV 1000 5000") ]
    assert ax.imshow.call_args_list == [
        mock.call(image_mock.new.return_value, interpolation="lanczos", extent=(1000, 2000, 4000, 6000), bob="fish"),
    ]
def test_Plotter_ignore_errors(source, image_mock):
    # If the tile source raises, plotlq falls back to the source's blank tile
    # instead of propagating the error.
    source.side_effect = Exception
    ex = ons.Extent(1100, 1900, 4200, 5500)
    plotter = ons.Plotter(ex, source)
    ax = mock.Mock()
    plotter.plotlq(ax, bob="fish")
    assert ax.imshow.call_args_list == [
        mock.call(source.blank.return_value, interpolation="lanczos", extent=(1000, 2000, 4000, 5000), bob="fish"),
        mock.call(source.blank.return_value, interpolation="lanczos", extent=(1000, 2000, 5000, 6000), bob="fish")
    ]
def test_TileSplitter(source):
    # TileSplitter subdivides each source tile into smaller tiles; the new
    # pixel size must divide the old one evenly.
    source.tilesize = 1000
    source.size_in_meters = 500
    with pytest.raises(ValueError):
        ons.TileSplitter(source, 3)
    ts = ons.TileSplitter(source, 500)
    assert ts.tilesize == 500
    assert ts.size_in_meters == 250
    tile = ts("SV 1000 4000")
    assert source.call_args_list == [mock.call("SV 1000 4000")]
    image = source.return_value
    # All four quadrants are cropped up-front when the parent tile is fetched.
    assert image.crop.call_args_list == [mock.call((0,0,500,500)),
        mock.call((0,500,500,1000)), mock.call((500,0,1000,500)),
        mock.call((500,500,1000,1000))]
    assert tile is image.crop.return_value
    # Should be cached...
    source.reset_mock()
    tile = ts("SV 1250 4000")
    assert source.call_args_list == []
| mit |
freezmeinster/teh-manis | django/contrib/sessions/middleware.py | 323 | 1888 | import time
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module
class SessionMiddleware(object):
    # Django middleware that attaches a session object to each request and
    # writes the session cookie back on the response when needed.
    def process_request(self, request):
        # Build a SessionStore from the configured engine; the session key
        # comes from the session cookie (None -> a new, empty session).
        engine = import_module(settings.SESSION_ENGINE)
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        request.session = engine.SessionStore(session_key)
    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
        except AttributeError:
            # No session on the request (process_request never ran, e.g. an
            # earlier middleware short-circuited) -- nothing to do.
            pass
        else:
            if accessed:
                # The response now depends on the cookie; tell caches so.
                patch_vary_headers(response, ('Cookie',))
            if modified or settings.SESSION_SAVE_EVERY_REQUEST:
                if request.session.get_expire_at_browser_close():
                    # Session cookie: no explicit expiry.
                    max_age = None
                    expires = None
                else:
                    max_age = request.session.get_expiry_age()
                    expires_time = time.time() + max_age
                    expires = cookie_date(expires_time)
                # Save the session data and refresh the client cookie.
                request.session.save()
                response.set_cookie(settings.SESSION_COOKIE_NAME,
                        request.session.session_key, max_age=max_age,
                        expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                        path=settings.SESSION_COOKIE_PATH,
                        secure=settings.SESSION_COOKIE_SECURE or None,
                        httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        return response
| bsd-3-clause |
DarkPrince304/MozDef | alerts/alertWorker.py | 7 | 8708 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Jeff Bryner jbryner@mozilla.com
#
# Alert Worker to listen for alerts and call python plugins
# for user-controlled reaction to alerts.
import json
import kombu
import logging
import os
import pynsive
import sys
from configlib import getConfig, OptionParser
from kombu import Connection, Queue, Exchange
from kombu.mixins import ConsumerMixin
from logging.handlers import SysLogHandler
from operator import itemgetter
# Module-level (root) logger and the record format shared by all handlers;
# main() attaches a stderr handler using this formatter.
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def registerPlugins():
    '''Discover alert plugins in ./plugins.
    Returns a list of (plugin instance, registration list, priority) tuples
    for every module that exposes a `message` class with a list-valued
    `registration` attribute.  Priority defaults to 100 when absent.
    '''
    pluginList = list()  # tuple of (instance, registration list, priority)
    # NOTE(review): plugin_manager appears unused -- pynsive module functions
    # are called directly below; confirm whether it can be dropped.
    plugin_manager = pynsive.PluginManager()
    if os.path.exists('plugins'):
        modules = pynsive.list_modules('plugins')
        for mname in modules:
            module = pynsive.import_module(mname)
            reload(module)
            if not module:
                raise ImportError('Unable to load module {}'.format(mname))
            else:
                if 'message' in dir(module):
                    mclass = module.message()
                    mreg = mclass.registration
                    if 'priority' in dir(mclass):
                        mpriority = mclass.priority
                    else:
                        mpriority = 100
                    # Only list-style registrations are honoured.
                    if isinstance(mreg, list):
                        print('[*] plugin {0} registered to receive messages with {1}'.format(mname, mreg))
                        pluginList.append((mclass, mreg, mpriority))
    return pluginList
def dict2List(inObj):
    '''Flatten a (possibly nested) dict or list into a stream of lowercased
    keys and values.

    Dict keys are ascii-encoded and lowercased; string values are lowercased;
    nested dicts and lists are walked recursively (a nested dict's own key is
    not emitted).  Non-string scalar values pass through unchanged.  Any other
    top-level input yields a single empty string.
    (Python 2 module: relies on iteritems()/unicode.)
    '''
    if isinstance(inObj, dict):
        for k, v in inObj.iteritems():
            if isinstance(v, dict):
                # Nested dict: emit only its contents, not this key.
                for item in dict2List(v):
                    yield item
                continue
            yield k.encode('ascii', 'ignore').lower()
            if isinstance(v, list):
                for item in dict2List(v):
                    yield item
            elif isinstance(v, str):
                yield v.lower()
            elif isinstance(v, unicode):
                yield v.encode('ascii', 'ignore').lower()
            else:
                yield v
    elif isinstance(inObj, list):
        for element in inObj:
            if isinstance(element, (list, dict)):
                for item in dict2List(element):
                    yield item
            elif isinstance(element, str):
                yield element.lower()
            elif isinstance(element, unicode):
                yield element.encode('ascii', 'ignore').lower()
            else:
                yield element
    else:
        yield ''
def sendEventToPlugins(anevent, pluginList):
    '''Run an event through every registered plugin that matches it.

    pluginList holds (plugin instance, registration list, priority) tuples;
    plugins run in ascending priority order.  A plugin matches when any of
    its registration terms appears among the event's flattened keys/values.
    Returns the (possibly modified) event.
    '''
    if not isinstance(anevent, dict):
        raise TypeError('event is type {0}, should be a dict'.format(type(anevent)))
    # Lowest priority value runs first.
    for pluginClass, registration, priority in sorted(pluginList, key=itemgetter(2)):
        matched = False
        if isinstance(registration, list):
            try:
                eventTokens = [e for e in dict2List(anevent)]
                if set(registration).intersection(eventTokens):
                    matched = True
            except TypeError:
                # Unhashable content in the event; give up on plugin routing.
                sys.stderr.write('TypeError on set intersection for dict {0}'.format(anevent))
                return anevent
        if matched:
            anevent = pluginClass.onMessage(anevent)
    return anevent
class alertConsumer(ConsumerMixin):
    '''read in alerts,
    compare them to plugins
    and send alerts to plugins as requested
    '''
    def __init__(self, mqAlertsConnection, alertQueue, alertExchange):
        self.connection = mqAlertsConnection  # default connection for the kombu mixin
        self.alertsConnection = mqAlertsConnection
        self.alertQueue = alertQueue
        self.alertExchange = alertExchange
    def get_consumers(self, Consumer, channel):
        # Single JSON-only consumer on the alert queue; prefetch comes from
        # the module-level options set up in __main__.
        consumer = Consumer(
            self.alertQueue,
            callbacks=[self.on_message],
            accept=['json'])
        consumer.qos(prefetch_count=options.prefetch)
        return [consumer]
    def on_message(self, body, message):
        # Normalize the incoming body to a dict, route it through the
        # registered plugins, then ack.  (Python 2 module: `unicode` below.)
        try:
            # just to be safe..check what we were sent.
            if isinstance(body, dict):
                bodyDict = body
            elif isinstance(body, str) or isinstance(body, unicode):
                try:
                    bodyDict = json.loads(body)   # lets assume it's json
                except ValueError as e:
                    # Not JSON: log and drop.
                    # NOTE(review): despite the original "ack but log" intent,
                    # this path returns WITHOUT message.ack(), so the broker
                    # may redeliver the bad message -- confirm.
                    logger.exception(
                        "alertworker exception: unknown body type received %r" % body)
                    return
            else:
                # Unsupported payload type: log and drop (also un-acked).
                logger.exception(
                    "alertworker exception: unknown body type received %r" % body)
                return
            # process valid message
            bodyDict = sendEventToPlugins(bodyDict, pluginList)
            message.ack()
        except ValueError as e:
            logger.exception(
                "alertworker exception while processing events queue %r" % e)
def main():
    '''Connect to the message queue, declare the exchange/queue, and consume
    alerts forever, handing each one to the registered plugins.'''
    # Attach a stderr handler so worker output is visible when run directly.
    sh = logging.StreamHandler(sys.stderr)
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    # connect and declare the message queue/kombu objects.
    # Event server/exchange/queue
    mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(options.mquser,
                                                     options.mqpassword,
                                                     options.mqalertserver,
                                                     options.mqport)
    mqAlertConn = Connection(mqConnString)
    # Exchange for alerts we pass to plugins
    alertExchange = Exchange(name=options.alertExchange,
                             type='topic',
                             durable=True,
                             delivery_mode=1)
    alertExchange(mqAlertConn).declare()
    # Queue for the exchange
    alertQueue = Queue(options.queueName,
                       exchange=alertExchange,
                       routing_key=options.alerttopic,
                       durable=False,
                       no_ack=(not options.mqack))
    alertQueue(mqAlertConn).declare()
    # consume our alerts (blocks until interrupted).
    alertConsumer(mqAlertConn, alertQueue, alertExchange).run()
def initConfig():
    '''setup the default options and override with any in our .conf file'''
    # Reads/writes the module-level `options` object created in __main__.
    # message queue server hostname
    options.mqalertserver = getConfig(
        'mqalertserver',
        'localhost',
        options.configfile)
    # queue exchange name
    options.alertExchange = getConfig(
        'alertexchange',
        'alerts',
        options.configfile)
    # queue name
    options.queueName = getConfig(
        'alertqueuename',
        'alertPlugins',
        options.configfile)
    # queue topic
    options.alerttopic = getConfig(
        'alerttopic',
        'mozdef.*',
        options.configfile)
    # how many messages to ask for at once
    options.prefetch = getConfig('prefetch', 50, options.configfile)
    options.mquser = getConfig('mquser', 'guest', options.configfile)
    options.mqpassword = getConfig('mqpassword', 'guest', options.configfile)
    options.mqport = getConfig('mqport', 5672, options.configfile)
    # mqack=True sets persistent (acked) delivery, False sets transient delivery
    options.mqack = getConfig('mqack', True, options.configfile)
if __name__ == '__main__':
    # Parse -c/--configfile (defaults to <script name>.conf alongside the
    # script), load configuration, register plugins, then run the consumer.
    parser = OptionParser()
    parser.add_option("-c",
                      dest='configfile',
                      default=sys.argv[0].replace('.py', '.conf'),
                      help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig()
    pluginList = registerPlugins()
    main()
| mpl-2.0 |
jeroendierckx/Camelot | camelot/view/controls/delegates/smileydelegate.py | 1 | 2798 | # ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from customdelegate import CustomDelegate, DocumentationMetaclass
from camelot.view.controls.editors.smileyeditor import SmileyEditor, default_icons
class SmileyDelegate(CustomDelegate):
    """Delegate for Smiley's

    Renders a named smiley icon in an item view cell; `icons` maps icon
    names to icon objects providing getQPixmap().
    """
    __metaclass__ = DocumentationMetaclass
    editor = SmileyEditor
    def __init__(self, parent, editable=True, icons=default_icons, **kwargs):
        CustomDelegate.__init__(self,
                                parent=parent,
                                editable=editable,
                                icons=icons,
                                **kwargs)
        # Name -> icon lookup used when painting.
        self.icons_by_name = dict(icons)
    def paint(self, painter, option, index):
        # Paint background (selection/editable state aware), then the icon
        # for the cell's display value, if any.  (Python 2: `unicode`.)
        painter.save()
        icon_name = unicode(index.model().data(index, Qt.DisplayRole).toString())
        background_color = QtGui.QColor(index.model().data(index, Qt.BackgroundRole))
        self.drawBackground(painter, option, index)
        rect = option.rect
        # Inset the icon slightly from the cell border.
        rect = QtCore.QRect(rect.left()+3, rect.top()+6, rect.width()-5, rect.height())
        if( option.state & QtGui.QStyle.State_Selected ):
            painter.fillRect(option.rect, option.palette.highlight())
        else:
            if not self.editable:
                painter.fillRect(option.rect, option.palette.window())
            else:
                painter.fillRect(option.rect, background_color)
        if icon_name:
            pixmap = self.icons_by_name[icon_name].getQPixmap()
            # Alignment flag 1 -- presumably Qt.AlignLeft; consider using the
            # named constant.
            QtGui.QApplication.style().drawItemPixmap(painter, rect, 1, pixmap)
        painter.restore()
| gpl-2.0 |
andela-ooladayo/django | django/contrib/contenttypes/views.py | 380 | 3608 | from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.requests import RequestSite
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
def shortcut(request, content_type_id, object_id):
    """
    Redirect to an object's page based on a content-type ID and an object ID.

    Raises Http404 when the content type, the object, or the object's
    get_absolute_url() method is missing.  If the sites framework is
    installed, the object's Site relations are inspected so a cross-site
    redirect can be issued when the object lives on another domain.
    """
    # Look up the object, making sure it's got a get_absolute_url() function.
    try:
        content_type = ContentType.objects.get(pk=content_type_id)
        if not content_type.model_class():
            raise http.Http404(_("Content type %(ct_id)s object has no associated model") %
                               {'ct_id': content_type_id})
        obj = content_type.get_object_for_this_type(pk=object_id)
    except (ObjectDoesNotExist, ValueError):
        # ValueError covers a non-numeric object_id for integer pks.
        raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
                           {'ct_id': content_type_id, 'obj_id': object_id})

    try:
        get_absolute_url = obj.get_absolute_url
    except AttributeError:
        raise http.Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") %
                           {'ct_name': content_type.name})
    absurl = get_absolute_url()

    # Try to figure out the object's domain, so we can do a cross-site redirect
    # if necessary.

    # If the object actually defines a domain, we're done.
    if absurl.startswith(('http://', 'https://', '//')):
        return http.HttpResponseRedirect(absurl)

    # Otherwise, we need to introspect the object's relationships for a
    # relation to the Site object
    object_domain = None

    if apps.is_installed('django.contrib.sites'):
        Site = apps.get_model('sites.Site')

        opts = obj._meta

        # First, look for an many-to-many relationship to Site.
        for field in opts.many_to_many:
            if field.remote_field.model is Site:
                try:
                    # Caveat: In the case of multiple related Sites, this just
                    # selects the *first* one, which is arbitrary.
                    object_domain = getattr(obj, field.name).all()[0].domain
                except IndexError:
                    pass
                if object_domain is not None:
                    break

        # Next, look for a many-to-one relationship to Site.
        if object_domain is None:
            for field in obj._meta.fields:
                if field.remote_field and field.remote_field.model is Site:
                    try:
                        object_domain = getattr(obj, field.name).domain
                    except Site.DoesNotExist:
                        pass
                    if object_domain is not None:
                        break

        # Fall back to the current site (if possible).
        if object_domain is None:
            try:
                object_domain = Site.objects.get_current(request).domain
            except Site.DoesNotExist:
                pass
    else:
        # Fall back to the current request's site.
        object_domain = RequestSite(request).domain

    # If all that malarkey found an object domain, use it. Otherwise, fall back
    # to whatever get_absolute_url() returned.
    if object_domain is not None:
        protocol = request.scheme
        return http.HttpResponseRedirect('%s://%s%s'
                                         % (protocol, object_domain, absurl))
    else:
        return http.HttpResponseRedirect(absurl)
| bsd-3-clause |
BlastarIndia/Blastarix | qemu-1.7.0/roms/seabios/tools/acpi_extract.py | 9 | 12133 | #!/usr/bin/python
# Copyright (C) 2011 Red Hat, Inc., Michael S. Tsirkin <mst@redhat.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
# Process mixed ASL/AML listing (.lst file) produced by iasl -l
# Locate and execute ACPI_EXTRACT directives, output offset info
#
# Documentation of ACPI_EXTRACT_* directive tags:
#
# These directive tags output offset information from AML for BIOS runtime
# table generation.
# Each directive is of the form:
# ACPI_EXTRACT_<TYPE> <array_name> <Operator> (...)
# and causes the extractor to create an array
# named <array_name> with offset, in the generated AML,
# of an object of a given type in the following <Operator>.
#
# A directive must fit on a single code line.
#
# Object type in AML is verified, a mismatch causes a build failure.
#
# Directives and operators currently supported are:
# ACPI_EXTRACT_NAME_DWORD_CONST - extract a Dword Const object from Name()
# ACPI_EXTRACT_NAME_WORD_CONST - extract a Word Const object from Name()
# ACPI_EXTRACT_NAME_BYTE_CONST - extract a Byte Const object from Name()
# ACPI_EXTRACT_METHOD_STRING - extract a NameString from Method()
# ACPI_EXTRACT_NAME_STRING - extract a NameString from Name()
# ACPI_EXTRACT_PROCESSOR_START - start of Processor() block
# ACPI_EXTRACT_PROCESSOR_STRING - extract a NameString from Processor()
# ACPI_EXTRACT_PROCESSOR_END - offset at last byte of Processor() + 1
# ACPI_EXTRACT_PKG_START - start of Package block
#
# ACPI_EXTRACT_ALL_CODE - create an array storing the generated AML bytecode
#
# ACPI_EXTRACT is not allowed anywhere else in code, except in comments.
import re;
import sys;
import fileinput;
aml = []     # AML bytecode bytes parsed from the listing, in order
asl = []     # asl_line records, one per recognised ASL listing line
output = {}  # array name -> list of extracted offsets (or the aml list itself)
debug = ""   # description of the input position, included in error messages
class asl_line:
    # One ASL source line paired with the AML offset it maps to.
    line = None        # the ASL text with the listing prefix stripped
    lineno = None      # 1-based input line number, for diagnostics
    aml_offset = None  # offset into the aml array when this line was read
def die(diag):
    """Report *diag* together with the current debug context and abort."""
    message = "Error: %s; %s\n" % (diag, debug)
    sys.stderr.write(message)
    sys.exit(1)
#Store an ASL command, matching AML offset, and input line (for debugging)
def add_asl(lineno, line):
    """Record an ASL source line together with the current AML offset."""
    l = asl_line()
    l.line = line
    l.lineno = lineno
    # len(aml) == number of AML bytes parsed so far, i.e. the offset at
    # which code generated by this ASL line will start.
    l.aml_offset = len(aml)
    asl.append(l)
#Store an AML byte sequence
#Verify that offset output by iasl matches # of bytes so far
def add_aml(offset, line):
    """Parse one AML hex-dump listing line and append its bytes to aml.

    *offset* is the hexadecimal offset string printed by iasl; it must
    equal the number of bytes collected so far, otherwise the listing is
    inconsistent and we abort.
    """
    o = int(offset, 16);
    # Sanity check: offset must match size of code so far
    if (o != len(aml)):
        die("Offset 0x%x != 0x%x" % (o, len(aml)))
    # Strip any trailing dots and ASCII dump after "
    line = re.sub(r'\s*\.*\s*".*$',"", line)
    # Strip trailing whitespace
    line = re.sub(r'\s+$',"", line)
    # Strip leading whitespace
    line = re.sub(r'^\s+',"", line)
    # Split on whitespace
    code = re.split(r'\s+', line)
    for c in code:
        # Require a legal hex number, two digits
        if (not(re.search(r'^[0-9A-Fa-f][0-9A-Fa-f]$', c))):
            die("Unexpected octet %s" % c);
        aml.append(int(c, 16));
# Process aml bytecode array, decoding AML
def aml_pkglen_bytes(offset):
    """Return the total size in bytes of the PkgLength field at *offset*."""
    # PkgLength can be multibyte: the top two bits (bits 7-6) of the lead
    # byte give the number of extra bytes that follow it.
    pkglenbytes = aml[offset] >> 6;
    return pkglenbytes + 1
def aml_pkglen(offset):
    """Decode and sanity-check the PkgLength value encoded at *offset*."""
    pkgstart = offset
    pkglenbytes = aml_pkglen_bytes(offset)
    # Low 6 bits of the lead byte always contribute to the length.
    pkglen = aml[offset] & 0x3F
    # If multibyte, first nibble only uses bits 0-3
    if ((pkglenbytes > 1) and (pkglen & 0x30)):
        die("PkgLen bytes 0x%x but first nibble 0x%x expected 0x0X" %
            (pkglen, pkglen))
    offset += 1
    pkglenbytes -= 1
    # Each following byte supplies 8 more bits, starting at bit 4,
    # least-significant byte first.
    for i in range(pkglenbytes):
        pkglen |= aml[offset + i] << (i * 8 + 4)
    # PkgLength is counted from its own first byte, so the block must fit
    # inside the AML we have collected.
    if (len(aml) < pkgstart + pkglen):
        die("PckgLen 0x%x at offset 0x%x exceeds AML size 0x%x" %
            (pkglen, offset, len(aml)))
    return pkglen
# Given method offset, find its NameString offset
def aml_method_string(offset):
    """Given the offset of a MethodOp, return the offset of its NameString."""
    # Method encoding: 0x14 MethodOp PkgLength NameString MethodFlags TermList
    opcode = aml[offset]
    if opcode != 0x14:
        die( "Method offset 0x%x: expected 0x14 actual 0x%x" %
             (offset, opcode));
    # Skip the opcode byte, then the (possibly multibyte) PkgLength.
    return offset + 1 + aml_pkglen_bytes(offset + 1)
# Given name offset, find its NameString offset
def aml_name_string(offset):
    """Given the offset of a NameOp, return the offset of its NameString."""
    #0x08 NameOp NameString DataRef
    if (aml[offset] != 0x08):
        die( "Name offset 0x%x: expected 0x08 actual 0x%x" %
             (offset, aml[offset]));
    offset += 1
    # Skip a leading root char '\' (0x5c) or parent prefix '^' (0x5e)
    # in the NameString, if present.
    if (aml[offset] == 0x5c or aml[offset] == 0x5e):
        offset += 1
    return offset;
# Given data offset, find 8 byte buffer offset
def aml_data_buffer8(offset):
    """Verify an 8-byte Buffer header at *offset* and return the offset of
    the buffer payload."""
    # Buffer encoding: 0x11 BufferOp, PkgLength 0x0B, 0x0A BytePrefix,
    # 0x08 buffer size (8 payload bytes follow).
    expect = [0x11, 0x0B, 0x0A, 0x08]
    if (aml[offset:offset+4] != expect):
        # Bug fix: the "expected" and "actual" values were swapped in the
        # original error message.
        die( "Name offset 0x%x: expected %s actual %s" %
             (offset, expect, aml[offset:offset+4]))
    return offset + len(expect)
# Given data offset, find dword const offset
def aml_data_dword_const(offset):
    """Verify a DWordPrefix (0x0C) at *offset*; return the offset of the
    4-byte value that follows it."""
    if (aml[offset] != 0x0C):
        die( "Name offset 0x%x: expected 0x0C actual 0x%x" %
             (offset, aml[offset]));
    return offset + 1;
# Given data offset, find word const offset
def aml_data_word_const(offset):
    """Verify a WordPrefix (0x0B) at *offset*; return the offset of the
    2-byte value that follows it."""
    if (aml[offset] != 0x0B):
        die( "Name offset 0x%x: expected 0x0B actual 0x%x" %
             (offset, aml[offset]));
    return offset + 1;
# Given data offset, find byte const offset
def aml_data_byte_const(offset):
    """Verify a BytePrefix (0x0A) at *offset*; return the offset of the
    1-byte value that follows it."""
    if (aml[offset] != 0x0A):
        die( "Name offset 0x%x: expected 0x0A actual 0x%x" %
             (offset, aml[offset]));
    return offset + 1;
# Find name'd buffer8
def aml_name_buffer8(offset):
    """Offset of the 8-byte buffer payload inside a Name(..., Buffer(...)).

    The +4 skips past the 4-character name segment that follows the
    NameString offset.
    """
    return aml_data_buffer8(aml_name_string(offset) + 4)
# Given name offset, find dword const offset
def aml_name_dword_const(offset):
    """Offset of the dword constant inside a Name(); +4 skips the NameSeg."""
    return aml_data_dword_const(aml_name_string(offset) + 4)
# Given name offset, find word const offset
def aml_name_word_const(offset):
    """Offset of the word constant inside a Name(); +4 skips the NameSeg."""
    return aml_data_word_const(aml_name_string(offset) + 4)
# Given name offset, find byte const offset
def aml_name_byte_const(offset):
    """Offset of the byte constant inside a Name(); +4 skips the NameSeg."""
    return aml_data_byte_const(aml_name_string(offset) + 4)
def aml_device_start(offset):
    """Verify a DeviceOp (0x5B 0x82) at *offset*; return *offset* unchanged."""
    #0x5B 0x82 DeviceOp PkgLength NameString
    if ((aml[offset] != 0x5B) or (aml[offset + 1] != 0x82)):
        die( "Name offset 0x%x: expected 0x5B 0x82 actual 0x%x 0x%x" %
             (offset, aml[offset], aml[offset + 1]));
    return offset
def aml_device_string(offset):
    """Given a DeviceOp offset, return the offset of its NameString."""
    #0x5B 0x82 DeviceOp PkgLength NameString
    # Opcode validation only; dies on mismatch.  (The previously unused
    # 'start' local has been removed.)
    aml_device_start(offset)
    offset += 2
    offset += aml_pkglen_bytes(offset)
    return offset
def aml_device_end(offset):
    """Given a DeviceOp offset, return the offset just past the Device block."""
    # Opcode validation only; dies on mismatch.
    aml_device_start(offset)
    offset += 2
    # PkgLength is counted from its own first byte, so the end of the
    # device block is PkgLength measured from here.  (Unused locals
    # 'start' and 'pkglenbytes' removed.)
    return offset + aml_pkglen(offset)
def aml_processor_start(offset):
    """Verify a ProcessorOp (0x5B 0x83) at *offset*; return *offset* unchanged."""
    #0x5B 0x83 ProcessorOp PkgLength NameString ProcID
    if ((aml[offset] != 0x5B) or (aml[offset + 1] != 0x83)):
        die( "Name offset 0x%x: expected 0x5B 0x83 actual 0x%x 0x%x" %
             (offset, aml[offset], aml[offset + 1]));
    return offset
def aml_processor_string(offset):
    """Given a ProcessorOp offset, return the offset of its NameString."""
    #0x5B 0x83 ProcessorOp PkgLength NameString ProcID
    # Opcode validation only; dies on mismatch.  (Unused 'start' removed.)
    aml_processor_start(offset)
    offset += 2
    offset += aml_pkglen_bytes(offset)
    return offset
def aml_processor_end(offset):
    """Given a ProcessorOp offset, return the offset just past the block."""
    # Opcode validation only; dies on mismatch.
    aml_processor_start(offset)
    offset += 2
    # PkgLength is counted from its own first byte.  (Unused locals
    # 'start' and 'pkglenbytes' removed.)
    return offset + aml_pkglen(offset)
def aml_package_start(offset):
    """Given a Name(..., Package(...)) offset, return the offset of the
    first package element (past PkgLength and NumElements)."""
    offset = aml_name_string(offset) + 4
    # 0x12 PkgLength NumElements PackageElementList
    if (aml[offset] != 0x12):
        die( "Name offset 0x%x: expected 0x12 actual 0x%x" %
             (offset, aml[offset]));
    offset += 1
    # Skip the PkgLength bytes plus one byte assumed to hold NumElements.
    return offset + aml_pkglen_bytes(offset) + 1
# Parse the mixed ASL/AML listing produced by "iasl -l".
# ASL listing lines: whitespace, decimal line number, ":  " or "....", code.
# AML listing lines: hex offset, ":  " or "....", hex byte dump.
# Performance fix: the two patterns are loop invariants, so compile them
# once instead of on every input line.
pasl = re.compile(r'^\s+([0-9]+)(:\s\s|\.\.\.\.)\s*')
paml = re.compile(r'^([0-9A-Fa-f]+)(:\s\s|\.\.\.\.)\s*')
lineno = 0
for line in fileinput.input():
    # Strip trailing newline
    line = line.rstrip();
    # line number and debug string to output in case of errors
    lineno = lineno + 1
    debug = "input line %d: %s" % (lineno, line)
    m = pasl.search(line)
    if (m):
        add_asl(lineno, pasl.sub("", line));
    m = paml.search(line)
    if (m):
        add_aml(m.group(1), paml.sub("", line))
# Now go over code
# Track AML offset of a previous non-empty ASL command
prev_aml_offset = -1
for i in range(len(asl)):
    debug = "input line %d: %s" % (asl[i].lineno, asl[i].line)
    l = asl[i].line
    # skip if not an extract directive
    a = len(re.findall(r'ACPI_EXTRACT', l))
    if (not a):
        # If not empty, store AML offset. Will be used for sanity checks
        # IASL seems to put {}. at random places in the listing.
        # Ignore any non-words for the purpose of this test.
        m = re.search(r'\w+', l)
        if (m):
            prev_aml_offset = asl[i].aml_offset
        continue
    if (a > 1):
        die("Expected at most one ACPI_EXTRACT per line, actual %d" % a)
    # A directive looks like: /* ACPI_EXTRACT_<TYPE> <array_name> */
    mext = re.search(r'''
                     ^\s* # leading whitespace
                     /\*\s* # start C comment
                     (ACPI_EXTRACT_\w+) # directive: group(1)
                     \s+ # whitspace separates directive from array name
                     (\w+) # array name: group(2)
                     \s*\*/ # end of C comment
                     \s*$ # trailing whitespace
                     ''', l, re.VERBOSE)
    if (not mext):
        die("Stray ACPI_EXTRACT in input")
    # previous command must have produced some AML,
    # otherwise we are in a middle of a block
    if (prev_aml_offset == asl[i].aml_offset):
        die("ACPI_EXTRACT directive in the middle of a block")
    directive = mext.group(1)
    array = mext.group(2)
    offset = asl[i].aml_offset
    if (directive == "ACPI_EXTRACT_ALL_CODE"):
        if array in output:
            die("%s directive used more than once" % directive)
        # NOTE: stores a reference to the aml list itself, not a copy.
        output[array] = aml
        continue
    # Dispatch to the decoder that locates the offset for this directive;
    # the decoders validate the AML opcodes and die on mismatch.
    if (directive == "ACPI_EXTRACT_NAME_BUFFER8"):
        offset = aml_name_buffer8(offset)
    elif (directive == "ACPI_EXTRACT_NAME_DWORD_CONST"):
        offset = aml_name_dword_const(offset)
    elif (directive == "ACPI_EXTRACT_NAME_WORD_CONST"):
        offset = aml_name_word_const(offset)
    elif (directive == "ACPI_EXTRACT_NAME_BYTE_CONST"):
        offset = aml_name_byte_const(offset)
    elif (directive == "ACPI_EXTRACT_NAME_STRING"):
        offset = aml_name_string(offset)
    elif (directive == "ACPI_EXTRACT_METHOD_STRING"):
        offset = aml_method_string(offset)
    elif (directive == "ACPI_EXTRACT_DEVICE_START"):
        offset = aml_device_start(offset)
    elif (directive == "ACPI_EXTRACT_DEVICE_STRING"):
        offset = aml_device_string(offset)
    elif (directive == "ACPI_EXTRACT_DEVICE_END"):
        offset = aml_device_end(offset)
    elif (directive == "ACPI_EXTRACT_PROCESSOR_START"):
        offset = aml_processor_start(offset)
    elif (directive == "ACPI_EXTRACT_PROCESSOR_STRING"):
        offset = aml_processor_string(offset)
    elif (directive == "ACPI_EXTRACT_PROCESSOR_END"):
        offset = aml_processor_end(offset)
    elif (directive == "ACPI_EXTRACT_PKG_START"):
        offset = aml_package_start(offset)
    else:
        die("Unsupported directive %s" % directive)
    if array not in output:
        output[array] = []
    output[array].append(offset)

debug = "at end of file"
def get_value_type(maxvalue):
    """Return the smallest C integer type name able to hold *maxvalue*."""
    if maxvalue < 0x100:
        return "char"
    if maxvalue < 0x10000:
        return "short"
    return "int"
# Pretty print output
# Emit each extracted array as a C "static unsigned <type> name[] = {...};"
# declaration, using the narrowest type that fits the largest value.
for array in output.keys():
    otype = get_value_type(max(output[array]))
    odata = []
    for value in output[array]:
        odata.append("0x%x" % value)
    sys.stdout.write("static unsigned %s %s[] = {\n" % (otype, array))
    sys.stdout.write(",\n".join(odata))
    sys.stdout.write('\n};\n');
| gpl-3.0 |
PulsePod/evepod | lib/python2.7/site-packages/newrelic-2.12.0.10/newrelic/hooks/nosql_pymongo.py | 5 | 1288 | import newrelic.api.function_trace
# pymongo Collection methods to wrap in a New Relic function trace.
# Methods missing from the installed pymongo version are skipped via a
# hasattr() check at instrumentation time.
_methods = ['save', 'insert', 'update', 'drop', 'remove', 'find_one',
            'find', 'count', 'create_index', 'ensure_index', 'drop_indexes',
            'drop_index', 'reindex', 'index_information', 'options',
            'group', 'rename', 'distinct', 'map_reduce', 'inline_map_reduce',
            'find_and_modify']
def instrument_pymongo_connection(module):
    """Wrap pymongo Connection construction in a function trace.

    Must name function explicitly as pymongo overrides the
    __getattr__() method in a way that breaks introspection.
    """
    newrelic.api.function_trace.wrap_function_trace(
        module, 'Connection.__init__',
        name='%s:Connection.__init__' % module.__name__)
def instrument_pymongo_collection(module):
    """Wrap the pymongo Collection query/update methods in function traces.

    Method names are listed explicitly (in _methods) because pymongo
    overrides __getattr__() in a way that breaks introspection; methods
    absent from the installed pymongo version are skipped.
    """
    # Commented-out dead code (an older wrap_function_trace call using
    # group='Custom/MongoDB') has been removed.
    for method in _methods:
        if hasattr(module.Collection, method):
            newrelic.api.function_trace.wrap_function_trace(
                module, 'Collection.%s' % method,
                name='%s:Collection.%s' % (module.__name__, method))
| apache-2.0 |
fungos/gemuo | src/merge.py | 2 | 1372 | #!/usr/bin/python
#
# GemUO
#
# (c) 2005-2010 Max Kellermann <max@duempel.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from uo.entity import *
from gemuo.entity import Item
from gemuo.simple import simple_run
from gemuo.engine.messages import PrintMessages
from gemuo.engine.items import MergeStacks
def find_box_at(world, x, y):
    """Return the first large-crate Item at map position (x, y), or None."""
    for e in world.iter_entities_at(x, y):
        if isinstance(e, Item) and e.item_id in ITEMS_LARGE_CRATE:
            return e
    return None
def find_restock_box(world):
    """Find the large crate one tile north of the player. It is used
    for restocking."""
    # one tile north == same x, y - 1
    return find_box_at(world, world.player.position.x, world.player.position.y - 1)
def run(client):
    """Entry point: merge ingot and lockpick stacks inside the restock box."""
    PrintMessages(client)
    box = find_restock_box(client.world)
    #box = client.world.backpack()
    if box is None:
        # NOTE(review): NoSuchEntity is not defined in this module;
        # presumably it comes from one of the star imports above — verify,
        # otherwise this raises NameError instead of the intended error.
        raise NoSuchEntity('No box')
    return MergeStacks(client, box, ITEMS_INGOT + ITEMS_LOCKPICK)

simple_run(run)
| gpl-2.0 |
idigbio-api-hackathon/LifemapperQgis | lifemapperTools/tools/postOccSet.py | 1 | 11260 | # -*- coding: utf-8 -*-
"""
@author: Jeff Cavner
@contact: jcavner@ku.edu
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
import os
from PyQt4.QtGui import *
from PyQt4.QtCore import QSettings
from PyQt4.QtCore import Qt, QUrl, QObject
from qgis.core import *
from qgis.gui import *
from lifemapperTools.tools.ui_postOccSetDialog import Ui_Dialog
from lifemapperTools.tools.constructGrid import ConstructGridDialog
from lifemapperTools.tools.listExperiments import ListExperimentDialog
from lifemapperTools.tools.newExperiment import NewExperimentDialog
from LmClient.lmClientLib import LMClient
from lifemapperTools.common.communicate import Communicate
class UploadOccSetDialog(QDialog, Ui_Dialog):
    """Dialog for posting (uploading) an occurrence set to the Lifemapper
    SDM service, either from a file on disk or from a point vector layer
    already loaded in the QGIS map canvas."""
    # .............................................................................
    # Constructor
    # .............................................................................
    def __init__(self, iface, client=None):
        """
        @param iface: QGIS interface object
        @param client: authenticated Lifemapper client library session
        """
        QDialog.__init__(self)
        self.setupUi()
        self.interface = iface
        self.client = client
        # Path of the occurrence data to upload; set either from the file
        # edit or from the canvas-layer combo.
        self.fileName = None
        self.populateCanvasCombo()

    # ..............................................................................
    def accept(self):
        """Validate the form and post the occurrence set to the service.

        On success the new occurrence set id, display name and EPSG code
        are emitted on the Communicate singleton so the experiment dialogs
        can pick them up, and the dialog closes.
        """
        if self.validate():
            # Prevent double-posting while the request is in flight.
            self.uploadBut.setEnabled(False)
            try:
                response = self.client.sdm.postOccurrenceSet(self.displayName, self.fileType,
                                                             self.fileName, epsgCode=self.epsgCode)
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are not swallowed.
            except Exception:
                QMessageBox.information(self,
                                        "Problem...",
                                        "Problem with the post occurrence set service",
                                        QMessageBox.Ok)
            else:
                occSetId = response.id
                Communicate.instance().postedOccurrenceSet.emit(occSetId, self.displayName, self.epsgCode)
                self.close()

    # ..............................................................................
    def validate(self):
        """Check the form inputs; show a message box and return False when
        something is missing, otherwise set self.epsgCode and return True."""
        epsgCode = str(self.epsgCodeEdit.text())
        self.displayName = self.displayNameEdit.text()
        if self.fileName is None:
            self.fileName = self.file.text()
        self.fileType = self.getFileType(userData=False)
        self.occMapLayerName = self.canvasLayersCombo.currentText()
        valid = True
        if len(epsgCode) == 0:
            message = "Please supply an EPSG code"
            valid = False
        elif len(self.displayName) == 0:
            message = "Please supply a display name"
            valid = False
        elif len(self.fileName) == 0:
            if self.occMapLayerName == "":
                message = "Please supply a filename or choose a layer from your map canvas"
                valid = False
        elif len(self.fileType) == 0:
            message = "Please supply a file type"
            valid = False
        # This ensures that the file type combo matches the extension of
        # the chosen file.  (The unused local shadowing 'fileName' was
        # replaced with '_'.)
        _, fileExtension = os.path.splitext(self.fileName)
        fileExt = fileExtension[1:]
        extIdx = self.fileTypeCombo.findData(fileExt, role=Qt.UserRole)
        self.fileTypeCombo.setCurrentIndex(extIdx)
        self.fileType = self.getFileType(userData=False)
        if not valid:
            QMessageBox.information(self,
                                    "Problem...",
                                    message,
                                    QMessageBox.Ok)
        else:
            self.epsgCode = int(epsgCode)
        # Return at method level so both branches return a bool.
        return valid

    # ..............................................................................
    def setFileNameFromCombo(self):
        """Set self.fileName from the data source of the layer currently
        selected in the canvas-layers combo, committing any pending edits
        on that layer first."""
        currentIndex = self.canvasLayersCombo.currentIndex()
        fileNameVariant = self.canvasLayersCombo.itemData(currentIndex, role=Qt.UserRole)
        tocName = str(self.canvasLayersCombo.itemData(currentIndex, role=Qt.DisplayRole))
        layers = self.interface.legendInterface().layers()
        for layer in layers:
            if layer.name() == tocName:
                # Toggle editing off so unsaved edits are written to disk
                # before the file is uploaded.
                if layer.isEditable() and layer.isModified():
                    self.interface.setActiveLayer(layer)
                    self.interface.actionToggleEditing().trigger()
        self.fileName = str(fileNameVariant)

    # ..............................................................................
    def selectFromCanvas(self, index):
        """Slot for the canvas-layers combo; index 0 is the blank
        'no selection' entry."""
        if index != 0:
            # A canvas layer was chosen: clear and disable the filename
            # edit and force the file type to shapefile.
            self.file.setText('')
            self.file.setEnabled(False)
            shpIdx = self.fileTypeCombo.findData('shp', role=Qt.UserRole)
            self.fileTypeCombo.setCurrentIndex(shpIdx)
            self.setFileNameFromCombo()
        elif index == 0:
            self.fileName = None

    # ..............................................................................
    def populateCanvasCombo(self):
        """Fill the canvas-layers combo with the point vector layers
        currently loaded in the map, keyed by their data source path."""
        layers = QgsMapLayerRegistry.instance().mapLayers()
        self.canvasLayersCombo.addItem("", userData="select")
        for key in layers.keys():
            # probably need to think about multipoint
            if layers[key].type() == layers[key].VectorLayer:
                if layers[key].geometryType() == QGis.Point:
                    source = layers[key].source()
                    name = layers[key].name()
                    self.canvasLayersCombo.addItem(name, userData=source)
        # Connect the combo here, after populating, because adding items
        # triggers the currentIndexChanged signal.
        self.canvasLayersCombo.currentIndexChanged.connect(self.selectFromCanvas)

    # ..............................................................................
    def openProjSelectorSetEPSG(self):
        """
        @summary: opens the stock qgis projection selector
        and sets epsg edit field and set map units attribute
        """
        projSelector = QgsGenericProjectionSelector(self)
        dialog = projSelector.exec_()
        EpsgCode = projSelector.selectedAuthId().replace('EPSG:', '')
        # some projections don't have epsg's
        if dialog != 0:
            if EpsgCode != 0:  # will be zero if projection doesn't have an epsg
                crs = QgsCoordinateReferenceSystem()
                crs.createFromOgcWmsCrs(projSelector.selectedAuthId())
                mapunitscode = crs.mapUnits()
                # QGis map unit codes: 0 = meters, 1 = feet, 2 = decimal degrees
                if mapunitscode == 0:
                    self.mapunits = 'meters'
                elif mapunitscode == 1:
                    self.mapunits = 'feet'
                elif mapunitscode == 2:
                    self.mapunits = 'dd'
                self.epsgCodeEdit.setText(str(EpsgCode))
            else:
                # error message saying that the users chosen projection doesn't have a epsg
                self.mapunits = None
                message = "The projection you have chosen does not have an epsg code"
                QMessageBox.information(self,
                                        "Problem...",
                                        message,
                                        QMessageBox.Ok)
        else:
            self.mapunits = None

    # ....................................................................................
    def showFileDialog(self):
        """
        @summary: Shows a file selection dialog
        """
        # get the selection for file type and set file type here
        settings = QSettings()
        self.file.setEnabled(True)
        filetype = self.getFileType()
        filetypestr = "%s files (*.%s)" % (filetype, filetype)
        dirName = settings.value("/UI/lastShapefileDir")
        fileDialog = QgsEncodingFileDialog(self, "Open File", dirName, filetypestr)
        fileDialog.setDefaultSuffix("shp")
        fileDialog.setFileMode(QFileDialog.AnyFile)
        fileDialog.setAcceptMode(QFileDialog.AcceptOpen)
        fileDialog.setConfirmOverwrite(True)
        if not fileDialog.exec_() == QFileDialog.Accepted:
            return
        filename = fileDialog.selectedFiles()
        self.addFile(filename[0])

    # ..................................................................................
    def getFileType(self, userData=True):
        """Return the currently selected file type, either from the combo's
        user data (extension) or its display text."""
        if userData:
            currentIndex = self.fileTypeCombo.currentIndex()
            fileVariant = self.fileTypeCombo.itemData(currentIndex, role=Qt.UserRole)
            filetype = str(fileVariant)
        else:
            filetype = str(self.fileTypeCombo.currentText())
        return filetype

    # ..................................................................................
    def addFile(self, filename):
        """Record *filename* as the upload source and reset the canvas
        combo (which fires selectFromCanvas and clears self.fileName)."""
        self.canvasLayersCombo.setCurrentIndex(0)
        self.file.setText(filename)

    # .................................................................................
    def help(self):
        """Open the Lifemapper help browser scrolled to the sign-in anchor."""
        # Bug fix: the widget was previously stored as self.help, which
        # shadowed this method on the instance; renamed to self.helpWidget.
        self.helpWidget = QWidget()
        self.helpWidget.setWindowTitle('Lifemapper Help')
        self.helpWidget.resize(600, 400)
        self.helpWidget.setMinimumSize(600, 400)
        self.helpWidget.setMaximumSize(1000, 1000)
        layout = QVBoxLayout()
        helpDialog = QTextBrowser()
        helpDialog.setOpenExternalLinks(True)
        helppath = os.path.dirname(os.path.realpath(__file__)) + '/documents/help.html'
        helpDialog.setSource(QUrl.fromLocalFile(helppath))
        helpDialog.scrollToAnchor('signIn')
        layout.addWidget(helpDialog)
        self.helpWidget.setLayout(layout)
        # Help must be usable while the dialog stays open.
        if self.isModal():
            self.setModal(False)
        self.helpWidget.show()
| gpl-2.0 |
ankraft/onem2mlib | tests/test_cse.py | 1 | 3297 | #!/usr/local/bin/python3
#
# test_cse.py
#
# (c) 2017 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Unit test for the <CSEBase> reource implementation.
#
import unittest
import os, sys
sys.path.append('..')
from onem2mlib import *
from conf import *
class TestCSE(unittest.TestCase,):
    """Integration tests for the <CSEBase> resource wrapper.

    NOTE: the tests share class-level state and are order dependent;
    they are meant to run in the sequence built in __main__ below.
    """
    # Class-level fixtures shared across the ordered test methods.
    session = None
    ae = None
    cse = None

    @classmethod
    def setUpClass(cls):
        # One session (from conf.py values) for the whole test class.
        TestCSE.session = Session(host, originator, encoding)

    @classmethod
    def tearDownClass(cls):
        # Remove the test AE if an earlier failure prevented test_finit
        # from cleaning up.
        if TestCSE.ae:
            TestCSE.ae.deleteFromCSE()
            TestCSE.ae = None

    def test_init(self):
        # Session attributes must reflect the configured values.
        self.assertIsNotNone(TestCSE.session)
        self.assertEqual(TestCSE.session.address, host)
        self.assertEqual(TestCSE.session.originator, originator)
        self.assertEqual(TestCSE.session.encoding, encoding)

    def test_cseGet(self):
        # Retrieve the CSEBase and ensure the test AE name is free.
        TestCSE.cse = CSEBase(TestCSE.session, CSE_ID)
        self.assertIsNotNone(TestCSE.cse)
        self.assertEqual(TestCSE.cse.type, CON.Type_CSEBase)
        if TestCSE.cse.findAE(AE_NAME):
            self.fail('*** AE with name "' + AE_NAME + '" already present in CSE. Please remove it first.')

    def test_createAEInCSE(self):
        # instantly=False defers creation until createInCSE() is called.
        TestCSE.ae = AE(TestCSE.cse, resourceName=AE_NAME, labels=AE_LABELS, instantly=False)
        self.assertTrue(TestCSE.ae.createInCSE())
        self.assertEqual(TestCSE.ae.resourceName, AE_NAME)

    def test_findAEInCSE(self):
        aef = TestCSE.cse.findAE(AE_NAME)
        self.assertIsNotNone(aef)
        self.assertEqual(aef.resourceID, TestCSE.ae.resourceID)

    def test_aesInCSE(self):
        # The AE created above must appear in the CSE's AE list.
        aes = TestCSE.cse.aes()
        self.assertTrue(len(aes) > 0)
        found = False
        for ae in aes:
            if ae.resourceID == TestCSE.ae.resourceID:
                found = True
                break
        self.assertTrue(found)

    def test_containersInCSE(self):
        self.assertIsNotNone(TestCSE.cse)
        # add a container
        cnt = TestCSE.cse.addContainer(CNT_NAME)
        self.assertIsNotNone(cnt)
        # check the container
        cnts = TestCSE.cse.containers()
        self.assertTrue(len(cnts) == 1)
        found = False
        for c in cnts:
            if c.resourceID == cnt.resourceID:
                found = True
                break
        self.assertTrue(found)

    def test_addFindAE(self):
        self.assertIsNotNone(TestCSE.cse)
        # create an AE
        ae = TestCSE.cse.addAE(AE_NAME + '_1')
        self.assertIsNotNone(ae)
        # try to retrieve it
        a = TestCSE.cse.findAE(AE_NAME + '_1')
        self.assertIsNotNone(a)
        self.assertEqual(a.resourceID, ae.resourceID)

    def test_addFindGroup(self):
        self.assertIsNotNone(TestCSE.cse)
        # create a group containing the test AE
        grp = TestCSE.cse.addGroup(GRP_NAME, [TestCSE.ae])
        self.assertIsNotNone(grp)
        # try to retrieve it
        g = TestCSE.cse.findGroup(GRP_NAME)
        self.assertIsNotNone(g)
        self.assertEqual(g.resourceID, grp.resourceID)

    def test_finit(self):
        # Clean up the AE created in test_createAEInCSE.
        self.assertIsNotNone(TestCSE.ae)
        self.assertTrue(TestCSE.ae.deleteFromCSE())
        TestCSE.ae = None
if __name__ == '__main__':
    # Build the suite explicitly: the tests share state and must run in
    # exactly this order; failfast stops the run at the first failure so
    # later tests don't operate on inconsistent state.
    suite = unittest.TestSuite()
    suite.addTest(TestCSE('test_init'))
    suite.addTest(TestCSE('test_cseGet'))
    suite.addTest(TestCSE('test_createAEInCSE'))
    suite.addTest(TestCSE('test_findAEInCSE'))
    suite.addTest(TestCSE('test_aesInCSE'))
    suite.addTest(TestCSE('test_containersInCSE'))
    suite.addTest(TestCSE('test_addFindAE'))
    suite.addTest(TestCSE('test_addFindGroup'))
    suite.addTest(TestCSE('test_finit'))
    unittest.TextTestRunner(verbosity=2, failfast=True).run(suite)
| bsd-3-clause |
rackerlabs/rax-autoscaler | raxas/scaling_group.py | 2 | 8082 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# this file is part of 'RAX-AutoScaler'
#
# Copyright 2014 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import requests
import requests.exceptions
import pyrax
import pyrax.exceptions
from raxas import common
from raxas import enums
class ScalingGroup(object):
    def __init__(self, config, group_name):
        """Instantiate a ScalingGroup object.

        :param config: Configuration dict for this scaling group
        :param group_name: Name of the group to print in the log file
        """
        self._group_name = group_name
        self._config = self.check_config(config)
        # Lazily-populated caches used by the scaling_group, state and
        # active_servers properties below.
        self._scaling_group = None
        self._servers_state = None
        self._active_servers = None
@classmethod
def check_config(cls, config):
logger = common.get_logger()
if None in [
config.get('group_id'),
config.get('scale_up_policy'),
config.get('scale_down_policy')]:
common.exit_with_error('Invalid group configuration')
if None in [
config.get('plugins')]:
logger.warn('DeprecationWarning: You are using a deprecated config file please update'
' your configuration file to v0.3 standard.')
return config
else:
return config
    @property
    def plugin_config(self):
        """Plugin configuration dict for this group.

        For deprecated pre-v0.3 (plugin-less) config files an equivalent
        'raxmon' plugin section is synthesised from the legacy top-level
        keys (with defaults), so callers only ever see the v0.3 layout.
        """
        if self._config.get('plugins') is None:
            self._config['plugins'] = \
                {
                    'raxmon':
                    {
                        'scale_up_threshold': self._config.get('scale_up_threshold', 0.6),
                        'scale_down_threshold': self._config.get('scale_down_threshold', 0.4),
                        'check_config': self._config.get('check_config', '{}'),
                        'metric_name': self._config.get('metric_name', '1m'),
                        'check_type': self._config.get('check_type', 'agent.load_average')
                    }
                }
        return self._config.get('plugins')
    @property
    def group_uuid(self):
        """The autoscale group id from the configuration ('group_id' key)."""
        return self.get_group_value('group_id')
    @property
    def launch_config(self):
        """Launch configuration of the group, or None when the group could
        not be fetched (self.scaling_group is None -> AttributeError)."""
        try:
            return self.scaling_group.get_launch_config()
        except AttributeError:
            return None
@property
def scaling_group(self):
if self._scaling_group is None:
logger = common.get_logger()
autoscale_api = pyrax.autoscale
try:
self._scaling_group = autoscale_api.get(self.group_uuid)
except pyrax.exc.PyraxException as error:
logger.error('Error: Unable to get scaling group \'%s\': %s',
self.group_uuid, error)
return None
else:
return self._scaling_group
else:
return self._scaling_group
@property
def state(self):
if self._servers_state is None:
try:
self._servers_state = self.scaling_group.get_state()
except AttributeError:
return None
else:
return self._servers_state
else:
return self._servers_state
@property
def active_servers(self):
if self._active_servers is None:
try:
self._active_servers = self.state.get('active', [])
except AttributeError:
return []
else:
return self._active_servers
else:
return self._active_servers
    @property
    def is_master(self):
        """This property checks scaling group state and determines if this node is a master.

        The first two entries (by position) of the group's active-server
        list are treated as masters.

        :returns: enums.NodeStatus
        """
        logger = common.get_logger()
        masters = []
        node_id = common.get_machine_uuid(self)
        if node_id is None:
            logger.error('Failed to get server uuid')
            return enums.NodeStatus.Unknown
        active_count = len(self.active_servers)
        if active_count == 1:
            masters.append(self.active_servers[0])
        elif active_count > 1:
            masters.append(self.active_servers[0])
            masters.append(self.active_servers[1])
        else:
            # No active servers at all -- cannot reason about mastership.
            logger.error('Unknown cluster state')
            return enums.NodeStatus.Unknown
        if node_id in masters:
            logger.info('Node is a master, continuing')
            return enums.NodeStatus.Master
        else:
            logger.info('Node is not a master, nothing to do. Exiting')
            return enums.NodeStatus.Slave
def get_group_value(self, key):
    """This function returns value in autoscale_groups section associated with
    provided key.
    :param key: key name
    :returns: value associated with key, or None (after logging) when absent
    """
    logger = common.get_logger()
    # Config keys are stored lower-case.
    value = self._config.get(key.lower())
    if value is None:
        logger.error('Error: unable to get value for key "%s" in group "%s"',
                     key, self._group_name)
    return value
def get_webhook_values(self, policy, hook):
    """This function returns value in webhooks section of json file which is
    associated with provided key.
    :param policy: raxas.enums.ScaleDirection
    :param hook: raxas.enums.HookType
    :returns: value associated with key, or None (after logging) when missing
    """
    logger = common.get_logger()
    # Config section keys look like 'scale_up' / 'scale_down'.
    policy = 'scale_%s' % policy.name.lower()
    hook = hook.name.lower()
    try:
        return self._config['webhooks'][policy][hook]
    except KeyError:
        logger.error('Error: unable to get config value for '
                     '[\'%s\'][\'webhooks\'][\'%s\'][\'%s\']',
                     self._group_name, policy, hook)
        return None
def execute_webhook(self, policy, hook):
    """This function makes webhook calls.
    :param policy: raxas.enums.ScaleDirection
    :param hook: raxas.enums.HookType
    """
    logger = common.get_logger()
    logger.info('Executing webhook: scale_%s:%s', policy.name, hook.name)
    urls = self.get_webhook_values(policy, hook)
    if not urls:
        # get_webhook_values() returns None when the config entry is
        # missing; iterating over None would raise a TypeError here.
        return
    data = json.dumps(self._config)
    # NOTE(review): 'data' is already a JSON string, so json=data encodes
    # the config twice (receivers get a JSON-encoded string).  Kept as-is
    # for backward compatibility -- confirm receivers expect this.
    for url in urls:
        logger.info('Sending POST request to url: \'%s\'', url)
        try:
            response = requests.post(url, json=data)
            logger.info('Received status code %d from url: \'%s\'', response.status_code, url)
        except requests.exceptions.RequestException as error:
            logger.error(error)
def execute_policy(self, policy):
    """Execute the configured scale-up or scale-down policy.
    :param policy: raxas.enums.ScaleDirection
    :returns: enums.ScaleEvent.NoAction when refusing to scale below one
              active server, enums.ScaleEvent.Error on API failure,
              enums.ScaleEvent.Success otherwise
    """
    logger = common.get_logger()
    policy_id = self.get_group_value('scale_%s_policy' % policy.name)
    # Never scale below a single active server.
    if len(self.active_servers) == 1 and policy == enums.ScaleDirection.Down:
        logger.info('Current active server count is 1, will not scale down')
        return enums.ScaleEvent.NoAction
    try:
        self.scaling_group.get_policy(policy_id).execute()
    # NOTE(review): elsewhere this file catches pyrax.exc.PyraxException;
    # confirm pyrax.exceptions is a valid alias in the pyrax version used.
    except pyrax.exceptions.PyraxException as error:
        logger = common.get_logger()
        logger.error('Error scaling %s: %s', policy.name, error)
        return enums.ScaleEvent.Error
    else:
        return enums.ScaleEvent.Success
| apache-2.0 |
aerospike/aerospike-admin | lib/health/health_checker.py | 1 | 16248 | # Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from distutils.version import LooseVersion
import re
from lib.health.constants import (
ParserResultType,
HealthResultType,
HealthResultCounter,
AssertResultKey,
)
from lib.health.exceptions import SyntaxException, HealthException
from lib.health.parser import HealthParser
from lib.health.query import QUERIES
from lib.health.util import is_health_parser_variable
from lib.utils.util import parse_queries
from lib.view import terminal
VERSION_CONSTRAINT_PATTERN = "SET CONSTRAINT VERSION(.+)"
class HealthChecker:
    """Evaluates health-check queries against collected cluster data.

    Typical use: call set_health_input_data() once, then execute() one or
    more times.  Each execute() run parses a query source (the built-in
    QUERIES or a query file), evaluates every query through HealthParser,
    and returns a summary dict of assertion results, counters and any
    exceptions raised while processing.
    """

    def __init__(self):
        try:
            self.health_parser = HealthParser()
            self.health_parser.build()
        except Exception:
            # ply (the parser generator) may be unavailable; keep a None
            # parser and fail later, when a query is actually evaluated.
            self.health_parser = None
            pass
        self.verbose = False
        # True when the active "SET CONSTRAINT VERSION" filter matched no
        # nodes at all, so subsequent queries must be skipped.
        self.no_valid_version = False
        # True when the parser currently holds version-filtered data
        # rather than the full input data set.
        self.filtered_data_set_to_parser = False

    def _reset_counters(self):
        """Reset all per-run counters and per-run result containers."""
        self.status_counters = {}
        self.status_counters[HealthResultCounter.QUERY_COUNTER] = 0
        self.status_counters[HealthResultCounter.QUERY_SUCCESS_COUNTER] = 0
        self.status_counters[HealthResultCounter.QUERY_SKIPPED_COUNTER] = 0
        self.status_counters[HealthResultCounter.ASSERT_QUERY_COUNTER] = 0
        self.status_counters[HealthResultCounter.ASSERT_FAILED_COUNTER] = 0
        self.status_counters[HealthResultCounter.ASSERT_PASSED_COUNTER] = 0
        self.status_counters[HealthResultCounter.DEBUG_COUNTER] = 0
        self.status_counters[HealthResultCounter.SYNTAX_EXCEPTION_COUNTER] = 0
        self.status_counters[HealthResultCounter.HEALTH_EXCEPTION_COUNTER] = 0
        self.status_counters[HealthResultCounter.OTHER_EXCEPTION_COUNTER] = 0
        self.assert_outputs = {}
        self.health_exceptions = []
        self.syntax_exceptions = []
        self.other_exceptions = []
        self.debug_outputs = []

    def _increment_counter(self, counter):
        """Increment *counter* if it is a known status counter."""
        if counter and counter in self.status_counters:
            self.status_counters[counter] += 1

    def _set_parser_input(self, data):
        """Hand *data* to the parser; raise when no parser could be built."""
        try:
            self.health_parser.set_health_data(data)
        except Exception:
            raise Exception(
                "No parser available. Please check ply module installed or not."
            )

    def _reset_parser(self):
        """Clear parser caches and restore the unfiltered input data."""
        self.health_parser.clear_health_cache()
        if self.filtered_data_set_to_parser:
            # Healthchecker should work as setting input once and calling execute multiple times on same data.
            # So we need to reset parser input data if we set version filtered data.
            self._set_parser_input(self.health_input_data)

    def set_health_input_data(self, data):
        """Store *data* as the health input and feed it to the parser.

        :param data: dict of collected cluster data, keyed by snapshot
        :raises ValueError: when *data* is empty or not a dict
        """
        # NOTE(review): data is stored on self before validation; an invalid
        # value is still assigned even though ValueError is raised after.
        self.health_input_data = data
        if not data or not isinstance(data, dict):
            raise ValueError(
                terminal.fg_red()
                + "Wrong Input Data for HealthChecker"
                + terminal.fg_clear()
            )
        self._set_parser_input(data)

    def _create_health_result_dict(self):
        """Snapshot current counters/results into a deep-copied summary dict."""
        res = {}
        res[HealthResultType.STATUS_COUNTERS] = copy.deepcopy(self.status_counters)
        res[HealthResultType.EXCEPTIONS] = {}
        res[HealthResultType.EXCEPTIONS][
            HealthResultType.EXCEPTIONS_SYNTAX
        ] = copy.deepcopy(self.syntax_exceptions)
        res[HealthResultType.EXCEPTIONS][
            HealthResultType.EXCEPTIONS_PROCESSING
        ] = copy.deepcopy(self.health_exceptions)
        res[HealthResultType.EXCEPTIONS][
            HealthResultType.EXCEPTIONS_OTHER
        ] = copy.deepcopy(self.other_exceptions)
        res[HealthResultType.ASSERT] = copy.deepcopy(self.assert_outputs)
        res[HealthResultType.DEBUG_MESSAGES] = copy.deepcopy(self.debug_outputs)
        return res

    def _is_assert_query(self, query):
        """Return True when *query* contains an ASSERT clause."""
        if query and "ASSERT" in query:
            return True
        return False

    def _is_version_set_query(self, query):
        """Return a match object when *query* is a version constraint line."""
        return re.search(VERSION_CONSTRAINT_PATTERN, query)

    def _set_version_checker_function(self, line):
        """Build self.version_checker_fn from a SET CONSTRAINT VERSION line.

        Supported forms: <=v, <v, >=v, >v, =v, IN [v1, v2, ...], or a bare
        version string.  The value "all" (any case) disables filtering by
        setting the checker to None.
        """
        vp_l_e = "<=(.+)"
        vp_l = "<(.+)"
        vp_g_e = ">=(.+)"
        vp_g = ">(.+)"
        vp_e = "=(.+)"
        vp_in = r"IN \[(.+)\]"
        v_str = re.search(VERSION_CONSTRAINT_PATTERN, line).group(1).strip()
        # <= and >= must be tested before < and >, since the one-character
        # operator patterns would also match the two-character operators.
        if re.search(vp_l_e, v_str):
            v = re.search(vp_l_e, v_str).group(1)
            self.version_checker_fn = lambda x: LooseVersion(x.strip()) <= LooseVersion(
                v.strip()
            )
        elif re.search(vp_l, v_str):
            v = re.search(vp_l, v_str).group(1)
            self.version_checker_fn = lambda x: LooseVersion(x.strip()) < LooseVersion(
                v.strip()
            )
        elif re.search(vp_g_e, v_str):
            v = re.search(vp_g_e, v_str).group(1)
            self.version_checker_fn = lambda x: LooseVersion(x.strip()) >= LooseVersion(
                v.strip()
            )
        elif re.search(vp_g, v_str):
            v = re.search(vp_g, v_str).group(1)
            self.version_checker_fn = lambda x: LooseVersion(x.strip()) > LooseVersion(
                v.strip()
            )
        elif re.search(vp_e, v_str):
            v = re.search(vp_e, v_str).group(1).strip()
            if v.lower() == "all":
                self.version_checker_fn = None
            else:
                self.version_checker_fn = lambda x: LooseVersion(
                    x.strip()
                ) == LooseVersion(v)
        elif re.search(vp_in, v_str):
            v = re.search(vp_in, v_str).group(1).strip()
            v = [i.strip() for i in v.split(",")]
            if "ALL" in v or "all" in v:
                self.version_checker_fn = None
            else:
                self.version_checker_fn = lambda x: LooseVersion(x.strip()) in [
                    LooseVersion(i) for i in v
                ]
        else:
            v = v_str.strip()
            if v.lower() == "all":
                self.version_checker_fn = None
            else:
                self.version_checker_fn = lambda x: LooseVersion(
                    x.strip()
                ) == LooseVersion(v.strip())

    def _filter_nodes_to_remove(self, data):
        """
        Returns map as {cl1:v1, cl2:v2, ... } or 0 or 1
        where v can be 0 : means remove all nodes
        1 : means keep all nodes
        list of nodes : remove nodes from list
        """
        if not data or "METADATA" not in data or "CLUSTER" not in data["METADATA"]:
            # no metadata information available
            return 1
        sn_partial_count = 0
        sn_one_count = 0
        sn_zero_count = 0
        sn_node_dict = {}
        sn_total_clusters = 0
        for cl in data["METADATA"]["CLUSTER"].keys():
            sn_total_clusters += 1
            try:
                cl_one_count = 0
                cl_zero_count = 0
                cl_node_list = []
                cl_total_nodes = 0
                for n in data["METADATA"]["CLUSTER"][cl].keys():
                    cl_total_nodes += 1
                    try:
                        if not self.version_checker_fn(
                            data["METADATA"]["CLUSTER"][cl][n][("version", "KEY")]
                        ):
                            cl_zero_count += 1
                            cl_node_list.append(n)
                        else:
                            cl_one_count += 1
                    except Exception:
                        # Missing/unparsable version: keep the node.
                        cl_one_count += 1
                        pass
                if cl_total_nodes == cl_one_count:
                    # keep all nodes for this cluster
                    sn_node_dict[cl] = 1
                    sn_one_count += 1
                elif cl_total_nodes == cl_zero_count:
                    # remove all nodes for this cluster
                    sn_node_dict[cl] = 0
                    sn_zero_count += 1
                else:
                    # some nodes need to remove
                    sn_node_dict[cl] = cl_node_list
                    sn_partial_count += 1
            except Exception:
                sn_node_dict[cl] = 1
                sn_one_count += 1
        if sn_total_clusters == sn_one_count:
            # keep all nodes for all cluster
            return 1
        elif sn_total_clusters == sn_zero_count:
            # remove all nodes for all cluster, so remove this snapshot itself
            return 0
        else:
            # some nodes need to remove
            return sn_node_dict

    def _remove_node_data(self, data, remove_nodes):
        """Recursively drop nodes listed in *remove_nodes* from *data*.

        NOTE(review): _filter_nodes_to_remove keys its result by the
        METADATA cluster key, while this method looks it up with the
        (name, "CLUSTER") tuple key -- confirm the two key shapes match.
        """
        if not data or not isinstance(data, dict):
            return
        for _key in list(data.keys()):
            if isinstance(_key, tuple) and _key[1] == "CLUSTER":
                if _key not in remove_nodes or remove_nodes[_key] == 1:
                    continue
                if remove_nodes[_key] == 0:
                    data.pop(_key)
                    continue
                for n in list(data[_key].keys()):
                    if n in remove_nodes[_key]:
                        data[_key].pop(n)
                if not data[_key]:
                    data.pop(_key)
            else:
                self._remove_node_data(data[_key], remove_nodes)
                if not data[_key]:
                    data.pop(_key)

    def _filter_health_input_data(self):
        """Return a deep copy of the input data with nodes that fail
        self.version_checker_fn removed; empty snapshots are dropped."""
        data = copy.deepcopy(self.health_input_data)
        for sn in list(data.keys()):
            # SNAPSHOT level
            remove_nodes = self._filter_nodes_to_remove(data[sn])
            if remove_nodes == 1:
                continue
            elif remove_nodes == 0:
                data.pop(sn)
                continue
            else:
                self._remove_node_data(data[sn], remove_nodes)
                if not data[sn]:
                    data.pop(sn)
        return data

    def _filter_and_set_health_input_data(self, line):
        """Apply the version constraint in *line* and point the parser at
        the (possibly filtered) data set."""
        self._set_version_checker_function(line)
        if not self.version_checker_fn:
            # "all": no filtering; restore the full data set.
            self.no_valid_version = False
            self._set_parser_input(self.health_input_data)
            self.filtered_data_set_to_parser = False
        else:
            d = self._filter_health_input_data()
            if not d:
                # No node matches the constraint; later queries are skipped.
                self.no_valid_version = True
            else:
                self.no_valid_version = False
                self._set_parser_input(d)
                self.filtered_data_set_to_parser = True

    def _execute_query(self, query):
        """Evaluate a single query string through the health parser."""
        return self.health_parser.parse(query)

    def _add_assert_output(self, assert_out):
        """File *assert_out* under its category path in self.assert_outputs."""
        if not assert_out:
            return
        categories = assert_out[AssertResultKey.CATEGORY]
        assert_ptr = self.assert_outputs
        # Walk/create nested dicts for all but the last category level;
        # the final level holds a list of assert results.
        for c in categories[:-1]:
            if c not in assert_ptr:
                assert_ptr[c] = {}
            assert_ptr = assert_ptr[c]
        c = categories[-1]
        if c not in assert_ptr:
            assert_ptr[c] = []
        assert_ptr = assert_ptr[c]
        assert_ptr.append(assert_out)

    def _execute_queries(self, query_source=None, is_source_file=True):
        """Parse and run every query from *query_source*.

        :param query_source: file path or raw query string
        :param is_source_file: True when *query_source* is a file path
        :returns: True on completion
        :raises Exception: on missing/invalid input data or query source
        """
        self._reset_counters()
        if not self.health_input_data or not isinstance(self.health_input_data, dict):
            raise Exception("No Health Input Data available")
        if not query_source:
            raise Exception("No Input Query Source.")
        if not isinstance(query_source, str):
            raise Exception("Query input source is not valid")
        queries = parse_queries(query_source, is_file=is_source_file)
        if not queries:
            raise Exception("Wrong Health query source.")
        for query in queries:
            if not query:
                continue
            self._increment_counter(HealthResultCounter.QUERY_COUNTER)
            if query.lower() == "exit":
                self._increment_counter(HealthResultCounter.QUERY_SUCCESS_COUNTER)
                break
            result = None
            if self._is_version_set_query(query):
                # Version constraint lines change the active data filter
                # instead of being evaluated by the parser.
                self._filter_and_set_health_input_data(query)
                self._increment_counter(HealthResultCounter.QUERY_SUCCESS_COUNTER)
                continue
            if self.no_valid_version:
                # Current version filter matched no nodes; skip this query.
                self._increment_counter(HealthResultCounter.QUERY_SKIPPED_COUNTER)
                continue
            if self._is_assert_query(query):
                self._increment_counter(HealthResultCounter.ASSERT_QUERY_COUNTER)
            try:
                result = self._execute_query(query)
                self._increment_counter(HealthResultCounter.QUERY_SUCCESS_COUNTER)
            except SyntaxException as se:
                self._increment_counter(HealthResultCounter.SYNTAX_EXCEPTION_COUNTER)
                self.syntax_exceptions.append(
                    {
                        "index": self.status_counters[
                            HealthResultCounter.QUERY_COUNTER
                        ],
                        "query": query,
                        "error": str(se),
                    }
                )
            except HealthException as he:
                self._increment_counter(HealthResultCounter.HEALTH_EXCEPTION_COUNTER)
                self.health_exceptions.append(
                    {
                        "index": self.status_counters[
                            HealthResultCounter.QUERY_COUNTER
                        ],
                        "query": query,
                        "error": str(he),
                    }
                )
            except Exception as oe:
                self._increment_counter(HealthResultCounter.OTHER_EXCEPTION_COUNTER)
                self.other_exceptions.append(
                    {
                        "index": self.status_counters[
                            HealthResultCounter.QUERY_COUNTER
                        ],
                        "query": query,
                        "error": str(oe),
                    }
                )
            if result:
                try:
                    # Assert queries return (ASSERT, result-dict) tuples;
                    # anything else that is a parser variable is debug output.
                    if isinstance(result, tuple):
                        if result[0] == ParserResultType.ASSERT:
                            if result[1][AssertResultKey.SUCCESS]:
                                self._increment_counter(
                                    HealthResultCounter.ASSERT_PASSED_COUNTER
                                )
                            else:
                                self._increment_counter(
                                    HealthResultCounter.ASSERT_FAILED_COUNTER
                                )
                            self._add_assert_output(result[1])
                    elif is_health_parser_variable(result):
                        self._increment_counter(HealthResultCounter.DEBUG_COUNTER)
                        self.debug_outputs.append(result)
                except Exception:
                    pass
        return True

    def execute(self, query_file=None):
        """Run the built-in QUERIES (default) or *query_file* and return
        the health summary dict produced by _create_health_result_dict()."""
        health_summary = None
        if query_file is None:
            if not self._execute_queries(query_source=QUERIES, is_source_file=False):
                return {}
            health_summary = self._create_health_result_dict()
        elif query_file:
            if not self._execute_queries(query_source=query_file, is_source_file=True):
                return {}
            health_summary = self._create_health_result_dict()
        else:
            raise Exception("Wrong Query-file input for Health-Checker to execute")
        # Reset filter/parser state so the next execute() starts clean.
        self.no_valid_version = False
        self._reset_parser()
        self._reset_counters()
        return health_summary
| apache-2.0 |
DavidNorman/tensorflow | tensorflow/python/kernel_tests/batch_scatter_ops_test.py | 22 | 4576 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
indx = i[:-1] + (indx,)
ref[indx] = updates[i]
# Maps each TensorFlow scatter op under test to its numpy reference
# implementation.
_TF_OPS_TO_NUMPY = {
    state_ops.batch_scatter_update: _NumpyUpdate,
}
class ScatterTest(test.TestCase):
  """Tests state_ops.batch_scatter_update against the numpy reference."""

  def _VariableRankTest(self,
                        tf_scatter,
                        vtype,
                        itype,
                        repeat_indices=False,
                        updates_are_scalar=False,
                        method=False):
    """Compares *tf_scatter* against its numpy reference over several
    random index/update shapes for the given value and index dtypes."""
    np.random.seed(8)
    with self.cached_session(use_gpu=False):
      for indices_shape in (2,), (3, 7), (3, 4, 7):
        for extra_shape in (), (5,), (5, 9):
          # Generate random indices with no duplicates for easy numpy comparison
          sparse_dim = len(indices_shape) - 1
          indices = np.random.randint(
              indices_shape[sparse_dim], size=indices_shape, dtype=itype)
          updates = _AsType(
              np.random.randn(*(indices_shape + extra_shape)), vtype)
          old = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype)
          # Scatter via numpy
          new = old.copy()
          np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
          np_scatter(new, indices, updates)
          # Scatter via tensorflow
          ref = variables.Variable(old)
          ref.initializer.run()
          if method:
            ref.batch_scatter_update(ops.IndexedSlices(indices, updates))
          else:
            tf_scatter(ref, indices, updates).eval()
          self.assertAllClose(ref.eval(), new)

  @test_util.run_deprecated_v1
  def testVariableRankUpdate(self):
    """Runs the variable-rank comparison for each value/index dtype pair."""
    vtypes = [np.float32, np.float64]
    for vtype in vtypes:
      for itype in (np.int32, np.int64):
        self._VariableRankTest(
            state_ops.batch_scatter_update, vtype, itype)

  @test_util.run_deprecated_v1
  def testBooleanScatterUpdate(self):
    """Checks batch_scatter_update on a boolean variable."""
    with self.session(use_gpu=False) as session:
      var = variables.Variable([True, False])
      update0 = state_ops.batch_scatter_update(var, [1], [True])
      update1 = state_ops.batch_scatter_update(
          var, constant_op.constant(
              [0], dtype=dtypes.int64), [False])
      var.initializer.run()
      session.run([update0, update1])
      self.assertAllEqual([False, True], self.evaluate(var))

  @test_util.run_deprecated_v1
  def testScatterOutOfRange(self):
    """Checks that out-of-range indices raise an op error."""
    params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
    updates = np.array([-3, -4, -5]).astype(np.float32)
    with self.session(use_gpu=False):
      ref = variables.Variable(params)
      ref.initializer.run()
      # Indices all in range, no problem.
      indices = np.array([2, 0, 5])
      state_ops.batch_scatter_update(ref, indices, updates).eval()
      # Test some out of range errors.
      indices = np.array([-1, 0, 5])
      with self.assertRaisesOpError(
          r'indices\[0\] = \[-1\] does not index into shape \[6\]'):
        state_ops.batch_scatter_update(ref, indices, updates).eval()
      indices = np.array([2, 0, 6])
      with self.assertRaisesOpError(r'indices\[2\] = \[6\] does not index into '
                                    r'shape \[6\]'):
        state_ops.batch_scatter_update(ref, indices, updates).eval()
# Allow running this test file directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
evansd/django | tests/staticfiles_tests/test_forms.py | 47 | 1551 | from urllib.parse import urljoin
from django.contrib.staticfiles import storage
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.forms import Media
from django.test import SimpleTestCase, override_settings
class StaticTestStorage(storage.StaticFilesStorage):
    """Storage stub that maps asset names onto a fixed external base URL."""
    def url(self, name):
        return urljoin('https://example.com/assets/', name)
@override_settings(
    STATIC_URL='http://media.example.com/static/',
    INSTALLED_APPS=('django.contrib.staticfiles', ),
    STATICFILES_STORAGE='staticfiles_tests.test_forms.StaticTestStorage',
)
class StaticFilesFormsMediaTestCase(SimpleTestCase):
    """Checks that form Media resolves paths through the staticfiles storage."""

    def test_absolute_url(self):
        # Relative paths go through the storage; absolute URLs pass through.
        m = Media(
            css={'all': ('path/to/css1', '/path/to/css2')},
            js=(
                '/path/to/js1',
                'http://media.other.com/path/to/js2',
                'https://secure.other.com/path/to/js3',
                static('relative/path/to/js4'),
            ),
        )
        self.assertEqual(
            str(m),
            """<link href="https://example.com/assets/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="https://example.com/assets/relative/path/to/js4"></script>"""
        )
| bsd-3-clause |
JohnDevitt/appengine-django-skeleton-master | lib/django/middleware/csrf.py | 86 | 8801 | """
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import logging
import re
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.encoding import force_text
from django.utils.http import same_origin
logger = logging.getLogger('django.request')

# Human-readable rejection reasons passed to the CSRF failure view.
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."

# Length (in characters) of a CSRF token/cookie value.
CSRF_KEY_LENGTH = 32
def _get_failure_view():
    """
    Returns the view to be used for CSRF rejections
    """
    # Resolved lazily so settings.CSRF_FAILURE_VIEW can be a dotted path.
    return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
    """Return a fresh random CSRF key of CSRF_KEY_LENGTH characters."""
    return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
    """
    Returns the CSRF token required for a POST form. The token is an
    alphanumeric value.

    A side effect of calling this function is to make the csrf_protect
    decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
    header to the outgoing response. For this reason, you may need to use this
    function lazily, as is done by the csrf context processor.
    """
    meta = request.META
    meta["CSRF_COOKIE_USED"] = True
    return meta.get("CSRF_COOKIE")
def rotate_token(request):
    """
    Changes the CSRF token in use for a request - should be done on login
    for security purposes.
    """
    meta = request.META
    meta["CSRF_COOKIE_USED"] = True
    meta["CSRF_COOKIE"] = _get_new_csrf_key()
def _sanitize_token(token):
    """Strip non-alphanumerics from *token*; regenerate it when the result
    is empty or the input is over-long."""
    # Over-long cookies are replaced outright rather than truncated.
    if len(token) > CSRF_KEY_LENGTH:
        return _get_new_csrf_key()
    sanitized = re.sub('[^a-zA-Z0-9]+', '', force_text(token))
    if not sanitized:
        # In case the cookie has been truncated to nothing at some point.
        return _get_new_csrf_key()
    return sanitized
class CsrfViewMiddleware(object):
    """
    Middleware that requires a present and correct csrfmiddlewaretoken
    for POST requests that have a CSRF cookie, and sets an outgoing
    CSRF cookie.

    This middleware should be used in conjunction with the csrf_token template
    tag.
    """
    # The _accept and _reject methods currently only exist for the sake of the
    # requires_csrf_token decorator.
    def _accept(self, request):
        """Mark the request as CSRF-checked and allow it to proceed."""
        # Avoid checking the request twice by adding a custom attribute to
        # request. This will be relevant when both decorator and middleware
        # are used.
        request.csrf_processing_done = True
        return None

    def _reject(self, request, reason):
        """Log the rejection and return the configured CSRF failure view."""
        logger.warning('Forbidden (%s): %s', reason, request.path,
            extra={
                'status_code': 403,
                'request': request,
            }
        )
        return _get_failure_view()(request, reason=reason)

    def process_view(self, request, callback, callback_args, callback_kwargs):
        """Enforce CSRF checks for unsafe HTTP methods before the view runs.

        Returns None to continue processing, or the failure view's response
        when the check fails.
        """
        if getattr(request, 'csrf_processing_done', False):
            return None
        try:
            csrf_token = _sanitize_token(
                request.COOKIES[settings.CSRF_COOKIE_NAME])
            # Use same token next time
            request.META['CSRF_COOKIE'] = csrf_token
        except KeyError:
            csrf_token = None
            # Generate token and store it in the request, so it's
            # available to the view.
            request.META["CSRF_COOKIE"] = _get_new_csrf_key()

        # Wait until request.META["CSRF_COOKIE"] has been manipulated before
        # bailing out, so that get_token still works
        if getattr(callback, 'csrf_exempt', False):
            return None

        # Assume that anything not defined as 'safe' by RFC2616 needs protection
        if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
            if getattr(request, '_dont_enforce_csrf_checks', False):
                # Mechanism to turn off CSRF checks for test suite.
                # It comes after the creation of CSRF cookies, so that
                # everything else continues to work exactly the same
                # (e.g. cookies are sent, etc.), but before any
                # branches that call reject().
                return self._accept(request)

            if request.is_secure():
                # Suppose user visits http://example.com/
                # An active network attacker (man-in-the-middle, MITM) sends a
                # POST form that targets https://example.com/detonate-bomb/ and
                # submits it via JavaScript.
                #
                # The attacker will need to provide a CSRF cookie and token, but
                # that's no problem for a MITM and the session-independent
                # nonce we're using. So the MITM can circumvent the CSRF
                # protection. This is true for any HTTP connection, but anyone
                # using HTTPS expects better! For this reason, for
                # https://example.com/ we need additional protection that treats
                # http://example.com/ as completely untrusted. Under HTTPS,
                # Barth et al. found that the Referer header is missing for
                # same-domain requests in only about 0.2% of cases or less, so
                # we can use strict Referer checking.
                referer = force_text(
                    request.META.get('HTTP_REFERER'),
                    strings_only=True,
                    errors='replace'
                )
                if referer is None:
                    return self._reject(request, REASON_NO_REFERER)

                # Note that request.get_host() includes the port.
                good_referer = 'https://%s/' % request.get_host()
                if not same_origin(referer, good_referer):
                    reason = REASON_BAD_REFERER % (referer, good_referer)
                    return self._reject(request, reason)

            if csrf_token is None:
                # No CSRF cookie. For POST requests, we insist on a CSRF cookie,
                # and in this way we can avoid all CSRF attacks, including login
                # CSRF.
                return self._reject(request, REASON_NO_CSRF_COOKIE)

            # Check non-cookie token for match.
            request_csrf_token = ""
            if request.method == "POST":
                try:
                    request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
                except IOError:
                    # Handle a broken connection before we've completed reading
                    # the POST data. process_view shouldn't raise any
                    # exceptions, so we'll ignore and serve the user a 403
                    # (assuming they're still listening, which they probably
                    # aren't because of the error).
                    pass

            if request_csrf_token == "":
                # Fall back to X-CSRFToken, to make things easier for AJAX,
                # and possible for PUT/DELETE.
                request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')

            if not constant_time_compare(request_csrf_token, csrf_token):
                return self._reject(request, REASON_BAD_TOKEN)

        return self._accept(request)

    def process_response(self, request, response):
        """Set/refresh the CSRF cookie on responses whose view used the token."""
        if getattr(response, 'csrf_processing_done', False):
            return response

        # If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was
        # never called, probably because a request middleware returned a response
        # (for example, contrib.auth redirecting to a login page).
        if request.META.get("CSRF_COOKIE") is None:
            return response

        if not request.META.get("CSRF_COOKIE_USED", False):
            return response

        # Set the CSRF cookie even if it's already set, so we renew
        # the expiry timer.
        response.set_cookie(settings.CSRF_COOKIE_NAME,
                            request.META["CSRF_COOKIE"],
                            max_age=settings.CSRF_COOKIE_AGE,
                            domain=settings.CSRF_COOKIE_DOMAIN,
                            path=settings.CSRF_COOKIE_PATH,
                            secure=settings.CSRF_COOKIE_SECURE,
                            httponly=settings.CSRF_COOKIE_HTTPONLY
                            )
        # Content varies with the CSRF cookie, so set the Vary header.
        patch_vary_headers(response, ('Cookie',))
        response.csrf_processing_done = True
        return response
| bsd-3-clause |
makerbot/ReplicatorG | skein_engines/skeinforge-47/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/gts.py | 13 | 4218 | """
This page is in the table of contents.
The gts.py script is an import translator plugin to get a carving from an gts file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of an gts file and returns the carving.
The GNU Triangulated Surface (.gts) format is described at:
http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE
Quoted from http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE
"All the lines beginning with GTS_COMMENTS (#!) are ignored. The first line contains three unsigned integers separated by spaces. The first integer is the number of vertexes, nv, the second is the number of edges, ne and the third is the number of faces, nf.
Follows nv lines containing the x, y and z coordinates of the vertexes. Follows ne lines containing the two indices (starting from one) of the vertexes of each edge. Follows nf lines containing the three ordered indices (also starting from one) of the edges of each face.
The format described above is the least common denominator to all GTS files. Consistent with an object-oriented approach, the GTS file format is extensible. Each of the lines of the file can be extended with user-specific attributes accessible through the read() and write() virtual methods of each of the objects written (surface, vertexes, edges or faces). When read with different object classes, these extra attributes are just ignored."
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_tools import face
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import gcodec
# Module metadata.
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCarving(fileName):
    "Get the carving for the gts file."
    # Parse the file's text into a fresh TriangleMesh.
    return getFromGNUTriangulatedSurfaceText( archive.getFileText(fileName), triangle_mesh.TriangleMesh() )
def getFromGNUTriangulatedSurfaceText( gnuTriangulatedSurfaceText, triangleMesh ):
    "Initialize from a GNU Triangulated Surface Text."
    if gnuTriangulatedSurfaceText == '':
        return None
    lines = archive.getTextLines( gnuTriangulatedSurfaceText )
    # Drop GTS comment lines, which start with '#' or '!'.
    linesWithoutComments = []
    for line in lines:
        if len(line) > 0:
            firstCharacter = line[0]
            if firstCharacter != '#' and firstCharacter != '!':
                linesWithoutComments.append(line)
    # Header line: vertex count, edge count, face count.
    splitLine = linesWithoutComments[0].split()
    numberOfVertexes = int( splitLine[0] )
    numberOfEdges = int(splitLine[1])
    numberOfFaces = int( splitLine[2] )
    # NOTE(review): faceTriples is never used below -- candidate for removal.
    faceTriples = []
    # Vertex section: x, y and z coordinates per line.
    for vertexIndex in xrange( numberOfVertexes ):
        line = linesWithoutComments[ vertexIndex + 1 ]
        splitLine = line.split()
        vertex = Vector3( float( splitLine[0] ), float(splitLine[1]), float( splitLine[2] ) )
        triangleMesh.vertexes.append(vertex)
    # Edge section: two 1-based vertex indices per line (converted to 0-based).
    edgeStart = numberOfVertexes + 1
    for edgeIndex in xrange( numberOfEdges ):
        line = linesWithoutComments[ edgeIndex + edgeStart ]
        splitLine = line.split()
        vertexIndexes = []
        for word in splitLine[ : 2 ]:
            vertexIndexes.append( int(word) - 1 )
        edge = face.Edge().getFromVertexIndexes( edgeIndex, vertexIndexes )
        triangleMesh.edges.append( edge )
    # Face section: three 1-based edge indices per line (converted to 0-based).
    faceStart = edgeStart + numberOfEdges
    for faceIndex in xrange( numberOfFaces ):
        line = linesWithoutComments[ faceIndex + faceStart ]
        splitLine = line.split()
        edgeIndexes = []
        for word in splitLine[ : 3 ]:
            edgeIndexes.append( int(word) - 1 )
        triangleMesh.faces.append( face.Face().getFromEdgeIndexes( edgeIndexes, triangleMesh.edges, faceIndex ) )
    return triangleMesh
| gpl-2.0 |
jbreitbart/autopin-plus | vendor/fast-lib/vendor/mosquitto-1.3.5/test/lib/02-subscribe-qos1.py | 19 | 2299 | #!/usr/bin/env python
# Test whether a client sends a correct SUBSCRIBE to a topic with QoS 1.
# The client should connect to port 1888 with keepalive=60, clean session set,
# and client id subscribe-qos1-test
# The test will send a CONNACK message to the client with rc=0. Upon receiving
# the CONNACK and verifying that rc=0, the client should send a SUBSCRIBE
# message to subscribe to topic "qos1/test" with QoS=1. If rc!=0, the client
# should exit with an error.
# Upon receiving the correct SUBSCRIBE message, the test will reply with a
# SUBACK message with the accepted QoS set to 1. On receiving the SUBACK
# message, the client should send a DISCONNECT message.
import inspect
import os
import subprocess
import socket
import sys
import time
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
# Exit status: stays non-zero unless the full packet exchange is observed.
rc = 1
keepalive = 60
# Pre-build every packet the client under test must send or receive.
connect_packet = mosq_test.gen_connect("subscribe-qos1-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "qos1/test", 1)
suback_packet = mosq_test.gen_suback(mid, 1)

# Act as a fake broker: listen on port 1888 for the client under test.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)

# The client binary and its arguments come from the command line; point it
# at the locally built libraries.
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
    pp = env['PYTHONPATH']
except KeyError:
    pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = subprocess.Popen(client_args, env=env)

try:
    (conn, address) = sock.accept()
    conn.settimeout(10)
    # Expected exchange, strictly in order:
    # CONNECT -> CONNACK -> SUBSCRIBE -> SUBACK -> DISCONNECT.
    if mosq_test.expect_packet(conn, "connect", connect_packet):
        conn.send(connack_packet)
        if mosq_test.expect_packet(conn, "subscribe", subscribe_packet):
            conn.send(suback_packet)
            if mosq_test.expect_packet(conn, "disconnect", disconnect_packet):
                rc = 0
    conn.close()
finally:
    # Always reap the child process and release the listening socket.
    client.terminate()
    client.wait()
    sock.close()

exit(rc)
| gpl-3.0 |
zerkrx/zerkbox | lib/websockets/server.py | 8 | 14166 | """
The :mod:`websockets.server` module defines a simple WebSocket server API.
"""
import asyncio
import collections.abc
import email.message
import logging
from .compatibility import asyncio_ensure_future
from .exceptions import InvalidHandshake, InvalidOrigin
from .handshake import build_response, check_request
from .http import USER_AGENT, read_request
from .protocol import CONNECTING, OPEN, WebSocketCommonProtocol
__all__ = ['serve', 'WebSocketServerProtocol']
logger = logging.getLogger(__name__)
class WebSocketServerProtocol(WebSocketCommonProtocol):
    """
    Complete WebSocket server implementation as an :class:`asyncio.Protocol`.

    This class inherits most of its methods from
    :class:`~websockets.protocol.WebSocketCommonProtocol`.

    For the sake of simplicity, it doesn't rely on a full HTTP implementation.
    Its support for HTTP responses is very limited.
    """

    # Server-side connections start in CONNECTING until the opening
    # handshake completes (see handshake(), which flips this to OPEN).
    state = CONNECTING

    def __init__(self, ws_handler, ws_server, *,
                 origins=None, subprotocols=None, extra_headers=None, **kwds):
        # ws_handler: coroutine called with (protocol, path) after the
        # opening handshake succeeds.
        self.ws_handler = ws_handler
        # ws_server: the WebSocketServer tracking open connections.
        self.ws_server = ws_server
        self.origins = origins
        self.subprotocols = subprotocols
        self.extra_headers = extra_headers
        super().__init__(**kwds)

    def connection_made(self, transport):
        super().connection_made(transport)
        # Register the connection with the server when creating the handler
        # task. (Registering at the beginning of the handler coroutine would
        # create a race condition between the creation of the task, which
        # schedules its execution, and the moment the handler starts running.)
        self.ws_server.register(self)
        self.handler_task = asyncio_ensure_future(
            self.handler(), loop=self.loop)

    @asyncio.coroutine
    def handler(self):
        # Since this method doesn't have a caller able to handle exceptions,
        # it attemps to log relevant ones and close the connection properly.
        try:
            # Step 1: opening handshake. Failures are mapped to a minimal
            # raw HTTP error response before re-raising.
            try:
                path = yield from self.handshake(
                    origins=self.origins, subprotocols=self.subprotocols,
                    extra_headers=self.extra_headers)
            except ConnectionError as exc:
                logger.info('Connection error during opening handshake', exc_info=True)
                raise
            except Exception as exc:
                if self._is_server_shutting_down(exc):
                    response = ('HTTP/1.1 503 Service Unavailable\r\n\r\n'
                                'Server is shutting down.')
                elif isinstance(exc, InvalidOrigin):
                    response = 'HTTP/1.1 403 Forbidden\r\n\r\n' + str(exc)
                elif isinstance(exc, InvalidHandshake):
                    response = 'HTTP/1.1 400 Bad Request\r\n\r\n' + str(exc)
                else:
                    logger.warning("Error in opening handshake", exc_info=True)
                    response = ('HTTP/1.1 500 Internal Server Error\r\n\r\n'
                                'See server log for more information.')
                self.writer.write(response.encode())
                raise

            # Step 2: run the user-supplied connection handler.
            try:
                yield from self.ws_handler(self, path)
            except Exception as exc:
                if self._is_server_shutting_down(exc):
                    # 1001 = "going away" close code.
                    yield from self.fail_connection(1001)
                else:
                    logger.error("Error in connection handler", exc_info=True)
                    # 1011 = unexpected server error.
                    yield from self.fail_connection(1011)
                raise

            # Step 3: closing handshake.
            try:
                yield from self.close()
            except ConnectionError as exc:
                # NOTE(review): this shutdown check has no effect -- the log
                # and re-raise below run in every case; presumably logging was
                # meant to be skipped during shutdown (confirm upstream).
                if self._is_server_shutting_down(exc):
                    pass
                logger.info('Connection error in closing handshake', exc_info=True)
                raise
            except Exception as exc:
                if self._is_server_shutting_down(exc):
                    pass
                else:
                    logger.warning("Error in closing handshake", exc_info=True)
                raise
        except Exception:
            # Last-ditch attempt to avoid leaking connections on errors.
            try:
                self.writer.close()
            except Exception:                           # pragma: no cover
                pass
        finally:
            # Unregister the connection with the server when the handler task
            # terminates. Registration is tied to the lifecycle of the handler
            # task because the server waits for tasks attached to registered
            # connections before terminating.
            self.ws_server.unregister(self)

    def _is_server_shutting_down(self, exc):
        """
        Decide whether an exception means that the server is shutting down.
        """
        # WebSocketServer.close() cancels handler tasks after setting
        # `closing`; a CancelledError seen then is an orderly shutdown.
        return (
            isinstance(exc, asyncio.CancelledError) and
            self.ws_server.closing
        )

    @asyncio.coroutine
    def handshake(self, origins=None, subprotocols=None, extra_headers=None):
        """
        Perform the server side of the opening handshake.

        If provided, ``origins`` is a list of acceptable HTTP Origin values.
        Include ``''`` if the lack of an origin is acceptable.

        If provided, ``subprotocols`` is a list of supported subprotocols in
        order of decreasing preference.

        If provided, ``extra_headers`` sets additional HTTP response headers.
        It can be a mapping or an iterable of (name, value) pairs. It can also
        be a callable taking the request path and headers in arguments.

        Return the URI of the request.
        """
        # Read handshake request.
        try:
            path, headers = yield from read_request(self.reader)
        except ValueError as exc:
            raise InvalidHandshake("Malformed HTTP message") from exc
        self.request_headers = headers
        self.raw_request_headers = list(headers.raw_items())

        # check_request validates the WebSocket request headers and returns
        # the Sec-WebSocket-Key used to build the response.
        get_header = lambda k: headers.get(k, '')
        key = check_request(get_header)

        if origins is not None:
            origin = get_header('Origin')
            # An absent Origin header yields [''], so putting '' in `origins`
            # explicitly allows origin-less requests.
            if not set(origin.split() or ['']) <= set(origins):
                raise InvalidOrigin("Origin not allowed: {}".format(origin))

        if subprotocols is not None:
            protocol = get_header('Sec-WebSocket-Protocol')
            if protocol:
                client_subprotocols = [p.strip() for p in protocol.split(',')]
                self.subprotocol = self.select_subprotocol(
                    client_subprotocols, subprotocols)

        # Build the response headers.
        headers = []
        set_header = lambda k, v: headers.append((k, v))
        set_header('Server', USER_AGENT)
        if self.subprotocol:
            set_header('Sec-WebSocket-Protocol', self.subprotocol)
        if extra_headers is not None:
            if callable(extra_headers):
                extra_headers = extra_headers(path, self.raw_request_headers)
            if isinstance(extra_headers, collections.abc.Mapping):
                extra_headers = extra_headers.items()
            for name, value in extra_headers:
                set_header(name, value)
        build_response(set_header, key)

        self.response_headers = email.message.Message()
        for name, value in headers:
            self.response_headers[name] = value
        self.raw_response_headers = headers

        # Send handshake response. Since the status line and headers only
        # contain ASCII characters, we can keep this simple.
        response = ['HTTP/1.1 101 Switching Protocols']
        response.extend('{}: {}'.format(k, v) for k, v in headers)
        response.append('\r\n')
        response = '\r\n'.join(response).encode()
        self.writer.write(response)

        assert self.state == CONNECTING
        self.state = OPEN
        self.opening_handshake.set_result(True)

        return path

    @staticmethod
    def select_subprotocol(client_protos, server_protos):
        """
        Pick a subprotocol among those offered by the client.
        """
        common_protos = set(client_protos) & set(server_protos)
        if not common_protos:
            return None
        # Prefer the protocol appearing earliest in both preference lists
        # (lowest sum of indexes wins).
        priority = lambda p: client_protos.index(p) + server_protos.index(p)
        return sorted(common_protos, key=priority)[0]
class WebSocketServer(asyncio.AbstractServer):
    """
    Wrapper for :class:`~asyncio.Server` that triggers the closing handshake.
    """

    def __init__(self, loop):
        # Store a reference to loop to avoid relying on self.server._loop.
        self.loop = loop
        # Flipped to True by close(); protocols consult this flag to detect
        # an orderly shutdown.
        self.closing = False
        # Currently open WebSocketServerProtocol instances.
        self.websockets = set()

    def wrap(self, server):
        """
        Attach to a given :class:`~asyncio.Server`.

        Since :meth:`~asyncio.BaseEventLoop.create_server` doesn't support
        injecting a custom ``Server`` class, a simple solution that doesn't
        rely on private APIs is to:

        - instantiate a :class:`WebSocketServer`
        - give the protocol factory a reference to that instance
        - call :meth:`~asyncio.BaseEventLoop.create_server` with the factory
        - attach the resulting :class:`~asyncio.Server` with this method
        """
        self.server = server

    def register(self, protocol):
        # Called by the protocol's connection_made().
        self.websockets.add(protocol)

    def unregister(self, protocol):
        # Called when the protocol's handler task terminates.
        self.websockets.remove(protocol)

    def close(self):
        """
        Stop accepting new connections and close open connections.
        """
        # Make a note that the server is shutting down. Websocket connections
        # check this attribute to decide to send a "going away" close code.
        self.closing = True
        # Stop accepting new connections.
        self.server.close()
        # Close open connections. For each connection, two tasks are running:
        # 1. self.worker_task shuffles messages between the network and queues
        # 2. self.handler_task runs the opening handshake, the handler provided
        #    by the user and the closing handshake
        # In the general case, cancelling the handler task will cause the
        # handler provided by the user to exit with a CancelledError, which
        # will then cause the worker task to terminate.
        for websocket in self.websockets:
            websocket.handler_task.cancel()

    @asyncio.coroutine
    def wait_closed(self):
        """
        Wait until all connections are closed.

        This method must be called after :meth:`close()`.
        """
        # asyncio.wait doesn't accept an empty first argument.
        if self.websockets:
            # The handler or the worker task can terminate first, depending
            # on how the client behaves and the server is implemented.
            yield from asyncio.wait(
                [websocket.handler_task for websocket in self.websockets] +
                [websocket.worker_task for websocket in self.websockets],
                loop=self.loop)
        yield from self.server.wait_closed()
@asyncio.coroutine
def serve(ws_handler, host=None, port=None, *,
          klass=WebSocketServerProtocol,
          timeout=10, max_size=2 ** 20, max_queue=2 ** 5,
          loop=None, legacy_recv=False,
          origins=None, subprotocols=None, extra_headers=None,
          **kwds):
    """
    This coroutine creates a WebSocket server.

    It yields a :class:`~asyncio.Server` which provides:

    * a :meth:`~asyncio.Server.close` method that closes open connections with
      status code 1001 and stops accepting new connections
    * a :meth:`~asyncio.Server.wait_closed` coroutine that waits until closing
      handshakes complete and connections are closed.

    ``ws_handler`` is the WebSocket handler. It must be a coroutine accepting
    two arguments: a :class:`WebSocketServerProtocol` and the request URI.

    :func:`serve` is a wrapper around the event loop's
    :meth:`~asyncio.BaseEventLoop.create_server` method. ``host``, ``port`` as
    well as extra keyword arguments are passed to
    :meth:`~asyncio.BaseEventLoop.create_server`.

    For example, you can set the ``ssl`` keyword argument to a
    :class:`~ssl.SSLContext` to enable TLS.

    The behavior of the ``timeout``, ``max_size``, and ``max_queue`` optional
    arguments is described the documentation of
    :class:`~websockets.protocol.WebSocketCommonProtocol`.

    :func:`serve` also accepts the following optional arguments:

    * ``origins`` defines acceptable Origin HTTP headers — include
      ``''`` if the lack of an origin is acceptable
    * ``subprotocols`` is a list of supported subprotocols in order of
      decreasing preference
    * ``extra_headers`` sets additional HTTP response headers — it can be a
      mapping, an iterable of (name, value) pairs, or a callable taking the
      request path and headers in arguments.

    Whenever a client connects, the server accepts the connection, creates a
    :class:`WebSocketServerProtocol`, performs the opening handshake, and
    delegates to the WebSocket handler. Once the handler completes, the server
    performs the closing handshake and closes the connection.

    Since there's no useful way to propagate exceptions triggered in handlers,
    they're sent to the ``'websockets.server'`` logger instead. Debugging is
    much easier if you configure logging to print them::

        import logging
        logger = logging.getLogger('websockets.server')
        logger.setLevel(logging.ERROR)
        logger.addHandler(logging.StreamHandler())
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    ws_server = WebSocketServer(loop)

    # TLS is enabled iff an `ssl` argument is forwarded to create_server.
    secure = kwds.get('ssl') is not None

    # Each accepted connection gets its own protocol instance, sharing the
    # single ws_server registry.
    factory = lambda: klass(
        ws_handler, ws_server,
        host=host, port=port, secure=secure,
        timeout=timeout, max_size=max_size, max_queue=max_queue,
        loop=loop, legacy_recv=legacy_recv,
        origins=origins, subprotocols=subprotocols,
        extra_headers=extra_headers,
    )
    server = yield from loop.create_server(factory, host, port, **kwds)

    # Give the wrapper a handle on the underlying asyncio.Server.
    ws_server.wrap(server)

    return ws_server
| gpl-3.0 |
mcella/django | django/contrib/gis/utils/ogrinfo.py | 564 | 1984 | """
This module includes some utility functions for inspecting the layout
of a GDAL data source -- the functionality is analogous to the output
produced by the `ogrinfo` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.geometries import GEO_CLASSES
def ogrinfo(data_source, num_features=10):
    """
    Walks the available layers in the supplied `data_source`, displaying
    the fields for the first `num_features` features.

    `data_source` may be a path (string) or a `DataSource` instance;
    any other type raises an Exception (kept for backward compatibility).
    """
    # Checking the parameters.
    if isinstance(data_source, str):
        data_source = DataSource(data_source)
    elif not isinstance(data_source, DataSource):
        raise Exception('Data source parameter must be a string or a DataSource object.')

    for i, layer in enumerate(data_source):
        print("data source : %s" % data_source.name)
        print("==== layer %s" % i)
        print(" shape type: %s" % GEO_CLASSES[layer.geom_type.num].__name__)
        print(" # features: %s" % len(layer))
        print(" srs: %s" % layer.srs)
        extent_tup = layer.extent.tuple
        print(" extent: %s - %s" % (extent_tup[0:2], extent_tup[2:4]))
        print("Displaying the first %s features ====" % num_features)

        # Right-align field names to the longest one. The previous
        # `max(*map(len, layer.fields))` raised TypeError when the layer had
        # exactly one field (max() received a single int); passing the
        # iterable directly — with a default for field-less layers — is safe.
        width = max(map(len, layer.fields), default=0)
        fmt = " %%%ss: %%s" % width
        for j, feature in enumerate(layer[:num_features]):
            print("=== Feature %s" % j)
            for fld_name in layer.fields:
                type_name = feature[fld_name].type_name
                output = fmt % (fld_name, type_name)
                val = feature.get(fld_name)
                if val:
                    # Quote string values to distinguish them from numbers.
                    if isinstance(val, str):
                        val_fmt = ' ("%s")'
                    else:
                        val_fmt = ' (%s)'
                    output += val_fmt % val
                else:
                    output += ' (None)'
                print(output)
# For backwards compatibility.
sample = ogrinfo
| bsd-3-clause |
litecoin-project/litecore-litecoin | qa/rpc-tests/zmq_test.py | 19 | 3264 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test ZMQ interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import zmq
import struct
import http.client
import urllib.parse
class ZMQTest (BitcoinTestFramework):
    """Exercises the ZMQ notification interface: hashtx and hashblock
    notifications must arrive with monotonically increasing sequence
    numbers and match the hashes reported over RPC."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 4

    # Class attribute: must live on the class body (not as a local inside
    # __init__) so that self.port resolves in setup_nodes().
    port = 29332

    def setup_nodes(self):
        # Subscribe to both notification topics before the nodes start
        # publishing, so no message is missed.
        self.zmqContext = zmq.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
        # Only node 0 is started with ZMQ publishing enabled.
        return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
            ['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
            [],
            [],
            []
        ])

    def run_test(self):
        self.sync_all()

        # Generating one block produces one coinbase tx notification
        # followed by one block notification.
        genhashes = self.nodes[0].generate(1)
        self.sync_all()

        print("listen...")
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"hashtx")
        body = msg[1]
        nseq = msg[2]  # NOTE(review): unused; the sequence is re-read from msg[-1] below
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, 0) #must be sequence 0 on hashtx

        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, 0) #must be sequence 0 on hashblock
        blkhash = bytes_to_hex_str(body)
        assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq

        # Generate n blocks on another node: expect n hashtx plus n
        # hashblock messages (2n total), with hashblock sequence numbers
        # increasing by one per block.
        n = 10
        genhashes = self.nodes[1].generate(n)
        self.sync_all()

        zmqHashes = []
        blockcount = 0
        for x in range(0,n*2):
            msg = self.zmqSubSocket.recv_multipart()
            topic = msg[0]
            body = msg[1]
            if topic == b"hashblock":
                zmqHashes.append(bytes_to_hex_str(body))
                msgSequence = struct.unpack('<I', msg[-1])[-1]
                assert_equal(msgSequence, blockcount+1)
                blockcount += 1

        for x in range(0,n):
            assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq

        #test tx from a second node
        hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
        self.sync_all()

        # now we should receive a zmq msg because the tx was broadcast
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        hashZMQ = ""
        if topic == b"hashtx":
            hashZMQ = bytes_to_hex_str(body)
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            assert_equal(msgSequence, blockcount+1)

        assert_equal(hashRPC, hashZMQ) #txid from RPC must be equal to the hash received over zmq
if __name__ == '__main__':
ZMQTest ().main ()
| mit |
joyaether/zxing | cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/sunf95.py | 34 | 2168 | """SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 5023 2010/06/14 22:05:46 scons"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
def generate(env):
    """Populate *env* with the Builders and construction variables needed
    to compile Fortran 95 sources with Sun Studio's f95 compiler."""
    add_all_to_env(env)

    # Prefer a compiler actually present on the system; fall back to 'f95'.
    detected = env.Detect(compilers)
    compiler = detected if detected else 'f95'
    for var in ('FORTRAN', 'F95'):
        env[var] = compiler

    # Shared-object builds reuse the static flags plus -KPIC.
    env['SHFORTRAN'] = '$FORTRAN'
    env['SHF95'] = '$F95'
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
    env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
    # The tool is usable iff one of the known compiler executables is found
    # on the path; returns the detected name (truthy) or None.
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
allen-fdes/python_demo | venv/Lib/site-packages/pip/_vendor/requests/structures.py | 1160 | 2977 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
class CaseInsensitiveDict(collections.MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``collections.MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Internal store maps lowercased key -> (original key, value).
        self._store = dict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Keyed by the lowercased name; the original spelling is kept so
        # iteration can reproduce it.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        for cased_key, _value in self._store.values():
            yield cased_key

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lower_key, pair[1])
            for lower_key, pair in self._store.items()
        )

    def __eq__(self, other):
        if not isinstance(other, collections.Mapping):
            return NotImplemented
        # Compare insensitively by normalizing both sides to lowercase keys.
        other = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """A ``dict`` whose item lookup reads instance attributes instead of
    the dict contents, returning ``None`` for missing keys."""

    def __init__(self, name=None):
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        return "<lookup '%s'>" % self.name

    def __getitem__(self, key):
        # Lookups go through the attribute namespace; unknown keys fall
        # through to None rather than raising KeyError.
        return self.__dict__.get(key)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| mit |
bnwn/ardupilot | Tools/LogAnalyzer/tests/TestDupeLogData.py | 273 | 2651 | from LogAnalyzer import Test,TestResult
import DataflashLog
class TestDupeLogData(Test):
    '''test for duplicated data in log, which has been happening on PX4/Pixhawk'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Dupe Log Data"

    def __matchSample(self, sample, sampleStartIndex, logdata):
        '''return the line number where a match is found, otherwise return False'''

        # ignore if all data in sample is the same value
        nSame = 0
        for s in sample:
            if s[1] == sample[0][1]:
                nSame += 1
        if nSame == 20:
            return False

        # Slide the 20-sample window over the rest of the ATT.Pitch data,
        # looking for an exact value-for-value repeat.
        data = logdata.channels["ATT"]["Pitch"].listData
        for i in range(sampleStartIndex, len(data)):
            #print "Checking against index %d" % i
            if i == sampleStartIndex:
                continue # skip matching against ourselves
            j = 0
            while j<20 and (i+j)<len(data) and data[i+j][1] == sample[j][1]:
                #print "### Match found, j=%d, data=%f, sample=%f, log data matched to sample at line %d" % (j,data[i+j][1],sample[j][1],data[i+j][0])
                j += 1
            if j == 20: # all samples match
                return data[i][0]
        return False

    def run(self, logdata, verbose):
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        # this could be made more flexible by not hard-coding to use ATT data, could make it dynamic based on whatever is available as long as it is highly variable
        if "ATT" not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No ATT log data"
            return

        # pick 10 sample points within the range of ATT data we have
        sampleStartIndices = []
        attStartIndex = 0
        attEndIndex = len(logdata.channels["ATT"]["Pitch"].listData)-1
        # NOTE: integer division under Python 2 (this script predates Python 3);
        # under Python 3 this would produce a float and break range() below.
        step = attEndIndex / 11
        for i in range(step,attEndIndex-step,step):
            sampleStartIndices.append(i)
            #print "Dupe data sample point index %d at line %d" % (i, logdata.channels["ATT"]["Pitch"].listData[i][0])

        # get 20 datapoints of pitch from each sample location and check for a match elsewhere
        sampleIndex = 0
        for i in range(sampleStartIndices[0], len(logdata.channels["ATT"]["Pitch"].listData)):
            if i == sampleStartIndices[sampleIndex]:
                #print "Checking sample %d" % i
                sample = logdata.channels["ATT"]["Pitch"].listData[i:i+20]
                matchedLine = self.__matchSample(sample, i, logdata)
                if matchedLine:
                    #print "Data from line %d found duplicated at line %d" % (sample[0][0],matchedLine)
                    self.result.status = TestResult.StatusType.FAIL
                    self.result.statusMessage = "Duplicate data chunks found in log (%d and %d)" % (sample[0][0],matchedLine)
                    return
                sampleIndex += 1
                if sampleIndex >= len(sampleStartIndices):
                    break
| gpl-3.0 |
darkleons/odoo | openerp/netsvc.py | 12 | 8962 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import logging.handlers
import os
import pprint
import release
import sys
import threading
import psycopg2
import openerp
import sql_db
import tools
_logger = logging.getLogger(__name__)
def log(logger, level, prefix, msg, depth=None):
    """Log *msg* pretty-printed at *level*; the first line carries *prefix*
    and every continuation line is aligned under it."""
    rendered = prefix + pprint.pformat(msg, depth=depth)
    hanging = ' ' * len(prefix)
    for index, line in enumerate(rendered.split('\n')):
        logger.log(level, (hanging if index else '') + line)
def LocalService(name):
    """
    The openerp.netsvc.LocalService() function is deprecated. It still works
    in two cases: workflows and reports. For workflows, instead of using
    LocalService('workflow'), openerp.workflow should be used (better yet,
    methods on openerp.osv.orm.Model should be used). For reports,
    openerp.report.render_report() should be used (methods on the Model should
    be provided too in the future).
    """
    assert openerp.conf.deprecation.allow_local_service
    _logger.warning("LocalService() is deprecated since march 2013 (it was called with '%s')." % name)

    if name == 'workflow':
        return openerp.workflow

    if name.startswith('report.'):
        # First look for an in-process registered report...
        report = openerp.report.interface.report_int._reports.get(name)
        if report:
            return report
        else:
            # ...otherwise fall back to a database lookup, using the registry
            # bound to the current thread's database (if any).
            dbname = getattr(threading.currentThread(), 'dbname', None)
            if dbname:
                registry = openerp.modules.registry.RegistryManager.get(dbname)
                with registry.cursor() as cr:
                    return registry['ir.actions.report.xml']._lookup_report(cr, name[len('report.'):])
    # Any other service name implicitly returns None.
path_prefix = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
class PostgreSQLHandler(logging.Handler):
    """PostgreSQL logging handler: stores log records in the ``ir_logging``
    table of a database — by default the current request's database, or the
    one set with ``--log-db=DBNAME``.
    """
    def emit(self, record):
        ct = threading.current_thread()
        ct_db = getattr(ct, 'dbname', None)
        # Explicit --log-db takes precedence over the thread's database.
        dbname = tools.config['log_db'] or ct_db
        if not dbname:
            return
        # Best-effort: any failure to log to the database is swallowed so
        # that logging never breaks the request being served.
        with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname, allow_uri=True).cursor() as cr:
            msg = tools.ustr(record.msg)
            if record.args:
                msg = msg % record.args
            traceback = getattr(record, 'exc_text', '')
            if traceback:
                msg = "%s\n%s" % (msg, traceback)
            # we do not use record.levelname because it may have been changed by ColoredFormatter.
            levelname = logging.getLevelName(record.levelno)
            # Path is stored relative to the installation root.
            val = ('server', ct_db, record.name, levelname, msg, record.pathname[len(path_prefix)+1:], record.lineno, record.funcName)
            cr.execute("""
                INSERT INTO ir_logging(create_date, type, dbname, name, level, message, path, line, func)
                VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s)
            """, val)
# ANSI color indexes (added to 30 for foreground, 40 for background).
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
# The background is set with 40 plus the number of the color, and the foreground with 30.
# These are the escape sequences needed to get colored output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# Two color codes (foreground, background), the text, then a reset.
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
# Foreground/background pair used for each log level by ColoredFormatter.
LEVEL_COLOR_MAPPING = {
    logging.DEBUG: (BLUE, DEFAULT),
    logging.INFO: (GREEN, DEFAULT),
    logging.WARNING: (YELLOW, DEFAULT),
    logging.ERROR: (RED, DEFAULT),
    logging.CRITICAL: (WHITE, RED),
}
class DBFormatter(logging.Formatter):
    """Formatter exposing ``%(pid)s`` and ``%(dbname)s`` to format strings."""

    def format(self, record):
        current = threading.current_thread()
        # The db name is stashed on the thread by the request dispatcher;
        # threads that never served a request show '?'.
        record.dbname = getattr(current, 'dbname', '?')
        record.pid = os.getpid()
        return logging.Formatter.format(self, record)
class ColoredFormatter(DBFormatter):
    """DBFormatter variant that wraps the level name in ANSI color codes."""

    def format(self, record):
        # Unknown levels fall back to green on the default background.
        fg, bg = LEVEL_COLOR_MAPPING.get(record.levelno, (GREEN, DEFAULT))
        record.levelname = COLOR_PATTERN % (30 + fg, 40 + bg, record.levelname)
        return DBFormatter.format(self, record)
# Guard so that init_logger() only configures logging once per process.
_logger_init = False

def init_logger():
    """Configure the root logger: output destination (stdout, file or
    syslog), record format, optional database handler, and per-logger
    levels. Subsequent calls are no-ops."""
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    # Level 25 sits between INFO (20) and WARNING (30); display it as INFO.
    logging.addLevelName(25, "INFO")

    from tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        else:
            handler = logging.handlers.SysLogHandler()
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'
    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
            elif os.name == 'posix':
                # WatchedFileHandler reopens the file if an external tool
                # (e.g. logrotate) moves it.
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.handlers.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
            handler = logging.StreamHandler(sys.stdout)
    else:
        # Normal Handler on standard output
        handler = logging.StreamHandler(sys.stdout)

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    # Only emit ANSI colors when writing to an actual terminal.
    if isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
    else:
        formatter = DBFormatter(format)
    handler.setFormatter(formatter)

    logging.getLogger().addHandler(handler)

    if tools.config['log_db']:
        # Mirror records at level >= 25 into the database as well.
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(25)
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    # Later entries override earlier ones; defaults < pseudo-level < explicit.
    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
# Baseline per-logger levels applied before pseudo-config and user overrides.
# Entry format: "<logger name>:<LEVEL>"; the empty name means the root logger.
DEFAULT_LOG_CONFIGURATION = [
    'openerp.workflow.workitem:WARNING',
    'openerp.http.rpc.request:INFO',
    'openerp.http.rpc.response:INFO',
    'openerp.addons.web.http:INFO',
    'openerp.sql_db:INFO',
    ':INFO',
]
# Maps each --log-level shortcut to the list of logger overrides it implies;
# 'info' adds nothing beyond DEFAULT_LOG_CONFIGURATION.
PSEUDOCONFIG_MAPPER = {
    'debug_rpc_answer': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG', 'openerp.http.rpc.response:DEBUG'],
    'debug_rpc': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG'],
    'debug': ['openerp:DEBUG'],
    'debug_sql': ['openerp.sql_db:DEBUG'],
    'info': [],
    'warn': ['openerp:WARNING', 'werkzeug:WARNING'],
    'error': ['openerp:ERROR', 'werkzeug:ERROR'],
    'critical': ['openerp:CRITICAL', 'werkzeug:CRITICAL'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FusionSP/android_external_chromium_org | chrome/common/extensions/docs/server2/path_canonicalizer.py | 78 | 4806 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import posixpath
from future import Future
from path_util import SplitParent
from special_paths import SITE_VERIFICATION_FILE
def _Normalize(file_name, splittext=False):
normalized = file_name
if splittext:
normalized = posixpath.splitext(file_name)[0]
normalized = normalized.replace('.', '').replace('-', '').replace('_', '')
return normalized.lower()
def _CommonNormalizedPrefix(first_file, second_file):
  '''Returns the common prefix of the two paths after each has been run
  through _Normalize (without extension stripping).
  '''
  normalized_pair = (_Normalize(first_file), _Normalize(second_file))
  return posixpath.commonprefix(normalized_pair)
class PathCanonicalizer(object):
  '''Transforms paths into their canonical forms. Since the docserver has had
  many incarnations - e.g. there didn't use to be apps/ - there may be old
  paths lying around the webs. We try to redirect those to where they are now.
  '''
  def __init__(self,
               file_system,
               object_store_creator,
               strip_extensions):
    # |strip_extensions| is a list of file extensions (e.g. .html) that should
    # be stripped for a path's canonical form.
    # Cache is keyed per file-system identity so different file systems do
    # not share canonical-path data.
    self._cache = object_store_creator.Create(
        PathCanonicalizer, category=file_system.GetIdentity())
    self._file_system = file_system
    self._strip_extensions = strip_extensions

  def _LoadCache(self):
    # Returns a Future of (canonical_paths, simplified_paths_map), computing
    # both by walking the file system on a cache miss and memoising the
    # result in |self._cache|.
    def load(cached):
      # |canonical_paths| is the pre-calculated set of canonical paths.
      # |simplified_paths_map| is a lazily populated mapping of simplified file
      # names to a list of full paths that contain them. For example,
      #  - browseraction: [extensions/browserAction.html]
      #  - storage: [apps/storage.html, extensions/storage.html]
      canonical_paths, simplified_paths_map = (
          cached.get('canonical_paths'), cached.get('simplified_paths_map'))
      if canonical_paths is None:
        # Both entries are written together below, so a miss on one implies
        # a miss on the other.
        assert simplified_paths_map is None
        canonical_paths = set()
        simplified_paths_map = defaultdict(list)
        for base, dirs, files in self._file_system.Walk(''):
          for path in dirs + files:
            path_without_ext, ext = posixpath.splitext(path)
            canonical_path = posixpath.join(base, path_without_ext)
            # Keep the extension unless it is one we strip; the site
            # verification file must always keep its exact name.
            if (ext not in self._strip_extensions or
                path == SITE_VERIFICATION_FILE):
              canonical_path += ext
            canonical_paths.add(canonical_path)
            simplified_paths_map[_Normalize(path, splittext=True)].append(
                canonical_path)
        # Store |simplified_paths_map| sorted. Ties in length are broken by
        # taking the shortest, lexicographically smallest path.
        for path_list in simplified_paths_map.itervalues():
          path_list.sort(key=lambda p: (len(p), p))
        self._cache.SetMulti({
            'canonical_paths': canonical_paths,
            'simplified_paths_map': simplified_paths_map,
        })
      else:
        assert simplified_paths_map is not None
      return canonical_paths, simplified_paths_map
    return self._cache.GetMulti(('canonical_paths',
                                 'simplified_paths_map')).Then(load)

  def Canonicalize(self, path):
    '''Returns the canonical path for |path|.
    '''
    canonical_paths, simplified_paths_map = self._LoadCache().Get()
    # Path may already be the canonical path.
    if path in canonical_paths:
      return path
    # Path not found. Our single heuristic: find |base| in the directory
    # structure with the longest common prefix of |path|.
    _, base = SplitParent(path)
    # Paths with a non-extension dot separator lose information in
    # _SimplifyFileName, so we try paths both with and without the dot to
    # maximize the possibility of finding the right path.
    potential_paths = (
        simplified_paths_map.get(_Normalize(base), []) +
        simplified_paths_map.get(_Normalize(base, splittext=True), []))
    if potential_paths == []:
      # There is no file with anything close to that name.
      return path
    # The most likely canonical file is the one with the longest common prefix
    # with |path|. This is slightly weaker than it could be; |path| is
    # compared without symbols, not the simplified form of |path|,
    # which may matter.
    max_prefix = potential_paths[0]
    max_prefix_length = len(_CommonNormalizedPrefix(max_prefix, path))
    for path_for_file in potential_paths[1:]:
      prefix_length = len(_CommonNormalizedPrefix(path_for_file, path))
      if prefix_length > max_prefix_length:
        max_prefix, max_prefix_length = path_for_file, prefix_length
    return max_prefix

  def Refresh(self):
    # Pre-warms the cache; returns the same Future as _LoadCache().
    return self._LoadCache()
| bsd-3-clause |
tmendoza/func | func/overlord/sslclient.py | 2 | 2003 | import sys
import xmlrpclib
import urllib
from certmaster import SSLCommon
class SSL_Transport(xmlrpclib.Transport):
    """xmlrpclib Transport that opens HTTPS connections using a
    caller-supplied SSL context (client certificate + CA verification),
    with an optional socket timeout.
    """

    user_agent = "pyOpenSSL_XMLRPC/%s - %s" % ('0.1', xmlrpclib.Transport.user_agent)

    def __init__(self, ssl_context, timeout=None, use_datetime=0):
        # Transport.__init__ only exists (and takes use_datetime) on
        # Python >= 2.5; older versions have nothing to chain to.
        if sys.version_info[:3] >= (2, 5, 0):
            xmlrpclib.Transport.__init__(self, use_datetime)
        self.ssl_ctx=ssl_context
        self._timeout = timeout

    def make_connection(self, host):
        # Handle username and password.
        try:
            host, extra_headers, x509 = self.get_host_info(host)
        except AttributeError:
            # Yay for Python 2.2
            pass
        _host, _port = urllib.splitport(host)
        # Newer xmlrpclib (the versions that grew single_request) expects an
        # HTTPConnection-style object; older versions expect the HTTP-style
        # wrapper. Pick the matching SSLCommon class.
        if hasattr(xmlrpclib.Transport, 'single_request'):
            cnx_class = SSLCommon.HTTPSConnection
        else:
            cnx_class = SSLCommon.HTTPS
        return cnx_class(_host, int(_port), ssl_context=self.ssl_ctx, timeout=self._timeout)
class SSLXMLRPCServerProxy(xmlrpclib.ServerProxy):
    """XML-RPC client proxy speaking over SSL with client-certificate auth."""

    def __init__(self, uri, pkey_file, cert_file, ca_cert_file, timeout=None):
        # Build one SSL context from the key/cert/CA files and hand it to the
        # custom transport; allow_none=True lets None marshal as <nil/>.
        self.ctx = SSLCommon.CreateSSLContext(pkey_file, cert_file, ca_cert_file)
        xmlrpclib.ServerProxy.__init__(self, uri, SSL_Transport(ssl_context=self.ctx, timeout=timeout), allow_none=True)
class FuncServer(SSLXMLRPCServerProxy):
    """Convenience proxy for a func endpoint.

    Identical to SSLXMLRPCServerProxy, but with keyword defaults for the
    credential paths and the values kept as instance attributes.
    """

    def __init__(self, uri, pem=None, crt=None, ca=None, timeout=None):
        # Record the credential locations before delegating; the parent
        # expects them positionally as (pkey, cert, ca_cert, timeout).
        self.pem, self.crt, self.ca = pem, crt, ca
        self.timeout = timeout
        SSLXMLRPCServerProxy.__init__(
            self, uri, self.pem, self.crt, self.ca, self.timeout)
if __name__ == "__main__":
    # Manual smoke test (Python 2 script): ping a local func daemon over SSL
    # using the certmaster slave credentials.
    s = SSLXMLRPCServerProxy('https://localhost:51234/', '/etc/pki/certmaster/slave.pem', '/etc/pki/certmaster/slave.cert', '/etc/pki/certmaster/ca/certmaster.crt')
    f = s.ping(1, 2)
    print f
| gpl-2.0 |
rkibria/yapyg | yapyg_widgets/screen_widget.py | 1 | 18072 | # Copyright (c) 2015 Raihan Kibria
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from kivy.uix.floatlayout import FloatLayout
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.core.window import Keyboard
from kivy import platform
from yapyg import texture_db
from yapyg import controls
from yapyg import debug
from yapyg_widgets.display_widget import DisplayWidget
from yapyg_widgets.joystick_widget import JoystickWidget
class ScreenWidget(FloatLayout):
    """Root widget of a yapyg game screen.

    Hosts the DisplayWidget that renders the game state and, depending on
    what the controls module reports for this state, an on-screen joystick,
    up to four action buttons, an exit button, a debug text overlay, and
    desktop keyboard bindings (space -> button 0, 'r' -> exit).
    """

    # Desktop keyboard fallbacks used by _on_keyboard_down/_on_keyboard_up.
    KEYCODE_SPACE = Keyboard.keycodes['spacebar']
    KEYCODE_R = Keyboard.keycodes['r']

    def __init__(self, state, on_exit_function=None, debugging=False, **kwargs):
        """Build the screen for |state|.

        on_exit_function: optional callable invoked as f(state, parent)
            after this widget tears itself down (see on_exit).
        debugging: when True, overlay debug text lines refreshed twice
            per second.
        """
        super(ScreenWidget, self).__init__(**kwargs)
        self.state = state
        texture_db.insert_color_rect(state, 1.0, 1.0, "tl_null", 0.0, 0.0, 0.0)
        self.display_widget = DisplayWidget(state)
        self.on_exit_function = on_exit_function
        self.add_widget(self.display_widget)
        self.joystick = None
        # All geometry below is relative (size_hint / pos_hint) coordinates.
        joystick_panel_height = 0.25
        joystick_x = 0.01
        joystick_y = 0.01
        if controls.need_joystick(state) or controls.need_buttons(state):
            joystick_height = 0.24
            # Scale width by the window aspect ratio so the joystick stays
            # square in pixels.
            joystick_width = (joystick_height * Window.height) / Window.width
            self.add_widget(Image(source="assets/img/ui/joy_panel.png",
                            size_hint=(1.0, joystick_panel_height),
                            pos_hint = {"x" : 0.0, "y" : 0.0},
                            allow_stretch = True,
                            ),
                    )
            if controls.need_joystick(state):
                self.joystick = JoystickWidget(
                        size_hint=(joystick_width, joystick_height),
                        pos_hint = {"x" : joystick_x, "y" : joystick_y},)
                self.add_widget(self.joystick)
                # Poll the joystick direction into the game state 10x/second.
                Clock.schedule_interval(self.on_timer, 0.1)
            if controls.need_buttons(state):
                button_width = joystick_width / 2.1
                button_height = joystick_height / 2.1
                button_width_big = 2 * button_width
                button_height_big = 2 * button_height
                background_file = "assets/img/ui/joy_button.png"
                background_down_file = "assets/img/ui/joy_button_down.png"
                background_file_big = "assets/img/ui/joy_button_big.png"
                background_down_file_big = "assets/img/ui/joy_button_down_big.png"
                button_defs = controls.get_buttons(state)
                if button_defs:
                    # Button 0: position ("left"/"right") x size
                    # ("small"/"big") from its control definition.
                    # NOTE(review): if pos is neither "left" nor "right",
                    # button_0 is never bound and add_widget below would
                    # raise NameError -- presumably the defs are validated
                    # upstream; confirm.
                    if button_defs[0][controls.IDX_CONTROL_BUTTON_POS] == "right":
                        if button_defs[0][controls.IDX_CONTROL_BUTTON_SIZE] == "small":
                            button_0 = Button(
                                    background_normal=background_file,
                                    background_down=background_down_file,
                                    size_hint=(button_width, button_height),
                                    pos_hint = {"x" : 1.0 - button_width - 0.01, "y" : 0.0 + 0.01},
                                    )
                        else:
                            button_0 = Button(
                                    background_normal=background_file_big,
                                    background_down=background_down_file_big,
                                    size_hint=(button_width_big, button_height_big),
                                    pos_hint={"x" : 1.0 - button_width_big - 0.01, "y" : 0.0 + 0.01},
                                    )
                    elif button_defs[0][controls.IDX_CONTROL_BUTTON_POS] == "left":
                        if button_defs[0][controls.IDX_CONTROL_BUTTON_SIZE] == "small":
                            button_0 = Button(
                                    background_normal=background_file,
                                    background_down=background_down_file,
                                    size_hint=(button_width, button_height),
                                    pos_hint = {"x" : joystick_x, "y" : joystick_y},
                                    )
                        else:
                            button_0 = Button(
                                    background_normal=background_file_big,
                                    background_down=background_down_file_big,
                                    size_hint=(button_width_big, button_height_big),
                                    pos_hint = {"x" : joystick_x, "y" : joystick_y},
                                    )
                    self.add_widget(button_0)
                    # Big buttons get an "A" label image overlaid; small
                    # ones stay unlabelled.
                    if button_defs[0][controls.IDX_CONTROL_BUTTON_POS] == "right":
                        if button_defs[0][controls.IDX_CONTROL_BUTTON_SIZE] == "small":
                            pass
                        else:
                            self.add_widget(Image(source="assets/img/ui/button_a.png",
                                            allow_stretch = True,
                                            pos_hint = {"x" : 1.0 - button_width_big - 0.01, "y" : 0.0 + 0.01},
                                            size_hint=(button_width_big, button_height_big),
                                            ))
                    else:
                        if button_defs[0][controls.IDX_CONTROL_BUTTON_SIZE] == "small":
                            pass
                        else:
                            self.add_widget(Image(source="assets/img/ui/button_a.png",
                                            allow_stretch = True,
                                            size_hint=(button_width_big, button_height_big),
                                            pos_hint = {"x" : joystick_x, "y" : joystick_y},
                                            ))
                    button_0.bind(state=self.on_button_0)
                    if len(button_defs) > 1:
                        # Button 1: same pos/size scheme as button 0, offset
                        # one joystick-width in from the right edge.
                        if button_defs[1][controls.IDX_CONTROL_BUTTON_POS] == "right":
                            if button_defs[1][controls.IDX_CONTROL_BUTTON_SIZE] == "small":
                                button_1 = Button(
                                        background_normal=background_file,
                                        background_down=background_down_file,
                                        size_hint=(button_width, button_height),
                                        pos_hint = {"x" : 1.0 - joystick_width - 0.01, "y" : 0.0 + 0.01},
                                        )
                            else:
                                button_1 = Button(
                                        background_normal=background_file_big,
                                        background_down=background_down_file_big,
                                        size_hint=(button_width_big, button_height_big),
                                        pos_hint = {"x" : 1.0 - joystick_width - 0.01, "y" : 0.0 + 0.01},
                                        )
                        elif button_defs[1][controls.IDX_CONTROL_BUTTON_POS] == "left":
                            if button_defs[1][controls.IDX_CONTROL_BUTTON_SIZE] == "small":
                                button_1 = Button(
                                        background_normal=background_file,
                                        background_down=background_down_file,
                                        size_hint=(button_width, button_height),
                                        pos_hint = {"x" : joystick_x, "y" : joystick_y},
                                        )
                            else:
                                button_1 = Button(
                                        background_normal=background_file_big,
                                        background_down=background_down_file_big,
                                        size_hint=(button_width_big, button_height_big),
                                        pos_hint = {"x" : joystick_x, "y" : joystick_y},
                                        )
                        self.add_widget(button_1)
                        button_1.bind(state=self.on_button_1)
                        if button_defs[1][controls.IDX_CONTROL_BUTTON_POS] == "right":
                            if button_defs[1][controls.IDX_CONTROL_BUTTON_SIZE] == "big":
                                self.add_widget(Image(source="assets/img/ui/button_b.png",
                                                allow_stretch = True,
                                                pos_hint = {"x" : 1.0 - button_width_big - 0.01, "y" : 0.0 + 0.01},
                                                size_hint=(button_width_big, button_height_big),
                                                ))
                    if len(button_defs) > 2:
                        # Buttons 2 and 3: fixed small buttons in a second
                        # row above buttons 0 and 1.
                        button_2 = Button(
                                background_normal=background_file,
                                background_down=background_down_file,
                                size_hint=(button_width, button_height),
                                pos_hint = {"x" : 1.0 - button_width - 0.01, "y" : button_height + 0.01},
                                )
                        self.add_widget(button_2)
                        button_2.bind(state=self.on_button_2)
                    if len(button_defs) > 3:
                        button_3 = Button(
                                background_normal=background_file,
                                background_down=background_down_file,
                                size_hint=(button_width, button_height),
                                pos_hint = {"x" : 1.0 - joystick_width - 0.01, "y" : button_height + 0.01},
                                )
                        self.add_widget(button_3)
                        button_3.bind(state=self.on_button_3)
        if self.on_exit_function:
            # Small centered exit button at the bottom of the screen.
            exit_button_size = (0.15, 0.05)
            exit_button_x = (1.0 - exit_button_size[0]) / 2.0
            exit_button = Button(text="",
                    background_normal="assets/img/ui/joy_option_button.png",
                    background_down="assets/img/ui/joy_option_button_down.png",
                    size_hint=exit_button_size,
                    pos_hint = {"x":exit_button_x, "y":0.01},
                    allow_stretch = True,
                    )
            exit_button.bind(state=self.on_exit)
            self.add_widget(exit_button)
        if debugging:
            # One extra line for the fps/frame-time status header.
            NUM_DEBUG_LINES = debug.NUM_DEBUG_LINES + 1
            DEBUG_LINE_SIZE = 0.05
            self.debug_label_array = []
            for i in xrange(NUM_DEBUG_LINES):
                self.debug_label_array.append(Label(
                        color=(0, 1, 0, 1),
                        size_hint=(1.0, DEBUG_LINE_SIZE),
                        pos_hint = {"x": 0.05, "y": 1.0 - DEBUG_LINE_SIZE - DEBUG_LINE_SIZE * i},
                        markup=True,
                        text_size=(Window.width, Window.height / NUM_DEBUG_LINES),
                        ))
                self.debug_label_array[i].bind(texture_size=self.setter('size'))
                self.add_widget(self.debug_label_array[i])
            Clock.schedule_interval(self.on_debug_timer, 0.5)
        # Keyboard input only makes sense on desktop platforms.
        if platform == 'win' or platform == 'linux' or platform == 'macosx':
            Window.bind(on_key_down=self._on_keyboard_down)
            Window.bind(on_key_up=self._on_keyboard_up)

    def set_debug_text(self, line_no, txt):
        # Write one line of the debug overlay (0 is the status header).
        self.debug_label_array[line_no].text = txt

    def on_debug_timer(self, dt):
        # Refresh the status header plus one overlay line per debug line.
        frame_time = self.display_widget.get_frame_time()
        status_output = "fps:%.1f frame_time:%.1fms" % (float(Clock.get_fps()), frame_time)
        self.set_debug_text(0, status_output)
        for i in xrange(debug.NUM_DEBUG_LINES):
            debug_line = debug.get_line(self.state, i)
            self.set_debug_text(1 + i, debug_line)

    def on_timer(self, dt):
        # Scheduled only when a joystick exists (see __init__), so
        # self.joystick is non-None here.
        if self.state:
            controls.set_joystick(self.state, self.joystick.get_direction())

    def on_exit(self, instance, value):
        # Tear the screen down: stop timers, destroy the display widget,
        # detach from the parent, then notify the exit callback.
        if self.parent:
            Clock.unschedule(self.on_debug_timer)
            Clock.unschedule(self.on_timer)
            self.display_widget.destroy()
            parent = self.parent
            parent.remove_widget(self)
            self.state = None
            if self.on_exit_function:
                # NOTE(review): self.state is cleared above, so the callback
                # always receives None as its state argument -- presumably
                # intentional (state is finished); confirm with callers.
                (self.on_exit_function)(self.state, parent)

    def on_button_0(self, instance, value):
        # Kivy button state is "down"/"normal"; forward as a boolean.
        if self.state:
            controls.set_button_state(self.state, 0, True if value == "down" else False)

    def on_button_1(self, instance, value):
        if self.state:
            controls.set_button_state(self.state, 1, True if value == "down" else False)

    def on_button_2(self, instance, value):
        if self.state:
            controls.set_button_state(self.state, 2, True if value == "down" else False)

    def on_button_3(self, instance, value):
        if self.state:
            controls.set_button_state(self.state, 3, True if value == "down" else False)

    def _on_keyboard_down(self, window, keycode, scancode, codepoint, modifier):
        # Desktop: space acts as button 0 press.
        if self.state:
            if keycode == self.KEYCODE_SPACE:
                controls.set_button_state(self.state, 0, True)

    def _on_keyboard_up(self, window, keycode, scancode):
        # Desktop: space release ends button 0; 'r' triggers screen exit.
        if self.state:
            if keycode == self.KEYCODE_SPACE:
                controls.set_button_state(self.state, 0, False)
            elif keycode == self.KEYCODE_R:
                self.on_exit(None, None)
| mit |
rupran/ansible | test/units/modules/network/eos/test_eos_command.py | 41 | 4104 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_command
from .eos_module import TestEosModule, load_fixture, set_module_args
class TestEosCommandModule(TestEosModule):
    """Unit tests for the eos_command Ansible module.

    run_commands is patched for every test; each requested CLI command is
    answered from a fixture file named
    'eos_command_<command with spaces replaced by underscores>.txt'.
    """

    module = eos_command

    def setUp(self):
        self.mock_run_commands = patch('ansible.modules.network.eos.eos_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                try:
                    # A command may be a JSON-encoded dict wrapping the real
                    # command string; fall back to the plain string form.
                    obj = json.loads(item['command'])
                    command = obj['command']
                except ValueError:
                    command = item['command']
                filename = str(command).replace(' ', '_')
                filename = 'eos_command_%s.txt' % filename
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file

    def test_eos_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Arista'))

    def test_eos_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Arista'))

    def test_eos_command_wait_for(self):
        # wait_for condition satisfied by the fixture output -> success.
        wait_for = 'result[0] contains "Arista vEOS"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_eos_command_wait_for_fails(self):
        # Unsatisfiable condition: module retries 10 times then fails.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_eos_command_retries(self):
        # Explicit retries=2 caps the number of attempts.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_eos_command_match_any(self):
        # match=any: one passing condition is enough.
        wait_for = ['result[0] contains "Arista"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_eos_command_match_all(self):
        wait_for = ['result[0] contains "Arista"',
                    'result[0] contains "Software image"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_eos_command_match_all_failure(self):
        # match=all with one failing condition -> module failure.
        wait_for = ['result[0] contains "Arista"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
akosyakov/intellij-community | python/lib/Lib/distutils/core.py | 82 | 9006 | """distutils.core
The only module that needs to be imported to use the Distutils; provides
the 'setup' function (which is to be called from the setup script). Also
indirectly provides the Distribution and Command classes, although they are
really defined in distutils.dist and distutils.cmd.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: core.py 38672 2005-03-20 22:19:47Z fdrake $"
import sys, os
from types import *
from distutils.debug import DEBUG
from distutils.errors import *
from distutils.util import grok_environment_error
# Mainly import these so setup scripts can "from distutils.core import" them.
from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.extension import Extension
# This is a barebones help message generated displayed when the user
# runs the setup script with no arguments at all. More useful help
# is generated with various --help options: global help, list commands,
# and per-command help.
USAGE = """\
usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
   or: %(script)s --help [cmd1 cmd2 ...]
   or: %(script)s --help-commands
   or: %(script)s cmd --help
"""

def gen_usage (script_name):
    """Return the generic usage text with the basename of 'script_name'
    substituted for every %(script)s placeholder.
    """
    # Build the substitution mapping explicitly instead of leaning on the
    # vars() trick over a same-named local.
    return USAGE % {'script': os.path.basename(script_name)}
# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
# _setup_stop_after: one of None/'init'/'config'/'commandline'/'run'.
_setup_stop_after = None
# _setup_distribution: the Distribution built by the most recent setup() call.
_setup_distribution = None

# Legal keyword arguments for the setup() function
setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
                  'name', 'version', 'author', 'author_email',
                  'maintainer', 'maintainer_email', 'url', 'license',
                  'description', 'long_description', 'keywords',
                  'platforms', 'classifiers', 'download_url',
                  'requires', 'provides', 'obsoletes',
                  )

# Legal keyword arguments for the Extension constructor
extension_keywords = ('name', 'sources', 'include_dirs',
                      'define_macros', 'undef_macros',
                      'library_dirs', 'libraries', 'runtime_library_dirs',
                      'extra_objects', 'extra_compile_args', 'extra_link_args',
                      'swig_opts', 'export_symbols', 'depends', 'language')
def setup (**attrs):
    """The gateway to the Distutils: do everything your setup script needs
    to do, in a highly flexible and user-driven way.  Briefly: create a
    Distribution instance; find and parse config files; parse the command
    line; run each Distutils command found there, customized by the options
    supplied to 'setup()' (as keyword arguments), in config files, and on
    the command line.

    The Distribution instance might be an instance of a class supplied via
    the 'distclass' keyword argument to 'setup'; if no such class is
    supplied, then the Distribution class (in dist.py) is instantiated.
    All other arguments to 'setup' (except for 'cmdclass') are used to set
    attributes of the Distribution instance.

    The 'cmdclass' argument, if supplied, is a dictionary mapping command
    names to command classes.  Each command encountered on the command line
    will be turned into a command class, which is in turn instantiated; any
    class found in 'cmdclass' is used in place of the default, which is
    (for command 'foo_bar') class 'foo_bar' in module
    'distutils.command.foo_bar'.  The command class must provide a
    'user_options' attribute which is a list of option specifiers for
    'distutils.fancy_getopt'.  Any command-line options between the current
    and the next command are used to set attributes of the current command
    object.

    When the entire command-line has been successfully parsed, calls the
    'run()' method on each command object in turn.  This method will be
    driven entirely by the Distribution object (which each command object
    has a reference to, thanks to its constructor), and the
    command-specific options that became attributes of each command
    object.
    """

    global _setup_stop_after, _setup_distribution

    # Determine the distribution class -- either caller-supplied or
    # our Distribution (see below).
    klass = attrs.get('distclass')
    if klass:
        del attrs['distclass']
    else:
        klass = Distribution

    # Default script name/args from sys.argv so both direct invocation and
    # run_setup() behave the same.
    if not attrs.has_key('script_name'):
        attrs['script_name'] = os.path.basename(sys.argv[0])
    if not attrs.has_key('script_args'):
        attrs['script_args'] = sys.argv[1:]

    # Create the Distribution instance, using the remaining arguments
    # (ie. everything except distclass) to initialize it
    try:
        _setup_distribution = dist = klass(attrs)
    except DistutilsSetupError, msg:
        if attrs.has_key('name'):
            raise SystemExit, "error in %s setup command: %s" % \
                  (attrs['name'], msg)
        else:
            raise SystemExit, "error in setup command: %s" % msg

    # _setup_stop_after is set by run_setup() to halt at a given phase.
    if _setup_stop_after == "init":
        return dist

    # Find and parse the config file(s): they will override options from
    # the setup script, but be overridden by the command line.
    dist.parse_config_files()

    if DEBUG:
        print "options (after parsing config files):"
        dist.dump_option_dicts()

    if _setup_stop_after == "config":
        return dist

    # Parse the command line; any command-line errors are the end user's
    # fault, so turn them into SystemExit to suppress tracebacks.
    try:
        ok = dist.parse_command_line()
    except DistutilsArgError, msg:
        raise SystemExit, gen_usage(dist.script_name) + "\nerror: %s" % msg

    if DEBUG:
        print "options (after parsing command line):"
        dist.dump_option_dicts()

    if _setup_stop_after == "commandline":
        return dist

    # And finally, run all the commands found on the command line.
    if ok:
        try:
            dist.run_commands()
        except KeyboardInterrupt:
            raise SystemExit, "interrupted"
        except (IOError, os.error), exc:
            # Environment errors get a friendlier message; under DEBUG the
            # original traceback is re-raised instead.
            error = grok_environment_error(exc)

            if DEBUG:
                sys.stderr.write(error + "\n")
                raise
            else:
                raise SystemExit, error

        except (DistutilsError,
                CCompilerError), msg:
            if DEBUG:
                raise
            else:
                raise SystemExit, "error: " + str(msg)

    return dist

# setup ()
def run_setup (script_name, script_args=None, stop_after="run"):
    """Run a setup script in a somewhat controlled environment, and
    return the Distribution instance that drives things.  This is useful
    if you need to find out the distribution meta-data (passed as
    keyword args from 'script' to 'setup()', or the contents of the
    config files or command-line.

    'script_name' is a file that will be run with 'execfile()';
    'sys.argv[0]' will be replaced with 'script' for the duration of the
    call.  'script_args' is a list of strings; if supplied,
    'sys.argv[1:]' will be replaced by 'script_args' for the duration of
    the call.

    'stop_after' tells 'setup()' when to stop processing; possible
    values:
      init
        stop after the Distribution instance has been created and
        populated with the keyword arguments to 'setup()'
      config
        stop after config files have been parsed (and their data
        stored in the Distribution instance)
      commandline
        stop after the command-line ('sys.argv[1:]' or 'script_args')
        have been parsed (and the data stored in the Distribution)
      run [default]
        stop after all commands have been run (the same as if 'setup()'
        had been called in the usual way

    Returns the Distribution instance, which provides all information
    used to drive the Distutils.
    """
    if stop_after not in ('init', 'config', 'commandline', 'run'):
        raise ValueError, "invalid value for 'stop_after': %r" % (stop_after,)

    # Communicate the stop phase to setup() through the module global, and
    # recover the Distribution it built the same way.
    global _setup_stop_after, _setup_distribution
    _setup_stop_after = stop_after

    save_argv = sys.argv
    g = {}
    l = {}
    try:
        try:
            # Impersonate the script: patch argv, exec it, then restore.
            sys.argv[0] = script_name
            if script_args is not None:
                sys.argv[1:] = script_args
            execfile(script_name, g, l)
        finally:
            sys.argv = save_argv
            _setup_stop_after = None
    except SystemExit:
        # Hmm, should we do something if exiting with a non-zero code
        # (ie. error)?
        pass
    except:
        raise

    if _setup_distribution is None:
        raise RuntimeError, \
              ("'distutils.core.setup()' was never called -- "
               "perhaps '%s' is not a Distutils setup script?") % \
              script_name

    # I wonder if the setup script's namespace -- g and l -- would be of
    # any interest to callers?
    #print "_setup_distribution:", _setup_distribution
    return _setup_distribution

# run_setup ()
| apache-2.0 |
pyqt/maya2012-qt4 | PyQt4/uic/exceptions.py | 27 | 2169 | #############################################################################
##
## Copyright (C) 2011 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
class NoSuchWidgetError(Exception):
    """Raised when a .ui file references a Qt widget class uic cannot find."""

    def __str__(self):
        # args[0] carries the unresolved widget class name.
        widget_name = self.args[0]
        return "Unknown Qt widget: %s" % (widget_name,)


class UnsupportedPropertyError(Exception):
    """Raised for a widget property uic does not know how to handle."""


class WidgetPluginError(Exception):
    """Raised when loading or running a widget plugin fails."""
| gpl-3.0 |
larsmans/numpy | numpy/ma/extras.py | 10 | 56831 | """
Masked arrays add-ons.
A collection of utilities for `numpy.ma`.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
__all__ = ['apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average',
'clump_masked', 'clump_unmasked', 'column_stack', 'compress_cols',
'compress_rowcols', 'compress_rows', 'count_masked', 'corrcoef',
'cov',
'diagflat', 'dot', 'dstack',
'ediff1d',
'flatnotmasked_contiguous', 'flatnotmasked_edges',
'hsplit', 'hstack',
'in1d', 'intersect1d',
'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all',
'masked_all_like', 'median', 'mr_',
'notmasked_contiguous', 'notmasked_edges',
'polyfit',
'row_stack',
'setdiff1d', 'setxor1d',
'unique', 'union1d',
'vander', 'vstack',
]
import itertools
import warnings
from . import core as ma
from .core import MaskedArray, MAError, add, array, asarray, concatenate, count, \
filled, getmask, getmaskarray, make_mask_descr, masked, masked_array, \
mask_or, nomask, ones, sort, zeros
#from core import *
import numpy as np
from numpy import ndarray, array as nxarray
import numpy.core.umath as umath
from numpy.lib.index_tricks import AxisConcatenator
from numpy.linalg import lstsq
#...............................................................................
def issequence(seq):
    """
    Return True if `seq` is a sequence (ndarray, list or tuple), else False.

    Note that strings and other iterables are deliberately *not* treated
    as sequences here.
    """
    # A single isinstance test replaces the original if/return-True/return-False.
    return isinstance(seq, (ndarray, tuple, list))
def count_masked(arr, axis=None):
    """
    Count the masked elements of `arr` along the given axis.

    Parameters
    ----------
    arr : array_like
        An array that may contain masked elements.
    axis : int, optional
        Axis along which to count.  If None (default), the whole
        (flattened) array is counted.

    Returns
    -------
    count : int or ndarray
        Total number of masked elements, or the per-slice counts along
        `axis`.

    See Also
    --------
    MaskedArray.count : Count non-masked elements.
    """
    # getmaskarray always returns a boolean array (all-False for nomask),
    # so summing it directly yields the masked-element count.
    return getmaskarray(arr).sum(axis)
def masked_all(shape, dtype=float):
    """
    Return an empty masked array of the given shape and dtype, in which
    every entry is masked.

    Parameters
    ----------
    shape : tuple
        Shape of the required MaskedArray.
    dtype : dtype, optional
        Data type of the output.

    Returns
    -------
    a : MaskedArray
        A masked array with all data masked.

    See Also
    --------
    masked_all_like : Empty masked array modelled on an existing array.
    """
    # The mask mirrors the data's dtype structure (handles structured dtypes).
    full_mask = np.ones(shape, dtype=make_mask_descr(dtype))
    return masked_array(np.empty(shape, dtype), mask=full_mask)
def masked_all_like(arr):
    """
    Return an empty masked array with the shape and dtype of `arr`, in
    which every entry is masked.

    Parameters
    ----------
    arr : ndarray
        Array describing the shape and dtype of the required MaskedArray.

    Returns
    -------
    a : MaskedArray
        A masked array with all data masked.

    Raises
    ------
    AttributeError
        If `arr` doesn't have a shape attribute (i.e. not an ndarray).

    See Also
    --------
    masked_all : Empty masked array with all elements masked.
    """
    result = np.empty_like(arr).view(MaskedArray)
    # Build an all-True mask matching the (possibly structured) dtype.
    result._mask = np.ones(result.shape, dtype=make_mask_descr(result.dtype))
    return result
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
class _fromnxfunction:
    """
    Defines a wrapper to adapt NumPy functions to masked arrays.
    An instance of `_fromnxfunction` can be called with the same parameters
    as the wrapped NumPy function. The docstring of `newfunc` is adapted from
    the wrapped function as well, see `getdoc`.
    Parameters
    ----------
    funcname : str
        The name of the function to be adapted. The function should be
        in the NumPy namespace (i.e. ``np.funcname``).
    """
    def __init__(self, funcname):
        self.__name__ = funcname
        self.__doc__ = self.getdoc()
    def getdoc(self):
        """
        Retrieve the docstring and signature from the function.
        The ``__doc__`` attribute of the function is used as the docstring for
        the new masked array version of the function. A note on application
        of the function to the mask is appended.
        .. warning::
          If the function docstring already contained a Notes section, the
          new docstring will have two Notes sections instead of appending a note
          to the existing section.
        Parameters
        ----------
        None
        """
        # Look the wrapped function up lazily in the NumPy namespace.
        npfunc = getattr(np, self.__name__, None)
        doc = getattr(npfunc, '__doc__', None)
        if doc:
            sig = self.__name__ + ma.get_object_signature(npfunc)
            locdoc = "Notes\n-----\nThe function is applied to both the _data"\
                     " and the _mask, if any."
            return '\n'.join((sig, doc, locdoc))
        # No docstring on the wrapped function (e.g. running with -OO):
        # fall back to None implicitly.
        return
    def __call__(self, *args, **params):
        func = getattr(np, self.__name__)
        if len(args) == 1:
            x = args[0]
            if isinstance(x, ndarray):
                # Apply the function separately to the data and to the mask.
                _d = func(x.__array__(), **params)
                _m = func(getmaskarray(x), **params)
                return masked_array(_d, mask=_m)
            elif isinstance(x, tuple) or isinstance(x, list):
                # Single sequence argument (e.g. hstack((a, b))): convert
                # each item; masks are combined the same way as the data.
                _d = func(tuple([np.asarray(a) for a in x]), **params)
                _m = func(tuple([getmaskarray(a) for a in x]), **params)
                return masked_array(_d, mask=_m)
            # NOTE(review): a single argument that is neither an ndarray nor
            # a tuple/list falls through and returns None — confirm intended.
        else:
            # Several leading sequence arguments: apply the function to each
            # independently (remaining args are forwarded), return the list.
            arrays = []
            args = list(args)
            while len(args) > 0 and issequence(args[0]):
                arrays.append(args.pop(0))
            res = []
            for x in arrays:
                _d = func(np.asarray(x), *args, **params)
                _m = func(getmaskarray(x), *args, **params)
                res.append(masked_array(_d, mask=_m))
            return res
# Masked-array-aware versions of common NumPy array-manipulation helpers.
# Each wrapper applies the underlying NumPy function to both the data and
# the mask (see `_fromnxfunction` above).
atleast_1d = _fromnxfunction('atleast_1d')
atleast_2d = _fromnxfunction('atleast_2d')
atleast_3d = _fromnxfunction('atleast_3d')
#atleast_1d = np.atleast_1d
#atleast_2d = np.atleast_2d
#atleast_3d = np.atleast_3d
vstack = row_stack = _fromnxfunction('vstack')
hstack = _fromnxfunction('hstack')
column_stack = _fromnxfunction('column_stack')
dstack = _fromnxfunction('dstack')
hsplit = _fromnxfunction('hsplit')
diagflat = _fromnxfunction('diagflat')
#####--------------------------------------------------------------------------
#----
#####--------------------------------------------------------------------------
def flatten_inplace(seq):
    """Flatten a nested mutable sequence in place and return it."""
    idx = 0
    while idx < len(seq):
        # Keep splicing nested iterables into the current slot until a
        # non-iterable value remains there.
        while hasattr(seq[idx], '__iter__'):
            seq[idx:idx + 1] = seq[idx]
        idx += 1
    return seq
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    (This docstring should be overwritten)
    """
    arr = array(arr, copy=False, subok=True)
    nd = arr.ndim
    # Normalize a negative axis and validate the result.
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis, nd))
    # `ind` holds the current position on every axis except `axis`;
    # `i` is an object-dtype index vector with a full slice at `axis`.
    ind = [0] * (nd - 1)
    i = np.zeros(nd, 'O')
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = np.asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    j = i.copy()
    # First application decides whether the result is scalar-like.
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    asscalar = np.isscalar(res)
    if not asscalar:
        try:
            len(res)
        except TypeError:
            asscalar = True
    # Note: we shouldn't set the dtype of the output from the first result...
    #...so we force the type to object, and build a list of dtypes
    #...we'll just take the largest, to avoid some downcasting
    dtypes = []
    if asscalar:
        dtypes.append(np.asarray(res).dtype)
        outarr = zeros(outshape, object)
        outarr[tuple(ind)] = res
        Ntot = np.product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            # Carry the increment over into the preceding axes (odometer).
            while (ind[n] >= outshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    else:
        # Array-valued results: the output keeps `res.shape` at `axis`.
        res = array(res, copy=False, subok=True)
        j = i.copy()
        j[axis] = ([slice(None, None)] * res.ndim)
        j.put(indlist, ind)
        Ntot = np.product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = res.shape
        dtypes.append(asarray(res).dtype)
        outshape = flatten_inplace(outshape)
        outarr = zeros(outshape, object)
        outarr[tuple(flatten_inplace(j.tolist()))] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            j.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(flatten_inplace(j.tolist()))] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    # Cast the object array to the widest dtype seen, to avoid downcasting.
    max_dtypes = np.dtype(np.asarray(dtypes).max())
    if not hasattr(arr, '_mask'):
        result = np.asarray(outarr, dtype=max_dtypes)
    else:
        result = asarray(outarr, dtype=max_dtypes)
        result.fill_value = ma.default_fill_value(result)
    return result
apply_along_axis.__doc__ = np.apply_along_axis.__doc__
def apply_over_axes(func, a, axes):
    """
    (This docstring will be overwritten)
    """
    result = asarray(a)
    ndim = a.ndim
    # Promote a scalar axis specification to a one-element tuple.
    if array(axes).ndim == 0:
        axes = (axes,)
    for ax in axes:
        if ax < 0:
            ax = ndim + ax
        res = func(result, ax)
        if res.ndim != result.ndim:
            # The reduction dropped a dimension: reinstate it.
            res = ma.expand_dims(res, ax)
        if res.ndim == result.ndim:
            result = res
        else:
            raise ValueError("function is not returning "
                             "an array of the correct shape")
    return result
# Re-use NumPy's apply_over_axes docstring (everything before its Notes
# section) and append masked-array-specific examples.  Skipped when
# docstrings are stripped (python -OO), hence the None guard.
if apply_over_axes.__doc__ is not None:
    apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
                        :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
    """
    Examples
    --------
    >>> a = ma.arange(24).reshape(2,3,4)
    >>> a[:,0,1] = ma.masked
    >>> a[:,1,:] = ma.masked
    >>> print a
    [[[0 -- 2 3]
    [-- -- -- --]
    [8 9 10 11]]
    [[12 -- 14 15]
    [-- -- -- --]
    [20 21 22 23]]]
    >>> print ma.apply_over_axes(ma.sum, a, [0,2])
    [[[46]
    [--]
    [124]]]
    Tuple axis arguments to ufuncs are equivalent:
    >>> print ma.sum(a, axis=(0,2)).reshape((1,-1,1))
    [[[46]
    [--]
    [124]]]
    """
def average(a, axis=None, weights=None, returned=False):
    """
    Return the weighted average of array over the given axis.

    Masked entries are not taken into account in the computation.

    Parameters
    ----------
    a : array_like
        Data to be averaged.
    axis : int, optional
        Axis along which the average is computed. The default is to compute
        the average of the flattened array.
    weights : array_like, optional
        The importance that each element has in the computation of the
        average. The weights array can either be 1-D (in which case its
        length must be the size of `a` along the given axis) or of the
        same shape as `a`. If ``weights=None``, then all data in `a` are
        assumed to have a weight equal to one.
    returned : bool, optional
        Flag indicating whether a tuple ``(result, sum of weights)``
        should be returned as output (True), or just the result (False).
        Default is False.

    Returns
    -------
    average, [sum_of_weights] : (tuple of) scalar or MaskedArray
        The average along the specified axis. When `returned` is True,
        return a tuple with the average as the first element and the sum
        of the weights as the second element.

    Examples
    --------
    >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
    >>> np.ma.average(a, weights=[3, 1, 0, 0])
    1.25
    """
    a = asarray(a)
    mask = a.mask
    ash = a.shape
    if ash == ():
        ash = (1,)
    if axis is None:
        if mask is nomask:
            if weights is None:
                n = a.sum(axis=None)
                d = float(a.size)
            else:
                w = filled(weights, 0.0).ravel()
                n = umath.add.reduce(a._data.ravel() * w)
                d = umath.add.reduce(w)
                del w
        else:
            if weights is None:
                n = a.filled(0).sum(axis=None)
                d = float(umath.add.reduce((~mask).ravel()))
            else:
                # Mask the weights where the data are masked, so masked
                # entries contribute to neither numerator nor denominator.
                w = array(filled(weights, 0.0), float, mask=mask).ravel()
                n = add.reduce(a.ravel() * w)
                d = add.reduce(w)
                del w
    else:
        if mask is nomask:
            if weights is None:
                d = ash[axis] * 1.0
                n = add.reduce(a._data, axis)
            else:
                w = filled(weights, 0.0)
                wsh = w.shape
                if wsh == ():
                    wsh = (1,)
                if wsh == ash:
                    # Was ``np.array(w, float, copy=0)``; asarray has the
                    # same copy-only-if-needed semantics on all NumPy
                    # versions (copy=False changed meaning in NumPy 2.0).
                    w = np.asarray(w, dtype=float)
                    n = add.reduce(a * w, axis)
                    d = add.reduce(w, axis)
                    del w
                elif wsh == (ash[axis],):
                    # Broadcast the 1-D weights across the reduction axis.
                    # (Previously built with eval(); direct tuple indexing
                    # is the equivalent, safer form.)
                    r = [None] * len(ash)
                    r[axis] = slice(None, None, 1)
                    w = w[tuple(r)] * ones(ash, float)
                    n = add.reduce(a * w, axis)
                    d = add.reduce(w, axis, dtype=float)
                    del w, r
                else:
                    raise ValueError('average: weights wrong shape.')
        else:
            if weights is None:
                n = add.reduce(a, axis)
                d = umath.add.reduce((~mask), axis=axis, dtype=float)
            else:
                w = filled(weights, 0.0)
                wsh = w.shape
                if wsh == ():
                    wsh = (1,)
                if wsh == ash:
                    w = array(w, dtype=float, mask=mask, copy=0)
                    n = add.reduce(a * w, axis)
                    d = add.reduce(w, axis, dtype=float)
                elif wsh == (ash[axis],):
                    # Same broadcast as above, but carrying the data mask.
                    r = [None] * len(ash)
                    r[axis] = slice(None, None, 1)
                    w = w[tuple(r)] * masked_array(ones(ash, float), mask)
                    n = add.reduce(a * w, axis)
                    d = add.reduce(w, axis, dtype=float)
                else:
                    raise ValueError('average: weights wrong shape.')
                del w
    if n is masked or d is masked:
        return masked
    result = n / d
    del n
    if isinstance(result, MaskedArray):
        # Degenerate reductions with no masked result collapse to raw data.
        if ((axis is None) or (axis == 0 and a.ndim == 1)) and \
           (result.mask is nomask):
            result = result._data
        if returned:
            if not isinstance(d, MaskedArray):
                d = masked_array(d)
            if isinstance(d, ndarray) and (not d.shape == result.shape):
                d = ones(result.shape, dtype=float) * d
    if returned:
        return result, d
    else:
        return result
def median(a, axis=None, out=None, overwrite_input=False):
    """
    Compute the median along the specified axis.
    Returns the median of the array elements.
    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int, optional
        Axis along which the medians are computed. The default (None) is
        to compute the median along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output
        but the type will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array (a) for
        calculations. The input array will be modified by the call to
        median. This will save memory when you do not need to preserve
        the contents of the input array. Treat the input as undefined,
        but it will probably be fully or partially sorted. Default is
        False. Note that, if `overwrite_input` is True, and the input
        is not already an `ndarray`, an error will be raised.
    Returns
    -------
    median : ndarray
        A new array holding the result is returned unless out is
        specified, in which case a reference to out is returned.
        Return data-type is `float64` for integers and floats smaller than
        `float64`, or the input data-type, otherwise.
    See Also
    --------
    mean
    Notes
    -----
    Given a vector ``V`` with ``N`` non masked values, the median of ``V``
    is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
    ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
    when ``N`` is even.
    Examples
    --------
    >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
    >>> np.ma.extras.median(x)
    1.5
    >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
    >>> np.ma.extras.median(x)
    2.5
    >>> np.ma.extras.median(x, axis=-1, overwrite_input=True)
    masked_array(data = [ 2.  5.],
                 mask = False,
           fill_value = 1e+20)
    """
    # Fast path: no mask at all — defer to the plain ndarray median.
    if not hasattr(a, 'mask') or np.count_nonzero(a.mask) == 0:
        return masked_array(np.median(getattr(a, 'data', a), axis=axis,
                            out=out, overwrite_input=overwrite_input), copy=False)
    if overwrite_input:
        if axis is None:
            asorted = a.ravel()
            asorted.sort()
        else:
            a.sort(axis=axis)
            asorted = a
    else:
        asorted = sort(a, axis=axis)
    if axis is None:
        axis = 0
    elif axis < 0:
        axis += a.ndim
    # Number of unmasked values per lane; masked values sort to the end,
    # so the median of each lane lives within its first `counts` slots.
    counts = asorted.shape[axis] - (asorted.mask).sum(axis=axis)
    h = counts // 2
    # create indexing mesh grid for all but reduced axis
    axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape)
                 if i != axis]
    ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
    # insert indices of low and high median
    ind.insert(axis, h - 1)
    low = asorted[ind]
    ind[axis] = h
    high = asorted[ind]
    # duplicate high if odd number of elements so mean does nothing
    odd = counts % 2 == 1
    if asorted.ndim == 1:
        if odd:
            low = high
    else:
        low[odd] = high[odd]
    if np.issubdtype(asorted.dtype, np.inexact):
        # avoid inf / x = masked
        s = np.ma.sum([low, high], axis=0, out=out)
        np.true_divide(s.data, 2., casting='unsafe', out=s.data)
    else:
        s = np.ma.mean([low, high], axis=0, out=out)
    return s
#..............................................................................
def compress_rowcols(x, axis=None):
    """
    Suppress the rows and/or columns of a 2-D array that contain masked
    values.

    Parameters
    ----------
    x : array_like
        The 2-D array to compress.
    axis : int, optional
        None (default) removes both rows and columns, 0 removes rows
        only, 1 (or -1) removes columns only.

    Returns
    -------
    compressed_array : ndarray
        The data of `x` with the offending rows/columns removed.
    """
    x = asarray(x)
    if x.ndim != 2:
        raise NotImplementedError("compress2d works for 2D arrays only.")
    m = getmask(x)
    if m is nomask or not m.any():
        # Nothing is masked: the data can be returned untouched.
        return x._data
    if m.all():
        # Everything is masked: nothing survives.
        return nxarray([])
    # Start from all indices and drop those hit by a masked entry.
    keep_rows = list(range(x.shape[0]))
    keep_cols = list(range(x.shape[1]))
    hit_rows, hit_cols = m.nonzero()
    if not axis:
        # axis is None or 0: drop every row holding a masked value.
        for row in np.unique(hit_rows):
            keep_rows.remove(row)
    if axis in [None, 1, -1]:
        for col in np.unique(hit_cols):
            keep_cols.remove(col)
    return x._data[keep_rows][:, keep_cols]
def compress_rows(a):
    """
    Suppress whole rows of a 2-D array that contain masked values.

    Shortcut for ``compress_rowcols(a, 0)``; see `extras.compress_rowcols`
    for details.

    See Also
    --------
    extras.compress_rowcols
    """
    return compress_rowcols(a, axis=0)
def compress_cols(a):
    """
    Suppress whole columns of a 2-D array that contain masked values.

    Shortcut for ``compress_rowcols(a, 1)``; see `extras.compress_rowcols`
    for details.

    See Also
    --------
    extras.compress_rowcols
    """
    return compress_rowcols(a, axis=1)
def mask_rowcols(a, axis=None):
    """
    Mask whole rows and/or columns of a 2D array that contain masked
    values.

    Parameters
    ----------
    a : array_like, MaskedArray
        The 2D array to process. If no elements are masked, the result
        is returned unchanged (mask stays `nomask`).
    axis : int, optional
        None (default) masks both rows and columns, 0 masks rows only,
        1 or -1 masks columns only.

    Returns
    -------
    a : MaskedArray
        The input with whole rows/columns additionally masked.

    Raises
    ------
    NotImplementedError
        If `a` is not 2D.

    See Also
    --------
    mask_rows : Mask rows of a 2D array that contain masked values.
    mask_cols : Mask cols of a 2D array that contain masked values.
    masked_where : Mask where a condition is met.

    Notes
    -----
    The input array's mask is modified by this function.
    """
    a = array(a, subok=False)
    if a.ndim != 2:
        raise NotImplementedError("mask_rowcols works for 2D arrays only.")
    m = getmask(a)
    if m is nomask or not m.any():
        # No masked entries at all: nothing to extend.
        return a
    hit_rows, hit_cols = m.nonzero()
    # Work on a private copy of the mask so a shared mask is not mutated.
    a._mask = a._mask.copy()
    if not axis:
        # axis is None or 0: blank out every row with a masked entry.
        a[np.unique(hit_rows)] = masked
    if axis in [None, 1, -1]:
        a[:, np.unique(hit_cols)] = masked
    return a
def mask_rows(a, axis=None):
    """
    Mask whole rows of a 2D array that contain masked values.

    Shortcut for ``mask_rowcols`` with `axis` equal to 0.

    See Also
    --------
    mask_rowcols : Mask rows and/or columns of a 2D array.
    masked_where : Mask where a condition is met.
    """
    return mask_rowcols(a, axis=0)
def mask_cols(a, axis=None):
    """
    Mask whole columns of a 2D array that contain masked values.

    Shortcut for ``mask_rowcols`` with `axis` equal to 1.

    See Also
    --------
    mask_rowcols : Mask rows and/or columns of a 2D array.
    masked_where : Mask where a condition is met.
    """
    return mask_rowcols(a, axis=1)
def dot(a, b, strict=False):
    """
    Return the dot product of two arrays, taking masked values into
    account.

    .. note::
      Works only with 2-D arrays at the moment.

    Parameters
    ----------
    a, b : ndarray
        Input arrays.
    strict : bool, optional
        Whether masked data are propagated (True) or set to 0 (False)
        for the computation.  Propagating the mask means that a masked
        value anywhere in a row/column masks the whole row/column.

    See Also
    --------
    numpy.dot : Equivalent function for ndarrays.
    """
    #!!!: Works only with 2D arrays. There should be a way to get it to run with higher dimension
    if strict and (a.ndim == 2) and (b.ndim == 2):
        # Propagate masks over whole rows of `a` and columns of `b`.
        a = mask_rows(a)
        b = mask_cols(b)
    data = np.dot(filled(a, 0), filled(b, 0))
    # An output cell is valid only when some unmasked pair contributed;
    # the boolean dot computes exactly that.
    valid = np.dot(~getmaskarray(a), ~getmaskarray(b))
    return masked_array(data, mask=~valid)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
def ediff1d(arr, to_end=None, to_begin=None):
    """
    Compute the differences between consecutive elements of an array,
    taking masked values into account.

    This is the masked-array equivalent of `numpy.ediff1d`; see
    `numpy.ediff1d` for details.

    See Also
    --------
    numpy.ediff1d : Equivalent function for ndarrays.
    """
    flat = ma.asanyarray(arr).flat
    diffs = flat[1:] - flat[:-1]
    pieces = [diffs]
    if to_begin is not None:
        pieces.insert(0, to_begin)
    if to_end is not None:
        pieces.append(to_end)
    if len(pieces) != 1:
        # Concatenate only when something was prepended/appended, saving a
        # copy of a potentially large array in the common case.
        diffs = hstack(pieces)
    return diffs
def unique(ar1, return_index=False, return_inverse=False):
    """
    Find the unique elements of an array.

    Masked values are considered the same element (masked). The output
    array is always a masked array. See `numpy.unique` for more details.

    See Also
    --------
    numpy.unique : Equivalent function for ndarrays.
    """
    out = np.unique(ar1,
                    return_index=return_index,
                    return_inverse=return_inverse)
    if isinstance(out, tuple):
        # Only the values array (first item) becomes a masked view; the
        # index/inverse arrays stay plain ndarrays.
        parts = list(out)
        parts[0] = parts[0].view(MaskedArray)
        return tuple(parts)
    return out.view(MaskedArray)
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Return the unique elements common to both arrays.

    Masked values are considered equal one to the other. The output is
    always a masked array. See `numpy.intersect1d` for more details.

    See Also
    --------
    numpy.intersect1d : Equivalent function for ndarrays.
    """
    if assume_unique:
        combined = ma.concatenate((ar1, ar2))
    else:
        # Might be faster than unique(intersect1d(ar1, ar2))?
        combined = ma.concatenate((unique(ar1), unique(ar2)))
    combined.sort()
    # After sorting, duplicates mark the elements present in both inputs.
    return combined[:-1][combined[1:] == combined[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Set exclusive-or of 1-D arrays with unique elements.

    The output is always a masked array. See `numpy.setxor1d` for more
    details.

    See Also
    --------
    numpy.setxor1d : Equivalent function for ndarrays.
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    combined = ma.concatenate((ar1, ar2))
    if combined.size == 0:
        return combined
    combined.sort()
    filled_vals = combined.filled()
    # True at every boundary between runs of equal values (and at both ends).
    boundary = ma.concatenate(([True], (filled_vals[1:] != filled_vals[:-1]), [True]))
    # An element survives when it is bracketed by two boundaries, i.e. it
    # appears exactly once in the concatenation.
    keep = (boundary[1:] == boundary[:-1])
    return combined[keep]
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """
    Test whether each element of an array is also present in a second
    array.

    The output is always a masked array. See `numpy.in1d` for more
    details.

    See Also
    --------
    numpy.in1d : Equivalent function for ndarrays.

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if not assume_unique:
        ar1, rev_idx = unique(ar1, return_inverse=True)
        ar2 = unique(ar2)
    combined = ma.concatenate((ar1, ar2))
    # The sort must be stable so that entries coming from `ar1` always
    # precede equal entries coming from `ar2`.
    order = combined.argsort(kind='mergesort')
    sorted_vals = combined[order]
    if invert:
        bool_ar = (sorted_vals[1:] != sorted_vals[:-1])
    else:
        bool_ar = (sorted_vals[1:] == sorted_vals[:-1])
    flag = ma.concatenate((bool_ar, [invert]))
    # Undo the sort and keep only the slots that belong to `ar1`.
    indx = order.argsort(kind='mergesort')[:len(ar1)]
    if assume_unique:
        return flag[indx]
    return flag[indx][rev_idx]
def union1d(ar1, ar2):
    """
    Union of two arrays.

    The output is always a masked array. See `numpy.union1d` for more
    details.

    See also
    --------
    numpy.union1d : Equivalent function for ndarrays.
    """
    combined = ma.concatenate((ar1, ar2))
    return unique(combined)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Set difference of 1D arrays with unique elements.

    The output is always a masked array. See `numpy.setdiff1d` for more
    details.

    See Also
    --------
    numpy.setdiff1d : Equivalent function for ndarrays.
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    membership = in1d(ar1, ar2, assume_unique=True)
    if membership.size == 0:
        return membership
    # Keep the elements of ar1 that are NOT in ar2.
    return ma.asarray(ar1)[membership == 0]
#####--------------------------------------------------------------------------
#---- --- Covariance ---
#####--------------------------------------------------------------------------
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
    """
    Private function for the computation of covariance and correlation
    coefficients.

    Returns the demeaned data `x` (with `y` concatenated when given), an
    integer array `xnotmask` flagging the unmasked entries, and the
    normalized `rowvar` (0 or 1).
    """
    x = ma.array(x, ndmin=2, copy=True, dtype=float)
    xmask = ma.getmaskarray(x)
    # Quick exit if we can't process masked data
    if not allow_masked and xmask.any():
        raise ValueError("Cannot process masked data...")
    #
    # A single row is always treated as one variable of many observations.
    if x.shape[0] == 1:
        rowvar = True
    # Make sure that rowvar is either 0 or 1
    rowvar = int(bool(rowvar))
    axis = 1 - rowvar
    if rowvar:
        tup = (slice(None), None)
    else:
        tup = (None, slice(None))
    #
    if y is None:
        xnotmask = np.logical_not(xmask).astype(int)
    else:
        y = array(y, copy=False, ndmin=2, dtype=float)
        ymask = ma.getmaskarray(y)
        if not allow_masked and ymask.any():
            raise ValueError("Cannot process masked data...")
        if xmask.any() or ymask.any():
            if y.shape == x.shape:
                # Define some common mask
                # Pairwise propagation: x[i,j] masked <=> y[i,j] masked.
                common_mask = np.logical_or(xmask, ymask)
                if common_mask is not nomask:
                    x.unshare_mask()
                    y.unshare_mask()
                    xmask = x._mask = y._mask = ymask = common_mask
        x = ma.concatenate((x, y), axis)
        xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
    # Demean each variable (masked entries do not contribute to the mean).
    x -= x.mean(axis=rowvar)[tup]
    return (x, xnotmask, rowvar)
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
    """
    Estimate the covariance matrix.

    Except for the handling of missing data this function does the same
    as `numpy.cov`. For more details and examples, see `numpy.cov`.

    By default, masked values are recognized as such. If `x` and `y`
    have the same shape, a common mask is allocated: if ``x[i,j]`` is
    masked, then ``y[i,j]`` will also be masked.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and
        observations. Each row of `x` represents a variable, and each
        column a single observation of all those variables (see
        `rowvar`).
    y : array_like, optional
        An additional set of variables and observations, same form as `x`.
    rowvar : bool, optional
        If True (default), each row is a variable and each column an
        observation; otherwise the relationship is transposed.
    bias : bool, optional
        If False (default), normalization is by ``(N-1)``; if True, by
        ``N``. Overridden by `ddof` when that is given.
    allow_masked : bool, optional
        If True, masked values are propagated pair-wise; if False, a
        `ValueError` is raised when values are missing.
    ddof : {None, int}, optional
        If not None, normalization is by ``(N - ddof)``, overriding the
        value implied by `bias`.

    Raises
    ------
    ValueError
        Raised if some values are missing and `allow_masked` is False,
        or if `ddof` is not an integer.

    See Also
    --------
    numpy.cov
    """
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be an integer")
    if ddof is None:
        # `bias` selects the legacy normalization only when ddof is absent.
        ddof = 0 if bias else 1
    (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
    # The pairwise counts of jointly-unmasked observations give the
    # per-entry normalization factor.
    if rowvar:
        fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
        result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
    else:
        fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
        result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
    return result
def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
    """
    Return Pearson product-moment correlation coefficients of the input array.

    Except for the handling of missing data this function does the same as
    `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the
        relationship is transposed: each column represents a variable,
        while the rows contain observations.
    bias : bool, optional
        Default normalization (False) is by ``(N-1)``, where ``N`` is the
        number of observations given (unbiased estimate). If `bias` is True,
        then normalization is by ``N``. This keyword can be overridden by
        the keyword ``ddof`` in numpy versions >= 1.5.
    allow_masked : bool, optional
        If True, masked values are propagated pair-wise: if a value is
        masked in `x`, the corresponding value is masked in `y`.
        If False, an exception is raised when some values are missing.
    ddof : {None, int}, optional
        .. versionadded:: 1.5
        If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
        the number of observations; this overrides the value implied by
        ``bias``. The default value is ``None``.

    See Also
    --------
    numpy.corrcoef : Equivalent function in top-level NumPy module.
    cov : Estimate the covariance matrix.
    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be an integer")
    # Set up ddof: `bias` is only consulted when `ddof` is not given.
    if ddof is None:
        if bias:
            ddof = 0
        else:
            ddof = 1
    # Get the data
    (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
    # Compute the covariance matrix.  `xnotmask` counts, per pair of
    # variables, how many observations are jointly unmasked, so `fact`
    # is the pairwise normalization (count - ddof).
    if not rowvar:
        fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
        c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
    else:
        fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
        c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
    # Check whether we have a scalar: a 0-d covariance has no diagonal,
    # in which case the correlation of a variable with itself is 1.
    try:
        diag = ma.diagonal(c)
    except ValueError:
        return 1
    # No masked values anywhere: the denominator is simply the outer
    # product of the standard deviations.
    if xnotmask.all():
        _denom = ma.sqrt(ma.multiply.outer(diag, diag))
    else:
        # With missing data, each off-diagonal denominator must be built
        # from the *pairwise* variances (computed on the jointly unmasked
        # observations of the two variables involved).
        _denom = diagflat(diag)
        n = x.shape[1 - rowvar]
        if rowvar:
            for i in range(n - 1):
                for j in range(i + 1, n):
                    _x = mask_cols(vstack((x[i], x[j]))).var(axis=1, ddof=ddof)
                    _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
        else:
            for i in range(n - 1):
                for j in range(i + 1, n):
                    _x = mask_cols(
                        vstack((x[:, i], x[:, j]))).var(axis=1, ddof=ddof)
                    _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
    return c / _denom
#####--------------------------------------------------------------------------
#---- --- Concatenation helpers ---
#####--------------------------------------------------------------------------
class MAxisConcatenator(AxisConcatenator):
    """
    Translate slice objects to concatenation along an axis.

    Masked-array counterpart of `numpy.lib.index_tricks.AxisConcatenator`.
    For documentation on usage, see `mr_class`.

    See Also
    --------
    mr_class
    """
    def __init__(self, axis=0):
        # matrix=False: masked arrays have no matrix subclass to return.
        AxisConcatenator.__init__(self, axis, matrix=False)
    def __getitem__(self, key):
        # A single string key would request a matrix ('r'/'c'), which is
        # not supported for masked arrays.
        if isinstance(key, str):
            raise MAError("Unavailable for masked array.")
        if not isinstance(key, tuple):
            key = (key,)
        objs = []             # the pieces to concatenate
        scalars = []          # positions of scalar entries (for upcasting)
        final_dtypedescr = None
        for k in range(len(key)):
            scalar = False
            if isinstance(key[k], slice):
                # Slices build ranges, mirroring np.r_: a complex step
                # means "number of points" and selects linspace.
                step = key[k].step
                start = key[k].start
                stop = key[k].stop
                if start is None:
                    start = 0
                if step is None:
                    step = 1
                if isinstance(step, complex):
                    size = int(abs(step))
                    newobj = np.linspace(start, stop, num=size)
                else:
                    newobj = np.arange(start, stop, step)
            elif isinstance(key[k], str):
                # 'r'/'c' would toggle matrix output; an integer string
                # (e.g. '1') selects the concatenation axis.
                if (key[k] in 'rc'):
                    self.matrix = True
                    self.col = (key[k] == 'c')
                    continue
                try:
                    self.axis = int(key[k])
                    continue
                except (ValueError, TypeError):
                    raise ValueError("Unknown special directive")
            elif type(key[k]) in np.ScalarType:
                # Wrap scalars as 1-element arrays; remember them so they
                # can be upcast to the common dtype afterwards.
                newobj = asarray([key[k]])
                scalars.append(k)
                scalar = True
            else:
                newobj = key[k]
            objs.append(newobj)
            # Track the widest dtype among the non-scalar array pieces.
            if isinstance(newobj, ndarray) and not scalar:
                if final_dtypedescr is None:
                    final_dtypedescr = newobj.dtype
                elif newobj.dtype > final_dtypedescr:
                    final_dtypedescr = newobj.dtype
        if final_dtypedescr is not None:
            for k in scalars:
                objs[k] = objs[k].astype(final_dtypedescr)
        res = concatenate(tuple(objs), axis=self.axis)
        return self._retval(res)
class mr_class(MAxisConcatenator):
    """
    Translate slice objects to concatenation along the first axis.

    This is the masked array version of `lib.index_tricks.RClass`.

    See Also
    --------
    lib.index_tricks.RClass

    Examples
    --------
    >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
    array([1, 2, 3, 0, 0, 4, 5, 6])
    """
    def __init__(self):
        # Always concatenate along axis 0, like np.r_.
        MAxisConcatenator.__init__(self, 0)
# Module-level singleton providing the ``mr_[...]`` construction syntax.
mr_ = mr_class()
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
def flatnotmasked_edges(a):
    """
    Find the indices of the first and last unmasked values.

    Expects a 1-D `MaskedArray`; returns None if all values are masked.

    Parameters
    ----------
    a : array_like
        Input 1-D `MaskedArray`.

    Returns
    -------
    edges : ndarray or None
        The indices of the first and last non-masked values in the array,
        or None if all values are masked.

    See Also
    --------
    flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges,
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 1-D arrays.

    Examples
    --------
    >>> a = np.ma.arange(10)
    >>> flatnotmasked_edges(a)
    array([0, 9])
    >>> mask = (a < 3) | (a > 8) | (a == 5)
    >>> a[mask] = np.ma.masked
    >>> flatnotmasked_edges(a)
    array([3, 8])
    >>> a[:] = np.ma.masked
    >>> print(flatnotmasked_edges(a))
    None
    """
    mask = getmask(a)
    # With no mask at all (or an all-False mask) the edges are simply the
    # first and last positions of the flattened array.
    if mask is nomask or not mask.any():
        return np.array([0, a.size - 1])
    candidates = np.flatnonzero(~mask)
    if candidates.size == 0:
        # Everything is masked: there are no edges to report.
        return None
    return candidates[[0, -1]]
def notmasked_edges(a, axis=None):
    """
    Find the indices of the first and last unmasked values along an axis.

    If all values are masked, return None. Otherwise, return a list
    of two tuples, corresponding to the indices of the first and last
    unmasked values respectively.

    Parameters
    ----------
    a : array_like
        The input array.
    axis : int, optional
        Axis along which to perform the operation.
        If None (default), applies to a flattened version of the array.

    Returns
    -------
    edges : ndarray or list
        An array of start and end indexes if there are any masked data in
        the array. If there are no masked data in the array, `edges` is a
        list of the first and last index.

    See Also
    --------
    flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous,
    clump_masked, clump_unmasked

    Examples
    --------
    >>> a = np.arange(9).reshape((3, 3))
    >>> m = np.zeros_like(a)
    >>> m[1:, 1:] = 1
    >>> am = np.ma.array(a, mask=m)
    >>> np.array(am[~am.mask])
    array([0, 1, 2, 3, 6])
    >>> np.ma.extras.notmasked_edges(am)
    array([0, 6])
    """
    a = asarray(a)
    # Flattened case delegates to the 1-D helper.
    if axis is None or a.ndim == 1:
        return flatnotmasked_edges(a)
    m = getmaskarray(a)
    # Build a masked array of coordinate grids (one grid per dimension,
    # each sharing the data mask), so min/max along `axis` pick out the
    # coordinates of the first/last unmasked entries.
    idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
    return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
            tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
def flatnotmasked_contiguous(a):
    """
    Find contiguous unmasked data in a masked array (flattened view).

    Parameters
    ----------
    a : narray
        The input array.

    Returns
    -------
    slice_list : slice or list of slice or None
        A single slice over the whole array when there is no mask, a
        sorted sequence of slices (start index, end index) covering the
        unmasked runs otherwise, or None when everything is masked.

    See Also
    --------
    flatnotmasked_edges, notmasked_contiguous, notmasked_edges,
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 2-D arrays at most.

    Examples
    --------
    >>> a = np.ma.arange(10)
    >>> np.ma.extras.flatnotmasked_contiguous(a)
    slice(0, 10, None)
    >>> mask = (a < 3) | (a > 8) | (a == 5)
    >>> a[mask] = np.ma.masked
    >>> np.ma.extras.flatnotmasked_contiguous(a)
    [slice(3, 5, None), slice(6, 9, None)]
    >>> a[:] = np.ma.masked
    >>> print(np.ma.extras.flatnotmasked_contiguous(a))
    None
    """
    mask = getmask(a)
    if mask is nomask:
        # No mask at all: one run spanning the entire (flattened) array.
        return slice(0, a.size, None)
    runs = []
    position = 0
    # Walk the flattened mask run by run, keeping only the unmasked runs.
    for masked_flag, run in itertools.groupby(mask.ravel()):
        length = len(list(run))
        if not masked_flag:
            runs.append(slice(position, position + length))
        position += length
    return runs or None
def notmasked_contiguous(a, axis=None):
    """
    Find contiguous unmasked data in a masked array along the given axis.

    Parameters
    ----------
    a : array_like
        The input array.
    axis : int, optional
        Axis along which to perform the operation.
        If None (default), applies to a flattened version of the array.

    Returns
    -------
    endpoints : list
        A list of slices (start and end indexes) of unmasked indexes
        in the array.

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 2-D arrays at most.

    Examples
    --------
    >>> a = np.arange(9).reshape((3, 3))
    >>> mask = np.zeros_like(a)
    >>> mask[1:, 1:] = 1
    >>> ma = np.ma.array(a, mask=mask)
    >>> np.ma.extras.notmasked_contiguous(ma)
    [slice(0, 4, None), slice(6, 7, None)]
    """
    a = asarray(a)
    nd = a.ndim
    if nd > 2:
        raise NotImplementedError("Currently limited to atmost 2D array.")
    if axis is None or nd == 1:
        return flatnotmasked_contiguous(a)
    #
    result = []
    # For 2-D input, `other` is the dimension we iterate over (the one
    # not selected by `axis`).
    other = (axis + 1) % 2
    idx = [0, 0]
    idx[axis] = slice(None, None)
    # Extract each 1-D line along `axis` and reuse the flat helper on it.
    for i in range(a.shape[other]):
        idx[other] = i
        # Bug fix: index with a *tuple*.  Indexing an array with a list
        # is interpreted as fancy indexing by modern NumPy and would
        # select the wrong elements (or raise) instead of slicing.
        result.append(flatnotmasked_contiguous(a[tuple(idx)]) or None)
    return result
def _ezclump(mask):
    """
    Find the clumps (runs of identical values) of a 1-D boolean array.

    Returns a list of slices covering the whole array, alternating
    between all-False and all-True stretches.
    """
    if mask.ndim > 1:
        mask = mask.ravel()
    # Positions where consecutive elements differ mark clump boundaries.
    flips = (mask[1:] ^ mask[:-1]).nonzero()[0] + 1
    bounds = [0] + flips.tolist() + [len(mask)]
    return [slice(start, stop) for start, stop in zip(bounds[:-1], bounds[1:])]
def clump_unmasked(a):
    """
    Return list of slices corresponding to the unmasked clumps of a 1-D array.
    (A "clump" is defined as a contiguous region of the array.)

    Parameters
    ----------
    a : ndarray
        A one-dimensional masked array.

    Returns
    -------
    slices : list of slice
        The list of slices, one for each contiguous region of unmasked
        elements in `a`.

    Notes
    -----
    .. versionadded:: 1.4.0

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
    notmasked_contiguous, clump_masked

    Examples
    --------
    >>> a = np.ma.masked_array(np.arange(10))
    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
    >>> np.ma.extras.clump_unmasked(a)
    [slice(3, 6, None), slice(7, 8, None)]
    """
    mask = getattr(a, '_mask', nomask)
    if mask is nomask:
        # Nothing is masked: a single clump spans the whole array.
        return [slice(0, a.size)]
    clumps = _ezclump(mask)
    # _ezclump alternates masked/unmasked runs; which parity holds the
    # unmasked runs depends on whether the array starts masked.
    first_is_masked = a[0] is masked
    return clumps[1::2] if first_is_masked else clumps[::2]
def clump_masked(a):
    """
    Return a list of slices corresponding to the masked clumps of a 1-D array.
    (A "clump" is defined as a contiguous region of the array.)

    Parameters
    ----------
    a : ndarray
        A one-dimensional masked array.

    Returns
    -------
    slices : list of slice
        The list of slices, one for each contiguous region of masked
        elements in `a`.

    Notes
    -----
    .. versionadded:: 1.4.0

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
    notmasked_contiguous, clump_unmasked

    Examples
    --------
    >>> a = np.ma.masked_array(np.arange(10))
    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
    >>> np.ma.extras.clump_masked(a)
    [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
    """
    mask = ma.getmask(a)
    if mask is nomask:
        # No mask: there are no masked clumps at all.
        return []
    clumps = _ezclump(mask)
    if not clumps:
        return clumps
    # Keep only the masked runs: they sit at even or odd positions of the
    # alternating run list depending on the state of the first element.
    return clumps[::2] if a[0] is masked else clumps[1::2]
#####--------------------------------------------------------------------------
#---- Polynomial fit ---
#####--------------------------------------------------------------------------
def vander(x, n=None):
    """
    Generate a Vandermonde matrix.

    Masked values in the input array result in rows of zeros.
    """
    result = np.vander(x, n)
    mask = getmask(x)
    if mask is not nomask:
        # Zero out every row whose generating value was masked.
        result[mask] = 0
    return result
# Combine the masked-value note above with np.vander's full docstring.
vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    Any masked values in `x` are propagated in `y`, and vice-versa.
    """
    x = asarray(x)
    y = asarray(y)
    # Merge the masks of x and y into one joint "missing observation" mask.
    joint_mask = getmask(x)
    if y.ndim == 1:
        joint_mask = mask_or(joint_mask, getmask(y))
    elif y.ndim == 2:
        # For 2-D y, a row is unusable as soon as any of its entries is
        # masked, so mask whole rows first.
        row_mask = getmask(mask_rows(y))
        if row_mask is not nomask:
            joint_mask = mask_or(joint_mask, row_mask[:, 0])
    else:
        raise TypeError("Expected a 1D or 2D array for y!")
    if w is not None:
        w = asarray(w)
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        joint_mask = mask_or(joint_mask, getmask(w))
    if joint_mask is not nomask:
        # Masked observations get zero weight so np.polyfit ignores them.
        w = ~joint_mask if w is None else ~joint_mask * w
    return np.polyfit(x, y, deg, rcond, full, w, cov)
# Inherit the full np.polyfit docstring, augmented with the masked-value note.
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
################################################################################
| bsd-3-clause |
arve0/example_lessons | src/python/lessons/Minecraft2D/Project Resources/variables.py | 1 | 2223 | import pygame
#--------------------------
# Minecraft 2D -- Variables
#--------------------------
# The maximum number of each resource that can be held in the inventory.
#----------------------------------------------------
MAXTILES = 20
# The title bar text/image (set once at import time; requires pygame.init()
# to have been called by the importing module).
#------------------------
pygame.display.set_caption('pygame window')
pygame.display.set_icon(pygame.image.load('player.png'))
# Variables representing colours as (R, G, B) tuples.
#----------------------------------
BLACK = (0, 0, 0 )
BROWN = (153, 76, 0 )
GREEN = (0, 255, 0 )
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
# Integer identifiers for the different resources (also used as list and
# dictionary keys below).
#----------------------------------------------
DIRT = 0
GRASS = 1
WATER = 2
BRICK = 3
# A list of all game resources.
#----------------------------
resources = [DIRT,GRASS,WATER,BRICK]
# A dictionary linking resources to their tile textures (loaded from the
# working directory at import time).
#------------------------------------------
textures = {
    DIRT : pygame.image.load('dirt.png'),
    GRASS : pygame.image.load('grass.png'),
    WATER : pygame.image.load('water.png'),
    BRICK : pygame.image.load('brick.png')
    }
# The number of each resource that we have to start with.
#------------------------------------------------------
inventory = {
    DIRT : 10,
    GRASS : 10,
    WATER : 10,
    BRICK : 0
    }
# The player sprite image.
#----------------
PLAYER = pygame.image.load('player.png')
# Crafting rules: product -> {ingredient: amount consumed}.
#-------------------------
craft = {
    BRICK : { WATER : 1, DIRT : 2 }
    }
# Instruction lines shown to the player.
#-----------------
instructions = [
    "Minecraft 2D",
    "Instructions:",
    " ARROW KEYS = move",
    " SPACE = Pick up tile",
    " NUMBER KEYS = Place tile",
    " SHIFT + NUMBER KEYS = Craft tile",
    "",
    "Crafting Rules:",
    " BRICK = 2xDIRT + 1xWATER"
    ]
| cc0-1.0 |
2014c2g19/2014c2g19 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/sys.py | 92 | 3227 | # hack to return special attributes
from _sys import *
from javascript import JSObject
# Feature-detection flags exposed by the Brython JavaScript runtime.
has_local_storage=__BRYTHON__.has_local_storage
has_json=__BRYTHON__.has_json
argv = ['__main__']
# All installation paths point at the Brython distribution directory.
base_exec_prefix = __BRYTHON__.brython_path
base_prefix = __BRYTHON__.brython_path
builtin_module_names=__BRYTHON__.builtin_module_names
byteorder='little'
def exc_info():
    """Return (type, value, traceback) for the most recent exception,
    taken from the Brython runtime's exception stack."""
    exc = __BRYTHON__.exception_stack[-1]
    return (exc.__class__,exc,exc.traceback)
# Executable-related paths: the "interpreter" is the brython.js script.
exec_prefix = __BRYTHON__.brython_path
executable = __BRYTHON__.brython_path+'/brython.js'
def exit(i=None):
    """Exit by raising SystemExit.

    Mirrors CPython's ``sys.exit``: the optional argument becomes the
    exception's ``code`` attribute (``None`` means a successful exit).

    Parameters
    ----------
    i : object, optional
        Exit status (typically an int) or message.

    Raises
    ------
    SystemExit
        Always.
    """
    # Bug fix: the argument used to be discarded (`raise SystemExit('')`),
    # so the requested exit status never reached the caller.
    raise SystemExit(i)
class flag_class:
    """Stand-in for CPython's ``sys.flags``: every flag is off except
    ``hash_randomization``, which Brython reports as enabled."""
    def __init__(self):
        # All boolean/counter flags default to "off" (0) ...
        for name in ('debug', 'inspect', 'interactive', 'optimize',
                     'dont_write_bytecode', 'no_user_site', 'no_site',
                     'ignore_environment', 'verbose', 'bytes_warning',
                     'quiet'):
            setattr(self, name, 0)
        # ... except hash randomization, which is always on.
        self.hash_randomization = 1
# Module-level singleton mirroring ``sys.flags``.
flags=flag_class()
def getfilesystemencoding(*args, **kw):
    """getfilesystemencoding() -> string

    Return the encoding used to convert Unicode filenames into
    operating system filenames.  Browser-hosted Brython always uses
    UTF-8; any arguments are accepted for call-compatibility and ignored.
    """
    return 'utf-8'
# Pretend to be a 32-bit build: 2**31 - 1.
maxsize=2147483647
# Highest Unicode code point (0x10FFFF).
maxunicode=1114111
path = __BRYTHON__.path
path_hooks = list(JSObject(__BRYTHON__.path_hooks))
platform="brython"
prefix = __BRYTHON__.brython_path
version = '.'.join(str(x) for x in __BRYTHON__.version_info[:3])
#todo, put in a 'real' date, etc
version += " (default, Feb 29 2013, 00:00:00) \n[Javascript 1.5]"
hexversion = 0x03000000 # python 3.0
class __version_info(object):
    """Lightweight ``sys.version_info`` look-alike built from the
    5-element sequence (major, minor, micro, releaselevel, serial)
    supplied by the Brython runtime."""
    def __init__(self, version_info):
        # Keep the raw sequence and expose the usual named fields.
        self.version_info = version_info
        self.major = version_info[0]
        self.minor = version_info[1]
        self.micro = version_info[2]
        self.releaselevel = version_info[3]
        self.serial = version_info[4]
    def __getitem__(self, index):
        # Lists are returned as tuples for version_info-like immutability.
        if isinstance(self.version_info[index], list):
            return tuple(self.version_info[index])
        return self.version_info[index]
    def hexversion(self):
        # Bug fix: the original used ``try/finally`` with a ``return`` in
        # the ``finally`` clause, which unconditionally overrode the
        # ``try`` return -- so the minor/micro digits were always dropped.
        # Fall back to the short form only when formatting actually fails
        # (e.g. a non-numeric micro such as 'rc1').
        try:
            return '0%d0%d0%d' % (self.major, self.minor, self.micro)
        except (TypeError, ValueError):
            return '0%d0000' % (self.major)
    def __str__(self):
        _s="sys.version(major=%d, minor=%d, micro=%d, releaselevel='%s', serial=%d)"
        return _s % (self.major, self.minor, self.micro,
                     self.releaselevel, self.serial)
# Eventually this needs to track the real Python version (3.0, 3.1, ...).
version_info=__version_info(__BRYTHON__.version_info)
class _implementation:
    """Mirror of ``sys.implementation`` describing the Brython runtime."""
    def __init__(self):
        self.name='brython'
        self.version = __version_info(__BRYTHON__.implementation)
        self.hexversion = self.version.hexversion()
        # No bytecode cache in the browser.
        self.cache_tag=None
    def __repr__(self):
        return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
    def __str__(self):
        return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
implementation=_implementation()
warnoptions=[]
# Delete helper names that are not part of the public sys module namespace.
del JSObject
del _implementation
| gpl-2.0 |
elenaoat/tests | twisted-deferred/defer-block.py | 10 | 1216 | import sys, time
from twisted.internet.defer import Deferred
def start_chain(_):
    # First link in the callback chain: announce that callbacks are running.
    print "The start of the callback chain."
def blocking_poem(_):
    # Writes the poem synchronously with time.sleep, demonstrating that a
    # long-running callback blocks the Deferred *and* the reactor while
    # it runs -- which is the point of this example.
    def delayed_write(s, delay):
        # Sleep first, then emit `s`; flush keeps the output in order.
        time.sleep(delay)
        sys.stdout.write(s)
        sys.stdout.flush()
    delayed_write('\n', 0)
    delayed_write('I', .6)
    delayed_write(' block', .4)
    delayed_write('\n and', 1)
    delayed_write(' the', .4)
    delayed_write(' deferred', .6)
    delayed_write('\n blocks', 1.5)
    delayed_write(' with', .2)
    delayed_write(' me', .2)
    delayed_write('\nas does', 1)
    delayed_write('\n the reactor', .6)
    delayed_write('\nkeep that', 1)
    delayed_write('\n factor', .6)
    delayed_write('\nin', 1)
    delayed_write(' mind', .4)
    delayed_write('\n\n', 2)
def end_chain(_):
    # Last link: stop the reactor so the program can finish.
    print "The end of the callback chain."
    from twisted.internet import reactor
    reactor.stop()
# Build the three-step callback chain on a fresh Deferred.
d = Deferred()
d.addCallback(start_chain)
d.addCallback(blocking_poem)
d.addCallback(end_chain)
def fire():
    # Firing runs the whole (blocking) chain synchronously, so the final
    # print only happens after the poem has been written.
    print 'Firing deferred.'
    d.callback(True)
    print 'Firing finished.'
from twisted.internet import reactor
reactor.callWhenRunning(fire)
print 'Starting reactor.'
reactor.run()
print 'Done.'
| mit |
massot/odoo | addons/hr_holidays/tests/test_holidays_flow.py | 338 | 10513 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.addons.hr_holidays.tests.common import TestHrHolidaysBase
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestHolidaysFlow(TestHrHolidaysBase):
    """End-to-end workflow tests for leave requests, covering both
    unlimited and limited (allocated, double-validated) leave types
    across the employee / HR user / HR manager access levels."""
    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_00_leave_request_flow(self):
        """ Testing leave request flow """
        cr, uid = self.cr, self.uid
        def _check_holidays_status(holiday_status, ml, lt, rl, vrl):
            # Compare all four day counters of a leave type at once:
            # max, taken, remaining, and virtual remaining.
            self.assertEqual(holiday_status.max_leaves, ml,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.leaves_taken, lt,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.remaining_leaves, rl,
                             'hr_holidays: wrong type days computation')
            self.assertEqual(holiday_status.virtual_remaining_leaves, vrl,
                             'hr_holidays: wrong type days computation')
        # HrUser creates some holiday statuses -> crash because only HrManagers should do this
        with self.assertRaises(AccessError):
            self.holidays_status_dummy = self.hr_holidays_status.create(cr, self.user_hruser_id, {
                'name': 'UserCheats',
                'limit': True,
            })
        # HrManager creates some holiday statuses
        self.holidays_status_0 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
            'name': 'WithMeetingType',
            'limit': True,
            'categ_id': self.registry('calendar.event.type').create(cr, self.user_hrmanager_id, {'name': 'NotLimitedMeetingType'}),
        })
        self.holidays_status_1 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
            'name': 'NotLimited',
            'limit': True,
        })
        # 'Limited' requires an allocation and a two-step validation.
        self.holidays_status_2 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
            'name': 'Limited',
            'limit': False,
            'double_validation': True,
        })
        # --------------------------------------------------
        # Case1: unlimited type of leave request
        # --------------------------------------------------
        # Employee creates a leave request for another employee -> should crash
        with self.assertRaises(except_orm):
            self.hr_holidays.create(cr, self.user_employee_id, {
                'name': 'Hol10',
                'employee_id': self.employee_hruser_id,
                'holiday_status_id': self.holidays_status_1,
                'date_from': (datetime.today() - relativedelta(days=1)),
                'date_to': datetime.today(),
                'number_of_days_temp': 1,
            })
        # Clean up the rejected record so it cannot interfere later.
        ids = self.hr_holidays.search(cr, uid, [('name', '=', 'Hol10')])
        self.hr_holidays.unlink(cr, uid, ids)
        # Employee creates a leave request in a no-limit category
        hol1_id = self.hr_holidays.create(cr, self.user_employee_id, {
            'name': 'Hol11',
            'employee_id': self.employee_emp_id,
            'holiday_status_id': self.holidays_status_1,
            'date_from': (datetime.today() - relativedelta(days=1)),
            'date_to': datetime.today(),
            'number_of_days_temp': 1,
        })
        hol1 = self.hr_holidays.browse(cr, self.user_hruser_id, hol1_id)
        self.assertEqual(hol1.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')
        # Employee validates its leave request -> should not work
        self.hr_holidays.signal_workflow(cr, self.user_employee_id, [hol1_id], 'validate')
        hol1.refresh()
        self.assertEqual(hol1.state, 'confirm', 'hr_holidays: employee should not be able to validate its own leave request')
        # HrManager validates the employee leave request (note: performed
        # with the manager uid, not the HR user).
        self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol1_id], 'validate')
        hol1.refresh()
        self.assertEqual(hol1.state, 'validate', 'hr_holidays: validates leave request should be in validate state')
        # --------------------------------------------------
        # Case2: limited type of leave request
        # --------------------------------------------------
        # Employee creates a new leave request at the same time -> crash, avoid interlapping
        with self.assertRaises(except_orm):
            self.hr_holidays.create(cr, self.user_employee_id, {
                'name': 'Hol21',
                'employee_id': self.employee_emp_id,
                'holiday_status_id': self.holidays_status_1,
                'date_from': (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M'),
                'date_to': datetime.today(),
                'number_of_days_temp': 1,
            })
        # Employee creates a leave request in a limited category -> crash, not enough days left
        with self.assertRaises(except_orm):
            self.hr_holidays.create(cr, self.user_employee_id, {
                'name': 'Hol22',
                'employee_id': self.employee_emp_id,
                'holiday_status_id': self.holidays_status_2,
                'date_from': (datetime.today() + relativedelta(days=0)).strftime('%Y-%m-%d %H:%M'),
                'date_to': (datetime.today() + relativedelta(days=1)),
                'number_of_days_temp': 1,
            })
        # Clean transaction
        self.hr_holidays.unlink(cr, uid, self.hr_holidays.search(cr, uid, [('name', 'in', ['Hol21', 'Hol22'])]))
        # HrUser allocates some leaves to the employee
        aloc1_id = self.hr_holidays.create(cr, self.user_hruser_id, {
            'name': 'Days for limited category',
            'employee_id': self.employee_emp_id,
            'holiday_status_id': self.holidays_status_2,
            'type': 'add',
            'number_of_days_temp': 2,
        })
        # HrUser validates the allocation request (both steps of the
        # double validation).
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [aloc1_id], 'validate')
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [aloc1_id], 'second_validate')
        # Checks Employee has effectively some days left
        hol_status_2 = self.hr_holidays_status.browse(cr, self.user_employee_id, self.holidays_status_2)
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
        # Employee creates a leave request in the limited category, now that he has some days left
        hol2_id = self.hr_holidays.create(cr, self.user_employee_id, {
            'name': 'Hol22',
            'employee_id': self.employee_emp_id,
            'holiday_status_id': self.holidays_status_2,
            'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
            'date_to': (datetime.today() + relativedelta(days=3)),
            'number_of_days_temp': 1,
        })
        hol2 = self.hr_holidays.browse(cr, self.user_hruser_id, hol2_id)
        # Check left days: - 1 virtual remaining day
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 1.0)
        # HrUser validates the first step
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'validate')
        hol2.refresh()
        self.assertEqual(hol2.state, 'validate1',
                         'hr_holidays: first validation should lead to validate1 state')
        # HrUser validates the second step
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'second_validate')
        hol2.refresh()
        self.assertEqual(hol2.state, 'validate',
                         'hr_holidays: second validation should lead to validate state')
        # Check left days: - 1 day taken
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 1.0, 1.0, 1.0)
        # HrManager finds an error: he refuses the leave request
        self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'refuse')
        hol2.refresh()
        self.assertEqual(hol2.state, 'refuse',
                         'hr_holidays: refuse should lead to refuse state')
        # Check left days: 2 days left again
        hol_status_2.refresh()
        _check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
        # Annoyed, HrUser tries to fix its error and tries to reset the leave request -> does not work, only HrManager
        self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'reset')
        self.assertEqual(hol2.state, 'refuse',
                         'hr_holidays: hr_user should not be able to reset a refused leave request')
        # HrManager resets the request
        self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'reset')
        hol2.refresh()
        self.assertEqual(hol2.state, 'draft',
                         'hr_holidays: resetting should lead to draft state')
        # HrManager changes the date and put too much days -> crash when confirming
        self.hr_holidays.write(cr, self.user_hrmanager_id, [hol2_id], {
            'date_from': (datetime.today() + relativedelta(days=4)).strftime('%Y-%m-%d %H:%M'),
            'date_to': (datetime.today() + relativedelta(days=7)),
            'number_of_days_temp': 4,
        })
        with self.assertRaises(except_orm):
            self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'confirm')
| agpl-3.0 |
AunShiLord/sympy | sympy/integrals/tests/test_heurisch.py | 1 | 10216 | from sympy import Rational, sqrt, symbols, sin, exp, log, sinh, cosh, cos, pi, \
I, erf, tan, asin, asinh, acos, Function, Derivative, diff, simplify, \
LambertW, Eq, Piecewise, Symbol, Add, ratsimp, Integral, Sum
from sympy.integrals.heurisch import components, heurisch, heurisch_wrapper
from sympy.utilities.pytest import XFAIL, skip, slow, ON_TRAVIS
x, y, z, nu = symbols('x,y,z,nu')
f = Function('f')
def test_components():
    # components(expr, x) returns the set of sub-expressions of `expr`
    # that depend on x and are relevant to the heurisch algorithm.
    assert components(x*y, x) == set([x])
    assert components(1/(x + y), x) == set([x])
    assert components(sin(x), x) == set([sin(x), x])
    assert components(sin(x)*sqrt(log(x)), x) == \
        set([log(x), sin(x), sqrt(log(x)), x])
    assert components(x*sin(exp(x)*y), x) == \
        set([sin(y*exp(x)), x, exp(x)])
    assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \
        set([sin(x), x**Rational(1, 54), sqrt(sin(x)), x])
    assert components(f(x), x) == \
        set([x, f(x)])
    assert components(Derivative(f(x), x), x) == \
        set([x, f(x), Derivative(f(x), x)])
    # The duplicate Derivative entry collapses in the set literal.
    assert components(f(x)*diff(f(x), x), x) == \
        set([x, f(x), Derivative(f(x), x), Derivative(f(x), x)])
def test_heurisch_polynomials():
    # Polynomial antiderivatives: x**n -> x**(n+1)/(n+1).
    assert heurisch(1, x) == x
    assert heurisch(x, x) == x**2/2
    assert heurisch(x**17, x) == x**18/18
def test_heurisch_fractions():
    # Rational-function antiderivatives yield logarithms.
    assert heurisch(1/x, x) == log(x)
    assert heurisch(1/(2 + x), x) == log(x + 2)
    assert heurisch(1/(x + sin(y)), x) == log(x + sin(y))
    # Up to a constant, where C = 5*pi*I/12, Mathematica gives identical
    # result in the first case. The difference is because sympy changes
    # signs of expressions without any care.
    # XXX ^ ^ ^ is this still correct?
    assert heurisch(5*x**5/(
        2*x**6 - 5), x) in [5*log(2*x**6 - 5) / 12, 5*log(-2*x**6 + 5) / 12]
    assert heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6 + 5) / 12
    assert heurisch(1/x**2, x) == -1/x
    assert heurisch(-1/x**5, x) == 1/(4*x**4)
def test_heurisch_log():
    # Logarithmic integrands; equivalent forms are both accepted.
    assert heurisch(log(x), x) == x*log(x) - x
    assert heurisch(log(3*x), x) == -x + x*log(3) + x*log(x)
    assert heurisch(log(x**2), x) in [x*log(x**2) - 2*x, 2*x*log(x) - 2*x]
def test_heurisch_exp():
    # Exponential integrands, including general bases (2**x).
    assert heurisch(exp(x), x) == exp(x)
    assert heurisch(exp(-x), x) == -exp(-x)
    assert heurisch(exp(17*x), x) == exp(17*x) / 17
    assert heurisch(x*exp(x), x) == x*exp(x) - exp(x)
    assert heurisch(x*exp(x**2), x) == exp(x**2) / 2
    # The Gaussian has no elementary antiderivative without hints.
    assert heurisch(exp(-x**2), x) is None
    assert heurisch(2**x, x) == 2**x/log(2)
    assert heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2)
    # `.function` extracts the integrand/summand for integration in x or z.
    assert heurisch(Integral(x**z*y, (y, 1, 2), (z, 2, 3)).function, x) == (x*x**z*y)/(z+1)
    assert heurisch(Sum(x**z, (z, 1, 2)).function, z) == x**z/log(x)
def test_heurisch_trigonometric():
    # Trigonometric integrands; several equivalent antiderivative forms
    # are accepted where the algorithm's output is not canonical.
    assert heurisch(sin(x), x) == -cos(x)
    assert heurisch(pi*sin(x) + 1, x) == x - pi*cos(x)
    assert heurisch(cos(x), x) == sin(x)
    assert heurisch(tan(x), x) in [
        log(1 + tan(x)**2)/2,
        log(tan(x) + I) + I*x,
        log(tan(x) - I) - I*x,
    ]
    assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y)
    assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x)
    # gives sin(x) in answer when run via setup.py and cos(x) when run via py.test
    assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2, -cos(x)**2 / 2]
    assert heurisch(cos(x)/sin(x), x) == log(sin(x))
    assert heurisch(x*sin(7*x), x) == sin(7*x) / 49 - x*cos(7*x) / 7
    assert heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) -
                    2*sin(x) + 2*x*cos(x))
    assert heurisch(acos(x/4) * asin(x/4), x) == 2*x - (sqrt(16 - x**2))*asin(x/4) \
        + (sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4)
def test_heurisch_hyperbolic():
    # Hyperbolic integrands, including an inverse hyperbolic factor.
    assert heurisch(sinh(x), x) == cosh(x)
    assert heurisch(cosh(x), x) == sinh(x)
    assert heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x)
    assert heurisch(x*cosh(x), x) == x*sinh(x) - cosh(x)
    assert heurisch(
        x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 + x**2)/4
def test_heurisch_mixed():
    # A product of exponential and trigonometric factors.
    assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2
def test_heurisch_radicals():
assert heurisch(1/sqrt(x), x) == 2*sqrt(x)
assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x)
assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5
assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3
y = Symbol('y')
assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \
2*sqrt(x)*cos(y*sqrt(x))/y
assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise(
(0, Eq(y, 0)),
(-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True))
y = Symbol('y', positive=True)
assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \
2*sqrt(x)*cos(y*sqrt(x))/y
def test_heurisch_special():
    """Integrands involving the special function erf()."""
    assert heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi)
    assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4
def test_heurisch_symbolic_coeffs():
    """Integrands with symbolic (non-numeric) coefficients."""
    assert heurisch(1/(x + y), x) == log(x + y)
    assert heurisch(1/(x + sqrt(2)), x) == log(x + sqrt(2))
    # Verified via differentiation since the closed form is not unique.
    assert simplify(diff(heurisch(log(x + y + z), y), y)) == log(x + y + z)
def test_heurisch_symbolic_coeffs_1130():
    """Regression test for issue 1130: symbolic coefficient in denominator.

    With unrestricted y the wrapper must special-case y == 0; with y
    declared positive a single closed form (in either of two equivalent
    shapes) is acceptable.
    """
    y = Symbol('y')
    assert heurisch_wrapper(1/(x**2 + y), x) == Piecewise(
        (-1/x, Eq(y, 0)),
        (-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True))
    y = Symbol('y', positive=True)
    assert heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2 -
                                                 I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y)) /
                                                 (2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))]
def test_heurisch_hacking():
    """Forms reachable only through heurisch's built-in heuristic hints
    (hints=[] enables them all): sqrt(1 +/- a*x**2), Gaussians, etc."""
    assert heurisch(sqrt(1 + 7*x**2), x, hints=[]) == \
        x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14
    assert heurisch(sqrt(1 - 7*x**2), x, hints=[]) == \
        x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14
    assert heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) == \
        sqrt(7)*asinh(sqrt(7)*x)/7
    assert heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) == \
        sqrt(7)*asin(sqrt(7)*x)/7
    assert heurisch(exp(-7*x**2), x, hints=[]) == \
        sqrt(7*pi)*erf(sqrt(7)*x)/14
    assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) == \
        asin(2*x/3)/2
    assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) == \
        asinh(2*x/3)/2
def test_heurisch_function():
    """An unevaluated function has no elementary antiderivative: None."""
    assert heurisch(f(x), x) is None
@XFAIL
def test_heurisch_function_derivative():
    # TODO: it looks like this used to work just by coincindence and
    # thanks to sloppy implementation. Investigate why this used to
    # work at all and if support for this can be restored.
    """Integrals of f(x)**n * f'(x) — currently expected to fail."""
    df = diff(f(x), x)

    assert heurisch(f(x)*df, x) == f(x)**2/2
    assert heurisch(f(x)**2*df, x) == f(x)**3/3
    assert heurisch(df/f(x), x) == log(f(x))
def test_heurisch_wrapper():
    """heurisch_wrapper() degenerate-denominator handling (Piecewise on y)."""
    f = 1/(y + x)
    assert heurisch_wrapper(f, x) == log(x + y)
    f = 1/(y - x)
    assert heurisch_wrapper(f, x) == -log(x - y)
    f = 1/((y - x)*(y + x))
    assert heurisch_wrapper(f, x) == \
        Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y - log(x - y)/2/y, True))
    # issue 6926
    f = sqrt(x**2/((y - x)*(y + x)))
    assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \
        - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x
def test_issue_3609():
    """Regression test for issue 3609."""
    assert heurisch(1/(x * (1 + log(x)**2)), x) == I*log(log(x) + I)/2 - \
        I*log(log(x) - I)/2
### These are examples from the Poor Man's Integrator
### http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/
def test_pmint_rat():
    """Rational-function example from the Poor Man's Integrator suite."""
    # TODO: heurisch() is off by a constant: -3/4. Possibly different permutation
    # would give the optimal result?

    def drop_const(expr, x):
        # Strip additive terms free of x so the constant offset is ignored.
        if expr.is_Add:
            return Add(*[ arg for arg in expr.args if arg.has(x) ])
        else:
            return expr

    f = (x**7 - 24*x**4 - 4*x**2 + 8*x - 8)/(x**8 + 6*x**6 + 12*x**4 + 8*x**2)
    g = (4 + 8*x**2 + 6*x + 3*x**3)/(x**5 + 4*x**3 + 4*x) + log(x)

    assert drop_const(ratsimp(heurisch(f, x)), x) == g
def test_pmint_trig():
    """Trigonometric example from the Poor Man's Integrator suite."""
    f = (x - tan(x)) / tan(x)**2 + tan(x)
    g = -x**2/2 - x/tan(x) + log(tan(x)**2 + 1)/2

    assert heurisch(f, x) == g
@slow  # 8 seconds on 3.4 GHz
def test_pmint_logexp():
    """Mixed log/exp example from the Poor Man's Integrator suite."""
    f = (1 + x + x*exp(x))*(x + log(x) + exp(x) - 1)/(x + log(x) + exp(x))**2/x
    g = log(x**2 + 2*x*exp(x) + 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x + exp(x) + log(x))

    # TODO: Optimal solution is g = 1/(x + log(x) + exp(x)) + log(x + log(x) + exp(x)),
    # but SymPy requires a lot of guidance to properly simplify heurisch() output.

    assert ratsimp(heurisch(f, x)) == g
@slow  # 8 seconds on 3.4 GHz
@XFAIL  # there's a hash dependent failure lurking here
def test_pmint_erf():
    """erf-based example from the Poor Man's Integrator suite."""
    f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) + 1)
    g = sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) - 4)

    assert ratsimp(heurisch(f, x)) == g
def test_pmint_LambertW():
    """LambertW example from the Poor Man's Integrator suite."""
    f = LambertW(x)
    g = x*LambertW(x) - x + x/LambertW(x)

    assert heurisch(f, x) == g
@XFAIL
def test_pmint_besselj():
    # TODO: in both cases heurisch() gives None. Wrong besselj() derivative?
    """Bessel-function examples from the Poor Man's Integrator suite."""
    f = besselj(nu + 1, x)/besselj(nu, x)
    g = nu*log(x) - log(besselj(nu, x))

    assert heurisch(f, x) == g

    f = (nu*besselj(nu, x) - x*besselj(nu + 1, x))/x
    g = besselj(nu, x)

    assert heurisch(f, x) == g
@slow  # 110 seconds on 3.4 GHz
def test_pmint_WrightOmega():
    """Wright omega example from the Poor Man's Integrator suite."""
    if ON_TRAVIS:
        skip("Too slow for travis.")

    def omega(x):
        # Wright omega expressed through LambertW.
        return LambertW(exp(x))

    f = (1 + omega(x) * (2 + cos(omega(x)) * (x + omega(x))))/(1 + omega(x))/(x + omega(x))
    g = log(x + LambertW(exp(x))) + sin(LambertW(exp(x)))

    assert heurisch(f, x) == g
# TODO: convert the rest of PMINT tests:
# Airy functions
# f = (x - AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2)
# g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x))
# f = x**2 * AiryAi(x)
# g = -AiryAi(x) + AiryAi(1, x)*x
# Whittaker functions
# f = WhittakerW(mu + 1, nu, x) / (WhittakerW(mu, nu, x) * x)
# g = x/2 - mu*ln(x) - ln(WhittakerW(mu, nu, x))
| bsd-3-clause |
ahmadassaf/LODStats | lodstats/util/interfaces.py | 1 | 4652 | import datetime
import urlparse
import logging
logger = logging.getLogger("lodstats")
class CallbackInterface(object):
    """Mixin providing a rate-limited progress-callback mechanism.

    A callback passed to ratelimited_callback_caller() fires at most once
    every ``callback_delay`` seconds (and always on the very first call).
    """

    def __init__(self):
        # TODO: move to config file
        self.callback_delay = 2            # minimum seconds between callback firings
        self.time_of_last_callback = None  # datetime of the last firing, or None

    def ratelimited_callback_caller(self, callback_function):
        """helper for callbacks that should only fire every self.callback_delay seconds"""
        if callback_function is None:
            return
        now = datetime.datetime.now()
        if self.time_of_last_callback is None:
            # First invocation: always fire and start the clock.
            self.time_of_last_callback = now
            callback_function(self)
        else:
            # BUG FIX: the original used timedelta.seconds, which ignores the
            # .days component (it wraps every 24 hours); total_seconds() gives
            # the true elapsed time.
            time_delta = (now - self.time_of_last_callback).total_seconds()
            if time_delta >= self.callback_delay:
                callback_function(self)
                self.time_of_last_callback = now
class UriParserInterface(object):
    """Mixin with URI helpers: scheme handling, RDF-format and compression
    detection by file extension, and filename/extension extraction.

    Behavior is kept compatible with the original implementation; spots
    whose original behavior looks questionable are flagged with
    NOTE(review) rather than silently changed.
    """

    # Filename components (dot-separated) mapped to RDF serialization names.
    _EXTENSION_TO_RDF_FORMAT = {
        'ttl': 'ttl',
        'nt': 'nt',
        'n3': 'n3',
        'rdf': 'rdf', 'owl': 'rdf', 'rdfs': 'rdf',
        'nq': 'nq',
        'sparql': 'sparql', 'sparql/': 'sparql',
        'sitemap': 'sitemap',
    }

    def __init__(self):
        pass

    def is_remote(self):
        """Return True if self.uri uses an http(s) scheme (case-insensitive)."""
        return any(self.uri.lower().startswith(x) for x in ('http://', 'https://'))

    def identify_rdf_format(self, uri):
        """Return the guessed RDF serialization of uri.

        Raises NameError when the format cannot be guessed from the file
        extension (exception type kept for backward compatibility).
        """
        rdf_format = self.identify_rdf_format_by_file_extension(uri)
        if rdf_format is None:
            raise NameError("could not guess format")
        return rdf_format

    def identify_rdf_format_by_file_extension(self, uri):
        """Guess the RDF serialization from the dot-separated components of
        the last path segment; return None if no known extension is found."""
        logger.debug("UriParser.get_format: %s" % uri)
        for component in uri.split('/')[-1].split('.'):
            if component in self._EXTENSION_TO_RDF_FORMAT:
                return self._EXTENSION_TO_RDF_FORMAT[component]
        return None

    def identify_rdf_format_by_magic(self, uri):
        # Stub: content-based (magic-number) detection is not implemented.
        return None

    def identify_compression_format(self, uri):
        """guess compression of resource"""
        logger.debug("UriParser.get_compression: %s" % uri)
        compression_format = None
        # NOTE(review): checks are sequential and later ones override, so
        # 'a.tar.gz' ends up as 'gz' (it matches both the tar and gzip
        # checks) -- confirm this precedence is intended.
        if self._has_tar_extension(uri):
            compression_format = 'tar'
        if self._has_zip_extension(uri):
            compression_format = 'zip'
        if self._has_gzip_extension(uri):
            compression_format = 'gz'
        if self._has_bzip2_extension(uri):
            compression_format = 'bz2'
        return compression_format

    def _has_tar_extension(self, uri):
        return any(uri.lower().endswith(x) for x in ('.tgz', '.tar.gz', '.tar.bz2', '.tar'))

    def _has_zip_extension(self, uri):
        return uri.lower().endswith('.zip')

    def _has_gzip_extension(self, uri):
        return uri.lower().endswith('.gz')

    def _has_bzip2_extension(self, uri):
        return uri.lower().endswith('.bz2')

    def identify_filename(self, uri):
        """Return the last path segment of uri."""
        return uri.split('/')[-1]

    def has_scheme(self, uri):
        """Return True if uri carries a non-empty URL scheme."""
        # BUG FIX: the original compared with `scheme is ''` (identity, not
        # equality), which only works by CPython's string interning and emits
        # a SyntaxWarning on modern interpreters.
        return self.get_scheme(uri) != ''

    def get_scheme(self, uri):
        """Return the URL scheme of uri ('' when absent)."""
        parsed_uri = urlparse.urlparse(uri)
        return parsed_uri.scheme

    def fix_uri(self, uri):
        """Normalize uri: http(s)/file URIs pass through unchanged; bare
        filesystem paths are converted to absolute file:// URIs."""
        scheme = self.get_scheme(uri)
        if scheme in ("http", "https", "file"):
            return uri
        if scheme == "":
            import os
            return "file://%s" % os.path.abspath(uri)
        # NOTE(review): any other scheme (ftp, ...) returns None, exactly as
        # the original implicitly did -- confirm this is intended.
        return None

    def get_file_extension(self, filename=None):
        """Return a (possibly double) extension such as '.tar.gz' for
        filename (defaults to self.filename); '' when none can be found.

        NOTE(review): for single-extension names like 'file.txt' this
        returns '.file.txt' -- the stem is treated as an inner extension.
        Preserved as-is for backward compatibility; confirm intent.
        """
        if filename is None:
            filename = self.filename
        extension = ""
        parts = filename.split(".")
        try:
            extension_outer = parts[-1]
            extension_inner = parts[-2]
        except IndexError as e:
            logging.error("Could not determine extension %s" % (str(e),))
            return extension
        # Only components shorter than 6 chars are treated as extensions.
        if extension_outer is not None and len(extension_outer) < 6:
            extension = extension + "." + extension_inner
        if extension_inner is not None and len(extension_inner) < 6:
            extension = extension + "." + extension_outer
        # Strip query-string leftovers such as '.gz?download=1'.
        before_query = extension.split("?")
        if len(before_query) > 1:
            extension = before_query[0]
        return extension
| gpl-3.0 |
cchurch/ansible-modules-core | cloud/digital_ocean/digital_ocean_sshkey.py | 22 | 4933 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False
- digital_ocean_sshkey:
state: present
name: my_ssh_key
ssh_pub_key: 'ssh-rsa AAAA...'
client_id: XXX
api_key: XXX
'''
import os
import traceback
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError:
HAS_DOPY = False
from ansible.module_utils.basic import AnsibleModule
class JsonfyMixIn(object):
    """Mix-in that serializes an object as the plain dict of its attributes."""

    def to_json(self):
        # vars(obj) is the documented equivalent of obj.__dict__.
        return vars(self)
class SSH(JsonfyMixIn):
    # Class-wide DoManager client; must be initialized once via SSH.setup()
    # before any API-backed method below is used.
    manager = None

    def __init__(self, ssh_key_json):
        """Wrap one SSH-key JSON payload returned by the DigitalOcean API,
        exposing its fields (id, name, ...) as instance attributes."""
        self.__dict__.update(ssh_key_json)
    update_attr = __init__  # alias: re-populate an instance from a fresh payload

    def destroy(self):
        """Delete this key via the API; always returns True on completion."""
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, client_id, api_key):
        """Create the shared DoManager used by all subsequent API calls."""
        cls.manager = DoManager(client_id, api_key)

    @classmethod
    def find(cls, name):
        """Return the first key whose name matches, or False if absent or
        name is falsy."""
        if not name:
            return False
        keys = cls.list_all()
        for key in keys:
            if key.name == name:
                return key
        return False

    @classmethod
    def list_all(cls):
        """Fetch every SSH key on the account, wrapped as SSH instances."""
        json = cls.manager.all_ssh_keys()
        # NOTE(review): under Python 3 map() returns an iterator, not a list;
        # this module targets Python 2 (see e.message usage in core()).
        return map(cls, json)

    @classmethod
    def add(cls, name, key_pub):
        """Register a new public key and return it as an SSH instance."""
        json = cls.manager.new_ssh_key(name, key_pub)
        return cls(json)
def core(module):
    """Execute the requested present/absent operation for the named SSH key.

    Reads credentials from module params or the DO_CLIENT_ID / DO_API_KEY
    environment variables and exits through module.exit_json/fail_json.
    """
    def getkeyordie(k):
        # Return module.params[k], failing the module run when it is unset.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        module.fail_json(msg='Unable to load %s' % e.message)

    state = module.params['state']

    SSH.setup(client_id, api_key)
    name = getkeyordie('name')
    # BUG FIX: the original tested `state in ('present')`. The parentheses do
    # NOT create a tuple, so that was a substring test against the string
    # 'present' (e.g. 'resent' would match). It only worked because the
    # argument spec restricts the choices. Compare for equality instead.
    if state == 'present':
        key = SSH.find(name)
        if key:
            module.exit_json(changed=False, ssh_key=key.to_json())
        key = SSH.add(name, getkeyordie('ssh_pub_key'))
        module.exit_json(changed=True, ssh_key=key.to_json())

    elif state == 'absent':
        key = SSH.find(name)
        if not key:
            module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
        key.destroy()
        module.exit_json(changed=True)
def main():
    """Module entry point: declare the argument spec and dispatch to core()."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['present', 'absent'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ssh_pub_key = dict(type='str'),
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )
    if not HAS_DOPY:
        module.fail_json(msg='dopy required for this module')

    try:
        core(module)
    except (DoError, Exception) as e:
        # NOTE(review): (DoError, Exception) is redundant -- Exception alone
        # already matches everything caught here.
        module.fail_json(msg=str(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
| gpl-3.0 |
detiber/openshift-ansible-contrib | reference-architecture/gce-cli/ansible/inventory/gce/hosts/gce.py | 3 | 18075 | #!/usr/bin/env python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
Version: 0.0.3
'''
__requires__ = ['pycrypto>=2.6']
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
import argparse
from time import time
import ConfigParser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
try:
import json
except ImportError:
import simplejson as json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
    """File-backed JSON cache for cloud inventory data.

    The cache lives in a single file (cache_path/cache_name) and is
    considered valid while its mtime is younger than cache_max_age seconds.
    """

    def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
                 cache_max_age=300):
        base_dir = os.path.expanduser(cache_path)
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        self.cache_path_cache = os.path.join(base_dir, cache_name)
        self.cache_max_age = cache_max_age

    def is_valid(self, max_age=None):
        ''' Determines if the cache files have expired, or if it is still valid '''
        effective_age = self.cache_max_age if max_age is None else max_age
        if not os.path.isfile(self.cache_path_cache):
            return False
        return os.path.getmtime(self.cache_path_cache) + effective_age > time()

    def get_all_data_from_cache(self, filename=''):
        ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
        target = filename or self.cache_path_cache
        with open(target, 'r') as cache:
            return json.loads(cache.read())

    def write_to_cache(self, data, filename=''):
        ''' Writes data to file as JSON. Returns True. '''
        target = filename or self.cache_path_cache
        with open(target, 'w') as cache:
            cache.write(json.dumps(data))
        return True
class GceInventory(object):
    """Builds and prints Ansible dynamic-inventory JSON from GCE instances,
    using libcloud for API access and CloudInventoryCache for caching."""

    def __init__(self):
        # Cache object
        self.cache = None
        # dictionary containing inventory read from disk
        self.inventory = {}

        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.config = self.get_config()
        self.driver = self.get_gce_driver()
        self.ip_type = self.get_inventory_options()
        if self.ip_type:
            self.ip_type = self.ip_type.lower()

        # Cache management
        start_inventory_time = time()
        cache_used = False
        if self.args.refresh_cache or not self.cache.is_valid():
            self.do_api_calls_update_cache()
        else:
            self.load_inventory_from_cache()
            cache_used = True
            # NOTE(review): this assignment is dead -- it is unconditionally
            # overwritten by the stats assignment just below.
            self.inventory['_meta']['stats'] = {'use_cache': True}
        self.inventory['_meta']['stats'] = {
            'inventory_load_time': time() - start_inventory_time,
            'cache_used': cache_used
        }

        # Just display data for specific host
        if self.args.host:
            print(self.json_format_dict(
                self.inventory['_meta']['hostvars'][self.args.host],
                pretty=self.args.pretty))
        else:
            # Otherwise, assume user wants all instances grouped
            # NOTE(review): 'zones' is computed here but never used; zone
            # filtering actually happens inside group_instances().
            zones = self.parse_env_zones()
            print(self.json_format_dict(self.inventory,
                                        pretty=self.args.pretty))
        sys.exit(0)

    def get_config(self):
        """
        Reads the settings from the gce.ini file.

        Populates a SafeConfigParser object with defaults and
        attempts to read an .ini-style configuration from the filename
        specified in GCE_INI_PATH. If the environment variable is
        not present, the filename defaults to gce.ini in the current
        working directory.
        """
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = ConfigParser.SafeConfigParser(defaults={
            'gce_service_account_email_address': '',
            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'gce_zone': '',
            'libcloud_secrets': '',
            'inventory_ip_type': '',
            'cache_path': '~/.ansible/tmp',
            'cache_max_age': '300'
        })
        if 'gce' not in config.sections():
            config.add_section('gce')
        if 'inventory' not in config.sections():
            config.add_section('inventory')
        if 'cache' not in config.sections():
            config.add_section('cache')

        config.read(gce_ini_path)

        #########
        # Section added for processing ini settings
        #########

        # Set the instance_states filter based on config file options
        self.instance_states = []
        if config.has_option('gce', 'instance_states'):
            states = config.get('gce', 'instance_states')
            # Ignore if instance_states is an empty string.
            if states:
                self.instance_states = states.split(',')

        # Caching
        cache_path = config.get('cache', 'cache_path')
        cache_max_age = config.getint('cache', 'cache_max_age')
        # TODO(supertom): support project-specific caches
        cache_name = 'ansible-gce.cache'
        self.cache = CloudInventoryCache(cache_path=cache_path,
                                         cache_max_age=cache_max_age,
                                         cache_name=cache_name)
        return config

    def get_inventory_options(self):
        """Determine inventory options. Environment variables always
        take precedence over configuration files."""
        ip_type = self.config.get('inventory', 'inventory_ip_type')
        # If the appropriate environment variables are set, they override
        # other configuration
        ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
        return ip_type

    def get_gce_driver(self):
        """Determine the GCE authorization settings and return a
        libcloud driver.
        """
        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = self.config.get('gce', 'libcloud_secrets')
        secrets_found = False
        try:
            import secrets
            args = list(getattr(secrets, 'GCE_PARAMS', []))
            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
            secrets_found = True
        except:
            pass

        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify libcloud secrets file as "
                err += "/absolute/path/to/secrets.py"
                sys.exit(err)
            sys.path.append(os.path.dirname(secrets_path))
            try:
                import secrets
                args = list(getattr(secrets, 'GCE_PARAMS', []))
                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                secrets_found = True
            except:
                pass
        if not secrets_found:
            # Fall back to credentials from gce.ini.
            args = [
                self.config.get('gce', 'gce_service_account_email_address'),
                self.config.get('gce', 'gce_service_account_pem_file_path')
            ]
            kwargs = {'project': self.config.get('gce', 'gce_project_id'),
                      'datacenter': self.config.get('gce', 'gce_zone')}

        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        args[0] = os.environ.get('GCE_EMAIL', args[0])
        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
        kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])

        # Retrieve and return the GCE driver.
        gce = get_driver(Provider.GCE)(*args, **kwargs)
        gce.connection.user_agent_append(
            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
        )
        return gce

    def parse_env_zones(self):
        '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
        If provided, this will be used to filter the results of the grouped_instances call'''
        import csv
        reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
        zones = [r for r in reader]
        return [z for z in zones[0]]

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on GCE')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty format (default: False)')
        parser.add_argument(
            '--refresh-cache', action='store_true', default=False,
            help='Force refresh of cache by making API requests (default: False - use cache files)')
        self.args = parser.parse_args()

    def node_to_dict(self, inst):
        """Convert a libcloud Node into the per-host variable dict."""
        md = {}

        if inst is None:
            return {}

        if 'items' in inst.extra['metadata']:
            for entry in inst.extra['metadata']['items']:
                md[entry['key']] = entry['value']

        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
        subnet = None
        if 'subnetwork' in inst.extra['networkInterfaces'][0]:
            subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
        # default to external IP unless user has specified they prefer internal
        if self.ip_type == 'internal':
            ssh_host = inst.private_ips[0]
        else:
            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]

        # NOTE(review): ssh_host is computed above but unused below, because
        # ansible_ssh_host was repointed at the instance name (see comment in
        # the returned dict) -- confirm whether the dead computation should go.
        return {
            'gce_uuid': inst.uuid,
            'gce_id': inst.id,
            'gce_image': inst.image,
            'gce_machine_type': inst.size,
            'gce_private_ip': inst.private_ips[0],
            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
            'gce_name': inst.name,
            'gce_description': inst.extra['description'],
            'gce_status': inst.extra['status'],
            'gce_zone': inst.extra['zone'].name,
            'gce_tags': inst.extra['tags'],
            'gce_metadata': md,
            'gce_network': net,
            'gce_subnetwork': subnet,
            # OCP on GCP - we are using bastion host for ssh-proxy, so we need
            # instance name here
            'ansible_ssh_host': inst.name
        }

    def load_inventory_from_cache(self):
        ''' Loads inventory from JSON on disk. '''

        try:
            self.inventory = self.cache.get_all_data_from_cache()
            # Lookup below validates that the cached payload has the expected
            # structure; the value itself is unused.
            hosts = self.inventory['_meta']['hostvars']
        except Exception as e:
            print(
                "Invalid inventory file %s. Please rebuild with -refresh-cache option."
                % (self.cache.cache_path_cache))
            raise

    def do_api_calls_update_cache(self):
        ''' Do API calls and save data in cache. '''
        zones = self.parse_env_zones()
        data = self.group_instances(zones)
        self.cache.write_to_cache(data)
        self.inventory = data

    def list_nodes(self):
        """List all nodes, following libcloud's pageToken pagination."""
        all_nodes = []
        params, more_results = {'maxResults': 500}, True
        while more_results:
            self.driver.connection.gce_params = params
            all_nodes.extend(self.driver.list_nodes())
            # libcloud writes the next page token back into gce_params; its
            # absence means the final page has been fetched.
            more_results = 'pageToken' in params
        return all_nodes

    def group_instances(self, zones=None):
        '''Group all instances'''
        groups = {}
        meta = {}
        meta["hostvars"] = {}

        for node in self.list_nodes():

            # This check filters on the desired instance states defined in the
            # config file with the instance_states config option.
            #
            # If the instance_states list is _empty_ then _ALL_ states are returned.
            #
            # If the instance_states list is _populated_ then check the current
            # state against the instance_states list
            if self.instance_states and not node.extra['status'] in self.instance_states:
                continue

            name = node.name

            meta["hostvars"][name] = self.node_to_dict(node)

            zone = node.extra['zone'].name

            # To avoid making multiple requests per zone
            # we list all nodes and then filter the results
            if zones and zone not in zones:
                continue

            if zone in groups:
                groups[zone].append(name)
            else:
                groups[zone] = [name]

            tags = node.extra['tags']
            for t in tags:
                # 'group-<x>' tags name the group directly; others get a
                # 'tag_' prefix.
                if t.startswith('group-'):
                    tag = t[6:]
                else:
                    tag = 'tag_%s' % t
                if tag in groups:
                    groups[tag].append(name)
                else:
                    groups[tag] = [name]

            net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
            net = 'network_%s' % net
            if net in groups:
                groups[net].append(name)
            else:
                groups[net] = [name]

            machine_type = node.size
            if machine_type in groups:
                groups[machine_type].append(name)
            else:
                groups[machine_type] = [name]

            # Scratch-disk instances report the image; root-persistent-disk
            # instances cannot, so they fall back to 'persistent_disk'.
            image = node.image and node.image or 'persistent_disk'
            if image in groups:
                groups[image].append(name)
            else:
                groups[image] = [name]

            status = node.extra['status']
            stat = 'status_%s' % status.lower()
            if stat in groups:
                groups[stat].append(name)
            else:
                groups[stat] = [name]

        groups["_meta"] = meta

        return groups

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script
if __name__ == '__main__':
    # Constructing GceInventory performs all the work and exits the process.
    GceInventory()
| apache-2.0 |
thalespaiva/autofinger | comfort.py | 1 | 3386 | #!/usr/bin/python
# First try with a simple genetic algorithm
# Based on my hand: (in centimeters)
# Hand-span measurements in centimeters, keyed by ordered finger pairs
# (thumb = 1 ... pinky = 5). Based on the author's hand.
DISTANCES_NATURAL = {
    (1, 2): 15, (1, 3): 19, (1, 4): 20.5,
    (1, 5): 21.5, (2, 3): 9, (2, 4): 13,
    (2, 5): 16.5, (3, 4): 8.5, (3, 5): 13,
    (4, 5): 8,
}

# Reachable spans when the fingers must cross over each other; the tiny
# values make most crossings effectively impossible.
DISTANCES_CROSSING = {
    (1, 2): 12, (1, 3): 9, (1, 4): 6,
    (1, 5): 0.01, (2, 3): 0.01, (2, 4): 0.005,
    (2, 5): 0.0001, (3, 4): 0.01, (3, 5): 0.0001,
    (4, 5): 0.0001,
}

# Finger-pair distances with the hand fully closed.
CLOSED_DISTANCE = {
    (1, 2): 4.5, (1, 3): 7, (1, 4): 8.6,
    (1, 5): 10.5, (2, 3): 3.3, (2, 4): 6,
    (2, 5): 8.7, (3, 4): 3.3, (3, 5): 6.1,
    (4, 5): 3.3,
}

AV_KEY_WIDTH = 1.95  # centimeters per half step on the keyboard

# Cache of computed comfort values keyed by
# (finger_org, finger_dst, jump_in_half_tones).
COMFORT_MEMO_TABLE = {}


def calculate_jump_comfort(finger_org, finger_dst, jump_in_half_tones):
    """Memoized front-end for calculate_jump_comfort_real()."""
    key = (finger_org, finger_dst, jump_in_half_tones)
    if key not in COMFORT_MEMO_TABLE:
        COMFORT_MEMO_TABLE[key] = calculate_jump_comfort_real(*key)
    return COMFORT_MEMO_TABLE[key]


def calculate_jump_comfort_real(finger_org, finger_dst, jump_in_half_tones):
    """Difficulty (0 = effortless) of moving finger_org -> finger_dst across
    an interval of jump_in_half_tones half steps.

    A jump beyond the relaxed span for the finger pair costs the overshoot
    in centimeters; a smaller jump costs a fraction of the closed-hand
    distance. Either finger being None costs nothing.
    """
    jump = jump_in_half_tones * AV_KEY_WIDTH
    if finger_org is None or finger_dst is None:
        return 0
    if finger_org == finger_dst:
        # Same finger: the whole hand must travel the interval.
        return abs(jump)

    low, high = sorted((finger_org, finger_dst))
    pair = (low, high)
    # The motion is "natural" when the melodic direction matches the finger
    # order (e.g. 2 -> 4 going up); otherwise the fingers must cross.
    natural = (finger_org < finger_dst) == (jump >= 0)
    span = DISTANCES_NATURAL[pair] if natural else DISTANCES_CROSSING[pair]

    reach = abs(jump)
    if reach > span:
        # Overshoot beyond the relaxed span: pay the excess distance.
        return reach - span
    # Within the span: cost scales with how far the hand must close up.
    return (reach / span) * CLOSED_DISTANCE[pair]
def calculate_comforts(fingers_org, fingers_dst, jumps):
    """Evaluate comfort over the Cartesian product of origins, destinations
    and jumps; returns four parallel lists (xs, ys, zs, comfort values)."""
    from itertools import product

    xs, ys, zs, cs = [], [], [], []
    for origin, destination, jump in product(fingers_org, fingers_dst, jumps):
        xs.append(origin)
        ys.append(destination)
        zs.append(jump)
        cs.append(calculate_jump_comfort(origin, destination, jump))
    return (xs, ys, zs, cs)
def plot_comfort(fingers_org=range(1, 6, 1), fingers_dst=range(1, 6, 1),
                 jumps=range(-12, 13, 1)):
    """Render a 3-D scatter of jump difficulty over (from-finger, to-finger,
    interval), save it to ./figures/image.png and display it."""
    # seaborn is imported purely for its side effect of restyling matplotlib.
    import seaborn
    from mpl_toolkits.mplot3d import Axes3D
    from pylab import plt

    xs, ys, zs, cs = calculate_comforts(
        fingers_org, fingers_dst, jumps)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xs, ys, zs, c=cs)

    ax.set_zlabel("Interval (half steps)", fontsize=15)
    ax.set_zlim(jumps[0], jumps[-1])
    # ax.set_zticks(jumps)

    plt.xticks(fingers_org)
    plt.xlim(fingers_org[0], fingers_org[-1])
    plt.xlabel("From finger", fontsize=15)

    plt.yticks(fingers_dst)
    plt.ylim(fingers_dst[0], fingers_dst[-1])
    plt.ylabel("To finger", fontsize=15)

    plt.title("Difficulty of finger passages", fontsize=25)
    # NOTE(review): savefig() has no 'figsize' parameter (figsize belongs to
    # plt.figure()); depending on the matplotlib version this kwarg is
    # ignored or rejected -- confirm and move it to the figure() call.
    plt.savefig('./figures/image.png', figsize=(16, 12), dpi=300)
    plt.show()
if __name__ == '__main__':
    # Render and save the comfort surface when run as a script.
    plot_comfort()
| gpl-2.0 |
abhikumar22/MYBLOG | blg/Lib/site-packages/django-1.11.7-py3.6.egg/django/db/utils.py | 143 | 10368 | import os
import pkgutil
from importlib import import_module
from threading import local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils._os import npath, upath
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
# Django's database exception hierarchy, mirroring PEP 249 so that backend
# errors can be re-raised as these common wrappers (see DatabaseErrorWrapper).
class Error(Exception if six.PY3 else StandardError):  # NOQA: StandardError undefined on PY3
    # Root of the hierarchy (StandardError on Python 2 for compatibility).
    pass


class InterfaceError(Error):
    # Errors in the database interface rather than the database itself.
    pass


class DatabaseError(Error):
    # Base class for errors raised by the database.
    pass


class DataError(DatabaseError):
    # Problems with the processed data (out of range, bad truncation, ...).
    pass


class OperationalError(DatabaseError):
    # Errors related to the database's operation, not necessarily the
    # programmer's fault (disconnects, transaction failures, ...).
    pass


class IntegrityError(DatabaseError):
    # Relational-integrity violations (foreign key, uniqueness, ...).
    pass


class InternalError(DatabaseError):
    # The database encountered an internal error.
    pass


class ProgrammingError(DatabaseError):
    # Programming errors (table not found, SQL syntax error, ...).
    pass


class NotSupportedError(DatabaseError):
    # A method or API is not supported by the database backend.
    pass
class DatabaseErrorWrapper(object):
    """
    Context manager and decorator that re-throws backend-specific database
    exceptions using Django's common wrappers.
    """

    def __init__(self, wrapper):
        """
        wrapper is a database wrapper.

        It must have a Database attribute defining PEP-249 exceptions.
        """
        self.wrapper = wrapper

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            return
        # Iterate most-specific Django exception types first: the first type
        # whose backend ("Database") counterpart matches the raised exception
        # determines the wrapper used, so this ordering is load-bearing.
        for dj_exc_type in (
                DataError,
                OperationalError,
                IntegrityError,
                InternalError,
                ProgrammingError,
                NotSupportedError,
                DatabaseError,
                InterfaceError,
                Error,
        ):
            db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
            if issubclass(exc_type, db_exc_type):
                dj_exc_value = dj_exc_type(*exc_value.args)
                dj_exc_value.__cause__ = exc_value
                if not hasattr(exc_value, '__traceback__'):
                    exc_value.__traceback__ = traceback
                # Only set the 'errors_occurred' flag for errors that may make
                # the connection unusable.
                if dj_exc_type not in (DataError, IntegrityError):
                    self.wrapper.errors_occurred = True
                six.reraise(dj_exc_type, dj_exc_value, traceback)

    def __call__(self, func):
        # Note that we are intentionally not using @wraps here for performance
        # reasons. Refs #21109.
        def inner(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return inner
def load_backend(backend_name):
    """
    Return a database backend's "base" module given a fully qualified database
    backend name, or raise an error if it doesn't exist.
    """
    # This backend was renamed in Django 1.9; keep accepting the old name.
    if backend_name == 'django.db.backends.postgresql_psycopg2':
        backend_name = 'django.db.backends.postgresql'
    try:
        return import_module('%s.base' % backend_name)
    except ImportError as exc:
        # The backend wasn't importable. Gather the built-in backends so the
        # error message can list the valid alternatives.
        backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
        try:
            builtin_backends = sorted(
                name for _, name, ispkg in pkgutil.iter_modules([npath(backend_dir)])
                if ispkg and name not in {'base', 'dummy', 'postgresql_psycopg2'}
            )
        except EnvironmentError:
            builtin_backends = []
        if backend_name in ('django.db.backends.%s' % b for b in builtin_backends):
            # A built-in backend exists but still failed to import, so the
            # error lies inside Django itself; propagate it unchanged.
            raise
        backend_reprs = ", ".join(repr(b) for b in builtin_backends)
        raise ImproperlyConfigured(
            "%r isn't an available database backend.\n"
            "Try using 'django.db.backends.XXX', where XXX "
            "is one of:\n %s\nError was: %s"
            % (backend_name, backend_reprs, exc)
        )
class ConnectionDoesNotExist(Exception):
    """Raised when a connection alias is not present in the databases dict."""
    pass
class ConnectionHandler(object):
    """Lazy, per-thread registry of database connections keyed by alias."""

    def __init__(self, databases=None):
        """
        databases is an optional dictionary of database definitions (structured
        like settings.DATABASES).
        """
        self._databases = databases
        # Connections live in thread-local storage: each thread builds and
        # caches its own DatabaseWrapper per alias.
        self._connections = local()

    @cached_property
    def databases(self):
        # Resolved lazily so importing this module does not require the
        # settings to be configured yet.
        if self._databases is None:
            self._databases = settings.DATABASES
        if self._databases == {}:
            self._databases = {
                DEFAULT_DB_ALIAS: {
                    'ENGINE': 'django.db.backends.dummy',
                },
            }
        # NOTE(review): this subscript runs before the membership check below,
        # so a databases dict lacking the default alias raises KeyError here
        # rather than the friendlier ImproperlyConfigured — confirm intended.
        if self._databases[DEFAULT_DB_ALIAS] == {}:
            self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
        if DEFAULT_DB_ALIAS not in self._databases:
            raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
        return self._databases

    def ensure_defaults(self, alias):
        """
        Puts the defaults into the settings dictionary for a given connection
        where no settings is provided.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
        conn.setdefault('ATOMIC_REQUESTS', False)
        conn.setdefault('AUTOCOMMIT', True)
        conn.setdefault('ENGINE', 'django.db.backends.dummy')
        if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
            conn['ENGINE'] = 'django.db.backends.dummy'
        conn.setdefault('CONN_MAX_AGE', 0)
        conn.setdefault('OPTIONS', {})
        conn.setdefault('TIME_ZONE', None)
        for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
            conn.setdefault(setting, '')

    def prepare_test_settings(self, alias):
        """
        Makes sure the test settings are available in the 'TEST' sub-dictionary.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
        test_settings = conn.setdefault('TEST', {})
        for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
            test_settings.setdefault(key, None)

    def __getitem__(self, alias):
        # Return this thread's cached connection when one exists ...
        if hasattr(self._connections, alias):
            return getattr(self._connections, alias)
        # ... otherwise build it from the (defaulted) settings and cache it.
        self.ensure_defaults(alias)
        self.prepare_test_settings(alias)
        db = self.databases[alias]
        backend = load_backend(db['ENGINE'])
        conn = backend.DatabaseWrapper(db, alias)
        setattr(self._connections, alias, conn)
        return conn

    def __setitem__(self, key, value):
        setattr(self._connections, key, value)

    def __delitem__(self, key):
        delattr(self._connections, key)

    def __iter__(self):
        # Iterates configured aliases, not open connections.
        return iter(self.databases)

    def all(self):
        return [self[alias] for alias in self]

    def close_all(self):
        # Close every connection the *current thread* has opened; aliases
        # never used in this thread are skipped.
        for alias in self:
            try:
                connection = getattr(self._connections, alias)
            except AttributeError:
                continue
            connection.close()
class ConnectionRouter(object):
    """Dispatches database-routing decisions through the configured routers."""

    def __init__(self, routers=None):
        """
        If routers is not specified, will default to settings.DATABASE_ROUTERS.
        """
        self._routers = routers

    @cached_property
    def routers(self):
        if self._routers is None:
            self._routers = settings.DATABASE_ROUTERS
        routers = []
        for r in self._routers:
            # Entries may be dotted-path strings or ready router instances.
            if isinstance(r, six.string_types):
                router = import_string(r)()
            else:
                router = r
            routers.append(router)
        return routers

    def _router_func(action):
        # NOTE: executed at class-creation time to build db_for_read /
        # db_for_write below — hence no 'self' parameter here.
        def _route_db(self, model, **hints):
            chosen_db = None
            for router in self.routers:
                try:
                    method = getattr(router, action)
                except AttributeError:
                    # If the router doesn't have a method, skip to the next one.
                    pass
                else:
                    chosen_db = method(model, **hints)
                    if chosen_db:
                        return chosen_db
            # No router decided: prefer the instance's own database, then
            # fall back to the default alias.
            instance = hints.get('instance')
            if instance is not None and instance._state.db:
                return instance._state.db
            return DEFAULT_DB_ALIAS
        return _route_db

    db_for_read = _router_func('db_for_read')
    db_for_write = _router_func('db_for_write')

    def allow_relation(self, obj1, obj2, **hints):
        for router in self.routers:
            try:
                method = router.allow_relation
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                pass
            else:
                allow = method(obj1, obj2, **hints)
                if allow is not None:
                    return allow
        # No router had an opinion: only allow relations within one database.
        return obj1._state.db == obj2._state.db

    def allow_migrate(self, db, app_label, **hints):
        for router in self.routers:
            try:
                method = router.allow_migrate
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                continue
            allow = method(db, app_label, **hints)
            if allow is not None:
                return allow
        return True

    def allow_migrate_model(self, db, model):
        return self.allow_migrate(
            db,
            model._meta.app_label,
            model_name=model._meta.model_name,
            model=model,
        )

    def get_migratable_models(self, app_config, db, include_auto_created=False):
        """
        Return app models allowed to be synchronized on provided db.
        """
        models = app_config.get_models(include_auto_created=include_auto_created)
        return [model for model in models if self.allow_migrate_model(db, model)]
| gpl-3.0 |
wasade/networkx | networkx/algorithms/centrality/tests/test_current_flow_closeness.py | 54 | 1410 | #!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx
class TestFlowClosenessCentrality(object):
    """Numeric spot-checks for current-flow closeness centrality."""
    numpy = 1  # nosetests attribute, use nosetests -a 'not numpy' to skip test

    @classmethod
    def setupClass(cls):
        # Skip the whole class when the numpy/scipy stack is missing.
        global np
        try:
            import numpy as np
            import scipy
        except ImportError:
            raise SkipTest('NumPy not available.')

    def test_K4(self):
        """Closeness centrality: K4"""
        G = networkx.complete_graph(4)
        b = networkx.current_flow_closeness_centrality(G)
        b_answer = {0: 2.0 / 3, 1: 2.0 / 3, 2: 2.0 / 3, 3: 2.0 / 3}
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])

    def test_P4(self):
        """Closeness centrality: P4"""
        G = networkx.path_graph(4)
        b = networkx.current_flow_closeness_centrality(G)
        b_answer = {0: 1.0 / 6, 1: 1.0 / 4, 2: 1.0 / 4, 3: 1.0 / 6}
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])

    def test_star(self):
        """Closeness centrality: star """
        G = networkx.Graph()
        G.add_star(['a', 'b', 'c', 'd'])
        b = networkx.current_flow_closeness_centrality(G)
        b_answer = {'a': 1.0 / 3, 'b': 0.6 / 3, 'c': 0.6 / 3, 'd': 0.6 / 3}
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])


class TestWeightedFlowClosenessCentrality(object):
    # Placeholder: the weighted variant has no tests yet.
    pass
| bsd-3-clause |
coberger/DIRAC | FrameworkSystem/Agent/TopErrorMessagesReporter.py | 7 | 5388 | # $HeadURL$
""" TopErrorMessagesReporter produces a list with the most common errors
injected in the SystemLoggingDB and sends a notification to a mailing
list and specific users.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUserOption
from DIRAC.FrameworkSystem.DB.SystemLoggingDB import SystemLoggingDB
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.Core.Utilities.Time import date, toString, fromString, day
class TopErrorMessagesReporter( AgentModule ):
  """Agent that periodically mails the most frequent SystemLoggingDB errors."""

  def initialize( self ):
    """Build the mailing list and query parameters from the agent options."""
    self.systemLoggingDB = SystemLoggingDB()
    self.agentName = self.am_getModuleParam( 'fullName' )
    self.notification = NotificationClient()
    mailList = self.am_getOption( "MailList", [] )
    userList = self.am_getOption( "Reviewer", [] )
    self.log.debug( "Users to be notified:", ', '.join( userList ) )
    # Resolve each reviewer's e-mail address from the registry.
    for user in userList:
      mail = getUserOption( user, 'Email', '' )
      if not mail:
        self.log.warn( "Could not get user's mail", user )
      else:
        mailList.append( mail )
    if not mailList:
      # Fall back to the operations-wide logging mailing list.
      mailList = Operations().getValue( 'EMail/Logging', [] )
    if not len( mailList ):
      errString = "There are no valid users in the list"
      varString = "[" + ','.join( userList ) + "]"
      self.log.error( errString, varString )
      return S_ERROR( errString + varString )
    self.log.info( "List of mails to be notified", ','.join( mailList ) )
    self._mailAddress = mailList
    # Report tuning: minimum occurrence count, look-back window, row limit.
    self._threshold = int( self.am_getOption( 'Threshold', 10 ) )
    self.__days = self.am_getOption( 'QueryPeriod', 7 )
    self._period = int( self.__days ) * day
    self._limit = int ( self.am_getOption( 'NumberOfErrors', 10 ) )
    string = "The %i most common errors in the SystemLoggingDB" % self._limit
    self._subject = string + " for the last %s days" % self.__days
    return S_OK()

  def execute( self ):
    """ The main agent execution method
    """
    limitDate = date() - self._period
    tableList = [ "MessageRepository", "FixedTextMessages", "Systems",
                  "SubSystems" ]
    columnsList = [ "SystemName", "SubSystemName", "count(*) as entries",
                    "FixedTextString" ]
    # Aggregate identical error texts over the window, keep those above the
    # threshold, most frequent first, capped at the configured limit.
    cmd = "SELECT " + ', '.join( columnsList ) + " FROM " \
          + " NATURAL JOIN ".join( tableList ) \
          + " WHERE MessageTime > '%s'" % limitDate \
          + " GROUP BY FixedTextString HAVING entries > %s" % self._threshold \
          + " ORDER BY entries DESC LIMIT %i;" % self._limit
    result = self.systemLoggingDB._query( cmd )
    if not result['OK']:
      return result
    messageList = result['Value']
    if messageList == 'None' or messageList == ():
      self.log.warn( 'The DB query returned an empty result' )
      return S_OK()
    mailBody = '\n'
    for message in messageList:
      mailBody = mailBody + "Count: " + str( message[2] ) + "\tError: '"\
                 + message[3] + "'\tSystem: '" + message[0]\
                 + "'\tSubsystem: '" + message[1] + "'\n"
    mailBody = mailBody + "\n\n-------------------------------------------------------\n"\
               + "Please do not reply to this mail. It was automatically\n"\
               + "generated by a Dirac Agent.\n"
    # Fetch when this agent last sent a report, to rate-limit the mails.
    result = self.systemLoggingDB._getDataFromAgentTable( self.agentName )
    self.log.debug( result )
    if not result['OK']:
      errorString = "Could not get the date when the last mail was sent"
      self.log.error( errorString )
      return S_ERROR( errorString )
    else:
      if len( result['Value'] ):
        self.log.debug( "date value: %s" % fromString( result['Value'][0][0][1:-1] ) )
        lastMailSentDate = fromString( result['Value'][0][0][1:-1] )
      else:
        # First run: record a sentinel date just before the window so the
        # report below is allowed to go out.
        lastMailSentDate = limitDate - 1 * day
        result = self.systemLoggingDB._insertDataIntoAgentTable( self.agentName, lastMailSentDate )
        if not result['OK']:
          errorString = "Could not insert data into the DB"
          self.log.error( errorString, result['Message'] )
          return S_ERROR( errorString + ": " + result['Message'] )
    self.log.debug( "limitDate: %s\t" % limitDate \
                    + "lastMailSentDate: %s\n" % lastMailSentDate )
    if lastMailSentDate > limitDate:
      self.log.info( "The previous report was sent less "\
                     + " than %s days ago" % self.__days )
      return S_OK()
    dateSent = toString( date() )
    self.log.info( "The list with the top errors has been sent" )
    result = self.systemLoggingDB._insertDataIntoAgentTable( self.agentName, dateSent )
    if not result['OK']:
      errorString = "Could not insert data into the DB"
      self.log.error( errorString, result['Message'] )
      return S_ERROR( errorString + ": " + result['Message'] )
    result = self.notification.sendMail( self._mailAddress, self._subject,
                                         mailBody )
    if not result[ 'OK' ]:
      self.log.warn( "The notification could not be sent" )
      return S_OK()
    return S_OK( "The list with the top errors has been sent" )
| gpl-3.0 |
jamesread/demobuilder | layers/rhel-server-7:gui:ose-3.0/@target/openshift-auth.py | 16 | 1751 | #!/usr/bin/python
import BaseHTTPServer
import getpass
import json
import PAM
import sys
class AuthHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP Basic-auth endpoint validating credentials against PAM.

    Python 2 only (BaseHTTPServer, ``print >>`` and py2 except syntax).
    """

    def auth(self, username, password):
        """Return True when PAM accepts the username/password pair."""
        # Answer PAM's prompts: supply the password when asked, empty
        # strings (with a logged warning) for anything unexpected.
        def pam_conversation(pam, query_list):
            rv = []
            for (query, typ) in query_list:
                if (query, typ) == ("Password: ", PAM.PAM_PROMPT_ECHO_OFF):
                    rv.append((password, 0))
                else:
                    print >>sys.stderr, "%s: unexpected query/type %s/%s" % \
                        (username, query, typ)
                    rv.append(("", 0))
            return rv

        pam = PAM.pam()
        pam.start("password-auth", username, pam_conversation)
        try:
            pam.authenticate()
            # Also verify the account itself is valid (not expired/locked).
            pam.acct_mgmt()
            return True
        except PAM.error as (resp, code):
            print >>sys.stderr, "%s: auth fail (%s)" % (username, resp)
        except Exception as e:
            print >>sys.stderr, "%s: error (%s)" % (username, e)
        return False

    def do_GET(self):
        # Any GET acts as an auth check: 200 + {"sub": user} JSON on
        # success, 401 with a Basic challenge otherwise.
        auth = self.headers.getheader("Authorization")
        if auth and auth.startswith("Basic "):
            (username, password) = auth[6:].decode("base64").split(":", 1)
            if self.auth(username, password):
                self.send_response(200)
                self.send_header("Content-Type", "application/json")
                self.end_headers()
                self.wfile.write(json.dumps({"sub": username}))
                return
        self.send_response(401)
        self.send_header("WWW-Authenticate", "Basic")
        self.end_headers()


if __name__ == "__main__":
    # Bind to localhost only; callers proxy auth requests to this port.
    BaseHTTPServer.HTTPServer(("127.0.0.1", 2305), AuthHandler).serve_forever()
| gpl-3.0 |
clusterpy/clusterpy | clusterpy/core/contiguity/transformations.py | 1 | 2052 | # encoding: latin2
""" Transforming contiguity matrix
"""
__author__ = "Juan C. Duque, Alejandro Betancourt"
__credits__ = "Copyright (c) 2010-11 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
__all__ = ['dict2matrix']
import struct
import numpy
from os import path
from scipy.sparse import lil_matrix
# WfromPolig
def dict2matrix(wDict, std=0, diag=0):
    """Transform the contiguity dictionary to a matrix

    :param wDict: Contiguity dictionary mapping each area id (0..n-1) to
        the list of its neighbors' ids
    :type wDict: dictionary
    :param std: 1 to get the standarized matrix
    :type std: dictionary
    :param diag: value placed on the diagonal; it is also added to the
        neighbor count used when standardizing a row

    **Example 1** ::

        >>> import clusterpy
        >>> lay = clusterpy.importArcData("clusterpy/data_examples/china")
        >>> wmatrix = clusterpy.contiguity.dict2matrix(lay.Wrook)

    :rtype: list, List of lists representing the contiguity matrix
    """
    nAreas = len(wDict)
    # Pre-allocate every row up front. The previous implementation appended
    # rows while iterating the dictionary and indexed them by key, which
    # raised IndexError whenever the keys were not visited as 0, 1, 2, ...
    data = [[0] * nAreas for _ in range(nAreas)]
    for i, neighbors in wDict.items():
        data[i][i] = diag
        # Effective neighbor count used for row standardization.
        ne = len(neighbors) + diag
        for j in neighbors:
            data[i][j] = 1 / float(ne) if std else 1
    return data
def dict2sparseMatrix(wDict, std=0, diag=0):
    """Transform the contiguity dictionary to a scipy sparse (lil) matrix

    :param wDict: Contiguity dictionary mapping each area id (0..n-1) to
        the list of its neighbors' ids
    :type wDict: dictionary
    :param std: 1 to get the standarized matrix
    :type std: dictionary
    :param diag: value placed on the diagonal; it is also added to the
        neighbor count used when standardizing a row

    **Example 1** ::

        >>> import clusterpy
        >>> lay = clusterpy.importArcData("clusterpy/data_examples/china")
        >>> wmatrix = clusterpy.contiguity.dict2sparseMatrix(lay.Wrook)

    :rtype: scipy.sparse.lil_matrix representing the contiguity matrix
    """
    # lil_matrix is pre-sized, so fill order does not depend on key order.
    nAreas = len(wDict)
    data = lil_matrix((nAreas, nAreas))
    for i, neighbors in wDict.items():
        data[i, i] = diag
        # Effective neighbor count used for row standardization.
        ne = len(neighbors) + diag
        for j in neighbors:
            data[i, j] = 1 / float(ne) if std else 1
    return data
| bsd-3-clause |
dsajkl/reqiop | lms/djangoapps/course_wiki/views.py | 32 | 4608 | """
This file contains view functions for wrapping the django-wiki.
"""
import logging
import re
import cgi
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from wiki.core.exceptions import NoRootURL
from wiki.models import URLPath, Article
from courseware.courses import get_course_by_id
from course_wiki.utils import course_wiki_slug
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
def root_create(request):  # pylint: disable=W0613
    """
    In the edX wiki, we don't show the root_create view. Instead, we
    just create the root automatically if it doesn't exist.

    ``request`` is unused (hence the pylint disable) but required by the
    Django view signature.
    """
    root = get_or_create_root()
    return redirect('wiki:get', path=root.path)
def course_wiki_redirect(request, course_id):  # pylint: disable=W0613
    """
    This redirects to whatever page on the wiki that the course designates
    as it's home page. A course's wiki must be an article on the root (for
    example, "/6.002x") to keep things simple.
    """
    course = get_course_by_id(SlashSeparatedCourseKey.from_deprecated_string(course_id))
    course_slug = course_wiki_slug(course)

    # Validate the slug: it becomes a URL path segment, so it must be
    # non-empty and contain only letters, digits, periods or hyphens.
    valid_slug = True
    if not course_slug:
        log.exception("This course is improperly configured. The slug cannot be empty.")
        valid_slug = False
    if re.match(r'^[-\w\.]+$', course_slug) is None:
        log.exception("This course is improperly configured. The slug can only contain letters, numbers, periods or hyphens.")
        valid_slug = False

    if not valid_slug:
        # Fall back to the wiki root when the slug cannot form a valid URL.
        return redirect("wiki:get", path="")

    # The wiki needs a Site object created. We make sure it exists here
    try:
        Site.objects.get_current()
    except Site.DoesNotExist:
        new_site = Site()
        new_site.domain = settings.SITE_NAME
        new_site.name = "edX"
        new_site.save()
        site_id = str(new_site.id)  # pylint: disable=E1101
        if site_id != str(settings.SITE_ID):
            raise ImproperlyConfigured("No site object was created and the SITE_ID doesn't match the newly created one. {} != {}".format(site_id, settings.SITE_ID))

    # Look up the course's wiki article under the root.
    try:
        urlpath = URLPath.get_by_path(course_slug, select_related=True)
        results = list(Article.objects.filter(id=urlpath.article.id))
        if results:
            article = results[0]
        else:
            article = None
    except (NoRootURL, URLPath.DoesNotExist):
        # We will create it in the next block
        urlpath = None
        article = None

    if not article:
        # create it
        root = get_or_create_root()

        if urlpath:
            # Somehow we got a urlpath without an article. Just delete it and
            # recreate it.
            urlpath.delete()

        content = cgi.escape(
            # Translators: this string includes wiki markup. Leave the ** and the _ alone.
            _("This is the wiki for **{organization}**'s _{course_name}_.").format(
                organization=course.display_org_with_default,
                course_name=course.display_name_with_default,
            )
        )
        urlpath = URLPath.create_article(
            root,
            course_slug,
            title=course_slug,
            content=content,
            user_message=_("Course page automatically created."),
            user=None,
            ip_address=None,
            article_kwargs={'owner': None,
                            'group': None,
                            'group_read': True,
                            'group_write': True,
                            'other_read': True,
                            'other_write': True})

    return redirect("wiki:get", path=urlpath.path)
def get_or_create_root():
    """
    Returns the root article, or creates it if it doesn't exist.
    """
    try:
        root = URLPath.root()
        if not root.article:
            # A root URLPath without an article is unusable; delete it so it
            # is rebuilt below.
            root.delete()
            raise NoRootURL
        return root
    except NoRootURL:
        pass

    starting_content = "\n".join((
        _("Welcome to the edX Wiki"),
        "===",
        _("Visit a course wiki to add an article."),
    ))

    root = URLPath.create_root(title=_("Wiki"), content=starting_content)
    # World-readable but not world-writable defaults for the root article.
    article = root.article
    article.group = None
    article.group_read = True
    article.group_write = False
    article.other_read = True
    article.other_write = False
    article.save()

    return root
| agpl-3.0 |
blademainer/intellij-community | plugins/hg4idea/testData/bin/hgext/relink.py | 93 | 6018 | # Mercurial extension to provide 'hg relink' command
#
# Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""recreates hardlinks between repository clones"""
from mercurial import hg, util
from mercurial.i18n import _
import os, stat
testedwith = 'internal'
def relink(ui, repo, origin=None, **opts):
    """recreate hardlinks between two repositories

    When repositories are cloned locally, their data files will be
    hardlinked so that they only use the space of a single repository.

    Unfortunately, subsequent pulls into either repository will break
    hardlinks for any files touched by the new changesets, even if
    both repositories end up pulling the same changes.

    Similarly, passing --rev to "hg clone" will fail to use any
    hardlinks, falling back to a complete copy of the source
    repository.

    This command lets you recreate those hardlinks and reclaim that
    wasted space.

    This repository will be relinked to share space with ORIGIN, which
    must be on the same local disk. If ORIGIN is omitted, looks for
    "default-relink", then "default", in [paths].

    Do not attempt any read operations on this repository while the
    command is running. (Both repositories will be locked against
    writes.)
    """
    if (not util.safehasattr(util, 'samefile') or
            not util.safehasattr(util, 'samedevice')):
        raise util.Abort(_('hardlinks are not supported on this system'))
    src = hg.repository(repo.baseui, ui.expandpath(origin or 'default-relink',
                                                   origin or 'default'))
    ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
    if repo.root == src.root:
        ui.status(_('there is nothing to relink\n'))
        return

    # Hold both repository locks for the whole collect/prune/relink
    # pipeline so no writer can touch either store mid-swap.
    locallock = repo.lock()
    try:
        remotelock = src.lock()
        try:
            candidates = sorted(collect(src, ui))
            targets = prune(candidates, src.store.path, repo.store.path, ui)
            do_relink(src.store.path, repo.store.path, targets, ui)
        finally:
            remotelock.release()
    finally:
        locallock.release()
def collect(src, ui):
    """Walk the source store and return (relative path, stat) candidates."""
    seplen = len(os.path.sep)
    candidates = []
    live = len(src['tip'].manifest())
    # Your average repository has some files which were deleted before
    # the tip revision. We account for that by assuming that there are
    # 3 tracked files for every 2 live files as of the tip version of
    # the repository.
    #
    # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
    total = live * 3 // 2
    src = src.store.path
    pos = 0
    ui.status(_("tip has %d files, estimated total number of files: %s\n")
              % (live, total))
    for dirpath, dirnames, filenames in os.walk(src):
        dirnames.sort()
        relpath = dirpath[len(src) + seplen:]
        for filename in sorted(filenames):
            # Only revlog data (.d) and index (.i) files are candidates.
            if filename[-2:] not in ('.d', '.i'):
                continue
            st = os.stat(os.path.join(dirpath, filename))
            if not stat.S_ISREG(st.st_mode):
                continue
            pos += 1
            candidates.append((os.path.join(relpath, filename), st))
            ui.progress(_('collecting'), pos, filename, _('files'), total)

    ui.progress(_('collecting'), None)
    ui.status(_('collected %d candidate storage files\n') % len(candidates))
    return candidates
def prune(candidates, src, dst, ui):
    """Filter candidates down to files that can actually be hardlinked."""
    def linkfilter(src, dst, st):
        # Return the source stat when linkable, a falsy value otherwise.
        try:
            ts = os.stat(dst)
        except OSError:
            # Destination doesn't have this file?
            return False
        if util.samefile(src, dst):
            # Already the same inode; nothing to reclaim.
            return False
        if not util.samedevice(src, dst):
            # No point in continuing
            raise util.Abort(
                _('source and destination are on different devices'))
        if st.st_size != ts.st_size:
            return False
        return st

    targets = []
    total = len(candidates)
    pos = 0
    for fn, st in candidates:
        pos += 1
        srcpath = os.path.join(src, fn)
        tgt = os.path.join(dst, fn)
        ts = linkfilter(srcpath, tgt, st)
        if not ts:
            ui.debug('not linkable: %s\n' % fn)
            continue
        targets.append((fn, ts.st_size))
        ui.progress(_('pruning'), pos, fn, _('files'), total)

    ui.progress(_('pruning'), None)
    ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
    return targets
def do_relink(src, dst, files, ui):
    """Replace each destination file with a hardlink to its source twin.

    Python 2 only: relies on the ``file()`` builtin and py2 except syntax.
    """
    def relinkfile(src, dst):
        # Rename-first so the original can be restored if linking fails.
        bak = dst + '.bak'
        os.rename(dst, bak)
        try:
            util.oslink(src, dst)
        except OSError:
            os.rename(bak, dst)
            raise
        os.remove(bak)

    CHUNKLEN = 65536
    relinked = 0
    savedbytes = 0

    pos = 0
    total = len(files)
    for f, sz in files:
        pos += 1
        source = os.path.join(src, f)
        tgt = os.path.join(dst, f)
        # Binary mode, so that read() works correctly, especially on Windows
        sfp = file(source, 'rb')
        dfp = file(tgt, 'rb')
        # Compare the two files chunk by chunk; only byte-identical files
        # may be collapsed into a single inode.
        sin = sfp.read(CHUNKLEN)
        while sin:
            din = dfp.read(CHUNKLEN)
            if sin != din:
                break
            sin = sfp.read(CHUNKLEN)
        sfp.close()
        dfp.close()
        if sin:
            # Loop broke early: same size but different contents, skip.
            ui.debug('not linkable: %s\n' % f)
            continue
        try:
            relinkfile(source, tgt)
            ui.progress(_('relinking'), pos, f, _('files'), total)
            relinked += 1
            savedbytes += sz
        except OSError, inst:
            ui.warn('%s: %s\n' % (tgt, str(inst)))
    ui.progress(_('relinking'), None)

    ui.status(_('relinked %d files (%s reclaimed)\n') %
              (relinked, util.bytecount(savedbytes)))

# Mercurial command table: exposes "hg relink [ORIGIN]".
cmdtable = {
    'relink': (
        relink,
        [],
        _('[ORIGIN]')
    )
}
| apache-2.0 |
QijunPan/ansible | lib/ansible/modules/network/dellos9/dellos9_command.py | 12 | 6998 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: dellos9_command
version_added: "2.2"
author: "Dhivya P (@dhivyap)"
short_description: Run commands on remote devices running Dell OS9
description:
- Sends arbitrary commands to a Dell OS9 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos9_config) to configure Dell OS9 devices.
extends_documentation_fragment: dellos9
options:
commands:
description:
- List of commands to send to the remote dellos9 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
notes:
- This module requires Dell OS9 version 9.10.0.1P13 or above.
- This module requires to increase the ssh connection rate limit.
Use the following command I(ip ssh connection-rate-limit 60)
to configure the same. This can be done via M(dellos9_config) module
as well.
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos9_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS9
dellos9_command:
commands: show version
wait_for: result[0] contains OS9
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos9_command:
commands:
- show version
- show interfaces
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos9_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains OS9
- result[1] contains Loopback
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner, FailedConditionsError
from ansible.module_utils.network import NetworkModule, NetworkError
import ansible.module_utils.dellos9
def to_lines(stdout):
    """Yield each command response, splitting strings into lists of lines.

    NOTE(review): relies on the Python 2 ``basestring`` builtin, consistent
    with the rest of this (Ansible 2.x, Python 2) module.
    """
    for item in stdout:
        if isinstance(item, basestring):
            item = str(item).split('\n')
        yield item
def main():
    """Module entry point: run commands, wait on conditionals, exit as JSON."""
    spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list'),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )

    module = NetworkModule(argument_spec=spec,
                           connect_on_load=False,
                           supports_check_mode=True)

    commands = module.params['commands']
    conditionals = module.params['wait_for'] or list()

    warnings = list()

    runner = CommandRunner(module)

    for cmd in commands:
        if module.check_mode and not cmd.startswith('show'):
            # Check mode must not change device state; skip non-show commands
            # and tell the user via a warning instead of failing.
            warnings.append('only show commands are supported when using '
                            'check mode, not executing `%s`' % cmd)
        else:
            if cmd.startswith('conf'):
                # Config-mode commands belong to the dellos9_config module.
                module.fail_json(msg='dellos9_command does not support running '
                                     'config mode commands. Please use '
                                     'dellos9_config instead')
            runner.add_command(cmd)

    for item in conditionals:
        runner.add_conditional(item)

    runner.retries = module.params['retries']
    runner.interval = module.params['interval']

    try:
        runner.run()
    except FailedConditionsError:
        exc = get_exception()
        module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
    except NetworkError:
        exc = get_exception()
        module.fail_json(msg=str(exc))

    result = dict(changed=False)

    result['stdout'] = list()
    for cmd in commands:
        try:
            output = runner.get_command(cmd)
        except ValueError:
            # The command was skipped in check mode, so nothing was captured.
            output = 'command not executed due to check_mode, see warnings'
        result['stdout'].append(output)

    result['warnings'] = warnings
    result['stdout_lines'] = list(to_lines(result['stdout']))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
koder-ua/nailgun-fcert | fuel_upgrade_system/fuel_update_downloader/setup.py | 4 | 1761 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import os.path
from setuptools import find_packages
from setuptools import setup
def find_requires():
    """Return the package requirements read from requirements.txt.

    The file is looked up next to this setup.py, so the result does not
    depend on the current working directory.

    :returns: list of requirement lines (trailing newlines preserved)
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # os.path.join is portable; the original hard-coded a '/' separator.
    # The dead pre-initialization of the result list was dropped.
    with open(os.path.join(dir_path, 'requirements.txt'), 'r') as reqs:
        return reqs.readlines()
if __name__ == "__main__":
    # Standard setuptools packaging entry point; runtime requirements are
    # read from the adjacent requirements.txt via find_requires().
    setup(name='fuel_update_downloader',
          version='0.1.0',
          description='Updates downloader for Fuel-master node',
          long_description="""Updates downloader for Fuel-master node""",
          classifiers=[
              "Programming Language :: Python",
              "Topic :: System :: Software Distribution"],
          author='Mirantis Inc.',
          author_email='product@mirantis.com',
          url='http://mirantis.com',
          keywords='fuel download upgrade mirantis',
          packages=find_packages(),
          zip_safe=False,
          install_requires=find_requires(),
          include_package_data=True,
          entry_points={
              'console_scripts': [
                  'fuel-update-downloader = fuel_update_downloader.cli:main']})
| apache-2.0 |
hcsturix74/django | tests/model_formsets/models.py | 49 | 7480 | from __future__ import unicode_literals
import datetime
import uuid
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
# Basic author model used throughout the model-formset tests.
@python_2_unicode_compatible
class Author(models.Model):
    name = models.CharField(max_length=100)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name


# Multi-table inheritance child of Author.
class BetterAuthor(Author):
    write_speed = models.IntegerField()


# Book with a composite uniqueness constraint on (author, title).
@python_2_unicode_compatible
class Book(models.Model):
    author = models.ForeignKey(Author)
    title = models.CharField(max_length=100)

    class Meta:
        unique_together = (
            ('author', 'title'),
        )
        ordering = ['id']

    def __str__(self):
        return self.title


# Book whose primary key is a user-supplied DecimalField.
@python_2_unicode_compatible
class BookWithCustomPK(models.Model):
    my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True)
    author = models.ForeignKey(Author)
    title = models.CharField(max_length=100)

    def __str__(self):
        return '%s: %s' % (self.my_pk, self.title)


class Editor(models.Model):
    name = models.CharField(max_length=100)


# Exercises unique_together validation when one member (alt_editor) may be NULL.
@python_2_unicode_compatible
class BookWithOptionalAltEditor(models.Model):
    author = models.ForeignKey(Author)
    # Optional secondary author
    alt_editor = models.ForeignKey(Editor, blank=True, null=True)
    title = models.CharField(max_length=100)

    class Meta:
        unique_together = (
            ('author', 'title', 'alt_editor'),
        )

    def __str__(self):
        return self.title


# Inherits Book (and its unique_together) while adding a notes field.
@python_2_unicode_compatible
class AlternateBook(Book):
    notes = models.CharField(max_length=100)

    def __str__(self):
        return '%s - %s' % (self.title, self.notes)


# Exercises many-to-many plus a non-editable date field in formsets.
@python_2_unicode_compatible
class AuthorMeeting(models.Model):
    name = models.CharField(max_length=100)
    authors = models.ManyToManyField(Author)
    created = models.DateField(editable=False)

    def __str__(self):
        return self.name


# Model with a character primary key (no AutoField).
class CustomPrimaryKey(models.Model):
    my_pk = models.CharField(max_length=10, primary_key=True)
    some_field = models.CharField(max_length=100)
# models for inheritance tests.
@python_2_unicode_compatible
class Place(models.Model):
    name = models.CharField(max_length=50)
    city = models.CharField(max_length=50)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Owner(models.Model):
    auto_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100)
    place = models.ForeignKey(Place)

    def __str__(self):
        return "%s at %s" % (self.name, self.place)


class Location(models.Model):
    place = models.ForeignKey(Place, unique=True)
    # this is purely for testing the data doesn't matter here :)
    lat = models.CharField(max_length=100)
    lon = models.CharField(max_length=100)


# One-to-one link whose primary key is the related Owner.
@python_2_unicode_compatible
class OwnerProfile(models.Model):
    owner = models.OneToOneField(Owner, primary_key=True)
    age = models.PositiveIntegerField()

    def __str__(self):
        return "%s is %d" % (self.owner.name, self.age)


# Multi-table inheritance child of Place.
@python_2_unicode_compatible
class Restaurant(Place):
    serves_pizza = models.BooleanField(default=False)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Product(models.Model):
    slug = models.SlugField(unique=True)

    def __str__(self):
        return self.slug


# unique_together over two non-FK fields.
@python_2_unicode_compatible
class Price(models.Model):
    price = models.DecimalField(max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField()

    def __str__(self):
        return "%s for %s" % (self.quantity, self.price)

    class Meta:
        unique_together = (('price', 'quantity'),)


class MexicanRestaurant(Restaurant):
    serves_tacos = models.BooleanField(default=False)


# Second level of inheritance with an explicit parent link as primary key.
class ClassyMexicanRestaurant(MexicanRestaurant):
    restaurant = models.OneToOneField(MexicanRestaurant, parent_link=True, primary_key=True)
    tacos_are_yummy = models.BooleanField(default=False)
# models for testing unique_together validation when a fk is involved and
# using inlineformset_factory.
@python_2_unicode_compatible
class Repository(models.Model):
    name = models.CharField(max_length=25)

    def __str__(self):
        return self.name


# unique_together involving the FK; used with inlineformset_factory.
@python_2_unicode_compatible
class Revision(models.Model):
    repository = models.ForeignKey(Repository)
    revision = models.CharField(max_length=40)

    class Meta:
        unique_together = (("repository", "revision"),)

    def __str__(self):
        return "%s (%s)" % (self.revision, six.text_type(self.repository))
# models for testing callable defaults (see bug #7975). If you define a model
# with a callable default value, you cannot rely on the initial value in a
# form.
class Person(models.Model):
    name = models.CharField(max_length=128)


class Membership(models.Model):
    person = models.ForeignKey(Person)
    # Callable default: evaluated at save time, so a form cannot rely on
    # the initial value (see bug #7975 referenced above).
    date_joined = models.DateTimeField(default=datetime.datetime.now)
    karma = models.IntegerField()
# models for testing a null=True fk to a parent
class Team(models.Model):
    name = models.CharField(max_length=100)


@python_2_unicode_compatible
class Player(models.Model):
    # Nullable FK to the parent, per the test scenario noted above.
    team = models.ForeignKey(Team, null=True)
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
# Models for testing custom ModelForm save methods in formsets and inline formsets
@python_2_unicode_compatible
class Poet(models.Model):
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Poem(models.Model):
    poet = models.ForeignKey(Poet)
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
# Exercises unique_for_date / unique_for_year / unique_for_month validation.
@python_2_unicode_compatible
class Post(models.Model):
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField()

    def __str__(self):
        # Post has no ``name`` field; the original ``return self.name``
        # raised AttributeError whenever an instance was rendered.
        return self.title
# Models for testing UUID primary keys
class UUIDPKParent(models.Model):
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=255)


class UUIDPKChild(models.Model):
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=255)
    parent = models.ForeignKey(UUIDPKParent)


class ChildWithEditablePK(models.Model):
    name = models.CharField(max_length=255, primary_key=True)
    parent = models.ForeignKey(UUIDPKParent)


class AutoPKChildOfUUIDPKParent(models.Model):
    name = models.CharField(max_length=255)
    parent = models.ForeignKey(UUIDPKParent)


class AutoPKParent(models.Model):
    name = models.CharField(max_length=255)


class UUIDPKChildOfAutoPKParent(models.Model):
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=255)
    parent = models.ForeignKey(AutoPKParent)


class ParentWithUUIDAlternateKey(models.Model):
    # UUID is unique but NOT the primary key (alternate/natural key).
    uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=50)


class ChildRelatedViaAK(models.Model):
    name = models.CharField(max_length=255)
    # FK targets the parent's alternate key via to_field.
    parent = models.ForeignKey(to=ParentWithUUIDAlternateKey, to_field='uuid')
| bsd-3-clause |
diplomacy/research | diplomacy_research/proto/diplomacy_tensorflow/core/example/example_parser_configuration_pb2.py | 1 | 14842 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: diplomacy_tensorflow/core/example/example_parser_configuration.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from diplomacy_tensorflow.core.framework import tensor_shape_pb2 as diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from diplomacy_tensorflow.core.framework import tensor_pb2 as diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__pb2
from diplomacy_tensorflow.core.framework import types_pb2 as diplomacy__tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='diplomacy_tensorflow/core/example/example_parser_configuration.proto',
package='diplomacy.tensorflow',
syntax='proto3',
serialized_options=_b('\n\026org.tensorflow.exampleB ExampleParserConfigurationProtosP\001Z;github.com/tensorflow/tensorflow/tensorflow/go/core/example\370\001\001'),
serialized_pb=_b('\nDdiplomacy_tensorflow/core/example/example_parser_configuration.proto\x12\x14\x64iplomacy.tensorflow\x1a\x36\x64iplomacy_tensorflow/core/framework/tensor_shape.proto\x1a\x30\x64iplomacy_tensorflow/core/framework/tensor.proto\x1a/diplomacy_tensorflow/core/framework/types.proto\"\xad\x01\n\x12VarLenFeatureProto\x12-\n\x05\x64type\x18\x01 \x01(\x0e\x32\x1e.diplomacy.tensorflow.DataType\x12!\n\x19values_output_tensor_name\x18\x02 \x01(\t\x12\"\n\x1aindices_output_tensor_name\x18\x03 \x01(\t\x12!\n\x19shapes_output_tensor_name\x18\x04 \x01(\t\"\xd9\x01\n\x14\x46ixedLenFeatureProto\x12-\n\x05\x64type\x18\x01 \x01(\x0e\x32\x1e.diplomacy.tensorflow.DataType\x12\x35\n\x05shape\x18\x02 \x01(\x0b\x32&.diplomacy.tensorflow.TensorShapeProto\x12\x38\n\rdefault_value\x18\x03 \x01(\x0b\x32!.diplomacy.tensorflow.TensorProto\x12!\n\x19values_output_tensor_name\x18\x04 \x01(\t\"\xae\x01\n\x14\x46\x65\x61tureConfiguration\x12G\n\x11\x66ixed_len_feature\x18\x01 \x01(\x0b\x32*.diplomacy.tensorflow.FixedLenFeatureProtoH\x00\x12\x43\n\x0fvar_len_feature\x18\x02 \x01(\x0b\x32(.diplomacy.tensorflow.VarLenFeatureProtoH\x00\x42\x08\n\x06\x63onfig\"\xd2\x01\n\x1a\x45xampleParserConfiguration\x12U\n\x0b\x66\x65\x61ture_map\x18\x01 \x03(\x0b\x32@.diplomacy.tensorflow.ExampleParserConfiguration.FeatureMapEntry\x1a]\n\x0f\x46\x65\x61tureMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x39\n\x05value\x18\x02 \x01(\x0b\x32*.diplomacy.tensorflow.FeatureConfiguration:\x02\x38\x01\x42|\n\x16org.tensorflow.exampleB ExampleParserConfigurationProtosP\x01Z;github.com/tensorflow/tensorflow/tensorflow/go/core/example\xf8\x01\x01\x62\x06proto3')
,
dependencies=[diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__pb2.DESCRIPTOR,diplomacy__tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_VARLENFEATUREPROTO = _descriptor.Descriptor(
name='VarLenFeatureProto',
full_name='diplomacy.tensorflow.VarLenFeatureProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dtype', full_name='diplomacy.tensorflow.VarLenFeatureProto.dtype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='values_output_tensor_name', full_name='diplomacy.tensorflow.VarLenFeatureProto.values_output_tensor_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='indices_output_tensor_name', full_name='diplomacy.tensorflow.VarLenFeatureProto.indices_output_tensor_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shapes_output_tensor_name', full_name='diplomacy.tensorflow.VarLenFeatureProto.shapes_output_tensor_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=250,
serialized_end=423,
)
_FIXEDLENFEATUREPROTO = _descriptor.Descriptor(
name='FixedLenFeatureProto',
full_name='diplomacy.tensorflow.FixedLenFeatureProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dtype', full_name='diplomacy.tensorflow.FixedLenFeatureProto.dtype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shape', full_name='diplomacy.tensorflow.FixedLenFeatureProto.shape', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_value', full_name='diplomacy.tensorflow.FixedLenFeatureProto.default_value', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='values_output_tensor_name', full_name='diplomacy.tensorflow.FixedLenFeatureProto.values_output_tensor_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=426,
serialized_end=643,
)
_FEATURECONFIGURATION = _descriptor.Descriptor(
name='FeatureConfiguration',
full_name='diplomacy.tensorflow.FeatureConfiguration',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fixed_len_feature', full_name='diplomacy.tensorflow.FeatureConfiguration.fixed_len_feature', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='var_len_feature', full_name='diplomacy.tensorflow.FeatureConfiguration.var_len_feature', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='config', full_name='diplomacy.tensorflow.FeatureConfiguration.config',
index=0, containing_type=None, fields=[]),
],
serialized_start=646,
serialized_end=820,
)
_EXAMPLEPARSERCONFIGURATION_FEATUREMAPENTRY = _descriptor.Descriptor(
name='FeatureMapEntry',
full_name='diplomacy.tensorflow.ExampleParserConfiguration.FeatureMapEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='diplomacy.tensorflow.ExampleParserConfiguration.FeatureMapEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='diplomacy.tensorflow.ExampleParserConfiguration.FeatureMapEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=940,
serialized_end=1033,
)
_EXAMPLEPARSERCONFIGURATION = _descriptor.Descriptor(
name='ExampleParserConfiguration',
full_name='diplomacy.tensorflow.ExampleParserConfiguration',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='feature_map', full_name='diplomacy.tensorflow.ExampleParserConfiguration.feature_map', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_EXAMPLEPARSERCONFIGURATION_FEATUREMAPENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=823,
serialized_end=1033,
)
_VARLENFEATUREPROTO.fields_by_name['dtype'].enum_type = diplomacy__tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_FIXEDLENFEATUREPROTO.fields_by_name['dtype'].enum_type = diplomacy__tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_FIXEDLENFEATUREPROTO.fields_by_name['shape'].message_type = diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_FIXEDLENFEATUREPROTO.fields_by_name['default_value'].message_type = diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO
_FEATURECONFIGURATION.fields_by_name['fixed_len_feature'].message_type = _FIXEDLENFEATUREPROTO
_FEATURECONFIGURATION.fields_by_name['var_len_feature'].message_type = _VARLENFEATUREPROTO
_FEATURECONFIGURATION.oneofs_by_name['config'].fields.append(
_FEATURECONFIGURATION.fields_by_name['fixed_len_feature'])
_FEATURECONFIGURATION.fields_by_name['fixed_len_feature'].containing_oneof = _FEATURECONFIGURATION.oneofs_by_name['config']
_FEATURECONFIGURATION.oneofs_by_name['config'].fields.append(
_FEATURECONFIGURATION.fields_by_name['var_len_feature'])
_FEATURECONFIGURATION.fields_by_name['var_len_feature'].containing_oneof = _FEATURECONFIGURATION.oneofs_by_name['config']
_EXAMPLEPARSERCONFIGURATION_FEATUREMAPENTRY.fields_by_name['value'].message_type = _FEATURECONFIGURATION
_EXAMPLEPARSERCONFIGURATION_FEATUREMAPENTRY.containing_type = _EXAMPLEPARSERCONFIGURATION
_EXAMPLEPARSERCONFIGURATION.fields_by_name['feature_map'].message_type = _EXAMPLEPARSERCONFIGURATION_FEATUREMAPENTRY
DESCRIPTOR.message_types_by_name['VarLenFeatureProto'] = _VARLENFEATUREPROTO
DESCRIPTOR.message_types_by_name['FixedLenFeatureProto'] = _FIXEDLENFEATUREPROTO
DESCRIPTOR.message_types_by_name['FeatureConfiguration'] = _FEATURECONFIGURATION
DESCRIPTOR.message_types_by_name['ExampleParserConfiguration'] = _EXAMPLEPARSERCONFIGURATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VarLenFeatureProto = _reflection.GeneratedProtocolMessageType('VarLenFeatureProto', (_message.Message,), dict(
DESCRIPTOR = _VARLENFEATUREPROTO,
__module__ = 'diplomacy_tensorflow.core.example.example_parser_configuration_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.VarLenFeatureProto)
))
_sym_db.RegisterMessage(VarLenFeatureProto)
FixedLenFeatureProto = _reflection.GeneratedProtocolMessageType('FixedLenFeatureProto', (_message.Message,), dict(
DESCRIPTOR = _FIXEDLENFEATUREPROTO,
__module__ = 'diplomacy_tensorflow.core.example.example_parser_configuration_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.FixedLenFeatureProto)
))
_sym_db.RegisterMessage(FixedLenFeatureProto)
FeatureConfiguration = _reflection.GeneratedProtocolMessageType('FeatureConfiguration', (_message.Message,), dict(
DESCRIPTOR = _FEATURECONFIGURATION,
__module__ = 'diplomacy_tensorflow.core.example.example_parser_configuration_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.FeatureConfiguration)
))
_sym_db.RegisterMessage(FeatureConfiguration)
ExampleParserConfiguration = _reflection.GeneratedProtocolMessageType('ExampleParserConfiguration', (_message.Message,), dict(
FeatureMapEntry = _reflection.GeneratedProtocolMessageType('FeatureMapEntry', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLEPARSERCONFIGURATION_FEATUREMAPENTRY,
__module__ = 'diplomacy_tensorflow.core.example.example_parser_configuration_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.ExampleParserConfiguration.FeatureMapEntry)
))
,
DESCRIPTOR = _EXAMPLEPARSERCONFIGURATION,
__module__ = 'diplomacy_tensorflow.core.example.example_parser_configuration_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.ExampleParserConfiguration)
))
_sym_db.RegisterMessage(ExampleParserConfiguration)
_sym_db.RegisterMessage(ExampleParserConfiguration.FeatureMapEntry)
DESCRIPTOR._options = None
_EXAMPLEPARSERCONFIGURATION_FEATUREMAPENTRY._options = None
# @@protoc_insertion_point(module_scope)
| mit |
Jeremmm/multizab | multizab/api/controllers.py | 1 | 1225 | from flask import Blueprint, jsonify
from multizab.utils import Zabbix, get_zabbix_list, count_type
api = Blueprint('api', __name__)
@api.route('/alerts')
def alerts():
    """Return every active trigger from all configured Zabbix hosts,
    each tagged with the name of the platform it came from."""
    triggers = []
    for host in get_zabbix_list()['hosts']:
        client = Zabbix(host['uri'], host['username'], host['password'])
        for trigger in client.get_triggers():
            trigger['platform'] = host['name']
            triggers.append(trigger)
    return jsonify({'result': triggers})
@api.route('/graphics')
def graphics():
    """Return alert counts aggregated globally, per platform and per type.

    Response shape:
        {'result': {'count_alerts': {platform: n, ...},
                    'count_types_per_zabbix': {platform: count_type(...), ...},
                    'count_types': count_type(all triggers)}}
    """
    # Fetch every trigger once, tagging each with its source platform.
    triggers = []
    for host in get_zabbix_list()['hosts']:
        client = Zabbix(host['uri'], host['username'], host['password'])
        for trigger in client.get_triggers():
            trigger['platform'] = host['name']
            triggers.append(trigger)
    # Group by platform in a single pass; the original rescanned the full
    # trigger list once per platform (O(platforms * triggers)).
    per_platform = {}
    for trigger in triggers:
        per_platform.setdefault(trigger['platform'], []).append(trigger)
    count_alerts = {name: len(items) for name, items in per_platform.items()}
    count_types_per_zabbix = {name: count_type(items)
                              for name, items in per_platform.items()}
    return jsonify({'result': {'count_alerts': count_alerts,
                               'count_types_per_zabbix': count_types_per_zabbix,
                               'count_types': count_type(triggers)}})
| mit |
sem-geologist/hyperspy | hyperspy/tests/io/test_tiff.py | 3 | 25745 | import os
import tempfile
import numpy as np
from numpy.testing import assert_allclose
import pytest
import traits.api as t
import hyperspy.api as hs
from hyperspy.misc.test_utils import assert_deep_almost_equal
MY_PATH = os.path.dirname(__file__)
MY_PATH2 = os.path.join(MY_PATH, "tiff_files")
def test_rgba16():
    """RGBA 16-bit tif loads with the expected pixels, unit-less axes and date."""
    s = hs.load(os.path.join(MY_PATH2, "test_rgba16.tif"))
    data = np.load(os.path.join(MY_PATH, "npy_files", "test_rgba16.npy"))
    # Pixel values must match the reference dump exactly.
    assert (s.data == data).all()
    assert s.axes_manager[0].units == t.Undefined
    assert s.axes_manager[1].units == t.Undefined
    assert s.axes_manager[2].units == t.Undefined
    assert_allclose(s.axes_manager[0].scale, 1.0, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 1.0, atol=1E-5)
    assert_allclose(s.axes_manager[2].scale, 1.0, atol=1E-5)
    assert s.metadata.General.date == '2014-03-31'
    assert s.metadata.General.time == '16:35:46'
def _compare_signal_shape_data(s0, s1):
assert s0.data.shape == s1.data.shape
np.testing.assert_equal(s0.data, s1.data)
def test_read_unit_um():
    """DM image with µm calibration: load, save as tif, reload and verify."""
    # Load DM file and save it as tif
    s = hs.load(os.path.join(MY_PATH2, 'test_dm_image_um_unit.dm3'))
    assert s.axes_manager[0].units == 'µm'
    assert s.axes_manager[1].units == 'µm'
    assert_allclose(s.axes_manager[0].scale, 0.16867, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 0.16867, atol=1E-5)
    assert s.metadata.General.date == '2015-07-20'
    assert s.metadata.General.time == '18:48:25'
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'tiff_files', 'test_export_um_unit.tif')
        s.save(fname, overwrite=True, export_scale=True)
        # load tif file
        s2 = hs.load(fname)
        # Original re-asserted on ``s`` (already checked above); the reloaded
        # ``s2`` is what the round-trip must preserve.
        assert s2.axes_manager[0].units == 'µm'
        assert s2.axes_manager[1].units == 'µm'
        assert_allclose(s2.axes_manager[0].scale, 0.16867, atol=1E-5)
        assert_allclose(s2.axes_manager[1].scale, 0.16867, atol=1E-5)
        assert s2.metadata.General.date == s.metadata.General.date
        assert s2.metadata.General.time == s.metadata.General.time
def test_write_read_intensity_axes_DM():
    """Signal quantity and noise-model metadata survive a tif round-trip."""
    s = hs.load(os.path.join(MY_PATH2, 'test_dm_image_um_unit.dm3'))
    s.metadata.Signal.set_item('quantity', 'Electrons (Counts)')
    d = {'gain_factor': 5.0,
         'gain_offset': 2.0}
    s.metadata.Signal.set_item('Noise_properties.Variance_linear_model', d)
    with tempfile.TemporaryDirectory() as tmpdir:
        # NOTE(review): the 'tiff_files' subdir is not created beforehand —
        # presumably the writer handles it; confirm if this test ever fails.
        fname = os.path.join(tmpdir, 'tiff_files', 'test_export_um_unit2.tif')
        s.save(fname, overwrite=True, export_scale=True)
        s2 = hs.load(fname)
        assert_deep_almost_equal(s.metadata.Signal.as_dictionary(),
                                 s2.metadata.Signal.as_dictionary())
def test_read_unit_from_imagej():
    """ImageJ-saved tif: calibrated µm scale is recovered on load."""
    fname = os.path.join(MY_PATH, 'tiff_files',
                         'test_loading_image_saved_with_imageJ.tif')
    s = hs.load(fname)
    assert s.axes_manager[0].units == 'µm'
    assert s.axes_manager[1].units == 'µm'
    assert_allclose(s.axes_manager[0].scale, 0.16867, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 0.16867, atol=1E-5)


def test_read_unit_from_imagej_stack():
    """ImageJ stack: navigation axis has no unit, signal axes are in µm."""
    fname = os.path.join(MY_PATH, 'tiff_files',
                         'test_loading_image_saved_with_imageJ_stack.tif')
    s = hs.load(fname)
    assert s.data.shape == (2, 68, 68)
    assert s.axes_manager[0].units == t.Undefined
    assert s.axes_manager[1].units == 'µm'
    assert s.axes_manager[2].units == 'µm'
    assert_allclose(s.axes_manager[0].scale, 2.5, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 0.16867, atol=1E-5)
    assert_allclose(s.axes_manager[2].scale, 0.16867, atol=1E-5)
@pytest.mark.parametrize("lazy", [True, False])
def test_read_unit_from_DM_stack(lazy):
    """DM stack: time/space units load correctly and survive a tif round-trip."""
    fname = os.path.join(MY_PATH, 'tiff_files',
                         'test_loading_image_saved_with_DM_stack.tif')
    s = hs.load(fname, lazy=lazy)
    assert s.data.shape == (2, 68, 68)
    assert s.axes_manager[0].units == 's'
    assert s.axes_manager[1].units == 'µm'
    assert s.axes_manager[2].units == 'µm'
    assert_allclose(s.axes_manager[0].scale, 2.5, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 0.16867, atol=1E-5)
    assert_allclose(s.axes_manager[2].scale, 1.68674, atol=1E-5)
    with tempfile.TemporaryDirectory() as tmpdir:
        fname2 = os.path.join(
            tmpdir, 'test_loading_image_saved_with_DM_stack2.tif')
        s.save(fname2, overwrite=True)
        s2 = hs.load(fname2)
        # Shape, data, units, scales and offsets must all round-trip.
        _compare_signal_shape_data(s, s2)
        assert s2.axes_manager[0].units == s.axes_manager[0].units
        assert s2.axes_manager[1].units == 'µm'
        assert s2.axes_manager[2].units == 'µm'
        assert_allclose(
            s2.axes_manager[0].scale, s.axes_manager[0].scale, atol=1E-5)
        assert_allclose(
            s2.axes_manager[1].scale, s.axes_manager[1].scale, atol=1E-5)
        assert_allclose(
            s2.axes_manager[2].scale, s.axes_manager[2].scale, atol=1E-5)
        assert_allclose(
            s2.axes_manager[0].offset, s.axes_manager[0].offset, atol=1E-5)
        assert_allclose(
            s2.axes_manager[1].offset, s.axes_manager[1].offset, atol=1E-5)
        assert_allclose(
            s2.axes_manager[2].offset, s.axes_manager[2].offset, atol=1E-5)
def test_read_unit_from_imagej_stack_no_scale():
    """Uncalibrated ImageJ stack: all axes default to unit-less, scale 1."""
    fname = os.path.join(MY_PATH, 'tiff_files',
                         'test_loading_image_saved_with_imageJ_stack_no_scale.tif')
    s = hs.load(fname)
    assert s.data.shape == (2, 68, 68)
    assert s.axes_manager[0].units == t.Undefined
    assert s.axes_manager[1].units == t.Undefined
    assert s.axes_manager[2].units == t.Undefined
    assert_allclose(s.axes_manager[0].scale, 1.0, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 1.0, atol=1E-5)
    assert_allclose(s.axes_manager[2].scale, 1.0, atol=1E-5)


def test_read_unit_from_imagej_no_scale():
    """Uncalibrated ImageJ image: axes default to unit-less, scale 1."""
    fname = os.path.join(MY_PATH, 'tiff_files',
                         'test_loading_image_saved_with_imageJ_no_scale.tif')
    s = hs.load(fname)
    assert s.axes_manager[0].units == t.Undefined
    assert s.axes_manager[1].units == t.Undefined
    assert_allclose(s.axes_manager[0].scale, 1.0, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 1.0, atol=1E-5)
def test_write_read_unit_imagej():
    """µm units assigned before saving are preserved through a tif round-trip."""
    fname = os.path.join(MY_PATH, 'tiff_files',
                         'test_loading_image_saved_with_imageJ.tif')
    s = hs.load(fname, convert_units=True)
    s.axes_manager[0].units = 'µm'
    s.axes_manager[1].units = 'µm'
    with tempfile.TemporaryDirectory() as tmpdir:
        fname2 = os.path.join(
            tmpdir, 'test_loading_image_saved_with_imageJ2.tif')
        s.save(fname2, export_scale=True, overwrite=True)
        s2 = hs.load(fname2)
        assert s2.axes_manager[0].units == 'µm'
        assert s2.axes_manager[1].units == 'µm'
        # The original compared s.data.shape with itself (always true);
        # the reloaded signal's shape is what must match.
        assert s2.data.shape == s.data.shape
def test_write_read_unit_imagej_with_description():
    """export_scale decides whether calibration survives; description is extra."""
    fname = os.path.join(MY_PATH, 'tiff_files',
                         'test_loading_image_saved_with_imageJ.tif')
    s = hs.load(fname)
    s.axes_manager[0].units = 'µm'
    s.axes_manager[1].units = 'µm'
    assert_allclose(s.axes_manager[0].scale, 0.16867, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 0.16867, atol=1E-5)
    with tempfile.TemporaryDirectory() as tmpdir:
        # export_scale=False: calibration is dropped on reload.
        fname2 = os.path.join(tmpdir, 'description.tif')
        s.save(fname2, export_scale=False, overwrite=True, description='test')
        s2 = hs.load(fname2)
        assert s2.axes_manager[0].units == t.Undefined
        assert s2.axes_manager[1].units == t.Undefined
        assert_allclose(s2.axes_manager[0].scale, 1.0, atol=1E-5)
        assert_allclose(s2.axes_manager[1].scale, 1.0, atol=1E-5)
        # export_scale=True: calibration survives (units normalised to 'um'
        # by convert_units=True on load).
        fname3 = os.path.join(tmpdir, 'description2.tif')
        s.save(fname3, export_scale=True, overwrite=True, description='test')
        s3 = hs.load(fname3, convert_units=True)
        assert s3.axes_manager[0].units == 'um'
        assert s3.axes_manager[1].units == 'um'
        assert_allclose(s3.axes_manager[0].scale, 0.16867, atol=1E-5)
        assert_allclose(s3.axes_manager[1].scale, 0.16867, atol=1E-5)
def test_saving_with_custom_tag():
    """A private TIFF tag written via ``extratags`` is read back as metadata."""
    signal = hs.signals.Signal2D(
        np.arange(10 * 15, dtype=np.uint8).reshape((10, 15)))
    custom_tags = [(65000, 's', 1, "Random metadata", False)]
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, 'test_saving_with_custom_tag.tif')
        signal.save(path, extratags=custom_tags, overwrite=True)
        reloaded = hs.load(path)
        assert reloaded.original_metadata['Number_65000'] == "Random metadata"
def _test_read_unit_from_dm():
    """Helper (not collected): DM-saved tif round-trips units, scales, offsets."""
    fname = os.path.join(MY_PATH2, 'test_loading_image_saved_with_DM.tif')
    s = hs.load(fname)
    assert s.axes_manager[0].units == 'µm'
    assert s.axes_manager[1].units == 'µm'
    assert_allclose(s.axes_manager[0].scale, 0.16867, atol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 0.16867, atol=1E-5)
    assert_allclose(s.axes_manager[0].offset, 139.66264, atol=1E-5)
    assert_allclose(s.axes_manager[1].offset, 128.19276, atol=1E-5)
    with tempfile.TemporaryDirectory() as tmpdir:
        fname2 = os.path.join(tmpdir, "DM2.tif")
        s.save(fname2, overwrite=True)
        s2 = hs.load(fname2)
        _compare_signal_shape_data(s, s2)
        # NOTE(review): after saving, units read back as 'micron' rather than
        # 'µm' — presumably the writer spells the unit out; confirm intended.
        assert s2.axes_manager[0].units == 'micron'
        assert s2.axes_manager[1].units == 'micron'
        assert_allclose(s2.axes_manager[0].scale, s.axes_manager[0].scale,
                        atol=1E-5)
        assert_allclose(s2.axes_manager[1].scale, s.axes_manager[1].scale,
                        atol=1E-5)
        assert_allclose(s2.axes_manager[0].offset, s.axes_manager[0].offset,
                        atol=1E-5)
        assert_allclose(s2.axes_manager[1].offset, s.axes_manager[1].offset,
                        atol=1E-5)
def test_write_scale_unit():
    # Export with scale embedding enabled.
    _test_write_scale_unit(export_scale=True)


def test_write_scale_unit_no_export_scale():
    # Export with scale embedding disabled.
    _test_write_scale_unit(export_scale=False)


def _test_write_scale_unit(export_scale=True):
    """ Lazy test, still need to open the files in ImageJ or DM to check if the
    scale and unit are correct """
    s = hs.signals.Signal2D(
        np.arange(10 * 15, dtype=np.uint8).reshape((10, 15)))
    s.axes_manager[0].name = 'x'
    s.axes_manager[1].name = 'y'
    s.axes_manager['x'].scale = 0.25
    s.axes_manager['y'].scale = 0.25
    s.axes_manager['x'].units = 'nm'
    s.axes_manager['y'].units = 'nm'
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(
            tmpdir, 'test_export_scale_unit_%s.tif' % export_scale)
        # Only checks that saving does not raise; no reload/assert here.
        s.save(fname, overwrite=True, export_scale=export_scale)
def test_write_scale_unit_not_square_pixel():
""" Lazy test, still need to open the files in ImageJ or DM to check if the
scale and unit are correct """
s = hs.signals.Signal2D(
np.arange(
10 * 15,
dtype=np.uint8).reshape(
(10,
15)))
s.change_dtype(np.uint8)
s.axes_manager[0].name = 'x'
s.axes_manager[1].name = 'y'
s.axes_manager['x'].scale = 0.25
s.axes_manager['y'].scale = 0.5
s.axes_manager['x'].units = 'nm'
s.axes_manager['y'].units = 'µm'
with tempfile.TemporaryDirectory() as tmpdir:
fname = os.path.join(
tmpdir, 'test_export_scale_unit_not_square_pixel.tif')
s.save(fname, overwrite=True, export_scale=True)
def test_write_scale_with_undefined_unit():
    """ Lazy test, still need to open the files in ImageJ or DM to check if the
    scale and unit are correct """
    data = np.arange(10 * 15, dtype=np.uint8).reshape((10, 15))
    s = hs.signals.Signal2D(data)
    s.axes_manager[0].name = 'x'
    s.axes_manager[1].name = 'y'
    # Scales are set but the units are intentionally left undefined.
    for axis_name in ('x', 'y'):
        s.axes_manager[axis_name].scale = 0.25
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(
            tmpdir, 'test_export_scale_undefined_unit.tif')
        s.save(fname, overwrite=True, export_scale=True)
def test_write_scale_with_undefined_scale():
    """ Lazy test, still need to open the files in ImageJ or DM to check if the
    scale and unit are correct """
    # Neither scale nor units are set: exercise the default-calibration path.
    s = hs.signals.Signal2D(
        np.arange(10 * 15, dtype=np.uint8).reshape((10, 15)))
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(
            tmpdir, 'test_export_scale_undefined_scale.tif')
        s.save(fname, overwrite=True, export_scale=True)
        reloaded = hs.load(fname)
        _compare_signal_shape_data(s, reloaded)
def test_write_scale_with_um_unit():
    """ Lazy test, still need to open the files in ImageJ or DM to check if the
    scale and unit are correct """
    dm3_path = os.path.join(MY_PATH, 'tiff_files',
                            'test_dm_image_um_unit.dm3')
    s = hs.load(dm3_path)
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'test_export_um_unit.tif')
        s.save(fname, overwrite=True, export_scale=True)
        reloaded = hs.load(fname)
        _compare_signal_shape_data(s, reloaded)
def test_write_scale_unit_image_stack():
    """ Lazy test, still need to open the files in ImageJ or DM to check if the
    scale and unit are correct """
    data = np.arange(5 * 10 * 15, dtype=np.uint8).reshape((5, 10, 15))
    s = hs.signals.Signal2D(data)
    # Per-axis calibration: (scale, units) for the stack and the two
    # signal axes respectively.
    for index, (scale, units) in enumerate(
            [(0.25, 'nm'), (0.5, 'um'), (1.5, 'um')]):
        s.axes_manager[index].scale = scale
        s.axes_manager[index].units = units
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'test_export_scale_unit_stack2.tif')
        s.save(fname, overwrite=True, export_scale=True)
        s1 = hs.load(fname, convert_units=True)
        _compare_signal_shape_data(s, s1)
        assert s1.axes_manager[0].units == 'pm'
        # only one unit can be read
        assert s1.axes_manager[1].units == 'um'
        assert s1.axes_manager[2].units == 'um'
        assert_allclose(s1.axes_manager[0].scale, 250.0)
        assert_allclose(s1.axes_manager[1].scale, s.axes_manager[1].scale)
        assert_allclose(s1.axes_manager[2].scale, s.axes_manager[2].scale)
def test_saving_loading_stack_no_scale():
    """Round-trip an uncalibrated image stack through TIFF and compare."""
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'test_export_scale_unit_stack2.tif')
        original = hs.signals.Signal2D(np.zeros((10, 20, 30)))
        original.save(fname, overwrite=True)
        reloaded = hs.load(fname)
        _compare_signal_shape_data(original, reloaded)
# Expected metadata for the FEI Helios SEM test images.
# NOTE(review): this dict is module-level state shared by several tests
# below, and those tests update 'General'/'original_filename' in place
# before comparing -- which makes the tests order-sensitive with respect
# to this fixture.
FEI_Helios_metadata = {'Acquisition_instrument': {'SEM': {'Stage': {'rotation': -2.3611,
                                                                    'tilt': 6.54498e-06,
                                                                    'x': 2.576e-05,
                                                                    'y': -0.000194177,
                                                                    'z': 0.007965},
                                                          'beam_current': 0.00625,
                                                          'beam_energy': 5.0,
                                                          'dwell_time': 1e-05,
                                                          'microscope': 'Helios NanoLab" 660',
                                                          'working_distance': 4.03466}},
                       'General': {'original_filename': 'FEI-Helios-Ebeam-8bits.tif',
                                   'title': '',
                                   'authors': 'supervisor',
                                   'date': '2016-06-13',
                                   'time': '17:06:40'},
                       'Signal': {'binned': False,
                                  'signal_type': ''},
                       '_HyperSpy': {'Folding': {'original_axes_manager': None,
                                                 'original_shape': None,
                                                 'signal_unfolded': False,
                                                 'unfolded': False}}}
def test_read_FEI_SEM_scale_metadata_8bits():
    """Check scale, units, dtype and metadata of an 8-bit FEI Helios TIFF."""
    import copy
    fname = os.path.join(MY_PATH2, 'FEI-Helios-Ebeam-8bits.tif')
    s = hs.load(fname, convert_units=True)
    assert s.axes_manager[0].units == 'um'
    assert s.axes_manager[1].units == 'um'
    assert_allclose(s.axes_manager[0].scale, 3.3724, rtol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 3.3724, rtol=1E-5)
    assert s.data.dtype == 'uint8'
    # Build the expected metadata on a deep copy so this test does not
    # mutate the module-level FEI_Helios_metadata fixture shared with
    # other tests (the previous code updated the global dict in place,
    # making the test suite order-dependent).
    md = copy.deepcopy(FEI_Helios_metadata)
    md['General']['original_filename'] = 'FEI-Helios-Ebeam-8bits.tif'
    assert_deep_almost_equal(s.metadata.as_dictionary(), md)
def test_read_FEI_SEM_scale_metadata_16bits():
    """Check scale, units, dtype and metadata of a 16-bit FEI Helios TIFF."""
    import copy
    fname = os.path.join(MY_PATH2, 'FEI-Helios-Ebeam-16bits.tif')
    s = hs.load(fname, convert_units=True)
    assert s.axes_manager[0].units == 'um'
    assert s.axes_manager[1].units == 'um'
    assert_allclose(s.axes_manager[0].scale, 3.3724, rtol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 3.3724, rtol=1E-5)
    assert s.data.dtype == 'uint16'
    # Build the expected metadata on a deep copy so this test does not
    # mutate the module-level FEI_Helios_metadata fixture shared with
    # other tests (the previous code updated the global dict in place,
    # making the test suite order-dependent).
    md = copy.deepcopy(FEI_Helios_metadata)
    md['General']['original_filename'] = 'FEI-Helios-Ebeam-16bits.tif'
    assert_deep_almost_equal(s.metadata.as_dictionary(), md)
def test_read_Zeiss_SEM_scale_metadata_1k_image():
    """Check scale, units, dtype and full metadata of a 1k Zeiss SEM TIFF."""
    # Expected metadata for test_tiff_Zeiss_SEM_1k.tif (local to this test,
    # so it cannot leak into other tests).
    md = {'Acquisition_instrument': {'SEM': {'Stage': {'rotation': 10.2,
                                                       'tilt': -0.0,
                                                       'x': 75.6442,
                                                       'y': 60.4901,
                                                       'z': 25.193},
                                             'Detector':{'detector_type':
                                                         'HE-SE2'},
                                             'beam_current': 1.0,
                                             'beam_energy': 25.0,
                                             'dwell_time': 5e-08,
                                             'magnification': 105.0,
                                             'microscope': 'Merlin-61-08',
                                             'working_distance': 14.808}},
          'General': {'authors': 'LIM',
                      'date': '2015-12-23',
                      'original_filename': 'test_tiff_Zeiss_SEM_1k.tif',
                      'time': '09:40:32',
                      'title': ''},
          'Signal': {'binned': False, 'signal_type': ''},
          '_HyperSpy': {'Folding': {'original_axes_manager': None,
                                    'original_shape': None,
                                    'signal_unfolded': False,
                                    'unfolded': False}}}
    fname = os.path.join(MY_PATH2, 'test_tiff_Zeiss_SEM_1k.tif')
    s = hs.load(fname, convert_units=True)
    assert s.axes_manager[0].units == 'um'
    assert s.axes_manager[1].units == 'um'
    assert_allclose(s.axes_manager[0].scale, 2.614514, rtol=1E-6)
    assert_allclose(s.axes_manager[1].scale, 2.614514, rtol=1E-6)
    assert s.data.dtype == 'uint8'
    assert_deep_almost_equal(s.metadata.as_dictionary(), md)
def test_read_Zeiss_SEM_scale_metadata_512_image():
    """Check scale, units, dtype and full metadata of a 512px Zeiss SEM TIFF."""
    # Expected metadata for test_tiff_Zeiss_SEM_512pix.tif (local to this
    # test, so it cannot leak into other tests).
    md = {'Acquisition_instrument': {'SEM': {'Stage': {'rotation': 245.8,
                                                       'tilt': 0.0,
                                                       'x': 62.9961,
                                                       'y': 65.3168,
                                                       'z': 44.678},
                                             'beam_energy': 5.0,
                                             'magnification': '50.00 K X',
                                             'microscope': 'ULTRA 55-36-06',
                                             'working_distance': 3.9}},
          'General': {'authors': 'LIBERATO',
                      'date': '2018-09-25',
                      'original_filename': 'test_tiff_Zeiss_SEM_512pix.tif',
                      'time': '08:20:42',
                      'title': ''},
          'Signal': {'binned': False, 'signal_type': ''},
          '_HyperSpy': {'Folding': {'original_axes_manager': None,
                                    'original_shape': None,
                                    'signal_unfolded': False,
                                    'unfolded': False}}}
    fname = os.path.join(MY_PATH2, 'test_tiff_Zeiss_SEM_512pix.tif')
    s = hs.load(fname, convert_units=True)
    assert s.axes_manager[0].units == 'um'
    assert s.axes_manager[1].units == 'um'
    assert_allclose(s.axes_manager[0].scale, 0.011649976, rtol=1E-6)
    assert_allclose(s.axes_manager[1].scale, 0.011649976, rtol=1E-6)
    assert s.data.dtype == 'uint8'
    assert_deep_almost_equal(s.metadata.as_dictionary(), md)
def test_read_RGB_Zeiss_optical_scale_metadata():
    """RGB Zeiss AxioVision TIFF: RGB record dtype, no calibration, and the
    acquisition date/time are read from the tags."""
    fname = os.path.join(MY_PATH2, 'optical_Zeiss_AxioVision_RGB.tif')
    s = hs.load(fname)
    expected_dtype = np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')])
    assert s.data.dtype == expected_dtype
    assert s.data.shape == (10, 13)
    for axis in (s.axes_manager[0], s.axes_manager[1]):
        assert axis.units == t.Undefined
        assert_allclose(axis.scale, 1.0, rtol=1E-5)
    assert s.metadata.General.date == '2016-06-13'
    assert s.metadata.General.time == '15:59:52'
def test_read_BW_Zeiss_optical_scale_metadata():
    """Greyscale Zeiss TIFF with forced resolution read and unit conversion."""
    fname = os.path.join(MY_PATH2, 'optical_Zeiss_AxioVision_BW.tif')
    s = hs.load(fname, force_read_resolution=True, convert_units=True)
    assert s.data.dtype == np.uint8
    assert s.data.shape == (10, 13)
    for axis in (s.axes_manager[0], s.axes_manager[1]):
        assert axis.units == 'um'
        assert_allclose(axis.scale, 169.333, rtol=1E-5)
    assert s.metadata.General.date == '2016-06-13'
    assert s.metadata.General.time == '16:08:49'
def test_read_BW_Zeiss_optical_scale_metadata_convert_units_false():
    """Greyscale Zeiss TIFF without unit conversion: units stay as 'µm'."""
    fname = os.path.join(MY_PATH2, 'optical_Zeiss_AxioVision_BW.tif')
    s = hs.load(fname, force_read_resolution=True, convert_units=False)
    assert s.data.dtype == np.uint8
    assert s.data.shape == (10, 13)
    for axis in (s.axes_manager[0], s.axes_manager[1]):
        assert axis.units == 'µm'
        assert_allclose(axis.scale, 169.333, rtol=1E-5)
def test_read_BW_Zeiss_optical_scale_metadata2():
    # NOTE(review): this test is byte-for-byte identical to
    # test_read_BW_Zeiss_optical_scale_metadata -- presumably one of the two
    # was meant to exercise a different parameter combination; confirm and
    # either differentiate it or remove the duplicate.
    fname = os.path.join(MY_PATH2, 'optical_Zeiss_AxioVision_BW.tif')
    s = hs.load(fname, force_read_resolution=True, convert_units=True)
    assert s.data.dtype == np.uint8
    assert s.data.shape == (10, 13)
    assert s.axes_manager[0].units == 'um'
    assert s.axes_manager[1].units == 'um'
    assert_allclose(s.axes_manager[0].scale, 169.333, rtol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 169.333, rtol=1E-5)
    assert s.metadata.General.date == '2016-06-13'
    assert s.metadata.General.time == '16:08:49'
def test_read_BW_Zeiss_optical_scale_metadata3():
    """Without force_read_resolution the calibration stays undefined."""
    fname = os.path.join(MY_PATH2, 'optical_Zeiss_AxioVision_BW.tif')
    s = hs.load(fname, force_read_resolution=False)
    assert s.data.dtype == np.uint8
    assert s.data.shape == (10, 13)
    for axis in (s.axes_manager[0], s.axes_manager[1]):
        assert axis.units == t.Undefined
        assert_allclose(axis.scale, 1.0, rtol=1E-5)
    assert s.metadata.General.date == '2016-06-13'
    assert s.metadata.General.time == '16:08:49'
def test_read_TVIPS_metadata():
    """Check scale, units, dtype and full metadata of a TVIPS camera TIFF."""
    # Expected metadata for TVIPS_bin4.tif (local to this test, so it
    # cannot leak into other tests).
    md = {'Acquisition_instrument': {'TEM': {'Detector': {'Camera': {'exposure': 0.4,
                                                                     'name': 'F416'}},
                                             'Stage': {'tilt_alpha': -0.0070000002,
                                                       'tilt_beta': -0.055,
                                                       'x': 0.0,
                                                       'y': -9.2000000506686774e-05,
                                                       'z': 7.0000001350933871e-06},
                                             'beam_energy': 99.0,
                                             'magnification': 32000.0}},
          'General': {'original_filename': 'TVIPS_bin4.tif',
                      'time': '9:01:17',
                      'title': ''},
          'Signal': {'binned': False, 'signal_type': ''},
          '_HyperSpy': {'Folding': {'original_axes_manager': None,
                                    'original_shape': None,
                                    'signal_unfolded': False,
                                    'unfolded': False}}}
    fname = os.path.join(MY_PATH2, 'TVIPS_bin4.tif')
    s = hs.load(fname, convert_units=True)
    assert s.data.dtype == np.uint8
    assert s.data.shape == (1024, 1024)
    assert s.axes_manager[0].units == 'nm'
    assert s.axes_manager[1].units == 'nm'
    assert_allclose(s.axes_manager[0].scale, 1.42080, rtol=1E-5)
    assert_allclose(s.axes_manager[1].scale, 1.42080, rtol=1E-5)
    assert_deep_almost_equal(s.metadata.as_dictionary(), md)
| gpl-3.0 |
KamalAwasthi/FullContact | myvenv/lib/python3.4/site-packages/django/contrib/auth/management/commands/changepassword.py | 136 | 2610 | from __future__ import unicode_literals
import getpass
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
class Command(BaseCommand):
    """Management command that interactively changes a user's password."""
    help = "Change a user's password for django.contrib.auth."
    requires_system_checks = False

    def _get_pass(self, prompt="Password: "):
        """Prompt for a password on the terminal; abort the command on
        empty input."""
        entered = getpass.getpass(prompt=force_str(prompt))
        if not entered:
            raise CommandError("aborted")
        return entered

    def add_arguments(self, parser):
        parser.add_argument(
            'username', nargs='?',
            help='Username to change password for; by default, it\'s the current username.')
        parser.add_argument(
            '--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Specifies the database to use. Default is "default".')

    def handle(self, *args, **options):
        # Fall back to the OS-level user when no username was supplied.
        username = options.get('username') or getpass.getuser()

        UserModel = get_user_model()
        try:
            u = UserModel._default_manager.using(options.get('database')).get(**{
                UserModel.USERNAME_FIELD: username
            })
        except UserModel.DoesNotExist:
            raise CommandError("user '%s' does not exist" % username)

        self.stdout.write("Changing password for user '%s'\n" % u)

        # Keep prompting until the two entries match and pass validation,
        # or until MAX_TRIES failed attempts have accumulated.
        MAX_TRIES = 3
        failures = 0
        while failures < MAX_TRIES:
            p1 = self._get_pass()
            p2 = self._get_pass("Password (again): ")
            if p1 != p2:
                self.stdout.write("Passwords do not match. Please try again.\n")
                failures += 1
                # Don't validate passwords that don't match.
                continue
            try:
                validate_password(p2, u)
            except ValidationError as err:
                self.stderr.write('\n'.join(err.messages))
                failures += 1
            else:
                break
        else:
            # The loop condition went false: every attempt failed.
            raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, failures))

        u.set_password(p1)
        u.save()
        return "Password changed successfully for user '%s'" % u
| apache-2.0 |
Work4Labs/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/databrowse/plugins/fieldchoices.py | 88 | 3876 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.contrib.databrowse.sites import DatabrowsePlugin
from django.shortcuts import render_to_response
from django.utils.text import capfirst
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
import urllib
class FieldChoicePlugin(DatabrowsePlugin):
    """Databrowse plugin that lets users browse a model's objects grouped by
    the distinct values of its plain (non-relational) fields.

    NOTE: this is legacy Python 2 code (``has_key``, ``cmp``-based sort,
    ``urllib.quote``); it will not run unmodified on Python 3.
    """
    def __init__(self, field_filter=None):
        # If field_filter is given, it should be a callable that takes a
        # Django database Field instance and returns True if that field should
        # be included. If field_filter is None, that all fields will be used.
        self.field_filter = field_filter
    def field_dict(self, model):
        """
        Helper function that returns a dictionary of all fields in the given
        model. If self.field_filter is set, it only includes the fields that
        match the filter.
        """
        if self.field_filter:
            return dict([(f.name, f) for f in model._meta.fields if self.field_filter(f)])
        else:
            # Default filter: skip relations, primary keys, unique fields and
            # auto/text fields, which make poor grouping candidates.
            return dict([(f.name, f) for f in model._meta.fields if not f.rel and not f.primary_key and not f.unique and not isinstance(f, (models.AutoField, models.TextField))])
    def model_index_html(self, request, model, site):
        """Return a 'View by:' HTML fragment linking each browsable field,
        or an empty string when the model has none."""
        fields = self.field_dict(model)
        if not fields:
            return u''
        return mark_safe(u'<p class="filter"><strong>View by:</strong> %s</p>' % \
            u', '.join(['<a href="fields/%s/">%s</a>' % (f.name, force_unicode(capfirst(f.verbose_name))) for f in fields.values()]))
    def urls(self, plugin_name, easy_instance_field):
        """Return the plugin URL for a field's value, or fall through to an
        implicit None when the field is not handled by this plugin."""
        if easy_instance_field.field in self.field_dict(easy_instance_field.model.model).values():
            field_value = smart_str(easy_instance_field.raw_value)
            return [mark_safe(u'%s%s/%s/%s/' % (
                easy_instance_field.model.url(),
                plugin_name, easy_instance_field.field.name,
                urllib.quote(field_value, safe='')))]
    def model_view(self, request, model_databrowse, url):
        """Dispatch a request: the homepage when url is None, otherwise the
        view for the named field; raises Http404 for unknown URLs."""
        self.model, self.site = model_databrowse.model, model_databrowse.site
        self.fields = self.field_dict(self.model)
        # If the model has no fields with choices, there's no point in going
        # further.
        if not self.fields:
            raise http.Http404('The requested model has no fields.')
        if url is None:
            return self.homepage_view(request)
        url_bits = url.split('/', 1)
        if self.fields.has_key(url_bits[0]):
            return self.field_view(request, self.fields[url_bits[0]], *url_bits[1:])
        raise http.Http404('The requested page does not exist.')
    def homepage_view(self, request):
        """Render the list of browsable fields, sorted by verbose name."""
        easy_model = EasyModel(self.site, self.model)
        field_list = self.fields.values()
        field_list.sort(lambda x, y: cmp(x.verbose_name, y.verbose_name))
        return render_to_response('databrowse/fieldchoice_homepage.html', {'root_url': self.site.root_url, 'model': easy_model, 'field_list': field_list})
    def field_view(self, request, field, value=None):
        """Render the objects having `value` for `field`, or (when value is
        None) the list of distinct values taken by `field`."""
        easy_model = EasyModel(self.site, self.model)
        easy_field = easy_model.field(field.name)
        if value is not None:
            obj_list = easy_model.objects(**{field.name: value})
            return render_to_response('databrowse/fieldchoice_detail.html', {'root_url': self.site.root_url, 'model': easy_model, 'field': easy_field, 'value': value, 'object_list': obj_list})
        obj_list = [v[field.name] for v in self.model._default_manager.distinct().order_by(field.name).values(field.name)]
        return render_to_response('databrowse/fieldchoice_list.html', {'root_url': self.site.root_url, 'model': easy_model, 'field': easy_field, 'object_list': obj_list})
| gpl-3.0 |
Pedram26/Humans-vs-Aliens | HumansAliens.app/Contents/Resources/lib/python2.7/pygame/tests/sndarray_test.py | 6 | 7264 | if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
import unittest
import pygame
from pygame.compat import as_bytes
import pygame.sndarray
from numpy import \
int8, int16, uint8, uint16, array, alltrue
arraytype = "numpy"
class SndarrayTest (unittest.TestCase):
    """Tests for pygame.sndarray conversions between Sounds and arrays.

    Each check initializes the mixer at a given sample size and channel
    count, converts test data between arrays and Sound objects, and
    verifies the samples survive the round trip unchanged.

    Fix: the deprecated unittest aliases (failUnless, failUnlessEqual,
    failUnlessRaises) were replaced with their modern equivalents
    (assertTrue/assertIn, assertEqual, assertRaises); the aliases were
    deprecated in Python 2.7/3.1 and removed in Python 3.12.
    """
    if arraytype:
        # Map mixer sample size (negative == signed) to the matching dtype.
        array_dtypes = {8: uint8, -8: int8, 16: uint16, -16: int16}

    def _assert_compatible(self, arr, size):
        """Assert that arr's element type matches the mixer sample size."""
        dtype = self.array_dtypes[size]
        if arraytype == 'numpy':
            self.assertEqual(arr.dtype, dtype)
        else:
            self.assertEqual(arr.typecode(), dtype)

    def test_import(self):
        'does it import'
        if not arraytype:
            self.fail("no array package installed")
        import pygame.sndarray

    def test_array(self):
        """sndarray.array returns the samples a Sound was built from."""
        if not arraytype:
            self.fail("no array package installed")

        def check_array(size, channels, test_data):
            try:
                pygame.mixer.init(22050, size, channels)
            except pygame.error:
                # Not all sizes are supported on all systems.
                return
            try:
                __, sz, __ = pygame.mixer.get_init()
                if sz == size:
                    srcarr = array(test_data, self.array_dtypes[size])
                    snd = pygame.sndarray.make_sound(srcarr)
                    arr = pygame.sndarray.array(snd)
                    self._assert_compatible(arr, size)
                    self.assertTrue(alltrue(arr == srcarr),
                                    "size: %i\n%s\n%s" %
                                    (size, arr, test_data))
            finally:
                pygame.mixer.quit()

        check_array(8, 1, [0, 0x0f, 0xf0, 0xff])
        check_array(8, 2,
                    [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]])
        check_array(16, 1, [0, 0x00ff, 0xff00, 0xffff])
        check_array(16, 2, [[0, 0xffff], [0xffff, 0],
                            [0x00ff, 0xff00], [0x0f0f, 0xf0f0]])
        check_array(-8, 1, [0, -0x80, 0x7f, 0x64])
        check_array(-8, 2,
                    [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]])
        check_array(-16, 1, [0, 0x7fff, -0x7fff, -1])
        check_array(-16, 2, [[0, -0x7fff], [-0x7fff, 0],
                             [0x7fff, 0], [0, 0x7fff]])

    def test_get_arraytype(self):
        """get_arraytype reports a known array backend."""
        if not arraytype:
            self.fail("no array package installed")
        self.assertIn(pygame.sndarray.get_arraytype(),
                      ['numpy'],
                      ("unknown array type %s" %
                       pygame.sndarray.get_arraytype()))

    def test_get_arraytypes(self):
        """get_arraytypes lists numpy and nothing unknown."""
        if not arraytype:
            self.fail("no array package installed")
        arraytypes = pygame.sndarray.get_arraytypes()
        self.assertIn('numpy', arraytypes)
        for atype in arraytypes:
            self.assertIn(atype, ['numpy'],
                          "unknown array type %s" % atype)

    def test_make_sound(self):
        """make_sound builds a Sound whose samples equal the source array."""
        if not arraytype:
            self.fail("no array package installed")

        def check_sound(size, channels, test_data):
            try:
                pygame.mixer.init(22050, size, channels)
            except pygame.error:
                # Not all sizes are supported on all systems.
                return
            try:
                __, sz, __ = pygame.mixer.get_init()
                if sz == size:
                    srcarr = array(test_data, self.array_dtypes[size])
                    snd = pygame.sndarray.make_sound(srcarr)
                    arr = pygame.sndarray.samples(snd)
                    self.assertTrue(alltrue(arr == srcarr),
                                    "size: %i\n%s\n%s" %
                                    (size, arr, test_data))
            finally:
                pygame.mixer.quit()

        check_sound(8, 1, [0, 0x0f, 0xf0, 0xff])
        check_sound(8, 2,
                    [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]])
        check_sound(16, 1, [0, 0x00ff, 0xff00, 0xffff])
        check_sound(16, 2, [[0, 0xffff], [0xffff, 0],
                            [0x00ff, 0xff00], [0x0f0f, 0xf0f0]])
        check_sound(-8, 1, [0, -0x80, 0x7f, 0x64])
        check_sound(-8, 2,
                    [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]])
        check_sound(-16, 1, [0, 0x7fff, -0x7fff, -1])
        check_sound(-16, 2, [[0, -0x7fff], [-0x7fff, 0],
                             [0x7fff, 0], [0, 0x7fff]])

    def test_samples(self):
        """samples gives a writable view: writes show up in array(snd)."""
        if not arraytype:
            self.fail("no array package installed")

        null_byte = as_bytes('\x00')

        def check_sample(size, channels, test_data):
            try:
                pygame.mixer.init(22050, size, channels)
            except pygame.error:
                # Not all sizes are supported on all systems.
                return
            try:
                __, sz, __ = pygame.mixer.get_init()
                if sz == size:
                    zeroed = null_byte * ((abs(size) // 8) *
                                          len(test_data) *
                                          channels)
                    snd = pygame.mixer.Sound(buffer=zeroed)
                    samples = pygame.sndarray.samples(snd)
                    self._assert_compatible(samples, size)
                    ##print ('X %s' % (samples.shape,))
                    ##print ('Y %s' % (test_data,))
                    samples[...] = test_data
                    arr = pygame.sndarray.array(snd)
                    self.assertTrue(alltrue(samples == arr),
                                    "size: %i\n%s\n%s" %
                                    (size, arr, test_data))
            finally:
                pygame.mixer.quit()

        check_sample(8, 1, [0, 0x0f, 0xf0, 0xff])
        check_sample(8, 2,
                     [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]])
        check_sample(16, 1, [0, 0x00ff, 0xff00, 0xffff])
        check_sample(16, 2, [[0, 0xffff], [0xffff, 0],
                             [0x00ff, 0xff00], [0x0f0f, 0xf0f0]])
        check_sample(-8, 1, [0, -0x80, 0x7f, 0x64])
        check_sample(-8, 2,
                     [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]])
        check_sample(-16, 1, [0, 0x7fff, -0x7fff, -1])
        check_sample(-16, 2, [[0, -0x7fff], [-0x7fff, 0],
                              [0x7fff, 0], [0, 0x7fff]])

    def test_use_arraytype(self):
        """use_arraytype accepts 'numpy' and rejects unknown backends."""
        if not arraytype:
            self.fail("no array package installed")

        def do_use_arraytype(atype):
            pygame.sndarray.use_arraytype(atype)

        pygame.sndarray.use_arraytype('numpy')
        self.assertEqual(pygame.sndarray.get_arraytype(), 'numpy')
        self.assertRaises(ValueError, do_use_arraytype, 'not an option')
# Run the sndarray tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
molly/GorillaBot | gorillabot/plugins/util.py | 2 | 3830 | # Copyright (c) 2013-2016 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from urllib.request import Request, urlopen, URLError
from random import choice
import os
import pickle
def admin(*args):
    """Designates bot administrator-only commands. Args is a list of command
    aliases.

    Each alias is registered in admincommands.pkl as
    ``(module.function, True)``; the function's own name is registered as
    ``(module.function, False)``. The decorated function is returned
    unchanged.
    """
    def decorator(func):
        # BUG FIX: admin-only commands were previously written to
        # commands.pkl (the general-command registry) while general
        # commands went to admincommands.pkl -- the two paths were swapped.
        path = os.path.dirname(os.path.abspath(__file__)) + '/admincommands.pkl'
        try:
            with open(path, 'rb') as pickle_file:
                commands = pickle.load(pickle_file)
        except (OSError, IOError):
            # No registry yet (first run); start fresh.
            commands = dict()
        command_name = func.__module__ + '.' + func.__name__
        if args:
            for name in args:
                commands[name] = (command_name, True)
        commands[func.__name__] = (command_name, False)
        with open(path, 'wb') as pickle_file:
            pickle.dump(commands, pickle_file)
        return func
    return decorator
def command(*args):
    """Designates general bot commands. Args is a list of command aliases.

    Each alias is registered in commands.pkl as
    ``(module.function, True)``; the function's own name is registered as
    ``(module.function, False)``. The decorated function is returned
    unchanged.
    """
    def decorator(func):
        # BUG FIX: general commands were previously written to
        # admincommands.pkl (the admin-only registry) while admin commands
        # went to commands.pkl -- the two paths were swapped.
        path = os.path.dirname(os.path.abspath(__file__)) + '/commands.pkl'
        try:
            with open(path, 'rb') as pickle_file:
                commands = pickle.load(pickle_file)
        except (OSError, IOError):
            # No registry yet (first run); start fresh.
            commands = dict()
        command_name = func.__module__ + '.' + func.__name__
        if args:
            for name in args:
                commands[name] = (command_name, True)
        commands[func.__name__] = (command_name, False)
        with open(path, 'wb') as pickle_file:
            pickle.dump(commands, pickle_file)
        return func
    return decorator
def get_line(m, file):
    """Get a random line from the given file (looked up under the bot's
    plugins/responses directory)."""
    responses_path = m.bot.base_path + '/plugins/responses/' + file
    with open(responses_path, 'r') as response_file:
        candidates = response_file.read().splitlines()
    return choice(candidates)
def get_url(m, url, title=False):
    """Request a given URL, handling any errors. If 'title' is True, only the
    first 5000 bytes of the page are read, in an effort to not load much more
    than is necessary. Returns the decoded body, or None on failure."""
    request = Request(url, headers=m.bot.header)
    try:
        html = urlopen(request)
    except URLError as e:
        m.bot.logger.info("{0}: {1}".format(url, e.reason))
        return None
    try:
        raw = html.read(5000 if title else -1)
        return raw.decode('utf-8')
    except UnicodeDecodeError as e:
        # Non-UTF-8 content: log and treat as unavailable.
        m.bot.logger.info("{0}: {1}".format(url, e.reason))
        return None
def humanize_list(l):
    """Return a human-readable list (Oxford-comma style for 3+ items)."""
    count = len(l)
    if count == 1:
        return l[0]
    if count == 2:
        return "{0} and {1}".format(l[0], l[1])
    return "{0}, and {1}".format(", ".join(l[:-1]), l[-1])
| mit |
manjunaths/tensorflow | tensorflow/python/kernel_tests/stack_ops_test.py | 64 | 5580 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class StackOpTest(test.TestCase):
  """Tests for the private gen_data_flow_ops stack ops (push/pop/close).

  Every scenario is exercised twice, once on CPU and once on GPU, via a
  use_gpu-parameterized _test* helper.
  """

  def _testStackPushPop(self, use_gpu):
    """Push one constant onto a stack and pop it back unchanged."""
    with self.test_session(use_gpu=use_gpu):
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
      self.assertAllClose([[4.0, 5.0]], c1.eval())

  def testStackPushPop(self):
    self._testStackPushPop(use_gpu=False)
    self._testStackPushPop(use_gpu=True)

  def _testStackPushPopSwap(self, use_gpu):
    """Round-trip a large tensor with swap_memory=True on the push."""
    with self.test_session(use_gpu=use_gpu):
      a = np.arange(2000)
      x = constant_op.constant(a, dtype=dtypes.float32)
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops._stack_push(h, x, swap_memory=True)
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
      self.assertAllClose(a, c1.eval())

  def testStackPushPopSwap(self):
    self._testStackPushPopSwap(use_gpu=False)
    self._testStackPushPopSwap(use_gpu=True)

  def _testStackWhileSwap(self, use_gpu):
    """Push in one while_loop (with memory swapping), pop and accumulate in
    a second while_loop, and check the accumulated sum."""
    with self.test_session(use_gpu=use_gpu):
      n = constant_op.constant(0)
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")

      # First loop: push ones onto the stack 10 times.
      def c(x):
        return math_ops.less(x, 10)

      def b(x):
        with ops.control_dependencies([x]):
          a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with ops.control_dependencies([v]):
          return math_ops.add(x, 1)
      r = control_flow_ops.while_loop(c, b, [n])

      v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)

      # Second loop: pop all 10 entries and sum them into y.
      def c1(x, y):
        return math_ops.greater(x, 0)

      def b1(x, y):
        nx = math_ops.subtract(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
        return [nx, ny]
      rx, ry = control_flow_ops.while_loop(
          c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval())

  def testStackWhileSwap(self):
    self._testStackWhileSwap(use_gpu=False)
    self._testStackWhileSwap(use_gpu=True)

  def _testMultiStack(self, use_gpu):
    """Two stacks with distinct names operate without interference."""
    with self.test_session(use_gpu=use_gpu):
      h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
      c1 = gen_data_flow_ops._stack_push(h1, 4.0)
      with ops.control_dependencies([c1]):
        c1 = gen_data_flow_ops._stack_pop(h1, dtypes.float32)
      h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar")
      c2 = gen_data_flow_ops._stack_push(h2, 5.0)
      with ops.control_dependencies([c2]):
        c2 = gen_data_flow_ops._stack_pop(h2, dtypes.float32)
      r = c1 + c2
      self.assertAllClose(9.0, r.eval())

  def testMultiStack(self):
    self._testMultiStack(use_gpu=False)
    self._testMultiStack(use_gpu=True)

  def _testSameNameStacks(self, use_gpu):
    """Stacks sharing the same stack_name still get distinct handles."""
    with self.test_session(use_gpu=use_gpu):
      h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
      c1 = gen_data_flow_ops._stack_push(h1, 4.0)
      h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
      c2 = gen_data_flow_ops._stack_push(h2, 5.0)
      # NOTE(review): r is built but never evaluated -- presumably only to
      # put both pushes into the graph; confirm whether it is needed.
      r = c1 + c2
      self.assertNotEqual(h1.eval()[1], h2.eval()[1])

  def testSameNameStacks(self):
    self._testSameNameStacks(use_gpu=False)
    self._testSameNameStacks(use_gpu=True)

  def _testCloseStack(self, use_gpu):
    """Closing a freshly created stack succeeds."""
    with self.test_session(use_gpu=use_gpu) as sess:
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
      c1 = gen_data_flow_ops._stack_close(h)
      sess.run(c1)

  def testCloseStack(self):
    self._testCloseStack(use_gpu=False)
    self._testCloseStack(use_gpu=True)

  def _testPushCloseStack(self, use_gpu):
    """Closing a stack after a push succeeds."""
    with self.test_session(use_gpu=use_gpu) as sess:
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops._stack_close(h)
      sess.run(c1)

  def testPushCloseStack(self):
    self._testPushCloseStack(use_gpu=False)
    self._testPushCloseStack(use_gpu=True)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
leapcode/bitmask_client | src/leap/bitmask/services/eip/tests/test_eipbootstrapper.py | 6 | 12748 | # -*- coding: utf-8 -*-
# test_eipbootstrapper.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for the EIP Boostrapper checks
These will be whitebox tests since we want to make sure the private
implementation is checking what we expect.
"""
import os
import mock
import tempfile
import time
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa - skip 'unused import' warning
from nose.twistedtools import deferred, reactor
from twisted.internet import threads
from requests.models import Response
from leap.bitmask.services.eip.eipbootstrapper import EIPBootstrapper
from leap.bitmask.services.eip.eipconfig import EIPConfig
from leap.bitmask.config.providerconfig import ProviderConfig
from leap.bitmask.crypto.tests import fake_provider
from leap.bitmask.crypto.srpauth import SRPAuth
from leap.bitmask import util
from leap.common.testing.basetest import BaseLeapTest
from leap.common.files import mkdir_p
class EIPBootstrapperActiveTest(BaseLeapTest):
    # Whitebox tests for EIPBootstrapper: they monkeypatch private helpers
    # and module-level functions directly, so everything patched in setUp
    # or inside a test must be restored in tearDown.

    @classmethod
    def setUpClass(cls):
        """Start the fake provider on ephemeral HTTP and HTTPS ports."""
        BaseLeapTest.setUpClass()
        factory = fake_provider.get_provider_factory()
        http = reactor.listenTCP(0, factory)
        https = reactor.listenSSL(
            0, factory,
            fake_provider.OpenSSLServerContextFactory())

        get_port = lambda p: p.getHost().port
        # Port 0 above means "pick any free port"; remember what we got.
        cls.http_port = get_port(http)
        cls.https_port = get_port(https)

    def setUp(self):
        """Create a fresh bootstrapper and save everything we monkeypatch."""
        self.eb = EIPBootstrapper()
        # References to the real implementations, restored in tearDown.
        self.old_pp = util.get_path_prefix
        self.old_save = EIPConfig.save
        self.old_load = EIPConfig.load
        self.old_si = SRPAuth.get_session_id

    def tearDown(self):
        """Undo the monkeypatching done in setUp and in the tests."""
        util.get_path_prefix = self.old_pp
        EIPConfig.save = self.old_save
        EIPConfig.load = self.old_load
        SRPAuth.get_session_id = self.old_si

    def _download_config_test_template(self, ifneeded, new):
        """
        All download config tests have the same structure, so this is
        a parametrized test for that.

        :param ifneeded: sets _download_if_needed
        :type ifneeded: bool
        :param new: if True uses time.time() as mtime for the mocked
                    eip-service file, otherwise it uses 100 (a really
                    old mtime)
        :type new: float or int (will be coerced)
        """
        pc = ProviderConfig()
        pc.get_domain = mock.MagicMock(
            return_value="localhost:%s" % (self.https_port))
        self.eb._provider_config = pc
        pc.get_api_uri = mock.MagicMock(
            return_value="https://%s" % (pc.get_domain()))
        pc.get_api_version = mock.MagicMock(return_value="1")

        # This is to ignore https checking, since it's not the point
        # of this test
        pc.get_ca_cert_path = mock.MagicMock(return_value=False)

        # NOTE(review): mkdtemp() directories are never cleaned up here;
        # each test run leaks a temp dir.
        path_prefix = tempfile.mkdtemp()
        util.get_path_prefix = mock.MagicMock(return_value=path_prefix)
        # Stub out config (de)serialization so no real JSON is parsed/written.
        EIPConfig.save = mock.MagicMock()
        EIPConfig.load = mock.MagicMock()
        self.eb._download_if_needed = ifneeded

        provider_dir = os.path.join(util.get_path_prefix(),
                                    "leap",
                                    "providers",
                                    pc.get_domain())
        mkdir_p(provider_dir)
        eip_config_path = os.path.join(provider_dir,
                                       "eip-service.json")
        # Pre-create a dummy eip-service file so the mtime check has
        # something to look at.
        with open(eip_config_path, "w") as ec:
            ec.write("A")
        # set mtime to something really new (now) or really old (epoch+100s)
        if new:
            os.utime(eip_config_path, (-1, time.time()))
        else:
            os.utime(eip_config_path, (-1, 100))

    @deferred()
    def test_download_config_not_modified(self):
        """A fresh local file with download_if_needed set -> no save."""
        self._download_config_test_template(True, True)
        d = threads.deferToThread(self.eb._download_config)

        def check(*args):
            self.assertFalse(self.eb._eip_config.save.called)
        d.addCallback(check)
        return d

    @deferred()
    def test_download_config_modified(self):
        """A stale local file with download_if_needed set -> re-download."""
        self._download_config_test_template(True, False)
        d = threads.deferToThread(self.eb._download_config)

        def check(*args):
            self.assertTrue(self.eb._eip_config.save.called)
        d.addCallback(check)
        return d

    @deferred()
    def test_download_config_ignores_mtime(self):
        """With download_if_needed off the mtime is irrelevant: always save."""
        self._download_config_test_template(False, True)
        d = threads.deferToThread(self.eb._download_config)

        def check(*args):
            self.eb._eip_config.save.assert_called_once_with(
                ("leap",
                 "providers",
                 self.eb._provider_config.get_domain(),
                 "eip-service.json"))
        d.addCallback(check)
        return d

    def _download_certificate_test_template(self, ifneeded, createcert):
        """
        All download client certificate tests have the same structure,
        so this is a parametrized test for that.

        :param ifneeded: sets _download_if_needed
        :type ifneeded: bool
        :param createcert: if True it creates a dummy file to play the
                           part of a downloaded certificate
        :type createcert: bool
        :returns: the temp eip cert path and the dummy cert contents
        :rtype: tuple of str, str
        """
        pc = ProviderConfig()
        ec = EIPConfig()
        self.eb._provider_config = pc
        self.eb._eip_config = ec
        pc.get_domain = mock.MagicMock(
            return_value="localhost:%s" % (self.https_port))
        pc.get_api_uri = mock.MagicMock(
            return_value="https://%s" % (pc.get_domain()))
        pc.get_api_version = mock.MagicMock(return_value="1")
        # Skip https certificate validation; not what is under test here.
        pc.get_ca_cert_path = mock.MagicMock(return_value=False)

        # NOTE(review): mkdtemp() directory is never removed (leaked).
        path_prefix = tempfile.mkdtemp()
        util.get_path_prefix = mock.MagicMock(return_value=path_prefix)
        EIPConfig.save = mock.MagicMock()
        EIPConfig.load = mock.MagicMock()
        self.eb._download_if_needed = ifneeded

        provider_dir = os.path.join(util.get_path_prefix(),
                                    "leap",
                                    "providers",
                                    "somedomain")
        mkdir_p(provider_dir)
        eip_cert_path = os.path.join(provider_dir,
                                     "cert")
        ec.get_client_cert_path = mock.MagicMock(
            return_value=eip_cert_path)
        cert_content = "A"
        if createcert:
            # NOTE(review): the file handle below shadows the EIPConfig
            # instance `ec`; harmless only because `ec` is not used again.
            with open(eip_cert_path, "w") as ec:
                ec.write(cert_content)
        return eip_cert_path, cert_content

    def test_download_client_certificate_not_modified(self):
        """Valid existing cert -> downloader must leave it untouched."""
        cert_path, old_cert_content = self._download_certificate_test_template(
            True, True)
        with mock.patch('leap.common.certs.should_redownload',
                        new_callable=mock.MagicMock,
                        return_value=False):
            self.eb._download_client_certificates()
            with open(cert_path, "r") as c:
                self.assertEqual(c.read(), old_cert_content)

    @deferred()
    def test_download_client_certificate_old_cert(self):
        """Cert flagged for redownload -> contents must be replaced."""
        cert_path, old_cert_content = self._download_certificate_test_template(
            True, True)

        def wrapper(*args):
            with mock.patch('leap.common.certs.should_redownload',
                            new_callable=mock.MagicMock,
                            return_value=True):
                with mock.patch('leap.common.certs.is_valid_pemfile',
                                new_callable=mock.MagicMock,
                                return_value=True):
                    self.eb._download_client_certificates()

        def check(*args):
            with open(cert_path, "r") as c:
                self.assertNotEqual(c.read(), old_cert_content)
        d = threads.deferToThread(wrapper)
        d.addCallback(check)
        return d

    @deferred()
    def test_download_client_certificate_no_cert(self):
        """No local cert at all -> downloader must create one."""
        cert_path, _ = self._download_certificate_test_template(
            True, False)

        def wrapper(*args):
            with mock.patch('leap.common.certs.should_redownload',
                            new_callable=mock.MagicMock,
                            return_value=False):
                with mock.patch('leap.common.certs.is_valid_pemfile',
                                new_callable=mock.MagicMock,
                                return_value=True):
                    self.eb._download_client_certificates()

        def check(*args):
            self.assertTrue(os.path.exists(cert_path))
        d = threads.deferToThread(wrapper)
        d.addCallback(check)
        return d

    @deferred()
    def test_download_client_certificate_force_not_valid(self):
        # NOTE(review): despite the name, is_valid_pemfile is mocked to
        # return True here, making this identical to the old_cert test.
        # Looks like a copy/paste; confirm the intended mock value.
        cert_path, old_cert_content = self._download_certificate_test_template(
            True, True)

        def wrapper(*args):
            with mock.patch('leap.common.certs.should_redownload',
                            new_callable=mock.MagicMock,
                            return_value=True):
                with mock.patch('leap.common.certs.is_valid_pemfile',
                                new_callable=mock.MagicMock,
                                return_value=True):
                    self.eb._download_client_certificates()

        def check(*args):
            with open(cert_path, "r") as c:
                self.assertNotEqual(c.read(), old_cert_content)
        d = threads.deferToThread(wrapper)
        d.addCallback(check)
        return d

    @deferred()
    def test_download_client_certificate_invalid_download(self):
        """Downloaded data that is not a valid PEM file must raise."""
        cert_path, _ = self._download_certificate_test_template(
            False, False)

        def wrapper(*args):
            with mock.patch('leap.common.certs.should_redownload',
                            new_callable=mock.MagicMock,
                            return_value=True):
                with mock.patch('leap.common.certs.is_valid_pemfile',
                                new_callable=mock.MagicMock,
                                return_value=False):
                    with self.assertRaises(Exception):
                        self.eb._download_client_certificates()
        d = threads.deferToThread(wrapper)
        return d

    @deferred()
    def test_download_client_certificate_uses_session_id(self):
        """The cert request must carry the SRP session id as a cookie."""
        _, _ = self._download_certificate_test_template(
            False, False)
        # Restored by tearDown via self.old_si.
        SRPAuth.get_session_id = mock.MagicMock(return_value="1")

        def check_cookie(*args, **kwargs):
            cookies = kwargs.get("cookies", None)
            self.assertEqual(cookies, {'_session_id': '1'})
            return Response()

        def wrapper(*args):
            with mock.patch('leap.common.certs.should_redownload',
                            new_callable=mock.MagicMock,
                            return_value=False):
                with mock.patch('leap.common.certs.is_valid_pemfile',
                                new_callable=mock.MagicMock,
                                return_value=True):
                    # Intercept the HTTP GET so check_cookie can inspect
                    # the cookies that were sent.
                    with mock.patch('requests.sessions.Session.get',
                                    new_callable=mock.MagicMock,
                                    side_effect=check_cookie):
                        with mock.patch('requests.models.Response.content',
                                        new_callable=mock.PropertyMock,
                                        return_value="A"):
                            self.eb._download_client_certificates()
        d = threads.deferToThread(wrapper)
        return d

    @deferred()
    def test_run_eip_setup_checks(self):
        """The setup-checks chain runs both download steps exactly once."""
        self.eb._download_config = mock.MagicMock()
        self.eb._download_client_certificates = mock.MagicMock()
        d = self.eb.run_eip_setup_checks(ProviderConfig())

        def check(*args):
            self.eb._download_config.assert_called_once_with()
            self.eb._download_client_certificates.assert_called_once_with(None)
        d.addCallback(check)
        return d
| gpl-3.0 |
IronLanguages/ironpython2 | Src/StdLib/Lib/whichdb.py | 175 | 3379 | # !/usr/bin/env python
"""Guess which db package to use to open a db file."""
import os
import struct
import sys
try:
import dbm
_dbmerror = dbm.error
except ImportError:
dbm = None
# just some sort of valid exception which might be raised in the
# dbm test
_dbmerror = IOError
def whichdb(filename):
    """Guess which db package to use to open a db file.

    Return values:

    - None if the database file can't be read;
    - empty string if the file can be read but can't be recognized
    - the module name (e.g. "dbm" or "gdbm") if recognized.

    Importing the given module may still fail, and opening the
    database using that module may still fail.
    """

    # Check for dbm first -- this has a .pag and a .dir file
    try:
        f = open(filename + os.extsep + "pag", "rb")
        f.close()
        # dbm linked with gdbm on OS/2 doesn't have .dir file
        # NOTE(review): if the top-level "import dbm" failed, dbm is None
        # here and this attribute access raises AttributeError, which is
        # not caught by the IOError handler below -- confirm.
        if not (dbm.library == "GNU gdbm" and sys.platform == "os2emx"):
            f = open(filename + os.extsep + "dir", "rb")
            f.close()
        return "dbm"
    except IOError:
        # some dbm emulations based on Berkeley DB generate a .db file
        # some do not, but they should be caught by the dbhash checks
        try:
            f = open(filename + os.extsep + "db", "rb")
            f.close()
            # guarantee we can actually open the file using dbm
            # kind of overkill, but since we are dealing with emulations
            # it seems like a prudent step
            if dbm is not None:
                d = dbm.open(filename)
                d.close()
            return "dbm"
        except (IOError, _dbmerror):
            pass

    # Check for dumbdbm next -- this has a .dir and a .dat file
    try:
        # First check for presence of files
        os.stat(filename + os.extsep + "dat")
        size = os.stat(filename + os.extsep + "dir").st_size
        # dumbdbm files with no keys are empty
        if size == 0:
            return "dumbdbm"
        f = open(filename + os.extsep + "dir", "rb")
        try:
            # dumbdbm .dir entries are repr()'d keys, so the first byte
            # of a non-empty index is a quote character.
            if f.read(1) in ("'", '"'):
                return "dumbdbm"
        finally:
            f.close()
    except (OSError, IOError):
        pass

    # See if the file exists, return None if not
    try:
        f = open(filename, "rb")
    except IOError:
        return None

    # Read the start of the file -- the magic number
    s16 = f.read(16)
    f.close()
    s = s16[0:4]

    # Return "" if not at least 4 bytes
    if len(s) != 4:
        return ""

    # Convert to 4-byte int in native byte order -- return "" if impossible
    try:
        (magic,) = struct.unpack("=l", s)
    except struct.error:
        return ""

    # Check for GNU dbm
    if magic in (0x13579ace, 0x13579acd, 0x13579acf):
        return "gdbm"

    # Check for old Berkeley db hash file format v2
    if magic in (0x00061561, 0x61150600):
        return "bsddb185"

    # Later versions of Berkeley db hash file have a 12-byte pad in
    # front of the file type
    try:
        (magic,) = struct.unpack("=l", s16[-4:])
    except struct.error:
        return ""

    # Check for BSD hash
    if magic in (0x00061561, 0x61150600):
        return "dbhash"

    # Unknown
    return ""
if __name__ == "__main__":
    # CLI mode: print the guessed db module for each file named on the
    # command line (Python 2 print statement).
    for filename in sys.argv[1:]:
        print whichdb(filename) or "UNKNOWN", filename
| apache-2.0 |
ZachRiegel/scriptbin | pypyjs/modules/encodings/mac_croatian.py | 593 | 13889 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec mapping mac-croatian bytes <-> unicode via the
    module-level charmap tables."""

    def encode(self,input,errors='strict'):
        """Encode a unicode string to mac-croatian bytes."""
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        """Decode mac-croatian bytes to a unicode string."""
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is per-character, so no state is carried
        # between calls; only the encoded bytes ([0]) are returned.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is per-byte, so no state is carried between
        # calls; only the decoded text ([0]) is returned.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream API: all behavior is inherited from Codec + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream API: all behavior is inherited from Codec + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo record used to register this codec."""
    # One shared stateless Codec instance provides both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-croatian',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
# Inverse mapping (unicode -> byte) built from decoding_table at import time.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
rhelmer/socorro | webapp-django/crashstats/crashstats/management/__init__.py | 10 | 2499 | from django.contrib.auth.models import Permission
from django.db.models.signals import post_syncdb
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
# Import the specific models we need to wait for to have been
# sync'ed by Django
import django.contrib.auth.models
import django.contrib.contenttypes.models
# Note! When referring to these in code, you'll have to use the
# prefix 'crashstats'. So template code looks like this for example:
# {% if request.user.has_perm('crashstats.view_pii') %}
# Maps permission codename -> human-readable name. These are created at
# syncdb time by setup_custom_permissions_and_groups below and attached
# to a model-less 'crashstats' content type.
PERMISSIONS = {
    'view_pii': 'View Personal Identifiable Information',
    'view_rawdump': 'View Raw Dumps',
    'view_exploitability': 'View Exploitability Results',
    'view_flash_exploitability': 'View Flash Exploitability Results',
    'run_custom_queries': 'Run Custom Queries in Super Search',
    'run_long_queries': 'Run Long Queries',
    'upload_symbols': 'Upload Symbols Files',
}

# internal use to know when all interesting models have been synced
_senders_left = [
    django.contrib.auth.models,
    django.contrib.contenttypes.models
]
@receiver(post_syncdb)
def setup_custom_permissions_and_groups(sender, **kwargs):
    """
    When you `./manage.py syncdb` every installed app gets synced.
    We use this opportunity to create and set up permissions that are NOT
    attached to a specific model. We need this because we want to use
    permissions but not the Django models.

    Note that this needs to run after django.contrib.auth.models and
    django.contrib.contenttypes.models have been synced so that we can use
    them in this context.
    """
    if _senders_left:
        if sender in _senders_left:
            _senders_left.remove(sender)
        if _senders_left:
            # at least one model we depend on has not been synced yet
            return
    # All the relevant senders have been sync'ed.
    # We can now use them
    appname = 'crashstats'
    # model='' creates a content type that is not tied to any real model
    ct, __ = ContentType.objects.get_or_create(
        model='',
        app_label=appname,
        defaults={'name': appname}
    )
    for codename, name in PERMISSIONS.items():
        try:
            # permission already exists; refresh its human-readable name
            # if it has changed in PERMISSIONS
            p = Permission.objects.get(codename=codename, content_type=ct)
            if p.name != name:
                p.name = name
                p.save()
        except Permission.DoesNotExist:
            Permission.objects.create(
                name=name,
                codename=codename,
                content_type=ct
            )
| mpl-2.0 |
meissnert/StarCluster | starcluster/__init__.py | 19 | 1878 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import sys
from starcluster import static
sys.path.insert(0, static.STARCLUSTER_PLUGIN_DIR)
__version__ = static.VERSION
__author__ = "Justin Riley (justin.t.riley@gmail.com)"
__all__ = [
"config",
"static",
"cluster",
"clustersetup",
"node",
"sshutils",
"plugins",
"balancers",
"managers",
"validators",
"image",
"volume",
"awsutils",
"cli",
"commands",
"logger",
"utils",
"userdata",
"webtools",
"threadpool",
"templates",
"exception",
"tests",
"completion",
"progressbar",
"spinner",
]
def test_nose():
    """Run the starcluster test suite under nose, if nose is installed."""
    try:
        from nose import run
        run(argv=['sctest', '-s', '--exe', 'starcluster'], exit=False)
    except ImportError:
        print 'error importing nose'
def test_pytest():
    """Run the starcluster test suite under pytest, if pytest is installed."""
    try:
        import pytest
        import os
        # run the tests living next to this package's __init__ file
        pytest.main('-xvs %s' % os.path.dirname(__file__))
    except ImportError:
        print 'error importing pytest'
def test(use_nose=False):
    """Run the test suite with pytest by default, or nose on request."""
    runner = test_nose if use_nose else test_pytest
    runner()
# Keep nose/pytest collectors from treating these runner helpers as
# test cases themselves.
test_nose.__test__ = False
test_pytest.__test__ = False
test.__test__ = False
| gpl-3.0 |
sojo21/ToughRADIUS | toughradius/console/admin/passwd.py | 5 | 1937 | #!/usr/bin/env python
#coding=utf-8
import sys, os
from bottle import Bottle
from bottle import request
from bottle import response
from bottle import redirect
from bottle import MakoTemplate
from bottle import static_file
from bottle import abort
from beaker.cache import cache_managers
from toughradius.console.libs.paginator import Paginator
from toughradius.console.libs import utils
from toughradius.console.websock import websock
from toughradius.console import models
from toughradius.console.base import *
from toughradius.console.admin import forms
from hashlib import md5
from twisted.python import log
import bottle
import datetime
import json
import functools
# URL prefix under which this bottle sub-app is mounted by the admin app.
__prefix__ = "/passwd"
app = Bottle()
app.config['__prefix__'] = __prefix__
###############################################################################
# password update
###############################################################################
@app.get('/', apply=auth_opr)
def passwd(db, render):
    """Render the password-change form, pre-filled with the logged-in
    operator's name (taken from the session cookie)."""
    form = forms.passwd_update_form()
    form.fill(operator_name=get_cookie("username"))
    return render("base_form", form=form)
@app.post('/', apply=auth_opr)
def passwd_update(db, render):
    """Handle the password-change form submission.

    Validates the form, checks that both password fields match, stores
    the (md5-hashed) password on the operator row, writes an audit log
    entry and redirects back to the form.
    """
    form = forms.passwd_update_form()
    if not form.validates(source=request.forms):
        return render("base_form", form=form)
    if form.d.operator_pass != form.d.operator_pass_chk:
        return render("base_form", form=form, msg=u"确认密码不一致")
    opr = db.query(models.SlcOperator).filter_by(
        operator_name=form.d.operator_name).first()
    if opr is None:
        # bug fix: previously a missing operator row crashed with
        # AttributeError on the assignment below
        return render("base_form", form=form, msg=u"操作员不存在")
    # NOTE(review): unsalted md5 is a weak password hash; kept unchanged
    # for compatibility with existing stored credentials.
    opr.operator_pass = md5(form.d.operator_pass).hexdigest()
    ops_log = models.SlcRadOperateLog()
    ops_log.operator_name = get_cookie("username")
    ops_log.operate_ip = get_cookie("login_ip")
    ops_log.operate_time = utils.get_currtime()
    ops_log.operate_desc = u'操作员(%s)修改密码' % (get_cookie("username"),)
    db.add(ops_log)
    db.commit()
    redirect("/passwd")
| agpl-3.0 |
eugene7646/autopsy | InternalPythonModules/android/TskCallLogsParser.py | 5 | 2323 | """
Autopsy Forensic Browser
Copyright 2019 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ResultSetIterator import ResultSetIterator
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import CallMediaType
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import CommunicationDirection
from org.sleuthkit.datamodel import Account
class TskCallLogsParser(ResultSetIterator):
    """
    Generic TSK_CALLLOG artifact template. Each of these methods
    will contain the extraction and transformation logic for
    converting raw database records to the expected TSK_CALLLOG
    format.

    A simple example of data transformation would be computing
    the end time of a call when the database only supplies the start
    time and duration.

    Concrete per-app parsers subclass this and override the getters;
    the base implementations below just return "unknown" defaults.
    """

    def __init__(self, result_set):
        super(TskCallLogsParser, self).__init__(result_set)
        # Default values returned when a subclass does not override
        # the corresponding getter.
        self._DEFAULT_STRING = ""
        self._DEFAULT_DIRECTION = CommunicationDirection.UNKNOWN
        self._DEFAULT_CALL_TYPE = CallMediaType.UNKNOWN
        # Jython/Python 2 long literal; sentinel for "timestamp unknown".
        self._DEFAULT_LONG = -1L

        # Convenience aliases for subclasses.
        self.INCOMING_CALL = CommunicationDirection.INCOMING
        self.OUTGOING_CALL = CommunicationDirection.OUTGOING
        self.AUDIO_CALL = CallMediaType.AUDIO
        self.VIDEO_CALL = CallMediaType.VIDEO

    def get_call_direction(self):
        """Return the call direction (incoming/outgoing); override in subclasses."""
        return self._DEFAULT_DIRECTION

    def get_phone_number_from(self):
        """Return the caller's phone number; override in subclasses."""
        return self._DEFAULT_STRING

    def get_phone_number_to(self):
        """Return the callee's phone number; override in subclasses."""
        return self._DEFAULT_STRING

    def get_call_start_date_time(self):
        """Return the call start timestamp; override in subclasses."""
        return self._DEFAULT_LONG

    def get_call_end_date_time(self):
        """Return the call end timestamp; override in subclasses."""
        return self._DEFAULT_LONG

    def get_call_type(self):
        """Return the call media type (audio/video); override in subclasses."""
        return self._DEFAULT_CALL_TYPE
| apache-2.0 |
DinoCow/airflow | tests/providers/google/cloud/sensors/test_cloud_storage_transfer_service.py | 7 | 6886 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from parameterized import parameterized
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus
from airflow.providers.google.cloud.sensors.cloud_storage_transfer_service import (
CloudDataTransferServiceJobStatusSensor,
)
TEST_NAME = "transferOperations/transferJobs-123-456"
TEST_COUNTERS = {
"bytesFoundFromSource": 512,
"bytesCopiedToSink": 1024,
}
class TestGcpStorageTransferOperationWaitForJobStatusSensor(unittest.TestCase):
    """Unit tests for CloudDataTransferServiceJobStatusSensor.poke().

    The CloudDataTransferServiceHook class is patched at the sensor module
    level; note that ``operations_contain_expected_statuses`` is asserted on
    the mocked *class* itself (the sensor calls it as a static/class method),
    while ``list_transfer_operations`` is asserted on ``return_value`` (an
    instance method).
    """

    @mock.patch(
        'airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook'
    )
    def test_wait_for_status_success(self, mock_tool):
        """poke() returns True when the hook reports the expected status."""
        operations = [
            {
                'name': TEST_NAME,
                'metadata': {
                    'status': GcpTransferOperationStatus.SUCCESS,
                    'counters': TEST_COUNTERS,
                },
            }
        ]
        mock_tool.return_value.list_transfer_operations.return_value = operations
        mock_tool.operations_contain_expected_statuses.return_value = True
        op = CloudDataTransferServiceJobStatusSensor(
            task_id='task-id',
            job_name='job-name',
            project_id='project-id',
            expected_statuses=GcpTransferOperationStatus.SUCCESS,
        )
        # poke() pushes the operation list to XCom, so the context needs a ti.
        context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
        result = op.poke(context)
        mock_tool.return_value.list_transfer_operations.assert_called_once_with(
            request_filter={'project_id': 'project-id', 'job_names': ['job-name']}
        )
        mock_tool.operations_contain_expected_statuses.assert_called_once_with(
            operations=operations, expected_statuses={GcpTransferOperationStatus.SUCCESS}
        )
        self.assertTrue(result)

    @mock.patch(
        'airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook'
    )
    def test_wait_for_status_success_default_expected_status(self, mock_tool):
        """A single status value is normalized into a one-element set."""
        op = CloudDataTransferServiceJobStatusSensor(
            task_id='task-id',
            job_name='job-name',
            project_id='project-id',
            expected_statuses=GcpTransferOperationStatus.SUCCESS,
        )
        context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
        result = op.poke(context)
        mock_tool.operations_contain_expected_statuses.assert_called_once_with(
            operations=mock.ANY, expected_statuses={GcpTransferOperationStatus.SUCCESS}
        )
        self.assertTrue(result)

    @mock.patch(
        'airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook'
    )
    def test_wait_for_status_after_retry(self, mock_tool):
        """poke() returns False first, then True on the next invocation.

        side_effect sequences drive consecutive calls; reset_mock between
        pokes resets call counters but deliberately keeps the side_effect.
        """
        operations_set = [
            [
                {
                    'name': TEST_NAME,
                    'metadata': {
                        'status': GcpTransferOperationStatus.SUCCESS,
                        'counters': TEST_COUNTERS,
                    },
                },
            ],
            [
                {
                    'name': TEST_NAME,
                    'metadata': {
                        'status': GcpTransferOperationStatus.SUCCESS,
                        'counters': TEST_COUNTERS,
                    },
                },
            ],
        ]
        mock_tool.return_value.list_transfer_operations.side_effect = operations_set
        mock_tool.operations_contain_expected_statuses.side_effect = [False, True]
        op = CloudDataTransferServiceJobStatusSensor(
            task_id='task-id',
            job_name='job-name',
            project_id='project-id',
            expected_statuses=GcpTransferOperationStatus.SUCCESS,
        )
        context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
        result = op.poke(context)
        self.assertFalse(result)
        mock_tool.operations_contain_expected_statuses.assert_called_once_with(
            operations=operations_set[0], expected_statuses={GcpTransferOperationStatus.SUCCESS}
        )
        mock_tool.operations_contain_expected_statuses.reset_mock()
        result = op.poke(context)
        self.assertTrue(result)
        mock_tool.operations_contain_expected_statuses.assert_called_once_with(
            operations=operations_set[1], expected_statuses={GcpTransferOperationStatus.SUCCESS}
        )

    @parameterized.expand(
        [
            (GcpTransferOperationStatus.SUCCESS, {GcpTransferOperationStatus.SUCCESS}),
            ({GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS}),
            (
                {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS},
                {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS},
            ),
        ]
    )
    @mock.patch(
        'airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook'
    )
    def test_wait_for_status_normalize_status(self, expected_status, received_status, mock_tool):
        """Whatever shape expected_statuses takes, the hook receives a set."""
        operations = [
            {
                'name': TEST_NAME,
                'metadata': {
                    'status': GcpTransferOperationStatus.SUCCESS,
                    'counters': TEST_COUNTERS,
                },
            }
        ]
        mock_tool.return_value.list_transfer_operations.return_value = operations
        mock_tool.operations_contain_expected_statuses.side_effect = [False, True]
        op = CloudDataTransferServiceJobStatusSensor(
            task_id='task-id',
            job_name='job-name',
            project_id='project-id',
            expected_statuses=expected_status,
        )
        context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
        result = op.poke(context)
        self.assertFalse(result)
        mock_tool.operations_contain_expected_statuses.assert_called_once_with(
            operations=operations, expected_statuses=received_status
        )
| apache-2.0 |
WhySoGeeky/DroidPot | venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.py | 1039 | 1469 | # -*- coding: utf-8 -*-
"""
Python 2/3 compatibility shims: exposes one set of names (urllib helpers,
cookie handling, StringIO, string and numeric types) regardless of the
interpreter version running requests.
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
# Prefer simplejson when available (faster C speedups on old interpreters).
try:
    import simplejson as json
except (ImportError, SyntaxError):
    # simplejson does not support Python 3.2, it throws a SyntaxError
    # because of u'...' Unicode literals.
    import json
# ---------
# Specifics
# ---------
if is_py2:
    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    from .packages.urllib3.packages.ordered_dict import OrderedDict
    # Rebind the builtin names so the rest of the codebase can use a single
    # vocabulary: `str` is always text, `bytes` is always raw bytes.
    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)
elif is_py3:
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
    from urllib.request import parse_http_list, getproxies, proxy_bypass
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    from collections import OrderedDict
    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
| mit |
RagingTiger/CodingInterview | ch5/python/flip_bit_to_win.py | 1 | 1298 | #!/usr/bin/env python
'''
Author: John D. Anderson
Email: jander43@vols.utk.edu
Problem Statement:
You have an integer you can flip exactly one bit from a 0 to a 1. Write
code to find the length of the longest sequence of 1s you could create.
EXAMPLE
Input: 1775 (or: 11011101111)
Output: 8
Complexity: O(N)
Usage: flip_bit_to_win <int>
'''
# function
def answer(n):
    """Print the longest run of 1s obtainable from ``n`` by flipping one 0.

    Prints the length of that run and returns the binary representation of
    ``n`` (without the '0b' prefix), matching the original print-and-return
    contract used by the CLI entry point.
    """
    # Binary digits of n, e.g. 1775 -> '11011101111'.
    bitstring = bin(n).split('b')[1]
    # Runs of consecutive 1s; an empty entry marks two or more adjacent 0s.
    groups = bitstring.split('0')
    count = len(groups)
    # Flipping the single available 0 always yields at least one 1.
    # (Fixes the n == 0 case, which previously reported 0.)
    longest = 1
    for i, group in enumerate(groups):
        if not group:
            # '' sits between consecutive zeros; nothing to extend here.
            continue
        # Extend this run of 1s across one flipped 0.
        run = len(group) + 1
        # Merge with the following run only when exactly one 0 separates
        # them (i.e. the next split entry is non-empty).
        if i + 1 < count and groups[i + 1]:
            run = len(group) + len(groups[i + 1]) + 1
        if run > longest:
            longest = run
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original used a Python-2-only print statement.
    print(longest)
    return bitstring
# executable
if __name__ == '__main__':
    # executable import only
    from docopt import docopt

    # Parse command-line arguments against the module docstring (usage spec).
    args = docopt(__doc__)

    # answer() prints the longest run itself; here we print the bit string it
    # returns. print(...) form is valid on both Python 2 and 3 (the original
    # used a Python-2-only print statement).
    print(answer(int(args['<int>'])))
| mit |
NaturalGIS/QGIS | python/plugins/processing/gui/wrappers.py | 1 | 80816 | # -*- coding: utf-8 -*-
"""
***************************************************************************
wrappers.py - Standard parameters widget wrappers
---------------------
Date : May 2016
Copyright : (C) 2016 by Arnaud Morvan, Victor Olaya
Email : arnaud dot morvan at camptocamp dot com
volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Arnaud Morvan'
__date__ = 'May 2016'
__copyright__ = '(C) 2016, Arnaud Morvan'
import locale
import os
import re
from functools import cmp_to_key
from inspect import isclass
from copy import deepcopy
from qgis.core import (
QgsApplication,
QgsUnitTypes,
QgsCoordinateReferenceSystem,
QgsExpression,
QgsExpressionContextGenerator,
QgsFieldProxyModel,
QgsMapLayerProxyModel,
QgsWkbTypes,
QgsSettings,
QgsProject,
QgsMapLayer,
QgsMapLayerType,
QgsVectorLayer,
QgsProcessing,
QgsProcessingUtils,
QgsProcessingParameterDefinition,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterExtent,
QgsProcessingParameterPoint,
QgsProcessingParameterFile,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterNumber,
QgsProcessingParameterDistance,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterExpression,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterMeshLayer,
QgsProcessingParameterField,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterMapLayer,
QgsProcessingParameterBand,
QgsProcessingParameterMatrix,
QgsProcessingParameterDistance,
QgsProcessingFeatureSourceDefinition,
QgsProcessingOutputRasterLayer,
QgsProcessingOutputVectorLayer,
QgsProcessingOutputMapLayer,
QgsProcessingOutputMultipleLayers,
QgsProcessingOutputFile,
QgsProcessingOutputString,
QgsProcessingOutputNumber,
QgsProcessingModelChildParameterSource,
QgsProcessingModelAlgorithm,
QgsRasterDataProvider,
NULL,
Qgis)
from qgis.PyQt.QtWidgets import (
QCheckBox,
QComboBox,
QLabel,
QDialog,
QFileDialog,
QHBoxLayout,
QVBoxLayout,
QLineEdit,
QPlainTextEdit,
QToolButton,
QWidget,
)
from qgis.gui import (
QgsGui,
QgsExpressionLineEdit,
QgsExpressionBuilderDialog,
QgsFieldComboBox,
QgsFieldExpressionWidget,
QgsProjectionSelectionDialog,
QgsMapLayerComboBox,
QgsProjectionSelectionWidget,
QgsRasterBandComboBox,
QgsProcessingGui,
QgsAbstractProcessingParameterWidgetWrapper,
QgsProcessingMapLayerComboBox
)
from qgis.PyQt.QtCore import pyqtSignal, QObject, QVariant, Qt
from qgis.utils import iface
from processing.core.ProcessingConfig import ProcessingConfig
from processing.modeler.MultilineTextPanel import MultilineTextPanel
from processing.gui.NumberInputPanel import NumberInputPanel, ModelerNumberInputPanel, DistanceInputPanel
from processing.gui.RangePanel import RangePanel
from processing.gui.PointSelectionPanel import PointSelectionPanel
from processing.gui.FileSelectionPanel import FileSelectionPanel
from processing.gui.CheckboxesPanel import CheckboxesPanel
from processing.gui.MultipleInputPanel import MultipleInputPanel
from processing.gui.BatchInputSelectionPanel import BatchInputSelectionPanel
from processing.gui.FixedTablePanel import FixedTablePanel
from processing.gui.ExtentSelectionPanel import ExtentSelectionPanel
from processing.gui.ParameterGuiUtils import getFileFilter
from processing.tools import dataobjects
# Shorthand aliases for the three processing dialog flavours.
DIALOG_STANDARD = QgsProcessingGui.Standard
DIALOG_BATCH = QgsProcessingGui.Batch
DIALOG_MODELER = QgsProcessingGui.Modeler
class InvalidParameterValue(Exception):
    """Raised when a widget holds a value unusable for its parameter."""
    pass
# Maps a dialog's class name to the dialog flavour used by the wrappers.
dialogTypes = {"AlgorithmDialog": DIALOG_STANDARD,
               "ModelerParametersDialog": DIALOG_MODELER,
               "BatchAlgorithmDialog": DIALOG_BATCH}
def getExtendedLayerName(layer):
    """Return the layer's display name, suffixed with its CRS authid when
    the "show CRS definition" processing setting is enabled."""
    crs_id = layer.crs().authid()
    show_crs = ProcessingConfig.getSetting(ProcessingConfig.SHOW_CRS_DEF)
    if not (show_crs and crs_id is not None):
        return layer.name()
    return u'{} [{}]'.format(layer.name(), crs_id)
class WidgetWrapper(QgsAbstractProcessingParameterWidgetWrapper):
    """Base class of the legacy Python widget wrappers binding a single
    algorithm parameter to a Qt widget in the standard, batch or modeler
    dialog."""

    # Combo item data used to mark the "no value selected" entry of
    # optional parameters.
    NOT_SET_OPTION = '~~~~!!!!NOT SET!!!!~~~~~~~'

    def __init__(self, param, dialog, row=0, col=0, **kwargs):
        # Derive the dialog flavour from the dialog's class name; unknown
        # dialog classes fall back to the standard flavour.
        self.dialogType = dialogTypes.get(dialog.__class__.__name__, QgsProcessingGui.Standard)
        super().__init__(param, self.dialogType)
        self.dialog = dialog
        self.row = row
        self.col = col
        self.widget = self.createWidget(**kwargs)
        self.label = self.createLabel()
        if param.defaultValue() is not None:
            self.setValue(param.defaultValue())

    def comboValue(self, validator=None, combobox=None):
        """Return the value held by *combobox* (defaults to self.widget).

        Free-typed text with no matching item is validated with *validator*
        when one is given; the NOT_SET sentinel maps to None.
        """
        if combobox is None:
            combobox = self.widget
        idx = combobox.findText(combobox.currentText())
        if idx < 0:
            v = combobox.currentText().strip()
            if validator is not None and not validator(v):
                raise InvalidParameterValue()
            return v
        if combobox.currentData() == self.NOT_SET_OPTION:
            return None
        elif combobox.currentData() is not None:
            return combobox.currentData()
        else:
            return combobox.currentText()

    def createWidget(self, **kwargs):
        # Subclasses build and return the actual Qt widget here.
        pass

    def createLabel(self):
        """Build the QLabel shown next to the widget (None in batch mode)."""
        if self.dialogType == DIALOG_BATCH:
            return None
        desc = self.parameterDefinition().description()
        if isinstance(self.parameterDefinition(), QgsProcessingParameterExtent):
            desc += self.tr(' (xmin, xmax, ymin, ymax)')
        if isinstance(self.parameterDefinition(), QgsProcessingParameterPoint):
            desc += self.tr(' (x, y)')
        if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
            desc += self.tr(' [optional]')
        label = QLabel(desc)
        label.setToolTip(self.parameterDefinition().name())
        return label

    def setValue(self, value):
        # Subclasses push *value* into their widget here.
        pass

    def value(self):
        # Subclasses read the current widget value here.
        return None

    def widgetValue(self):
        # Bridge to the QgsAbstractProcessingParameterWidgetWrapper API.
        return self.value()

    def setWidgetValue(self, value, context):
        # Bridge to the QgsAbstractProcessingParameterWidgetWrapper API.
        self.setValue(value)

    def setComboValue(self, value, combobox=None):
        """Select *value* in *combobox* (defaults to self.widget), matching
        by item data first and falling back to free text when editable."""
        if combobox is None:
            combobox = self.widget
        if isinstance(value, list):
            # Lists collapse to their first element (or None when empty).
            if value:
                value = value[0]
            else:
                value = None
        values = [combobox.itemData(i) for i in range(combobox.count())]
        try:
            idx = values.index(value)
            combobox.setCurrentIndex(idx)
            return
        except ValueError:
            pass
        if combobox.isEditable():
            if value is not None:
                combobox.setEditText(str(value))
        else:
            combobox.setCurrentIndex(0)

    def refresh(self):
        # Subclasses refresh widget contents (e.g. project layer lists) here.
        pass

    def getFileName(self, initial_value=''):
        """Shows a file open dialog"""
        settings = QgsSettings()
        # Seed the dialog from the given value, then the last-used path.
        if os.path.isdir(initial_value):
            path = initial_value
        elif os.path.isdir(os.path.dirname(initial_value)):
            path = os.path.dirname(initial_value)
        elif settings.contains('/Processing/LastInputPath'):
            path = str(settings.value('/Processing/LastInputPath'))
        else:
            path = ''
        # TODO: should use selectedFilter argument for default file format
        filename, selected_filter = QFileDialog.getOpenFileName(self.widget, self.tr('Select File'),
                                                                path, getFileFilter(self.parameterDefinition()))
        if filename:
            settings.setValue('/Processing/LastInputPath',
                              os.path.dirname(str(filename)))
        return filename, selected_filter
class BasicWidgetWrapper(WidgetWrapper):
    """Minimal wrapper backing a parameter with a plain QLineEdit."""

    def createWidget(self):
        return QLineEdit()

    def setValue(self, value):
        self.widget.setText(value)

    def value(self):
        return self.widget.text()
class BooleanWidgetWrapper(WidgetWrapper):
    """Wrapper exposing a boolean parameter as a checkbox (standard dialog)
    or a Yes/No combo box (batch and modeler dialogs).

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE: the deprecation note above used to be a dead string literal
        # placed *after* these statements inside __init__, so it was never
        # part of any __doc__; it is now a real class docstring.
        from warnings import warn
        warn("BooleanWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createLabel(self):
        # The standard-dialog checkbox carries its own text; no extra label.
        if self.dialogType == DIALOG_STANDARD:
            return None
        else:
            return super().createLabel()

    def createWidget(self):
        if self.dialogType == DIALOG_STANDARD:
            return QCheckBox()
        elif self.dialogType == DIALOG_BATCH:
            widget = QComboBox()
            widget.addItem(self.tr('Yes'), True)
            widget.addItem(self.tr('No'), False)
            return widget
        else:
            # Modeler: also offer boolean values produced elsewhere in the model.
            widget = QComboBox()
            widget.addItem(self.tr('Yes'), True)
            widget.addItem(self.tr('No'), False)
            bools = self.dialog.getAvailableValuesOfType(QgsProcessingParameterBoolean, None)
            for b in bools:
                widget.addItem(self.dialog.resolveValueDescription(b), b)
            return widget

    def setValue(self, value):
        # None/NULL means "leave the widget untouched".
        if value is None or value == NULL:
            return
        if self.dialogType == DIALOG_STANDARD:
            self.widget.setChecked(value)
        else:
            self.setComboValue(value)

    def value(self):
        if self.dialogType == DIALOG_STANDARD:
            return self.widget.isChecked()
        else:
            return self.comboValue()
class CrsWidgetWrapper(WidgetWrapper):
    """Wrapper for CRS parameters: a projection-selection widget in the
    standard/batch dialogs, an editable combo plus a CRS-picker button in
    the modeler."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        """
        .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
        """
        from warnings import warn
        warn("CrsWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self):
        if self.dialogType == DIALOG_MODELER:
            # Editable combo listing model CRS values plus a browse button.
            self.combo = QComboBox()
            widget = QWidget()
            layout = QHBoxLayout()
            layout.setMargin(0)
            layout.setContentsMargins(0, 0, 0, 0)
            layout.setSpacing(1)
            layout.addWidget(self.combo)
            btn = QToolButton()
            btn.setIcon(QgsApplication.getThemeIcon("mActionSetProjection.svg"))
            btn.setToolTip(self.tr("Select CRS"))
            btn.clicked.connect(self.selectProjection)
            layout.addWidget(btn)
            widget.setLayout(layout)
            self.combo.setEditable(True)
            crss = self.dialog.getAvailableValuesOfType((QgsProcessingParameterCrs, QgsProcessingParameterString), QgsProcessingOutputString)
            for crs in crss:
                self.combo.addItem(self.dialog.resolveValueDescription(crs), crs)
            # Layer-typed model values can contribute their CRS as well.
            layers = self.dialog.getAvailableValuesOfType([QgsProcessingParameterRasterLayer,
                                                           QgsProcessingParameterVectorLayer,
                                                           QgsProcessingParameterMeshLayer,
                                                           QgsProcessingParameterFeatureSource],
                                                          [QgsProcessingOutputVectorLayer,
                                                           QgsProcessingOutputRasterLayer,
                                                           QgsProcessingOutputMapLayer])
            for l in layers:
                self.combo.addItem("Crs of layer " + self.dialog.resolveValueDescription(l), l)
            if self.parameterDefinition().defaultValue():
                self.combo.setEditText(self.parameterDefinition().defaultValue())
            return widget
        else:
            widget = QgsProjectionSelectionWidget()
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                widget.setOptionVisible(QgsProjectionSelectionWidget.CrsNotSet, True)
            if self.parameterDefinition().defaultValue():
                # 'ProjectCrs' is a symbolic default resolved at display time.
                if self.parameterDefinition().defaultValue() == 'ProjectCrs':
                    crs = QgsProject.instance().crs()
                else:
                    crs = QgsCoordinateReferenceSystem(self.parameterDefinition().defaultValue())
                widget.setCrs(crs)
            else:
                widget.setOptionVisible(QgsProjectionSelectionWidget.CrsNotSet, True)
            widget.crsChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget

    def selectProjection(self):
        # Open the full CRS chooser, seeded from the combo's current text.
        dialog = QgsProjectionSelectionDialog(self.widget)
        current_crs = QgsCoordinateReferenceSystem(self.combo.currentText())
        if current_crs.isValid():
            dialog.setCrs(current_crs)
        if dialog.exec_():
            self.setValue(dialog.crs().authid())

    def setValue(self, value):
        # None/NULL means "leave the widget untouched".
        if value is None or value == NULL:
            return
        if self.dialogType == DIALOG_MODELER:
            self.setComboValue(value, self.combo)
        elif value == 'ProjectCrs':
            self.widget.setCrs(QgsProject.instance().crs())
        else:
            self.widget.setCrs(QgsCoordinateReferenceSystem(value))

    def value(self):
        if self.dialogType == DIALOG_MODELER:
            return self.comboValue(combobox=self.combo)
        else:
            crs = self.widget.crs()
            if crs.isValid():
                return self.widget.crs().authid()
            else:
                return None
class ExtentWidgetWrapper(WidgetWrapper):
    """Wrapper for extent parameters: an ExtentSelectionPanel in the
    standard/batch dialogs, an editable combo box in the modeler."""

    # Combo entry offered for optional extents (value None -> min covering).
    USE_MIN_COVERING_EXTENT = "[Use min covering extent]"

    def createWidget(self):
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            widget = ExtentSelectionPanel(self.dialog, self.parameterDefinition())
            widget.hasChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget
        else:
            widget = QComboBox()
            widget.setEditable(True)
            extents = self.dialog.getAvailableValuesOfType(QgsProcessingParameterExtent, (QgsProcessingOutputString))
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                widget.addItem(self.USE_MIN_COVERING_EXTENT, None)
            # Layer-typed model values can contribute their extent as well.
            layers = self.dialog.getAvailableValuesOfType([QgsProcessingParameterFeatureSource,
                                                           QgsProcessingParameterRasterLayer,
                                                           QgsProcessingParameterVectorLayer,
                                                           QgsProcessingParameterMeshLayer],
                                                          [QgsProcessingOutputRasterLayer,
                                                           QgsProcessingOutputVectorLayer,
                                                           QgsProcessingOutputMapLayer])
            for ex in extents:
                widget.addItem(self.dialog.resolveValueDescription(ex), ex)
            for l in layers:
                widget.addItem("Extent of " + self.dialog.resolveValueDescription(l), l)
            # BUG FIX: the condition was inverted ("if not ..."), so an actual
            # default value was never applied (and setEditText(None) would
            # have been attempted when no default existed). Mirrors
            # CrsWidgetWrapper.createWidget().
            if self.parameterDefinition().defaultValue():
                widget.setEditText(self.parameterDefinition().defaultValue())
            return widget

    def setValue(self, value):
        # None/NULL means "leave the widget untouched".
        if value is None or value == NULL:
            return
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            self.widget.setExtentFromString(value)
        else:
            self.setComboValue(value)

    def value(self):
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            return self.widget.getValue()
        else:
            idx = self.widget.findText(self.widget.currentText())
            if idx < 0:
                # Free-typed text must be "xmin,xmax,ymin,ymax" with numeric
                # tokens; optional parameters may be left empty.
                s = str(self.widget.currentText()).strip()
                if s:
                    try:
                        tokens = s.split(',')
                        if len(tokens) != 4:
                            raise InvalidParameterValue()
                        for token in tokens:
                            float(token)
                    except Exception:  # narrowed from a bare except
                        raise InvalidParameterValue()
                elif self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                    s = None
                else:
                    raise InvalidParameterValue()
                return s
            else:
                return self.widget.currentData()
class PointWidgetWrapper(WidgetWrapper):
    """Wrapper for point parameters: a map point-selection panel in the
    standard/batch dialogs, an editable combo box in the modeler."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        """
        .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
        """
        from warnings import warn
        warn("PointWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self):
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            return PointSelectionPanel(self.dialog, self.parameterDefinition().defaultValue())
        else:
            item = QComboBox()
            item.setEditable(True)
            points = self.dialog.getAvailableValuesOfType((QgsProcessingParameterPoint, QgsProcessingParameterString), (QgsProcessingOutputString))
            for p in points:
                item.addItem(self.dialog.resolveValueDescription(p), p)
            item.setEditText(str(self.parameterDefinition().defaultValue()))
            return item

    def setValue(self, value):
        # None/NULL means "leave the widget untouched".
        if value is None or value == NULL:
            return
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            self.widget.setPointFromString(value)
        else:
            self.setComboValue(value)

    def value(self):
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            return self.widget.getValue()
        else:
            idx = self.widget.findText(self.widget.currentText())
            if idx < 0:
                # Free-typed text must be "x,y" with numeric tokens; optional
                # parameters may be left empty.
                s = str(self.widget.currentText()).strip()
                if s:
                    try:
                        tokens = s.split(',')
                        if len(tokens) != 2:
                            raise InvalidParameterValue()
                        for token in tokens:
                            float(token)
                    except:
                        raise InvalidParameterValue()
                elif self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                    s = None
                else:
                    raise InvalidParameterValue()
                return s
            else:
                return self.widget.currentData()
class FileWidgetWrapper(WidgetWrapper):
    """Wrapper for file/folder parameters: a file-selection panel in the
    standard/batch dialogs, an editable combo with a browse button in the
    modeler.

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The deprecation note above used to be a dead string literal after
        # these statements; it is now a real class docstring.
        from warnings import warn
        warn("FileWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self):
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            return FileSelectionPanel(self.parameterDefinition().behavior() == QgsProcessingParameterFile.Folder,
                                      self.parameterDefinition().extension())
        else:
            self.combo = QComboBox()
            self.combo.setEditable(True)
            files = self.dialog.getAvailableValuesOfType(QgsProcessingParameterFile, (QgsProcessingOutputRasterLayer, QgsProcessingOutputVectorLayer, QgsProcessingOutputMapLayer, QgsProcessingOutputFile, QgsProcessingOutputString))
            for f in files:
                self.combo.addItem(self.dialog.resolveValueDescription(f), f)
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                self.combo.setEditText("")
            widget = QWidget()
            layout = QHBoxLayout()
            layout.setMargin(0)
            layout.setContentsMargins(0, 0, 0, 0)
            layout.setSpacing(6)
            layout.addWidget(self.combo)
            btn = QToolButton()
            btn.setText('…')
            btn.setToolTip(self.tr("Select file"))
            btn.clicked.connect(self.selectFile)
            layout.addWidget(btn)
            widget.setLayout(layout)
            return widget

    def selectFile(self):
        """Open a file dialog seeded from the current combo text (or the
        last-used input path) and write the chosen filename back."""
        settings = QgsSettings()
        if os.path.isdir(os.path.dirname(self.combo.currentText())):
            path = os.path.dirname(self.combo.currentText())
        # BUG FIX: this branch was a plain "if", which always overwrote the
        # path derived from the current text above. Now an elif chain, like
        # WidgetWrapper.getFileName().
        elif settings.contains('/Processing/LastInputPath'):
            path = str(settings.value('/Processing/LastInputPath'))
        else:
            path = ''
        if self.parameterDefinition().extension():
            filter = self.tr('{} files').format(
                self.parameterDefinition().extension().upper()) + ' (*.' + self.parameterDefinition().extension() + self.tr(
                ');;All files (*.*)')
        else:
            filter = self.tr('All files (*.*)')
        filename, selected_filter = QFileDialog.getOpenFileName(self.widget,
                                                                self.tr('Select File'), path,
                                                                filter)
        if filename:
            self.combo.setEditText(filename)

    def setValue(self, value):
        # None/NULL means "leave the widget untouched".
        if value is None or value == NULL:
            return
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            self.widget.setText(value)
        else:
            self.setComboValue(value, combobox=self.combo)

    def value(self):
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            return self.widget.getValue()
        else:
            return self.comboValue(combobox=self.combo)
class FixedTableWidgetWrapper(WidgetWrapper):
    """Wrapper for matrix (fixed table) parameters: a table editor in the
    standard/batch dialogs, a combo of model matrix values in the modeler.

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The deprecation note above used to be a dead string literal after
        # these statements; it is now a real class docstring.
        from warnings import warn
        warn("FixedTableWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self):
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            return FixedTablePanel(self.parameterDefinition())
        else:
            self.combobox = QComboBox()
            values = self.dialog.getAvailableValuesOfType(QgsProcessingParameterMatrix)
            for v in values:
                self.combobox.addItem(self.dialog.resolveValueDescription(v), v)
            return self.combobox

    def setValue(self, value):
        # Guard against None/NULL like every sibling wrapper does; the table
        # panel cannot represent a missing value.
        if value is None or value == NULL:
            return
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            self.widget.setValue(value)
        else:
            self.setComboValue(value, combobox=self.combobox)

    def value(self):
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            return self.widget.table
        else:
            return self.comboValue(combobox=self.combobox)
class MultipleLayerWidgetWrapper(WidgetWrapper):
    """Wrapper for multiple-layer parameters, dispatching on the parameter's
    layer type: compatible project layers (standard dialog), a batch input
    panel (batch dialog), or model values (modeler)."""

    def _getOptions(self):
        # Collect model inputs/outputs compatible with this parameter's
        # layer type, sorted by display description.
        if self.parameterDefinition().layerType() == QgsProcessing.TypeVectorAnyGeometry:
            options = self.dialog.getAvailableValuesOfType((QgsProcessingParameterFeatureSource,
                                                            QgsProcessingParameterVectorLayer,
                                                            QgsProcessingParameterMultipleLayers),
                                                           [QgsProcessingOutputVectorLayer,
                                                            QgsProcessingOutputMapLayer,
                                                            QgsProcessingOutputMultipleLayers])
        elif self.parameterDefinition().layerType() == QgsProcessing.TypeVector:
            options = self.dialog.getAvailableValuesOfType((QgsProcessingParameterFeatureSource,
                                                            QgsProcessingParameterVectorLayer,
                                                            QgsProcessingParameterMultipleLayers),
                                                           [QgsProcessingOutputVectorLayer,
                                                            QgsProcessingOutputMapLayer,
                                                            QgsProcessingOutputMultipleLayers],
                                                           [QgsProcessing.TypeVector])
        elif self.parameterDefinition().layerType() == QgsProcessing.TypeVectorPoint:
            options = self.dialog.getAvailableValuesOfType((QgsProcessingParameterFeatureSource,
                                                            QgsProcessingParameterVectorLayer,
                                                            QgsProcessingParameterMultipleLayers),
                                                           [QgsProcessingOutputVectorLayer,
                                                            QgsProcessingOutputMapLayer,
                                                            QgsProcessingOutputMultipleLayers],
                                                           [QgsProcessing.TypeVectorPoint,
                                                            QgsProcessing.TypeVectorAnyGeometry])
        elif self.parameterDefinition().layerType() == QgsProcessing.TypeVectorLine:
            options = self.dialog.getAvailableValuesOfType((QgsProcessingParameterFeatureSource,
                                                            QgsProcessingParameterVectorLayer,
                                                            QgsProcessingParameterMultipleLayers),
                                                           [QgsProcessingOutputVectorLayer,
                                                            QgsProcessingOutputMapLayer,
                                                            QgsProcessingOutputMultipleLayers],
                                                           [QgsProcessing.TypeVectorLine,
                                                            QgsProcessing.TypeVectorAnyGeometry])
        elif self.parameterDefinition().layerType() == QgsProcessing.TypeVectorPolygon:
            options = self.dialog.getAvailableValuesOfType((QgsProcessingParameterFeatureSource,
                                                            QgsProcessingParameterVectorLayer,
                                                            QgsProcessingParameterMultipleLayers),
                                                           [QgsProcessingOutputVectorLayer,
                                                            QgsProcessingOutputMapLayer,
                                                            QgsProcessingOutputMultipleLayers],
                                                           [QgsProcessing.TypeVectorPolygon,
                                                            QgsProcessing.TypeVectorAnyGeometry])
        elif self.parameterDefinition().layerType() == QgsProcessing.TypeRaster:
            options = self.dialog.getAvailableValuesOfType(
                (QgsProcessingParameterRasterLayer, QgsProcessingParameterMultipleLayers),
                [QgsProcessingOutputRasterLayer,
                 QgsProcessingOutputMapLayer,
                 QgsProcessingOutputMultipleLayers])
        elif self.parameterDefinition().layerType() == QgsProcessing.TypeMesh:
            options = self.dialog.getAvailableValuesOfType(
                (QgsProcessingParameterMeshLayer, QgsProcessingParameterMultipleLayers),
                [])
        elif self.parameterDefinition().layerType() == QgsProcessing.TypeMapLayer:
            options = self.dialog.getAvailableValuesOfType((QgsProcessingParameterRasterLayer,
                                                            QgsProcessingParameterFeatureSource,
                                                            QgsProcessingParameterVectorLayer,
                                                            QgsProcessingParameterMeshLayer,
                                                            QgsProcessingParameterMultipleLayers),
                                                           [QgsProcessingOutputRasterLayer,
                                                            QgsProcessingOutputVectorLayer,
                                                            QgsProcessingOutputMapLayer,
                                                            QgsProcessingOutputMultipleLayers])
        else:
            # TypeFile falls through to plain file inputs/outputs.
            options = self.dialog.getAvailableValuesOfType(QgsProcessingParameterFile, QgsProcessingOutputFile)
        options = sorted(options, key=lambda opt: self.dialog.resolveValueDescription(opt))
        return options

    def createWidget(self):
        if self.dialogType == DIALOG_STANDARD:
            if self.parameterDefinition().layerType() == QgsProcessing.TypeFile:
                return MultipleInputPanel(datatype=QgsProcessing.TypeFile)
            else:
                # Offer the compatible layers currently loaded in the project.
                if self.parameterDefinition().layerType() == QgsProcessing.TypeRaster:
                    options = QgsProcessingUtils.compatibleRasterLayers(QgsProject.instance(), False)
                elif self.parameterDefinition().layerType() == QgsProcessing.TypeMesh:
                    options = QgsProcessingUtils.compatibleMeshLayers(QgsProject.instance(), False)
                elif self.parameterDefinition().layerType() in (QgsProcessing.TypeVectorAnyGeometry, QgsProcessing.TypeVector):
                    options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [], False)
                elif self.parameterDefinition().layerType() == QgsProcessing.TypeMapLayer:
                    options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [], False)
                    options.extend(QgsProcessingUtils.compatibleRasterLayers(QgsProject.instance(), False))
                    options.extend(QgsProcessingUtils.compatibleMeshLayers(QgsProject.instance(), False))
                else:
                    options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [self.parameterDefinition().layerType()],
                                                                        False)
                opts = [getExtendedLayerName(opt) for opt in options]
                return MultipleInputPanel(opts, datatype=self.parameterDefinition().layerType())
        elif self.dialogType == DIALOG_BATCH:
            widget = BatchInputSelectionPanel(self.parameterDefinition(), self.row, self.col, self.dialog)
            widget.valueChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget
        else:
            options = [self.dialog.resolveValueDescription(opt) for opt in self._getOptions()]
            return MultipleInputPanel(options, datatype=self.parameterDefinition().layerType())

    def refresh(self):
        """Rebuild the option list from the current project layers."""
        if self.parameterDefinition().layerType() != QgsProcessing.TypeFile:
            if self.parameterDefinition().layerType() == QgsProcessing.TypeRaster:
                options = QgsProcessingUtils.compatibleRasterLayers(QgsProject.instance(), False)
            elif self.parameterDefinition().layerType() == QgsProcessing.TypeMesh:
                options = QgsProcessingUtils.compatibleMeshLayers(QgsProject.instance(), False)
            elif self.parameterDefinition().layerType() in (QgsProcessing.TypeVectorAnyGeometry, QgsProcessing.TypeVector):
                options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [], False)
            elif self.parameterDefinition().layerType() == QgsProcessing.TypeMapLayer:
                options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [], False)
                options.extend(QgsProcessingUtils.compatibleRasterLayers(QgsProject.instance(), False))
                options.extend(QgsProcessingUtils.compatibleMeshLayers(QgsProject.instance(), False))
            else:
                options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [self.parameterDefinition().layerType()],
                                                                    False)
            opts = [getExtendedLayerName(opt) for opt in options]
            self.widget.updateForOptions(opts)

    def setValue(self, value):
        # None/NULL means "leave the widget untouched".
        if value is None or value == NULL:
            return
        if self.dialogType == DIALOG_STANDARD:
            pass  # TODO
        elif self.dialogType == DIALOG_BATCH:
            return self.widget.setValue(value)
        else:
            # Modeler: map each value to its option index where possible,
            # keeping static/model values as-is.
            options = self._getOptions()
            if not isinstance(value, (tuple, list)):
                value = [value]
            selected_options = []
            for sel in value:
                if sel in options:
                    selected_options.append(options.index(sel))
                elif isinstance(sel, QgsProcessingModelChildParameterSource):
                    selected_options.append(sel.staticValue())
                else:
                    selected_options.append(sel)
            self.widget.setSelectedItems(selected_options)

    def value(self):
        if self.dialogType == DIALOG_STANDARD:
            if self.parameterDefinition().layerType() == QgsProcessing.TypeFile:
                return self.widget.selectedoptions
            else:
                # Resolve selected indices back into project layers.
                if self.parameterDefinition().layerType() == QgsProcessing.TypeRaster:
                    options = QgsProcessingUtils.compatibleRasterLayers(QgsProject.instance(), False)
                elif self.parameterDefinition().layerType() == QgsProcessing.TypeMesh:
                    options = QgsProcessingUtils.compatibleMeshLayers(QgsProject.instance(), False)
                elif self.parameterDefinition().layerType() in (QgsProcessing.TypeVectorAnyGeometry, QgsProcessing.TypeVector):
                    options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [], False)
                elif self.parameterDefinition().layerType() == QgsProcessing.TypeMapLayer:
                    options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [], False)
                    options.extend(QgsProcessingUtils.compatibleRasterLayers(QgsProject.instance(), False))
                    options.extend(QgsProcessingUtils.compatibleMeshLayers(QgsProject.instance(), False))
                else:
                    options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [self.parameterDefinition().layerType()],
                                                                        False)
                return [options[i] if isinstance(i, int) else i for i in self.widget.selectedoptions]
        elif self.dialogType == DIALOG_BATCH:
            return self.widget.value()
        else:
            options = self._getOptions()
            values = [options[i] if isinstance(i, int) else QgsProcessingModelChildParameterSource.fromStaticValue(i)
                      for i in self.widget.selectedoptions]
            if len(values) == 0 and not self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                raise InvalidParameterValue()
            return values
class NumberWidgetWrapper(WidgetWrapper):
    """Widget wrapper for numeric parameters.

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fix: the deprecation note used to be a bare triple-quoted string
        # placed *after* super().__init__(), which is a no-op expression and
        # not a docstring; it has been moved to the class docstring above.
        from warnings import warn
        warn("NumberWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self):
        """Create the numeric input widget appropriate for the dialog type."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            widget = NumberInputPanel(self.parameterDefinition())
            widget.hasChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget
        else:
            return ModelerNumberInputPanel(self.parameterDefinition(), self.dialog)

    def setValue(self, value):
        """Push *value* into the widget; None/NULL values are ignored."""
        if value is None or value == NULL:
            return
        self.widget.setValue(value)

    def value(self):
        """Return the widget's current numeric value."""
        return self.widget.getValue()

    def postInitialize(self, wrappers):
        """For dynamic parameters, bind this wrapper to its parent-layer wrapper."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH) and self.parameterDefinition().isDynamic():
            for wrapper in wrappers:
                if wrapper.parameterDefinition().name() == self.parameterDefinition().dynamicLayerParameterName():
                    self.widget.setDynamicLayer(wrapper.parameterValue())
                    wrapper.widgetValueHasChanged.connect(self.parentLayerChanged)
                    break

    def parentLayerChanged(self, wrapper):
        # Keep the dynamic-expression context in sync with the parent layer.
        self.widget.setDynamicLayer(wrapper.parameterValue())
class DistanceWidgetWrapper(WidgetWrapper):
    """Widget wrapper for distance parameters.

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fix: the deprecation note used to be a bare triple-quoted string
        # placed *after* super().__init__(), which is a no-op expression and
        # not a docstring; it has been moved to the class docstring above.
        from warnings import warn
        warn("DistanceWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self):
        """Create the distance input widget appropriate for the dialog type."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            widget = DistanceInputPanel(self.parameterDefinition())
            widget.hasChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget
        else:
            return ModelerNumberInputPanel(self.parameterDefinition(), self.dialog)

    def setValue(self, value):
        """Push *value* into the widget; None/NULL values are ignored."""
        if value is None or value == NULL:
            return
        self.widget.setValue(value)

    def value(self):
        """Return the widget's current distance value."""
        return self.widget.getValue()

    def postInitialize(self, wrappers):
        """Bind this wrapper to its dynamic-layer and unit-parent wrappers."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            for wrapper in wrappers:
                if wrapper.parameterDefinition().name() == self.parameterDefinition().dynamicLayerParameterName():
                    self.widget.setDynamicLayer(wrapper.parameterValue())
                    wrapper.widgetValueHasChanged.connect(self.dynamicLayerChanged)
                if wrapper.parameterDefinition().name() == self.parameterDefinition().parentParameterName():
                    self.widget.setUnitParameterValue(wrapper.parameterValue())
                    wrapper.widgetValueHasChanged.connect(self.parentParameterChanged)

    def dynamicLayerChanged(self, wrapper):
        # Keep the dynamic-expression context in sync with the parent layer.
        self.widget.setDynamicLayer(wrapper.parameterValue())

    def parentParameterChanged(self, wrapper):
        # Update the displayed units when the linked parent parameter changes.
        self.widget.setUnitParameterValue(wrapper.parameterValue())
class RangeWidgetWrapper(WidgetWrapper):
    """Widget wrapper for range (min/max) parameters.

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fix: the deprecation note used to be a bare triple-quoted string
        # placed *after* super().__init__(), which is a no-op expression and
        # not a docstring; it has been moved to the class docstring above.
        from warnings import warn
        warn("RangeWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self):
        """Create the range panel (the same widget serves every dialog type)."""
        widget = RangePanel(self.parameterDefinition())
        widget.hasChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
        return widget

    def setValue(self, value):
        """Push *value* into the widget; None/NULL values are ignored."""
        if value is None or value == NULL:
            return
        self.widget.setValue(value)

    def value(self):
        """Return the widget's current range value."""
        return self.widget.getValue()
class MapLayerWidgetWrapper(WidgetWrapper):
    """Widget wrapper for generic map-layer parameters (vector, raster or mesh)."""

    NOT_SELECTED = '[Not selected]'

    def createWidget(self):
        """Create the layer chooser appropriate for the hosting dialog."""
        if self.dialogType == DIALOG_STANDARD:
            self.combo = QgsProcessingMapLayerComboBox(self.parameterDefinition())
            self.context = dataobjects.createContext()
            try:
                if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                    self.combo.setValue(self.parameterDefinition().defaultValue(), self.context)
                else:
                    if self.parameterDefinition().defaultValue():
                        # Bug fix: this used to call the non-existent
                        # 'setvalue' (lowercase v); the resulting
                        # AttributeError was silently swallowed by the bare
                        # except below, so the default was never applied.
                        self.combo.setValue(self.parameterDefinition().defaultValue(), self.context)
                    else:
                        self.combo.setLayer(iface.activeLayer())
            except:
                # iface may be unavailable (e.g. headless runs) - best effort.
                pass
            self.combo.valueChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            self.combo.triggerFileSelection.connect(self.selectFile)
            return self.combo
        elif self.dialogType == DIALOG_BATCH:
            widget = BatchInputSelectionPanel(self.parameterDefinition(), self.row, self.col, self.dialog)
            widget.valueChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget
        else:
            # Modeler dialog: editable combo listing compatible model values,
            # plus a button to pick a file from disk.
            self.combo = QComboBox()
            layers = self.getAvailableLayers()
            self.combo.setEditable(True)
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                self.combo.addItem(self.NOT_SELECTED, self.NOT_SET_OPTION)
            for layer in layers:
                self.combo.addItem(self.dialog.resolveValueDescription(layer), layer)
            widget = QWidget()
            layout = QHBoxLayout()
            layout.setMargin(0)
            layout.setContentsMargins(0, 0, 0, 0)
            layout.setSpacing(6)
            layout.addWidget(self.combo)
            btn = QToolButton()
            btn.setText('…')
            btn.setToolTip(self.tr("Select file"))
            btn.clicked.connect(self.selectFile)
            layout.addWidget(btn)
            widget.setLayout(layout)
            return widget

    def getAvailableLayers(self):
        """Return the model inputs/outputs that can feed this parameter."""
        return self.dialog.getAvailableValuesOfType(
            [QgsProcessingParameterRasterLayer, QgsProcessingParameterMeshLayer, QgsProcessingParameterVectorLayer, QgsProcessingParameterMapLayer, QgsProcessingParameterString],
            [QgsProcessingOutputRasterLayer, QgsProcessingOutputVectorLayer, QgsProcessingOutputMapLayer, QgsProcessingOutputString, QgsProcessingOutputFile])

    def selectFile(self):
        """Let the user pick a file on disk and push it into the combo."""
        filename, selected_filter = self.getFileName(self.combo.currentText())
        if filename:
            if isinstance(self.combo, QgsProcessingMapLayerComboBox):
                self.combo.setValue(filename, self.context)
            elif isinstance(self.combo, QgsMapLayerComboBox):
                items = self.combo.additionalItems()
                items.append(filename)
                self.combo.setAdditionalItems(items)
                self.combo.setCurrentIndex(self.combo.findText(filename))
            else:
                self.combo.setEditText(filename)
        self.widgetValueHasChanged.emit(self)

    def setValue(self, value):
        """Set the wrapper's value; layer-id strings are resolved to project layers."""
        if value is None or value == NULL:
            return
        if self.dialogType == DIALOG_STANDARD:
            if isinstance(value, str):
                layer = QgsProject.instance().mapLayer(value)
                if layer is not None:
                    value = layer
            self.combo.setValue(value, self.context)
        elif self.dialogType == DIALOG_BATCH:
            self.widget.setValue(value)
        else:
            self.setComboValue(value, combobox=self.combo)
        self.widgetValueHasChanged.emit(self)

    def value(self):
        """Return the current value; modeler free-text entries must be existing paths."""
        if self.dialogType == DIALOG_STANDARD:
            return self.combo.value()
        elif self.dialogType == DIALOG_BATCH:
            return self.widget.getValue()
        else:
            def validator(v):
                if not bool(v):
                    return self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional
                else:
                    return os.path.exists(v)
            return self.comboValue(validator, combobox=self.combo)
class RasterWidgetWrapper(MapLayerWidgetWrapper):
    """Map-layer wrapper specialised for raster inputs."""

    def getAvailableLayers(self):
        # Raster inputs can be fed by raster/string parameters and by
        # raster/file/string outputs of other model algorithms.
        acceptable_params = (QgsProcessingParameterRasterLayer, QgsProcessingParameterString)
        acceptable_outputs = (QgsProcessingOutputRasterLayer, QgsProcessingOutputFile, QgsProcessingOutputString)
        return self.dialog.getAvailableValuesOfType(acceptable_params, acceptable_outputs)

    def selectFile(self):
        """Let the user pick a raster file (and sub-layer) and push it into the combo."""
        path, _ = self.getFileName(self.combo.currentText())
        if path:
            # Offer sub-layer selection for multi-layer raster containers.
            path = dataobjects.getRasterSublayer(path, self.parameterDefinition())
            if isinstance(self.combo, QgsProcessingMapLayerComboBox):
                self.combo.setValue(path, self.context)
            elif isinstance(self.combo, QgsMapLayerComboBox):
                extra_items = self.combo.additionalItems() + [path]
                self.combo.setAdditionalItems(extra_items)
                self.combo.setCurrentIndex(self.combo.findText(path))
            else:
                self.combo.setEditText(path)
        self.widgetValueHasChanged.emit(self)
class MeshWidgetWrapper(MapLayerWidgetWrapper):
    """Map-layer wrapper specialised for mesh-layer inputs."""

    def getAvailableLayers(self):
        # Only mesh-layer and string parameters can feed a mesh input; no
        # model output types are compatible.
        return self.dialog.getAvailableValuesOfType(
            (QgsProcessingParameterMeshLayer, QgsProcessingParameterString),
            ())

    def selectFile(self):
        """Let the user pick a mesh file and push it into the combo."""
        path, _ = self.getFileName(self.combo.currentText())
        if path:
            if isinstance(self.combo, QgsProcessingMapLayerComboBox):
                self.combo.setValue(path, self.context)
            elif isinstance(self.combo, QgsMapLayerComboBox):
                extra_items = self.combo.additionalItems() + [path]
                self.combo.setAdditionalItems(extra_items)
                self.combo.setCurrentIndex(self.combo.findText(path))
            else:
                self.combo.setEditText(path)
        self.widgetValueHasChanged.emit(self)
class EnumWidgetWrapper(WidgetWrapper):
    """Widget wrapper for enum parameters.

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    NOT_SELECTED = '[Not selected]'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fix: the deprecation note used to be a bare triple-quoted string
        # placed *after* super().__init__(), which is a no-op expression and
        # not a docstring; it has been moved to the class docstring above.
        from warnings import warn
        warn("EnumWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self, useCheckBoxes=False, columns=1):
        """Create the enum chooser; optionally a checkbox panel (standard dialog only)."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            self._useCheckBoxes = useCheckBoxes
            if self._useCheckBoxes and not self.dialogType == DIALOG_BATCH:
                return CheckboxesPanel(options=self.parameterDefinition().options(),
                                       multiple=self.parameterDefinition().allowMultiple(),
                                       columns=columns)
            if self.parameterDefinition().allowMultiple():
                return MultipleInputPanel(options=self.parameterDefinition().options())
            else:
                widget = QComboBox()
                for i, option in enumerate(self.parameterDefinition().options()):
                    widget.addItem(option, i)
                if self.parameterDefinition().defaultValue():
                    widget.setCurrentIndex(widget.findData(self.parameterDefinition().defaultValue()))
                return widget
        else:
            self.combobox = QComboBox()
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                self.combobox.addItem(self.NOT_SELECTED, self.NOT_SET_OPTION)
            for i, option in enumerate(self.parameterDefinition().options()):
                self.combobox.addItem(option, i)
            # Other enum parameters in the model may also feed this input.
            values = self.dialog.getAvailableValuesOfType(QgsProcessingParameterEnum)
            for v in values:
                self.combobox.addItem(self.dialog.resolveValueDescription(v), v)
            return self.combobox

    def setValue(self, value):
        """Apply *value* (option index or list of indexes) to the widget."""
        if value is None or value == NULL:
            return
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self._useCheckBoxes and not self.dialogType == DIALOG_BATCH:
                self.widget.setValue(value)
                return
            if self.parameterDefinition().allowMultiple():
                self.widget.setSelectedItems(value)
            else:
                self.widget.setCurrentIndex(self.widget.findData(value))
        else:
            self.setComboValue(value, combobox=self.combobox)

    def value(self):
        """Return the selected option index(es), or the modeler combo value."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self._useCheckBoxes and not self.dialogType == DIALOG_BATCH:
                return self.widget.value()
            if self.parameterDefinition().allowMultiple():
                return self.widget.selectedoptions
            else:
                return self.widget.currentData()
        else:
            return self.comboValue(combobox=self.combobox)
class FeatureSourceWidgetWrapper(WidgetWrapper):
    """Widget wrapper for feature-source parameters (vector layers/tables)."""

    NOT_SELECTED = '[Not selected]'

    def createWidget(self):
        """Create the source chooser appropriate for the hosting dialog."""
        if self.dialogType == DIALOG_STANDARD:
            self.combo = QgsProcessingMapLayerComboBox(self.parameterDefinition())
            self.context = dataobjects.createContext()
            try:
                # Pre-select the active layer when it is a vector layer;
                # iface/activeLayer may be unavailable, hence best effort.
                if iface.activeLayer().type() == QgsMapLayerType.VectorLayer:
                    self.combo.setLayer(iface.activeLayer())
            except:
                pass
            self.combo.valueChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            self.combo.triggerFileSelection.connect(self.selectFile)
            return self.combo
        elif self.dialogType == DIALOG_BATCH:
            widget = BatchInputSelectionPanel(self.parameterDefinition(), self.row, self.col, self.dialog)
            widget.valueChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget
        else:
            # Modeler dialog: editable combo of compatible model values plus a
            # file-selection button.
            self.combo = QComboBox()
            layers = self.dialog.getAvailableValuesOfType(
                (QgsProcessingParameterFeatureSource, QgsProcessingParameterVectorLayer),
                (QgsProcessingOutputVectorLayer, QgsProcessingOutputMapLayer, QgsProcessingOutputString, QgsProcessingOutputFile), self.parameterDefinition().dataTypes())
            self.combo.setEditable(True)
            for layer in layers:
                self.combo.addItem(self.dialog.resolveValueDescription(layer), layer)
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                self.combo.setEditText("")
            widget = QWidget()
            layout = QHBoxLayout()
            layout.setMargin(0)
            layout.setContentsMargins(0, 0, 0, 0)
            layout.setSpacing(2)
            layout.addWidget(self.combo)
            btn = QToolButton()
            btn.setText('…')
            btn.setToolTip(self.tr("Select file"))
            btn.clicked.connect(self.selectFile)
            layout.addWidget(btn)
            widget.setLayout(layout)
            return widget

    def selectFile(self):
        """Let the user pick a file on disk and push it into the combo."""
        filename, selected_filter = self.getFileName(self.combo.currentText())
        if filename:
            # NOTE(review): getRasterSublayer is called here for a *feature
            # source* - presumably it also handles multi-layer container
            # formats for vector data; confirm against dataobjects.
            filename = dataobjects.getRasterSublayer(filename, self.parameterDefinition())
            if isinstance(self.combo, QgsProcessingMapLayerComboBox):
                self.combo.setValue(filename, self.context)
            elif isinstance(self.combo, QgsMapLayerComboBox):
                items = self.combo.additionalItems()
                items.append(filename)
                self.combo.setAdditionalItems(items)
                self.combo.setCurrentIndex(self.combo.findText(filename))
            else:
                self.combo.setEditText(filename)
        self.widgetValueHasChanged.emit(self)

    def setValue(self, value):
        """Set the wrapper's value; layer-id strings are resolved to project layers."""
        if value is None or value == NULL:
            return
        if self.dialogType == DIALOG_STANDARD:
            if isinstance(value, str):
                layer = QgsProject.instance().mapLayer(value)
                if layer is not None:
                    value = layer
            self.combo.setValue(value, self.context)
        elif self.dialogType == DIALOG_BATCH:
            self.widget.setValue(value)
        else:
            self.setComboValue(value, combobox=self.combo)
        self.widgetValueHasChanged.emit(self)

    def value(self):
        """Return the current value; empty modeler text maps to None."""
        if self.dialogType == DIALOG_STANDARD:
            return self.combo.value()
        elif self.dialogType == DIALOG_BATCH:
            return self.widget.getValue()
        else:
            # Free-typed text must be an existing file path unless optional.
            def validator(v):
                if not bool(v):
                    return self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional
                else:
                    return os.path.exists(v)
            if self.combo.currentText():
                return self.comboValue(validator, combobox=self.combo)
            else:
                return None
class StringWidgetWrapper(WidgetWrapper):
    """Widget wrapper for string parameters.

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fix: the deprecation note used to be a bare triple-quoted string
        # placed *after* super().__init__(), which is a no-op expression and
        # not a docstring; it has been moved to the class docstring above.
        from warnings import warn
        warn("StringWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)

    def createWidget(self):
        """Create the text-entry widget appropriate for the dialog type."""
        if self.dialogType == DIALOG_STANDARD:
            if self.parameterDefinition().multiLine():
                widget = QPlainTextEdit()
            else:
                self._lineedit = QLineEdit()
                widget = self._lineedit
        elif self.dialogType == DIALOG_BATCH:
            widget = QLineEdit()
        else:
            # strings, numbers, files and table fields are all allowed input types
            strings = self.dialog.getAvailableValuesOfType(
                [QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterDistance, QgsProcessingParameterFile,
                 QgsProcessingParameterField, QgsProcessingParameterExpression],
                [QgsProcessingOutputString, QgsProcessingOutputFile])
            options = [(self.dialog.resolveValueDescription(s), s) for s in strings]
            if self.parameterDefinition().multiLine():
                widget = MultilineTextPanel(options)
            else:
                widget = QComboBox()
                widget.setEditable(True)
                for desc, val in options:
                    widget.addItem(desc, val)
        return widget

    def showExpressionsBuilder(self):
        """Open the expression builder and store the evaluated/raw expression."""
        context = dataobjects.createExpressionContext()
        value = self.value()
        if not isinstance(value, str):
            value = ''
        dlg = QgsExpressionBuilderDialog(None, value, self.widget, 'generic', context)
        dlg.setWindowTitle(self.tr('Expression based input'))
        if dlg.exec_() == QDialog.Accepted:
            exp = QgsExpression(dlg.expressionText())
            if not exp.hasParserError():
                if self.dialogType == DIALOG_STANDARD:
                    # Standard dialog stores the evaluated result...
                    self.setValue(str(exp.evaluate(context)))
                else:
                    # ...the modeler keeps the expression text itself.
                    self.setValue(dlg.expressionText())

    def setValue(self, value):
        """Push *value* into whichever text widget is active."""
        if value is None or value == NULL:
            return
        if self.dialogType == DIALOG_STANDARD:
            if self.parameterDefinition().multiLine():
                self.widget.setPlainText(value)
            else:
                self._lineedit.setText(value)
        elif self.dialogType == DIALOG_BATCH:
            self.widget.setText(value)
        else:
            if self.parameterDefinition().multiLine():
                self.widget.setValue(value)
            else:
                self.setComboValue(value)

    def value(self):
        """Return the current text.

        In the modeler, an empty required multiline value raises
        InvalidParameterValue; an empty optional one returns None.
        """
        if self.dialogType == DIALOG_STANDARD:
            if self.parameterDefinition().multiLine():
                text = self.widget.toPlainText()
            else:
                text = self._lineedit.text()
            return text
        elif self.dialogType == DIALOG_BATCH:
            return self.widget.text()
        else:
            if self.parameterDefinition().multiLine():
                value = self.widget.getValue()
                option = self.widget.getOption()
                if option == MultilineTextPanel.USE_TEXT:
                    if value == '':
                        if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                            return None
                        else:
                            raise InvalidParameterValue()
                    else:
                        return value
                else:
                    return value
            else:
                def validator(v):
                    return bool(v) or self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional
                return self.comboValue(validator)
class ExpressionWidgetWrapper(WidgetWrapper):
    """Widget wrapper for expression parameters.

    .. deprecated:: 3.4
        Do not use, will be removed in QGIS 4.0
    """

    def __init__(self, param, dialog, row=0, col=0, **kwargs):
        from warnings import warn
        # Bug fix: the warning previously named "StringWidgetWrapper"
        # (copy-pasted from another class); report the correct class name.
        warn("ExpressionWidgetWrapper is deprecated and will be removed in QGIS 4.0", DeprecationWarning)
        super().__init__(param, dialog, row, col, **kwargs)
        # Context used to resolve parent-layer values into actual map layers.
        self.context = dataobjects.createContext()

    def createWidget(self):
        """Create an expression editor (field-aware when a parent layer exists)."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self.parameterDefinition().parentLayerParameterName():
                widget = QgsFieldExpressionWidget()
            else:
                widget = QgsExpressionLineEdit()
            if self.parameterDefinition().defaultValue():
                widget.setExpression(self.parameterDefinition().defaultValue())
        else:
            strings = self.dialog.getAvailableValuesOfType(
                [QgsProcessingParameterExpression, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterDistance],
                (QgsProcessingOutputString, QgsProcessingOutputNumber))
            options = [(self.dialog.resolveValueDescription(s), s) for s in strings]
            widget = QComboBox()
            widget.setEditable(True)
            for desc, val in options:
                widget.addItem(desc, val)
            widget.setEditText(self.parameterDefinition().defaultValue() or "")
        return widget

    def postInitialize(self, wrappers):
        """Find the parent-layer wrapper and track its value changes."""
        for wrapper in wrappers:
            if wrapper.parameterDefinition().name() == self.parameterDefinition().parentLayerParameterName():
                if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
                    self.setLayer(wrapper.parameterValue())
                    wrapper.widgetValueHasChanged.connect(self.parentLayerChanged)
                break

    def parentLayerChanged(self, wrapper):
        self.setLayer(wrapper.parameterValue())

    def setLayer(self, layer):
        """Resolve *layer* (definition, string or layer) and pass it to the widget."""
        if isinstance(layer, QgsProcessingFeatureSourceDefinition):
            layer, ok = layer.source.valueAsString(self.context.expressionContext())
        if isinstance(layer, str):
            layer = QgsProcessingUtils.mapLayerFromString(layer, self.context)
        self.widget.setLayer(layer)

    def setValue(self, value):
        """Apply *value* to the widget; None/NULL values are ignored."""
        if value is None or value == NULL:
            return
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            self.widget.setExpression(value)
        else:
            self.setComboValue(value)

    def value(self):
        """Return the current expression text (or the modeler combo value)."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            try:
                return self.widget.asExpression()
            except:
                # QgsExpressionLineEdit exposes expression() instead.
                return self.widget.expression()
        else:
            def validator(v):
                return bool(v) or self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional
            return self.comboValue(validator)
class VectorLayerWidgetWrapper(WidgetWrapper):
    """Widget wrapper for vector-layer parameters."""

    NOT_SELECTED = '[Not selected]'

    def createWidget(self):
        """Create the vector-layer chooser appropriate for the hosting dialog."""
        if self.dialogType == DIALOG_STANDARD:
            self.combo = QgsProcessingMapLayerComboBox(self.parameterDefinition())
            self.context = dataobjects.createContext()
            try:
                # Pre-select the active layer when it is a vector layer;
                # iface/activeLayer may be unavailable, hence best effort.
                if iface.activeLayer().type() == QgsMapLayerType.VectorLayer:
                    self.combo.setLayer(iface.activeLayer())
            except:
                pass
            self.combo.valueChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            self.combo.triggerFileSelection.connect(self.selectFile)
            return self.combo
        elif self.dialogType == DIALOG_BATCH:
            widget = BatchInputSelectionPanel(self.parameterDefinition(), self.row, self.col, self.dialog)
            widget.valueChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget
        else:
            # Modeler dialog: editable combo of compatible model values plus a
            # file-selection button.
            self.combo = QComboBox()
            self.combo.setEditable(True)
            tables = self.dialog.getAvailableValuesOfType((QgsProcessingParameterVectorLayer, QgsProcessingParameterString),
                                                          (QgsProcessingOutputVectorLayer, QgsProcessingOutputMapLayer, QgsProcessingOutputFile, QgsProcessingOutputString))
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                self.combo.addItem(self.NOT_SELECTED, self.NOT_SET_OPTION)
            for table in tables:
                self.combo.addItem(self.dialog.resolveValueDescription(table), table)
            widget = QWidget()
            layout = QHBoxLayout()
            layout.setMargin(0)
            layout.setContentsMargins(0, 0, 0, 0)
            layout.setSpacing(6)
            layout.addWidget(self.combo)
            btn = QToolButton()
            btn.setText('…')
            btn.setToolTip(self.tr("Select file"))
            btn.clicked.connect(self.selectFile)
            layout.addWidget(btn)
            widget.setLayout(layout)
            return widget

    def selectFile(self):
        """Let the user pick a vector file and push it into the combo."""
        filename, selected_filter = self.getFileName(self.combo.currentText())
        if filename:
            # NOTE(review): getRasterSublayer is called here for a *vector*
            # parameter - presumably it also handles multi-layer container
            # formats for vector data; confirm against dataobjects.
            filename = dataobjects.getRasterSublayer(filename, self.parameterDefinition())
            if isinstance(self.combo, QgsProcessingMapLayerComboBox):
                self.combo.setValue(filename, self.context)
            elif isinstance(self.combo, QgsMapLayerComboBox):
                items = self.combo.additionalItems()
                items.append(filename)
                self.combo.setAdditionalItems(items)
                self.combo.setCurrentIndex(self.combo.findText(filename))
            else:
                self.combo.setEditText(filename)
        self.widgetValueHasChanged.emit(self)

    def setValue(self, value):
        """Set the wrapper's value; layer-id strings are resolved to project layers."""
        if value is None or value == NULL:
            return
        if self.dialogType == DIALOG_STANDARD:
            if isinstance(value, str):
                layer = QgsProject.instance().mapLayer(value)
                if layer is not None:
                    value = layer
            self.combo.setValue(value, self.context)
        elif self.dialogType == DIALOG_BATCH:
            # NOTE(review): early 'return' of the widget's setValue() result
            # skips the widgetValueHasChanged emit below; sibling wrappers
            # (e.g. MapLayerWidgetWrapper) fall through and emit - confirm
            # whether the difference is intentional.
            return self.widget.setValue(value)
        else:
            self.setComboValue(value, combobox=self.combo)
        self.widgetValueHasChanged.emit(self)

    def value(self):
        """Return the current value; modeler free text is validated by flags only."""
        if self.dialogType == DIALOG_STANDARD:
            return self.combo.value()
        elif self.dialogType == DIALOG_BATCH:
            return self.widget.getValue()
        else:
            def validator(v):
                return bool(v) or self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional
            return self.comboValue(validator, combobox=self.combo)
class TableFieldWidgetWrapper(WidgetWrapper):
    """Widget wrapper for field parameters: attribute field(s) of a parent vector layer."""

    NOT_SET = '[Not set]'

    def __init__(self, param, dialog, row=0, col=0, **kwargs):
        super().__init__(param, dialog, row, col, **kwargs)
        # Context used to resolve parent-layer values (strings, source
        # definitions) into actual map layers.
        self.context = dataobjects.createContext()

    def createWidget(self):
        """Create the field chooser appropriate for the dialog type."""
        self._layer = None
        # Cache of parent layers already resolved from file paths, so
        # re-selecting the same path does not reload the layer.
        self.parent_file_based_layers = {}
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self.parameterDefinition().allowMultiple():
                return MultipleInputPanel(options=[])
            else:
                widget = QgsFieldComboBox()
                widget.setAllowEmptyFieldName(self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional)
                widget.fieldChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
                # Filter the combo to the parameter's declared field type.
                if self.parameterDefinition().dataType() == QgsProcessingParameterField.Numeric:
                    widget.setFilters(QgsFieldProxyModel.Numeric)
                elif self.parameterDefinition().dataType() == QgsProcessingParameterField.String:
                    widget.setFilters(QgsFieldProxyModel.String)
                elif self.parameterDefinition().dataType() == QgsProcessingParameterField.DateTime:
                    widget.setFilters(QgsFieldProxyModel.Date | QgsFieldProxyModel.Time)
                return widget
        else:
            # Modeler dialog: editable combo accepting model values or typed
            # field names.
            widget = QComboBox()
            widget.setEditable(True)
            fields = self.dialog.getAvailableValuesOfType([QgsProcessingParameterField, QgsProcessingParameterString],
                                                          [QgsProcessingOutputString])
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                widget.addItem(self.NOT_SET, self.NOT_SET_OPTION)
            for f in fields:
                widget.addItem(self.dialog.resolveValueDescription(f), f)
            widget.setToolTip(
                self.tr(
                    'Input parameter, or name of field (separate field names with ; for multiple field parameters)'))
            return widget

    def postInitialize(self, wrappers):
        """Find the parent-layer wrapper and track its value changes."""
        for wrapper in wrappers:
            if wrapper.parameterDefinition().name() == self.parameterDefinition().parentLayerParameterName():
                if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
                    self.setLayer(wrapper.parameterValue())
                    wrapper.widgetValueHasChanged.connect(self.parentValueChanged)
                break

    def parentValueChanged(self, wrapper):
        """Refresh the field list when the parent layer value changes."""
        value = wrapper.parameterValue()
        if isinstance(value, str) and value in self.parent_file_based_layers:
            # Reuse the previously loaded layer for this file path.
            self.setLayer(self.parent_file_based_layers[value])
        else:
            self.setLayer(value)
            if isinstance(value, str):
                self.parent_file_based_layers[value] = self._layer

    def setLayer(self, layer):
        """Resolve *layer* (definition, string or layer) and repopulate the fields."""
        if isinstance(layer, QgsProcessingFeatureSourceDefinition):
            layer, ok = layer.source.valueAsString(self.context.expressionContext())
        if isinstance(layer, str):
            if not layer:  # empty string
                layer = None
            else:
                layer = QgsProcessingUtils.mapLayerFromString(layer, self.context)
                if not isinstance(layer, QgsVectorLayer) or not layer.isValid():
                    # Warn the user and keep the previous layer/fields intact.
                    self.dialog.messageBar().clearWidgets()
                    self.dialog.messageBar().pushMessage("", self.tr("Could not load selected layer/table. Dependent field could not be populated"),
                                                         level=Qgis.Warning, duration=5)
                    return
        self._layer = layer
        self.refreshItems()

    def refreshItems(self):
        """Repopulate the field widget from the current parent layer."""
        if self.parameterDefinition().allowMultiple():
            self.widget.updateForOptions(self.getFields())
        else:
            self.widget.setLayer(self._layer)
            self.widget.setCurrentIndex(0)
        if self.parameterDefinition().defaultValue() is not None:
            self.setValue(self.parameterDefinition().defaultValue())

    def getFields(self):
        """Return the names of the layer's fields matching the parameter's data type."""
        if self._layer is None:
            return []
        fieldTypes = []
        if self.parameterDefinition().dataType() == QgsProcessingParameterField.String:
            fieldTypes = [QVariant.String]
        elif self.parameterDefinition().dataType() == QgsProcessingParameterField.Numeric:
            fieldTypes = [QVariant.Int, QVariant.Double, QVariant.LongLong,
                          QVariant.UInt, QVariant.ULongLong]
        elif self.parameterDefinition().dataType() == QgsProcessingParameterField.DateTime:
            fieldTypes = [QVariant.Date, QVariant.Time, QVariant.DateTime]
        fieldNames = []
        for field in self._layer.fields():
            # An empty fieldTypes list means "any field type" is accepted.
            if not fieldTypes or field.type() in fieldTypes:
                fieldNames.append(str(field.name()))
        return fieldNames

    def setValue(self, value):
        """Apply *value* (field name, list of names, or ';'-separated string)."""
        if value is None or value == NULL:
            return
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self.parameterDefinition().allowMultiple():
                options = self.widget.options
                selected = []
                if isinstance(value, str):
                    value = value.split(';')
                for v in value:
                    for i, opt in enumerate(options):
                        if opt == v:
                            selected.append(i)
                        # case insensitive check - only do if matching case value is not present
                        elif v not in options and opt.lower() == v.lower():
                            selected.append(i)
                self.widget.setSelectedItems(selected)
            else:
                self.widget.setField(value)
        else:
            self.setComboValue(value)

    def value(self):
        """Return the selected field name(s); None for an empty optional field."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self.parameterDefinition().allowMultiple():
                return [self.widget.options[i] for i in self.widget.selectedoptions]
            else:
                f = self.widget.currentField()
                if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional and not f:
                    return None
                return f
        else:
            def validator(v):
                return bool(v) or self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional
            return self.comboValue(validator)
class BandWidgetWrapper(WidgetWrapper):
    """Widget wrapper for raster band parameters, dependent on a parent raster layer."""

    NOT_SET = '[Not set]'

    def __init__(self, param, dialog, row=0, col=0, **kwargs):
        super().__init__(param, dialog, row, col, **kwargs)
        # Context used to resolve parent-layer values into actual map layers.
        self.context = dataobjects.createContext()

    def createWidget(self):
        """Create the band chooser appropriate for the dialog type."""
        self._layer = None
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self.parameterDefinition().allowMultiple():
                return MultipleInputPanel(options=[])
            widget = QgsRasterBandComboBox()
            widget.setShowNotSetOption(self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional)
            widget.bandChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
            return widget
        else:
            # Modeler dialog: editable combo accepting model values or a
            # typed band number.
            widget = QComboBox()
            widget.setEditable(True)
            fields = self.dialog.getAvailableValuesOfType([QgsProcessingParameterBand, QgsProcessingParameterDistance, QgsProcessingParameterNumber],
                                                          [QgsProcessingOutputNumber])
            if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
                widget.addItem(self.NOT_SET, self.NOT_SET_OPTION)
            for f in fields:
                widget.addItem(self.dialog.resolveValueDescription(f), f)
            return widget

    def postInitialize(self, wrappers):
        """Find the parent raster-layer wrapper and track its value changes."""
        for wrapper in wrappers:
            if wrapper.parameterDefinition().name() == self.parameterDefinition().parentLayerParameterName():
                if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
                    self.setLayer(wrapper.parameterValue())
                    wrapper.widgetValueHasChanged.connect(self.parentValueChanged)
                break

    def parentValueChanged(self, wrapper):
        """Refresh the band list when the parent layer changes."""
        self.setLayer(wrapper.parameterValue())

    def setLayer(self, layer):
        """Resolve *layer* to a map layer and repopulate the band list."""
        # NOTE(review): this isinstance check looks suspect - parameter
        # definition objects have no '.source' attribute, so this branch
        # would raise if ever taken; sibling wrappers test
        # QgsProcessingFeatureSourceDefinition here instead. Confirm intent.
        if isinstance(layer, QgsProcessingParameterRasterLayer):
            layer, ok = layer.source.valueAsString(self.context.expressionContext())
        if isinstance(layer, str):
            layer = QgsProcessingUtils.mapLayerFromString(layer, self.context)
        self._layer = layer
        self.refreshItems()

    def getBands(self):
        """Return display names ("band name (interpretation)") for each band."""
        bands = []
        if self._layer is not None:
            provider = self._layer.dataProvider()
            # Raster band numbering is 1-based.
            for band in range(1, provider.bandCount() + 1):
                name = provider.generateBandName(band)
                interpretation = provider.colorInterpretationName(band)
                if interpretation != "Undefined":
                    name = name + ' ({})'.format(interpretation)
                bands.append(name)
        return bands

    def refreshItems(self):
        """Repopulate the band widget from the current layer."""
        # NOTE(review): uses self.param where the rest of the class calls
        # self.parameterDefinition() - presumably equivalent; confirm and
        # unify.
        if self.param.allowMultiple():
            self.widget.setSelectedItems([])
            self.widget.updateForOptions(self.getBands())
        else:
            self.widget.setLayer(self._layer)
            self.widget.setCurrentIndex(0)

    def setValue(self, value):
        """Apply *value* (band number(s) or ';'-separated string) to the widget."""
        if value is None or value == NULL:
            return
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self.parameterDefinition().allowMultiple():
                options = self.widget.options
                selected = []
                if isinstance(value, str):
                    value = value.split(';')
                for v in value:
                    for i, opt in enumerate(options):
                        # Match the band number as a standalone integer token
                        # within the option's display string.
                        match = re.search('(?:\\A|[^0-9]){}(?:\\Z|[^0-9]|)'.format(v), opt)
                        if match:
                            selected.append(i)
                self.widget.setSelectedItems(selected)
            else:
                self.widget.setBand(value)
        else:
            self.setComboValue(value)

    def value(self):
        """Return the selected band number(s); None for an empty optional band."""
        if self.dialogType in (DIALOG_STANDARD, DIALOG_BATCH):
            if self.parameterDefinition().allowMultiple():
                bands = []
                for i in self.widget.selectedoptions:
                    # Extract the band number back out of the display string.
                    match = re.search('(?:\\A|[^0-9])([0-9]+)(?:\\Z|[^0-9]|)', self.widget.options[i])
                    if match:
                        bands.append(match.group(1))
                return bands
            else:
                f = self.widget.currentBand()
                if self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional and not f:
                    return None
                return f
        else:
            def validator(v):
                return bool(v) or self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional
            return self.comboValue(validator)
class WidgetWrapperFactory:
    """
    Factory for parameter widget wrappers
    """

    @staticmethod
    def create_wrapper(param, dialog, row=0, col=0):
        """Create the widget wrapper for *param*, hosted by *dialog*."""
        metadata = param.metadata().get('widget_wrapper', None)
        # VERY messy logic here to avoid breaking 3.0 API which allowed
        # metadata "widget_wrapper" value to be either a string name of a
        # class OR a dict.
        # TODO QGIS 4.0 -- require widget_wrapper to be a dict.
        has_custom_wrapper = bool(metadata) and (
            not isinstance(metadata, dict) or metadata.get('class', None) is not None)
        if has_custom_wrapper:
            return WidgetWrapperFactory.create_wrapper_from_metadata(param, dialog, row, col)

        # try from c++ registry first
        dialog_class = dialog.__class__.__name__
        if dialog_class == 'ModelerParametersDialog':
            wrapper = QgsGui.processingGuiRegistry().createModelerParameterWidget(dialog.model,
                                                                                  dialog.childId,
                                                                                  param,
                                                                                  dialog.context)
        else:
            gui_type = dialogTypes.get(dialog_class, QgsProcessingGui.Standard)
            wrapper = QgsGui.processingGuiRegistry().createParameterWidgetWrapper(param, gui_type)
        if wrapper is not None:
            wrapper.setDialog(dialog)
            return wrapper

        # fall back to the Python registry
        return WidgetWrapperFactory.create_wrapper_from_class(param, dialog, row, col)

    @staticmethod
    def create_wrapper_from_metadata(param, dialog, row=0, col=0):
        """Instantiate a wrapper described by the parameter's metadata."""
        spec = param.metadata().get('widget_wrapper', None)
        extra_kwargs = {}
        # the metadata may be a dict holding the class under 'class'...
        if isinstance(spec, dict):
            extra_kwargs = deepcopy(spec)
            spec = extra_kwargs.pop('class')
        # ...or a dotted class path to import...
        if isinstance(spec, str):
            module_path, _, class_name = spec.rpartition('.')
            module = __import__(module_path, fromlist=[class_name])
            spec = getattr(module, class_name)
        # ...or directly a class object...
        if isclass(spec):
            spec = spec(param, dialog, row, col, **extra_kwargs)
        # ...or already a wrapper instance
        return spec

    @staticmethod
    def create_wrapper_from_class(param, dialog, row=0, col=0):
        """Look up the Python wrapper class for the parameter's type id."""
        # Entries marked deprecated have c++ replacements but are kept for
        # API compatibility.
        wrapper_classes = {
            'boolean': BooleanWidgetWrapper,  # deprecated, moved to c++
            'crs': CrsWidgetWrapper,  # deprecated, moved to c++
            'extent': ExtentWidgetWrapper,
            'point': PointWidgetWrapper,  # deprecated, moved to c++
            'file': FileWidgetWrapper,  # deprecated, moved to c++
            'multilayer': MultipleLayerWidgetWrapper,
            'number': NumberWidgetWrapper,  # deprecated, moved to c++
            'distance': DistanceWidgetWrapper,  # deprecated, moved to c++
            'raster': RasterWidgetWrapper,
            'enum': EnumWidgetWrapper,  # deprecated, moved to c++
            'string': StringWidgetWrapper,  # deprecated, moved to c++
            'expression': ExpressionWidgetWrapper,  # deprecated, moved to c++
            'vector': VectorLayerWidgetWrapper,
            'field': TableFieldWidgetWrapper,
            'source': FeatureSourceWidgetWrapper,
            'band': BandWidgetWrapper,
            'layer': MapLayerWidgetWrapper,
            'range': RangeWidgetWrapper,  # deprecated, moved to c++
            'matrix': FixedTableWidgetWrapper,  # deprecated, moved to c++
            'mesh': MeshWidgetWrapper,
        }
        # Mirrors the original assert-on-unknown-type behaviour.
        assert param.type() in wrapper_classes, param.type()
        return wrapper_classes[param.type()](param, dialog, row, col)
| gpl-2.0 |
RobinQuetin/CAIRIS-web | cairis/cairis/GoalsDialog.py | 1 | 2976 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
import Goal
from GoalDialog import GoalDialog
from DialogClassParameters import DialogClassParameters
import ARM
from DimensionBaseDialog import DimensionBaseDialog
class GoalsDialog(DimensionBaseDialog):
  """Dialog listing all goals, supporting add, update and delete operations.

  Each row shows the goal name, its originator, and a status derived from
  the goal's colour (black = 'Check', red = 'To refine', else 'OK').
  """

  def __init__(self, parent):
    DimensionBaseDialog.__init__(self, parent, armid.GOALS_ID, 'Goals', (930, 300), 'goal.png')
    self.theMainWindow = parent
    idList = [armid.GOALS_GOALLIST_ID, armid.GOALS_BUTTONADD_ID, armid.GOALS_BUTTONDELETE_ID]
    columnList = ['Name', 'Originator', 'Status']
    self.buildControls(idList, columnList, self.dbProxy.getColouredGoals, 'goal')
    listCtrl = self.FindWindowById(armid.GOALS_GOALLIST_ID)
    listCtrl.SetColumnWidth(0, 300)

  def addObjectRow(self, listCtrl, listRow, goal):
    """Render one goal as a list row; colour encodes refinement status."""
    listCtrl.InsertStringItem(listRow, goal.name())
    listCtrl.SetStringItem(listRow, 1, goal.originator())
    if (goal.colour() == 'black'):
      listCtrl.SetStringItem(listRow, 2, 'Check')
    elif (goal.colour() == 'red'):
      listCtrl.SetStringItem(listRow, 2, 'To refine')
    else:
      listCtrl.SetStringItem(listRow, 2, 'OK')

  def onAdd(self, evt):
    """Open the add-goal dialog; show an error dialog on ARM failures."""
    try:
      addParameters = DialogClassParameters(armid.GOAL_ID, 'Add goal', GoalDialog, armid.GOAL_BUTTONCOMMIT_ID, self.dbProxy.addGoal, True)
      self.addObject(addParameters)
    # 'except X as e' works on Python 2.6+ and 3.x (was Py2-only 'except X, e')
    except ARM.ARMException as errorText:
      dlg = wx.MessageDialog(self, str(errorText), 'Add goal', wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
      return

  def onUpdate(self, evt):
    """Open the edit-goal dialog for the currently selected goal."""
    selectedObjt = self.objts[self.selectedLabel]
    try:
      updateParameters = DialogClassParameters(armid.GOAL_ID, 'Edit goal', GoalDialog, armid.GOAL_BUTTONCOMMIT_ID, self.dbProxy.updateGoal, False)
      self.updateObject(selectedObjt, updateParameters)
    except ARM.ARMException as errorText:
      dlg = wx.MessageDialog(self, str(errorText), 'Edit goal', wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      # BUG FIX: was 'dlg.Destroy' (attribute access, a no-op) — the dialog
      # was never destroyed.  Same fix applied in onDelete below.
      dlg.Destroy()
      return

  def onDelete(self, evt):
    """Delete the selected goal; show an error dialog on ARM failures."""
    try:
      self.deleteObject('No goal', 'Delete goal', self.dbProxy.deleteGoal)
    except ARM.ARMException as errorText:
      dlg = wx.MessageDialog(self, str(errorText), 'Delete goal', wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
      return
| apache-2.0 |
BIGBALLON/cifar-10-cnn | htd/ResNet.py | 1 | 8389 | import keras
import argparse
import math
import numpy as np
from keras.datasets import cifar10, cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, Dense, Input, add, Activation, GlobalAveragePooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint
from keras.models import Model
from keras import optimizers, regularizers
from keras import backend as K
# set GPU memory: let TensorFlow grow GPU allocations on demand instead of
# reserving all memory up front.
if('tensorflow' == K.backend()):
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # BUG FIX: the session was created but never handed to Keras, so the
    # allow_growth option was silently ignored.  set_session was already
    # imported for exactly this purpose.
    set_session(sess)

# set parameters via parser
parser = argparse.ArgumentParser(description='Residual Network Training code(Keras version)')
parser.add_argument('-b','--batch_size', type=int, default=128, metavar='NUMBER',
                    help='batch size(default: 128)')
parser.add_argument('-e','--epochs', type=int, default=200, metavar='NUMBER',
                    help='epochs(default: 200)')
parser.add_argument('-n','--stack_n', type=int, default=5, metavar='NUMBER',
                    help='stack number n, total layers = 6 * n + 2 (default: 5)')
parser.add_argument('-d','--dataset', type=str, default="cifar10", metavar='STRING',
                    help='dataset. (default: cifar10)')
parser.add_argument('-s','--scheduler', type=str, default="step_decay", metavar='STRING',
                    help='learning rate scheduler. [step_decay, cos or tanh] (default: step_decay)')
parser.add_argument('-c','--count', type=int, default=1, metavar='NUMBER',
                    help='runs number. (default: 1)')
args = parser.parse_args()

# derived hyper-parameters
stack_n = args.stack_n
batch_size = args.batch_size
epochs = args.epochs
scheduler = args.scheduler
layers = 6 * stack_n + 2
img_rows, img_cols = 32, 32
img_channels = 3
iterations = 50000 // batch_size + 1  # steps per epoch over the 50k CIFAR train set
weight_decay = 1e-4

# param for cos & tanh
start_lr = 0.1
end_lr = 0.0
# learning rate scheduler
# step_decay , cos and tanh
def step_decay_scheduler(epoch):
    """Piecewise-constant learning rate: 0.1, then 0.01 from epoch 81,
    then 0.001 from epoch 122."""
    for boundary, rate in ((81, 0.1), (122, 0.01)):
        if epoch < boundary:
            return rate
    return 0.001
def cos_scheduler(epoch):
    """Cosine-shaped learning-rate decay from start_lr toward end_lr.

    Reads the module globals start_lr, end_lr and epochs.
    NOTE(review): the argument pi/2 * (epoch / (epochs/2)) sweeps [0, pi]
    over the full run, so the rate reaches end_lr exactly at the final
    epoch — confirm this half-period form is the intended schedule.
    """
    return (start_lr+end_lr)/2.+(start_lr-end_lr)/2.*math.cos(math.pi/2.0*(epoch/(epochs/2.0)))
def tanh_scheduler(epoch):
    """Hyperbolic-tangent learning-rate decay (HTD) from start_lr toward 0.

    Maps epoch linearly onto the tanh argument range [start, end] = [-6, 3];
    reads the module globals start_lr and epochs.
    """
    start = -6.0
    end = 3.0
    return start_lr / 2.0 * (1- math.tanh( (end-start)*epoch/epochs + start))
class AccHistory(keras.callbacks.Callback):
    """Keras callback recording validation accuracy per batch and per epoch."""

    def on_train_begin(self, logs={}):
        # two parallel histories; only 'epoch' is used by print_best_acc
        self.val_acc = {'batch':[], 'epoch':[]}

    def on_batch_end(self, batch, logs={}):
        # NOTE(review): 'val_acc' is normally only present in per-epoch logs,
        # so this list likely fills with None — confirm before relying on it.
        self.val_acc['batch'].append(logs.get('val_acc'))

    def on_epoch_end(self, batch, logs={}):
        self.val_acc['epoch'].append(logs.get('val_acc'))

    def print_best_acc(self):
        """Print the best per-epoch validation accuracy seen during training."""
        print('== BEST ACC: {:.4f} =='.format(max(self.val_acc['epoch'])))
# residual networks
def residual_network(img_input, classes_num=10, stack_n=5):
    """Build a pre-activation residual network graph on ``img_input``.

    Total depth is 6 * stack_n + 2 layers; returns the softmax output tensor
    (to be wrapped in a keras Model by the caller).
    """
    def bn_relu(x):
        # pre-activation ordering: BatchNorm then ReLU before each conv
        x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
        return Activation('relu')(x)

    def conv(x, out, size, stride):
        # 'same'-padded conv with He init and L2 weight decay
        return Conv2D(out, kernel_size=size, strides=stride,
                      padding='same',
                      kernel_initializer="he_normal",
                      kernel_regularizer=regularizers.l2(weight_decay)
                      )(x)

    def residual_block(x, o_filters, increase=False):
        # two 3x3 convs; when increase, downsample (stride 2) and project the
        # shortcut with a 1x1 conv so the shapes match for the addition
        stride = (2,2) if increase else (1,1)
        o1 = bn_relu(x)
        conv_1 = conv(o1, o_filters, (3,3), stride)
        o2 = bn_relu(conv_1)
        conv_2 = conv(o2, o_filters, (3,3), (1,1))
        if increase:
            projection = conv(o1, o_filters, (1,1), (2,2))
            block = add([conv_2, projection])
        else:
            block = add([conv_2, x])
        return block

    # build model ( total layers = stack_n * 3 * 2 + 2 )
    # stack_n = 5 by default, total layers = 32
    # input: 32x32x3 output: 32x32x16
    x = Conv2D(filters=16, kernel_size=(3,3), strides=(1,1), padding='same',
               kernel_initializer="he_normal",
               kernel_regularizer=regularizers.l2(weight_decay))(img_input)

    # input: 32x32x16 output: 32x32x16
    for _ in range(stack_n):
        x = residual_block(x, 16, False)

    # input: 32x32x16 output: 16x16x32
    x = residual_block(x, 32, True)
    for _ in range(1, stack_n):
        x = residual_block(x, 32, False)

    # input: 16x16x32 output: 8x8x64
    x = residual_block(x, 64, True)
    for _ in range(1, stack_n):
        x = residual_block(x, 64, False)

    x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)

    # input: 64 output: 10
    out = Dense(classes_num, activation='softmax',
                kernel_initializer="he_normal",
                kernel_regularizer=regularizers.l2(weight_decay))(x)
    return out
if __name__ == '__main__':
    # Print the training configuration so runs are easy to identify in logs.
    print("\n========================================")
    print("MODEL: Residual Network ({:2d} layers)".format(6*stack_n+2))
    print("BATCH SIZE: {:3d}".format(batch_size))
    print("WEIGHT DECAY: {:.4f}".format(weight_decay))
    print("EPOCHS: {:3d}".format(epochs))
    print("DATASET: {:}".format(args.dataset))
    print("LEARNING RATE SCHEDULER: {:}".format(args.scheduler))

    print("\n== LOADING DATA... ==")
    # load data cifar-10 or cifar-100 (means/stds are precomputed per channel)
    if args.dataset == "cifar100":
        num_classes = 100
        mean = [129.3, 124.1, 112.4]
        std = [68.2, 65.4, 70.4]
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    else:
        num_classes = 10
        mean = [125.3, 123.0, 113.9]
        std = [63.0, 62.1, 66.7]
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    print("\n== DONE! ==\n\n== COLOR PREPROCESSING... ==")
    # per-channel standardisation using the dataset statistics above
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    for i in range(3):
        x_train[:,:,:,i] = (x_train[:,:,:,i] - mean[i]) / std[i]
        x_test[:,:,:,i] = (x_test[:,:,:,i] - mean[i]) / std[i]

    print("\n== DONE! ==\n\n== BUILD MODEL... ==")
    # build network
    img_input = Input(shape=(img_rows, img_cols, img_channels))
    output = residual_network(img_input, num_classes, stack_n)
    resnet = Model(img_input, output)
    # print model architecture if you need.
    # print(resnet.summary())

    # set optimizer
    sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
    resnet.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    # set callback.  BUG FIX: an unrecognised scheduler name previously left
    # `lrs` unassigned and crashed later with a NameError; fail fast instead.
    if scheduler == 'step_decay':
        lrs = LearningRateScheduler(step_decay_scheduler)
    elif scheduler == 'cos':
        lrs = LearningRateScheduler(cos_scheduler)
    elif scheduler == 'tanh':
        lrs = LearningRateScheduler(tanh_scheduler)
    else:
        raise ValueError("unknown learning rate scheduler: {}".format(scheduler))
    tb = TensorBoard(log_dir='./resnet_{:d}_{}_{}_{}/'.format(layers,args.dataset,scheduler,args.count), histogram_freq=0)
    ckpt = ModelCheckpoint('resnet_{:d}_{}_{}_{}.h5'.format(layers,args.dataset,scheduler,args.count),
                           monitor='val_acc', verbose=0, save_best_only=True, mode='auto', period=1)
    his = AccHistory()
    cbks = [tb, lrs, ckpt, his]

    # set data augmentation
    print("\n== USING REAL-TIME DATA AUGMENTATION, START TRAIN... ==")
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.125,
                                 height_shift_range=0.125,
                                 fill_mode='constant', cval=0.)
    datagen.fit(x_train)

    # start training
    resnet.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                         steps_per_epoch=iterations,
                         epochs=epochs,
                         callbacks=cbks,
                         validation_data=(x_test, y_test))
    his.print_best_acc()
    print("\n== FINISH TRAINING! ==")
jspargo/AneMo | thermo/flask/lib/python2.7/site-packages/jinja2/defaults.py | 659 | 1068 | # -*- coding: utf-8 -*-
"""
jinja2.defaults
~~~~~~~~~~~~~~~
Jinja default filters and tags.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import range_type
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
# defaults for the parser / lexer delimiters
BLOCK_START_STRING = '{%'
BLOCK_END_STRING = '%}'
VARIABLE_START_STRING = '{{'
VARIABLE_END_STRING = '}}'
COMMENT_START_STRING = '{#'
COMMENT_END_STRING = '#}'
LINE_STATEMENT_PREFIX = None     # e.g. '#' would enable line statements
LINE_COMMENT_PREFIX = None       # e.g. '##' would enable line comments
TRIM_BLOCKS = False              # remove first newline after a block tag
LSTRIP_BLOCKS = False            # strip whitespace left of a block tag
NEWLINE_SEQUENCE = '\n'
KEEP_TRAILING_NEWLINE = False

# default filters, tests and namespace
from jinja2.filters import FILTERS as DEFAULT_FILTERS
from jinja2.tests import TESTS as DEFAULT_TESTS
DEFAULT_NAMESPACE = {
    'range': range_type,
    'dict': lambda **kw: kw,
    'lipsum': generate_lorem_ipsum,
    'cycler': Cycler,
    'joiner': Joiner
}

# export all constants (every ALL_CAPS name defined above)
__all__ = tuple(x for x in locals().keys() if x.isupper())
| gpl-2.0 |
jsheedy/velotronheavyindustries.com | intro-to-d3-grid-map/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
# A minimal memoizing decorator; arguments must be hashable.
class memoize(object):
  """Decorator caching a function's results keyed on its positional args."""

  def __init__(self, func):
    self.func = func
    self.cache = {}

  def __call__(self, *args):
    if args in self.cache:
      return self.cache[args]
    value = self.func(*args)
    self.cache[args] = value
    return value
class GypError(Exception):
  """A user-facing error; the main entry point catches and displays it."""
  pass
def ExceptionAppend(e, msg):
  """Append |msg| to exception |e|'s message, preserving any extra args."""
  if not e.args:
    e.args = (msg,)
  else:
    # For a single-arg exception this reduces to (message + msg,); for more
    # args the tail is carried over unchanged.
    e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
  """
  Given a list of qualified targets, return those whose bare target name
  equals |target|.
  """
  matches = []
  for candidate in qualified_list:
    if ParseQualifiedTarget(candidate)[1] == target:
      matches.append(candidate)
  return matches
def ParseQualifiedTarget(target):
  """Split a qualified target into [build_file, target, toolset].

  Missing components come back as None.  rsplit is used so a Windows drive
  letter colon in the build-file path is not mistaken for the separator.
  """
  pieces = target.rsplit(':', 1)
  build_file = pieces[0] if len(pieces) == 2 else None
  remainder = pieces[-1]

  pieces = remainder.rsplit('#', 1)
  toolset = pieces[1] if len(pieces) == 2 else None
  return [build_file, pieces[0], toolset]
def ResolveTarget(build_file, target, toolset):
  """Resolve a qualified |target| into canonical [build_file, target, toolset].

  build_file is the file relative to which 'target' is defined; target is the
  qualified target; toolset is the default toolset for that target.  Returns
  a fully defined build file (absolute or relative to the current directory),
  the bare target name, and the toolset.
  """
  [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)

  if parsed_build_file:
    if build_file:
      # If a relative path, parsed_build_file is relative to the directory
      # containing build_file.  If build_file is not in the current directory,
      # parsed_build_file is not a usable path as-is.  Resolve it by
      # interpreting it as relative to build_file.  If parsed_build_file is
      # absolute, it is usable as a path regardless of the current directory,
      # and os.path.join will return it as-is.
      build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
                                                 parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd)
      if not os.path.isabs(build_file):
        build_file = RelativePath(build_file, '.')
    else:
      build_file = parsed_build_file

  if parsed_toolset:
    toolset = parsed_toolset

  return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
  """Return the build-file component of a fully qualified target."""
  build_file, _, _ = ParseQualifiedTarget(fully_qualified_target)
  return build_file
def GetEnvironFallback(var_list, default):
  """Return the first environment variable in |var_list| that is set,
  falling back to |default| when none are."""
  return next((os.environ[var] for var in var_list if var in os.environ),
              default)
def QualifiedTarget(build_file, target, toolset):
  """Build '/path/to/file.gyp:target_name#toolset' from its components.

  The '#toolset' suffix is omitted when |toolset| is falsy.
  """
  suffix = '#' + toolset if toolset else ''
  return build_file + ':' + target + suffix
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
  """Return a path identifying |path| relative to |relative_to|.

  Both inputs are assumed relative to the current directory.  If
  |follow_path_symlink| is true (default) and |path| is a symlink, the
  returned path refers to the real file behind the symlink; otherwise it
  refers to the symlink itself.
  """
  # Convert to normalized (and therefore absolute paths).
  if follow_path_symlink:
    path = os.path.realpath(path)
  else:
    path = os.path.abspath(path)
  relative_to = os.path.realpath(relative_to)

  # On Windows, we can't create a relative path to a different drive, so just
  # use the absolute path.
  if sys.platform == 'win32':
    if (os.path.splitdrive(path)[0].lower() !=
        os.path.splitdrive(relative_to)[0].lower()):
      return path

  # Split the paths into components.
  path_split = path.split(os.path.sep)
  relative_to_split = relative_to.split(os.path.sep)

  # Determine how much of the prefix the two paths share.
  prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))

  # Put enough ".." components to back up out of relative_to to the common
  # prefix, and then append the part of path_split after the common prefix.
  relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
      path_split[prefix_len:]

  if len(relative_split) == 0:
    # The paths were the same.
    return ''

  # Turn it back into a string and we're done.
  return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
  """Given a path like foo/bar that is relative to toplevel_dir, return
  the inverse relative path back to the toplevel_dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string, unless the path contains symlinks.
  """
  if not path:
    return path
  if toplevel_dir is None:
    toplevel_dir = '.'
  return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
  """Like RelativePath, but return |path| unchanged when it is absolute."""
  return path if os.path.isabs(path) else RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
  """Rebase |path| (relative to the dirname of |relative_to|) onto the
  current directory, assuming |relative_to| itself is cwd-relative."""
  base_dir = os.path.dirname(relative_to)
  return os.path.normpath(os.path.join(base_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
# (See the block comment above for the character-by-character rationale.)
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
# Capture group so re.sub can prepend a backslash to the matched character.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  The argument may be quoted and escaped as necessary so POSIX shells treat
  the returned value as a literal for the original argument.  Parameter
  (variable) expansions beginning with $ are deliberately left intact so the
  shell can still expand them.
  """
  text = argument if isinstance(argument, str) else str(argument)
  quote = '"' if _quote.search(text) else ''
  return quote + _escape.sub(r'\\\1', text) + quote
def EncodePOSIXShellList(list):
  """Encode every item of |list| for a POSIX shell and join with spaces."""
  return ' '.join(EncodePOSIXShellArgument(argument) for argument in list)
def DeepDependencyTargets(target_dicts, roots):
  """Return the transitive dependencies of |roots|, excluding the roots
  themselves.  Both 'dependencies' and 'dependencies_original' edges are
  followed."""
  seen = set()
  frontier = set(roots)
  while frontier:
    node = frontier.pop()
    if node in seen:
      continue
    seen.add(node)
    spec = target_dicts[node]
    frontier.update(spec.get('dependencies', []))
    frontier.update(spec.get('dependencies_original', []))
  return list(seen - set(roots))
def BuildFileTargets(target_list, build_file):
  """From |target_list|, return the subset defined in |build_file|."""
  return [target for target in target_list
          if BuildFile(target) == build_file]
def AllTargets(target_list, target_dicts, build_file):
  """Return all targets (direct and transitive deps) for |build_file|."""
  direct = BuildFileTargets(target_list, build_file)
  return direct + DeepDependencyTargets(target_dicts, direct)
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.

  NOTE: this file is Python 2 code ('except OSError, e', octal literals
  like 077 below).

  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """

  class Writer(object):
    """Wrapper around file which only covers the target if it differs."""

    def __init__(self):
      # Pick temporary file in the same directory as the target so the final
      # os.rename is an atomic same-filesystem rename.
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise

    def __getattr__(self, attrname):
      # Delegate everything else to self.tmp_file
      return getattr(self.tmp_file, attrname)

    def close(self):
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:
          # Target not existing yet is fine; anything else is fatal.
          if e.errno != errno.ENOENT:
            raise

        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one?  Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove.  Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise

  return Writer()
def EnsureDirExists(path):
  """Make sure the directory for |path| exists.

  A bare filename (no directory component) is a no-op, and an
  already-existing directory is fine.  Unlike the previous version, which
  swallowed every OSError, any other failure (e.g. a permission error) is
  re-raised so it cannot go unnoticed.
  """
  dirname = os.path.dirname(path)
  if not dirname or os.path.isdir(dirname):
    return
  try:
    os.makedirs(dirname)
  except OSError as e:
    # Tolerate a concurrent creation of the same directory; re-raise
    # everything else.
    if e.errno != errno.EEXIST:
      raise
def GetFlavor(params):
  """Return params['flavor'] if set, otherwise the host system's flavor."""
  if 'flavor' in params:
    return params['flavor']

  # exact sys.platform matches first, then prefix matches
  exact = {
    'cygwin': 'win',
    'win32': 'win',
    'darwin': 'mac',
  }
  if sys.platform in exact:
    return exact[sys.platform]

  for prefix, flavor in (('sunos', 'solaris'),
                         ('freebsd', 'freebsd'),
                         ('openbsd', 'openbsd'),
                         ('netbsd', 'netbsd'),
                         ('aix', 'aix')):
    if sys.platform.startswith(prefix):
      return flavor
  return 'linux'
def CopyTool(flavor, out_path):
  """Finds (flock|mac|win)_tool.py in the gyp directory and copies it
  to |out_path| as an executable 'gyp-<prefix>-tool' script.

  Flavors with no entry in the prefix map need no tool and are a no-op.
  """
  # aix and solaris just need flock emulation. mac and win use more complicated
  # support scripts.
  prefix = {
      'aix': 'flock',
      'solaris': 'flock',
      'mac': 'mac',
      'win': 'win'
      }.get(flavor, None)
  if not prefix:
    return

  # Slurp input file.
  source_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
  with open(source_path) as source_file:
    source = source_file.readlines()

  # Add header and write it out.
  tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
  with open(tool_path, 'w') as tool_file:
    tool_file.write(
        ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))

  # Make file executable.  NOTE: Python 2 octal literal (0o755 in Python 3).
  os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Return |seq| minus duplicates (keyed by idfun), keeping first-seen order."""
  if idfun is None:
    idfun = lambda x: x
  seen = set()
  result = []
  for item in seq:
    marker = idfun(item)
    if marker not in seen:
      seen.add(marker)
      result.append(item)
  return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
  """A set that remembers insertion order, backed by a doubly linked list.

  Each map entry is a 3-item node [key, prev, next]; self.end is the list's
  sentinel.  NOTE(review): collections.MutableSet moved to collections.abc
  in Python 3 (alias removed in 3.10) — this is Python 2 era code.
  """

  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end]         # sentinel node for doubly linked list
    self.map = {}                   # key --> [key, prev, next]
    if iterable is not None:
      self |= iterable

  def __len__(self):
    return len(self.map)

  def __contains__(self, key):
    return key in self.map

  def add(self, key):
    # Append at the tail (just before the sentinel); no-op if already present.
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]

  def discard(self, key):
    # Unlink the node from the list; silently ignores missing keys.
    if key in self.map:
      key, prev_item, next_item = self.map.pop(key)
      prev_item[2] = next_item
      next_item[1] = prev_item

  def __iter__(self):
    # Walk forward from the sentinel.
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]

  def __reversed__(self):
    # Walk backward from the sentinel.
    end = self.end
    curr = end[1]
    while curr is not end:
      yield curr[0]
      curr = curr[1]

  # The second argument is an addition that causes a pylint warning.
  def pop(self, last=True):  # pylint: disable=W0221
    """Remove and return the last (or first, if last=False) element."""
    if not self:
      raise KeyError('set is empty')
    key = self.end[1][0] if last else self.end[2][0]
    self.discard(key)
    return key

  def __repr__(self):
    if not self:
      return '%s()' % (self.__class__.__name__,)
    return '%s(%r)' % (self.__class__.__name__, list(self))

  def __eq__(self, other):
    # Order-sensitive comparison against another OrderedSet, order-insensitive
    # against anything else.
    if isinstance(other, OrderedSet):
      return len(self) == len(other) and list(self) == list(other)
    return set(self) == set(other)

  # Extensions to the recipe.
  def update(self, iterable):
    """Add every item of |iterable|, preserving first-seen order."""
    for i in iterable:
      if i not in self:
        self.add(i)
class CycleError(Exception):
  """Raised when an unexpected cycle is detected during a graph walk."""

  def __init__(self, nodes):
    self.nodes = nodes

  def __str__(self):
    return 'CycleError: cycle involving: %s' % (self.nodes,)
def TopologicallySorted(graph, get_edges):
  r"""Topologically sort based on a user provided edge definition.

  Args:
    graph: A list of node names.
    get_edges: A function mapping from node name to a hashable collection
               of node names which this node has outgoing edges to.
  Returns:
    A list containing all of the node in graph in topological order.
    It is assumed that calling get_edges once for each node and caching is
    cheaper than repeatedly calling get_edges.
  Raises:
    CycleError in the event of a cycle.
  Example:
    graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
    def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', b']
  """
  get_edges = memoize(get_edges)
  visited = set()
  visiting = set()
  ordered_nodes = []

  def Visit(node):
    # Depth-first visit; a node seen while still on the recursion path
    # means a cycle.
    if node in visiting:
      raise CycleError(visiting)
    if node in visited:
      return
    visited.add(node)
    visiting.add(node)
    for neighbor in get_edges(node):
      Visit(neighbor)
    visiting.remove(node)
    # Prepend so that every node precedes the nodes it has edges to.
    ordered_nodes.insert(0, node)

  # sorted() makes the output deterministic for a given graph.
  for node in sorted(graph):
    Visit(node)
  return ordered_nodes
def CrossCompileRequested():
  """Return a truthy value when the environment requests cross-compilation."""
  # TODO: figure out how to not build extra host objects in the
  # non-cross-compile case when this is enabled, and enable unconditionally.
  for var in ('GYP_CROSSCOMPILE',
              'AR_host', 'CC_host', 'CXX_host',
              'AR_target', 'CC_target'):
    value = os.environ.get(var)
    if value:
      return value
  return os.environ.get('CXX_target')
| mit |
alephobjects/Cura2 | plugins/CuraEngineBackend/StartSliceJob.py | 1 | 20903 | # Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import numpy
from string import Formatter
from enum import IntEnum
import time
from UM.Job import Job
from UM.Application import Application
from UM.Logger import Logger
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator
from UM.Settings.Validator import ValidatorState
from UM.Settings.SettingRelation import RelationType
from cura.Scene.CuraSceneNode import CuraSceneNode
from cura.OneAtATimeIterator import OneAtATimeIterator
from cura.Settings.ExtruderManager import ExtruderManager
# Settings that mark a mesh as a modifier/helper mesh rather than a printable model.
NON_PRINTING_MESH_SETTINGS = ["anti_overhang_mesh", "infill_mesh", "cutting_mesh"]
class StartJobResult(IntEnum):
    """Outcome codes produced while preparing a slice job."""
    Finished = 1
    Error = 2
    SettingError = 3
    NothingToSlice = 4
    MaterialIncompatible = 5
    BuildPlateError = 6
    ObjectSettingError = 7  # When an error occurs in per-object settings.
##  Formatter class that handles token expansion in start/end gcode.
#
#   Tokens are either a plain setting key ("{material_temp}") or a key
#   qualified with an extruder number ("{material_temp, 0}").
class GcodeStartEndFormatter(Formatter):
    def get_value(self, key, args, kwargs):  # [CodeStyle: get_value is an overridden function from the Formatter class]
        # The kwargs dictionary contains a dictionary for each stack (with a string of the extruder_nr as their key),
        # and a default_extruder_nr to use when no extruder_nr is specified
        if isinstance(key, str):
            try:
                extruder_nr = kwargs["default_extruder_nr"]
            except KeyError:
                # BUG FIX: a missing dict key raises KeyError, not ValueError;
                # the previous 'except ValueError' never matched, so formatting
                # crashed whenever default_extruder_nr was absent.
                extruder_nr = -1

            key_fragments = [fragment.strip() for fragment in key.split(',')]
            if len(key_fragments) == 2:
                try:
                    extruder_nr = int(key_fragments[1])
                except ValueError:
                    try:
                        extruder_nr = int(kwargs["-1"][key_fragments[1]])  # get extruder_nr values from the global stack
                    except (KeyError, ValueError):
                        # either the key does not exist, or the value is not an int
                        Logger.log("w", "Unable to determine stack nr '%s' for key '%s' in start/end gcode, using global stack", key_fragments[1], key_fragments[0])
            elif len(key_fragments) != 1:
                Logger.log("w", "Incorrectly formatted placeholder '%s' in start/end gcode", key)
                return "{" + str(key) + "}"

            key = key_fragments[0]
            try:
                return kwargs[str(extruder_nr)][key]
            except KeyError:
                # Leave the unexpanded token in place so the problem is visible
                # in the emitted gcode.
                Logger.log("w", "Unable to replace '%s' placeholder in start/end gcode", key)
                return "{" + key + "}"
        else:
            Logger.log("w", "Incorrectly formatted placeholder '%s' in start/end gcode", key)
            return "{" + str(key) + "}"
## Job class that builds up the message of scene data to send to CuraEngine.
class StartSliceJob(Job):
    def __init__(self, slice_message):
        super().__init__()

        self._scene = Application.getInstance().getController().getScene()
        self._slice_message = slice_message
        self._is_cancelled = False
        self._build_plate_number = None

        self._all_extruders_settings = None # cache for all setting values from all stacks (global & extruder) for the current machine

    ##  The engine message this job fills in; valid after run() finishes.
    def getSliceMessage(self):
        return self._slice_message

    ##  Select which build plate this job slices.  Must be set before run().
    def setBuildPlate(self, build_plate_number):
        self._build_plate_number = build_plate_number

    ##  Check if a stack has any errors.
    ##  returns true if it has errors, false otherwise.
    def _checkStackForErrors(self, stack):
        if stack is None:
            return False

        for key in stack.getAllKeys():
            validation_state = stack.getProperty(key, "validationState")
            if validation_state in (ValidatorState.Exception, ValidatorState.MaximumError, ValidatorState.MinimumError):
                Logger.log("w", "Setting %s is not valid, but %s. Aborting slicing.", key, validation_state)
                return True
            Job.yieldThread()
        return False

    ##  Runs the job that initiates the slicing.
    def run(self):
        # A build plate must have been selected via setBuildPlate() first.
        if self._build_plate_number is None:
            self.setResult(StartJobResult.Error)
            return

        stack = Application.getInstance().getGlobalContainerStack()
        if not stack:
            self.setResult(StartJobResult.Error)
            return

        # Don't slice if there is a setting with an error value.
        if Application.getInstance().getMachineManager().stacksHaveErrors:
            self.setResult(StartJobResult.SettingError)
            return

        if Application.getInstance().getBuildVolume().hasErrors():
            self.setResult(StartJobResult.BuildPlateError)
            return

        # Refuse to slice when any extruder holds an incompatible material.
        for extruder_stack in ExtruderManager.getInstance().getMachineExtruders(stack.getId()):
            material = extruder_stack.findContainer({"type": "material"})
            if material:
                if material.getMetaDataEntry("compatible") == False:
                    self.setResult(StartJobResult.MaterialIncompatible)
                    return

        # Don't slice if there is a per object setting with an error value.
        for node in DepthFirstIterator(self._scene.getRoot()):
            if type(node) is not CuraSceneNode or not node.isSelectable():
                continue

            if self._checkStackForErrors(node.callDecoration("getStack")):
                self.setResult(StartJobResult.ObjectSettingError)
                return

        with self._scene.getSceneLock():
            # Remove old layer data.
            for node in DepthFirstIterator(self._scene.getRoot()):
                if node.callDecoration("getLayerData") and node.callDecoration("getBuildPlateNumber") == self._build_plate_number:
                    # Stop after the first layer-data node found for this build plate.
                    node.getParent().removeChild(node)
                    break

            # Get the objects in their groups to print.
            object_groups = []
            if stack.getProperty("print_sequence", "value") == "one_at_a_time":
                # One group per printable object (plus its children), in an
                # order the OneAtATimeIterator deems collision-free.
                for node in OneAtATimeIterator(self._scene.getRoot()):
                    temp_list = []

                    # Node can't be printed, so don't bother sending it.
                    if getattr(node, "_outside_buildarea", False):
                        continue

                    # Filter on current build plate
                    build_plate_number = node.callDecoration("getBuildPlateNumber")
                    if build_plate_number is not None and build_plate_number != self._build_plate_number:
                        continue

                    children = node.getAllChildren()
                    children.append(node)
                    for child_node in children:
                        if type(child_node) is CuraSceneNode and child_node.getMeshData() and child_node.getMeshData().getVertices() is not None:
                            temp_list.append(child_node)

                    if temp_list:
                        object_groups.append(temp_list)
                    Job.yieldThread()
                if len(object_groups) == 0:
                    Logger.log("w", "No objects suitable for one at a time found, or no correct order found")
            else:
                # "All at once": a single group with every sliceable mesh on
                # this build plate.
                temp_list = []
                has_printing_mesh = False
                for node in DepthFirstIterator(self._scene.getRoot()):
                    if node.callDecoration("isSliceable") and type(node) is CuraSceneNode and node.getMeshData() and node.getMeshData().getVertices() is not None:
                        per_object_stack = node.callDecoration("getStack")
                        is_non_printing_mesh = False
                        if per_object_stack:
                            is_non_printing_mesh = any(per_object_stack.getProperty(key, "value") for key in NON_PRINTING_MESH_SETTINGS)

                        if (node.callDecoration("getBuildPlateNumber") == self._build_plate_number):
                            # Modifier meshes are sent along even when outside
                            # the build area.
                            if not getattr(node, "_outside_buildarea", False) or is_non_printing_mesh:
                                temp_list.append(node)
                                if not is_non_printing_mesh:
                                    has_printing_mesh = True

                    Job.yieldThread()

                #If the list doesn't have any model with suitable settings then clean the list
                # otherwise CuraEngine will crash
                if not has_printing_mesh:
                    temp_list.clear()

                if temp_list:
                    object_groups.append(temp_list)

            # There are cases when there is nothing to slice. This can happen due to one at a time slicing not being
            # able to find a possible sequence or because there are no objects on the build plate (or they are outside
            # the build volume)
            if not object_groups:
                self.setResult(StartJobResult.NothingToSlice)
                return

            self._buildGlobalSettingsMessage(stack)
            self._buildGlobalInheritsStackMessage(stack)

            # Build messages for extruder stacks
            for extruder_stack in ExtruderManager.getInstance().getMachineExtruders(stack.getId()):
                self._buildExtruderMessage(extruder_stack)

            for group in object_groups:
                group_message = self._slice_message.addRepeatedMessage("object_lists")
                if group[0].getParent().callDecoration("isGroup"):
                    # Group nodes carry their own per-object settings.
                    self._handlePerObjectSettings(group[0].getParent(), group_message)
                for object in group:
                    mesh_data = object.getMeshData()
                    rot_scale = object.getWorldTransformation().getTransposed().getData()[0:3, 0:3]
                    translate = object.getWorldTransformation().getData()[:3, 3]

                    # This effectively performs a limited form of MeshData.getTransformed that ignores normals.
                    verts = mesh_data.getVertices()
                    verts = verts.dot(rot_scale)
                    verts += translate

                    # Convert from Y up axes to Z up axes. Equals a 90 degree rotation.
                    verts[:, [1, 2]] = verts[:, [2, 1]]
                    verts[:, 1] *= -1

                    obj = group_message.addRepeatedMessage("objects")
                    obj.id = id(object)

                    indices = mesh_data.getIndices()
                    if indices is not None:
                        # Expand indexed geometry into a flat vertex list.
                        flat_verts = numpy.take(verts, indices.flatten(), axis=0)
                    else:
                        flat_verts = numpy.array(verts)

                    obj.vertices = flat_verts

                    self._handlePerObjectSettings(object, obj)

                    Job.yieldThread()

        self.setResult(StartJobResult.Finished)

    def cancel(self):
        super().cancel()
        self._is_cancelled = True

    def isCancelled(self):
        return self._is_cancelled

    ##  Creates a dictionary of tokens to replace in g-code pieces.
    #
    #   This indicates what should be replaced in the start and end g-codes.
    #   \param stack The stack to get the settings from to replace the tokens
    #   with.
    #   \return A dictionary of replacement tokens to the values they should be
    #   replaced with.
    def _buildReplacementTokens(self, stack) -> dict:
        result = {}
        for key in stack.getAllKeys():
            result[key] = stack.getProperty(key, "value")
            Job.yieldThread()

        result["print_bed_temperature"] = result["material_bed_temperature"] # Renamed settings.
        result["print_temperature"] = result["material_print_temperature"]
        result["time"] = time.strftime("%H:%M:%S") #Some extra settings.
        result["date"] = time.strftime("%d-%m-%Y")
        result["day"] = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"][int(time.strftime("%w"))]

        # Expose selected per-extruder temperatures as "<key>_<position>"
        # tokens so g-code can address a specific extruder's value.
        for extruder_stack in ExtruderManager.getInstance().getMachineExtruders(Application.getInstance().getGlobalContainerStack().getId()):
            num = extruder_stack.getMetaDataEntry("position")
            for key in extruder_stack.getAllKeys():
                if extruder_stack.getProperty(key, "settable_per_extruder") == False:
                    continue
                if key in ["material_soften_temperature", "material_wipe_temperature", "material_probe_temperature",
                           "material_print_temperature"]:
                    result["%s_%s" % (key, num)] = extruder_stack.getProperty(key, "value")

        initial_extruder_stack = Application.getInstance().getExtruderManager().getUsedExtruderStacks()[0]
        initial_extruder_nr = initial_extruder_stack.getProperty("extruder_nr", "value")
        result["initial_extruder_nr"] = initial_extruder_nr

        return result

    ##  Replace setting tokens in a piece of g-code.
    #   \param value A piece of g-code to replace tokens in.
    #   \param default_extruder_nr Stack nr to use when no stack nr is specified, defaults to the global stack
    def _expandGcodeTokens(self, value: str, default_extruder_nr: int = -1):
        if not self._all_extruders_settings:
            global_stack = Application.getInstance().getGlobalContainerStack()

            # NB: keys must be strings for the string formatter
            self._all_extruders_settings = {
                "-1": self._buildReplacementTokens(global_stack)
            }

            for extruder_stack in ExtruderManager.getInstance().getMachineExtruders(global_stack.getId()):
                extruder_nr = extruder_stack.getProperty("extruder_nr", "value")
                self._all_extruders_settings[str(extruder_nr)] = self._buildReplacementTokens(extruder_stack)

        try:
            # any setting can be used as a token
            fmt = GcodeStartEndFormatter()
            settings = self._all_extruders_settings.copy()
            settings["default_extruder_nr"] = default_extruder_nr
            return str(fmt.format(value, **settings))
        except:
            # Token expansion is best-effort: fall back to the unexpanded text.
            Logger.logException("w", "Unable to do token replacement on start/end gcode")
            return str(value)

    ##  Create extruder message from stack
    def _buildExtruderMessage(self, stack):
        message = self._slice_message.addRepeatedMessage("extruders")
        message.id = int(stack.getMetaDataEntry("position"))

        settings = self._buildReplacementTokens(stack)

        # Also send the material GUID. This is a setting in fdmprinter, but we have no interface for it.
        settings["material_guid"] = stack.material.getMetaDataEntry("GUID", "")

        # Replace the setting tokens in start and end g-code.
        extruder_nr = stack.getProperty("extruder_nr", "value")
        settings["machine_extruder_start_code"] = self._expandGcodeTokens(settings["machine_extruder_start_code"], extruder_nr)
        settings["machine_extruder_end_code"] = self._expandGcodeTokens(settings["machine_extruder_end_code"], extruder_nr)

        for key, value in settings.items():
            # Do not send settings that are not settable_per_extruder.
            if not stack.getProperty(key, "settable_per_extruder"):
                continue
            setting = message.getMessage("settings").addRepeatedMessage("settings")
            setting.name = key
            setting.value = str(value).encode("utf-8")
            Job.yieldThread()

    ##  Sends all global settings to the engine.
    #
    #   The settings are taken from the global stack. This does not include any
    #   per-extruder settings or per-object settings.
    def _buildGlobalSettingsMessage(self, stack):
        settings = self._buildReplacementTokens(stack)

        #start_gcode = settings["machine_start_gcode"]
        #Pre-compute material material_bed_temp_prepend and material_print_temp_prepend
        #bed_temperature_settings = {"material_bed_temperature", "material_bed_temperature_layer_0"}
        settings["material_bed_temp_prepend"] = False #all(("{" + setting + "}" not in start_gcode for setting in bed_temperature_settings))
        #print_temperature_settings = {"material_print_temperature", "material_print_temperature_layer_0", "default_material_print_temperature", "material_initial_print_temperature", "material_final_print_temperature", "material_standby_temperature"}
        settings["material_print_temp_prepend"] = False #all(("{" + setting + "}" not in start_gcode for setting in print_temperature_settings))

        manager = Application.getInstance().getMachineManager()
        settings["printer_name"] = manager.activeDefinitionName
        settings["material_name"] = manager.activeMaterialName
        settings["quality_name"] = manager.activeQualityName

        # Expand start/end g-code against the first used extruder's stack.
        initial_extruder_stack = Application.getInstance().getExtruderManager().getUsedExtruderStacks()[0]
        initial_extruder_nr = initial_extruder_stack.getProperty("extruder_nr", "value")
        settings["machine_start_gcode"] = self._expandGcodeTokens(settings["machine_start_gcode"], initial_extruder_nr)
        settings["machine_end_gcode"] = self._expandGcodeTokens(settings["machine_end_gcode"], initial_extruder_nr)

        for key, value in settings.items(): #Add all submessages for each individual setting.
            setting_message = self._slice_message.getMessage("global_settings").addRepeatedMessage("settings")
            setting_message.name = key
            setting_message.value = str(value).encode("utf-8")
            Job.yieldThread()

    ##  Sends for some settings which extruder they should fallback to if not
    #   set.
    #
    #   This is only set for settings that have the limit_to_extruder
    #   property.
    #
    #   \param stack The global stack with all settings, from which to read the
    #   limit_to_extruder property.
    def _buildGlobalInheritsStackMessage(self, stack):
        for key in stack.getAllKeys():
            extruder = int(round(float(stack.getProperty(key, "limit_to_extruder"))))
            if extruder >= 0: #Set to a specific extruder.
                setting_extruder = self._slice_message.addRepeatedMessage("limit_to_extruder")
                setting_extruder.name = key
                setting_extruder.extruder = extruder
            Job.yieldThread()

    ##  Check if a node has per object settings and ensure that they are set correctly in the message
    #   \param node \type{SceneNode} Node to check.
    #   \param message object_lists message to put the per object settings in
    def _handlePerObjectSettings(self, node, message):
        stack = node.callDecoration("getStack")

        # Check if the node has a stack attached to it and the stack has any settings in the top container.
        if not stack:
            return

        # Check all settings for relations, so we can also calculate the correct values for dependent settings.
        top_of_stack = stack.getTop() # Cache for efficiency.
        changed_setting_keys = set(top_of_stack.getAllKeys())

        # Add all relations to changed settings as well.
        for key in top_of_stack.getAllKeys():
            instance = top_of_stack.getInstance(key)
            self._addRelations(changed_setting_keys, instance.definition.relations)
            Job.yieldThread()

        # Ensure that the engine is aware what the build extruder is.
        if stack.getProperty("machine_extruder_count", "value") > 1:
            changed_setting_keys.add("extruder_nr")

        # Get values for all changed settings
        for key in changed_setting_keys:
            setting = message.addRepeatedMessage("settings")
            setting.name = key
            extruder = int(round(float(stack.getProperty(key, "limit_to_extruder"))))

            # Check if limited to a specific extruder, but not overridden by per-object settings.
            # NOTE(review): `key` is the loop variable over changed_setting_keys,
            # so `key not in changed_setting_keys` can never be true here; the
            # limit_to_extruder redirection below looks unreachable — confirm intent.
            if extruder >= 0 and key not in changed_setting_keys:
                limited_stack = ExtruderManager.getInstance().getActiveExtruderStacks()[extruder]
            else:
                limited_stack = stack

            setting.value = str(limited_stack.getProperty(key, "value")).encode("utf-8")

            Job.yieldThread()

    ##  Recursive function to put all settings that require each other for value changes in a list
    #   \param relations_set \type{set} Set of keys (strings) of settings that are influenced
    #   \param relations list of relation objects that need to be checked.
    def _addRelations(self, relations_set, relations):
        for relation in filter(lambda r: r.role == "value" or r.role == "limit_to_extruder", relations):
            # Only follow settings influenced by this one; skip settings this
            # one merely requires.
            if relation.type == RelationType.RequiresTarget:
                continue

            relations_set.add(relation.target.key)
            self._addRelations(relations_set, relation.target.relations)
| lgpl-3.0 |
agentr13/python-phonenumbers | python/phonenumbers/data/region_UA.py | 10 | 2786 | """Auto-generated file, do not edit by hand. UA metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Numbering plan metadata for Ukraine (UA): country calling code 380, dialled
# internationally with "00" and nationally with a leading "0".
PHONE_METADATA_UA = PhoneMetadata(id='UA', country_code=380, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='[3-9]\\d{8}', possible_number_pattern='\\d{5,9}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='(?:3[1-8]|4[13-8]|5[1-7]|6[12459])\\d{7}', possible_number_pattern='\\d{5,9}', example_number='311234567'),
    mobile=PhoneNumberDesc(national_number_pattern='(?:39|50|6[36-8]|73|9[1-9])\\d{7}', possible_number_pattern='\\d{9}', example_number='391234567'),
    toll_free=PhoneNumberDesc(national_number_pattern='800\\d{6}', possible_number_pattern='\\d{9}', example_number='800123456'),
    premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{6}', possible_number_pattern='\\d{9}', example_number='900123456'),
    # Number types not in use in Ukraine carry the sentinel pattern 'NA'.
    shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voip=PhoneNumberDesc(national_number_pattern='89\\d{7}', possible_number_pattern='\\d{9}', example_number='891234567'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    preferred_international_prefix='0~0',
    national_prefix='0',
    national_prefix_for_parsing='0',
    # Formatting rules, tried in order by leading-digits match.
    number_format=[NumberFormat(pattern='([3-9]\\d)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[38]9|4(?:[45][0-5]|87)|5(?:0|6[37]|7[37])|6[36-8]|73|9[1-9]', '[38]9|4(?:[45][0-5]|87)|5(?:0|6(?:3[14-7]|7)|7[37])|6[36-8]|73|9[1-9]'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='([3-689]\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['3[1-8]2|4[13678]2|5(?:[12457]2|6[24])|6(?:[49]2|[12][29]|5[24])|8[0-8]|90', '3(?:[1-46-8]2[013-9]|52)|4(?:[1378]2|62[013-9])|5(?:[12457]2|6[24])|6(?:[49]2|[12][29]|5[24])|8[0-8]|90'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='([3-6]\\d{3})(\\d{5})', format='\\1 \\2', leading_digits_pattern=['3(?:5[013-9]|[1-46-8])|4(?:[137][013-9]|6|[45][6-9]|8[4-6])|5(?:[1245][013-9]|6[0135-9]|3|7[4-6])|6(?:[49][013-9]|5[0135-9]|[12][13-8])', '3(?:5[013-9]|[1-46-8](?:22|[013-9]))|4(?:[137][013-9]|6(?:[013-9]|22)|[45][6-9]|8[4-6])|5(?:[1245][013-9]|6(?:3[02389]|[015689])|3|7[4-6])|6(?:[49][013-9]|5[0135-9]|[12][13-8])'], national_prefix_formatting_rule='0\\1')])
| apache-2.0 |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/solvers/python/ops/least_squares.py | 121 | 4948 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solvers for linear least-squares."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def cgls(operator, rhs, tol=1e-6, max_iter=20, name="cgls"):
  r"""Conjugate gradient least squares solver.

  Solves a linear least squares problem \\(||A x - rhs||_2\\) for a single
  right-hand side, using an iterative, matrix-free algorithm where the action
  of the matrix A is represented by `operator`. The CGLS algorithm implicitly
  applies the symmetric conjugate gradient algorithm to the normal equations
  \\(A^* A x = A^* rhs\\). The iteration terminates when either
  the number of iterations exceeds `max_iter` or when the norm of the conjugate
  residual (residual of the normal equations) has been reduced to `tol` times
  its initial value, i.e.
  \\(||A^* (rhs - A x_k)|| <= tol ||A^* rhs||\\).

  Args:
    operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension on the domain of the operator,
        `shape[1]` is the dimension of the co-domain of the operator. In other
        words, if operator represents an M x N matrix A, `shape` must contain
        `[M, N]`.
      - dtype: The datatype of input to and output from `apply` and
        `apply_adjoint`.
      - apply: Callable object taking a vector `x` as input and returning a
        vector with the result of applying the operator to `x`, i.e. if
        `operator` represents matrix `A`, `apply` should return `A * x`.
      - apply_adjoint: Callable object taking a vector `x` as input and
        returning a vector with the result of applying the adjoint operator
        to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
        return `conj(transpose(A)) * x`.
    rhs: A rank-1 `Tensor` of shape `[M]` containing the right-hand size vector.
    tol: A float scalar convergence tolerance.
    max_iter: An integer giving the maximum number of iterations.
    name: A name scope for the operation.

  Returns:
    output: A namedtuple representing the final state with fields:
      - i: A scalar `int32` `Tensor`. Number of iterations executed.
      - x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
      - r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
      - p: A rank-1 `Tensor` of shape `[N]`. The next descent direction.
      - gamma: \\(||A^* r||_2^2\\)
  """
  # ephemeral class holding CGLS state.
  cgls_state = collections.namedtuple("CGLSState",
                                      ["i", "x", "r", "p", "gamma"])

  def stopping_criterion(i, state):
    # Continue while under the iteration budget and the squared
    # normal-equations residual norm is above the (squared, scaled) tolerance.
    return math_ops.logical_and(i < max_iter, state.gamma > tol)

  # TODO(rmlarsen): add preconditioning
  def cgls_step(i, state):
    # One CGLS iteration: step along p, update residual r, then build the
    # next conjugate direction from the normal-equations residual s = A^* r.
    q = operator.apply(state.p)
    alpha = state.gamma / util.l2norm_squared(q)
    x = state.x + alpha * state.p
    r = state.r - alpha * q
    s = operator.apply_adjoint(r)
    gamma = util.l2norm_squared(s)
    beta = gamma / state.gamma
    p = s + beta * state.p
    return i + 1, cgls_state(i + 1, x, r, p, gamma)

  with ops.name_scope(name):
    n = operator.shape[1:]
    rhs = array_ops.expand_dims(rhs, -1)
    s0 = operator.apply_adjoint(rhs)
    gamma0 = util.l2norm_squared(s0)
    # gamma tracks ||A^* r||^2, so the threshold is tol^2 * ||A^* rhs||^2.
    tol = tol * tol * gamma0
    x = array_ops.expand_dims(
        array_ops.zeros(
            n, dtype=rhs.dtype.base_dtype), -1)
    i = constant_op.constant(0, dtype=dtypes.int32)
    state = cgls_state(i=i, x=x, r=rhs, p=s0, gamma=gamma0)
    _, state = control_flow_ops.while_loop(stopping_criterion, cgls_step,
                                           [i, state])
    # Squeeze the singleton column dimension out before returning.
    return cgls_state(
        state.i,
        x=array_ops.squeeze(state.x),
        r=array_ops.squeeze(state.r),
        p=array_ops.squeeze(state.p),
        gamma=state.gamma)
sadig/DC2 | components/dc2-client/dc2/client/api/dc2/objects/hosts.py | 1 | 2981 | # -*- coding: utf-8 -*-
#
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import xmlrpclib
from servers import Servers
class Hosts(object):
    # Client-side accessor for (DC)2 inventory host records over XML-RPC.

    def __init__(self, rpcurl=None):
        # rpcurl: URL of the dc2 XML-RPC endpoint; also handed to the
        # Servers helper for the server-based lookups below.
        self._rpcurl = rpcurl
        self._proxy = xmlrpclib.ServerProxy(self._rpcurl, allow_none=True)

    def _first_host_for_server(self, server_rec):
        # Return the first host record whose server_id references the given
        # server record, or None when there is no server or no match.
        # (Shared by find_by_server_serial and find_by_server_mac, which were
        # previously duplicated.)
        if server_rec is None:
            return None
        host_list = self._proxy.dc2.inventory.hosts.find(
            {"server_id": server_rec["_id"]})
        if (host_list is not None and
                len(host_list) > 0 and
                host_list[0] is not None):
            return host_list[0]
        return None

    def find_by_server_serial(self, serial_no=None):
        # Look up the host attached to the server with the given serial
        # number.  Returns the host record or None.
        if serial_no is not None:
            s = Servers(self._rpcurl)
            return self._first_host_for_server(s.find_by_serial_no(serial_no))
        return None

    def find_by_server_mac(self, mac_addr=None):
        # Look up the host attached to the server owning the given MAC
        # address.  Returns the host record or None.
        if mac_addr is not None:
            s = Servers(self._rpcurl)
            return self._first_host_for_server(s.find_by_mac(mac_addr))
        return None

    def find_by_hostname(self, hostname=None, domainname=None):
        # Look up a host by its fully qualified name (hostname + domainname).
        # Returns the host record or None.
        if hostname is not None and domainname is not None:
            host_list = self._proxy.dc2.inventory.hosts.find(
                {"hostname": hostname, "domainname": domainname})
            if (host_list is not None and
                    len(host_list) > 0 and
                    host_list[0] is not None):
                return host_list[0]
        return None

    def write_network_config(self, interface_type=None, host=None):
        # Dispatch to the interface-type specific network configuration
        # writer.  Bugfix: the previous exec()-built import statement had
        # misplaced quotes ("import '\ 'write_...") and always raised a
        # SyntaxError at runtime; use importlib for an explicit, safe
        # dynamic import instead.
        import importlib
        try:
            module = importlib.import_module("interfaces.%s" % interface_type)
            module.write_host_network_configuration(host, self._rpcurl)
        except ImportError:
            print("Can't find module for %s" % interface_type)
| gpl-2.0 |
Communities-Communications/cc-odoo | addons/lunch/report/report_lunch_order.py | 341 | 2771 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class report_lunch_order(osv.osv):
    # Read-only reporting model backed by a database view (created in init()):
    # one row per (date, user, note) with the summed product price.
    _name = "report.lunch.order.line"
    _description = "Lunch Orders Statistics"
    _auto = False  # no table is created; the SQL view below provides the data
    _rec_name = 'date'
    _columns = {
        'date': fields.date('Date Order', readonly=True, select=True),
        'year': fields.char('Year', size=4, readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
            ('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
            ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'user_id': fields.many2one('res.users', 'User Name'),
        'price_total':fields.float('Total Price', readonly=True),
        'note' : fields.text('Note', readonly=True),
    }
    _order = 'date desc'

    def init(self, cr):
        # (Re)create the SQL view backing this model: aggregate lunch order
        # lines per (date, user, note) and total the joined product prices.
        tools.drop_view_if_exists(cr, 'report_lunch_order_line')
        cr.execute("""
            create or replace view report_lunch_order_line as (
               select
                   min(lo.id) as id,
                   lo.user_id as user_id,
                   lo.date as date,
                   to_char(lo.date, 'YYYY') as year,
                   to_char(lo.date, 'MM') as month,
                   to_char(lo.date, 'YYYY-MM-DD') as day,
                   lo.note as note,
                   sum(lp.price) as price_total
               from
                   lunch_order_line as lo
                   left join lunch_product as lp on (lo.product_id = lp.id)
               group by
                   lo.date,lo.user_id,lo.note
            )
            """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pilou-/ansible | lib/ansible/modules/network/nxos/nxos_config.py | 29 | 21524 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: interface maturity ('preview') and the
# team responsible for maintaining this module ('network').
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_config
extends_documentation_fragment: nxos
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Cisco NXOS configuration sections
description:
- Cisco NXOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with NXOS configuration sections in
a deterministic way. This module works with either CLI or NXAPI
transports.
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
version_added: "2.2"
replace_src:
description:
- The I(replace_src) argument provides path to the configuration file
to load into the remote system. This argument is used to replace the
entire config with a flat-file. This is used with argument I(replace)
with value I(config). This is mutually exclusive with the I(lines) and
I(src) arguments. This argument is supported on Nexus 9K device.
Use I(nxos_file_copy) module to copy the flat file to remote device and
then use the path with this argument.
version_added: "2.5"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct. replace I(config) is supported only on Nexus 9K device.
default: line
choices: ['line', 'block', 'config']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory or role root directory, if playbook is part of an
ansible role. If the directory does not exist, it is created.
type: bool
default: 'no'
version_added: "2.2"
running_config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(running_config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
aliases: ['config']
version_added: "2.4"
defaults:
description:
- The I(defaults) argument will influence how the running-config
is collected from the device. When the value is set to true,
      the command used to collect the running-config is appended with
      the all keyword. When the value is set to false, the command
      is issued without the all keyword.
type: bool
default: 'no'
version_added: "2.2"
save_when:
description:
- When changes are made to the device running-configuration, the
changes are not copied to non-volatile storage by default. Using
      this argument will change that behavior. If the argument is set to
I(always), then the running-config will always be copied to the
startup-config and the I(modified) flag will always be set to
True. If the argument is set to I(modified), then the running-config
will only be copied to the startup-config if it has changed since
the last save to startup-config. If the argument is set to
I(never), the running-config will never be copied to the
startup-config. If the argument is set to I(changed), then the running-config
will only be copied to the startup-config if the task has made a change.
I(changed) was added in Ansible 2.6.
default: never
choices: ['always', 'never', 'modified', 'changed']
version_added: "2.4"
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument
the module can generate diffs against different sources.
- When this option is configure as I(startup), the module will return
the diff of the running-config against the startup-config.
- When this option is configured as I(intended), the module will
return the diff of the running-config against the configuration
provided in the C(intended_config) argument.
- When this option is configured as I(running), the module will
return the before and after diff of the running-config with respect
to any changes made to the device configuration.
default: startup
choices: ['startup', 'intended', 'running']
version_added: "2.4"
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
version_added: "2.4"
intended_config:
description:
- The C(intended_config) provides the master configuration that
the node should conform to and is used to check the final
running-config against. This argument will not modify any settings
on the remote device and is strictly used to check the compliance
of the current device's configuration against. When specifying this
argument, the task should also modify the C(diff_against) value and
set it to I(intended).
version_added: "2.4"
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(True), if C(backup) is set
to I(false) this option will be silently ignored.
suboptions:
filename:
description:
      - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
        configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
notes:
- Abbreviated commands are NOT idempotent, see
L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
"""
EXAMPLES = """
---
- name: configure top level configuration and save it
nxos_config:
lines: hostname {{ inventory_hostname }}
save_when: modified
- name: diff the running-config against a provided config
nxos_config:
diff_against: intended
intended_config: "{{ lookup('file', 'master.cfg') }}"
- nxos_config:
lines:
- 10 permit ip 192.0.2.1/32 any log
- 20 permit ip 192.0.2.2/32 any log
- 30 permit ip 192.0.2.3/32 any log
- 40 permit ip 192.0.2.4/32 any log
- 50 permit ip 192.0.2.5/32 any log
parents: ip access-list test
before: no ip access-list test
match: exact
- nxos_config:
lines:
- 10 permit ip 192.0.2.1/32 any log
- 20 permit ip 192.0.2.2/32 any log
- 30 permit ip 192.0.2.3/32 any log
- 40 permit ip 192.0.2.4/32 any log
parents: ip access-list test
before: no ip access-list test
replace: block
- name: replace config with flat file
nxos_config:
replace_src: config.txt
replace: config
- name: for idempotency, use full-form commands
nxos_config:
lines:
# - shut
- shutdown
# parents: int eth1/1
parents: interface Ethernet1/1
- name: configurable backup path
nxos_config:
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'vlan 1', 'name default']
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'vlan 1', 'name default']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/nxos_config.2016-07-16@22:28:34
filename:
description: The name of the backup file
returned: when backup is yes and filename is not specified in backup options
type: str
sample: nxos_config.2016-07-16@22:28:34
shortname:
description: The full path to the backup file excluding the timestamp
returned: when backup is yes and filename is not specified in backup options
type: str
sample: /playbooks/ansible/backup/nxos_config
date:
description: The date extracted from the backup file name
returned: when backup is yes
type: str
sample: "2016-07-16"
time:
description: The time extracted from the backup file name
returned: when backup is yes
type: str
sample: "22:28:34"
"""
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands, get_connection
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.network.nxos.nxos import check_args as nxos_check_args
from ansible.module_utils.network.common.utils import to_list
def get_running_config(module, config=None, flags=None):
    """Return the base configuration to diff the candidate against.

    Preference order: the user-supplied ``running_config`` task argument,
    then the *config* text passed in by the caller, and finally a fresh
    fetch from the device via ``get_config``.
    """
    base = module.params['running_config']
    if base:
        return base
    if config:
        return config
    return get_config(module, flags=flags)
def get_candidate(module):
    """Build the candidate configuration text from the task arguments.

    Returns the ``config replace`` command when a flat-file replace is
    requested, otherwise the rendered ``lines`` block, otherwise the raw
    ``src`` contents, otherwise an empty string.
    """
    params = module.params
    if params['replace'] == 'config':
        # Full-config replacement from a file previously copied to the
        # device (Nexus 9K only).
        return 'config replace {0}'.format(params['replace_src'])
    if params['lines']:
        candidate_obj = NetworkConfig(indent=2)
        candidate_obj.add(params['lines'], parents=params['parents'] or list())
        return dumps(candidate_obj, 'raw')
    if params['src']:
        return params['src']
    return ''
def execute_show_commands(module, commands, output='text'):
    """Run one or more show commands on the device.

    Each command is wrapped in the request-dict shape that ``run_commands``
    expects, with the requested output encoding (``text`` by default).
    Returns the list of command responses.
    """
    requests = [{'command': command, 'output': output}
                for command in to_list(commands)]
    return run_commands(module, requests)
def save_config(module, result):
    """Persist the running-config to startup-config.

    Always marks the task as changed. In check mode the copy is skipped and
    a warning is emitted instead of touching the device.
    """
    result['changed'] = True
    if module.check_mode:
        module.warn('Skipping command `copy running-config startup-config` '
                    'due to check_mode. Configuration not copied to '
                    'non-volatile storage')
    else:
        run_commands(module, [{'command': 'copy running-config startup-config',
                               'output': 'text'}])
def main():
    """Entry point for the nxos_config Ansible module.

    Builds the argument spec, computes and pushes any required config
    changes, optionally saves running-config to startup-config, and
    reports a diff when ``ansible-playbook --diff`` is in effect.
    """
    # Nested spec for the backup_options dict argument.
    backup_spec = dict(
        filename=dict(),
        dir_path=dict(type='path')
    )
    argument_spec = dict(
        src=dict(type='path'),
        replace_src=dict(),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block', 'config']),
        running_config=dict(aliases=['config']),
        intended_config=dict(),
        defaults=dict(type='bool', default=False),
        backup=dict(type='bool', default=False),
        backup_options=dict(type='dict', options=backup_spec),
        save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'),
        diff_against=dict(choices=['running', 'startup', 'intended']),
        diff_ignore_lines=dict(type='list'),
    )
    # Add the common nxos connection/provider arguments.
    argument_spec.update(nxos_argument_spec)
    mutually_exclusive = [('lines', 'src', 'replace_src'),
                          ('parents', 'src')]
    # Cross-argument requirements (e.g. replace=config needs replace_src).
    required_if = [('match', 'strict', ['lines']),
                   ('match', 'exact', ['lines']),
                   ('replace', 'block', ['lines']),
                   ('replace', 'config', ['replace_src']),
                   ('diff_against', 'intended', ['intended_config'])]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)
    warnings = list()
    nxos_check_args(module, warnings)
    result = {'changed': False, 'warnings': warnings}
    config = None
    diff_ignore_lines = module.params['diff_ignore_lines']
    path = module.params['parents']
    connection = get_connection(module)
    contents = None
    # 'defaults: yes' adds the `all` keyword when fetching running-config.
    flags = ['all'] if module.params['defaults'] else []
    replace_src = module.params['replace_src']
    if replace_src:
        # replace_src only makes sense with a full-config replace.
        if module.params['replace'] != 'config':
            module.fail_json(msg='replace: config is required with replace_src')
    # Fetch the device config up front when needed for backup or diffing.
    if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):
        contents = get_config(module, flags=flags)
        config = NetworkConfig(indent=2, contents=contents)
        if module.params['backup']:
            result['__backup__'] = contents
    # Only compute/push changes when some config source was supplied.
    if any((module.params['src'], module.params['lines'], replace_src)):
        match = module.params['match']
        replace = module.params['replace']
        commit = not module.check_mode
        candidate = get_candidate(module)
        running = get_running_config(module, contents, flags=flags)
        if replace_src:
            # Flat-file replace: push the `config replace` command directly.
            commands = candidate.split('\n')
            result['commands'] = result['updates'] = commands
            if commit:
                load_config(module, commands, replace=replace_src)
            result['changed'] = True
        else:
            # Line/block mode: let the connection plugin compute the diff.
            try:
                response = connection.get_diff(candidate=candidate, running=running, diff_match=match, diff_ignore_lines=diff_ignore_lines, path=path,
                                               diff_replace=replace)
            except ConnectionError as exc:
                module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
            config_diff = response['config_diff']
            if config_diff:
                commands = config_diff.split('\n')
                # Wrap the diff with any before/after command lists.
                if module.params['before']:
                    commands[:0] = module.params['before']
                if module.params['after']:
                    commands.extend(module.params['after'])
                result['commands'] = commands
                result['updates'] = commands
                if commit:
                    load_config(module, commands, replace=replace_src)
                result['changed'] = True
    running_config = module.params['running_config']
    startup_config = None
    # Decide whether to copy running-config to startup-config.
    if module.params['save_when'] == 'always':
        save_config(module, result)
    elif module.params['save_when'] == 'modified':
        output = execute_show_commands(module, ['show running-config', 'show startup-config'])
        running_config = NetworkConfig(indent=2, contents=output[0], ignore_lines=diff_ignore_lines)
        startup_config = NetworkConfig(indent=2, contents=output[1], ignore_lines=diff_ignore_lines)
        # Save only when the two configs actually differ.
        if running_config.sha1 != startup_config.sha1:
            save_config(module, result)
    elif module.params['save_when'] == 'changed' and result['changed']:
        save_config(module, result)
    # Produce the --diff output if requested.
    if module._diff:
        if not running_config:
            output = execute_show_commands(module, 'show running-config')
            contents = output[0]
        else:
            contents = running_config
        # recreate the object in order to process diff_ignore_lines
        running_config = NetworkConfig(indent=2, contents=contents, ignore_lines=diff_ignore_lines)
        if module.params['diff_against'] == 'running':
            if module.check_mode:
                module.warn("unable to perform diff against running-config due to check mode")
                contents = None
            else:
                # `config` was captured before any changes were pushed.
                contents = config.config_text
        elif module.params['diff_against'] == 'startup':
            if not startup_config:
                output = execute_show_commands(module, 'show startup-config')
                contents = output[0]
            else:
                contents = startup_config.config_text
        elif module.params['diff_against'] == 'intended':
            contents = module.params['intended_config']
        if contents is not None:
            base_config = NetworkConfig(indent=2, contents=contents, ignore_lines=diff_ignore_lines)
            # Only report a diff when the configs differ.
            if running_config.sha1 != base_config.sha1:
                if module.params['diff_against'] == 'intended':
                    before = running_config
                    after = base_config
                elif module.params['diff_against'] in ('startup', 'running'):
                    before = base_config
                    after = running_config
                result.update({
                    'changed': True,
                    'diff': {'before': str(before), 'after': str(after)}
                })
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
with-git/tensorflow | tensorflow/contrib/keras/python/keras/utils/vis_utils.py | 27 | 5370 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to model visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
try:
  # pydot-ng is a fork of pydot that is better maintained.
  import pydot_ng as pydot  # pylint: disable=g-import-not-at-top
except ImportError:
  # Fall back on pydot if necessary.
  # Silence a `print` statement that occurs in case of import error,
  # by temporarily replacing sys.stdout.
  _stdout = sys.stdout
  sys.stdout = sys.stderr
  try:
    import pydot  # pylint: disable=g-import-not-at-top
  except ImportError:
    # Neither variant is available; callers must go through _check_pydot(),
    # which reports a helpful ImportError.
    pydot = None
  finally:
    # Restore sys.stdout.
    sys.stdout = _stdout
def _check_pydot():
  """Raise ImportError unless pydot and the graphviz binaries are usable."""
  try:
    # Attempt to create an image of a blank graph
    # to check the pydot/graphviz installation.
    pydot.Dot.create(pydot.Dot())
  except Exception:
    # pydot raises a generic Exception here,
    # so no specific class can be caught.
    raise ImportError('Failed to import pydot. You must install pydot'
                      ' and graphviz for `pydotprint` to work.')
def model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='TB'):
  """Convert a Keras model to dot format.

  Arguments:
      model: A Keras model instance.
      show_shapes: whether to display shape information.
      show_layer_names: whether to display layer names.
      rankdir: `rankdir` argument passed to PyDot,
          a string specifying the format of the plot:
          'TB' creates a vertical plot;
          'LR' creates a horizontal plot.

  Returns:
      A `pydot.Dot` instance representing the Keras model.
  """
  from tensorflow.contrib.keras.python.keras.layers.wrappers import Wrapper  # pylint: disable=g-import-not-at-top
  from tensorflow.contrib.keras.python.keras.models import Sequential  # pylint: disable=g-import-not-at-top

  _check_pydot()
  dot = pydot.Dot()
  dot.set('rankdir', rankdir)
  dot.set('concentrate', True)
  dot.set_node_defaults(shape='record')

  # Sequential models wrap an inner Model; plot that instead.
  if isinstance(model, Sequential):
    if not model.built:
      model.build()
    model = model.model
  layers = model.layers

  # Create graph nodes.
  for layer in layers:
    # Python object ids are used as stable, unique node identifiers.
    layer_id = str(id(layer))

    # Append a wrapped layer's label to node's label, if it exists.
    layer_name = layer.name
    class_name = layer.__class__.__name__
    if isinstance(layer, Wrapper):
      layer_name = '{}({})'.format(layer_name, layer.layer.name)
      child_class_name = layer.layer.__class__.__name__
      class_name = '{}({})'.format(class_name, child_class_name)

    # Create node's label.
    if show_layer_names:
      label = '{}: {}'.format(layer_name, class_name)
    else:
      label = class_name

    # Rebuild the label as a record-shaped table including
    # input/output shapes.
    if show_shapes:
      try:
        outputlabels = str(layer.output_shape)
      except AttributeError:
        # Layers with several output nodes have no single output_shape.
        outputlabels = 'multiple'
      if hasattr(layer, 'input_shape'):
        inputlabels = str(layer.input_shape)
      elif hasattr(layer, 'input_shapes'):
        inputlabels = ', '.join([str(ishape) for ishape in layer.input_shapes])
      else:
        inputlabels = 'multiple'
      label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels,
                                                     outputlabels)
    node = pydot.Node(layer_id, label=label)
    dot.add_node(node)

  # Connect nodes with edges.
  for layer in layers:
    layer_id = str(id(layer))
    for i, node in enumerate(layer.inbound_nodes):
      node_key = layer.name + '_ib-' + str(i)
      # Only draw edges for nodes that belong to this model's graph
      # (excludes nodes created by other models sharing these layers).
      if node_key in model.container_nodes:
        for inbound_layer in node.inbound_layers:
          inbound_layer_id = str(id(inbound_layer))
          layer_id = str(id(layer))
          dot.add_edge(pydot.Edge(inbound_layer_id, layer_id))
  return dot
def plot_model(model,
               to_file='model.png',
               show_shapes=False,
               show_layer_names=True,
               rankdir='TB'):
  """Converts a Keras model to dot format and saves it to a file.

  Arguments:
      model: A Keras model instance.
      to_file: File name of the plot image.
      show_shapes: whether to display shape information.
      show_layer_names: whether to display layer names.
      rankdir: `rankdir` argument passed to PyDot,
          a string specifying the format of the plot:
          'TB' creates a vertical plot;
          'LR' creates a horizontal plot.
  """
  graph = model_to_dot(model, show_shapes, show_layer_names, rankdir)
  ext = os.path.splitext(to_file)[1]
  # Default to PNG when the target filename carries no extension;
  # otherwise use the extension itself (without the leading dot).
  fmt = ext[1:] if ext else 'png'
  graph.write(to_file, format=fmt)
| apache-2.0 |
pigeonflight/strider-plone | docker/appengine/lib/django-1.5/tests/regressiontests/test_utils/tests.py | 44 | 22759 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.forms import EmailField, IntegerField
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.unittest import skip
from .models import Person
class SkippingTestCase(TestCase):
    """Checks that conditionally-skipped tests are still executed."""

    def test_skip_unless_db_feature(self):
        "A test that might be skipped is actually called."
        # Total hack, but it works: we just need a connection-features
        # attribute that is always truthy.
        @skipUnlessDBFeature("__class__")
        def raises_value_error():
            raise ValueError

        with self.assertRaises(ValueError):
            raises_value_error()
class AssertNumQueriesTests(TestCase):
    """Exercises assertNumQueries in its callable-wrapping form."""
    urls = 'regressiontests.test_utils.urls'

    def test_assert_num_queries(self):
        # The wrapped callable really runs, so its exception escapes
        # assertNumQueries.
        def broken():
            raise ValueError

        self.assertRaises(ValueError, self.assertNumQueries, 2, broken)

    def test_assert_num_queries_with_client(self):
        person = Person.objects.create(name='test')
        url = "/test_utils/get_person/%s/" % person.pk

        # A single GET issues exactly one query, every time.
        self.assertNumQueries(1, self.client.get, url)
        self.assertNumQueries(1, self.client.get, url)

        def hit_twice():
            self.client.get(url)
            self.client.get(url)

        self.assertNumQueries(2, hit_twice)
class AssertNumQueriesContextManagerTests(TestCase):
    """Exercises assertNumQueries used as a context manager."""
    urls = 'regressiontests.test_utils.urls'

    def test_simple(self):
        with self.assertNumQueries(0):
            pass

        with self.assertNumQueries(1):
            Person.objects.count()

        with self.assertNumQueries(2):
            Person.objects.count()
            Person.objects.count()

    def test_failure(self):
        # A wrong expected count surfaces as an AssertionError that names
        # both the actual and the expected number of queries.
        with self.assertRaises(AssertionError) as exc_info:
            with self.assertNumQueries(2):
                Person.objects.count()
        self.assertIn("1 queries executed, 2 expected", str(exc_info.exception))

        # Exceptions raised inside the block propagate unchanged.
        with self.assertRaises(TypeError):
            with self.assertNumQueries(4000):
                raise TypeError

    def test_with_client(self):
        person = Person.objects.create(name="test")
        url = "/test_utils/get_person/%s/" % person.pk

        with self.assertNumQueries(1):
            self.client.get(url)

        with self.assertNumQueries(1):
            self.client.get(url)

        with self.assertNumQueries(2):
            self.client.get(url)
            self.client.get(url)
class AssertTemplateUsedContextManagerTests(TestCase):
    """Exercises assertTemplateUsed / assertTemplateNotUsed as context managers."""

    def test_usage(self):
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/base.html')

        with self.assertTemplateUsed(template_name='template_used/base.html'):
            render_to_string('template_used/base.html')

        # Templates reached via {% include %} and {% extends %} count too.
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/include.html')

        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/extends.html')

        # Multiple renders of the same template inside one block are fine.
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/base.html')
            render_to_string('template_used/base.html')

    def test_nested_usage(self):
        with self.assertTemplateUsed('template_used/base.html'):
            with self.assertTemplateUsed('template_used/include.html'):
                render_to_string('template_used/include.html')

        with self.assertTemplateUsed('template_used/extends.html'):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/extends.html')

        with self.assertTemplateUsed('template_used/base.html'):
            with self.assertTemplateUsed('template_used/alternative.html'):
                render_to_string('template_used/alternative.html')
            render_to_string('template_used/base.html')

        # An inner "not used" block is independent of the outer "used" one.
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/extends.html')
            with self.assertTemplateNotUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')
            render_to_string('template_used/base.html')

    def test_not_used(self):
        with self.assertTemplateNotUsed('template_used/base.html'):
            pass
        with self.assertTemplateNotUsed('template_used/alternative.html'):
            pass

    def test_error_message(self):
        # The failure message starts with the missing template's name ...
        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html'):
            with self.assertTemplateUsed('template_used/base.html'):
                pass

        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html'):
            with self.assertTemplateUsed(template_name='template_used/base.html'):
                pass

        # ... and ends with the templates that actually were rendered.
        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html.*template_used/alternative\.html$'):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')

    def test_failure(self):
        # No template name at all is a TypeError ...
        with self.assertRaises(TypeError):
            with self.assertTemplateUsed():
                pass

        # ... while an empty name is an assertion failure.
        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(''):
                pass

        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(''):
                render_to_string('template_used/base.html')

        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(template_name=''):
                pass

        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')
class SaveRestoreWarningState(TestCase):
    """Sanity checks for TestCase.save_warnings_state/restore_warnings_state."""

    def test_save_restore_warnings_state(self):
        """
        Ensure save_warnings_state/restore_warnings_state work correctly.
        """
        # In reality this test could be satisfied by many broken implementations
        # of save_warnings_state/restore_warnings_state (e.g. just
        # warnings.resetwarnings()) , but it is difficult to test more.
        import warnings

        self.save_warnings_state()

        class MyWarning(Warning):
            pass

        # Add a filter that causes an exception to be thrown, so we can catch it
        warnings.simplefilter("error", MyWarning)
        self.assertRaises(Warning, lambda: warnings.warn("warn", MyWarning))

        # Now restore.
        self.restore_warnings_state()
        # After restoring, we shouldn't get an exception. But we don't want a
        # warning printed either, so we have to silence the warning.
        warnings.simplefilter("ignore", MyWarning)
        warnings.warn("warn", MyWarning)

        # Remove the filter we just added.
        self.restore_warnings_state()
class HTMLEqualTests(TestCase):
def test_html_parser(self):
from django.test.html import parse_html
element = parse_html('<div><p>Hello</p></div>')
self.assertEqual(len(element.children), 1)
self.assertEqual(element.children[0].name, 'p')
self.assertEqual(element.children[0].children[0], 'Hello')
parse_html('<p>')
parse_html('<p attr>')
dom = parse_html('<p>foo')
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.name, 'p')
self.assertEqual(dom[0], 'foo')
def test_parse_html_in_script(self):
from django.test.html import parse_html
parse_html('<script>var a = "<p" + ">";</script>');
parse_html('''
<script>
var js_sha_link='<p>***</p>';
</script>
''')
# script content will be parsed to text
dom = parse_html('''
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
''')
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
def test_self_closing_tags(self):
from django.test.html import parse_html
self_closing_tags = ('br' , 'hr', 'input', 'img', 'meta', 'spacer',
'link', 'frame', 'base', 'col')
for tag in self_closing_tags:
dom = parse_html('<p>Hello <%s> world</p>' % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], 'Hello')
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], 'world')
dom = parse_html('<p>Hello <%s /> world</p>' % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], 'Hello')
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], 'world')
def test_simple_equal_html(self):
self.assertHTMLEqual('', '')
self.assertHTMLEqual('<p></p>', '<p></p>')
self.assertHTMLEqual('<p></p>', ' <p> </p> ')
self.assertHTMLEqual(
'<div><p>Hello</p></div>',
'<div><p>Hello</p></div>')
self.assertHTMLEqual(
'<div><p>Hello</p></div>',
'<div> <p>Hello</p> </div>')
self.assertHTMLEqual(
'<div>\n<p>Hello</p></div>',
'<div><p>Hello</p></div>\n')
self.assertHTMLEqual(
'<div><p>Hello\nWorld !</p></div>',
'<div><p>Hello World\n!</p></div>')
self.assertHTMLEqual(
'<div><p>Hello\nWorld !</p></div>',
'<div><p>Hello World\n!</p></div>')
self.assertHTMLEqual(
'<p>Hello World !</p>',
'<p>Hello World\n\n!</p>')
self.assertHTMLEqual('<p> </p>', '<p></p>')
self.assertHTMLEqual('<p/>', '<p></p>')
self.assertHTMLEqual('<p />', '<p></p>')
self.assertHTMLEqual('<input checked>', '<input checked="checked">')
self.assertHTMLEqual('<p>Hello', '<p> Hello')
self.assertHTMLEqual('<p>Hello</p>World', '<p>Hello</p> World')
def test_ignore_comments(self):
self.assertHTMLEqual(
'<div>Hello<!-- this is a comment --> World!</div>',
'<div>Hello World!</div>')
def test_unequal_html(self):
self.assertHTMLNotEqual('<p>Hello</p>', '<p>Hello!</p>')
self.assertHTMLNotEqual('<p>foobar</p>', '<p>foo bar</p>')
self.assertHTMLNotEqual('<p>foo bar</p>', '<p>foo bar</p>')
self.assertHTMLNotEqual('<p>foo nbsp</p>', '<p>foo </p>')
self.assertHTMLNotEqual('<p>foo #20</p>', '<p>foo </p>')
self.assertHTMLNotEqual(
'<p><span>Hello</span><span>World</span></p>',
'<p><span>Hello</span>World</p>')
self.assertHTMLNotEqual(
'<p><span>Hello</span>World</p>',
'<p><span>Hello</span><span>World</span></p>')
def test_attributes(self):
self.assertHTMLEqual(
'<input type="text" id="id_name" />',
'<input id="id_name" type="text" />')
self.assertHTMLEqual(
'''<input type='text' id="id_name" />''',
'<input id="id_name" type="text" />')
self.assertHTMLNotEqual(
'<input type="text" id="id_name" />',
'<input type="password" id="id_name" />')
def test_complex_examples(self):
self.assertHTMLEqual(
"""<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""",
"""
<tr><th>
<label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""")
self.assertHTMLEqual(
"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""", """
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""")
def test_html_contain(self):
from django.test.html import parse_html
# equal html contains each other
dom1 = parse_html('<p>foo')
dom2 = parse_html('<p>foo</p>')
self.assertTrue(dom1 in dom2)
self.assertTrue(dom2 in dom1)
dom2 = parse_html('<div><p>foo</p></div>')
self.assertTrue(dom1 in dom2)
self.assertTrue(dom2 not in dom1)
self.assertFalse('<p>foo</p>' in dom2)
self.assertTrue('foo' in dom2)
# when a root element is used ...
dom1 = parse_html('<p>foo</p><p>bar</p>')
dom2 = parse_html('<p>foo</p><p>bar</p>')
self.assertTrue(dom1 in dom2)
dom1 = parse_html('<p>foo</p>')
self.assertTrue(dom1 in dom2)
dom1 = parse_html('<p>bar</p>')
self.assertTrue(dom1 in dom2)
def test_count(self):
from django.test.html import parse_html
# equal html contains each other one time
dom1 = parse_html('<p>foo')
dom2 = parse_html('<p>foo</p>')
self.assertEqual(dom1.count(dom2), 1)
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo</p><p>bar</p>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo foo</p><p>foo</p>')
self.assertEqual(dom2.count('foo'), 3)
dom2 = parse_html('<p class="bar">foo</p>')
self.assertEqual(dom2.count('bar'), 0)
self.assertEqual(dom2.count('class'), 0)
self.assertEqual(dom2.count('p'), 0)
self.assertEqual(dom2.count('o'), 2)
dom2 = parse_html('<p>foo</p><p>foo</p>')
self.assertEqual(dom2.count(dom1), 2)
dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<div><div><p>foo</p></div></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo<p>foo</p></p>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo<p>bar</p></p>')
self.assertEqual(dom2.count(dom1), 0)
def test_parsing_errors(self):
    """Empty-vs-nonempty comparisons fail; stray close tags don't parse."""
    from django.test.html import HTMLParseError, parse_html
    # Comparing a fragment against nothing fails in either direction.
    for lhs, rhs in (('<p>', ''), ('', '<p>')):
        with self.assertRaises(AssertionError):
            self.assertHTMLEqual(lhs, rhs)
    # A closing tag with no matching opener is not parseable at all.
    with self.assertRaises(HTMLParseError):
        parse_html('</p>')
def test_contains_html(self):
    """``assertContains(html=True)`` compares parsed HTML, not raw text."""
    response = HttpResponse('''<body>
This is a form: <form action="" method="get">
<input type="text" name="Hello" />
</form></body>''')
    # Textually the quoting and attribute order differ, so a plain
    # substring search gives the opposite answers ...
    self.assertNotContains(response, "<input name='Hello' type='text'>")
    self.assertContains(response, '<form action="" method="get">')
    # ... while semantic HTML comparison sees through those differences.
    self.assertContains(response, "<input name='Hello' type='text'>", html=True)
    self.assertNotContains(response, '<form action="" method="get">', html=True)
    # Unparseable content on either side surfaces as an AssertionError.
    invalid_response = HttpResponse('''<body <bad>>''')
    with self.assertRaises(AssertionError):
        self.assertContains(invalid_response, '<p></p>')
    with self.assertRaises(AssertionError):
        self.assertContains(response, '<p "whats" that>')
def test_unicode_handling(self):
    """HTML comparison copes with non-ASCII content in the response."""
    from django.http import HttpResponse
    payload = '<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>'
    response = HttpResponse(payload)
    self.assertContains(response, payload, html=True)
class XMLEqualTests(TestCase):
    """Behavior of assertXMLEqual()/assertXMLNotEqual()."""

    def test_simple_equal(self):
        self.assertXMLEqual("<elem attr1='a' attr2='b' />",
                            "<elem attr1='a' attr2='b' />")

    def test_simple_equal_unordered(self):
        # Attribute order is irrelevant for XML equality.
        self.assertXMLEqual("<elem attr1='a' attr2='b' />",
                            "<elem attr2='b' attr1='a' />")

    def test_simple_equal_raise(self):
        # A missing attribute makes the documents unequal.
        with self.assertRaises(AssertionError):
            self.assertXMLEqual("<elem attr1='a' />",
                                "<elem attr2='b' attr1='a' />")

    def test_simple_not_equal(self):
        # Differing attribute values are reported as not equal.
        self.assertXMLNotEqual("<elem attr1='a' attr2='c' />",
                               "<elem attr1='a' attr2='b' />")

    def test_simple_not_equal_raise(self):
        # Same document modulo attribute order still counts as equal,
        # so asserting inequality fails.
        with self.assertRaises(AssertionError):
            self.assertXMLNotEqual("<elem attr1='a' attr2='b' />",
                                   "<elem attr2='b' attr1='a' />")

    def test_parsing_errors(self):
        xml_unvalid = "<elem attr1='a attr2='b' />"
        xml2 = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLNotEqual(xml_unvalid, xml2)

    def test_comment_root(self):
        # Differing top-level comments do not affect equality.
        self.assertXMLEqual(
            "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />",
            "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />")
class SkippingExtraTests(TestCase):
    """Verify that fixtures are not loaded for a skipped test.

    Fixture loading is detected indirectly: any loading would issue SQL
    queries, so the test run is wrapped in ``assertNumQueries(0)``.
    """
    fixtures = ['should_not_be_loaded.json']

    # HACK: This depends on internals of our TestCase subclasses
    def __call__(self, result=None):
        # Detect fixture loading by counting SQL queries, should be zero
        with self.assertNumQueries(0):
            super(SkippingExtraTests, self).__call__(result)

    @skip("Fixture loading should not be performed for skipped tests.")
    def test_fixtures_are_skipped(self):
        pass
class AssertRaisesMsgTest(SimpleTestCase):
    """Message matching in assertRaisesMessage()."""

    def test_special_re_chars(self):
        """assertRaisesMessage shouldn't interpret RE special chars."""
        message = "[.*x+]y?"

        # The raised message is matched literally, even though it is full
        # of regular-expression metacharacters.
        def boom():
            raise ValueError(message)

        self.assertRaisesMessage(ValueError, "[.*x+]y?", boom)
class AssertFieldOutputTests(SimpleTestCase):
    """Behavior of assertFieldOutput() for valid/invalid form-field data."""

    def test_assert_field_output(self):
        error_invalid = ['Enter a valid email address.']
        # Matching valid output and matching error list: passes.
        self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid})
        # Extra expected error, wrong cleaned value, or a different error
        # message must each be reported as a failure.
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'},
                                   {'aaa': error_invalid + ['Another error']})
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(EmailField, {'a@a.com': 'Wrong output'},
                                   {'aaa': error_invalid})
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(
                EmailField, {'a@a.com': 'a@a.com'},
                {'aaa': ['Come on, gimme some well formatted data, dude.']})

    def test_custom_required_message(self):
        # Fields may override default error messages; assertFieldOutput
        # honors the override.
        class MyCustomField(IntegerField):
            default_error_messages = {
                'required': 'This is really required.',
            }
        self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
__test__ = {"API_TEST": r"""
# Some checks of the doctest output normalizer.
# Standard doctests do fairly
>>> import json
>>> from django.utils.xmlutils import SimplerXMLGenerator
>>> from django.utils.six import StringIO
>>> def produce_json():
... return json.dumps(['foo', {'bar': ('baz', None, 1.0, 2), 'whiz': 42}])
>>> def produce_xml():
... stream = StringIO()
... xml = SimplerXMLGenerator(stream, encoding='utf-8')
... xml.startDocument()
... xml.startElement("foo", {"aaa" : "1.0", "bbb": "2.0"})
... xml.startElement("bar", {"ccc" : "3.0"})
... xml.characters("Hello")
... xml.endElement("bar")
... xml.startElement("whiz", {})
... xml.characters("Goodbye")
... xml.endElement("whiz")
... xml.endElement("foo")
... xml.endDocument()
... return stream.getvalue()
>>> def produce_xml_fragment():
... stream = StringIO()
... xml = SimplerXMLGenerator(stream, encoding='utf-8')
... xml.startElement("foo", {"aaa": "1.0", "bbb": "2.0"})
... xml.characters("Hello")
... xml.endElement("foo")
... xml.startElement("bar", {"ccc": "3.0", "ddd": "4.0"})
... xml.endElement("bar")
... return stream.getvalue()
# JSON output is normalized for field order, so it doesn't matter
# which order json dictionary attributes are listed in output
>>> produce_json()
'["foo", {"bar": ["baz", null, 1.0, 2], "whiz": 42}]'
>>> produce_json()
'["foo", {"whiz": 42, "bar": ["baz", null, 1.0, 2]}]'
# XML output is normalized for attribute order, so it doesn't matter
# which order XML element attributes are listed in output
>>> produce_xml()
'<?xml version="1.0" encoding="UTF-8"?>\n<foo aaa="1.0" bbb="2.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>'
>>> produce_xml()
'<?xml version="1.0" encoding="UTF-8"?>\n<foo bbb="2.0" aaa="1.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>'
>>> produce_xml_fragment()
'<foo aaa="1.0" bbb="2.0">Hello</foo><bar ccc="3.0" ddd="4.0"></bar>'
>>> produce_xml_fragment()
'<foo bbb="2.0" aaa="1.0">Hello</foo><bar ddd="4.0" ccc="3.0"></bar>'
"""}
# On Python 2 the normalizer also equates long literals with plain ints
# (42L vs 42), so extend the doctest suite only when not on Python 3,
# where the 42L syntax does not even parse.
if not six.PY3:
    __test__["API_TEST"] += """
>>> def produce_long():
...     return 42L

>>> def produce_int():
...     return 42

# Long values are normalized and are comparable to normal integers ...
>>> produce_long()
42

# ... and vice versa
>>> produce_int()
42L
"""
| mit |
alexandrucoman/vbox-nova-driver | nova/tests/unit/pci/test_utils.py | 28 | 2329 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova.pci import utils
from nova import test
class PciDeviceMatchTestCase(test.NoDBTestCase):
    """Tests for utils.pci_device_prop_match()."""

    def setUp(self):
        super(PciDeviceMatchTestCase, self).setUp()
        self.fake_pci_1 = {'vendor_id': 'v1', 'device_id': 'd1'}

    def test_single_spec_match(self):
        # One spec whose keys all match the device accepts it.
        specs = [{'vendor_id': 'v1', 'device_id': 'd1'}]
        self.assertTrue(utils.pci_device_prop_match(self.fake_pci_1, specs))

    def test_multiple_spec_match(self):
        # Matching any one spec in the list is sufficient.
        specs = [{'vendor_id': 'v1', 'device_id': 'd1'},
                 {'vendor_id': 'v3', 'device_id': 'd3'}]
        self.assertTrue(utils.pci_device_prop_match(self.fake_pci_1, specs))

    def test_spec_dismatch(self):
        # No spec matches the device: rejected.
        specs = [{'vendor_id': 'v4', 'device_id': 'd4'},
                 {'vendor_id': 'v3', 'device_id': 'd3'}]
        self.assertFalse(utils.pci_device_prop_match(self.fake_pci_1, specs))

    def test_spec_extra_key(self):
        # A spec key the device lacks prevents a match.
        specs = [{'vendor_id': 'v1', 'device_id': 'd1', 'wrong_key': 'k1'}]
        self.assertFalse(utils.pci_device_prop_match(self.fake_pci_1, specs))
class PciDeviceAddressParserTestCase(test.NoDBTestCase):
    """Tests for utils.parse_address()."""

    def test_parse_address(self):
        # Use a local variable rather than storing the parser output on
        # ``self``: test state should not leak onto the instance.
        result = utils.parse_address("0000:04:12.6")
        self.assertEqual(result, ('0000', '04', '12', '6'))

    def test_parse_address_wrong(self):
        # Separators swapped (':' vs '.') must be rejected.
        self.assertRaises(exception.PciDeviceWrongAddressFormat,
                          utils.parse_address, "0000:04.12:6")

    def test_parse_address_invalid_character(self):
        # A non-hex digit in the bus field must be rejected.
        self.assertRaises(exception.PciDeviceWrongAddressFormat,
                          utils.parse_address, "0000:h4.12:6")
| apache-2.0 |
Big-B702/python-for-android | python-modules/zope/zope/interface/_flatten.py | 50 | 1112 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Adapter-style interface registry
See Adapter class.
$Id: _flatten.py 110536 2010-04-06 02:59:44Z tseaver $
"""
from zope.interface import Declaration
def _flatten(implements, include_None=0):
try:
r = implements.flattened()
except AttributeError:
if implements is None:
r=()
else:
r = Declaration(implements).flattened()
if not include_None:
return r
r = list(r)
r.append(None)
return r
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.