code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from . import test_stock_landed_costs_purchase
|
chienlieu2017/it_management
|
odoo/addons/stock_landed_costs/tests/__init__.py
|
Python
|
gpl-3.0
| 72
|
try:
    import serial # Python2
except ImportError:
    # NOTE(review): pyserial's module is called 'serial' on Python 3 as well;
    # 'serial3' looks wrong, and a star import would not define the 'serial'
    # name used below -- confirm this fallback is ever exercised.
    from serial3 import * # Python3
from nupic.frameworks.opf.modelfactory import ModelFactory
import os,sys
# Module-level serial connection to the sensor board at 9600 baud.
ser = serial.Serial('/dev/ttyACM0', 9600)
def get_online(number_of_records=20):# 0 means forever
    """Stream comma-separated sensor lines from the serial port through the
    saved NuPIC model and write each prediction back to the board.

    number_of_records -- stop after this many lines; 0 means run forever.
    """
    model = ModelFactory.loadFromCheckpoint(os.getcwd() + "/model_save")
    count = 0
    ser.flushInput()  # drop any stale bytes buffered before we started
    while (count < number_of_records) or (number_of_records == 0):
        count += 1
        text = ser.readline()
        # Split once per line instead of once per field (was 5 splits/line).
        fields = text.split(",")
        if len(fields) == 4:
            result = model.run({
                "s1": float(fields[0]),
                "s2": float(fields[1]),
                "s3": float(fields[2]),
                "s4": float(fields[3])
            })
            # presumably the 4-steps-ahead prediction -- confirm against the
            # model parameters used to train model_save
            prediction = int(result.inferences['multiStepBestPredictions'][4])
            sys.stdout.write("\r" + str(prediction))
            sys.stdout.write("\t" + text)
            ser.write(str(prediction) + '\n')
|
lmaag182/nupic_physical
|
online.py
|
Python
|
agpl-3.0
| 994
|
"""
WSGI config for print_web_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "print_web_django.settings")
application = get_wsgi_application()
|
aabmass/print-web
|
print_web_django/print_web_django/wsgi.py
|
Python
|
mit
| 410
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import admin_actions
from nova.compute import vm_states
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
class AdminActionsPolicyTest(base.BasePolicyTest):
    """Test Admin Actions APIs policies with all possible context.

    This class defines the set of context with different roles
    which are allowed and not allowed to pass the policy checks.
    With those set of context, it will call the API operation and
    verify the expected behaviour.
    """

    def setUp(self):
        super(AdminActionsPolicyTest, self).setUp()
        self.controller = admin_actions.AdminActionsController()
        self.req = fakes.HTTPRequest.blank('')
        # Patch compute API 'get' so the controller always resolves the fake
        # instance below instead of hitting the database.
        self.mock_get = self.useFixture(
            fixtures.MockPatch('nova.compute.api.API.get')).mock
        uuid = uuids.fake_id
        # An ACTIVE instance with no task in progress, owned by the
        # project-member context.
        self.instance = fake_instance.fake_instance_obj(
            self.project_member_context,
            id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
            task_state=None, launched_at=timeutils.utcnow())
        self.mock_get.return_value = self.instance
        # Check that admin is able to change the service
        self.admin_authorized_contexts = [
            self.legacy_admin_context, self.system_admin_context,
            self.project_admin_context]
        # Check that non-admin is not able to change the service
        self.admin_unauthorized_contexts = [
            self.system_member_context, self.system_reader_context,
            self.system_foo_context, self.project_member_context,
            self.other_project_member_context,
            self.project_foo_context, self.project_reader_context
        ]

    @mock.patch('nova.objects.Instance.save')
    def test_reset_state_policy(self, mock_save):
        """Only the admin contexts may reset a server's state."""
        rule_name = "os_compute_api:os-admin-actions:reset_state"
        self.common_policy_check(self.admin_authorized_contexts,
                                 self.admin_unauthorized_contexts,
                                 rule_name, self.controller._reset_state,
                                 self.req, self.instance.uuid,
                                 body={'os-resetState': {'state': 'active'}})

    def test_inject_network_info_policy(self):
        """Only the admin contexts may inject network info into a server."""
        rule_name = "os_compute_api:os-admin-actions:inject_network_info"
        with mock.patch.object(self.controller.compute_api,
                               "inject_network_info"):
            self.common_policy_check(self.admin_authorized_contexts,
                                     self.admin_unauthorized_contexts,
                                     rule_name,
                                     self.controller._inject_network_info,
                                     self.req, self.instance.uuid, body={})

    def test_reset_network_policy(self):
        """Only the admin contexts may reset a server's network."""
        rule_name = "os_compute_api:os-admin-actions:reset_network"
        with mock.patch.object(self.controller.compute_api, "reset_network"):
            self.common_policy_check(self.admin_authorized_contexts,
                                     self.admin_unauthorized_contexts,
                                     rule_name, self.controller._reset_network,
                                     self.req, self.instance.uuid, body={})
class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
    """Test Admin Actions APIs policies with system scope enabled.

    This class set the nova.conf [oslo_policy] enforce_scope to True
    so that we can switch on the scope checking on oslo policy side.
    It defines the set of context with scopped token
    which are allowed and not allowed to pass the policy checks.
    With those set of context, it will run the API operation and
    verify the expected behaviour.
    """

    def setUp(self):
        super(AdminActionsScopeTypePolicyTest, self).setUp()
        # With scope enforcement on, only system-scoped admin tokens pass;
        # the inherited test methods run against these narrower lists.
        self.flags(enforce_scope=True, group="oslo_policy")
        # Check that system admin is able to perform the system level actions
        # on server.
        self.admin_authorized_contexts = [
            self.system_admin_context]
        # Check that non-system or non-admin is not able to perform the system
        # level actions on server.
        self.admin_unauthorized_contexts = [
            self.legacy_admin_context, self.system_member_context,
            self.system_reader_context, self.system_foo_context,
            self.project_admin_context, self.project_member_context,
            self.other_project_member_context,
            self.project_foo_context, self.project_reader_context
        ]
|
rahulunair/nova
|
nova/tests/unit/policies/test_admin_actions.py
|
Python
|
apache-2.0
| 5,313
|
#!/usr/bin/env python
#
# Beautiful Capi generates beautiful C API wrappers for your C++ classes
# Copyright (C) 2015 Petr Petrovich Petrov
#
# This file is part of Beautiful Capi.
#
# Beautiful Capi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Beautiful Capi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Beautiful Capi. If not, see <http://www.gnu.org/licenses/>.
#
from FileGenerator import FileGenerator
from FileCache import FileCache
# Empty base class for all type generator classes just for annotation usage
class BaseTypeGenerator(object):
    """Interface stub shared by all type generators.

    Every method is a no-op returning None; concrete generators (e.g.
    BuiltinTypeGenerator below) override the parts they need.  The methods
    annotated ``([str], str)`` return a pair of (code lines to emit,
    expression naming the result).
    """

    def wrap_argument_declaration(self) -> str:
        pass

    def c_argument_declaration(self) -> str:
        pass

    def c_2_wrap_var(self, result_var: str, expression: str) -> ([str], str):
        pass

    def wrap_2_c_var(self, result_var: str, expression: str) -> ([str], str):
        pass

    def c_2_implementation(self, expression: str) -> str:
        pass

    def c_2_implementation_pointer(self, expression: str) -> str:
        pass

    def snippet_implementation_declaration(self) -> str:
        pass

    def implementation_2_c_var(self, result_var: str, expression: str) -> ([str], str):
        pass

    def include_dependent_declaration_headers(self, file_generator: FileGenerator, file_cache: FileCache):
        pass

    def include_dependent_definition_headers(self, file_generator: FileGenerator, file_cache: FileCache):
        pass

    @staticmethod
    def dependent_implementation_headers():
        # No extra headers are needed by default.
        return []
class BuiltinTypeGenerator(BaseTypeGenerator):
    """Generates C wrapper code for built-in types (int, double, void, ...).

    The *_var methods return a pair: (list of code lines to emit,
    expression naming the result).
    """

    def __init__(self, type_name: str):
        # An empty type_name is treated the same as 'void' (see is_void).
        self.type_name = type_name
        # Optional format template applied when converting a C expression to
        # an implementation-side expression; '' means pass through unchanged.
        self.c_2_impl = ''

    @property
    def is_void(self):
        """True when this type carries no value ('void' or empty name)."""
        # was: 'return True if ... else False' -- the condition already
        # evaluates to a bool.
        return self.type_name == 'void' or not self.type_name

    @staticmethod
    def _declare_var(type_name: str, result_var: str, expression: str) -> ([str], str):
        """Shared declaration builder used by the three *_var conversions.

        Emits '<type_name> <result_var>(<expression>);' when a result
        variable is requested; otherwise emits nothing and returns the raw
        expression for inline use.
        """
        if result_var:
            return ['{type_name} {result_var}({expression});'.format(
                type_name=type_name,
                result_var=result_var,
                expression=expression)], result_var
        return [], expression

    def wrap_argument_declaration(self) -> str:
        return '{type_name}'.format(type_name='void' if self.is_void else self.type_name)

    def wrap_return_type(self) -> str:
        return self.wrap_argument_declaration()

    def c_argument_declaration(self) -> str:
        return self.wrap_argument_declaration()

    def c_2_wrap_var(self, result_var: str, expression: str) -> ([str], str):
        if self.is_void:
            # No value to capture: emit the expression as a bare statement.
            return [expression + ';'], ''
        return self._declare_var(self.wrap_return_type(), result_var, expression)

    def wrap_2_c_var(self, result_var: str, expression: str) -> ([str], str):
        # For built-in types the wrap->C conversion is identical to impl->C.
        return self.implementation_2_c_var(result_var, expression)

    def c_2_implementation(self, expression: str) -> str:
        cur_c_2_impl = self.c_2_impl
        if not cur_c_2_impl:
            cur_c_2_impl = '{expression}'
        return cur_c_2_impl.format(
            implementation_type=self.snippet_implementation_declaration(),
            expression=expression)

    def c_2_implementation_var(self, result_var: str, expression: str) -> ([str], str):
        expression = self.c_2_implementation(expression)
        if self.is_void:
            return [expression + ';'], ''
        return self._declare_var(self.snippet_implementation_declaration(),
                                 result_var, expression)

    def snippet_implementation_declaration(self) -> str:
        return 'void' if self.is_void else self.type_name

    def implementation_2_c_var(self, result_var: str, expression: str) -> ([str], str):
        if self.is_void:
            return [expression + ';'], ''
        return self._declare_var(self.type_name, result_var, expression)

    def generate_c_default_return_value(self, out: FileGenerator):
        """Write a 'return static_cast<T>(0);' line for non-void types."""
        if not self.is_void:
            out.put_line('return static_cast<{type_name}>(0);'.format(type_name=self.type_name))
|
PetrPPetrov/beautiful-capi
|
source/BuiltinTypeGenerator.py
|
Python
|
gpl-3.0
| 4,832
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Delete the Friend_Relationship model and its links.

    Constraints and foreign-key fields are removed first so the model
    itself can be dropped cleanly at the end.
    """

    dependencies = [
        ('course_selection', '0018_auto_20150830_0319'),
    ]

    operations = [
        # Drop the unique-together constraint before removing the fields
        # it spans.
        migrations.AlterUniqueTogether(
            name='friend_relationship',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='friend_relationship',
            name='from_user',
        ),
        migrations.RemoveField(
            model_name='friend_relationship',
            name='to_user',
        ),
        # presumably 'friends' pointed through the relationship model --
        # confirm against the 0018 migration / original models.
        migrations.RemoveField(
            model_name='nice_user',
            name='friends',
        ),
        migrations.DeleteModel(
            name='Friend_Relationship',
        ),
    ]
|
maximz/recal
|
course_selection/migrations/0019_auto_20150903_0458.py
|
Python
|
mit
| 809
|
"""
OpenWebRX csdr plugin: do the signal processing with csdr
This file is part of OpenWebRX,
an open-source SDR receiver software with a web UI.
Copyright (c) 2013-2015 by Andras Retzler <randras@sdr.hu>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import subprocess
import time
import os
import code
import signal
import fcntl
class dsp:
    """Builds and supervises 'csdr' shell pipelines for one receiver channel.

    The actual signal processing is done by external csdr processes chained
    together with shell pipes; runtime-tunable parameters (frequency shift,
    bandpass, squelch, s-meter) are exchanged through named FIFOs created
    under /tmp.
    NOTE(review): this is Python 2 code (print statements).
    """

    def __init__(self):
        self.samp_rate = 250000
        self.output_rate = 11025 #this is default, and cannot be set at the moment
        self.fft_size = 1024
        self.fft_fps = 5
        self.offset_freq = 0
        self.low_cut = -4000
        self.high_cut = 4000
        self.bpf_transition_bw = 320 #Hz, and this is a constant
        self.ddc_transition_bw_rate = 0.15 # of the IF sample rate
        self.running = False
        self.secondary_processes_running = False
        self.audio_compression = "none"
        self.fft_compression = "none"
        self.demodulator = "nfm"
        self.name = "csdr"
        self.format_conversion = "csdr convert_u8_f"
        self.base_bufsize = 512
        self.nc_port = 4951
        self.csdr_dynamic_bufsize = False
        self.csdr_print_bufsizes = False
        self.csdr_through = False
        self.squelch_level = 0
        self.fft_averages = 50
        self.iqtee = False
        self.iqtee2 = False
        self.secondary_demodulator = None
        self.secondary_fft_size = 1024
        self.secondary_process_fft = None
        self.secondary_process_demod = None
        # FIFO attribute names; try_create_pipes() sets each attribute to a
        # filesystem path (if referenced by the command) or None.
        self.pipe_names=["bpf_pipe", "shift_pipe", "squelch_pipe", "smeter_pipe", "iqtee_pipe", "iqtee2_pipe"]
        self.secondary_pipe_names=["secondary_shift_pipe"]
        self.secondary_offset_freq = 1000

    def chain(self,which):
        """Return the shell command template for 'fft' or a demodulator chain.

        The returned string still contains {placeholders}; start() fills
        them in with str.format().
        """
        any_chain_base="nc -v 127.0.0.1 {nc_port} | "
        if self.csdr_dynamic_bufsize: any_chain_base+="csdr setbuf {start_bufsize} | "
        if self.csdr_through: any_chain_base+="csdr through | "
        any_chain_base+=self.format_conversion+(" | " if self.format_conversion!="" else "") ##"csdr flowcontrol {flowcontrol} auto 1.5 10 | "
        if which == "fft":
            fft_chain_base = any_chain_base+"csdr fft_cc {fft_size} {fft_block_size} | " + \
                ("csdr logpower_cf -70 | " if self.fft_averages == 0 else "csdr logaveragepower_cf -70 {fft_size} {fft_averages} | ") + \
                "csdr fft_exchange_sides_ff {fft_size}"
            if self.fft_compression=="adpcm":
                return fft_chain_base+" | csdr compress_fft_adpcm_f_u8 {fft_size}"
            else:
                return fft_chain_base
        # Common audio front end: shift to baseband, decimate, bandpass,
        # squelch with s-meter output.
        chain_begin=any_chain_base+"csdr shift_addition_cc --fifo {shift_pipe} | csdr fir_decimate_cc {decimation} {ddc_transition_bw} HAMMING | csdr bandpass_fir_fft_cc --fifo {bpf_pipe} {bpf_transition_bw} HAMMING | csdr squelch_and_smeter_cc --fifo {squelch_pipe} --outfifo {smeter_pipe} 5 1 | "
        if self.secondary_demodulator:
            # Tee the IF stream out for the secondary (digimode) chains.
            chain_begin+="csdr tee {iqtee_pipe} | "
            chain_begin+="csdr tee {iqtee2_pipe} | "
        chain_end = ""
        if self.audio_compression=="adpcm":
            chain_end = " | csdr encode_ima_adpcm_i16_u8"
        if which == "nfm": return chain_begin + "csdr fmdemod_quadri_cf | csdr limit_ff | csdr old_fractional_decimator_ff {last_decimation} | csdr deemphasis_nfm_ff 11025 | csdr fastagc_ff 1024 | csdr convert_f_s16"+chain_end
        elif which == "am": return chain_begin + "csdr amdemod_cf | csdr fastdcblock_ff | csdr old_fractional_decimator_ff {last_decimation} | csdr agc_ff | csdr limit_ff | csdr convert_f_s16"+chain_end
        elif which == "ssb": return chain_begin + "csdr realpart_cf | csdr old_fractional_decimator_ff {last_decimation} | csdr agc_ff | csdr limit_ff | csdr convert_f_s16"+chain_end

    def secondary_chain(self, which):
        """Return the command template for a secondary (digimode) chain.

        Reads IF samples from a tee FIFO rather than the network source.
        Returns None for unknown 'which' values other than 'fft'/'bpsk31'.
        """
        secondary_chain_base="cat {input_pipe} | "
        if which == "fft":
            return secondary_chain_base+"csdr realpart_cf | csdr fft_fc {secondary_fft_input_size} {secondary_fft_block_size} | csdr logpower_cf -70 " + (" | csdr compress_fft_adpcm_f_u8 {secondary_fft_size}" if self.fft_compression=="adpcm" else "")
        elif which == "bpsk31":
            return secondary_chain_base + "csdr shift_addition_cc --fifo {secondary_shift_pipe} | " + \
                "csdr bandpass_fir_fft_cc $(csdr '=-(31.25)/{if_samp_rate}') $(csdr '=(31.25)/{if_samp_rate}') $(csdr '=31.25/{if_samp_rate}') | " + \
                "csdr simple_agc_cc 0.001 0.5 | " + \
                "csdr timing_recovery_cc GARDNER {secondary_samples_per_bits} 0.5 2 --add_q | " + \
                "CSDR_FIXED_BUFSIZE=1 csdr dbpsk_decoder_c_u8 | " + \
                "CSDR_FIXED_BUFSIZE=1 csdr psk31_varicode_decoder_u8_u8"

    def set_secondary_demodulator(self, what):
        self.secondary_demodulator = what

    def secondary_fft_block_size(self):
        return (self.samp_rate/self.decimation)/(self.fft_fps*2) #*2 is there because we do FFT on real signal here

    def secondary_decimation(self):
        return 1 #currently unused

    def secondary_bpf_cutoff(self):
        # 31.25 baud BPSK31: half the symbol rate, normalized to IF rate.
        if self.secondary_demodulator == "bpsk31":
            return (31.25/2) / self.if_samp_rate()
        return 0

    def secondary_bpf_transition_bw(self):
        if self.secondary_demodulator == "bpsk31":
            return (31.25/2) / self.if_samp_rate()
        return 0

    def secondary_samples_per_bits(self):
        # &~3 rounds down to a multiple of 4 for the timing recovery stage.
        if self.secondary_demodulator == "bpsk31":
            return int(round(self.if_samp_rate()/31.25))&~3
        return 0

    def secondary_bw(self):
        # NOTE(review): implicitly returns None for non-bpsk31 demodulators.
        if self.secondary_demodulator == "bpsk31":
            return 31.25

    def start_secondary_demodulator(self):
        """Spawn the secondary FFT and demodulator subprocesses, if configured."""
        if(not self.secondary_demodulator): return
        print "[openwebrx] starting secondary demodulator from IF input sampled at %d"%self.if_samp_rate()
        secondary_command_fft=self.secondary_chain("fft")
        secondary_command_demod=self.secondary_chain(self.secondary_demodulator)
        # Concatenated commands are scanned for {pipe} placeholders so only
        # the FIFOs actually referenced get created.
        self.try_create_pipes(self.secondary_pipe_names, secondary_command_demod + secondary_command_fft)
        secondary_command_fft=secondary_command_fft.format( \
            input_pipe=self.iqtee_pipe, \
            secondary_fft_input_size=self.secondary_fft_size, \
            secondary_fft_size=self.secondary_fft_size, \
            secondary_fft_block_size=self.secondary_fft_block_size(), \
            )
        secondary_command_demod=secondary_command_demod.format( \
            input_pipe=self.iqtee2_pipe, \
            secondary_shift_pipe=self.secondary_shift_pipe, \
            secondary_decimation=self.secondary_decimation(), \
            secondary_samples_per_bits=self.secondary_samples_per_bits(), \
            secondary_bpf_cutoff=self.secondary_bpf_cutoff(), \
            secondary_bpf_transition_bw=self.secondary_bpf_transition_bw(), \
            if_samp_rate=self.if_samp_rate()
            )
        print "[openwebrx-dsp-plugin:csdr] secondary command (fft) =", secondary_command_fft
        print "[openwebrx-dsp-plugin:csdr] secondary command (demod) =", secondary_command_demod
        #code.interact(local=locals())
        my_env=os.environ.copy()
        #if self.csdr_dynamic_bufsize: my_env["CSDR_DYNAMIC_BUFSIZE_ON"]="1";
        if self.csdr_print_bufsizes: my_env["CSDR_PRINT_BUFSIZES"]="1";
        # setpgrp puts each shell pipeline in its own process group so
        # stop_secondary_demodulator() can kill the whole group at once.
        self.secondary_process_fft = subprocess.Popen(secondary_command_fft, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setpgrp, env=my_env)
        print "[openwebrx-dsp-plugin:csdr] Popen on secondary command (fft)"
        self.secondary_process_demod = subprocess.Popen(secondary_command_demod, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setpgrp, env=my_env) #TODO digimodes
        print "[openwebrx-dsp-plugin:csdr] Popen on secondary command (demod)" #TODO digimodes
        self.secondary_processes_running = True
        #open control pipes for csdr and send initialization data
        # print "==========> 1"
        if self.secondary_shift_pipe != None: #TODO digimodes
            # print "==========> 2", self.secondary_shift_pipe
            self.secondary_shift_pipe_file=open(self.secondary_shift_pipe,"w") #TODO digimodes
            # print "==========> 3"
            self.set_secondary_offset_freq(self.secondary_offset_freq) #TODO digimodes
            # print "==========> 4"
        self.set_pipe_nonblocking(self.secondary_process_demod.stdout)
        self.set_pipe_nonblocking(self.secondary_process_fft.stdout)

    def set_secondary_offset_freq(self, value):
        self.secondary_offset_freq=value
        if self.secondary_processes_running:
            # csdr shift_addition_cc expects a normalized (freq/rate) value.
            self.secondary_shift_pipe_file.write("%g\n"%(-float(self.secondary_offset_freq)/self.if_samp_rate()))
            self.secondary_shift_pipe_file.flush()

    def stop_secondary_demodulator(self):
        if self.secondary_processes_running == False: return
        self.try_delete_pipes(self.secondary_pipe_names)
        # Kill the whole process group of each shell pipeline.
        if self.secondary_process_fft: os.killpg(os.getpgid(self.secondary_process_fft.pid), signal.SIGTERM)
        if self.secondary_process_demod: os.killpg(os.getpgid(self.secondary_process_demod.pid), signal.SIGTERM)
        self.secondary_processes_running = False

    def read_secondary_demod(self, size):
        return self.secondary_process_demod.stdout.read(size)

    def read_secondary_fft(self, size):
        return self.secondary_process_fft.stdout.read(size)

    def get_secondary_demodulator(self):
        return self.secondary_demodulator

    def set_secondary_fft_size(self,secondary_fft_size):
        #to change this, restart is required
        self.secondary_fft_size=secondary_fft_size

    def set_audio_compression(self,what):
        self.audio_compression = what

    def set_fft_compression(self,what):
        self.fft_compression = what

    def get_fft_bytes_to_read(self):
        # 4 bytes per float bin uncompressed; ADPCM packs 2 bins per byte
        # plus a header.
        if self.fft_compression=="none": return self.fft_size*4
        if self.fft_compression=="adpcm": return (self.fft_size/2)+(10/2)

    def get_secondary_fft_bytes_to_read(self):
        if self.fft_compression=="none": return self.secondary_fft_size*4
        if self.fft_compression=="adpcm": return (self.secondary_fft_size/2)+(10/2)

    def set_samp_rate(self,samp_rate):
        #to change this, restart is required
        self.samp_rate=samp_rate
        # Pick the largest integer decimation that keeps the IF rate at or
        # above output_rate; the remaining ratio is fractional decimation.
        self.decimation=1
        while self.samp_rate/(self.decimation+1)>self.output_rate:
            self.decimation+=1
        self.last_decimation=float(self.if_samp_rate())/self.output_rate

    def if_samp_rate(self):
        return self.samp_rate/self.decimation

    def get_name(self):
        return self.name

    def get_output_rate(self):
        return self.output_rate

    def set_output_rate(self,output_rate):
        self.output_rate=output_rate
        self.set_samp_rate(self.samp_rate) #as it depends on output_rate

    def set_demodulator(self,demodulator):
        #to change this, restart is required
        self.demodulator=demodulator

    def get_demodulator(self):
        return self.demodulator

    def set_fft_size(self,fft_size):
        #to change this, restart is required
        self.fft_size=fft_size

    def set_fft_fps(self,fft_fps):
        #to change this, restart is required
        self.fft_fps=fft_fps

    def set_fft_averages(self,fft_averages):
        #to change this, restart is required
        self.fft_averages=fft_averages

    def fft_block_size(self):
        if self.fft_averages == 0: return self.samp_rate/self.fft_fps
        else: return self.samp_rate/self.fft_fps/self.fft_averages

    def set_format_conversion(self,format_conversion):
        self.format_conversion=format_conversion

    def set_offset_freq(self,offset_freq):
        self.offset_freq=offset_freq
        if self.running:
            self.shift_pipe_file.write("%g\n"%(-float(self.offset_freq)/self.samp_rate))
            self.shift_pipe_file.flush()

    def set_bpf(self,low_cut,high_cut):
        self.low_cut=low_cut
        self.high_cut=high_cut
        if self.running:
            # Cutoffs are normalized to the IF sample rate for csdr.
            self.bpf_pipe_file.write( "%g %g\n"%(float(self.low_cut)/self.if_samp_rate(), float(self.high_cut)/self.if_samp_rate()) )
            self.bpf_pipe_file.flush()

    def get_bpf(self):
        return [self.low_cut, self.high_cut]

    def set_squelch_level(self, squelch_level):
        self.squelch_level=squelch_level
        if self.running:
            self.squelch_pipe_file.write( "%g\n"%(float(self.squelch_level)) )
            self.squelch_pipe_file.flush()

    def get_smeter_level(self):
        # NOTE(review): returns None when not running, and the FIFO is
        # non-blocking -- callers should expect missing values.
        if self.running:
            line=self.smeter_pipe_file.readline()
            return float(line[:-1])

    def mkfifo(self,path):
        """(Re)create a FIFO at path, removing any stale file first."""
        try:
            os.unlink(path)
        except:
            pass
        os.mkfifo(path)

    def ddc_transition_bw(self):
        return self.ddc_transition_bw_rate*(self.if_samp_rate()/float(self.samp_rate))

    def try_create_pipes(self, pipe_names, command_base):
        """For each name referenced as {name} in command_base, create a FIFO
        and store its path on self; otherwise store None."""
        # print "try_create_pipes"
        for pipe_name in pipe_names:
            # print "\t"+pipe_name
            if "{"+pipe_name+"}" in command_base:
                setattr(self, pipe_name, self.pipe_base_path+pipe_name)
                self.mkfifo(getattr(self, pipe_name))
            else:
                setattr(self, pipe_name, None)

    def try_delete_pipes(self, pipe_names):
        for pipe_name in pipe_names:
            pipe_path = getattr(self,pipe_name,None)
            if pipe_path:
                try: os.unlink(pipe_path)
                except Exception as e: print "[openwebrx-dsp-plugin:csdr] try_delete_pipes() ::", e

    def set_pipe_nonblocking(self, pipe):
        """Switch a file object to O_NONBLOCK so reads never stall the server."""
        flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
        fcntl.fcntl(pipe, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    def start(self):
        """Build the main chain, create its FIFOs, launch the subprocess and
        push the initial parameter values into the control pipes."""
        command_base=self.chain(self.demodulator)
        #create control pipes for csdr
        self.pipe_base_path="/tmp/openwebrx_pipe_{myid}_".format(myid=id(self))
        # self.bpf_pipe = self.shift_pipe = self.squelch_pipe = self.smeter_pipe = None
        self.try_create_pipes(self.pipe_names, command_base)
        # if "{bpf_pipe}" in command_base:
        #   self.bpf_pipe=pipe_base_path+"bpf"
        #   self.mkfifo(self.bpf_pipe)
        # if "{shift_pipe}" in command_base:
        #   self.shift_pipe=pipe_base_path+"shift"
        #   self.mkfifo(self.shift_pipe)
        # if "{squelch_pipe}" in command_base:
        #   self.squelch_pipe=pipe_base_path+"squelch"
        #   self.mkfifo(self.squelch_pipe)
        # if "{smeter_pipe}" in command_base:
        #   self.smeter_pipe=pipe_base_path+"smeter"
        #   self.mkfifo(self.smeter_pipe)
        # if "{iqtee_pipe}" in command_base:
        #   self.iqtee_pipe=pipe_base_path+"iqtee"
        #   self.mkfifo(self.iqtee_pipe)
        # if "{iqtee2_pipe}" in command_base:
        #   self.iqtee2_pipe=pipe_base_path+"iqtee2"
        #   self.mkfifo(self.iqtee2_pipe)
        #run the command
        command=command_base.format( bpf_pipe=self.bpf_pipe, shift_pipe=self.shift_pipe, decimation=self.decimation, \
            last_decimation=self.last_decimation, fft_size=self.fft_size, fft_block_size=self.fft_block_size(), fft_averages=self.fft_averages, \
            bpf_transition_bw=float(self.bpf_transition_bw)/self.if_samp_rate(), ddc_transition_bw=self.ddc_transition_bw(), \
            flowcontrol=int(self.samp_rate*2), start_bufsize=self.base_bufsize*self.decimation, nc_port=self.nc_port, \
            squelch_pipe=self.squelch_pipe, smeter_pipe=self.smeter_pipe, iqtee_pipe=self.iqtee_pipe, iqtee2_pipe=self.iqtee2_pipe )
        print "[openwebrx-dsp-plugin:csdr] Command =",command
        #code.interact(local=locals())
        my_env=os.environ.copy()
        if self.csdr_dynamic_bufsize: my_env["CSDR_DYNAMIC_BUFSIZE_ON"]="1";
        if self.csdr_print_bufsizes: my_env["CSDR_PRINT_BUFSIZES"]="1";
        # Own process group so stop() can SIGTERM the whole shell pipeline.
        self.process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setpgrp, env=my_env)
        self.running = True
        #open control pipes for csdr and send initialization data
        if self.bpf_pipe != None:
            self.bpf_pipe_file=open(self.bpf_pipe,"w")
            self.set_bpf(self.low_cut,self.high_cut)
        if self.shift_pipe != None:
            self.shift_pipe_file=open(self.shift_pipe,"w")
            self.set_offset_freq(self.offset_freq)
        if self.squelch_pipe != None:
            self.squelch_pipe_file=open(self.squelch_pipe,"w")
            self.set_squelch_level(self.squelch_level)
        if self.smeter_pipe != None:
            self.smeter_pipe_file=open(self.smeter_pipe,"r")
            self.set_pipe_nonblocking(self.smeter_pipe_file)
        self.start_secondary_demodulator()

    def read(self,size):
        return self.process.stdout.read(size)

    def stop(self):
        """Kill the main pipeline's process group, stop the secondary chain
        and remove the control FIFOs."""
        os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
        self.stop_secondary_demodulator()
        #if(self.process.poll()!=None):return # returns None while subprocess is running
        #while(self.process.poll()==None):
        #   #self.process.kill()
        #   print "killproc",os.getpgid(self.process.pid),self.process.pid
        #   os.killpg(self.process.pid, signal.SIGTERM)
        #
        #   time.sleep(0.1)
        self.try_delete_pipes(self.pipe_names)
        # if self.bpf_pipe:
        #   try: os.unlink(self.bpf_pipe)
        #   except: print "[openwebrx-dsp-plugin:csdr] stop() :: unlink failed: " + self.bpf_pipe
        # if self.shift_pipe:
        #   try: os.unlink(self.shift_pipe)
        #   except: print "[openwebrx-dsp-plugin:csdr] stop() :: unlink failed: " + self.shift_pipe
        # if self.squelch_pipe:
        #   try: os.unlink(self.squelch_pipe)
        #   except: print "[openwebrx-dsp-plugin:csdr] stop() :: unlink failed: " + self.squelch_pipe
        # if self.smeter_pipe:
        #   try: os.unlink(self.smeter_pipe)
        #   except: print "[openwebrx-dsp-plugin:csdr] stop() :: unlink failed: " + self.smeter_pipe
        # if self.iqtee_pipe:
        #   try: os.unlink(self.iqtee_pipe)
        #   except: print "[openwebrx-dsp-plugin:csdr] stop() :: unlink failed: " + self.iqtee_pipe
        # if self.iqtee2_pipe:
        #   try: os.unlink(self.iqtee2_pipe)
        #   except: print "[openwebrx-dsp-plugin:csdr] stop() :: unlink failed: " + self.iqtee2_pipe
        self.running = False

    def restart(self):
        self.stop()
        self.start()

    def __del__(self):
        self.stop()
        del(self.process)
|
simonyiszk/openwebrx
|
csdr.py
|
Python
|
agpl-3.0
| 19,253
|
"""
WSGI config for Practica_Ajax project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Practica_Ajax.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Titulacion-Sistemas/PracticasDjango
|
Practica_Ajax/Practica_Ajax/wsgi.py
|
Python
|
gpl-2.0
| 401
|
import subprocess
import setup_util
import multiprocessing
import os

# Executables of the PyPy install used to launch gunicorn.
bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/pypy/bin')
NCPU = multiprocessing.cpu_count()
# Handle of the running gunicorn process; None when nothing is running.
proc = None
def start(args, logfile, errfile):
    """Launch the Flask benchmark app under gunicorn with tornado workers.

    Substitutes the database host into flask/app.py first; stores the
    process handle in the module-global `proc`.  Returns 0 (success).
    """
    global proc
    setup_util.replace_text("flask/app.py", "DBHOSTNAME", args.database_host)
    proc = subprocess.Popen([
        bin_dir + "/gunicorn",
        "app:app",
        "-k", "tornado",
        "-b", "0.0.0.0:8080",
        '-w', str(NCPU*3),  # 3 workers per CPU core
        "--log-level=critical"],
        cwd="flask", stderr=errfile, stdout=logfile)
    return 0
def stop(logfile, errfile):
    """Terminate the gunicorn process started by start(), if any.

    Always returns 0; a no-op when nothing is running.
    """
    global proc
    if proc is not None:
        proc.terminate()
        proc = None
    return 0
|
seem-sky/FrameworkBenchmarks
|
flask/setup_pypy.py
|
Python
|
bsd-3-clause
| 715
|
# Copyright nycz 2011-2020
# This file is part of Kalpana.
# Kalpana is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Kalpana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Kalpana. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import json
from collections import defaultdict
from dataclasses import dataclass
from importlib.resources import read_text
from pathlib import Path
from typing import Any, Callable, Optional, TypeVar
from libsyntyche.settings import (Option, QtConfig, QtOption,
QtSectionDictOption, QtSubOptionList,
QtSubSectionOption, Section)
from libsyntyche.widgets import mk_signal1
from PyQt5 import QtGui
def read_data_file(fname: str) -> str:
    """Return the text of *fname* bundled in this package's ``data`` resources."""
    data_package = '{}.data'.format(__package__)
    return read_text(data_package, fname)
class CommandHistory:
    """Command usage statistics persisted as JSON in the config directory.

    Tracks how often each command is run and which completions the user
    picks for each abbreviation.
    """

    def __init__(self, config_dir: Path) -> None:
        """Load saved history from *config_dir*; start empty when the file
        is missing, unreadable or malformed."""
        self._path = config_dir / 'command_history.json'
        # command name -> number of times it has been run
        self.command_frequency: defaultdict[str, int] = defaultdict(int)
        # abbreviation -> (completion -> pick count)
        self.autocompletion_history: defaultdict[str, defaultdict[str, int]] \
            = defaultdict(lambda: defaultdict(int))
        try:
            raw: dict[str, Any] = json.loads(self._path.read_text())
            autocompletion_history = raw['autocompletion_history']
            command_frequency = raw['command_frequency']
        except (IOError, json.JSONDecodeError, KeyError, TypeError):
            # KeyError/TypeError cover a well-formed JSON file that doesn't
            # have the expected dict layout (previously crashed the load).
            pass
        else:
            self.command_frequency.update(command_frequency)
            # 'counts' was named 'data', shadowing the decoded JSON above.
            for abbr, counts in autocompletion_history.items():
                self.autocompletion_history[abbr].update(counts)

    def save(self) -> None:
        """Write the current history back to disk as pretty-printed JSON."""
        payload = {
            'autocompletion_history': self.autocompletion_history,
            'command_frequency': self.command_frequency,
        }
        self._path.write_text(json.dumps(payload, sort_keys=True, indent=2))
@dataclass
class ExportFormat:
    """A pattern/replacement pair describing one export transformation.

    ``selection_pattern`` is optional and only set when configured.
    """
    pattern: str
    replacement: str
    selection_pattern: Optional[str] = None

    @staticmethod
    def _from_option(option: Option) -> ExportFormat:
        """Build an ExportFormat from a config Option's suboptions."""
        subs = option.suboptions
        fmt = ExportFormat(
            subs['pattern'].as_string(),
            subs['repl'].as_string(),
        )
        sel = subs.get('selection_pattern')
        if sel:
            fmt.selection_pattern = sel.as_string()
        return fmt
@dataclass
class ExportSettings:
    """Behaviour switches for the export feature."""
    copy_rich_text: bool = False

    @staticmethod
    def _from_option(option: Option) -> ExportSettings:
        """Build ExportSettings from a config Option's suboptions."""
        rich = option.suboptions['copy-rich-text'].as_bool()
        return ExportSettings(copy_rich_text=rich)
@dataclass
class Hotkey:
    """A key binding mapping a Qt key code to a command string.

    When ``prompt`` is true the command is only placed in the prompt
    instead of being run directly (per the config option of that name).
    """
    key: int
    command: str
    prompt: bool = False

    @staticmethod
    def _from_option(option: Option) -> Hotkey:
        """Build a Hotkey from a config Option's suboptions."""
        subs = option.suboptions
        key_code = QtGui.QKeySequence(subs['key'].as_string())[0]
        hotkey = Hotkey(key=key_code, command=subs['cmd'].as_string())
        prompt_opt = subs.get('prompt')
        if prompt_opt is not None:
            hotkey.prompt = prompt_opt.as_bool()
        return hotkey
_Q = TypeVar('_Q')


class QtEditableOption(QtOption[_Q]):
    """A QtOption whose configured value can be overridden at runtime.

    Registers itself in the parent Settings' ``_editable_options`` map so
    runtime edits can be routed to it by option key.
    """

    def __init__(self, section_path: list[str], option_key: str,
                 parser: Callable[[Option], _Q], source: dict[str, Section],
                 parent: Settings) -> None:
        super().__init__(section_path, [option_key], parser, source, parent)
        # When set, takes precedence over the value parsed from the config.
        self._overload_value: Optional[_Q] = None
        parent._editable_options[option_key] = self

    def change(self, new_value: _Q) -> None:
        """Override the configured value and notify listeners via `changed`."""
        self._overload_value = new_value
        self.changed.emit(new_value)

    @property
    def value(self) -> _Q:
        # NOTE(review): an override of None is indistinguishable from "no
        # override" here -- confirm None is never a valid new_value.
        if self._overload_value is not None:
            return self._overload_value
        return super().value
class Settings(QtConfig):
    """Loads and takes care of settings and stylesheets.

    Wraps the on-disk config (``settings.cfg``) in typed Qt option objects and
    layers per-file overrides (``file_settings.json``) on top of the editable
    ones.
    """
    css_changed = mk_signal1(str)
    def __init__(self, config_dir: Optional[Path]) -> None:
        super().__init__(
            (config_dir or (Path.home() / '.config' / 'kalpana2')) / 'settings.cfg',
            default_config=read_data_file('default_settings.cfg')
        )
        # Per-file overrides for the editable options live next to the config.
        self._file_settings_path = self.config_dir / 'file_settings.json'
        self.css = ''
        self._active_file: Optional[Path] = None
        # Filled in by QtEditableOption.__init__ as each option registers itself.
        self._editable_options: dict[str, QtEditableOption[Any]] = {}
        self.command_history = CommandHistory(self.config_path.parent)
        config = self.load_config()
        # Standard settings
        main_section = ['settings']
        self.autohide_scrollbar = QtOption(
            main_section, ['autohide-scrollbar'], Option.as_bool, config, self)
        self.bold_marker = QtOption(
            main_section, ['bold-marker'], Option.as_string, config, self)
        self.chapter_keyword = QtOption(
            main_section, ['chapter-keyword'], Option.as_string, config, self)
        self.horizontal_ruler_marker = QtOption(
            main_section, ['horizontal-ruler-marker'], Option.as_string, config, self)
        self.italic_marker = QtOption(
            main_section, ['italic-marker'], Option.as_string, config, self)
        self.max_textarea_width = QtEditableOption(
            main_section, 'max-textarea-width', Option.as_int, config, self)
        self.show_line_numbers = QtEditableOption(
            main_section, 'show-line-numbers', Option.as_bool, config, self)
        self.spellcheck_active = QtEditableOption(
            main_section, 'spellcheck-active', Option.as_bool, config, self)
        self.spellcheck_language = QtEditableOption(
            main_section, 'spellcheck-language', Option.as_string, config, self)
        self.vim_mode = QtEditableOption(
            main_section, 'vim-mode', Option.as_bool, config, self)
        # Export formats
        self.export_formats = QtSubSectionOption(
            ['export-formats'], ExportFormat._from_option, config, self,
            default_to_empty=True,
        )
        self.export_settings = QtSectionDictOption(
            ['export-settings'], ExportSettings._from_option, config, self,
            default_to_empty=True,
        )
        # "start-at-pos" is stored as "<cursor pos> <scrollbar pos>".
        def parse_start_pos(opt: Option) -> tuple[int, int]:
            cursor_pos, sb_pos = opt.as_string().split()
            return (int(cursor_pos), int(sb_pos))
        self.start_at_pos = QtEditableOption(
            main_section, 'start-at-pos', parse_start_pos, config, self)
        self.underline_marker = QtOption(
            main_section, ['underline-marker'], Option.as_string, config, self)
        # Keybindings
        self.key_bindings = QtSubOptionList(
            ['hotkeys'], Hotkey._from_option, config, self)
    def file_opened(self, filepath: str, is_new: bool) -> None:
        """Apply (or clear) per-file overrides when the active file changes.

        NOTE(review): ``is_new`` is currently unused here -- possibly kept for
        signal-signature compatibility; confirm with the emitting side.
        """
        # Persist the previous file's overrides before switching away from it.
        if self._active_file is not None:
            self.save_settings()
        if filepath:
            self._active_file = Path(filepath).resolve()
            file_data = self._load_file_settings()
            for key, value in file_data.get(str(self._active_file), {}).items():
                if key in self._editable_options:
                    self._editable_options[key].change(value)
        else:
            # No file: drop all overrides and re-announce the config values.
            for opt in self._editable_options.values():
                opt._overload_value = None
                opt.changed.emit(opt.value)
            self._active_file = None
    def file_saved(self, filepath: str, new_name: bool) -> None:
        """Track the new path after save-as and re-read the config from disk."""
        if new_name:
            self._active_file = Path(filepath).resolve()
        self.reload()
    def _load_file_settings(self) -> dict[str, Any]:
        """Return the per-file settings mapping; empty if the file doesn't exist."""
        try:
            out: dict[str, Any] = json.loads(self._file_settings_path.read_text())
            return out
        except FileNotFoundError:
            return {}
    def reload_stylesheet(self) -> None:
        """Rebuild the stylesheet (bundled default + optional user css) and emit it."""
        # TODO: put the try_it in the calling function?
        # with self.try_it("Couldn't reload stylesheet"):
        default_css = read_data_file('qt.css')
        try:
            user_css = (self.config_dir / 'qt.css').read_text()
        except OSError:
            # No file present which is perfectly fine
            user_css = ''
        self.css = default_css + '\n' + user_css
        self.css_changed.emit(self.css)
    def save_settings(self) -> None:
        """Persist command history and the active file's option overrides."""
        self.command_history.save()
        if self._active_file is not None:
            file_data = self._load_file_settings()
            data = {}
            # Only explicitly overridden options are written back per file.
            for key, opt in self._editable_options.items():
                if opt._overload_value is not None:
                    data[key] = opt.value
            file_data[str(self._active_file)] = data
            self._file_settings_path.write_text(json.dumps(file_data))
|
kalpana-org/kalpana
|
kalpana/settings.py
|
Python
|
gpl-3.0
| 9,169
|
#coding: utf-8
__author__ = 'bozyao'
from math import log
from numpy import array
def getEnt(counts):
    """Compute the Shannon entropy (in bits) of a frequency table.

    counts: mapping from outcome to its occurrence count
            (e.g. the result of stat2dict).
    Returns the Shannon entropy as a float; 0.0 for an empty table.
    """
    # Note: parameter renamed from 'dict' -- it shadowed the builtin.
    total = sum(counts.values())
    ent = 0.0
    for key in counts:
        count = counts[key]
        if count <= 0:
            # p == 0 contributes nothing by convention (lim p->0 of -p*log2 p == 0);
            # the original crashed on log(0) here.
            continue
        # Multiply by 1.0 so the division is true division on Python 2 as well.
        proportion = count * 1.0 / total
        ent -= proportion * log(proportion, 2)
    return ent
def stat2dict(listData):
    """Count occurrences of each item in listData.

    listData: any iterable of hashable items.
    Returns a dict mapping item -> occurrence count.
    """
    # Local renamed from 'dict' -- it shadowed the builtin type.
    counts = {}
    for item in listData:
        counts[item] = counts.get(item, 0) + 1
    return counts
if __name__ == '__main__':
    # Self-test: build a frequency table over 0..99 partitioned by
    # divisibility (multiples of 3, then remaining even numbers, then odd)
    # and print its Shannon entropy.
    counts = {}  # renamed from 'dict' -- shadowed the builtin
    for i in range(100):
        if i % 3 == 0:
            counts[3] = counts.get(3, 0) + 1
        elif i % 2 == 0:
            counts[2] = counts.get(2, 0) + 1
        else:
            counts[1] = counts.get(1, 0) + 1
    # print(...) with a single argument works on both Python 2 and 3;
    # the original 'print x' statements were Python-2-only.
    print(counts)
    print(getEnt(counts))
|
bozyao/data_dig_tools
|
ent.py
|
Python
|
apache-2.0
| 856
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import dimod
from dimod import ScaleComposite
try:
import dwave.preprocessing as preprocessing
except ImportError:
preprocessing = False
# The tests only run when dwave.preprocessing is importable (see the
# try/except import guard above).
if preprocessing:
    @dimod.testing.load_sampler_bqm_tests(ScaleComposite(dimod.ExactSolver()))
    @dimod.testing.load_sampler_bqm_tests(ScaleComposite(dimod.NullSampler()))
    class TestScaleComposite(unittest.TestCase):
        """Tests for the deprecated dimod.ScaleComposite.

        Each test asserts both that the returned sampleset's energies match the
        original (unscaled) BQM and that the child sampler received the scaled
        BQM. Construction is expected to raise a DeprecationWarning.
        """
        def test_api(self):
            with self.assertWarns(DeprecationWarning):
                sampler = ScaleComposite(dimod.ExactSolver())
            dimod.testing.assert_sampler_api(sampler)
        def test_bias_range(self):
            bqm = dimod.BQM.from_ising({'a': -4.0, 'b': -4.0},
                                       {('a', 'b'): 3.2}, 1.5)
            with self.assertWarns(DeprecationWarning):
                sampler = ScaleComposite(dimod.TrackingComposite(dimod.ExactSolver()))
            sampleset = sampler.sample(bqm, bias_range=[-2, 2])
            # check that everything was restored properly
            dimod.testing.assert_sampleset_energies(sampleset, bqm)
            self.assertEqual(sampler.child.input['bqm'],
                             dimod.BQM.from_ising({'a': -2.0, 'b': -2.0},
                                                  {('a', 'b'): 1.6}, .75))
        def test_bias_ranges(self):
            bqm = dimod.BQM.from_ising({'a': -4.0, 'b': -4.0},
                                       {('a', 'b'): 4}, 1.5)
            with self.assertWarns(DeprecationWarning):
                sampler = ScaleComposite(dimod.TrackingComposite(dimod.ExactSolver()))
            sampleset = sampler.sample(bqm, bias_range=[-3, 3],
                                       quadratic_range=[-2, 2])
            # check that everything was restored properly
            dimod.testing.assert_sampleset_energies(sampleset, bqm)
            self.assertEqual(sampler.child.input['bqm'],
                             dimod.BQM.from_ising({'a': -2.0, 'b': -2.0},
                                                  {('a', 'b'): 2}, .75))
        def test_ignored_interactions(self):
            bqm = dimod.BQM.from_ising({'a': -4.0, 'b': -4.0},
                                       {('a', 'b'): 3.2, ('b', 'c'): 1}, 1.5)
            with self.assertWarns(DeprecationWarning):
                sampler = ScaleComposite(dimod.TrackingComposite(dimod.ExactSolver()))
            sampleset = sampler.sample(bqm, scalar=.5,
                                       ignored_interactions=[('b', 'c')])
            # check that everything was restored properly
            dimod.testing.assert_sampleset_energies(sampleset, bqm)
            self.assertEqual(sampler.child.input['bqm'],
                             dimod.BQM.from_ising({'a': -2.0, 'b': -2.0},
                                                  {'ab': 1.6, 'bc': 1}, .75))
        def test_ignored_offset(self):
            bqm = dimod.BQM.from_ising({'a': -4.0, 'b': -4.0},
                                       {('a', 'b'): 3.2}, 1.5)
            with self.assertWarns(DeprecationWarning):
                sampler = ScaleComposite(dimod.TrackingComposite(dimod.ExactSolver()))
            sampleset = sampler.sample(bqm, scalar=.5, ignore_offset=True)
            # check that everything was restored properly
            dimod.testing.assert_sampleset_energies(sampleset, bqm)
            self.assertEqual(sampler.child.input['bqm'],
                             dimod.BQM.from_ising({'a': -2.0, 'b': -2.0},
                                                  {('a', 'b'): 1.6}, 1.5))
        def test_ignored_variables(self):
            bqm = dimod.BQM.from_ising({'a': -4.0, 'b': -4.0},
                                       {('a', 'b'): 3.2}, 1.5)
            with self.assertWarns(DeprecationWarning):
                sampler = ScaleComposite(dimod.TrackingComposite(dimod.ExactSolver()))
            sampleset = sampler.sample(bqm, scalar=.5, ignored_variables='a')
            # check that everything was restored properly
            dimod.testing.assert_sampleset_energies(sampleset, bqm)
            self.assertEqual(sampler.child.input['bqm'],
                             dimod.BQM.from_ising({'a': -4.0, 'b': -2.0},
                                                  {('a', 'b'): 1.6}, .75))
        def test_scalar(self):
            bqm = dimod.BQM.from_ising({'a': -4.0, 'b': -4.0},
                                       {('a', 'b'): 3.2}, 1.5)
            with self.assertWarns(DeprecationWarning):
                sampler = ScaleComposite(dimod.TrackingComposite(dimod.ExactSolver()))
            sampleset = sampler.sample(bqm, scalar=.5)
            # check that everything was restored properly
            dimod.testing.assert_sampleset_energies(sampleset, bqm)
            self.assertEqual(sampler.child.input['bqm'],
                             dimod.BQM.from_ising({'a': -2.0, 'b': -2.0},
                                                  {('a', 'b'): 1.6}, .75))
|
dwavesystems/dimod
|
tests/test_scalecomposite.py
|
Python
|
apache-2.0
| 5,604
|
import falcon
import requests
from falsy.jlog.jlog import JLog
log = JLog().bind()
def get_it(name):
    """Log a debug marker and wrap *name* in a response dict."""
    log.debug('get it')
    response = {'get': name}
    return response
|
pingf/falsy
|
demo/easy/ops/hello.py
|
Python
|
mit
| 169
|
#!/usr/bin/python2.7
# -*- encoding: utf-8 -*-
__author__ = "Tomáš Beluský"
__date__ = "09.03. 2013"
import types
class VcfCreator:
  """
  Creator of VCF (Variant Call Format, v4.1) output.

  Header metadata (generic headers, contigs, INFO and ALT definitions) is
  accumulated first and written in one pass by writeHeader(); records are
  then appended with writeRecord().
  """
  def __init__(self, filename, output):
    """
    Initialize variables.

    filename: reference file name, written as the ##reference header.
    output: output file name, or an already-open file object
            (Python 2 only: detected via types.FileType).
    """
    self.__filename = filename
    self.__outputName = output
    self.__headers = []
    self.__contigs = []
    self.__infos = []
    self.__alts = []
    # Accept either an open file object or a path to open ourselves;
    # close() only closes what we opened here.
    if type(self.__outputName) == types.FileType:
      self.__output = self.__outputName
    else:
      self.__output = open(self.__outputName, 'w')
  def addHeader(self, key, value):
    """
    Add a generic ##key=value header line.
    """
    self.__headers.append((key, value))
  def __addContigAttribute(self, attributes, key, attribute, result, addApostrophe=False):
    """
    Add attribute with possible apostrophes into result string.

    Maps a SAM-style contig key (e.g. 'SN') to a VCF attribute name and
    appends 'attribute=value' (quoted when addApostrophe) to result.
    """
    if key in attributes: # attribute exists
      if result: # there is previous attribute
        result += ","
      if addApostrophe:
        result += "%s=\"%s\"" % (attribute, attributes[key])
      else:
        result += "%s=%s" % (attribute, attributes[key])
    return result
  def addContig(self, contig):
    """
    Add reference contig.

    contig: dict with SAM header keys (SN, LN, AS, M5, SP, UR); missing
    keys are simply omitted from the ##contig line.
    """
    result = self.__addContigAttribute(contig, 'SN', 'ID', "")
    result = self.__addContigAttribute(contig, 'LN', 'length', result)
    result = self.__addContigAttribute(contig, 'AS', 'assembly', result)
    result = self.__addContigAttribute(contig, 'M5', 'md5', result)
    # Species may contain spaces, so it is quoted.
    result = self.__addContigAttribute(contig, 'SP', 'species', result, True)
    result = self.__addContigAttribute(contig, 'UR', 'URL', result)
    self.__contigs.append(result)
  def addInfo(self, iid, number, itype, description):
    """
    Add an ##INFO field definition (ID, Number, Type, Description).
    """
    self.__infos.append((iid, number, itype, description))
  def addAlt(self, aid, description):
    """
    Add an ##ALT allele definition (ID, Description).
    """
    self.__alts.append((aid, description))
  def writeHeader(self):
    """
    Write the full VCF header: fileformat, generic headers, reference,
    contigs, INFO and ALT definitions, then the #CHROM column line.
    """
    self.__output.write("##fileformat=VCFv4.1\n")
    for key, value in self.__headers: # write header
      self.__output.write("##%s=%s\n" % (key, value))
    self.__output.write("##reference=%s\n" % self.__filename)
    for contig in self.__contigs: # write contigs
      self.__output.write("##contig=<%s>\n" % contig)
    for iid, number, itype, description in self.__infos: # write info
      self.__output.write("##INFO=<ID=%s,Number=%s,Type=%s,Description=\"%s\">\n" % (iid, number, itype, description))
    for aid, description in self.__alts: # write alt
      self.__output.write("##ALT=<ID=%s,Description=\"%s\">\n" % (aid, description))
    self.__output.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
  def writeRecord(self, record):
    """
    Write a single data record line; blank/whitespace-only records are skipped.
    """
    if len(record.strip()):
      self.__output.write("%s\n" % record)
  def close(self):
    """
    Close the output file, but only if this object opened it itself.
    """
    if type(self.__outputName) != types.FileType:
      self.__output.close()
|
tomasbelusky/gataca
|
src/resources/VcfCreator.py
|
Python
|
gpl-3.0
| 2,975
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsagesOperations(object):
    """UsagesOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-11-01".
    """
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-11-01"
        self.config = config
    def list(
            self, location, custom_headers=None, raw=False, **operation_config):
        """List network usages for a subscription.

        :param location: The location where resource usage is queried.
        :type location: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Usage
        :rtype:
         ~azure.mgmt.network.v2017_11_01.models.UsagePaged[~azure.mgmt.network.v2017_11_01.models.Usage]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback: first call builds the full URL; subsequent calls
        # follow the service-provided nextLink verbatim.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/usages_operations.py
|
Python
|
mit
| 4,282
|
"""empty message
Revision ID: b74c132e4997
Revises:
Create Date: 2017-03-03 12:29:36.271400
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b74c132e4997'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``users`` table (unique email, activity/superuser flags)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('full_name', sa.String(length=256), nullable=True),
    sa.Column('email', sa.String(length=256), nullable=False),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('last_login', sa.DateTime(), nullable=True),
    sa.Column('is_active', sa.Boolean(), nullable=True),
    sa.Column('is_superuser', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``users`` table, reverting this migration."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('users')
    # ### end Alembic commands ###
|
django-ch/olympus-project
|
src/migrations/versions/b74c132e4997_.py
|
Python
|
gpl-2.0
| 1,055
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RStringfish(RPackage):
    """Alt String Implementation

    Provides an extendable, performant and multithreaded 'alt-string'
    implementation backed by 'C++' vectors and strings."""
    homepage = "https://github.com/traversc/stringfish"
    url      = "https://cloud.r-project.org/src/contrib/stringfish_0.14.2.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/stringfish"
    maintainers = ['dorton21']
    version('0.14.2', sha256='9373cfc715cda1527fd20179435977b8e59e19d8c5ef82a31e519f93fb624ced')
    # gmake is only needed to compile the package, not at runtime.
    depends_on('gmake', type='build')
    depends_on('r@3.5.0:', type=('build', 'run'))
    depends_on('r-rcpp@0.12.18.3:', type=('build', 'run'))
    depends_on('r-rcppparallel', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-stringfish/package.py
|
Python
|
lgpl-2.1
| 961
|
import subprocess
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from openwisp_utils.base import KeyField
from .. import settings as app_settings
from .base import BaseConfig
class AbstractVpn(BaseConfig):
    """
    Abstract VPN model
    """
    host = models.CharField(
        max_length=64, help_text=_('VPN server hostname or ip address')
    )
    ca = models.ForeignKey(
        'django_x509.Ca', verbose_name=_('CA'), on_delete=models.CASCADE
    )
    key = KeyField(db_index=True)
    cert = models.ForeignKey(
        'django_x509.Cert',
        verbose_name=_('x509 Certificate'),
        help_text=_('leave blank to create automatically'),
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    backend = models.CharField(
        _('VPN backend'),
        choices=app_settings.VPN_BACKENDS,
        max_length=128,
        help_text=_('Select VPN configuration backend'),
    )
    notes = models.TextField(blank=True)
    # diffie hellman parameters are required
    # in some VPN solutions (eg: OpenVPN)
    dh = models.TextField(blank=True)
    __vpn__ = True
    class Meta:
        verbose_name = _('VPN server')
        verbose_name_plural = _('VPN servers')
        abstract = True
    def clean(self, *args, **kwargs):
        """
        * ensure certificate matches CA
        """
        super().clean(*args, **kwargs)
        # certificate must be related to CA
        if self.cert and self.cert.ca.pk != self.ca.pk:
            msg = _('The selected certificate must match the selected CA.')
            raise ValidationError({'cert': msg})
    def save(self, *args, **kwargs):
        """
        Calls _auto_create_cert() if cert is not set
        """
        if not self.cert:
            self.cert = self._auto_create_cert()
        if not self.dh:
            # NOTE(review): 1024-bit DH is considered weak by modern
            # standards; kept for backward compatibility -- consider 2048+.
            self.dh = self.dhparam(1024)
        super().save(*args, **kwargs)
    @classmethod
    def dhparam(cls, length):
        """
        Returns an automatically generated set of DH parameters in PEM
        """
        # length is an internally supplied integer, never user input,
        # so the shell invocation is safe in practice.
        return subprocess.check_output(
            'openssl dhparam {0} 2> /dev/null'.format(length), shell=True
        ).decode('utf-8')
    def _auto_create_cert(self):
        """
        Automatically generates server x509 certificate
        """
        common_name = slugify(self.name)
        server_extensions = [
            {"name": "nsCertType", "value": "server", "critical": False}
        ]
        cert_model = self.__class__.cert.field.related_model
        # Inherit all identity attributes from the CA so the server cert
        # matches its issuer's metadata.
        cert = cert_model(
            name=self.name,
            ca=self.ca,
            key_length=self.ca.key_length,
            digest=self.ca.digest,
            country_code=self.ca.country_code,
            state=self.ca.state,
            city=self.ca.city,
            organization_name=self.ca.organization_name,
            email=self.ca.email,
            common_name=common_name,
            extensions=server_extensions,
        )
        cert = self._auto_create_cert_extra(cert)
        cert.save()
        return cert
    def _auto_create_cert_extra(self, cert):
        """
        this method can be overridden in order to perform
        extra operations on a Cert object when auto-creating
        certificates for VPN servers
        """
        return cert
    def get_context(self):
        """
        prepares context for netjsonconfig VPN backend
        """
        try:
            c = {'ca': self.ca.certificate}
        except ObjectDoesNotExist:
            c = {}
        if self.cert:
            c.update({'cert': self.cert.certificate, 'key': self.cert.private_key})
        if self.dh:
            c.update({'dh': self.dh})
        c.update(super().get_context())
        return c
    def _get_auto_context_keys(self):
        """
        returns a dictionary which indicates the names of
        the configuration variables needed to access:
            * path to CA file
            * CA certificate in PEM format
            * path to cert file
            * cert in PEM format
            * path to key file
            * key in PEM format
        """
        pk = self.pk.hex
        return {
            'ca_path': 'ca_path_{0}'.format(pk),
            'ca_contents': 'ca_contents_{0}'.format(pk),
            'cert_path': 'cert_path_{0}'.format(pk),
            'cert_contents': 'cert_contents_{0}'.format(pk),
            'key_path': 'key_path_{0}'.format(pk),
            'key_contents': 'key_contents_{0}'.format(pk),
        }
    def auto_client(self, auto_cert=True):
        """
        calls backend ``auto_client`` method and returns a configuration
        dictionary that is suitable to be used as a template
        if ``auto_cert`` is ``False`` the resulting configuration
        won't include autogenerated key and certificate details
        """
        config = {}
        backend = self.backend_class
        if hasattr(backend, 'auto_client'):
            context_keys = self._get_auto_context_keys()
            # add curly brackets for netjsonconfig context evaluation
            for key in context_keys.keys():
                context_keys[key] = '{{%s}}' % context_keys[key]
            # do not include cert and key if auto_cert is False
            if not auto_cert:
                for key in ['cert_path', 'cert_contents', 'key_path', 'key_contents']:
                    del context_keys[key]
            # fixed typo: was 'conifg_dict_key'
            config_dict_key = self.backend_class.__name__.lower()
            auto = backend.auto_client(
                host=self.host, server=self.config[config_dict_key][0], **context_keys
            )
            config.update(auto)
        return config
class AbstractVpnClient(models.Model):
    """
    m2m through model linking a device Config to a Vpn,
    optionally managing an auto-generated client certificate.
    """
    config = models.ForeignKey('django_netjsonconfig.Config', on_delete=models.CASCADE)
    vpn = models.ForeignKey('django_netjsonconfig.Vpn', on_delete=models.CASCADE)
    cert = models.OneToOneField(
        'django_x509.Cert', on_delete=models.CASCADE, blank=True, null=True
    )
    # this flags indicates whether the certificate must be
    # automatically managed, which is going to be almost in all cases
    auto_cert = models.BooleanField(default=False)
    class Meta:
        abstract = True
        unique_together = ('config', 'vpn')
        verbose_name = _('VPN client')
        verbose_name_plural = _('VPN clients')
    def save(self, *args, **kwargs):
        """
        automatically creates an x509 certificate when ``auto_cert`` is True
        """
        if self.auto_cert:
            cn = self._get_common_name()
            self._auto_create_cert(name=self.config.device.name, common_name=cn)
        super().save(*args, **kwargs)
    def _get_common_name(self):
        """
        returns the common name for a new certificate
        """
        d = self.config.device
        cn_format = app_settings.COMMON_NAME_FORMAT
        # avoid a redundant "mac-mac" common name when the device was
        # never given a distinct name
        if cn_format == '{mac_address}-{name}' and d.name == d.mac_address:
            cn_format = '{mac_address}'
        # NOTE(review): formatting with d.__dict__ exposes all instance
        # attributes as placeholders -- confirm the format string is trusted.
        return cn_format.format(**d.__dict__)
    @classmethod
    def post_delete(cls, **kwargs):
        """
        class method for ``post_delete`` signal
        automatically deletes certificates when ``auto_cert`` is ``True``
        """
        instance = kwargs['instance']
        # assumes auto_cert implies a cert was created in save() --
        # instance.cert would be None otherwise
        if instance.auto_cert:
            instance.cert.delete()
    def _auto_create_cert(self, name, common_name):
        """
        Automatically creates and assigns a client x509 certificate
        """
        server_extensions = [
            {"name": "nsCertType", "value": "client", "critical": False}
        ]
        ca = self.vpn.ca
        cert_model = self.__class__.cert.field.related_model
        # identity attributes are inherited from the VPN's CA
        cert = cert_model(
            name=name,
            ca=ca,
            key_length=ca.key_length,
            digest=str(ca.digest),
            country_code=ca.country_code,
            state=ca.state,
            city=ca.city,
            organization_name=ca.organization_name,
            email=ca.email,
            common_name=common_name,
            extensions=server_extensions,
        )
        cert = self._auto_create_cert_extra(cert)
        cert.full_clean()
        cert.save()
        self.cert = cert
        return cert
    def _auto_create_cert_extra(self, cert):
        """
        this method can be overridden in order to perform
        extra operations on a Cert object when auto-creating
        certificates for VPN clients
        """
        return cert
|
openwisp/django-netjsonconfig
|
django_netjsonconfig/base/vpn.py
|
Python
|
gpl-3.0
| 8,633
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: yyg
@Create: 2016MMDD
@LastUpdate: 2016-12-15 HH:MM:SS
@Version: 0.0
"""
from json import load
from logging import (Formatter, _defaultFormatter, exception,
getLogger, FileHandler, basicConfig, StreamHandler)
from cloghandler import ConcurrentRotatingFileHandler
from params import (LOG_CONF_FILE, LOG_LVL, LOGGER_NAME,
LOG_FILE, LOG_DAT_FMT, LOG_FMT)
class LaserjetLogger(object):
    """
    Configures the shared application logger (file + console handlers).

    Compatible to python 2.6+.

    NOTE(review): every instantiation calls _start(), which adds new
    handlers to the same named logger without removing old ones --
    constructing more than one LaserjetLogger duplicates log lines.
    """
    def __init__(self):
        # Format strings come from the project's params module.
        self.fmt = LOG_FMT
        self.datefmt = LOG_DAT_FMT
        self._start()
    def _start(self):
        """Attach a rotating file handler and a console handler to the logger."""
        logger = getLogger(LOGGER_NAME)
        # Multiprocess-safe rotating handler (cloghandler).
        log_handler = ConcurrentRotatingFileHandler(LOG_FILE)
        log_formatter = Formatter(self.fmt, self.datefmt)
        log_handler.setFormatter(log_formatter)
        console_handler = StreamHandler()
        console_handler.setFormatter(log_formatter)
        logger.setLevel(LOG_LVL)
        logger.addHandler(log_handler)
        logger.addHandler(console_handler)
        logger.info("Logger activated")
def print_func(anything_str):
    """Log *anything_str* at INFO level through the shared application logger."""
    getLogger(LOGGER_NAME).info(anything_str)
if __name__ == "__main__":
logger = LaserjetLogger()
test_pool = Pool()
for i in range(5):
test_pool.apply_async(print_func, args=(i,))
test_pool.close()
test_pool.join()
|
hipnusleo/laserjet
|
lib/core/loggers.py
|
Python
|
apache-2.0
| 1,506
|
HEADER_HUP = b"\x00"
HEADER_RESOLVE = b"\x11"
HEADER_NOTIFY = b"\x12"
HEADER_EVAL = b"\x13"
HEADER_REPLY = b"\x14"
HEADER_MESSAGE_ERROR = b"\x15"
HEADER_GENERAL_ERROR = b"\x16"
HEADER_TIME = b"\x17"
HEADER_FILTER_IN = b"\x18"
HEADER_FILTER_OUT = b"\x19"
HEADER_FILTER_ERR = b"\x1A"
|
disnesquick/ripley
|
scratch/py/backups/old/headers.py
|
Python
|
gpl-2.0
| 342
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import inspect
import os
import sys
from time import sleep
from CommandExecutor import *
from OSEncryptionState import *
class PatchBootSystemState(OSEncryptionState):
    """OS-encryption state machine step that patches the boot system
    (initramfs + grub) of an Ubuntu 14.04 guest so it can unlock the
    encrypted root volume at boot.
    """
    def __init__(self, context):
        super(PatchBootSystemState, self).__init__('PatchBootSystemState', context)
    def should_enter(self):
        self.context.logger.log("Verifying if machine should enter patch_boot_system state")
        if not super(PatchBootSystemState, self).should_enter():
            return False
        self.context.logger.log("Performing enter checks for patch_boot_system state")
        # Probe that the encrypted root can actually be mounted, then
        # unmount again; enter() redoes the mount for real.
        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)
        self.command_executor.Execute('umount /oldroot', True)
        return True
    def enter(self):
        if not self.should_enter():
            return
        self.context.logger.log("Entering patch_boot_system state")
        self.command_executor.Execute('mount /boot', False)
        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)
        # pivot_root into the encrypted root, carrying over the critical
        # virtual filesystems; the inverse sequence restores the memroot.
        self.command_executor.Execute('mount --make-rprivate /', True)
        self.command_executor.Execute('mkdir /oldroot/memroot', True)
        self.command_executor.Execute('pivot_root /oldroot /oldroot/memroot', True)
        self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /memroot/$i /$i; done', True)
        self.command_executor.ExecuteInBash('[ -e "/boot/luks" ]', True)
        try:
            self._modify_pivoted_oldroot()
        except Exception as e:
            # Undo the pivot before propagating so the machine is left usable.
            self.command_executor.Execute('mount --make-rprivate /')
            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')
            self.command_executor.Execute('rmdir /oldroot/memroot')
            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')
            raise
        else:
            self.command_executor.Execute('mount --make-rprivate /')
            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')
            self.command_executor.Execute('rmdir /oldroot/memroot')
            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')
            # Preserve the extension logs gathered while pivoted.
            extension_full_name = 'Microsoft.Azure.Security.' + CommonVariables.extension_name
            self.command_executor.Execute('cp -ax' +
                                          ' /var/log/azure/{0}'.format(extension_full_name) +
                                          ' /oldroot/var/log/azure/{0}.Stripdown'.format(extension_full_name),
                                          True)
            self.command_executor.Execute('umount /boot')
            self.command_executor.Execute('umount /oldroot')
            self.context.logger.log("Pivoted back into memroot successfully")
    def should_exit(self):
        self.context.logger.log("Verifying if machine should exit patch_boot_system state")
        return super(PatchBootSystemState, self).should_exit()
    def _append_contents_to_file(self, contents, path):
        # Appends; callers are responsible for newline handling.
        with open(path, 'a') as f:
            f.write(contents)
    def _modify_pivoted_oldroot(self):
        """Inside the pivoted root: install the LUKS-header hook, patch the
        initramfs scripts, register the crypttab entry and rebuild
        initramfs + grub. Raises if any required artifact is missing.
        """
        self.context.logger.log("Pivoted into oldroot successfully")
        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        encryptscriptsdir = os.path.join(scriptdir, '../encryptscripts')
        injectscriptpath = os.path.join(encryptscriptsdir, 'inject_luks_header.sh')
        if not os.path.exists(injectscriptpath):
            message = "Inject-script not found at path: {0}".format(injectscriptpath)
            self.context.logger.log(message)
            raise Exception(message)
        else:
            self.context.logger.log("Inject-script found at path: {0}".format(injectscriptpath))
        self.command_executor.Execute('cp {0} /usr/share/initramfs-tools/hooks/luksheader'.format(injectscriptpath), True)
        self.command_executor.Execute('chmod +x /usr/share/initramfs-tools/hooks/luksheader', True)
        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        patchesdir = os.path.join(scriptdir, '../encryptpatches')
        patchpath = os.path.join(patchesdir, 'ubuntu_1404_initramfs.patch')
        if not os.path.exists(patchpath):
            message = "Patch not found at path: {0}".format(patchpath)
            self.context.logger.log(message)
            raise Exception(message)
        else:
            self.context.logger.log("Patch found at path: {0}".format(patchpath))
        self.command_executor.ExecuteInBash('patch -b -d /usr/share/initramfs-tools -p1 <{0}'.format(patchpath), True)
        entry = 'osencrypt /dev/sda1 none luks,discard,header=/boot/luks/osluksheader,keyscript=/usr/sbin/azure_crypt_key.sh'
        self._append_contents_to_file(entry, '/etc/crypttab')
        self.command_executor.Execute('update-initramfs -u -k all', True)
        # Verify the rebuilt initramfs actually contains the keyscript and header.
        proc_comm = ProcessCommunicator()
        self.command_executor.ExecuteInBash(command_to_execute="lsinitramfs /boot/initrd*",
                                            raise_exception_on_failure=True,
                                            communicator=proc_comm)
        if not "azure_crypt_key.sh" in proc_comm.stdout or not "osluksheader" in proc_comm.stdout:
            raise Exception("initramfs update failed")
        self.command_executor.Execute('update-grub', True)
        self.command_executor.Execute('grub-install --recheck --force {0}'.format(self.rootfs_disk), True)
    def _get_uuid(self, partition_name):
        """Return the filesystem UUID of *partition_name* via blkid."""
        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(command_to_execute="blkid -s UUID -o value {0}".format(partition_name),
                                      raise_exception_on_failure=True,
                                      communicator=proc_comm)
        return proc_comm.stdout.strip()
|
Azure/azure-linux-extensions
|
VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/PatchBootSystemState.py
|
Python
|
apache-2.0
| 6,662
|
import unittest, os, sys
from blackjack.cmake.target.LibTarget_Imported import LibTarget_Imported
from blackjack.cmake.target.LibTypes import LibTypes
class Test_LibTarget_Imported(unittest.TestCase):
    """Unit tests for LibTarget_Imported CMake rendering."""
    def test_render(self):
        """An imported global STATIC library renders the expected CMake lines."""
        block1 = LibTarget_Imported("lib target1", LibTypes.STATIC, True)
        result = block1.render()
        # assertEqual reports a readable diff on failure; the original used a
        # manual compare-and-self.fail() plus a stray debug print().
        self.assertEqual(
            result,
            ['## Library Target - Imported',
             'add_library(lib_target1 STATIC IMPORTED GLOBAL)'])
if __name__ == '__main__':
    unittest.main()
|
grbd/GBD.Build.BlackJack
|
blackjack/test/cmake/target/LibTarget_Imported.py
|
Python
|
apache-2.0
| 574
|
import pytest
from flask import Flask, request
from skylines.api.middleware import HTTPMethodOverrideMiddleware
@pytest.fixture
def app():
    # Minimal Flask app whose WSGI stack is wrapped by the
    # HTTPMethodOverrideMiddleware under test.
    test_app = Flask(__name__)
    test_app.wsgi_app = HTTPMethodOverrideMiddleware(test_app.wsgi_app)
    # Accept every method the middleware knows about; the response body is
    # the request method the app ultimately saw (after any override).
    @test_app.route('/', methods=HTTPMethodOverrideMiddleware.allowed_methods)
    def index():
        return request.method
    return test_app
# Each request should come back with the HTTP method (or its override)
# echoed in the response body by the fixture app.
def test_get(client):
    response = client.get('/')
    assert response.data == 'GET'
def test_head(client):
    response = client.head('/')
    assert not response.data
def test_post(client):
    response = client.post('/')
    assert response.data == 'POST'
def test_delete(client):
    response = client.delete('/')
    assert response.data == 'DELETE'
def test_put(client):
    response = client.put('/')
    assert response.data == 'PUT'
def test_patch(client):
    response = client.patch('/')
    assert response.data == 'PATCH'
def test_options(client):
    response = client.options('/')
    assert response.data == 'OPTIONS'
def test_invalid_method(client):
    response = client.open('/', method='INVALID')
    assert response.status.upper() == '405 METHOD NOT ALLOWED'
def test_get_with_head_override(client):
    response = client.get('/', headers=[('X-HTTP-Method-Override', 'HEAD')])
    assert not response.data
def test_get_with_post_override(client):
    response = client.get('/', headers=[('X-HTTP-Method-Override', 'POST')])
    assert response.data == 'POST'
def test_get_with_delete_override(client):
    response = client.get('/', headers=[('X-HTTP-Method-Override', 'DELETE')])
    assert response.data == 'DELETE'
def test_get_with_put_override(client):
    response = client.get('/', headers=[('X-HTTP-Method-Override', 'PUT')])
    assert response.data == 'PUT'
def test_get_with_patch_override(client):
    response = client.get('/', headers=[('X-HTTP-Method-Override', 'PATCH')])
    assert response.data == 'PATCH'
def test_get_with_options_override(client):
    response = client.get('/', headers=[('X-HTTP-Method-Override', 'OPTIONS')])
    assert response.data == 'OPTIONS'
def test_get_with_invalid_method_override(client):
    response = client.get('/', headers=[('X-HTTP-Method-Override', 'INVALID')])
    assert response.status.upper() == '405 METHOD NOT ALLOWED'
|
kerel-fs/skylines
|
tests/api/middleware/method_override_test.py
|
Python
|
agpl-3.0
| 1,967
|
from lightbulb.api.api_native import LightBulb
import base64
# Native-API example: differentially test two SFA inference setups.
lightbulbapp = LightBulb()
path = "/test/env/bin/lightbulb" #Path to binary
# Grammar/alphabet configuration for the two inference targets.
# NOTE(review): configurations A and B are identical here -- presumably the
# intent is to diff the same seed grammar against two handlers; confirm.
configuration_A = {'TESTS_FILE_TYPE': 'None', 'ALPHABET': '32-57,58-64,65-126', 'SEED_FILE_TYPE': 'FLEX', 'TESTS_FILE': 'None','DFA1_MINUS_DFA2': 'True', 'SAVE': 'False', 'HANDLER': 'None', 'SEED_FILE': '{library}/regex/BROWSER/html_p_attribute.y'}
configuration_B = {'TESTS_FILE_TYPE': 'None', 'ALPHABET': '32-57,58-64,65-126', 'SEED_FILE_TYPE': 'FLEX', 'TESTS_FILE': 'None','DFA1_MINUS_DFA2': 'True', 'SAVE': 'False', 'HANDLER': 'None', 'SEED_FILE': '{library}/regex/BROWSER/html_p_attribute.y'}
# Handler A is a browser handler (WSPORT/WBPORT/HOST); handler B replays
# membership queries as HTTP GET requests against a local PHP endpoint.
handlerconfig_A = {'WSPORT': '5000','WBPORT': '5080', 'BROWSERPARSE': 'True', 'DELAY': '50', 'HOST': 'localhost'}
handlerconfig_B = {'URL': 'http://127.0.0.1/~fishingspot/securitycheck/index.php', 'BLOCK':'Impact', 'REQUEST_TYPE':'GET','PARAM':'input','BYPASS':'None', 'PROXY_SCHEME': 'None', 'PROXY_HOST': 'None', 'PROXY_PORT': 'None', 'PROXY_USERNAME': 'None', 'PROXY_PASSWORD': 'None','USER_AGENT': "Mozilla/5.0", 'REFERER': "http://google.com"}
# Run the SFADiff algorithm with the two handlers and print the collected
# statistics (Python 2 print statement; this example is Python 2 only).
stats = lightbulbapp.start_sfadiff_algorithm(
    path,
    configuration_A,
    configuration_B,
    handlerconfig_A,
    handlerconfig_B,
    "BrowserHandler",
    "HTTPHandler")
print stats
|
lightbulb-framework/lightbulb-framework
|
examples/test_custom_api_native_example_1.py
|
Python
|
mit
| 1,286
|
import sys
MAX_VOL_X = int(sys.argv[1]) if len(sys.argv) >= 2 else 9
MAX_VOL_Y = int(sys.argv[2]) if len(sys.argv) >= 3 else 4
TARGET_VOLUME = int(sys.argv[3]) if len(sys.argv) >= 4 else 2
class CupState:
    """A node in the two-cup pouring puzzle: current fill levels of cups X
    and Y plus a back-pointer to the state this one was derived from."""

    volume_x = 0
    volume_y = 0
    parent = False

    def __init__(self, vol_x, vol_y, parent_state):
        self.volume_x = vol_x
        self.volume_y = vol_y
        self.parent = parent_state

    def empty_x(self):
        """Dump cup X."""
        return CupState(0, self.volume_y, self)

    def empty_y(self):
        """Dump cup Y."""
        return CupState(self.volume_x, 0, self)

    def fill_x(self):
        """Fill cup X to the brim."""
        return CupState(MAX_VOL_X, self.volume_y, self)

    def fill_y(self):
        """Fill cup Y to the brim."""
        return CupState(self.volume_x, MAX_VOL_Y, self)

    def pour_x_to_y(self):
        """Pour from X into Y until Y is full or X is empty."""
        poured = min(MAX_VOL_Y - self.volume_y, self.volume_x)
        return CupState(self.volume_x - poured, self.volume_y + poured, self)

    def pour_y_to_x(self):
        """Pour from Y into X until X is full or Y is empty."""
        poured = min(MAX_VOL_X - self.volume_x, self.volume_y)
        return CupState(self.volume_x + poured, self.volume_y - poured, self)

    def isTargetVolume(self):
        """True when either cup holds exactly TARGET_VOLUME units."""
        return TARGET_VOLUME in (self.volume_x, self.volume_y)
# Breadth-first search over cup states: start with both cups empty and
# explore all six moves from each state until a target volume is reached.
bfs_queue = [];
initial_state = CupState(0 , 0 , False)
bfs_queue.append(initial_state)
# visited_states[y][x] marks fill-level pairs that were already expanded.
visited_states = [[0 for x in range(MAX_VOL_X + 1)] for y in range(MAX_VOL_Y + 1)]
while len(bfs_queue) > 0:
    current_state = bfs_queue.pop(0)
    if visited_states[current_state.volume_y][current_state.volume_x]:
        continue
    if current_state.isTargetVolume():
        # Reconstruct the path by walking the parent chain back to the
        # start (whose parent sentinel is False), then print it forward.
        num_x = []
        num_y = []
        while True:
            num_x.append(current_state.volume_x)
            num_y.append(current_state.volume_y)
            current_state = current_state.parent
            if current_state == False:
                break
        print("Found a solution with %s steps!" % (len(num_x) - 1))
        while len(num_x):
            print("(%s , %s)" % (num_x.pop() , num_y.pop()))
        exit()
    visited_states[current_state.volume_y][current_state.volume_x] = True
    # Enqueue all six successor states (empty / pour / fill, each cup).
    for mutation in range(0,6):
        if mutation == 0: next_state = current_state.empty_x()
        elif mutation == 1: next_state = current_state.empty_y()
        elif mutation == 2: next_state = current_state.pour_x_to_y()
        elif mutation == 3: next_state = current_state.pour_y_to_x()
        elif mutation == 4: next_state = current_state.fill_x()
        elif mutation == 5: next_state = current_state.fill_y()
        #print next_state
        #print len(visited_states), len(visited_states[0]), next_state.volume_y, next_state.volume_x
        if visited_states[next_state.volume_y][next_state.volume_x] == False:
            bfs_queue.append(next_state)
print("No possible solutions!")
|
fyquah95/fyquah95.github.io
|
projects/cups/cups.py
|
Python
|
mit
| 2,458
|
try:
import hashlib as md5
except ImportError: # Python <2.5
import md5
import random
import types
import urllib.parse, urllib.error
import urllib.parse
from python_digest.utils import parse_parts, format_parts
# Parameters that must all be present in a client Digest response (RFC 2617).
_REQUIRED_DIGEST_RESPONSE_PARTS = ['username', 'realm', 'nonce', 'uri', 'response', 'algorithm',
                                   'opaque', 'qop', 'nc', 'cnonce']
# Parameters that must all be present in a server Digest challenge.
_REQUIRED_DIGEST_CHALLENGE_PARTS = ['realm', 'nonce', 'stale', 'algorithm',
                                    'opaque', 'qop']
def validate_uri(digest_uri, request_path):
    """True when the path component of *digest_uri*, URL-decoded, equals *request_path*."""
    uri_path = urllib.parse.urlparse(digest_uri)[2]
    return urllib.parse.unquote(uri_path) == request_path
def validate_nonce(nonce, secret):
    '''
    Is the nonce one that was generated by this library using the provided secret?
    '''
    pieces = nonce.split(':', 2)
    if len(pieces) != 3:
        return False
    timestamp, salt, _signature = pieces
    # Recreate the nonce from its timestamp and salt: only the correct
    # secret reproduces the embedded signature.
    return nonce == calculate_nonce(timestamp, secret, salt)
def calculate_partial_digest(username, realm, password):
    '''
    Calculate a partial digest that may be stored and used to authenticate future
    HTTP Digest sessions.
    '''
    # This is HA1 from RFC 2617: MD5(username:realm:password).
    credentials = "%s:%s:%s" % (username, realm, password)
    return md5.md5(credentials.encode('utf-8')).hexdigest()
def build_digest_challenge(timestamp, secret, realm, opaque, stale):
    '''
    Builds a Digest challenge that may be sent as the value of the 'WWW-Authenticate' header
    in a 401 or 403 response.
    'opaque' may be any value - it will be returned by the client.
    'timestamp' will be incorporated and signed in the nonce - it may be retrieved from the
    client's authentication request using get_nonce_timestamp()
    '''
    stale_token = 'true' if stale else 'false'
    challenge_parts = format_parts(realm=realm, qop='auth',
                                   nonce=calculate_nonce(timestamp, secret),
                                   opaque=opaque, algorithm='MD5',
                                   stale=stale_token)
    return 'Digest %s' % challenge_parts
def calculate_request_digest(method, partial_digest, digest_response=None,
uri=None, nonce=None, nonce_count=None, client_nonce=None):
'''
Calculates a value for the 'response' value of the client authentication request.
Requires the 'partial_digest' calculated from the realm, username, and password.
Either call it with a digest_response to use the values from an authentication request,
or pass the individual parameters (i.e. to generate an authentication request).
'''
if digest_response:
if uri or nonce or nonce_count or client_nonce:
raise Exception("Both digest_response and one or more "
"individual parameters were sent.")
uri = digest_response.uri
nonce = digest_response.nonce
nonce_count = digest_response.nc
client_nonce=digest_response.cnonce
elif not (uri and nonce and (nonce_count != None) and client_nonce):
raise Exception("Neither digest_response nor all individual parameters were sent.")
ha2 = md5.md5(("%s:%s" % (method, uri)).encode('utf-8')).hexdigest()
data = "%s:%s:%s:%s:%s" % (nonce, "%08x" % nonce_count, client_nonce, 'auth', ha2)
kd = md5.md5(("%s:%s" % (partial_digest, data)).encode('utf-8')).hexdigest()
return kd
def get_nonce_timestamp(nonce):
    '''
    Extract the timestamp from a Nonce. To be sure the timestamp was generated by this site,
    make sure you validate the nonce using validate_nonce().
    '''
    pieces = nonce.split(':', 2)
    if len(pieces) != 3:
        # Not in the timestamp:salt:signature shape produced here.
        return None
    try:
        return float(pieces[0])
    except ValueError:
        return None
def calculate_nonce(timestamp, secret, salt=None):
    '''
    Generate a nonce using the provided timestamp, secret, and salt. If the salt is not provided,
    (and one should only be provided when validating a nonce) one will be generated randomly
    in order to ensure that two simultaneous requests do not generate identical nonces.
    '''
    if not salt:
        salt = ''.join(random.choice('0123456789ABCDEF') for _ in range(4))
    signature = md5.md5(("%s:%s:%s" % (timestamp, salt, secret)).encode('utf-8')).hexdigest()
    # Layout: timestamp:salt:MD5(timestamp:salt:secret)
    return "%s:%s:%s" % (timestamp, salt, signature)
def build_authorization_request(username, method, uri, nonce_count, digest_challenge=None,
                                realm=None, nonce=None, opaque=None, password=None,
                                request_digest=None, client_nonce=None):
    '''
    Builds an authorization request that may be sent as the value of the 'Authorization'
    header in an HTTP request.
    Either a digest_challenge object (as returned from parse_digest_challenge) or its required
    component parameters (nonce, realm, opaque) must be provided.
    The nonce_count should be the last used nonce_count plus one.
    Either the password or the request_digest should be provided - if provided, the password
    will be used to generate a request digest. The client_nonce is optional - if not provided,
    a random value will be generated.
    '''
    # Generate a 32-hex-char client nonce when the caller did not supply one.
    if not client_nonce:
        client_nonce = ''.join([random.choice('0123456789ABCDEF') for x in range(32)])
    # The challenge object and its individual components are mutually exclusive.
    if digest_challenge and (realm or nonce or opaque):
        raise Exception("Both digest_challenge and one or more of realm, nonce, and opaque"
                        "were sent.")
    if digest_challenge:
        # NOTE(review): a raw header is accepted only when it is `bytes`,
        # but parse_digest_challenge() / is_digest_challenge() compare
        # against str prefixes, so a bytes header would fail to parse --
        # confirm whether this branch should test for str instead.
        if isinstance(digest_challenge, bytes):
            digest_challenge_header = digest_challenge
            digest_challenge = parse_digest_challenge(digest_challenge_header)
            if not digest_challenge:
                raise Exception("The provided digest challenge header could not be parsed: %s" %
                                digest_challenge_header)
        realm = digest_challenge.realm
        nonce = digest_challenge.nonce
        opaque = digest_challenge.opaque
    elif not (realm and nonce and opaque):
        raise Exception("Either digest_challenge or realm, nonce, and opaque must be sent.")
    # Exactly one of password / request_digest must be provided; the
    # password path derives HA1 then the full request digest.
    if password and request_digest:
        raise Exception("Both password and calculated request_digest were sent.")
    elif not request_digest:
        if not password:
            raise Exception("Either password or calculated request_digest must be provided.")
        partial_digest = calculate_partial_digest(username, realm, password)
        request_digest = calculate_request_digest(method, partial_digest, uri=uri, nonce=nonce,
                                                  nonce_count=nonce_count,
                                                  client_nonce=client_nonce)
    return 'Digest %s' % format_parts(username=username, realm=realm, nonce=nonce, uri=uri,
                                      response=request_digest, algorithm='MD5', opaque=opaque,
                                      qop='auth', nc='%08x' % nonce_count, cnonce=client_nonce)
def _check_required_parts(parts, required_parts):
if parts == None:
return False
missing_parts = [part for part in required_parts if not part in parts]
return len(missing_parts) == 0
def _build_object_from_parts(parts, names):
obj = type("", (), {})()
for part_name in names:
setattr(obj, part_name, parts[part_name])
return obj
def parse_digest_response(digest_response_string):
    '''
    Parse the parameters of a Digest response. The input is a comma separated list of
    token=(token|quoted-string). See RFCs 2616 and 2617 for details.
    Known issue: this implementation will fail if there are commas embedded in quoted-strings.
    '''
    parts = parse_parts(digest_response_string, defaults={'algorithm': 'MD5'})
    if not _check_required_parts(parts, _REQUIRED_DIGEST_RESPONSE_PARTS):
        return None
    # The nonce count must be a non-empty hexadecimal string.
    nc_value = parts['nc']
    if not nc_value or any(c not in '0123456789abcdefABCDEF' for c in nc_value):
        return None
    parts['nc'] = int(nc_value, 16)
    digest_response = _build_object_from_parts(parts, _REQUIRED_DIGEST_RESPONSE_PARTS)
    # Only MD5 with qop=auth is supported (case-insensitive here).
    if (digest_response.algorithm.lower(), digest_response.qop.lower()) != ('md5', 'auth'):
        return None
    return digest_response
def is_digest_credential(authorization_header):
    '''
    Determines if the header value is potentially a Digest response sent by a client (i.e.
    if it starts with 'Digest ' (case insensitive).
    '''
    scheme = authorization_header[:7]
    return scheme.lower() == 'digest '
def parse_digest_credentials(authorization_header):
    '''
    Parses the value of an 'Authorization' header. Returns an object with properties
    corresponding to each of the recognized parameters in the header.
    '''
    if not is_digest_credential(authorization_header):
        return None
    # Strip the leading 'Digest ' scheme token before parsing the params.
    payload = authorization_header[7:]
    return parse_digest_response(payload)
def is_digest_challenge(authentication_header):
    '''
    Determines if the header value is potentially a Digest challenge sent by a server (i.e.
    if it starts with 'Digest ' (case insensitive).
    '''
    scheme = authentication_header[:7]
    return scheme.lower() == 'digest '
def parse_digest_challenge(authentication_header):
    '''
    Parses the value of a 'WWW-Authenticate' header. Returns an object with properties
    corresponding to each of the recognized parameters in the header.
    '''
    if not is_digest_challenge(authentication_header):
        return None
    # Strip the 'Digest ' prefix and parse the comma-separated parameters.
    parts = parse_parts(authentication_header[7:], defaults={'algorithm': 'MD5',
                                                             'stale': 'false'})
    if not _check_required_parts(parts, _REQUIRED_DIGEST_CHALLENGE_PARTS):
        return None
    # 'stale' arrives as text; normalize it to a boolean.
    parts['stale'] = parts['stale'].lower() == 'true'
    digest_challenge = _build_object_from_parts(parts, _REQUIRED_DIGEST_CHALLENGE_PARTS)
    # Only MD5 / qop=auth challenges are supported.
    # NOTE(review): unlike parse_digest_response(), this comparison is
    # case-sensitive ('MD5', 'auth'), so e.g. algorithm='md5' is rejected
    # here but accepted there -- confirm which behavior is intended.
    if ('MD5', 'auth') != (digest_challenge.algorithm, digest_challenge.qop):
        return None
    return digest_challenge
|
webitup/python3-digest
|
python_digest/__init__.py
|
Python
|
bsd-3-clause
| 10,127
|
# Branch / channel identifiers for this single-locale Android repack.
BRANCH = "mozilla-aurora"
MOZ_UPDATE_CHANNEL = "aurora"
MOZILLA_DIR = BRANCH
# Toolchain and signing helpers used during the repack.
JAVA_HOME = "/tools/jdk6"
JARSIGNER = "tools/release/signing/mozpass.py"
OBJDIR = "obj-l10n"
EN_US_BINARY_URL = "http://stage.mozilla.org/pub/mozilla.org/mobile/nightly/latest-%s-android/en-US" % (BRANCH)
# Upload (stage) and update-server (AUS) endpoints; the commented lines
# are the dev-staging alternatives.
#STAGE_SERVER = "dev-stage01.srv.releng.scl3.mozilla.com"
STAGE_SERVER = "stage.mozilla.org"
STAGE_USER = "ffxbld"
STAGE_SSH_KEY = "~/.ssh/ffxbld_dsa"
#AUS_SERVER = "dev-stage01.srv.releng.scl3.mozilla.com"
AUS_SERVER = "aus3-staging.mozilla.org"
AUS_USER = "ffxbld"
AUS_SSH_KEY = "~/.ssh/auspush"
AUS_UPLOAD_BASE_DIR = "/opt/aus2/incoming/2/Fennec"
AUS_BASE_DIR = BRANCH + "/%(build_target)s/%(buildid)s/%(locale)s"
HG_SHARE_BASE_DIR = "/builds/hg-shared"
# Mozharness configuration dict consumed by the single-locale scripts.
config = {
    "log_name": "single_locale",
    "objdir": OBJDIR,
    "locales_file": "%s/mobile/android/locales/all-locales" % MOZILLA_DIR,
    "locales_dir": "mobile/android/locales",
    "ignore_locales": ["en-US"],
    "repos": [{
        "repo": "http://hg.mozilla.org/releases/mozilla-aurora",
        "revision": "default",
        "dest": MOZILLA_DIR,
    },{
        "repo": "http://hg.mozilla.org/build/buildbot-configs",
        "revision": "default",
        "dest": "buildbot-configs"
    },{
        "repo": "http://hg.mozilla.org/build/tools",
        "revision": "default",
        "dest": "tools"
    },{
        "repo": "http://hg.mozilla.org/build/compare-locales",
        "revision": "RELEASE_AUTOMATION"
    }],
    "hg_l10n_base": "http://hg.mozilla.org/releases/l10n/%s" % BRANCH,
    "hg_l10n_tag": "default",
    'vcs_share_base': HG_SHARE_BASE_DIR,
    "l10n_dir": MOZILLA_DIR,
    # Environment for the repack step itself.
    "repack_env": {
        "JAVA_HOME": JAVA_HOME,
        "PATH": JAVA_HOME + "/bin:%(PATH)s",
        "MOZ_OBJDIR": OBJDIR,
        "EN_US_BINARY_URL": EN_US_BINARY_URL,
        "JARSIGNER": "%(abs_work_dir)s/" + JARSIGNER,
        "LOCALE_MERGEDIR": "%(abs_merge_dir)s/",
        "MOZ_UPDATE_CHANNEL": MOZ_UPDATE_CHANNEL,
    },
    # TODO ideally we could get this info from a central location.
    # However, the agility of these individual config files might trump that.
    "upload_env": {
        "UPLOAD_USER": STAGE_USER,
        "UPLOAD_SSH_KEY": STAGE_SSH_KEY,
        "UPLOAD_HOST": STAGE_SERVER,
        "POST_UPLOAD_CMD": "post_upload.py -b mozilla-aurora-android-l10n -p mobile -i %(buildid)s --release-to-latest --release-to-dated",
        "UPLOAD_TO_TEMP": "1",
    },
    "merge_locales": True,
    "make_dirs": ['config'],
    "mozilla_dir": MOZILLA_DIR,
    "mozconfig": "%s/mobile/android/config/mozconfigs/android/l10n-nightly" % MOZILLA_DIR,
    "jarsigner": JARSIGNER,
    "signature_verification_script": "tools/release/signing/verify-android-signature.sh",
    # AUS
    "build_target": "Android_arm-eabi-gcc3",
    "aus_server": AUS_SERVER,
    "aus_user": AUS_USER,
    "aus_ssh_key": AUS_SSH_KEY,
    "aus_upload_base_dir": AUS_UPLOAD_BASE_DIR,
    "aus_base_dir": AUS_BASE_DIR,
}
|
ctalbert/mozharness
|
configs/single_locale/mozilla-aurora_android.py
|
Python
|
mpl-2.0
| 2,960
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as SsnProvider
class Provider(SsnProvider):
    # Format template: '?' is replaced by a random letter and each '#' by a
    # random digit via bothify().
    ssn_formats = ("?#########",)
    @classmethod
    def ssn(cls):
        # Pick a format, fill in the placeholders, and upper-case the
        # leading letter.
        return cls.bothify(cls.random_element(cls.ssn_formats)).upper()
|
Nebucatnetzer/tamagotchi
|
pygame/lib/python3.4/site-packages/faker/providers/ssn/zh_TW/__init__.py
|
Python
|
gpl-2.0
| 267
|
"""
Django settings for cavelanguage project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import sys
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this secret key is committed to source control; it should
# be loaded from an environment variable and the committed value rotated.
SECRET_KEY = '(a+jy((!m)316ce*y)y&_75vvc76vw((wuw$u-5y11&bfkl(@5'
# SECURITY WARNING: don't run with debug turned on in production!
# Debug mode is opt-in via the CL_DEBUG environment variable.
DEBUG = True if os.environ.get('CL_DEBUG',None) else False
ALLOWED_HOSTS = ['cavelanguage.herokuapp.com','www.cavelanguage.org','www.cavelanguage.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djax',
'cavelanguage',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cavelanguage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cavelanguage.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {}
if os.environ.get('CL_RUNLOCAL',None):
    # Local development: explicit Postgres connection from DB_* env vars.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': os.environ.get('DB_NAME','cavelanguage'),
            'USER': os.environ.get('DB_USER',''),
            'PASSWORD': os.environ.get('DB_PASSWORD',''),
        }
    }
else:
    # Deployed (e.g. Heroku): connection parsed from DATABASE_URL.
    DATABASES['default'] = dj_database_url.config()
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'site_media', 'static')
local_static = os.environ.get('CL_LOCALSTATIC',None)
# Storage
if not local_static:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME','')
STATIC_HOST = os.environ.get('CL_STATIC_HOST','//s3.amazonaws.com')
STATIC_URL = '%s/%s/' % (STATIC_HOST,AWS_STORAGE_BUCKET_NAME)
AXILENT_API_KEY = os.environ.get('AXILENT_API_KEY','')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'formatter': 'simple',
'class': 'logging.StreamHandler'
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'DEBUG',
},
'djax': {
'handlers': ['console'],
'level': 'DEBUG',
}
}
}
|
Axilent/cave-language
|
cavelanguage/settings.py
|
Python
|
mit
| 4,870
|
# -*- coding: utf-8 -*-
"""
release
~~~~~~~
Helper script that performs a release. Does pretty much everything
automatically for us.
:copyright: (c) 2014 by the J5.
:license: BSD, see LICENSE for more details.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pkg_resources
import sys
import os
import re
from datetime import datetime, date
from subprocess import Popen, PIPE
_date_clean_re = re.compile(r'(\d+)(st|nd|rd|th)')
def parse_changelog():
    # Find the newest release entry in CHANGES: a "Version X.Y" heading,
    # its '-' underline of matching width, then a "released on <date>"
    # line.  Returns (version_string, release_datetime).
    with open('CHANGES') as f:
        lineiter = iter(f)
        for line in lineiter:
            match = re.search('^Version\s+(.*)', line.strip())
            if match is None:
                continue
            version = match.group(1).strip()
            # NOTE: lineiter.next() is Python 2 only; this script uses
            # Python 2 syntax throughout (see the print-chevron helpers).
            if lineiter.next().count('-') != len(match.group(0)):
                continue
            # Skip blank lines until the release metadata line.
            while True:
                change_info = lineiter.next().strip()
                if change_info:
                    break
            match = re.search(r'released on (.*)', change_info)
            if match is None:
                continue
            datestr = match.groups()[0]
            return version, parse_date(datestr)
def parse_date(string):
    """Parse a date like 'March 1st 2014', stripping ordinal suffixes first."""
    cleaned = _date_clean_re.sub(r'\1', string)
    return datetime.strptime(cleaned, '%B %d %Y')
def bump_version(version):
    """Increment the last numeric component of a dotted version string.

    e.g. '1.2.3' -> '1.2.4'.  Calls fail() when a component is not numeric.
    """
    try:
        # A list comprehension (not map()) keeps this correct on Python 3,
        # where map() returns a lazy iterator that cannot be index-assigned.
        parts = [int(part) for part in version.split('.')]
    except ValueError:
        fail('Current version is not numeric')
    parts[-1] += 1
    return '.'.join(str(part) for part in parts)
def set_filename_version(filename, version_number, pattern):
    """Rewrite the `pattern = '<old>'` assignment in *filename* to hold
    *version_number*, preserving the surrounding text and quotes.

    Calls fail() when no matching assignment was found.
    """
    changed = []

    def inject_version(match):
        # Record that a substitution happened and splice the new version
        # between the original quote groups.
        before, old, after = match.groups()
        changed.append(True)
        return before + version_number + after

    with open(filename) as f:
        # Inline flags moved to the start of the pattern: a trailing
        # "(?sm)" group is deprecated and raises an error on Python 3.11+.
        contents = re.sub(r"(?sm)^(\s*%s\s*=\s*')(.+?)(')" % pattern,
                          inject_version, f.read())

    if not changed:
        fail('Could not find %s in %s', pattern, filename)

    with open(filename, 'w') as f:
        f.write(contents)
def set_init_version(version):
    # Stamp __version__ in the package's __init__.py.
    info('Setting __init__.py version to %s', version)
    set_filename_version('zask/__init__.py', version, '__version__')
def set_setup_version(version):
    # Stamp the version keyword argument in setup.py.
    info('Setting setup.py version to %s', version)
    set_filename_version('setup.py', version, 'version')
def build_and_upload():
    # Build an sdist and upload it to PyPI; blocks until done.
    Popen([sys.executable, 'setup.py', 'sdist', 'upload', '-r', 'pypi']).wait()
def fail(message, *args):
    # Print an error to stderr and abort (Python 2 print-chevron syntax).
    print >> sys.stderr, 'Error:', message % args
    sys.exit(1)
def info(message, *args):
    # Informational logging to stderr.
    print >> sys.stderr, message % args
def get_git_tags():
    # Set of all tag names known to the local git repository.
    return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[
        0].splitlines())
def git_is_clean():
    # True when `git diff --quiet` reports no unstaged changes.
    return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
    # Commit all tracked changes with the formatted message.
    message = message % args
    Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
    # Create a tag named *tag* at HEAD.
    info('Tagging "%s"', tag)
    Popen(['git', 'tag', tag]).wait()
def main():
    # Run from the repository root (one level above this script).
    os.chdir(os.path.join(os.path.dirname(__file__), '..'))
    version, release_date = parse_changelog()
    dev_version = bump_version(version) + '-dev'
    info('Releasing %s, release date %s',
         version, release_date.strftime('%d/%m/%Y'))
    tags = get_git_tags()
    # Sanity checks: the tag must be new, the changelog date must be
    # today, and the working tree must be clean before touching files.
    if version in tags:
        fail('Version "%s" is already tagged', version)
    if release_date.date() != date.today():
        fail('Release date is not today (%s != %s)',
             release_date.date(), date.today())
    if not git_is_clean():
        fail('You have uncommitted changes in git')
    # Stamp the release version, commit, tag, publish, then move the
    # working tree back to a '-dev' version (left uncommitted).
    # NOTE(review): set_setup_version() is defined but never called here;
    # confirm whether setup.py is meant to be stamped as well.
    set_init_version(version)
    make_git_commit('Bump version number to %s', version)
    make_git_tag(version)
    build_and_upload()
    set_init_version(dev_version)
|
j-5/zask
|
bin/release.py
|
Python
|
bsd-3-clause
| 3,872
|
# coding: utf-8
import numpy as np
from .cameraConfig import Camera_config
from .._global import OptionalModule
try:
from PIL import ImageTk, Image
except (ModuleNotFoundError, ImportError):
ImageTk = OptionalModule("pillow")
Image = OptionalModule("pillow")
try:
import cv2
except (ModuleNotFoundError, ImportError):
cv2 = OptionalModule("opencv-python")
class Camera_config_with_boxes(Camera_config):
  """
  Config window for camera, with lines to highlight boxes on the image
  Used for VE blocks when the selection is not interactive (unlike ve_config)
  """
  def __init__(self, camera, boxes):
    # boxes: iterable of (min_y, min_x, height, width) regions in
    # full-image pixel coordinates, outlined over the preview.
    self.boxes = boxes
    Camera_config.__init__(self, camera)
  def clamp(self, t: tuple) -> tuple:
    # Clamp the scalar coordinate of a (row, col-slice) / (row-slice, col)
    # index pair so it stays inside the displayed image bounds.
    if isinstance(t[0], slice):
      return t[0], min(max(0, t[1]), self.img_shape[1] - 1)
    else:
      return min(max(0, t[0]), self.img_shape[0] - 1), t[1]
  def draw_box(self, box, img):
    # Outline *box* on *img* in place, one edge (row or column run) at a
    # time.
    miny, minx, h, w = box
    maxy = miny + h
    maxx = minx + w
    for s in [
        (miny, slice(minx, maxx)),
        (maxy, slice(minx, maxx)),
        (slice(miny, maxy), minx),
        (slice(miny, maxy), maxx)
    ]:
      # Turn these pixels white or black for highest possible contrast
      s = self.clamp(s)
      img[s] = 255 * int(np.mean(img[s]) < 128)
  def resize_img(self, sl: tuple) -> None:
    # Resize the zoomed sub-image to the display size, then re-project
    # each box from full-image coordinates into the zoom window and draw
    # it on the resized copy before handing it to Tk.
    rimg = cv2.resize(self.img8[sl[1], sl[0]], tuple(reversed(self.img_shape)),
                      interpolation=0)
    for b in self.boxes:
      lbox = [0] * 4
      for i in range(4):
        # Shift by the zoom-window origin, normalize by the window span,
        # then scale into display pixels.
        n = b[i] - self.zoom_window[i % 2] * self.img.shape[i % 2]
        n /= (self.zoom_window[2 + i % 2] - self.zoom_window[i % 2])
        lbox[i] = int(n / self.img.shape[i % 2] * self.img_shape[i % 2])
      self.draw_box(lbox, rimg)
    self.c_img = ImageTk.PhotoImage(Image.fromarray(rimg))
|
LaboratoireMecaniqueLille/crappy
|
crappy/tool/cameraConfigBoxes.py
|
Python
|
gpl-2.0
| 1,822
|
import xbmcaddon
import xbmcgui
import xbmc
import os
import sys
import re
import json
__addon__ = xbmcaddon.Addon()
__plugin__ = __addon__.getAddonInfo('name')
__addonid__ = __addon__.getAddonInfo('id')
__addonversion__ = __addon__.getAddonInfo('version')
__language__ = __addon__.getLocalizedString
__icon__ = __addon__.getAddonInfo('icon')
__cachedir__ = __addon__.getAddonInfo('profile')
__settings__ = xbmcaddon.Addon(id='plugin.program.1.search')
__cwd__ = xbmc.translatePath(__addon__.getAddonInfo('path')).decode('utf-8')
BASE_RESOURCE_PATH = xbmc.translatePath(os.path.join(__cwd__, 'resources', 'lib'))
PLUGINPATH = xbmc.translatePath(os.path.join(__cwd__))
sys.path.append(BASE_RESOURCE_PATH)
PLEX_CACHEDATA = __cachedir__
ONE_VERSION = __addonversion__
'''
# test for plexbmc
try:
plexbmc_addon = xbmcaddon.Addon ('plugin.video.plexbmc')
except:
plexbmc_addon = False
# add plex to path
if plexbmc_addon:
plexbmc_path = plexbmc_addon.getAddonInfo('path')
print 'plexbmc_path = %s' % plexbmc_path
sys.path.append (xbmc.translatePath( os.path.join( plexbmc_path,'resources', 'lib' ) ))
from plexgdm import plexgdm
print 'plexbmc server list=%s' % plexgdm().getServerList()
plexbmc_profile = plexbmc_addon.getAddonInfo('profile')
print 'plexbmc_profile=%s' % plexbmc_profile
'''
#ustv now directory
#ustv_files = get_search_dirs("plugin.video.ustvnow/live?mode=live")
'''
from one import Plexbmc
plexbmc = Plexbmc()
params = {"properties":["art", "genre", "plot", "title", "originaltitle", "year", "rating", "thumbnail", "playcount", "file", "fanart"],
"sort": { "method": "label"}
}
resp = plexbmc.send_json('?url=http%3A//192.168.1.102%3A32400/library/metadata/8958/&mode=1', params)
plexbmc.print_response(resp, 'TEST query', True)
#youtube = Youtube()
'''
# get top level directories
print '1 SEARCH -> STARTED'
#one = One()
#one.search('saul')
# all path
#plugin://plugin.video.plexbmc/?url=http%3A//192.168.1.102%3A32400/library/sections/2/all&mode=0
# search path
#plugin://plugin.video.plexbmc/?url=http%3A//192.168.1.102%3A32400/library/sections/2/search%3Ftype%3D4&mode=0
if ( __name__ == "__main__" ):
searchstring = ''
try:
params = dict( arg.split( "=" ) for arg in sys.argv[ 1 ].split( "&" ) )
searchstring = params.get("searchstring")
searchstring = urllib.unquote_plus(searchstring)
except:
keyboard = xbmc.Keyboard( '', __language__(32101), False )
print '1 SEARCH -> D0 MODAL for KEYBOARD'
keyboard.setDefault(searchstring)
keyboard.doModal()
if ( keyboard.isConfirmed() ):
searchstring = keyboard.getText()
if searchstring:
print '1 SEARCH -> START GUI'
import gui
ui = gui.GUI( "script-globalsearch-main.xml", __cwd__, "Default", searchstring=searchstring )
print '1 SEARCH -> D0 MODAL for GUI'
ui.doModal()
del ui
print '1 SEARCH -> ENDED'
''' this is good!
# get all video plugins
json_query = uni('{"jsonrpc":"2.0","method":"Addons.GetAddons","params":{"type":"xbmc.addon.video","content":"video","enabled":true,"properties":["path","name"]}, "id": 1 }')
plugins_files = get_files(json_query)
'''
#xbmcgui.Dialog().ok(addonname, str(plex), line2, line3)
'''
#get actioncodes from https://github.com/xbmc/xbmc/blob/master/xbmc/guilib/Key.h
ACTION_PREVIOUS_MENU = 10
#get actioncodes from https://github.com/xbmc/xbmc/blob/master/xbmc/guilib/Key.h
ACTION_PREVIOUS_MENU = 10
class ListDiag(xbmcgui.Window):
def __init__(self):
self.strActionInfo = xbmcgui.ControlLabel(250, 80, 200, 200, '', 'font14', '0xFFBBBBFF')
self.addControl(self.strActionInfo)
self.strActionInfo.setLabel('Push BACK to quit')
self.list = xbmcgui.ControlList(200, 150, 800, 400)
self.addControl(self.list)
self.setFocus(self.list)
def onAction(self, action):
if action == ACTION_PREVIOUS_MENU:
self.close()
def onControl(self, control):
if control == self.list:
item = self.list.getSelectedItem()
self.message('You selected : ' + item.getLabel())
def message(self, message):
dialog = xbmcgui.Dialog()
dialog.ok(" My message title", message)
def add_items(self, lc):
for i in lc:
self.list.addItem(str(i))
print 'plex_files', plex_files
mydisplay = ListDiag()
mydisplay.add_items(plex_files)
mydisplay.doModal()
del mydisplay
'''
|
arKtelix/plugin.program.1.search
|
1search.py
|
Python
|
gpl-2.0
| 4,431
|
class Holder(object):
    """
    A holder is used to encapsulate messages between Rockit and its plugins.
    It creates a standard way of communication.

    This object contains common functionality for all holders.
    """

    def __init__(self):
        # Maps group name -> list of items (or an arbitrary object when a
        # group was written with override=True).
        self._content = dict()

    def append(self, item, group='items', override=False):
        """
        Append arbitrary item to holder.

        If item is a list it is merged element-wise (extend); otherwise it
        is appended.  With override=True the group is replaced wholesale.
        """
        if override:
            self._content[group] = item
        else:
            self.create_group(group)
            # isinstance (rather than `type(item) is list`) also merges
            # list subclasses element-wise.
            if isinstance(item, list):
                self._content[group].extend(item)
            else:
                self._content[group].append(item)

    def consume(self):
        """
        Consume contents from holder (read it and reset holder afterwards)
        """
        result = self.get_content()
        self.reset()
        return result

    def create_group(self, group):
        """
        Create a new group in content dict
        """
        if group not in self._content:
            self.reset_group(group)

    def extend(self, holder):
        """
        Extend current holder with another one
        """
        # Iterate items() once instead of re-reading get_content() per key.
        for key, value in holder.get_content().items():
            self.append(value, key)

    def get_content(self):
        """
        Get contents from holder
        """
        return self._content

    def reset(self):
        """
        Clear contents from this holder
        """
        self._content = dict()

    def reset_group(self, group):
        """
        Reset specific group
        """
        if group:
            self._content[group] = list()
|
acreations/rockit-server
|
rockit/core/holders/holder.py
|
Python
|
mit
| 1,708
|
from django.conf.urls import patterns, url
# URL routes for the dashboard app.  patterns() resolves the old-style
# string view names relative to 'tendenci.apps.dashboard'; the root and
# /new/ both map to views.new, /old/ keeps the legacy index view.
urlpatterns = patterns('tendenci.apps.dashboard',
    url(r'^$', 'views.new', name="dashboard"),
    url(r'^new/$', 'views.new', name="dashboard-new"),
    url(r'^old/$', 'views.index', name="dashboard-old"),
    url(r'^customize/$', 'views.customize', name="dashboard_customize"),
)
|
alirizakeles/tendenci
|
tendenci/apps/dashboard/urls.py
|
Python
|
gpl-3.0
| 328
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from .plugin import Plugin
from .server import *
|
WCCCEDU/twitter-commons
|
src/python/twitter/common/http/__init__.py
|
Python
|
apache-2.0
| 950
|
# encoding=utf-8
__author__ = 'Q-Whai'
'''
DESC: 这个是测试程序的入口
Blog: http://blog.csdn.net/lemon_tree12138
Create Date: 2016/2/24
Last Modify: 2016/3/9
version: 0.0.1
'''
import sys
import config.config as conf
import cipher.image_cipher as cipher
from games.game_2048 import Game2048
import cipher.hash_cipher as hah
import cipher.base64_cipher as base64
import db.db_server as helper
import db.db_config as db_conf
# ----------------------------------------- #
#    Image-based information hiding logic   #
# ----------------------------------------- #
def data_cipher(argv=None):
    """Hide data in an image via cipher.data_append.

    argv follows sys.argv layout; argv[1..3] are forwarded verbatim to
    cipher.data_append -- presumably (carrier, payload, output); confirm
    against cipher.image_cipher.
    """
    print("正在进行数据隐藏...")
    cipher.data_append(argv[1], argv[2], argv[3])
# ----------------------------------------- #
#          Run the 2048 game logic          #
# ----------------------------------------- #
def play_game(argv=None):
    """Start an interactive 2048 game (argv is ignored)."""
    print('玩游戏2048')
    game = Game2048()
    game.play()
# ----------------------------------------- #
#               Hashing logic               #
# ----------------------------------------- #
def hash_cipher(argv=None):
    """Print the SHA-1 digest of a hard-coded file (argv is ignored).

    NOTE(review): the path F:/Temp/Trojans.zip is a local test fixture;
    taking the path from argv would make this reusable.
    """
    print(hah.hash_sha1("F:/Temp/Trojans.zip"))
# ----------------------------------------- #
#   Base64 encode/decode of a sample text   #
# ----------------------------------------- #
def base64_cipher(argv=None):
    """Round-trip a sample string through the project's base64 wrapper.

    NOTE(review): the `base64` name here is cipher.base64_cipher (see the
    import block), shadowing the stdlib module of the same name.
    """
    ciphertext = base64.encode("这是一段明文。")
    print("base64加密结果:%s" % ciphertext)
    plaintext = base64.decode(ciphertext)
    print("base64解密结果:%s" % plaintext)
# ----------------------------------------- #
#           Database demo function          #
# ----------------------------------------- #
def database_demo(argv=None):
    """Smoke-test the DB layer: list databases, then create the demo table."""
    print(argv)
    db_server = helper.DatabaseServer()
    print(db_server.fetch_all('show databases;'))
    db_server.update('CREATE TABLE %s(id INT NOT NULL AUTO_INCREMENT, name VARCHAR(20),'
                     ' sex INT, age INT, info VARCHAR(50), PRIMARY KEY (id));' % db_conf.DB_TABLE_NAME)
    db_server.close()
# Maps each mode tag (from config) onto its handler function.
# NOTE(review): the name `operator` shadows the stdlib operator module.
operator = {
    conf.IMAGE_CIPHER_APPEND: data_cipher,
    conf.IMAGE_CIPHER_HIDE: data_cipher,
    conf.GAME_2048: play_game,
    conf.HASH_CIPHER_SHA1: hash_cipher,
    conf.HASH_CIPHER_DM5: hash_cipher,
    conf.HASH_CIPHER_SHA224: hash_cipher,
    conf.HASH_CIPHER_SHA256: hash_cipher,
    conf.HASH_CIPHER_SHA384: hash_cipher,
    conf.HASH_CIPHER_SHA512: hash_cipher,
    conf.BASE64_CIPHER: base64_cipher,
    conf.DATABASE_DEMO: database_demo,
}
# ----------------------------------------- #
#        Main dispatch for the program      #
# ----------------------------------------- #
def execute(argv=None, mode=conf.DEMO_MODE):
    """Look up the handler for `mode` and invoke it with argv.

    NOTE(review): operator.get(mode) returns None for an unknown mode,
    which then raises TypeError when called.
    """
    operator.get(mode)(argv)
# ----------------------------------------- #
#            Program entry point            #
# ----------------------------------------- #
if "__main__" == __name__:
    execute(sys.argv)
|
William-Hai/SimpleDemo-python
|
demo_main.py
|
Python
|
gpl-3.0
| 2,966
|
import logging
import logging.handlers
import time
import multiprocessing
from datetime import datetime
import enum
import flask
from flask import (
Flask, flash, redirect, render_template, request,
session, url_for
)
from flask.ext.sqlalchemy import SQLAlchemy
from flask_wtf import Form
import itertools
import jinja2
import json
import requests
from sqlalchemy import and_, not_, or_
from sqlalchemy.exc import SQLAlchemyError
import werkzeug.security as ws
from wtforms import HiddenField, IntegerField, PasswordField, StringField
from wtforms.validators import DataRequired
from wtforms.widgets import HiddenInput
import wtforms
import threading
from app import go
from app import sgftools
# Application setup: config is loaded from config.py; StrictUndefined
# makes undefined template variables raise instead of rendering empty.
app = Flask(__name__)
app.config.from_object('config')
app.jinja_env.undefined = jinja2.StrictUndefined
if app.debug:
    logging.basicConfig(level=logging.DEBUG)
db = SQLAlchemy(app)
def use_log_file_handler():
    """Route app logging to a rotating file under generated/.

    For test runners etc. that want to log to files; using this function
    lets them all share the same behaviour.  Replaces any existing
    handlers and stops propagation to the root logger.
    """
    handler = logging.handlers.RotatingFileHandler(
        'generated/test.log', maxBytes=1000000, backupCount=5)
    handler.setLevel(logging.DEBUG)
    app.logger.handlers = []
    app.logger.propagate = False
    app.logger.addHandler(handler)
def async(f):
    """Decorator: run the wrapped callable in a background thread.

    NOTE(review): `async` became a reserved keyword in Python 3.7, so this
    file targets an older Python (it also imports flask.ext.*).  The
    wrapper discards both the Thread object and f's return value, and does
    not use functools.wraps, so f's name/docstring are lost on the wrapper.
    """
    def wrapper(*args, **kwargs):
        thr = threading.Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
    return wrapper
def redirect_url(default='front_page'):
    """ A simple helper function to redirect the user back to where they came
    from. See: http://flask.pocoo.org/docs/0.10/reqcontext/ and also
    here: http://stackoverflow.com/questions/14277067/redirect-back-in-flask

    Falls back through ?next=..., then the Referer header, then `default`.
    """
    return request.args.get('next') or \
        request.referrer or \
        url_for(default)
@app.template_filter('flash_bootstrap_category')
def flash_bootstrap_category(flash_category):
    """Map a Flask flash() category onto a Bootstrap alert class."""
    if flash_category in ('error', 'danger'):
        return 'danger'
    if flash_category in ('success', 'warning'):
        return flash_category
    # 'info' and anything unrecognised use the info styling.
    return 'info'
# Views
#
# Since view functions tend to have side-effects and to depend on global state,
# try to keep complexity (if, for...) out of them and move it into pure
# function helpers instead.
@app.route('/')
def front_page():
    """Landing page: logged-in users are sent straight to their status page."""
    if is_logged_in():
        return redirect(url_for('status'))
    return render_template_with_basics("frontpage.html")
@app.route('/game/<int:game_no>')
def game(game_no):
    """Render the board page for game `game_no`.

    Works out whose turn it is and whether the game has just been passed
    twice (which switches the template into scoring mode).
    """
    try:
        game = db.session.query(Game).filter_by(id=game_no).one()
    except SQLAlchemyError:
        flash("Game #{} not found".format(game_no))
        return redirect('/')
    sgf = game.sgf
    comments = game.comments
    color_turn = go.next_color(sgf).name
    is_your_turn = is_players_turn_in_game(game)
    is_passed_twice = go.is_sgf_passed_twice(sgf)
    form_data = {'game_no': game.id, 'data': sgf}
    form = PlayStoneForm(data=form_data)
    chatform = ChatForm(data=form_data)
    return render_template_with_basics(
        "game.html",
        game=game,
        black_user=game.black,
        white_user=game.white,
        color_turn=color_turn,
        form=form, chatform=chatform, game_no=game_no,
        on_turn=is_your_turn, with_scoring=is_passed_twice,
        comments=comments)
@app.route('/grab_game_comments', methods=['POST'])
def grab_game_comments():
    """AJAX endpoint: return one game's chat comments as JSON."""
    game_id = flask.request.form['game_id']
    db_game = db.session.query(Game).filter_by(id=game_id).one()
    return db_game.jsonify_comments()
@app.route('/chat/<int:game_no>', methods=['POST'])
def comment(game_no):
    """Attach a chat comment (posted via ChatForm) to a game."""
    try:
        game = db.session.query(Game).filter_by(id=game_no).one()
    except SQLAlchemyError:
        flash("Game #{} not found".format(game_no))
        return redirect('/')
    try:
        current_user = logged_in_user()
    except NoLoggedInPlayerException:
        flash("You must be logged in to comment.")
        return redirect(redirect_url())
    form = ChatForm()
    if form.validate_on_submit():
        comment = GameComment(game, form.comment.data, current_user)
        db.session.add(comment)
        db.session.commit()
        # empty body signals success to the posting JS
        return ''
    flash("Comment not validated!")
    return redirect(redirect_url())
def notify_user(username, content, commit_session=False):
    """Create a Notification carrying `content` for `username`.

    DB errors are reported via flash() instead of being raised.  Pass
    commit_session=True to commit immediately, or leave it False so the
    caller can batch several notifications into one commit.
    """
    try:
        user = db.session.query(User).filter_by(username=username).one()
        notification = Notification(user, content)
        db.session.add(notification)
        if commit_session:
            db.session.commit()
    except SQLAlchemyError:
        message = """We have made an error attempting to create a notification
        for the user: {}. We're sorry about this, but you can probably
        ignore it.""".format(username)
        flash(message)
@app.route('/play/<int:game_no>', methods=['POST'])
def play(game_no):
    """Accept a posted move (or resignation) for a game.

    Validates the posted SGF as a one-move continuation of the stored
    record, enforces turn order (resignations are allowed out of turn),
    stores the new position, and notifies both players if the game is
    now over.
    """
    app.logger.debug("play() called for game {}".format(game_no))
    try:
        game = db.session.query(Game).filter_by(id=game_no).one()
    except SQLAlchemyError:
        flash("Game #{} not found".format(game_no))
        return redirect('/')
    try:
        user = logged_in_user()
        app.logger.debug("play(): logged in user: {}".format(user))
    except NoLoggedInPlayerException:
        flash('You must be logged in to play a move.')
        app.logger.debug("play(): no logged in user")
        return redirect(redirect_url())
    arguments = request.form.to_dict()
    try:
        # 'response' is the full SGF including the candidate move
        go.check_continuation(old_sgf=game.sgf,
                              new_sgf=arguments['response'],
                              allowed_new_moves=1)
        app.logger.debug(
            "play(): valid SGF, ends: '{}'".format(
                arguments['response'][-12:]))
    except go.ValidationException as e:
        app.logger.debug("play(): invalid SGF received")
        flash("Invalid move: {}".format(e.args[0]))
        return redirect(url_for('game', game_no=game_no))
    except KeyError:
        flash("Invalid request.")
        return redirect(url_for('game', game_no=game_no))
    if (not is_players_turn_in_game(game) and
            not go.check_resignation(old_sgf=game.sgf,
                                     new_sgf=arguments['response'])):
        flash("It's not your turn in that game.")
        return redirect('/')
    game.sgf = arguments['response']
    game.last_move_time = datetime.now()
    game_result = go.get_game_result(game.sgf)
    game.result = game_result.value
    if game_result != go.GameResult.not_finished:
        # game just ended: notify both players with a link back to it
        result_summary = {
            go.GameResult.white_by_resign: 'white won by resignation',
            go.GameResult.white_by_count: 'white won on points',
            go.GameResult.black_by_resign: 'black won by resignation',
            go.GameResult.black_by_count: 'black won on points',
            go.GameResult.draw: ''}.get(game_result, '')
        game_url = url_for('game', game_no=game_no)
        view_game_link = """<a href="{}" class="game-link">
        View game</a>""".format(game_url)
        message = "Your game has ended, {}. {}".format(result_summary,
                                                       view_game_link)
        notify_user(game.black, message, commit_session=False)
        notify_user(game.white, message, commit_session=False)
    db.session.commit()
    if 'submit_and_next_game_button' in arguments:
        try:
            return redirect(
                url_for('game',
                        game_no=next_game_for_user(logged_in_user()).id))
        except NoPendingGamesException:
            return redirect(url_for('front_page'))
    return redirect(url_for('game', game_no=game_no))
@app.route('/challenge/<string:challenged>/', methods=['GET'])
@app.route('/challenge/', methods=['GET', 'POST'])
def challenge(challenged=""):
    """Issue a challenge: the opponent plays black, the logged-in
    challenger plays white, starting from an empty SGF record."""
    form = ChallengeForm()
    if form.validate_on_submit():
        game = Game(black=form.opponent.data,
                    white=logged_in_user(),
                    last_move_time=datetime.now(),
                    sgf="(;)")
        db.session.add(game)
        db.session.commit()
        return redirect(url_for('status'))
    elif request.method == 'POST':
        flash('There was a problem with the challenge form.')
    return render_template_with_basics("challenge.html", form=form,
                                       challenged=challenged)
@app.route('/users')
def users():
    """List up to 100 registered users."""
    query = db.session.query(User)
    # The use of 'all' turns this into a list, might be better
    # for it to simply iterate through the results.
    db_users = query.limit(100).all()
    return render_template_with_basics('list_users.html', user_list=db_users)
@app.route('/userprofile/<int:user_no>')
def user_profile(user_no):
    """Show one user's profile page; .one() raises for an unknown id."""
    db_user = db.session.query(User).filter(User.id == user_no).one()
    return render_template_with_basics('user_profile.html', user=db_user)
@app.route('/marknotificationread/', methods=['POST'])
def mark_notification_read():
    """AJAX endpoint: flag a single notification as read."""
    notify_id = flask.request.form['notify_id']
    query = db.session.query(Notification)
    db_notify = query.filter(Notification.id == notify_id).one()
    db_notify.unread = False
    db.session.commit()
    return flask.jsonify({'result': True})
@app.route('/status')
def status():
    """Dashboard: running games split by whose turn it is, plus unread
    notifications.  Anonymous visitors are bounced to the front page."""
    try:
        user = logged_in_user()
    except NoLoggedInPlayerException:
        return redirect('/')
    your_turn_games, not_your_turn_games = get_status_lists(user)
    unread_notifications = get_unread_notifications(user)
    return render_template_with_basics(
        "status.html",
        your_turn_games=your_turn_games,
        not_your_turn_games=not_your_turn_games,
        unread_notifications=unread_notifications)
def get_unread_notifications(username):
    """Return up to 100 unread notifications for `username`.

    Returns None (after flashing a message) on any SQLAlchemy error,
    e.g. when the username does not resolve to exactly one user.
    """
    try:
        user = db.session.query(User).filter(User.username == username).one()
        query = db.session.query(Notification)
        notifications = query.filter(and_((Notification.user_id == user.id),
                                     Notification.unread)).limit(100).all()
    except SQLAlchemyError as e:
        message = """We have made an error attempting to grab the notifications
        for the user: {}. We're sorry about this. The error was: {}.
        """.format(username, e)
        flash(message)
        notifications = None
    return notifications
def get_status_lists(user):
    """Return two lists of games for the player, split by on-turn or not.

    Sorts game lists with most time since last move first.

    Accesses database.
    """
    player_games = get_player_games(user)

    def sort_key(game):
        # games that never had a move sort as oldest
        t = game.last_move_time
        if t is None:
            t = datetime.min
        return t
    your_turn_games = [g for g in player_games
                       if user_to_move_in_game(g) == user]
    not_your_turn_games = [g for g in player_games
                           if user_to_move_in_game(g) != user]
    return (sorted(your_turn_games, key=sort_key),
            sorted(not_your_turn_games, key=sort_key))
def get_player_games(user):
    """Returns the list of games in which `user` is involved.

    Only includes running games, ie. not finished.

    Accesses database.
    """
    query = db.session.query(Game)
    games = query.filter(and_((Game.result == go.GameResult.not_finished.value),
                         or_(Game.black == user, Game.white == user))).all()
    return games
class NoPendingGamesException(Exception):
    """Raised when the user has no games waiting on their move."""
    pass
def next_game_for_user(user):
    """Return the most-stale game awaiting `user`'s move.

    Raises NoPendingGamesException when no game is waiting.
    """
    your_turn_games, _ = get_status_lists(user)
    if len(your_turn_games) < 1:
        raise NoPendingGamesException
    return your_turn_games[0]
def is_players_turn_in_game(game):
    """Test if it's the logged-in player's turn to move in `game`.

    Reads user from the session; False when nobody is logged in.
    """
    try:
        current_user = logged_in_user()
    except NoLoggedInPlayerException:
        return False
    next_in_game = user_to_move_in_game(game)
    return next_in_game == current_user
def user_to_move_in_game(game):
    """Return the user (as stored in Game.black/Game.white) to move.

    Accesses database. Return None if game is finished.
    """
    if game.finished:
        return None
    black_or_white = go.next_color(game.sgf)
    next_in_game = {go.Color.black: game.black,
                    go.Color.white: game.white}[black_or_white]
    return next_in_game
class FeedbackForm(Form):
    """Site feedback form; no validators, so every field is optional."""
    feedback_name = wtforms.StringField("Name:")
    feedback_email = wtforms.StringField("Email:")
    feedback_text = wtforms.TextAreaField("Feedback:")
@async
def send_email_message_mailgun(email):
    """POST `email` to the Mailgun messages API, in a background thread.

    NOTE(review): the return value is lost -- @async's wrapper does not
    return the thread's result.
    """
    sandbox = app.config['MAILGUN_SANDBOX']
    url = "https://api.mailgun.net/v3/{0}/messages".format(sandbox)
    sender_address = "mailgun@{0}".format(sandbox)
    if email.sender_name is not None:
        sender = "{0} <{1}>".format(email.sender_name, sender_address)
    else:
        sender = sender_address
    api_key = app.config['MAILGUN_API_KEY']
    return requests.post(url,
                         auth=("api", api_key),
                         data={"from": sender,
                               "to": email.recipients,
                               "subject": email.subject,
                               "text": email.body})
class Email(object):
    """ Simple representation of an email message to be sent."""
    def __init__(self, subject, body, sender_name, recipients):
        # sender_name may be None, in which case the bare Mailgun sandbox
        # address is used as the sender (see send_email_message_mailgun).
        self.subject = subject
        self.body = body
        self.sender_name = sender_name
        self.recipients = recipients
def send_email_message(email):
    """Send `email` via Mailgun, unless the app is in testing mode."""
    # We don't want to actually send the message every time we're testing.
    # Note that if we really wish to record the emails and check that the
    # correct ones were "sent" out, then we have to do something a bit clever
    # because this code will be executed in a different process to the
    # test code. We could have some kind of test-only route that returns the
    # list of emails sent as a JSON object or something.
    if not app.config['TESTING']:
        send_email_message_mailgun(email)
@app.route('/give_feedback', methods=['POST'])
def give_feedback():
    """Validate the feedback form and email its contents to the admins."""
    form = FeedbackForm()
    if not form.validate_on_submit():
        message = ('Feedback form has not been validated.'
                   'Sorry it was probably my fault')
        flash(message, 'error')
        return redirect(redirect_url())
    feedback_email = form.feedback_email.data.lstrip()
    feedback_name = form.feedback_name.data.lstrip()
    feedback_content = form.feedback_text.data
    subject = 'Feedback for Tesuji Charm'
    sender_name = 'Tesuji Charm Feedback Form'
    recipients = app.config['ADMINS']
    message_body = """
    You got some feedback from the 'tesuji-charm' web application.
    Sender's name = {0}
    Sender's email = {1}
    Content: {2}
    """.format(feedback_name, feedback_email, feedback_content)
    email = Email(subject, message_body, sender_name, recipients)
    send_email_message(email)
    flash("Thanks for your feedback!", 'info')
    return redirect(redirect_url())
@app.route('/login', methods=['POST'])
def login():
    """Check the posted credentials and start a session on success."""
    form = LoginForm()
    if form.validate_on_submit():
        users = (db.session.query(User)
                 .filter_by(username=form.username.data).all())
        if len(users) == 0:
            flash('Username not found', 'error')
            return redirect(redirect_url())
        user = users[0]
        if not user.check_password(form.password.data):
            flash('Password incorrect', 'error')
            return redirect(redirect_url())
        set_logged_in_user(form.username.data)
        return redirect(redirect_url())
    else:
        return redirect(redirect_url())
@app.route('/create_account', methods=['GET', 'POST'])
def create_account():
    """Sign-up: validate the form, create the user, and log them in."""
    form = CreateAccountForm()
    if form.validate_on_submit():
        if form.password1.data != form.password2.data:
            flash("Passwords do not match", 'error')
            return render_template_with_basics('create_account.html',
                                               form=form)
        user = User(username=form.username.data,
                    password=form.password1.data)
        db.session.add(user)
        db.session.commit()
        set_logged_in_user(form.username.data)
        return redirect('/')
    else:
        if request.method == 'POST':
            flash('Sign-up form incomplete.', 'error')
        return render_template_with_basics('create_account.html',
                                           form=form)
@app.route('/finished', methods=['GET'])
def finished():
    """List the logged-in user's completed games."""
    try:
        user = logged_in_user()
    except NoLoggedInPlayerException:
        return redirect('/')
    finished_games = (
        db.session.query(Game)
        .filter(Game.result != go.GameResult.not_finished.value)  # noqa
        .filter(or_(Game.black == user, Game.white == user))
        .all()
    )
    return render_template_with_basics(
        "finished.html",
        finished_games=finished_games)
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """End the session.  POST (AJAX) gets an empty body; GET redirects."""
    try:
        logout_current_user()
    except KeyError:
        # nobody was logged in -- nothing to do
        pass
    if request.method == 'POST':
        return ''
    else:
        return redirect('/')
# test-only routes (used in testing to access the server more directly than
# users are normally allowed to), and their helpers. These should all use the
# `test_only_route` decorator below:
def test_only_route(self, rule, **options):
    """A wrapper for `app.route`, that disables the route outside testing"""
    def decorator(f):
        # we can't just check at compile time whether testing mode is on,
        # because it's not set until after this file is imported (until then,
        # the importing module has no app object to set the testing flag on).
        #
        # Therefore we have to check at the time the wrapped view function is
        # called.
        def guarded_f(*f_args, **f_options):
            if self.config['TESTING']:
                return f(*f_args, **f_options)
            else:
                # route still exists outside testing, but returns nothing
                return ""
        if 'endpoint' not in options:
            options['endpoint'] = f.__name__
        self.route(rule, **options)(guarded_f)
        return guarded_f
    return decorator

# installed as a method so views can use the @app.test_only_route(...) form
Flask.test_only_route = test_only_route
@app.test_only_route('/testing_delete_user', methods=['POST'])
def testing_delete_user():
    """Delete the user with the given username (test fixtures only)."""
    username = request.form['username']
    users = db.session.query(User).filter_by(username=username).all()
    for user in users:
        db.session.delete(user)
    db.session.commit()
    return ''
@app.test_only_route('/testing_create_login_session', methods=['POST'])
def testing_create_login_session():
    """Log in the given user id (test fixtures only; bypasses passwords)."""
    set_logged_in_user(request.form['email'])
    app.logger.debug(
        "logged in user set to {} for testing".format(
            request.form['email']))
    return ''
@app.test_only_route('/testing_create_game', methods=['POST'])
def testing_create_game():
    """Create a custom game in the database directly (test fixtures only)."""
    black_user = request.form['black_email']
    white_user = request.form['white_email']
    stones = json.loads(request.form['stones'])
    create_game_internal(black_user, white_user, stones)
    return ''
def create_game_internal(black, white,
                         sgf_or_stones=None,
                         stones=None, sgf=None):
    """Create a custom game for testing purposes.

    Can be initialized with an SGF or a 'text map', ie. a list of strings
    representing setup stones like this:

    ['w.',
     '.b']
    """
    # at most one of the three initial-state arguments may be supplied
    assert sum(1 for x in [sgf_or_stones, stones, sgf]
               if x is not None) <= 1, \
        "can't supply more than one initial state to create_game_internal"
    if sgf_or_stones:
        # disambiguate the convenience argument by type
        if isinstance(sgf_or_stones, str):
            assert sgf_or_stones[0] == '(', \
                "invalid SGF passed to create_game_internal; if you meant " \
                "a text map, make it a list"
            sgf = sgf_or_stones
        else:
            stones = sgf_or_stones
    if not sgf:
        if not stones:
            stones = []
        sgf = sgf_from_text_map(stones)
    game = Game(black=black, white=white, sgf=sgf,
                last_move_time=datetime.now())
    db.session.add(game)
    db.session.commit()
    return game
def sgf_from_text_map(text_map):
    """Turn a text map like ['w.', '.b'] into an SGF with setup stones."""
    assert not isinstance(text_map, str), \
        "text maps should be lists of strings, not a single string"
    ab_coords = []
    aw_coords = []
    for rowno, row in enumerate(text_map):
        for colno, char in enumerate(row):
            if char == 'b':
                ab_coords.append((colno, rowno))
            elif char == 'w':
                aw_coords.append((colno, rowno))

    def sgfify(coords, tag):
        # renders e.g. AB[ab][cd]; an empty coordinate list renders as ''
        if not coords:
            return ''
        return (tag + '[' +
                ']['.join(sgftools.encode_coord(x, y)
                          for (x, y) in coords)
                + ']')
    ab_str = sgfify(ab_coords, 'AB')
    aw_str = sgfify(aw_coords, 'AW')
    return "(;{ab}{aw})".format(ab=ab_str, aw=aw_str)
@app.test_only_route('/testing_setup_finished_game', methods=['POST'])
def testing_setup_finished_game():
    """Create a finished game (in the marking phase)."""
    black_user = request.form['black_email']
    white_user = request.form['white_email']
    setup_finished_game_internal(black_user, white_user)
    return ''
def setup_finished_game_internal(black_user, white_user):
    """Create a game in the marking phase: a full 19x19 position with two
    passes appended, so scoring mode is active."""
    stones = ['.....bww.wbb.......',
              '.bb...bw.wwbb......',
              '.wwbbb.bw.wbwwb..b.',
              'b.www..b.w.b.bbb.b.',
              '.w.w..bww.wwbbwwww.',
              '.bwwb.bw.w.wwbbbbbb',
              '.bbbbbbw.bbbbwwwwwb',
              '...bwwwbbbwbwww..ww',
              '....bbwwb.wwbbbww..',
              '.bbbbww.ww.ww..wb..',
              'bbwbw.....wbw..wb..',
              'bwww.w...wbbw......',
              'w.......w...w..w.ww',
              'w.w.....wbbbw...wwb',
              'bw...www.b.bbww.wbb',
              'bbwwwwbwbbwww.wwbb.',
              '.bbbwbbbwbbbwwbwb..',
              '...bbw.bwb..bbbb...',
              '...................']
    sgf = sgf_from_text_map(stones)
    # splice the two passes in just before the SGF's closing parenthesis
    passed_sgf = sgf[:-1] + 'B[];W[])'
    create_game_internal(black_user, white_user, sgf=passed_sgf)
@app.test_only_route('/testing_clear_games_for_player', methods=['POST'])
def testing_clear_games_for_player():
    """Clear all of `email`'s games from the database."""
    user = request.form['email']
    clear_games_for_player_internal(user)
    return ''
def clear_games_for_player_internal(user):
    """Delete every game (either colour) involving `user`."""
    games_as_black = Game.query.filter(Game.black == user).all()
    games_as_white = Game.query.filter(Game.white == user).all()
    games = games_as_black + games_as_white
    for game in games:
        db.session.delete(game)
    db.session.commit()
# helper functions
def is_logged_in():
    """True if a user is logged in.  Reads the Flask session."""
    return 'user' in session
def set_logged_in_user(user):
    """Record `user` as the session's logged-in user."""
    session.update(user=user)
def logout_current_user():
    """Drop the session's user; raises KeyError if nobody is logged in."""
    del session['user']
class NoLoggedInPlayerException(Exception):
    """Raised by logged_in_user() when no one is logged in."""
    pass
def logged_in_user():
    """Return user id of logged in player, or raise NoLoggedInPlayerException.

    Accesses the session; 'user' is set by set_logged_in_user().
    """
    try:
        return session['user']
    except KeyError:
        raise NoLoggedInPlayerException()
def render_template_with_basics(template_name_or_list, **context):
    """A wrapper around flask.render_template, setting always-present fields.

    Depends on the session object; current_user='' renders the
    logged-out page chrome.
    """
    try:
        user = logged_in_user()
    except NoLoggedInPlayerException:
        user = ''
    return render_template(
        template_name_or_list,
        current_user=user,
        login_form=LoginForm(),
        feedback_form=FeedbackForm(),
        **context)
def max_with_sentinel(sentinel, *iterables):
    """Return the maximum over all items of *iterables*, but at least
    `sentinel`; never raises on empty input."""
    best = sentinel
    for iterable in iterables:
        for value in iterable:
            if value > best:
                best = value
    return best
# Server player
class ServerPlayer(object):
    """ A class used to represent server players. The hope is that to create a
        new server player, one need only override the `act` method. It should
        be then possible to create a daemon which runs all registered server
        players at convenient times.
    """
    def __init__(self, player_email, rest_interval=3600):
        """ Specify the player-email and the rest-interval in seconds. This can
            be specified as a floating point number for more accuracy than
            seconds if need be.
        """
        self.player_email = player_email
        self.rest_interval = rest_interval
    def _daemon(self):
        # loop forever: act, then sleep out the rest interval
        while True:
            self.act()
            time.sleep(self.rest_interval)
    def start_daemon(self):
        # run _daemon in a separate process; daemon=True so it dies with us
        self._daemon_process = multiprocessing.Process(target=self._daemon)
        self._daemon_process.daemon = True
        self._daemon_process.start()
    def terminate_daemon(self):
        # NOTE(review): raises AttributeError if start_daemon was never
        # called, since _daemon_process is only created there.
        if self._daemon_process is not None:
            db.session.commit()
            db.session.close()
            self._daemon_process.terminate()
    def act(self):
        """ The base `act` method of the `ServerPlayer` is so simple that it
            plays a pass on every waiting game.
        """
        waiting_games, _not_waiting_games = get_status_lists(self.player_email)
        for game in waiting_games:
            # A request would normally include the 'move number' to make sure
            # we are not replaying a previous move. But we're directly
            # accessing the db here, so we get the move number from the db
            # itself. Note that this still prevents replaying a move in the
            # case in which (presumably, accidentally) we have two daemons
            # running the same computer player.
            # NOTE(review): `game.move_no` and `validate_turn_and_record` are
            # not defined anywhere in this file -- confirm they exist
            # elsewhere, otherwise this raises at runtime.
            arguments = {'move_no': game.move_no}
            validate_turn_and_record(
                "pass", self.player_email, game, arguments)
# models
# TODO: In SQLAlchemy 1.1, you can directly use an Enum, but that is not yet
# released and it seems a pain to require a development version of SQLAlchemy,
# hence, we're using a slightly temporary fix. This means that unfortunately,
# scattered about the code we will have a few `GameResult.<result>.value` where
# ideally we'd like to just write `GameResult.<result>`. I've tried to keep this
# mostly in the Game class itself, however, that is awkward when creating a
# query filter.
game_results = [r.value for r in go.GameResult]  # legal values for Game.result
@app.template_filter('game_result_summary')
def game_result_summary(game_result):
    """Template filter: long human-readable text for a result code."""
    summaries = {
        'WBR': 'White won by resignation',
        'WBC': 'White won on points',
        'BBR': 'Black won by resignation',
        'BBC': 'Black won on points',
        'D': 'The Game was Drawn',
    }
    # unknown / unfinished codes render as an empty string
    return summaries.get(game_result, '')
@app.template_filter('game_result_summary_short')
def game_result_summary_short(game_result):
    """Template filter: compact result text for game lists."""
    summaries = {
        'WBR': 'White (resignation)',
        'WBC': 'White (points)',
        'BBR': 'Black (resignation)',
        'BBC': 'Black (points)',
        'D': 'Draw',
    }
    # unknown / unfinished codes render as an empty string
    return summaries.get(game_result, '')
class Game(db.Model):
    """A go game between two users, stored as its full SGF record."""
    __tablename__ = 'games'
    id = db.Column(db.Integer, primary_key=True)
    black = db.Column(db.String(length=254))  # black player's user string
    white = db.Column(db.String(length=254))  # white player's user string
    sgf = db.Column(db.Text())  # complete game record
    result = db.Column(db.Enum(*game_results),
                       default=go.GameResult.not_finished.value)
    last_move_time = db.Column(db.DateTime())
    def __repr__(self):
        return "<Game {no}, {b} vs. {w}>".format(
            no=self.id, b=self.black, w=self.white)
    def player_opponent(self, player):
        """Return `player`'s opponent, or None if they are not in this game."""
        if player == self.black:
            return self.white
        elif player == self.white:
            return self.black
        else:
            return None
    def player_color(self, player):
        """Return 'black'/'white' for `player`, or None if not a player."""
        if player == self.black:
            return 'black'
        elif player == self.white:
            return 'white'
        else:
            return None
    def resign(self, player):
        """Record a win for `player`'s opponent by resignation."""
        if player == self.black:
            self.result = go.GameResult.white_by_resign.value
        elif player == self.white:
            self.result = go.GameResult.black_by_resign.value
        else:
            app.logger.debug("Attempt to resign by non-player")
    @property
    def finished(self):
        # Truthy once a result is recorded.  NOTE(review): callers use this
        # in boolean context, which relies on go.GameResult.not_finished.value
        # being falsy -- confirm in the go module.
        return self.result
    def jsonify_comments(self):
        """Return this game's chat comments as a JSON response."""
        return flask.jsonify(moments=[c.jsonify() for c in self.comments])
class GameComment(db.Model):
    """A single chat message attached to a game."""
    id = db.Column(db.Integer, primary_key=True)
    pub_date = db.Column(db.DateTime)
    speaker = db.Column(db.String(length=254))  # commenting user's name
    game_id = db.Column(db.Integer, db.ForeignKey('games.id'))
    game = db.relationship('Game',
                           backref=db.backref('comments', lazy='dynamic'))
    content = db.Column(db.Text())
    def __init__(self, game, content, speaker, pub_date=None):
        """Create a comment; pub_date defaults to the current UTC time."""
        self.game = game
        self.content = content
        self.speaker = speaker
        self.pub_date = pub_date if pub_date is not None else datetime.utcnow()
    def jsonify(self):
        """Return a plain dict form of the comment (not a JSON string)."""
        return {'content': self.content,
                'speaker': self.speaker,
                'pub_date': self.pub_date}
class Notification(db.Model):
    """A message shown on a user's status page until marked read."""
    __tablename__ = 'notifications'
    id = db.Column(db.Integer, primary_key=True)
    pub_date = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User',
                           backref=db.backref('notifications', lazy='dynamic'))
    unread = db.Column(db.Boolean, default=True)
    content = db.Column(db.Text())  # may contain HTML (e.g. a game link, see play())
    def __init__(self, user, content, pub_date=None):
        """Create a notification; pub_date defaults to the current UTC time."""
        self.user = user
        self.content = content
        self.pub_date = pub_date if pub_date is not None else datetime.utcnow()
class User(db.Model):
    """An account; passwords are stored only as salted PBKDF2-SHA256 hashes."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(length=254))
    password_hash = db.Column(db.String(length=254))
    def __init__(self, username, password):
        self.username = username
        self.set_password(password)
    def set_password(self, password):
        """Hash `password` and store the hash; the plaintext is never kept."""
        self.password_hash = ws.generate_password_hash(password,
                                                       method='pbkdf2:sha256')
    def check_password(self, password):
        """True if `password` matches the stored hash."""
        return ws.check_password_hash(pwhash=self.password_hash,
                                      password=password)
# forms
class ChallengeForm(Form):
    """Form for challenging another player to a new game."""
    opponent = StringField(
        "Opponent's email or username", validators=[DataRequired()])
class LoginForm(Form):
    """Credentials form rendered on every page (see render_template_with_basics)."""
    username = StringField("Username",
                           validators=[DataRequired()],
                           description="Username")
    password = PasswordField("Password",
                             validators=[DataRequired()],
                             description="Password")
class CreateAccountForm(Form):
    """Sign-up form; the password-match check happens in create_account()."""
    username = StringField("Username",
                           validators=[DataRequired()])
    password1 = PasswordField("Password",
                              validators=[DataRequired()])
    password2 = PasswordField("Password again",
                              validators=[DataRequired()])
class HiddenInteger(IntegerField):
    """An IntegerField rendered as a hidden input."""
    widget = HiddenInput()
class PlayStoneForm(Form):
    """Hidden form POSTed by the board JS when a stone is played."""
    game_no = HiddenInteger("game_no", validators=[DataRequired()])
    move_no = HiddenInteger("move_no", validators=[DataRequired()])
    data = HiddenField("data")
    response = HiddenField("response", validators=[DataRequired()])
class ChatForm(Form):
    """Form for posting a chat comment on a game page."""
    game_no = HiddenInteger("game_no", validators=[DataRequired()])
    # move_no = HiddenInteger("move_no", validators=[DataRequired()])
    comment = StringField('Comment', validators=[DataRequired()])
|
karlorg/drunken-octo-avenger
|
app/main.py
|
Python
|
cc0-1.0
| 32,063
|
from Wordclient import *
from Hypers import alpha
import operator
class Sentenceclient:
    def __init__(self, sentence1, sentence2):
        """
        Prepare two sentences for semantic/order similarity scoring.
        """
        wordhash = Shelveopen('Hash#1.shelve')
        self.sent1 = Purify(sentence1, wordhash) # Sending Hash from here as prevents opening and closing again n again
        self.sent2 = Purify(sentence2, wordhash)
        self.wordset = []
        # Build the order-preserving, deduplicated union of both word lists.
        for word in self.sent1+self.sent2:
            if word not in self.wordset:
                self.wordset.append(word)
        self.semantic_vectors = []  # semantic vectors of both sentences
        self.order_vectors = []  # order vectors of both sentences
        self.threshold = 0.001  # below this, a word pair counts as not similar at all
        self.pathacc = {}  # accumulates all paths found while crawling
        self.wordmap = [[],[]]  # "word->word" matches between the sentences, one list per direction
def Getsemantics(self):
'''
To access semantic vectors
'''
if self.semantic_vectors == []:
# Vectors haven't been created yet
self.Createvectors()
else:
return self.semantic_vectors
def getPathsacc(self):
'''
Getter for accumulation of paths
'''
return self.pathacc
def getWordmap(self):
'''
Getter for accumulation of paths
'''
return self.wordmap
def Getorder(self):
'''
To access order vectors
'''
if self.order_vectors == []:
# Vectors haven't been created yet
self.Createvectors()
else:
return self.order_vectors
def Createvectors(self):
'''
To create semantic vectors
'''
sem1 = []
sem2 = []
ord1 = []
ord2 = []
ls2 = []
# For First document
for i, word in enumerate(self.wordset):
allpaths = []
allscores = []
if word in self.sent1:
sem1.append(1) # No Semantic Match needed
ord1.append(self.sent1.index(word)+1)
else:
wc = Wordclient(word)
for key in self.sent1:
wc.init_client(key)
score = wc.getmetric()
if score > 0:
# Some Semantic Match
self.Updatepath(word, wc.getpaths())
# Enter in wordmap, word from sentence2 to sentence1 map
phrase = word+'->'+key
if phrase not in self.wordmap[1]:
self.wordmap[1].append(phrase)
allscores.append(score)
allpaths.append(wc.getpaths())
index, score = max(enumerate(allscores), key=operator.itemgetter(1))
if score > self.threshold:
# act_index = self.wordset.index(self.sent1[index])
sem1.append(score)
# ord1.append(act_index+1)
ord1.append(index+1)
else:
sem1.append(0)
ord1.append(0)
# For Second document
for i, word in enumerate(self.wordset):
allpaths = []
allscores = []
if word in self.sent2:
sem2.append(1) # No Semantic Match needed
ord2.append(self.sent2.index(word)+1)
else:
wc = Wordclient(word)
for key in self.sent2:
wc.init_client(key)
score = wc.getmetric()
if score > 0:
# Some Semantic Match
self.Updatepath(word, wc.getpaths())
# Enter in wordmap, word from sentence2 to sentence1 map
phrase = word+'->'+key
if phrase not in self.wordmap[0]:
self.wordmap[0].append(phrase)
allscores.append(score)
allpaths.append(wc.getpaths())
index, score = max(enumerate(allscores), key=operator.itemgetter(1))
if score > self.threshold:
# act_index = self.wordset.index(self.sent2[index])
sem2.append(score)
# ord2.append(act_index+1)
ord2.append(index+1)
else:
sem2.append(0)
ord2.append(0)
self.semantic_vectors.append(sem1)
self.semantic_vectors.append(sem2)
self.order_vectors.append(ord1)
self.order_vectors.append(ord2)
def Updatepath(self, word, paths):
# Creating paths to show in UI
if word not in self.pathacc:
# Initializing empty list
self.pathacc[word] = []
for path in paths:
packet = []
for edge in path:
ls = []
ls.append(edge.src)
ls.append(edge.weight)
ls.append(edge.kind)
ls.append(edge.dest)
packet.append(ls)
if packet not in self.pathacc[word]:
self.pathacc[word].append(packet)
def Semantic_calc(self):
# Calcuating cosine Similarity of semantic vectors
vector1 = self.semantic_vectors[0]
vector2 = self.semantic_vectors[1]
cosine = Cosine_similarity(vector1, vector2)
return cosine
def Order_calc(self):
# Normalizing order vectors to a score
vector1 = [x - y for x, y in zip(self.order_vectors[0],self.order_vectors[1])]
vector2 = [x + y for x, y in zip(self.order_vectors[0],self.order_vectors[1])]
if vector2 == 0:
# prevent division by zero
return 1.0
numerator = Vectormag(vector1)
denominator = Vectormag(vector2)
normalize = 1 - (numerator/denominator)
return round(normalize,3)
def getmetric(self):
semantic_score = 0
order_score = 0
if self.semantic_vectors == [] and self.sent1 != [] and self.sent2 != []:
# Vectors haven't been created yet
self.Createvectors()
semantic_score = self.Semantic_calc()
order_score = self.Order_calc()
score = alpha*semantic_score + (1-alpha)*order_score
# File logging
log = '\n************************'
Filedump('SentenceComparison.log',log)
log = 'Sentence 1 : '+str(self.sent1)
Filedump('SentenceComparison.log',log)
log = 'Sentence 2 : '+str(self.sent2)
Filedump('SentenceComparison.log',log)
log = 'WordSet : '+str(self.wordset)
Filedump('SentenceComparison.log',log)
log = 'Semantic Vectors : '+str(self.semantic_vectors)
Filedump('SentenceComparison.log',log)
log = 'Order Vectors : '+str(self.order_vectors)
Filedump('SentenceComparison.log',log)
log = 'All Paths : '+str(self.wordmap)
Filedump('SentenceComparison.log',log)
log = '####### Semantic Sentence Score : '+str(score)+' #######'
Filedump('SentenceComparison.log',log)
return round(score,4)
if __name__ == '__main__':
    # Smoke test: compare a word against a short gloss and time the run.
    # NOTE(review): `time` is not imported in this file; presumably it
    # arrives via `from Wordclient import *` -- confirm.
    start_time = time.time()
    try:
        ss = Sentenceclient('dog',' frump domestic dog ')
        score = ss.getmetric()
        print ('Execution Time : ',time.time() - start_time)
    except Exception as e:
        print ('Error Sentenceclient- ',e)
|
anirudhagar13/SS_Graph
|
Sentenceclient.py
|
Python
|
apache-2.0
| 5,987
|
import warnings
from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(ContextMixin):
    """
    A mixin that provides a way to show and handle a form in a request.
    """
    # Class-level default; get_initial() hands out a copy so instances
    # never mutate this shared dict.
    initial = {}
    form_class = None
    success_url = None
    prefix = None

    def get_initial(self):
        """
        Returns the initial data to use for forms on this view.
        """
        return self.initial.copy()

    def get_prefix(self):
        """
        Returns the prefix to use for forms on this view
        """
        return self.prefix

    def get_form_class(self):
        """
        Returns the form class to use in this view
        """
        return self.form_class

    def get_form(self, form_class):
        """
        Returns an instance of the form to be used in this view.
        """
        return form_class(**self.get_form_kwargs())

    def get_form_kwargs(self):
        """
        Returns the keyword arguments for instantiating the form.
        """
        kwargs = {
            'initial': self.get_initial(),
            'prefix': self.get_prefix(),
        }
        # Bind submitted data/files only for mutating verbs.
        if self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        return kwargs

    def get_success_url(self):
        """
        Returns the supplied success URL.
        """
        if self.success_url:
            # Forcing possible reverse_lazy evaluation
            url = force_text(self.success_url)
        else:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")
        return url

    def form_valid(self, form):
        """
        If the form is valid, redirect to the supplied URL.
        """
        return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, form):
        """
        If the form is invalid, re-render the context data with the
        data-filled form and errors.
        """
        return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
    """
    A mixin that provides a way to show and handle a modelform in a request.
    """
    fields = None

    def get_form_class(self):
        """
        Returns the form class to use in this view.
        """
        if self.form_class:
            return self.form_class
        else:
            if self.model is not None:
                # If a model has been explicitly provided, use it
                model = self.model
            elif hasattr(self, 'object') and self.object is not None:
                # If this view is operating on a single object, use
                # the class of that object
                model = self.object.__class__
            else:
                # Try to get a queryset and extract the model class
                # from that
                model = self.get_queryset().model
            if self.fields is None:
                warnings.warn("Using ModelFormMixin (base class of %s) without "
                              "the 'fields' attribute is deprecated." % self.__class__.__name__,
                              DeprecationWarning)
            return model_forms.modelform_factory(model, fields=self.fields)

    def get_form_kwargs(self):
        """
        Returns the keyword arguments for instantiating the form.
        """
        kwargs = super(ModelFormMixin, self).get_form_kwargs()
        # Bind the form to the existing instance when one has been fetched.
        if hasattr(self, 'object'):
            kwargs.update({'instance': self.object})
        return kwargs

    def get_success_url(self):
        """
        Returns the supplied URL.
        """
        if self.success_url:
            # Allow the URL to interpolate the object's fields,
            # e.g. "/detail/%(pk)s/".
            url = self.success_url % self.object.__dict__
        else:
            try:
                url = self.object.get_absolute_url()
            except AttributeError:
                raise ImproperlyConfigured(
                    "No URL to redirect to. Either provide a url or define"
                    " a get_absolute_url method on the Model.")
        return url

    def form_valid(self, form):
        """
        If the form is valid, save the associated model.
        """
        self.object = form.save()
        return super(ModelFormMixin, self).form_valid(form)
class ProcessFormView(View):
    """
    A mixin that renders a form on GET and processes it on POST.
    """

    def get(self, request, *args, **kwargs):
        """
        Handles GET requests and instantiates a blank version of the form.
        """
        blank_form = self.get_form(self.get_form_class())
        return self.render_to_response(self.get_context_data(form=blank_form))

    def post(self, request, *args, **kwargs):
        """
        Handles POST requests, instantiating a form instance with the passed
        POST variables and then checked for validity.
        """
        bound_form = self.get_form(self.get_form_class())
        if not bound_form.is_valid():
            return self.form_invalid(bound_form)
        return self.form_valid(bound_form)

    # PUT is a valid HTTP verb for creating (with a known URL) or editing an
    # object, note that browsers only support POST for now.
    def put(self, *args, **kwargs):
        return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
    """
    A base view for displaying a form
    """
class FormView(TemplateResponseMixin, BaseFormView):
    """
    A view for displaying a form, and rendering a template response.
    """
class BaseCreateView(ModelFormMixin, ProcessFormView):
    """
    Base view for creating a new object instance.

    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        # No existing object yet when creating.
        self.object = None
        return super(BaseCreateView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = None
        return super(BaseCreateView, self).post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
    """
    View for creating a new object instance,
    with a response rendered by template.
    """
    template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
    """
    Base view for updating an existing object.

    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        # Fetch the object being edited before delegating to the form flow.
        self.object = self.get_object()
        return super(BaseUpdateView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super(BaseUpdateView, self).post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
    """
    View for updating an object,
    with a response rendered by template.
    """
    template_name_suffix = '_form'
class DeletionMixin(object):
    """
    A mixin providing the ability to delete objects
    """
    success_url = None

    def delete(self, request, *args, **kwargs):
        """
        Fetch the target object, resolve the redirect target, then delete
        the object and redirect to the success URL.
        """
        self.object = self.get_object()
        redirect_to = self.get_success_url()
        self.object.delete()
        return HttpResponseRedirect(redirect_to)

    # Add support for browsers which only accept GET and POST for now.
    def post(self, request, *args, **kwargs):
        return self.delete(request, *args, **kwargs)

    def get_success_url(self):
        # Guard clause: without a configured URL there is nowhere to go.
        if not self.success_url:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")
        return self.success_url % self.object.__dict__
class BaseDeleteView(DeletionMixin, BaseDetailView):
    """
    Base view for deleting an object.

    Using this base class requires subclassing to provide a response mixin.
    """
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
    """
    View for deleting an object retrieved with `self.get_object()`,
    with a response rendered by template.
    """
    template_name_suffix = '_confirm_delete'
|
DrMeers/django
|
django/views/generic/edit.py
|
Python
|
bsd-3-clause
| 8,675
|
#!/usr/bin/env python
"""
The fMRIPrep on Docker wrapper
This is a lightweight Python wrapper to run fMRIPrep.
Docker must be installed and running. This can be checked
running ::
docker info
Please report any feedback to our GitHub repository
(https://github.com/poldracklab/fmriprep) and do not
forget to credit all the authors of software that fMRIPrep
uses (https://fmriprep.readthedocs.io/en/latest/citing.html).
"""
import sys
import os
import re
import subprocess
__version__ = '99.99.99'
__copyright__ = 'Copyright 2020, Center for Reproducible Neuroscience, Stanford University'
__credits__ = ['Craig Moodie', 'Ross Blair', 'Oscar Esteban', 'Chris Gorgolewski',
               'Shoshana Berleant', 'Christopher J. Markiewicz', 'Russell A. Poldrack']
__bugreports__ = 'https://github.com/poldracklab/fmriprep/issues'

# Prompt shown when the requested Docker image is not available locally.
MISSING = """
Image '{}' is missing
Would you like to download? [Y/n] """
# site-packages path inside the container image; used for --patch mounts.
PKG_PATH = '/usr/local/miniconda/lib/python3.7/site-packages'

# Template identifiers accepted verbatim by --output-spaces.
TF_TEMPLATES = (
    'MNI152Lin',
    'MNI152NLin2009cAsym',
    'MNI152NLin6Asym',
    'MNI152NLin6Sym',
    'MNIInfant',
    'MNIPediatricAsym',
    'NKI',
    'OASIS30ANTs',
    'PNC',
    'UNCInfant',
    'fsLR',
    'fsaverage',
    'fsaverage5',
    'fsaverage6',
)
# Non-template output-space keywords (native/scanner spaces).
NONSTANDARD_REFERENCES = (
    'anat',
    'T1w',
    'run',
    'func',
    'sbref',
    'fsnative'
)
# Monkey-patch Py2 subprocess
if not hasattr(subprocess, 'DEVNULL'):
    # Sentinel value; _run() below maps it onto an os.devnull handle.
    subprocess.DEVNULL = -3
if not hasattr(subprocess, 'run'):
    # Reimplement minimal functionality for usage in this file
    def _run(args, stdout=None, stderr=None):
        # Minimal stand-in for Py3's subprocess.run(): returns an object
        # exposing .stdout, .stderr and .returncode.
        from collections import namedtuple
        result = namedtuple('CompletedProcess', 'stdout stderr returncode')
        devnull = None
        if subprocess.DEVNULL in (stdout, stderr):
            devnull = open(os.devnull, 'r+')
            if stdout == subprocess.DEVNULL:
                stdout = devnull
            if stderr == subprocess.DEVNULL:
                stderr = devnull
        proc = subprocess.Popen(args, stdout=stdout, stderr=stderr)
        stdout, stderr = proc.communicate()
        res = result(stdout, stderr, proc.returncode)
        if devnull is not None:
            devnull.close()
        return res
    subprocess.run = _run
# De-fang Python 2's input - we don't eval user input
try:
    input = raw_input
except NameError:
    pass
def check_docker():
    """Verify that docker is installed and the user has permission to
    run docker images.

    Returns
    -------
    -1 Docker can't be found
     0 Docker found, but user can't connect to daemon
     1 Test run OK
    """
    try:
        probe = subprocess.run(['docker', 'version'], stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    except OSError as e:
        from errno import ENOENT
        if e.errno != ENOENT:
            raise e
        return -1
    daemon_unreachable = probe.stderr.startswith(
        b"Cannot connect to the Docker daemon.")
    return 0 if daemon_unreachable else 1
def check_image(image):
    """Check whether image is present on local system"""
    query = subprocess.run(['docker', 'images', '-q', image],
                           stdout=subprocess.PIPE)
    return len(query.stdout) > 0
def check_memory(image):
    """Check total memory (in MB) from within a docker container.

    Returns -1 when the container could not be run *or* when the output
    of ``free -m`` contains no ``Mem:`` line (the original raised an
    unguarded IndexError in that case instead of using the documented
    sentinel).
    """
    ret = subprocess.run(['docker', 'run', '--rm', '--entrypoint=free',
                          image, '-m'],
                         stdout=subprocess.PIPE)
    if ret.returncode:
        return -1
    for line in ret.stdout.splitlines():
        if line.startswith(b'Mem:'):
            # Second column of the Mem: row is the total, in megabytes.
            return int(line.decode().split()[1])
    # `free` ran but produced no Mem: line; treat as undetectable.
    return -1
def merge_help(wrapper_help, target_help):
    """Merge the wrapper's --help text with the containerized tool's one.

    Splices options that exist only inside the container into the
    wrapper's usage line and option groups so a single combined help
    message can be printed.  Statement order is significant throughout.
    """
    # Matches all flags with up to one nested square bracket
    opt_re = re.compile(r'(\[--?[\w-]+(?:[^\[\]]+(?:\[[^\[\]]+\])?)?\])')
    # Matches flag name only
    flag_re = re.compile(r'\[--?([\w-]+)[ \]]')
    # Normalize to Unix-style line breaks
    w_help = wrapper_help.rstrip().replace('\r', '')
    t_help = target_help.rstrip().replace('\r', '')
    w_usage, w_details = w_help.split('\n\n', 1)
    w_groups = w_details.split('\n\n')
    t_usage, t_details = t_help.split('\n\n', 1)
    t_groups = t_details.split('\n\n')
    w_posargs = w_usage.split('\n')[-1].lstrip()
    t_posargs = t_usage.split('\n')[-1].lstrip()
    w_options = opt_re.findall(w_usage)
    w_flags = sum(map(flag_re.findall, w_options), [])
    t_options = opt_re.findall(t_usage)
    t_flags = sum(map(flag_re.findall, t_options), [])
    # The following code makes this assumption
    # NOTE: these sanity asserts are stripped under `python -O`.
    assert w_flags[:2] == ['h', 'version']
    assert w_posargs.replace(']', '').replace('[', '') == t_posargs
    # Make sure we're not clobbering options we don't mean to
    overlap = set(w_flags).intersection(t_flags)
    expected_overlap = {
        'anat-derivatives',
        'bids-database-dir',
        'fs-license-file',
        'fs-subjects-dir',
        'config-file',
        'h',
        'use-plugin',
        'version',
        'w',
    }
    assert overlap == expected_overlap, "Clobbering options: {}".format(
        ', '.join(overlap - expected_overlap))
    sections = []
    # Construct usage
    start = w_usage[:w_usage.index(' [')]
    indent = ' ' * len(start)
    new_options = sum((
        w_options[:2],
        [opt for opt, flag in zip(t_options, t_flags) if flag not in overlap],
        w_options[2:]
    ), [])
    opt_line_length = 79 - len(start)
    length = 0
    opt_lines = [start]
    # Re-wrap the merged option list to the usage line width.
    for opt in new_options:
        opt = ' ' + opt
        olen = len(opt)
        if length + olen <= opt_line_length:
            opt_lines[-1] += opt
            length += olen
        else:
            opt_lines.append(indent + opt)
            length = olen
    opt_lines.append(indent + ' ' + t_posargs)
    sections.append('\n'.join(opt_lines))
    # Use target description and positional args
    sections.extend(t_groups[:2])
    for line in t_groups[2].split('\n')[1:]:
        content = line.lstrip().split(',', 1)[0]
        if content[1:] not in overlap:
            w_groups[2] += '\n' + line
    sections.append(w_groups[2])
    # All remaining sections, show target then wrapper (skipping duplicates)
    sections.extend(t_groups[3:] + w_groups[6:])
    return '\n\n'.join(sections)
def is_in_directory(filepath, directory):
    """Return True when *filepath* resolves to a location strictly
    inside *directory* (symlinks resolved on both sides)."""
    resolved_path = os.path.realpath(filepath)
    resolved_dir = os.path.realpath(directory) + os.sep
    return resolved_path.startswith(resolved_dir)
def get_parser():
    """Defines the command line interface of the wrapper"""
    import argparse
    from functools import partial

    class ToDict(argparse.Action):
        # Parses repeated KEY=PATH tokens into a {key: abspath} dict.
        def __call__(self, parser, namespace, values, option_string=None):
            d = {}
            for kv in values:
                k, v = kv.split("=")
                d[k] = os.path.abspath(v)
            setattr(namespace, self.dest, d)

    def _is_file(path, parser):
        """Ensure a given path exists and it is a file."""
        path = os.path.abspath(path)
        if not os.path.isfile(path):
            raise parser.error(
                "Path should point to a file (or symlink of file): <%s>." % path
            )
        return path

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False)
    IsFile = partial(_is_file, parser=parser)
    # Standard FMRIPREP arguments
    parser.add_argument('bids_dir', nargs='?', type=os.path.abspath,
                        default='')
    parser.add_argument('output_dir', nargs='?', type=os.path.abspath,
                        default='')
    parser.add_argument('analysis_level', nargs='?', choices=['participant'],
                        default='participant')
    parser.add_argument('-h', '--help', action='store_true',
                        help="show this help message and exit")
    parser.add_argument('--version', action='store_true',
                        help="show program's version number and exit")
    # Allow alternative images (semi-developer)
    parser.add_argument('-i', '--image', metavar='IMG', type=str,
                        default='poldracklab/fmriprep:{}'.format(__version__),
                        help='image name')
    # Options for mapping files and directories into container
    # Update `expected_overlap` variable in merge_help() when adding to this
    g_wrap = parser.add_argument_group(
        'Wrapper options',
        'Standard options that require mapping files into the container')
    g_wrap.add_argument('-w', '--work-dir', action='store', type=os.path.abspath,
                        help='path where intermediate results should be stored')
    g_wrap.add_argument(
        '--output-spaces', nargs="*",
        help="""\
Standard and non-standard spaces to resample anatomical and functional images to. \
Standard spaces may be specified by the form \
``<TEMPLATE>[:res-<resolution>][:cohort-<label>][...]``, where ``<TEMPLATE>`` is \
a keyword (valid keywords: %s) or path pointing to a user-supplied template, and \
may be followed by optional, colon-separated parameters. \
Non-standard spaces (valid keywords: %s) imply specific orientations and sampling \
grids. \
Important to note, the ``res-*`` modifier does not define the resolution used for \
the spatial normalization.""" % (', '.join('"%s"' % s for s in TF_TEMPLATES),
                                 ', '.join(NONSTANDARD_REFERENCES)))
    g_wrap.add_argument(
        '--fs-license-file', metavar='PATH', type=IsFile,
        default=os.getenv('FS_LICENSE', None),
        help='Path to FreeSurfer license key file. Get it (for free) by registering'
             ' at https://surfer.nmr.mgh.harvard.edu/registration.html')
    g_wrap.add_argument(
        '--fs-subjects-dir', metavar='PATH', type=os.path.abspath,
        help='Path to existing FreeSurfer subjects directory to reuse. '
             '(default: OUTPUT_DIR/freesurfer)')
    g_wrap.add_argument(
        '--config-file', metavar='PATH', type=os.path.abspath,
        help="Use pre-generated configuration file. Values in file will be overridden "
             "by command-line arguments.")
    g_wrap.add_argument(
        '--anat-derivatives', metavar='PATH', type=os.path.abspath,
        help='Path to existing sMRIPrep/fMRIPrep-anatomical derivatives to fasttrack '
             'the anatomical workflow.')
    g_wrap.add_argument(
        '--use-plugin', metavar='PATH', action='store', default=None,
        type=os.path.abspath, help='nipype plugin configuration file')
    g_wrap.add_argument(
        '--bids-database-dir', metavar='PATH', type=os.path.abspath,
        help="Path to an existing PyBIDS database folder, for faster indexing "
             "(especially useful for large datasets).")
    # Developer patch/shell options
    g_dev = parser.add_argument_group(
        'Developer options',
        'Tools for testing and debugging FMRIPREP')
    g_dev.add_argument('--patch', nargs="+", metavar="PACKAGE=PATH", action=ToDict,
                       help='local repository to use within container')
    g_dev.add_argument('--shell', action='store_true',
                       help='open shell in image instead of running FMRIPREP')
    g_dev.add_argument('--config', metavar='PATH', action='store',
                       type=os.path.abspath, help='Use custom nipype.cfg file')
    g_dev.add_argument('-e', '--env', action='append', nargs=2, metavar=('ENV_VAR', 'value'),
                       help='Set custom environment variable within container')
    g_dev.add_argument('-u', '--user', action='store',
                       help='Run container as a given user/uid. Additionally, group/gid can be'
                            'assigned, (i.e., --user <UID>:<GID>)')
    g_dev.add_argument('--network', action='store',
                       help='Run container with a different network driver '
                            '("none" to simulate no internet connection)')
    g_dev.add_argument('--no-tty', action='store_true',
                       help='Run docker without TTY flag -it')
    return parser
def main():
    """Entry point: assemble and execute the `docker run` command line."""
    parser = get_parser()
    # Capture additional arguments to pass inside container
    opts, unknown_args = parser.parse_known_args()
    # Set help if no directories set
    if (opts.bids_dir, opts.output_dir, opts.version) == ('', '', False):
        opts.help = True
    # Stop if no docker / docker fails to run
    check = check_docker()
    if check < 1:
        if opts.version:
            print('fmriprep wrapper {!s}'.format(__version__))
        if opts.help:
            parser.print_help()
        if check == -1:
            print("fmriprep: Could not find docker command... Is it installed?")
        else:
            print("fmriprep: Make sure you have permission to run 'docker'")
        return 1
    # For --help or --version, ask before downloading an image
    if not check_image(opts.image):
        resp = 'Y'
        if opts.version:
            print('fmriprep wrapper {!s}'.format(__version__))
        if opts.help:
            parser.print_help()
        if opts.version or opts.help:
            try:
                resp = input(MISSING.format(opts.image))
            except KeyboardInterrupt:
                print()
                return 1
        if resp not in ('y', 'Y', ''):
            return 0
        print('Downloading. This may take a while...')
    # Warn on low memory allocation
    mem_total = check_memory(opts.image)
    if mem_total == -1:
        print('Could not detect memory capacity of Docker container.\n'
              'Do you have permission to run docker?')
        return 1
    if not (opts.help or opts.version or '--reports-only' in unknown_args) and mem_total < 8000:
        print('Warning: <8GB of RAM is available within your Docker '
              'environment.\nSome parts of fMRIPrep may fail to complete.')
        if '--mem_mb' not in unknown_args:
            resp = 'N'
            try:
                resp = input('Continue anyway? [y/N]')
            except KeyboardInterrupt:
                print()
                return 1
            if resp not in ('y', 'Y', ''):
                return 0
    # Expose the host's docker version to the process inside the container.
    ret = subprocess.run(['docker', 'version', '--format', "{{.Server.Version}}"],
                         stdout=subprocess.PIPE)
    docker_version = ret.stdout.decode('ascii').strip()
    command = ['docker', 'run', '--rm', '-e',
               'DOCKER_VERSION_8395080871=%s' % docker_version]
    if not opts.no_tty:
        command.append('-it')
    # Patch working repositories into installed package directories
    if opts.patch:
        for pkg, repo_path in opts.patch.items():
            command.extend(
                ['-v', '{}:{}/{}:ro'.format(repo_path, PKG_PATH, pkg)]
            )
    if opts.env:
        for envvar in opts.env:
            command.extend(['-e', '%s=%s' % tuple(envvar)])
    if opts.user:
        command.extend(['-u', opts.user])
    if opts.fs_license_file:
        command.extend([
            '-v', '{}:/opt/freesurfer/license.txt:ro'.format(
                opts.fs_license_file)])
    # Positional arguments forwarded to fMRIPrep inside the container.
    main_args = []
    if opts.bids_dir:
        command.extend(['-v', ':'.join((opts.bids_dir, '/data', 'ro'))])
        main_args.append('/data')
    if opts.output_dir:
        if not os.path.exists(opts.output_dir):
            # create it before docker does
            os.makedirs(opts.output_dir)
        command.extend(['-v', ':'.join((opts.output_dir, '/out'))])
        main_args.append('/out')
    main_args.append(opts.analysis_level)
    if opts.fs_subjects_dir:
        command.extend(['-v', '{}:/opt/subjects'.format(opts.fs_subjects_dir)])
        unknown_args.extend(['--fs-subjects-dir', '/opt/subjects'])
    if opts.config_file:
        command.extend(['-v', '{}:/tmp/config.toml'.format(opts.config_file)])
        unknown_args.extend(['--config-file', '/tmp/config.toml'])
    if opts.anat_derivatives:
        command.extend(['-v', '{}:/opt/smriprep/subjects'.format(opts.anat_derivatives)])
        unknown_args.extend(['--anat-derivatives', '/opt/smriprep/subjects'])
    if opts.work_dir:
        command.extend(['-v', ':'.join((opts.work_dir, '/scratch'))])
        unknown_args.extend(['-w', '/scratch'])
    # Check that work_dir is not a child of bids_dir
    if opts.work_dir and opts.bids_dir:
        if is_in_directory(opts.work_dir, opts.bids_dir):
            print(
                'The selected working directory is a subdirectory of the input BIDS folder. '
                'Please modify the output path.')
            return 1
    if opts.config:
        command.extend(['-v', ':'.join((
            opts.config, '/home/fmriprep/.nipype/nipype.cfg', 'ro'))])
    if opts.use_plugin:
        command.extend(['-v', ':'.join((opts.use_plugin, '/tmp/plugin.yml',
                                        'ro'))])
        unknown_args.extend(['--use-plugin', '/tmp/plugin.yml'])
    if opts.bids_database_dir:
        command.extend(['-v', ':'.join((opts.bids_database_dir, '/tmp/bids_db'))])
        unknown_args.extend(['--bids-database-dir', '/tmp/bids_db'])
    if opts.output_spaces:
        # Mount user-supplied templates (anything not a known keyword).
        spaces = []
        for space in opts.output_spaces:
            if space.split(':')[0] not in (TF_TEMPLATES + NONSTANDARD_REFERENCES):
                tpl = os.path.basename(space)
                if not tpl.startswith('tpl-'):
                    raise RuntimeError("Custom template %s requires a `tpl-` prefix" % tpl)
                target = '/home/fmriprep/.cache/templateflow/' + tpl
                command.extend(['-v', ':'.join((os.path.abspath(space), target, 'ro'))])
                spaces.append(tpl[4:])
            else:
                spaces.append(space)
        unknown_args.extend(['--output-spaces'] + spaces)
    if opts.shell:
        command.append('--entrypoint=bash')
    if opts.network:
        command.append('--network=' + opts.network)
    command.append(opts.image)
    # Override help and version to describe underlying program
    # Respects '-i' flag, so will retrieve information from any image
    if opts.help:
        command.append('-h')
        targethelp = subprocess.check_output(command).decode()
        print(merge_help(parser.format_help(), targethelp))
        return 0
    elif opts.version:
        # Get version to be run and exit
        command.append('--version')
        ret = subprocess.run(command)
        return ret.returncode
    if not opts.shell:
        command.extend(main_args)
        command.extend(unknown_args)
    print("RUNNING: " + ' '.join(command))
    ret = subprocess.run(command)
    if ret.returncode:
        print("fMRIPrep: Please report errors to {}".format(__bugreports__))
    return ret.returncode
if __name__ == '__main__':
    # Propagate main()'s integer return as the process exit status.
    sys.exit(main())
|
poldracklab/fmriprep
|
wrapper/fmriprep_docker.py
|
Python
|
bsd-3-clause
| 18,834
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
# NOTE(review): `imp` has been deprecated since Python 3.4 in favor of
# importlib; `imp.load_source` is kept here as-is.
version = imp.load_source(
    'airflow.version', os.path.join('airflow', 'version.py')).version
PY3 = sys.version_info[0] == 3  # True when running under Python 3
class Tox(TestCommand):
    """`python setup.py test` hook that delegates the test run to tox."""
    user_options = [('tox-args=', None, "Arguments to pass to tox")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = ''

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import tox
        errno = tox.cmdline(args=self.tox_args.split())
        sys.exit(errno)
class CleanCommand(Command):
    """Custom clean command to tidy up the project root.

    Removes the same paths the original `rm -vrf` invocation targeted
    (./build ./dist ./*.pyc ./*.tgz ./*.egg-info), but portably via the
    standard library instead of shelling out with `os.system` (whose
    return code was silently ignored and which requires a POSIX `rm`).
    """
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        import glob
        import shutil
        patterns = ['./build', './dist', './*.pyc', './*.tgz', './*.egg-info']
        for pattern in patterns:
            for path in glob.glob(pattern):
                # Mirror `rm -v`: report each path as it is removed.
                print('removing %r' % path)
                if os.path.isdir(path) and not os.path.islink(path):
                    shutil.rmtree(path, ignore_errors=True)
                else:
                    try:
                        os.remove(path)
                    except OSError:
                        # Best-effort, like `rm -f`.
                        pass
def git_version(version):
    """
    Return a version to identify the state of the underlying git repo. The version will
    indicate whether the head of the current git-backed working directory is tied to a
    release tag or not : it will indicate the former with a 'release:{version}' prefix
    and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
    branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
    changes are present.
    """
    repo = None
    try:
        import git
        repo = git.Repo('.git')
    except ImportError:
        logger.warning('gitpython not found: Cannot compute the git version.')
        return ''
    except Exception as e:
        logger.warning('Cannot compute the git version. {}'.format(e))
        return ''
    if repo:
        sha = repo.head.commit.hexsha
        if repo.is_dirty():
            return '.dev0+{sha}.dirty'.format(sha=sha)
        # commit is clean
        return '.release:{version}+{sha}'.format(version=version, sha=sha)
    else:
        # NOTE(review): `repo` is always truthy when the assignment above
        # succeeded (both failure paths return early), so this branch
        # appears unreachable -- confirm before relying on it.
        return 'no_git_version'
def write_version(filename=os.path.join(*['airflow',
                                          'git_version'])):
    """Persist the computed git-version string of the current tree into
    *filename* (default: airflow/git_version)."""
    content = "{}".format(git_version(version))
    with open(filename, 'w') as fh:
        fh.write(content)
# Optional-feature dependency sets; each list backs an `extras_require` key.
# NOTE(review): `async` became a reserved keyword in Python 3.7, so this
# assignment is a SyntaxError there; renaming it also requires updating
# the `'async': async` entry passed to setup() in do_setup().
async = [
    'greenlet>=0.4.9',
    'eventlet>= 0.9.7',
    'gevent>=0.13'
]
atlas = ['atlasclient>=0.1.2']
azure_blob_storage = ['azure-storage>=0.34.0']
azure_data_lake = [
    'azure-mgmt-resource==1.2.2',
    'azure-mgmt-datalake-store==0.4.0',
    'azure-datalake-store==0.0.19'
]
cassandra = ['cassandra-driver>=3.13.0']
celery = [
    'celery>=4.1.1, <4.2.0',
    'flower>=0.7.3, <1.0'
]
cgroups = [
    'cgroupspy>=0.1.4',
]
# major update coming soon, clamp to 0.x
cloudant = ['cloudant>=0.5.9,<2.0']
crypto = ['cryptography>=0.9.3']
dask = [
    'distributed>=1.17.1, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
    'sphinx>=1.2.3',
    'sphinx-argparse>=0.1.13',
    'sphinx-rtd-theme>=0.1.6',
    'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker>=2.0.0']
druid = ['pydruid>=0.4.1']
elasticsearch = [
    'elasticsearch>=5.0.0,<6.0.0',
    'elasticsearch-dsl>=5.0.0,<6.0.0'
]
emr = ['boto3>=1.0.0']
gcp_api = [
    'httplib2>=0.9.2',
    'google-api-python-client>=1.6.0, <2.0.0dev',
    'google-auth>=1.0.0, <2.0.0dev',
    'google-auth-httplib2>=0.0.1',
    'google-cloud-container>=0.1.1',
    'PyOpenSSL',
    'pandas-gbq'
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
hdfs = ['snakebite>=2.7.8']
hive = [
    'hmsclient>=0.1.0',
    'pyhive>=0.6.0',
]
jdbc = ['jaydebeapi>=1.1.1']
jenkins = ['python-jenkins>=0.4.15']
jira = ['JIRA>1.0.7']
kerberos = ['pykerberos>=1.1.13',
            'requests_kerberos>=0.10.0',
            'thrift_sasl>=0.2.0',
            'snakebite[kerberos]>=2.7.8']
kubernetes = ['kubernetes>=3.0.0',
              'cryptography>=2.0.0']
ldap = ['ldap3>=0.9.9.1']
mssql = ['pymssql>=2.1.1']
mysql = ['mysqlclient>=1.3.6']
oracle = ['cx_Oracle>=5.1.2']
password = [
    'bcrypt>=2.0.0',
    'flask-bcrypt>=0.7.1',
]
pinot = ['pinotdb>=0.1.1']
postgres = ['psycopg2-binary>=2.7.4']
qds = ['qds-sdk>=1.9.6']
rabbitmq = ['librabbitmq>=1.6.1']
redis = ['redis>=2.10.5']
s3 = ['boto3>=1.7.0']
salesforce = ['simple-salesforce>=0.72']
samba = ['pysmbclient>=0.1.3']
segment = ['analytics-python>=1.2.9']
sendgrid = ['sendgrid>=5.2.0']
slack = ['slackclient>=1.0.0']
mongo = ['pymongo>=3.6.0']
snowflake = ['snowflake-connector-python>=1.5.2',
             'snowflake-sqlalchemy>=1.1.0']
ssh = ['paramiko>=2.1.1', 'pysftp>=0.2.9']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
winrm = ['pywinrm==0.2.2']
zendesk = ['zdesk']
# Aggregate extras built from the individual feature lists above.
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant + druid + pinot \
    + cassandra + mongo
devel = [
    'click',
    'freezegun',
    'jira',
    'lxml>=3.3.4',
    'mock',
    'mongomock',
    'moto==1.1.19',
    'nose',
    'nose-ignore-docstring==0.2',
    'nose-timer',
    'parameterized',
    'paramiko',
    'pysftp',
    'pywinrm',
    'qds-sdk>=1.9.6',
    'rednose',
    'requests_mock'
]
devel_minreq = devel + kubernetes + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = (sendgrid + devel + all_dbs + doc + samba + s3 + slack + crypto + oracle +
             docker + ssh + kubernetes + celery + azure_blob_storage + redis + gcp_api +
             datadog + zendesk + jdbc + ldap + kerberos + password + webhdfs + jenkins +
             druid + pinot + segment + snowflake + elasticsearch + azure_data_lake +
             atlas)
# Snakebite & Google Cloud Dataflow are not Python 3 compatible :'(
if PY3:
    devel_ci = [package for package in devel_all if package not in
                ['snakebite>=2.7.8', 'snakebite[kerberos]>=2.7.8']]
else:
    devel_ci = devel_all
def do_setup():
    """Write the git version file, then run setuptools' setup() for apache-airflow.

    All install/extras requirement lists are the module-level constants defined
    above; `version`, `write_version`, `Tox` and `CleanCommand` are defined
    earlier in this file.
    """
    write_version()
    setup(
        name='apache-airflow',
        description='Programmatically author, schedule and monitor data pipelines',
        license='Apache License 2.0',
        version=version,
        packages=find_packages(exclude=['tests*']),
        package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
        include_package_data=True,
        zip_safe=False,
        scripts=['airflow/bin/airflow'],
        install_requires=[
            'alembic>=0.8.3, <0.9',
            'bleach~=2.1.3',
            'configparser>=3.5.0, <3.6.0',
            'croniter>=0.3.17, <0.4',
            'dill>=0.2.2, <0.3',
            'flask>=0.12.4, <0.13',
            'flask-appbuilder>=1.11.1, <2.0.0',
            'flask-admin==1.4.1',
            'flask-caching>=1.3.3, <1.4.0',
            'flask-login==0.2.11',
            'flask-swagger==0.2.13',
            'flask-wtf>=0.14.2, <0.15',
            'funcsigs==1.0.0',
            'future>=0.16.0, <0.17',
            'gitpython>=2.0.2',
            'gunicorn>=19.4.0, <20.0',
            'iso8601>=0.1.12',
            'jinja2>=2.7.3, <2.9.0',
            'lxml>=3.6.0, <4.0',
            'markdown>=2.5.2, <3.0',
            'pandas>=0.17.1, <1.0.0',
            'pendulum==1.4.4',
            'psutil>=4.2.0, <5.0.0',
            'pygments>=2.0.1, <3.0',
            'python-daemon>=2.1.1, <2.2',
            'python-dateutil>=2.3, <3',
            'python-nvd3==0.15.0',
            'requests>=2.5.1, <3',
            'setproctitle>=1.1.8, <2',
            'sqlalchemy>=1.1.15, <1.2.0',
            'sqlalchemy-utc>=0.9.0',
            'tabulate>=0.7.5, <0.8.0',
            'tenacity==4.8.0',
            'thrift>=0.9.2',
            'tzlocal>=1.4',
            'unicodecsv>=0.14.1',
            'werkzeug>=0.14.1, <0.15.0',
            'zope.deprecation>=4.0, <5.0',
        ],
        setup_requires=[
            'docutils>=0.14, <1.0',
        ],
        extras_require={
            'all': devel_all,
            'devel_ci': devel_ci,
            'all_dbs': all_dbs,
            'atlas': atlas,
            # NOTE(review): `async` is a reserved keyword from Python 3.7 on;
            # this reference to a variable named `async` (defined earlier in
            # the file) limits the file to Python <= 3.6 / 2.7 - consistent
            # with the classifiers below, but worth confirming.
            'async': async,
            'azure_blob_storage': azure_blob_storage,
            'azure_data_lake': azure_data_lake,
            'cassandra': cassandra,
            'celery': celery,
            'cgroups': cgroups,
            'cloudant': cloudant,
            'crypto': crypto,
            'dask': dask,
            'databricks': databricks,
            'datadog': datadog,
            'devel': devel_minreq,
            'devel_hadoop': devel_hadoop,
            'doc': doc,
            'docker': docker,
            'druid': druid,
            'elasticsearch': elasticsearch,
            'emr': emr,
            'gcp_api': gcp_api,
            'github_enterprise': github_enterprise,
            'hdfs': hdfs,
            'hive': hive,
            'jdbc': jdbc,
            'jira': jira,
            'kerberos': kerberos,
            'kubernetes': kubernetes,
            'ldap': ldap,
            'mongo': mongo,
            'mssql': mssql,
            'mysql': mysql,
            'oracle': oracle,
            'password': password,
            'pinot': pinot,
            'postgres': postgres,
            'qds': qds,
            'rabbitmq': rabbitmq,
            'redis': redis,
            's3': s3,
            'salesforce': salesforce,
            'samba': samba,
            'sendgrid': sendgrid,
            'segment': segment,
            'slack': slack,
            'snowflake': snowflake,
            'ssh': ssh,
            'statsd': statsd,
            'vertica': vertica,
            'webhdfs': webhdfs,
            'winrm': winrm
        },
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.4',
            'Topic :: System :: Monitoring',
        ],
        author='Apache Software Foundation',
        author_email='dev@airflow.incubator.apache.org',
        url='http://airflow.incubator.apache.org/',
        download_url=(
            'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
        cmdclass={
            'test': Tox,
            'extra_clean': CleanCommand,
        },
    )


# Standard script entry point: run the setup when executed directly.
if __name__ == "__main__":
    do_setup()
|
cfei18/incubator-airflow
|
setup.py
|
Python
|
apache-2.0
| 11,546
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import shutil
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
from webdriver_pages import settings
from webdriver_pages.settings import Behaviors, ContentTypes
class PrefsTest(pyauto.PyUITest):
  """TestCase for Preferences.

  Each test exercises one preference (or content setting) via the pyauto
  automation API, typically by setting the pref, restarting the browser with
  the profile preserved, and verifying the pref survived / took effect.
  """

  INFOBAR_TYPE = 'rph_infobar'

  def setUp(self):
    pyauto.PyUITest.setUp(self)
    self._driver = self.NewWebDriver()

  def Debug(self):
    """Test method for experimentation.

    This method will not run automatically.
    """
    while True:
      raw_input('Interact with the browser and hit <enter> to dump prefs... ')
      self.pprint(self.GetPrefsInfo().Prefs())

  def testSessionRestore(self):
    """Test session restore preference."""
    url1 = 'http://www.google.com/'
    url2 = 'http://news.google.com/'
    self.NavigateToURL(url1)
    self.AppendTab(pyauto.GURL(url2))
    num_tabs = self.GetTabCount()
    # Set pref to restore session on startup.
    self.SetPrefs(pyauto.kRestoreOnStartup, 1)
    logging.debug('Setting %s to 1' % pyauto.kRestoreOnStartup)
    self.RestartBrowser(clear_profile=False)
    self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kRestoreOnStartup), 1)
    self.assertEqual(num_tabs, self.GetTabCount())
    self.ActivateTab(0)
    self.assertEqual(url1, self.GetActiveTabURL().spec())
    self.ActivateTab(1)
    self.assertEqual(url2, self.GetActiveTabURL().spec())

  def testNavigationStateOnSessionRestore(self):
    """Verify navigation state is preserved on session restore."""
    urls = ('http://www.google.com/',
            'http://news.google.com/',
            'http://dev.chromium.org/',)
    for url in urls:
      self.NavigateToURL(url)
    self.TabGoBack()
    self.assertEqual(self.GetActiveTabURL().spec(), urls[-2])
    self.SetPrefs(pyauto.kRestoreOnStartup, 1)  # set pref to restore session
    self.RestartBrowser(clear_profile=False)
    # Verify that navigation state (forward/back state) is restored.
    self.TabGoBack()
    self.assertEqual(self.GetActiveTabURL().spec(), urls[0])
    for i in (-2, -1):
      # BUGFIX: was `tab.GoForward()` with `tab` undefined (NameError);
      # the pyauto API (cf. TabGoBack above) is self.TabGoForward().
      self.TabGoForward()
      self.assertEqual(self.GetActiveTabURL().spec(), urls[i])

  def testSessionRestoreURLs(self):
    """Verify restore URLs preference."""
    url1 = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title1.html'))
    url2 = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title2.html'))
    # Set pref to restore given URLs on startup
    self.SetPrefs(pyauto.kRestoreOnStartup, 4)  # 4 is for restoring URLs
    self.SetPrefs(pyauto.kURLsToRestoreOnStartup, [url1, url2])
    self.RestartBrowser(clear_profile=False)
    # Verify
    self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kRestoreOnStartup), 4)
    self.assertEqual(2, self.GetTabCount())
    self.ActivateTab(0)
    self.assertEqual(url1, self.GetActiveTabURL().spec())
    self.ActivateTab(1)
    self.assertEqual(url2, self.GetActiveTabURL().spec())

  def testSessionRestoreShowBookmarkBar(self):
    """Verify restore for bookmark bar visibility."""
    # Use unittest-style assertion for consistency with the rest of the class
    # (was a bare `assert`, which bypasses the test framework's reporting).
    self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar))
    self.SetPrefs(pyauto.kShowBookmarkBar, True)
    self.assertEqual(True, self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar))
    self.RestartBrowser(clear_profile=False)
    self.assertEqual(True, self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar))
    self.assertTrue(self.GetBookmarkBarVisibility())

  def testDownloadDirPref(self):
    """Verify download dir pref."""
    test_dir = os.path.join(self.DataDir(), 'downloads')
    file_url = self.GetFileURLForPath(os.path.join(test_dir, 'a_zip_file.zip'))
    download_dir = self.GetDownloadDirectory().value()
    new_dl_dir = os.path.join(download_dir, 'My+Downloads Folder')
    downloaded_pkg = os.path.join(new_dl_dir, 'a_zip_file.zip')
    os.path.exists(new_dl_dir) and shutil.rmtree(new_dl_dir)
    os.makedirs(new_dl_dir)
    # Set pref to download in new_dl_dir
    self.SetPrefs(pyauto.kDownloadDefaultDirectory, new_dl_dir)
    self.DownloadAndWaitForStart(file_url)
    self.WaitForAllDownloadsToComplete()
    self.assertTrue(os.path.exists(downloaded_pkg))
    shutil.rmtree(new_dl_dir, ignore_errors=True)  # cleanup

  def testToolbarButtonsPref(self):
    """Verify toolbar buttons prefs."""
    # Assert defaults first
    self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kShowHomeButton))
    self.SetPrefs(pyauto.kShowHomeButton, True)
    self.RestartBrowser(clear_profile=False)
    self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kShowHomeButton))

  def testNetworkPredictionEnabledPref(self):
    """Verify DNS prefetching pref."""
    # Assert default
    self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kNetworkPredictionEnabled))
    self.SetPrefs(pyauto.kNetworkPredictionEnabled, False)
    self.RestartBrowser(clear_profile=False)
    self.assertFalse(self.GetPrefsInfo().Prefs(
        pyauto.kNetworkPredictionEnabled))

  def testHomepagePrefs(self):
    """Verify homepage prefs."""
    # "Use the New Tab page"
    self.SetPrefs(pyauto.kHomePageIsNewTabPage, True)
    logging.debug('Setting %s to 1' % pyauto.kHomePageIsNewTabPage)
    self.RestartBrowser(clear_profile=False)
    self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kHomePageIsNewTabPage),
                     True)
    # "Open this page"
    url = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title1.html'))
    self.SetPrefs(pyauto.kHomePage, url)
    self.SetPrefs(pyauto.kHomePageIsNewTabPage, False)
    self.RestartBrowser(clear_profile=False)
    self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kHomePage), url)
    self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kHomePageIsNewTabPage))
    # TODO(nirnimesh): Actually verify that homepage loads.
    # This requires telling pyauto *not* to set about:blank as homepage.

  def testGeolocationPref(self):
    """Verify geolocation pref.

    Checks for the geolocation infobar.
    """
    # GetBrowserInfo() call seems to fail later on in this test. Call it early.
    # crbug.com/89000
    branding = self.GetBrowserInfo()['properties']['branding']
    url = self.GetFileURLForPath(os.path.join(  # triggers geolocation
        self.DataDir(), 'geolocation', 'geolocation_on_load.html'))
    self.assertEqual(3,  # default state
        self.GetPrefsInfo().Prefs(pyauto.kGeolocationDefaultContentSetting))
    self.NavigateToURL(url)
    self.assertTrue(self.WaitForInfobarCount(1))
    self.assertTrue(self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars'])
    # Disable geolocation
    self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 2)
    self.assertEqual(2,
        self.GetPrefsInfo().Prefs(pyauto.kGeolocationDefaultContentSetting))
    self.ReloadTab()
    # Fails on Win7/Vista Chromium bots. crbug.com/89000
    if (self.IsWin7() or self.IsWinVista()) and branding == 'Chromium':
      return
    behavior = self._driver.execute_async_script(
        'triggerGeoWithCallback(arguments[arguments.length - 1]);')
    self.assertEqual(
        behavior, Behaviors.BLOCK,
        msg='Behavior is "%s" when it should be BLOCKED.' % behavior)

  def testUnderTheHoodPref(self):
    """Verify the security preferences for Under the Hood.

    The setting is enabled by default."""
    pref_list = [pyauto.kNetworkPredictionEnabled, pyauto.kSafeBrowsingEnabled,
                 pyauto.kAlternateErrorPagesEnabled,
                 pyauto.kSearchSuggestEnabled, pyauto.kShowOmniboxSearchHint]
    for pref in pref_list:
      # Verify the default value
      self.assertEqual(self.GetPrefsInfo().Prefs(pref), True)
      self.SetPrefs(pref, False)
    self.RestartBrowser(clear_profile=False)
    for pref in pref_list:
      self.assertEqual(self.GetPrefsInfo().Prefs(pref), False)

  def testJavaScriptEnableDisable(self):
    """Verify enabling disabling javascript prefs work """
    self.assertTrue(
        self.GetPrefsInfo().Prefs(pyauto.kWebKitJavascriptEnabled))
    url = self.GetFileURLForDataPath(
        os.path.join('javaScriptTitle.html'))
    title1 = 'Title from script javascript enabled'
    self.NavigateToURL(url)
    self.assertEqual(title1, self.GetActiveTabTitle())
    self.SetPrefs(pyauto.kWebKitJavascriptEnabled, False)
    title = 'This is html title'
    self.NavigateToURL(url)
    self.assertEqual(title, self.GetActiveTabTitle())

  def testHaveLocalStatePrefs(self):
    """Verify that we have some Local State prefs."""
    self.assertTrue(self.GetLocalStatePrefsInfo())

  def testAllowSelectedGeoTracking(self):
    """Verify hostname pattern and behavior for allowed tracking."""
    # Default location tracking option "Ask me".
    self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
    self.NavigateToURL(
        self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html'))
    self.assertTrue(self.WaitForInfobarCount(1))
    self.PerformActionOnInfobar('accept', infobar_index=0)  # Allow tracking.
    # Get the hostname pattern (e.g. http://127.0.0.1:57622).
    hostname_pattern = (
        '/'.join(self.GetHttpURLForDataPath('').split('/')[0:3]))
    self.assertEqual(
        # Allow the hostname.
        {hostname_pattern+','+hostname_pattern: {'geolocation': 1}},
        self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))

  def testDismissedInfobarSavesNoEntry(self):
    """Verify dismissing infobar does not save an exception entry."""
    # Default location tracking option "Ask me".
    self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
    self.NavigateToURL(
        self.GetFileURLForDataPath('geolocation', 'geolocation_on_load.html'))
    self.assertTrue(self.WaitForInfobarCount(1))
    self.PerformActionOnInfobar('dismiss', infobar_index=0)
    self.assertEqual(
        {}, self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))

  def testGeolocationBlockedWhenTrackingDenied(self):
    """Verify geolocations is blocked when tracking is denied.

    The test verifies the blocked hostname pattern entry on the Geolocations
    exceptions page.
    """
    # Ask for permission when site wants to track.
    self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
    self.NavigateToURL(
        self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html'))
    self.assertTrue(self.WaitForInfobarCount(1))
    self.PerformActionOnInfobar('cancel', infobar_index=0)  # Deny tracking.
    behavior = self._driver.execute_async_script(
        'triggerGeoWithCallback(arguments[arguments.length - 1]);')
    self.assertEqual(
        behavior, Behaviors.BLOCK,
        msg='Behavior is "%s" when it should be BLOCKED.' % behavior)
    # Get the hostname pattern (e.g. http://127.0.0.1:57622).
    hostname_pattern = (
        '/'.join(self.GetHttpURLForDataPath('').split('/')[0:3]))
    self.assertEqual(
        # Block the hostname.
        {hostname_pattern+','+hostname_pattern: {'geolocation': 2}},
        self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))

  def _CheckForVisibleImage(self, tab_index=0, windex=0):
    """Checks whether or not an image is visible on the webpage.

    Args:
      tab_index: Tab index. Defaults to 0 (first tab).
      windex: Window index. Defaults to 0 (first window).

    Returns:
      True if image is loaded, otherwise returns False if image is not loaded.
    """
    # Checks whether an image is loaded by checking the area (width
    # and height) of the image. If the area is non zero then the image is
    # visible. If the area is zero then the image is not loaded.
    # Chrome zeros the |naturalWidth| and |naturalHeight|.
    script = """
      for (i=0; i < document.images.length; i++) {
        if ((document.images[i].naturalWidth != 0) &&
            (document.images[i].naturalHeight != 0)) {
          window.domAutomationController.send(true);
        }
      }
      window.domAutomationController.send(false);
    """
    return self.ExecuteJavascript(script, windex=windex, tab_index=tab_index)

  def testImageContentSettings(self):
    """Verify image content settings show or hide images."""
    url = self.GetHttpURLForDataPath('settings', 'image_page.html')
    self.NavigateToURL(url)
    self.assertTrue(self._CheckForVisibleImage(),
                    msg='No visible images found.')
    # Set to block all images from loading.
    self.SetPrefs(pyauto.kDefaultContentSettings, {'images': 2})
    self.NavigateToURL(url)
    self.assertFalse(self._CheckForVisibleImage(),
                     msg='At least one visible image found.')

  def testImagesNotBlockedInIncognito(self):
    """Verify images are not blocked in Incognito mode."""
    url = self.GetHttpURLForDataPath('settings', 'image_page.html')
    self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
    self.NavigateToURL(url, 1, 0)
    self.assertTrue(self._CheckForVisibleImage(windex=1),
                    msg='No visible images found in Incognito mode.')

  def testBlockImagesForHostname(self):
    """Verify images blocked for defined hostname pattern."""
    url = 'http://www.google.com'
    page = settings.ManageExceptionsPage.FromNavigation(
        self._driver, ContentTypes.IMAGES)
    pattern, behavior = (url, Behaviors.BLOCK)
    # Add an exception BLOCK for hostname pattern 'www.google.com'.
    page.AddNewException(pattern, behavior)
    self.NavigateToURL(url)
    self.assertFalse(self._CheckForVisibleImage(),
                     msg='At least one visible image found.')

  def testAllowImagesForHostname(self):
    """Verify images allowed for defined hostname pattern."""
    url = 'http://www.google.com'
    page = settings.ManageExceptionsPage.FromNavigation(
        self._driver, ContentTypes.IMAGES)
    pattern, behavior = (url, Behaviors.ALLOW)
    # Add an exception ALLOW for hostname pattern 'www.google.com'.
    page.AddNewException(pattern, behavior)
    self.NavigateToURL(url)
    self.assertTrue(self._CheckForVisibleImage(),
                    msg='No visible images found.')

  def testProtocolHandlerRegisteredCorrectly(self):
    """Verify sites that ask to be default handlers registers correctly."""
    url = self.GetHttpURLForDataPath('settings', 'protocol_handler.html')
    self.NavigateToURL(url)
    # Returns a dictionary with the custom handler.
    asked_handler_dict = self._driver.execute_script(
        'return registerCustomHandler()')
    self.PerformActionOnInfobar(
        'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
            self, self.INFOBAR_TYPE))
    self._driver.find_element_by_id('test_protocol').click()
    self.assertTrue(
        self._driver.execute_script(
            'return doesQueryConformsToProtocol("%s", "%s")'
            % (asked_handler_dict['query_key'],
               asked_handler_dict['query_value'])),
        msg='Protocol did not register correctly.')
# Run the pyauto functional test harness when executed as a script.
if __name__ == '__main__':
  pyauto_functional.Main()
|
plxaye/chromium
|
src/chrome/test/functional/prefs.py
|
Python
|
apache-2.0
| 15,070
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import gevent
import logging
import kazoo.client
import kazoo.exceptions
import kazoo.handlers.gevent
import kazoo.recipe.election
from kazoo.client import KazooState
from kazoo.retry import KazooRetry
from bitarray import bitarray
from cfgm_common.exceptions import ResourceExhaustionError, ResourceExistsError
from gevent.coros import BoundedSemaphore
import uuid
LOG_DIR = '/var/log/contrail/'
class IndexAllocator(object):
    """Allocates integer indices backed by zookeeper nodes.

    Free/used state is mirrored locally in a bitarray (``_in_use``); each
    allocated index also exists as a znode under ``path`` so allocations
    survive restarts and are shared across processes.  Indices may be drawn
    from several disjoint ranges (``alloc_list``); the bitarray uses a
    compact "bit position" numbering that is translated to/from real
    zookeeper index values by ``_get_zk_index_from_bit`` /
    ``_get_bit_from_zk_index``.
    """

    def __init__(self, zookeeper_client, path, size=0, start_idx=0,
                 reverse=False,alloc_list=None, max_alloc=0):
        # size/start_idx define a single contiguous range; alloc_list (a list
        # of {'start':..,'end':..} dicts) overrides it with multiple ranges.
        self._size = size
        self._start_idx = start_idx
        if alloc_list is None:
            self._alloc_list = [{'start':start_idx, 'end':start_idx+size}]
        else:
            sorted_alloc_list = sorted(alloc_list, key=lambda k: k['start'])
            self._alloc_list = sorted_alloc_list

        alloc_count = len(self._alloc_list)
        total_size = 0
        size = 0

        #check for overlap in alloc_list --TODO
        for alloc_idx in range (0, alloc_count -1):
            idx_start_addr = self._alloc_list[alloc_idx]['start']
            idx_end_addr = self._alloc_list[alloc_idx]['end']
            next_start_addr = self._alloc_list[alloc_idx+1]['start']
            if next_start_addr <= idx_end_addr:
                raise Exception(
                    'Allocation Lists Overlapping: %s' %(alloc_list))
            size += idx_end_addr - idx_start_addr + 1
        size += self._alloc_list[alloc_count-1]['end'] - self._alloc_list[alloc_count-1]['start'] + 1

        if max_alloc == 0:
            self._max_alloc = size
        else:
            self._max_alloc = max_alloc

        self._zookeeper_client = zookeeper_client
        self._path = path
        self._in_use = bitarray('0')
        self._reverse = reverse
        # Seed the local bitmap from the indices already present in zookeeper.
        for idx in self._zookeeper_client.get_children(path):
            idx_int = self._get_bit_from_zk_index(int(idx))
            if idx_int >= 0:
                self._set_in_use(idx_int)
        # end for idx
    # end __init__

    def _get_zk_index_from_bit(self, idx):
        """Map a bitarray position back to the real zookeeper index value."""
        size = idx
        if self._reverse:
            for alloc in reversed(self._alloc_list):
                size -= alloc['end'] - alloc['start'] + 1
                if size < 0:
                   return alloc['start']-size - 1
        else:
            for alloc in self._alloc_list:
                size -= alloc['end'] - alloc['start'] + 1
                if size < 0:
                   return alloc['end']+size + 1

        raise ResourceExhaustionError(
            'Cannot get zk index from bit %s' %(idx))
    # end _get_zk_index

    def _get_bit_from_zk_index(self, idx):
        """Map a real zookeeper index to its bitarray position (-1 if out of range)."""
        size = 0
        if self._reverse:
            for alloc in reversed(self._alloc_list):
                if alloc['start'] <= idx <= alloc['end']:
                    return alloc['end'] - idx + size
                size += alloc['end'] - alloc['start'] + 1
            pass
        else:
            for alloc in self._alloc_list:
                if alloc['start'] <= idx <= alloc['end']:
                    return idx - alloc['start'] + size
                size += alloc['end'] - alloc['start'] + 1
        return -1
    # end _get_bit_from_zk_index

    def _set_in_use(self, bitnum):
        # if the index is higher than _max_alloc, do not use the bitarray, in
        # order to reduce the size of the bitarray. Otherwise, set the bit
        # corresponding to idx to 1 and extend the _in_use bitarray if needed
        if bitnum > self._max_alloc:
            return
        if bitnum >= self._in_use.length():
            temp = bitarray(bitnum - self._in_use.length())
            temp.setall(0)
            temp.append('1')
            self._in_use.extend(temp)
        else:
            self._in_use[bitnum] = 1
    # end _set_in_use

    def _reset_in_use(self, bitnum):
        # if the index is higher than _max_alloc, do not use the bitarray, in
        # order to reduce the size of the bitarray. Otherwise, set the bit
        # corresponding to idx to 1 and extend the _in_use bitarray if needed
        if bitnum > self._max_alloc:
            return
        if bitnum >= self._in_use.length():
            return
        else:
            self._in_use[bitnum] = 0
    # end _reset_in_use

    def set_in_use(self, idx):
        """Mark zookeeper index *idx* as used in the local bitmap (no zk write)."""
        bit_idx = self._get_bit_from_zk_index(idx)
        if bit_idx < 0:
            return
        self._set_in_use(bit_idx)
    # end set_in_use

    def reset_in_use(self, idx):
        """Mark zookeeper index *idx* as free in the local bitmap (no zk write)."""
        bit_idx = self._get_bit_from_zk_index(idx)
        if bit_idx < 0:
            return
        self._reset_in_use(bit_idx)
    # end reset_in_use

    def get_alloc_count(self):
        return self._in_use.count()
    # end get_alloc_count

    def alloc(self, value=None):
        # Allocates a index from the allocation list
        if self._in_use.all():
            idx = self._in_use.length()
            if idx > self._max_alloc:
                raise ResourceExhaustionError()
            self._in_use.append(1)
        else:
            idx = self._in_use.index(0)
            self._in_use[idx] = 1

        idx = self._get_zk_index_from_bit(idx)
        try:
            # Create a node at path and return its integer value
            id_str = "%(#)010d" % {'#': idx}
            self._zookeeper_client.create_node(self._path + id_str, value)
            return idx
        except ResourceExistsError:
            # Another process grabbed this index first; retry recursively.
            return self.alloc(value)
    # end alloc

    def reserve(self, idx, value=None):
        # Reserves the requested index if available
        # NOTE(review): the range check uses the single-range parameters
        # (_start_idx/_size) even when alloc_list was supplied -- confirm
        # callers never reserve() with a multi-range allocator.
        if not self._start_idx <= idx < self._start_idx + self._size:
            return None
        try:
            # Create a node at path and return its integer value
            id_str = "%(#)010d" % {'#': idx}
            self._zookeeper_client.create_node(self._path + id_str, value)
            self.set_in_use(idx)
            return idx
        except ResourceExistsError:
            self.set_in_use(idx)
            existing_value = self.read(idx)
            if (value == existing_value):
                # idempotent reserve
                return idx
            msg = 'For index %s reserve conflicts with existing value %s.' \
                  %(idx, existing_value)
            self._zookeeper_client.syslog(msg, level='notice')
            raise
    # end reserve

    def delete(self, idx):
        """Delete the znode for *idx* and clear its bit locally."""
        id_str = "%(#)010d" % {'#': idx}
        self._zookeeper_client.delete_node(self._path + id_str)
        bit_idx = self._get_bit_from_zk_index(idx)
        if 0 <= bit_idx < self._in_use.length():
            self._in_use[bit_idx] = 0
    # end delete

    def read(self, idx):
        """Return the value stored at *idx* (marking it in-use), or None."""
        id_str = "%(#)010d" % {'#': idx}
        id_val = self._zookeeper_client.read_node(self._path+id_str)
        if id_val is not None:
            bit_idx = self._get_bit_from_zk_index(idx)
            if bit_idx >= 0:
                self._set_in_use(bit_idx)
        return id_val
    # end read

    def empty(self):
        """True when no index is currently allocated (per the local bitmap)."""
        return not self._in_use.any()
    # end empty

    @classmethod
    def delete_all(cls, zookeeper_client, path):
        """Remove the whole allocator subtree under *path* (best effort)."""
        try:
            zookeeper_client.delete_node(path, recursive=True)
        except kazoo.exceptions.NotEmptyError:
            #TODO: Add retries for NotEmptyError
            zookeeper_client.syslog("NotEmptyError while deleting %s" % path)
    # end delete_all

#end class IndexAllocator
class ZookeeperClient(object):
    """Thin gevent-friendly wrapper around kazoo's KazooClient.

    Adds: per-module rotating file logging, infinite-retry CRUD helpers,
    connection-state reporting via sandesh, master election, and a
    process-exit default when the zookeeper session is lost.
    (Python 2 code: note the print statement in __init__.)
    """

    def __init__(self, module, server_list, logging_fn=None):
        # logging
        logger = logging.getLogger(module)
        logger.setLevel(logging.DEBUG)
        try:
            # NOTE(review): logging.handlers is never imported explicitly in
            # this file; this relies on another import having pulled in the
            # submodule -- confirm, or add `import logging.handlers`.
            handler = logging.handlers.RotatingFileHandler(
                LOG_DIR + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
        except IOError:
            print "Cannot open log file in %s" %(LOG_DIR)
        else:
            # `else` branch: only attach the handler if the file opened.
            log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
                                           datefmt='%m/%d/%Y %I:%M:%S %p')
            handler.setFormatter(log_format)
            logger.addHandler(handler)

        if logging_fn:
            self.log = logging_fn
        else:
            self.log = self.syslog

        # KazooRetry to retry keeper CRUD operations
        self._retry = KazooRetry(max_tries=None, max_delay=300,
                                 sleep_func=gevent.sleep)

        self._zk_client = kazoo.client.KazooClient(
                server_list,
                timeout=400,
                handler=kazoo.handlers.gevent.SequentialGeventHandler(),
                logger=logger,
                connection_retry=self._retry,
                command_retry=self._retry)

        self._zk_client.add_listener(self._zk_listener)
        self._logger = logger
        self._election = None
        self._server_list = server_list

        self._conn_state = None
        self._sandesh_connection_info_update(status='INIT', message='')
        self._lost_cb = None
        self._suspend_cb = None

        self.connect()
    # end __init__

    # start
    def connect(self):
        """Block (with 1s gevent sleeps) until the zookeeper session starts."""
        while True:
            try:
                self._zk_client.start()
                break
            except gevent.event.Timeout as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master election
            except Exception as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
        # Update connection info
        self._sandesh_connection_info_update(status='UP', message='')

    # end

    def is_connected(self):
        return self._zk_client.state == KazooState.CONNECTED
    # end is_connected

    def syslog(self, msg, *args, **kwargs):
        """Default log sink: route *msg* to the module logger.

        ``level`` may be a logger method name ('info', 'notice', ...) or a
        sandesh integer level, which is translated to a logging level.
        """
        if not self._logger:
            return
        level = kwargs.get('level', 'info')
        if isinstance(level, int):
            from pysandesh.sandesh_logger import SandeshLogger
            level = SandeshLogger.get_py_logger_level(level)
            self._logger.log(level, msg)
            return
        # Unknown string levels fall back to info.
        log_method = getattr(self._logger, level, self._logger.info)
        log_method(msg)
    # end syslog

    def set_lost_cb(self, lost_cb=None):
        # set a callback to be called when kazoo state is lost
        # set to None for default action
        self._lost_cb = lost_cb
    # end set_lost_cb

    def set_suspend_cb(self, suspend_cb=None):
        # set a callback to be called when kazoo state is suspend
        # set to None for default action
        self._suspend_cb = suspend_cb
    # end set_suspend_cb

    def _zk_listener(self, state):
        """Kazoo state listener: report status and react to session loss."""
        if state == KazooState.CONNECTED:
            if self._election:
                self._election.cancel()
            # Update connection info
            self._sandesh_connection_info_update(status='UP', message='')
        elif state == KazooState.LOST:
            # Lost the session with ZooKeeper Server
            # Best of option we have is to exit the process and restart all
            # over again
            self._sandesh_connection_info_update(status='DOWN',
                                      message='Connection to Zookeeper lost')
            if self._lost_cb:
                self._lost_cb()
            else:
                os._exit(2)
        elif state == KazooState.SUSPENDED:
            # Update connection info
            self._sandesh_connection_info_update(status='INIT',
                message = 'Connection to zookeeper lost. Retrying')
            if self._suspend_cb:
                self._suspend_cb()

    # end

    def master_election(self, path, identifier, func, *args, **kwargs):
        """Run *func* while holding the zookeeper election at *path* (blocks)."""
        self._election = self._zk_client.Election(path, identifier)
        self._election.run(func, *args, **kwargs)
    # end master_election

    def create_node(self, path, value=None):
        """Create znode *path* with *value* (random uuid if None).

        Raises ResourceExistsError if the node exists with a different value;
        idempotent if it exists with the same value.
        """
        try:
            if value is None:
                value = uuid.uuid4()
            retry = self._retry.copy()
            retry(self._zk_client.create, path, str(value), makepath=True)
        except kazoo.exceptions.NodeExistsError:
            current_value = self.read_node(path)
            if current_value == value:
                return True;
            raise ResourceExistsError(path, str(current_value), 'zookeeper')
    # end create_node

    def delete_node(self, path, recursive=False):
        """Delete znode *path*; missing nodes are silently ignored."""
        try:
            retry = self._retry.copy()
            retry(self._zk_client.delete, path, recursive=recursive)
        except kazoo.exceptions.NoNodeError:
            pass
        except Exception as e:
            raise e
    # end delete_node

    def read_node(self, path, include_timestamp=False):
        """Return the node value (or (value, stat) tuple); None on any failure."""
        try:
            retry = self._retry.copy()
            value = retry(self._zk_client.get, path)
            if include_timestamp:
                return value
            return value[0]
        except Exception:
            return None
    # end read_node

    def get_children(self, path):
        """Return child names of *path*; empty list on any failure."""
        try:
            retry = self._retry.copy()
            return retry(self._zk_client.get_children, path)
        except Exception:
            return []
    # end read_node

    def exists(self, path):
        """Return the node stat if *path* exists.

        NOTE(review): returns [] (not None/False) on failure, unlike kazoo's
        exists() -- callers apparently only test truthiness.
        """
        try:
            retry = self._retry.copy()
            return retry(self._zk_client.exists, path)
        except Exception:
            return []
    # end exists

    def _sandesh_connection_info_update(self, status, message):
        """Publish the zookeeper connection state to sandesh and log transitions."""
        from pysandesh.connection_info import ConnectionState
        from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
        from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
        from pysandesh.gen_py.sandesh.ttypes import SandeshLevel

        new_conn_state = getattr(ConnectionStatus, status)
        ConnectionState.update(conn_type = ConnType.ZOOKEEPER,
                name = 'Zookeeper', status = new_conn_state,
                message = message,
                server_addrs = self._server_list.split(','))

        if (self._conn_state and self._conn_state != ConnectionStatus.DOWN and
                new_conn_state == ConnectionStatus.DOWN):
            msg = 'Connection to Zookeeper down: %s' %(message)
            self.log(msg, level=SandeshLevel.SYS_ERR)
        if (self._conn_state and self._conn_state != new_conn_state and
                new_conn_state == ConnectionStatus.UP):
            msg = 'Connection to Zookeeper ESTABLISHED'
            self.log(msg, level=SandeshLevel.SYS_NOTICE)

        self._conn_state = new_conn_state
    # end _sandesh_connection_info_update

# end class ZookeeperClient
|
tcpcloud/contrail-controller
|
src/config/common/zkclient.py
|
Python
|
apache-2.0
| 14,983
|
#!/usr/bin/env python
"""conference_session.py
Models for the ConferenceSession ndb kind and protorpc messages.
"""
from protorpc import messages
from google.appengine.ext import ndb
class ConferenceSession(ndb.Model):
    """Session -- Session object (App Engine datastore entity).

    camelCase property names are kept to match the API message fields below.
    """
    title = ndb.StringProperty(required=True)
    highlights = ndb.TextProperty()
    # websafe (urlsafe) key string of the parent Conference entity
    websafeConferenceKey = ndb.StringProperty()
    # urlsafe key strings of the speaker entities for this session
    speakerKeys = ndb.TextProperty(repeated=True, indexed=True)
    # duration in minutes -- presumably; TODO confirm against callers
    duration = ndb.IntegerProperty()
    typeOfSession = ndb.StringProperty()
    dateTime = ndb.DateTimeProperty(required=True)
    # hour of dateTime, denormalized for inequality queries -- presumably
    hour = ndb.IntegerProperty()
class ConferenceSessionForm(messages.Message):
    """SessionForm -- Session query inbound form message.

    Field numbers are wire-format identifiers; never renumber.
    """
    title = messages.StringField(1)
    highlights = messages.StringField(2)
    speakerEmails = messages.StringField(3, repeated=True)
    duration = messages.IntegerField(4)
    typeOfSession = messages.StringField(5)
    # date and startTime are transported as strings and combined into the
    # entity's dateTime elsewhere -- presumably; confirm against the API layer.
    date = messages.StringField(6)
    startTime = messages.StringField(7)
    websafeConferenceKey = messages.StringField(8)
    websafeSessionKey = messages.StringField(9)
class ConferenceSessionForms(messages.Message):
    """SessionForms -- multiple Session outbound form message."""
    items = messages.MessageField(ConferenceSessionForm, 1, repeated=True)
class ConferenceSessionQueryForm(messages.Message):
    """ConferenceSessionQueryForm -- ConferenceSession query inbound
    form message.

    One (field, operator, value) filter triple, e.g. ('duration', '<', '60').
    """
    field = messages.StringField(1)
    operator = messages.StringField(2)
    value = messages.StringField(3)
class ConferenceSessionQueryForms(messages.Message):
    """ConferenceSessionQueryForms -- multiple ConferenceSessionQueryForm
    inbound form message"""
    filters = messages.MessageField(ConferenceSessionQueryForm, 1,
                                    repeated=True)
    # NOTE(review): a required typeOfSession plus a conference key on the
    # *collection* of filters is unusual for a generic query form -- looks
    # intentional for a specific endpoint, but confirm against callers.
    typeOfSession = messages.StringField(2, required=True)
    websafeConferenceKey = messages.StringField(3)
|
kevindoole/udacity_p4
|
models/conference_session.py
|
Python
|
apache-2.0
| 1,949
|
#!/usr/bin/env python
# This is the Python3 implementation of the UEAlite stemmer
# DJS Oct 2015
# import modules used here
import sys
import re
import os
import codecs
# Gather our code in a main() function
def main(textin=None):
    """CLI entry point: stem *textin* and return the stemmed text.

    Args:
        textin: str or UTF-8 bytes to stem. Defaults to reading stdin.
            (The original hard-coded file I/O was commented out, which left
            `textin` undefined and made main() raise NameError.)

    Returns:
        The stemmed text produced by stem_doc().
    """
    if textin is None:
        # No input supplied: behave as a filter and consume stdin.
        textin = sys.stdin.read()
    return stem_doc(textin)
def stem_doc(textin):
    """Stem every word of *textin* and return the stemmed text.

    Args:
        textin: str, or bytes decoded as UTF-8.

    Returns:
        A string of stemmed words, each preceded by a space. Words are
        lowercased except all-caps acronyms (stem() rule '91'); hyphenated
        words (rule '90.2') are split in two and each half stemmed
        independently.
    """
    if isinstance(textin, bytes):
        textin = textin.decode('utf-8')
    elif not isinstance(textin, str):
        print('UEAlist stemmer: Error - input is not string or bytes')
    textout = ''
    for line in textin.split('\n'):  # iterates over the lines of the input
        for word in line.split(' '):
            word = re.sub(r'\s+', '', word)
            if word == '':
                continue
            # BUGFIX: was `if (not word.find('\\w')): continue`, a *literal*
            # substring search for the two characters backslash-w; the intent
            # is to skip tokens containing no word characters at all.
            if not re.search(r'\w', word):
                continue
            stemmed_word, rule = stem(word)
            if rule == '90.2':  # hyphenated word: stem the two halves separately
                m = re.search(r'(\w+)-(\w+)', word)
                # BUGFIX: was group(0)/group(1) -- group(0) is the *whole*
                # match, so "word_a" used to be the full hyphenated word.
                word_a = m.group(1)
                word_b = m.group(2)
                stemmed_word, rule = stem(word_a)
                textout += ' ' + stemmed_word.lower()
                stemmed_word, rule = stem(word_b)
                textout += ' ' + stemmed_word.lower()
            elif rule == '91':  # all-caps acronym: preserve case
                textout += ' ' + stemmed_word
            else:
                textout += ' ' + stemmed_word.lower()
    return textout
def stem(word):
    """Stem a single token with the UEA-Lite rules.

    Returns a two-element list ``[stem, rule_number]`` where ``rule_number``
    is the (string) id of the rule that fired.
    """
    # History: MCJ Feb 2004 (Perl); DJS Mar 2004, Oct 2013 (JavaScript),
    # Oct 2015 (Python).
    stemmed_word = [word, '999']                     # [stem, rule number]
    max_word_length = len('deoxyribonucleicacid')    # longest plausible word

    ## First stage: spurious words, NNPs, apostrophes, problem words. ##
    if re.search(r'^is$|^as$|^this$|^has$|^was$|^during$', word):
        # Frequent problem words pass through unchanged (rule 90, 1.01 added).
        stemmed_word[1] = '90'
        stemmed_word[0] = word
        return stemmed_word
    if len(word) > max_word_length:
        # Word is too long to be proper (rule 95).
        stemmed_word[1] = '95'
        stemmed_word[0] = word
        return stemmed_word
    if re.search("'", word):
        # Apostrophes: drop possessives, expand contractions, continue (94).
        word = re.sub("'s$", '', word)    # remove possessive singular
        word = re.sub("'$", '', word)     # remove possessive plural
        word = re.sub("n't", 'not', word)
        word = re.sub("'ve", 'have', word)
        word = re.sub("'re", 'are', word)
        word = re.sub("'m", 'am', word)
        stemmed_word[1] = '94'

    # Rules 90-93 detect NNPs, acronyms, program identifiers, ...
    if re.search(r'\d+', word) and re.search('[a-zA-Z]', word):
        # Word mixes digits and letters (rule 90.3).
        stemmed_word[1] = '90.3'
        stemmed_word[0] = word
        return stemmed_word
    elif re.search(r'(\w+)-(\w+)', word):
        # Hyphenated word; the caller splits and stems the halves (90.2).
        stemmed_word[0] = word
        stemmed_word[1] = '90.2'
        return stemmed_word
    elif re.search('-', word):
        # Stray hyphen (rule 90.1).
        stemmed_word[1] = '90.1'
        stemmed_word[0] = word
        return stemmed_word
    elif re.search(r'_|\d', word):
        # Underscore or digit (rule 90).
        stemmed_word[1] = '90'
        stemmed_word[0] = word
        return stemmed_word
    elif re.search('^[A-Z]+s$', word):
        # All uppercase with terminal s (rule 91).
        # BUG FIX: the original discarded re.sub's return value, so the
        # terminal 's' was never actually stripped.
        word = re.sub('s$', '', word)
        stemmed_word[1] = '91'
        stemmed_word[0] = word
        return stemmed_word
    elif word.isupper():
        # All uppercase: assumed acronym, returned unchanged (rule 91).
        stemmed_word[1] = '91'
        stemmed_word[0] = word
        return stemmed_word
    elif re.search('[A-Z].*[A-Z]', word):
        # Multiple uppercase characters (rule 92).
        # BUG FIX: the original used Perl's \p{IsUpper}, an invalid escape
        # for Python's re module -- it raised re.error for every ordinary
        # word that reached this branch.  (ASCII [A-Z] is used here; the
        # original never worked for non-ASCII either, since it crashed.)
        stemmed_word[1] = '92'
        stemmed_word[0] = word
        return stemmed_word
    elif re.search('^[A-Z]', word):
        # Capitalised word without punctuation: assume NNP (rule 93).
        stemmed_word[1] = '93'
        stemmed_word[0] = word
        return stemmed_word

    stemmed_word = suffix_remove(word, stemmed_word)
    if stemmed_word[1] == '68' and word != stemmed_word[0]:
        # Rule 68 (bare -s removal) may leave an understemmed form;
        # run the suffix rules once more on the result.
        word = stemmed_word[0]
        stemmed_word = suffix_remove(word, stemmed_word)
    return stemmed_word
# Ordered UEA-Lite suffix rules (139-rule version).  Each entry is
# (match, strip, replacement, rule):
#   match        suffix (or small regex) tested case-insensitively at the
#                end of the word;
#   strip        suffix substituted away (in both its lowercase and its
#                uppercase spelling, as the original rules did), or None to
#                leave the word unchanged;
#   replacement  text substituted for the stripped suffix ('' = plain strip);
#   rule         rule number reported in the result.
# The FIRST matching entry wins, mirroring the original if/elif chain, so
# the order below must not be changed.
# BUG FIXes relative to the original chain:
#   * rules 49, 53, 55 and 61 were unreachable duplicates of earlier rules
#     (28, 12.1, 41, 22.1) and have been dropped;
#   * rules 58 and 62 compared uninitialised / wrongly-indexed match groups
#     (a1/a2 never set; m.group(0) vs m.group(1)), so their doubled-letter
#     compensation could never fire -- the net effect (strip the suffix),
#     which is what the original actually computed, is preserved here.
_SUFFIX_RULES = (
    ('aceous', 'aceous', '', '1'),   ('ces', 's', '', '2'),
    ('cs', None, '', '3'),           ('sis', None, '', '4'),
    ('tis', None, '', '5'),          ('ss', None, '', '6'),
    ('eed', None, '', '7'),          ('ued', 'd', '', '8'),
    ('ues', 's', '', '9'),           ('ees', 's', '', '10'),
    ('iases', 'es', '', '11.4'),     ('uses', 's', '', '11.3'),
    ('sses', 'es', '', '11.2'),      ('eses', 'es', 'is', '11.1'),
    ('ses', 's', '', '11'),          ('tled', 'd', '', '12.5'),
    ('pled', 'd', '', '12.4'),       ('bled', 'd', '', '12.3'),
    ('eled', 'ed', '', '12.2'),      ('lled', 'ed', '', '12.1'),
    ('led', 'ed', '', '12'),         ('ened', 'ed', '', '13.7'),
    ('ained', 'ed', '', '13.6'),     ('erned', 'ed', '', '13.5'),
    ('rned', 'ed', '', '13.4'),      ('nned', 'ned', '', '13.3'),
    ('oned', 'ed', '', '13.2'),      ('gned', 'ed', '', '13.1'),
    ('ned', 'd', '', '13'),          ('ifted', 'ed', '', '14'),
    ('ected', 'ed', '', '15'),       ('vided', 'd', '', '16'),
    ('ved', 'd', '', '17'),          ('ced', 'd', '', '18'),
    ('erred', 'red', '', '19'),      ('urred', 'red', '', '20.5'),
    ('lored', 'ed', '', '20.4'),     ('eared', 'ed', '', '20.3'),
    ('tored', 'ed', 'e', '20.2'),    ('ered', 'ed', '', '20.1'),
    ('red', 'd', '', '20'),          ('tted', 'ted', '', '21'),
    ('noted', 'd', '', '22.4'),      ('leted', 'd', '', '22.3'),
    ('uted', 'd', '', '22.2'),       ('ated', 'd', '', '22.1'),
    ('ted', 'ed', '', '22'),         ('anges', 's', '', '23'),
    ('aining', 'ing', '', '24'),     ('acting', 'ing', '', '25'),
    ('tting', 'ting', '', '26'),     ('viding', 'ing', 'e', '27'),
    ('ssed', 'ed', '', '28'),        ('sed', 'd', '', '29'),
    ('titudes', 's', '', '30'),      ('umed', 'd', '', '31'),
    ('ulted', 'ed', '', '32'),       ('uming', 'ing', 'e', '33'),
    ('fulness', 'ness', '', '34'),   ('ousness', 'ness', 'e', '35'),
    ('r[aeiou]bed', 'd', '', '36'),  ('bed', 'ed', '', '36'),
    ('ssing', 'ing', '', '37'),      ('ulting', 'ing', '', '38'),
    ('ving', 'ing', 'e', '39'),      ('eading', 'ing', '', '40.7'),
    ('oading', 'ing', '', '40.6'),   ('eding', 'ing', '', '40.5'),
    ('dding', 'ding', '', '40.4'),   ('lding', 'ing', '', '40.3'),
    ('rding', 'ing', '', '40.2'),    ('nding', 'ing', '', '40.1'),
    ('ding', 'ing', 'e', '40'),      ('lling', 'ling', '', '41'),
    ('ealing', 'ing', '', '42.4'),   ('oling', 'ing', '', '42.3'),
    ('ailing', 'ing', '', '42.2'),   ('eling', 'ing', '', '42.1'),
    ('ling', 'ing', 'e', '42'),      ('nged', 'd', '', '43.2'),
    ('gged', 'ged', '', '43.1'),     ('ged', 'd', '', '43'),
    ('mming', 'ming', '', '44.3'),   ('rming', 'ing', '', '44.2'),
    ('lming', 'ing', '', '44.1'),    ('ming', 'ing', 'e', '44'),
    ('nging', 'ing', '', '45.2'),    ('gging', 'ging', '', '45.1'),
    ('ging', 'ing', 'e', '45'),      ('aning', 'ing', '', '46.6'),
    ('ening', 'ing', '', '46.5'),    ('gning', 'ing', '', '46.4'),
    ('nning', 'ning', '', '46.3'),   ('oning', 'ing', '', '46.2'),
    ('rning', 'ing', '', '46.1'),    ('ning', 'ing', 'e', '46'),
    ('sting', 'ing', '', '47'),      ('eting', 'ing', '', '48.4'),
    ('pting', 'ing', '', '48.3'),    ('nting', 'ing', '', '48.2'),
    ('cting', 'ing', '', '48.1'),    ('ting', 'ing', 'e', '48'),
    ('les', 's', '', '50'),          ('tes', 's', '', '51'),
    ('zed', 'd', '', '52'),          ('iring', 'ing', 'e', '54.4'),
    ('uring', 'ing', 'e', '54.3'),   ('ncing', 'ing', 'e', '54.2'),
    ('zing', 'ing', 'e', '54.1'),    ('sing', 'ing', 'e', '54'),
    ('ied', 'ied', 'y', '56'),       ('ating', 'ing', '', '57'),
    ('thing', None, '', '58.1'),     (r'(\w)(\w)ing', 'ing', '', '58'),
    ('ies', 'ies', 'y', '59'),       ('lves', 'ves', 'f', '60.1'),
    ('ves', 's', '', '60'),          ('aped', 'd', '', '61.3'),
    ('uded', 'd', '', '61.2'),       ('oded', 'd', '', '61.1'),
    (r'(\w)(\w)ed', 'ed', '', '62'), ('pes', 's', '', '63.8'),
    ('mes', 's', '', '63.7'),        ('ones', 's', '', '63.6'),
    ('izes', 's', '', '63.5'),       ('ures', 's', '', '63.4'),
    ('ines', 's', '', '63.3'),       ('ides', 's', '', '63.2'),
    ('ges', 's', '', '63.1'),        ('es', 'es', '', '63'),
    ('is', 'is', 'e', '64'),         ('ous', None, '', '65'),
    ('ums', None, '', '66'),         ('us', None, '', '67'),
    ('s', 's', '', '68'),
)


def suffix_remove(word, stemmed_word):
    """Apply the first matching UEA-Lite suffix rule to *word*.

    Mutates and returns *stemmed_word* as ``[stem, rule_number]``; the rule
    number is left at '0' when no rule matches.
    """
    stemmed_word[1] = '0'
    stemmed_word[0] = word
    # BUG FIX: the original compared re.search's result to -1, which is
    # never true (re.search returns a match object or None), so the guard
    # never fired.  Skip tokens with no ASCII letters explicitly.
    if not re.search('[a-zA-Z]', word):
        return stemmed_word

    lowered = word.casefold()   # rules match case-insensitively
    for match, strip, replacement, rule in _SUFFIX_RULES:
        if re.search(match + '$', lowered):
            if strip is not None:
                # Substitute the suffix in both its lowercase and its
                # uppercase spelling, as each original rule did.
                word = re.sub(strip + '$', replacement, word)
                word = re.sub(strip.upper() + '$', replacement.upper(), word)
            stemmed_word[1] = rule
            break

    stemmed_word[0] = word
    return stemmed_word
# Standard boilerplate: call main() only when this file is executed as a
# script, not when it is imported as a module.
if __name__ == '__main__':
    main()
|
JoeDurrant/pysearch
|
UEAlite.py
|
Python
|
gpl-2.0
| 30,679
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 257: collect every root-to-leaf path of a binary tree."""

    def binaryTreePaths(self, root):
        """
        :type root: TreeNode
        :rtype: List[str]

        Returns paths formatted as "v1->v2->...->leaf"; an empty tree
        yields an empty list.
        """
        paths = []
        self.traverse(root, [], paths)
        return paths

    def traverse(self, root, path, paths):
        """Depth-first walk; *path* holds the node values (as strings) on
        the way down, *paths* accumulates finished root-to-leaf strings.

        BUG/IDIOM FIXES: renamed the local that shadowed the builtin
        ``list``; compare against None with ``is``/``is None`` rather
        than ``==``.
        """
        if not root:
            return
        path.append(str(root.val))
        if root.left is None and root.right is None:
            paths.append("->".join(path))
            return
        # Pass copies so the two subtrees do not share one mutable path.
        self.traverse(root.left, path[:], paths)
        self.traverse(root.right, path[:], paths)
# Ad-hoc smoke test (note: Python 2 syntax -- bare ``print`` statement).
from TestObjects import *
# NOTE(review): ``b`` is created but never used -- presumably a leftover
# fixture from TestObjects; confirm before removing.
b = BinarySearchTree()
s = Solution()
print s.binaryTreePaths(None)
|
Jspsun/LEETCodePractice
|
Python/BinaryTreePaths.py
|
Python
|
mit
| 849
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.cluster.remoteaccount import RemoteCommandError
from ducktape.utils.util import wait_until
class JmxMixin(object):
    """This mixin helps existing service subclasses start JmxTool on their worker nodes and collect jmx stats.
    A couple things worth noting:
    - this is not a service in its own right.
    - we assume the service using JmxMixin also uses KafkaPathResolverMixin
    """
    def __init__(self, num_nodes, jmx_object_names=None, jmx_attributes=None):
        # MBean object names to sample; when None, start_jmx_tool is a no-op.
        self.jmx_object_names = jmx_object_names
        # Attribute names to collect for each object (may be empty).
        self.jmx_attributes = jmx_attributes or []
        self.jmx_port = 9192
        # Per-node flag: JmxTool already started on node with 1-based index i?
        self.started = [False] * num_nodes
        # Per-node map: sample time (whole seconds) -> {attribute name: value}.
        self.jmx_stats = [{} for x in range(num_nodes)]
        self.maximum_jmx_value = {}  # map from object_attribute_name to maximum value observed over time
        self.average_jmx_value = {}  # map from object_attribute_name to average value observed over time

        self.jmx_tool_log = "/mnt/jmx_tool.log"
        self.jmx_tool_err_log = "/mnt/jmx_tool.err.log"

    def clean_node(self, node):
        """Kill any running JmxTool process on *node* and remove its log."""
        node.account.kill_process("jmx", clean_shutdown=False, allow_fail=True)
        node.account.ssh("rm -rf %s" % self.jmx_tool_log, allow_fail=False)

    def start_jmx_tool(self, idx, node):
        """Launch JmxTool in the background on *node* (1-based index *idx*)
        and block until it produces its first line of output.
        """
        if self.jmx_object_names is None:
            self.logger.debug("%s: Not starting jmx tool because no jmx objects are defined" % node.account)
            return

        if self.started[idx-1]:
            self.logger.debug("%s: jmx tool has been started already on this node" % node.account)
            return

        # Build one shell command that samples every requested object and
        # attribute once per second, appending to the log files.
        cmd = "%s kafka.tools.JmxTool " % self.path.script("kafka-run-class.sh", node)
        cmd += "--reporting-interval 1000 --jmx-url service:jmx:rmi:///jndi/rmi://127.0.0.1:%d/jmxrmi" % self.jmx_port
        for jmx_object_name in self.jmx_object_names:
            cmd += " --object-name %s" % jmx_object_name
        for jmx_attribute in self.jmx_attributes:
            cmd += " --attributes %s" % jmx_attribute
        cmd += " 1>> %s" % self.jmx_tool_log
        cmd += " 2>> %s &" % self.jmx_tool_err_log

        self.logger.debug("%s: Start JmxTool %d command: %s" % (node.account, idx, cmd))
        node.account.ssh(cmd, allow_fail=False)
        # Wait for the first sample so callers know collection has begun.
        wait_until(lambda: self._jmx_has_output(node), timeout_sec=10, backoff_sec=.5, err_msg="%s: Jmx tool took too long to start" % node.account)
        self.started[idx-1] = True

    def _jmx_has_output(self, node):
        """Return True once jmx_tool_log on *node* contains any output,
        used as a proxy for "JmxTool is running".
        """
        try:
            node.account.ssh("test -z \"$(cat %s)\"" % self.jmx_tool_log, allow_fail=False)
            return False
        except RemoteCommandError:
            # ``test -z`` exits non-zero when the log is non-empty, which
            # the ssh helper surfaces as RemoteCommandError.
            return True

    def read_jmx_output(self, idx, node):
        """Parse *node*'s JmxTool CSV log into self.jmx_stats; once every
        started node has reported, compute per-attribute average and maximum
        aggregates across nodes.
        """
        if not self.started[idx-1]:
            return

        object_attribute_names = []

        cmd = "cat %s" % self.jmx_tool_log
        self.logger.debug("Read jmx output %d command: %s", idx, cmd)
        lines = [line for line in node.account.ssh_capture(cmd, allow_fail=False)]
        assert len(lines) > 1, "There don't appear to be any samples in the jmx tool log: %s" % lines

        for line in lines:
            if "time" in line:
                # Header row: quoted column names; the first column is the
                # millisecond timestamp, the rest are attribute names.
                object_attribute_names = line.strip()[1:-1].split("\",\"")[1:]
                continue
            stats = [float(field) for field in line.split(',')]
            time_sec = int(stats[0]/1000)
            self.jmx_stats[idx-1][time_sec] = {name: stats[i+1] for i, name in enumerate(object_attribute_names)}

        # do not calculate average and maximum of jmx stats until we have read output from all nodes
        # If the service is multithreaded, this means that the results will be aggregated only when the last
        # service finishes
        if any(len(time_to_stats) == 0 for time_to_stats in self.jmx_stats):
            return

        start_time_sec = min([min(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
        end_time_sec = max([max(time_to_stats.keys()) for time_to_stats in self.jmx_stats])

        for name in object_attribute_names:
            aggregates_per_time = []
            # NOTE(review): xrange is Python 2-only; this file predates
            # Python 3 support.
            for time_sec in xrange(start_time_sec, end_time_sec + 1):
                # assume that value is 0 if it is not read by jmx tool at the given time. This is appropriate for metrics such as bandwidth
                values_per_node = [time_to_stats.get(time_sec, {}).get(name, 0) for time_to_stats in self.jmx_stats]
                # assume that value is aggregated across nodes by sum. This is appropriate for metrics such as bandwidth
                aggregates_per_time.append(sum(values_per_node))
            self.average_jmx_value[name] = sum(aggregates_per_time) / len(aggregates_per_time)
            self.maximum_jmx_value[name] = max(aggregates_per_time)

    def read_jmx_output_all_nodes(self):
        """Collect and aggregate JmxTool output from every node."""
        for node in self.nodes:
            self.read_jmx_output(self.idx(node), node)
|
airbnb/kafka
|
tests/kafkatest/services/monitor/jmx.py
|
Python
|
apache-2.0
| 5,768
|
# -*- coding: utf-8 -*-
from apps.registro.models.AnexoConexionInternet import AnexoConexionInternet
from apps.registro.models.TipoConexion import TipoConexion
from django.core.exceptions import ValidationError
from django import forms
class AnexoConexionInternetForm(forms.ModelForm):
    """ModelForm for an annex's internet connection record.

    When the annex reports having a connection, the connection detail
    fields become mandatory; otherwise they are cleaned to None.
    """
    tipo_conexion = forms.ModelChoiceField(queryset=TipoConexion.objects.all().order_by('nombre'), required=False)
    verificado = forms.BooleanField(required=False)

    class Meta:
        model = AnexoConexionInternet
        exclude = ['anexo']

    def __chequear_si_tiene_conexion(self, field):
        # Field is required only while 'tiene_conexion' is checked.
        if not self.cleaned_data['tiene_conexion']:
            return None
        value = self.cleaned_data[field]
        if value is None or value == '':
            raise ValidationError('Este campo es obligatorio.')
        return value

    def clean_tipo_conexion(self):
        return self.__chequear_si_tiene_conexion('tipo_conexion')

    def clean_proveedor(self):
        return self.__chequear_si_tiene_conexion('proveedor')

    def clean_costo(self):
        return self.__chequear_si_tiene_conexion('costo')

    def clean_cantidad(self):
        return self.__chequear_si_tiene_conexion('cantidad')
|
MERegistro/meregistro
|
meregistro/apps/registro/forms/AnexoConexionInternetForm.py
|
Python
|
bsd-3-clause
| 1,169
|
from configuration import main_configuration as config
from flask import Flask
from flask import abort, redirect, url_for
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
import base64, random
import string
from datetime import datetime
# Flask application and database handles shared by the whole web app.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config.database_connection_string
app.config['SQLALCHEMY_MAX_OVERFLOW'] = config.database_max_pool_overflow
app.config['SQLALCHEMY_POOL_TIMEOUT'] = config.database_pool_timeout
db = SQLAlchemy(app)
db.engine.echo = config.echo_sql
from flask import _app_ctx_stack
from sqlalchemy.orm import scoped_session, sessionmaker
# Scope sessions to the Flask application context so each request gets
# its own session bound to the shared engine.
Session = scoped_session(sessionmaker(), scopefunc=_app_ctx_stack.__ident_func__)
Session.configure(bind=db.engine)
def getSession():
    """Return the context-scoped session, patched for Flask-SQLAlchemy.

    Plain SQLAlchemy sessions lack the `_model_changes` attribute that
    Flask-SQLAlchemy's modification tracking expects, so initialise it.
    """
    global Session
    session = Session()
    session._model_changes = {}
    return session
def cleanupSession():
    # Dispose of the session bound to the current app context so the
    # next getSession() call starts fresh.
    Session.remove()
# Translation table mapping every non-alphanumeric byte to '-', used to
# build URL-safe slugs.  NOTE: string.maketrans is Python 2 only.
nonURLChars = ''.join(c for c in map(chr, range(256)) if not c.isalnum())
URLCharTranslation = ''.join('-' for c in nonURLChars)
URLCharTranslationTable = string.maketrans(nonURLChars, URLCharTranslation)
class TournamentType(db.Model):
    """A kind of tournament (e.g. world cup brackets) that can be simulated."""
    __tablename__ = "tournament_types"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), unique=False)
    description = db.Column(db.String(512), unique=False)
    icon = db.Column(db.String(128), unique=False)
    team_count = db.Column(db.Integer, unique=False)
    # for communication with the simulator
    internal_identifier = db.Column(db.String(128), unique=False)
    # for rendering the results
    custom_view_function = db.Column(db.String(128), unique=False)
    def __init__(self, name, description, team_count, icon, internal_identifier, custom_view_function=None):
        self.name = name
        self.description = description
        self.team_count = team_count
        self.icon = icon
        self.internal_identifier = internal_identifier
        self.custom_view_function = custom_view_function
    def __repr__(self):
        # Bug fix: previously referenced the bare name `name`, which raised
        # a NameError whenever repr() was taken of an instance.
        return "[Tournament " + self.name + "]"
class TournamentState:
    """Integer codes describing the lifecycle of a tournament run."""
    pending = 1
    running = 2
    finished = 3
    error = 4
class Tournament(db.Model):
    """A single simulated tournament run and its configuration."""
    __tablename__ = "tournaments"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    # lifecycle code, see TournamentState
    state = db.Column(db.SmallInteger, unique=False, default=TournamentState.pending)
    run_count = db.Column(db.Integer)
    hash = db.Column(db.String(32))
    type_id = db.Column(db.Integer, db.ForeignKey('tournament_types.id'))
    # this is an optimization to quickly have access to rule names & weights that were used for this tournament
    rule_weight_json = db.Column(db.String(256), unique=False)
    def __init__(self, type_id, hash, run_count):
        self.type_id = type_id
        self.hash = hash
        self.run_count = run_count
        self.rule_weight_json = None
    def __repr__(self):
        return "[Play-Off " + str(self.id) + " - type " + str(self.type_id) + "]"
    def getStateName(self):
        """Return a human-readable name for the current state."""
        if self.state == TournamentState.pending:
            return "pending"
        if self.state == TournamentState.running:
            return "running"
        if self.state == TournamentState.finished:
            return "finished"
        if self.state == TournamentState.error:
            return "error"
        return "unknown"
# used to manage the tournaments that are visible to a certain user
class UserTournamentMapping(db.Model):
    """Association row linking a user to one tournament they may view."""
    __tablename__ = "user2tournaments"
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    tournament_id = db.Column(db.Integer, db.ForeignKey("tournaments.id"))
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    tournament = db.relationship("Tournament")
    def __init__(self, user, tournament):
        # Appending to user.tournaments wires up user_id via the relationship.
        self.tournament = tournament
        user.tournaments.append(self)
class User(db.Model):
    """An application user: just an identity plus their visible tournaments."""
    __tablename__ = "users"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    # ordered oldest-first by when the mapping was created
    tournaments = db.relationship("UserTournamentMapping", order_by=UserTournamentMapping.timestamp)
    def __init__(self):
        pass
    def __repr__(self):
        return "[User " + str(self.id) + "]"
class TournamentExecutionError(db.Model):
    """Error message recorded while executing a tournament simulation."""
    __tablename__ = "tournament_execution_errors"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    text = db.Column(db.String(256), unique=False)
    def __init__(self, tournament_id, text):
        self.tournament_id = tournament_id
        self. text = text
    def __repr__(self):
        return "[Error " + str(self.id) + ": " + self.text + "]"
class Team(db.Model):
    """A team that can take part in tournaments."""
    __tablename__ = "teams"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), unique=False)
    country_code = db.Column(db.String(3), unique=False)
    def __init__(self, name, country_code):
        self.name = name
        self.country_code = country_code
    def __repr__(self):
        return "[Team " + self.name + " from " + self.country_code + "]"
    @staticmethod
    def getAllTeamsForTournament(tournament_id, session):
        """Return all Team rows participating in the given tournament."""
        all_teams = []
        for participation in session.query(Participation).filter_by(tournament_id=tournament_id):
            team = session.query(Team).filter_by(id=participation.team_id).first()
            all_teams.append(team)
        return all_teams
class Participation(db.Model):
    """Links a team to a tournament, with its seeding order."""
    __tablename__ = "participations"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    # seeding/starting position within the tournament
    order = db.Column(db.Integer, unique=False)
    #tournament = db.relationship('Tournament', backref=db.backref('participations', lazy='dynamic'))
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    #team = db.relationship('Team', backref=db.backref('participations', lazy='dynamic'))
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    def __init__(self, tournament_id, team_id, order):
        self.tournament_id = tournament_id
        self.team_id = team_id
        self.order = order
    def __repr__(self):
        return "[Participation of " + str(self.team_id) + " in play-off " + str(self.tournament_id) + "]"
class Rule2ScoreAssociation(db.Model):
    """Association row: which score types feed a rule type."""
    __tablename__ = "rule2scores"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    rule_type_id = db.Column(db.Integer, db.ForeignKey('rule_types.id'))
    score_type_id = db.Column(db.Integer, db.ForeignKey('score_types.id'))
    def __init__(self, rule_type, score_type):
        self.rule_type_id = rule_type.id
        self.score_type_id = score_type.id
    def __repr__(self):
        return "<R2S " + str(self.rule_type_id) + "<-" + str(self.score_type_id) + ">"
class Rule2ParameterAssociation(db.Model):
    """Association row: which parameter types belong to a rule type."""
    __tablename__ = "rule2parameters"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    rule_type_id = db.Column(db.Integer, db.ForeignKey('rule_types.id'))
    parameter_type_id = db.Column(db.Integer, db.ForeignKey('rule_parameter_types.id'))
    def __init__(self, rule_type, parameter_type):
        self.rule_type_id = rule_type.id
        self.parameter_type_id = parameter_type.id
    def __repr__(self):
        return "<R2P " + str(self.rule_type_id) + "<-" + str(self.parameter_type_id) + ">"
class RuleType(db.Model):
    """A kind of simulation rule, together with the score and parameter
    types it consumes (linked via the association tables above)."""
    __tablename__ = "rule_types"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), unique=False)
    long_name = db.Column(db.String(128), unique=False)
    description = db.Column(db.String(512), unique=False)
    #score_types = db.relationship("ScoreType", secondary=rule_score_table)
    #parameter_types = db.relationship("RuleParameterType", secondary=rule_parameter_table)
    # whether the rule needs the win expectancy output of other rules as its input
    is_backref_rule = db.Column(db.Boolean, unique=False)
    # whether the rule needs custom ratings entered by the user
    needs_custom_ratings = db.Column(db.Boolean, unique=False)
    # for communication with the simulator
    internal_function_identifier = db.Column(db.String(128), unique=False)
    # standard rules that are displayed in the quick-creation dialog
    standard_weight = db.Column(db.Float, unique=False)
    is_default_rule = db.Column(db.Boolean, unique=False)
    def __init__(self, name, description, internal_function_identifier, long_name=None, is_backref_rule=False, needs_custom_ratings=False):
        self.name = name
        # long_name falls back to name when not explicitly given
        self.long_name = name
        if long_name:
            self.long_name = long_name
        self.description = description
        self.internal_function_identifier = internal_function_identifier
        self.is_backref_rule = is_backref_rule
        self.needs_custom_ratings = needs_custom_ratings
        self.standard_weight = 0.0
        self.is_default_rule = False
    def addScoreType(self, score_type, session):
        """Register *score_type* as an input of this rule (not committed here)."""
        session.add(Rule2ScoreAssociation(self, score_type))
    def getScoreTypes(self, session):
        """Return the ScoreType rows associated with this rule."""
        types = []
        all_assocs = session.query(Rule2ScoreAssociation).filter_by(rule_type_id=self.id).all()
        for assoc in all_assocs:
            score_type = session.query(ScoreType).filter_by(id=assoc.score_type_id).first()
            types.append(score_type)
        return types
    def addParameterType(self, parameter_type, session):
        """Register *parameter_type* for this rule (not committed here)."""
        session.add(Rule2ParameterAssociation(self, parameter_type))
    def getParameterTypes(self, session):
        """Return the RuleParameterType rows associated with this rule."""
        types = []
        all_assocs = session.query(Rule2ParameterAssociation).filter_by(rule_type_id=self.id).all()
        for assoc in all_assocs:
            type = session.query(RuleParameterType).filter_by(id=assoc.parameter_type_id).first()
            types.append(type)
        return types
    def makeDefaultRule(self, standard_weight):
        """Mark this rule for the quick-creation dialog with the given weight."""
        self.is_default_rule = True
        self.standard_weight = standard_weight
    def __repr__(self):
        return "[Rule " + self.name + "]"
    def toDictionary(self):
        """Serialize the fields needed by the web frontend."""
        return {'id':self.id, 'name':self.name, 'desc':self.description, 'long_name':self.long_name}
class Rule(db.Model):
    """A rule instance of a tournament: a rule type with a weight."""
    __tablename__ = "rules"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    type_id = db.Column(db.Integer, db.ForeignKey('rule_types.id'))
    weight = db.Column(db.Float, unique=False)
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    def __init__(self, tournament_id, type_id, weight):
        self.tournament_id = tournament_id
        self.type_id = type_id
        self.weight = weight
    def __repr__(self):
        return "[Rule " + str(self.type_id) + " set to " + str(self.weight) + "]"
class RuleParameterType(db.Model):
    """A configurable parameter kind for rules, with its default value."""
    __tablename__ = "rule_parameter_types"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    default_value = db.Column(db.Float)
    # for communication with the simulator
    internal_identifier = db.Column(db.String(128), unique=False)
    def __init__(self, internal_identifier, default_value):
        self.internal_identifier = internal_identifier
        self.default_value = default_value
    def __repr__(self):
        return "[ParameterType " + str(self.id) + ": " + self.internal_identifier + "]"
class RuleParameter(db.Model):
    """A concrete parameter value chosen for one tournament."""
    __tablename__ = "rule_parameters"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    type_id = db.Column(db.Integer, db.ForeignKey('rule_parameter_types.id'))
    value = db.Column(db.Float, unique=False)
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    def __init__(self, tournament_id, type_id, value):
        self.tournament_id = tournament_id
        self.type_id = type_id
        self.value = value
    def __repr__(self):
        return "[Parameter of type " + str(self.type_id) + " for tournament " + str(self.tournament_id) + " = " + str(self.value) + "]"
class ScoreType(db.Model):
    """A kind of team rating/score; may be global or tournament-specific."""
    __tablename__ = "score_types"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), unique=False)
    long_name = db.Column(db.String(128), unique=False)
    description = db.Column(db.String(512), unique=False)
    # whether to hide the score type from the team list
    hidden = db.Column(db.Boolean)
    # set if the score type was customized for a specific tournament
    #tournament = db.relationship('Tournament', backref=db.backref('score_types', lazy='dynamic'))
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    #parent = db.relationship('ScoreType', backref=db.backref('score_types', lazy='dynamic'))
    parent_id = db.Column(db.Integer, db.ForeignKey('score_types.id'))
    def __init__(self, name, description, long_name=None, hidden=False):
        self.name = name
        # long_name falls back to name when not explicitly given
        self.long_name = self.name
        self.description = description
        self.hidden = hidden
        if long_name:
            self.long_name = long_name
    def __repr__(self):
        return "[Score " + self.name + "]"
class Score(db.Model):
    """A score value of a team, either global (tournament_id None) or
    overriding for one specific tournament."""
    __tablename__ = "scores"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    type_id = db.Column(db.Integer, db.ForeignKey('score_types.id'))
    value = db.Column(db.Float, unique=False)
    #tournament = db.relationship('Tournament', backref=db.backref('scores', lazy='dynamic'))
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    #team = db.relationship('Team', backref=db.backref('teams', lazy='dynamic'))
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    def __init__(self, type_id, team_id, value, tournament_id=None):
        self.type_id = type_id
        self.value = value
        self.team_id = team_id
        self.tournament_id = tournament_id
    def __repr__(self):
        return "[Rating " + str(self.id) + " for play-off " + str(self.tournament_id) + " of team " + str(self.team_id) + "]"
    # returns the score value for a team
    # this can either the the global value or the tournament-specific one
    @staticmethod
    def getForTournament(type_id, tournament_id, team_id, session):
        """Return the tournament-specific Score if one exists, else the global one."""
        local_score = session.query(Score).filter_by(type_id=type_id, tournament_id=tournament_id, team_id=team_id).first()
        if local_score:
            return local_score
        return session.query(Score).filter_by(type_id=type_id, team_id=team_id).first()
class ResultPlaceType(db.Model):
    """Display name of a final placement (e.g. "winner") per tournament."""
    __tablename__ = "result_place_types"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    place = db.Column(db.SmallInteger, unique=False)
    name = db.Column(db.String(64), unique=False)
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    def __init__(self, tournament_id, place, name):
        self.place = place
        self.name = name
        self.tournament_id = tournament_id
    def __repr__(self):
        return "[Place " + self.name + " tournament " + str(self.tournament_id) + "]"
class ResultPlace(db.Model):
    """How often (percentage) a team finished at a given place."""
    __tablename__ = "result_places"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    place = db.Column(db.SmallInteger, unique=False)
    percentage = db.Column(db.Float, unique=False)
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    def __init__(self, tournament_id, team_id, place, percentage):
        self.tournament_id = tournament_id
        self.team_id = team_id
        self.place = place
        self.percentage = percentage
    def __repr__(self):
        return "[Ranked team " + str(self.team_id) + " on " + str(self.place) + " ~" + str(self.percentage) + "]"
class Result(db.Model):
    """Aggregated per-team simulation results for a tournament."""
    __tablename__ = "results"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    average_goals = db.Column(db.Float, unique=False)
    average_place = db.Column(db.Float, unique=False)
    def __init__(self, tournament_id, team_id, average_goals, average_place):
        self.tournament_id = tournament_id
        self.team_id = team_id
        self.average_goals = average_goals
        self.average_place = average_place
    def __repr__(self):
        return "[Result " + str(self.id) + "]"
class BracketTeamResult(db.Model):
    """Per-team aggregated outcome of one bracket match across all runs."""
    __tablename__ = "bracket_team_results"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    bof_round = db.Column(db.Integer, unique=False)
    game_in_round = db.Column(db.Integer, unique=False)
    wins = db.Column(db.Integer, unique=False)
    draws = db.Column(db.Integer, unique=False)
    matches = db.Column(db.Integer, unique=False)
    # Average rank in that bracket. This is only sensible for the group phase.
    average_group_rank = db.Column(db.Float, unique=False)
    # optimization, the most frequent result will always be shown first
    most_frequent = db.Column(db.Boolean, unique=False)
    def __init__(self, tournament_id, bof_round, game_in_round, team_id, wins, draws, matches, average_group_rank):
        self.tournament_id = tournament_id
        self.bof_round = bof_round
        self.game_in_round = game_in_round
        self.team_id = team_id
        self.wins = wins
        self.draws = draws
        self.matches = matches
        self.average_group_rank = average_group_rank
        self.most_frequent = False
    def getLossCount(self):
        """Return the number of lost matches (everything not won or drawn)."""
        return self.matches - (self.wins + self.draws)
    def getMatchName(self):
        """Return an identifier like "game_<round>_<game>" for templates."""
        return "game_" + str(self.bof_round) + "_" + str(self.game_in_round)
    def __repr__(self):
        # Bug fix: team_id is an integer column; concatenating it to a str
        # raised a TypeError, so wrap it in str().
        return "[BracketResult " + self.getMatchName() + " - " + str(self.team_id) + "]"
    def toDictionary(self):
        """Serialize team id and win chance for the frontend."""
        return {
            "team": self.team_id,
            "chance": self.wins / float(self.matches) if self.matches != 0 else 0.0
        }
# this table contains all the available match results that can be used in the simulation instead of actually simulating the match
class DatabaseMatchResult(db.Model):
    """A real, known match result that the simulation can replay instead of
    simulating the match."""
    __tablename__ = "database_match_results"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    bof_round = db.Column(db.Integer)
    team_left_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    team_right_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    goals_left = db.Column(db.Integer)
    goals_right = db.Column(db.Integer)
    def __init__(self, bof_round, team_left, team_right, goals_left, goals_right):
        self.bof_round = bof_round
        self.team_left_id = team_left.id
        self.team_right_id = team_right.id
        self.goals_left = goals_left
        self.goals_right = goals_right
    def __repr__(self):
        # Bug fix: the original referenced an undefined name `left`
        # (str(left.goals_right)), raising a NameError; use self.goals_right.
        return "[Known Result in bof" + str(self.bof_round) + " " + str(self.team_left_id) + "vs" + str(self.team_right_id) + " -> " + str(self.goals_left) + ":" + str(self.goals_right) + "]"
class MatchResult(db.Model):
    """Average outcome of one bracket match over all simulation runs."""
    __tablename__ = "match_results"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    tournament_id = db.Column(db.Integer, db.ForeignKey('tournaments.id'))
    team_left_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    team_right_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    bof_round = db.Column(db.Integer, unique=False)
    game_in_round = db.Column(db.Integer, unique=False)
    average_goals_left = db.Column(db.Float, unique=False)
    average_goals_right = db.Column(db.Float, unique=False)
    number_of_games = db.Column(db.Integer, unique=False)
    # optimization, the most frequent result will always be shown first
    most_frequent = db.Column(db.Boolean, unique=False)
    def __init__(self, tournament_id, bof_round, game_in_round, teams, goals, number_of_games):
        self.tournament_id = tournament_id
        self.bof_round = bof_round
        self.game_in_round = game_in_round
        (self.team_left_id, self.team_right_id) = teams
        (self.average_goals_left, self.average_goals_right) = goals
        self.number_of_games = number_of_games
        self.most_frequent = False
        # and normalize the accumulated goals to per-game averages
        if self.number_of_games != 0:
            (self.average_goals_left, self.average_goals_right) = (self.average_goals_left / float(self.number_of_games), self.average_goals_right / float(self.number_of_games))
    def getMatchName(self):
        """Return an identifier like "game_<round>_<game>" for templates."""
        return "game_" + str(self.bof_round) + "_" + str(self.game_in_round)
    def __repr__(self):
        # Bug fix: the team id columns are integers; concatenating them to a
        # str raised a TypeError, so wrap them in str().
        return "[MatchResult " + self.getMatchName() + " - " + str(self.team_left_id) + " vs " + str(self.team_right_id) + "]"
    def toDictionary(self):
        """Serialize the team ids and average goals for the frontend."""
        return {
            "teams": [self.team_left_id, self.team_right_id],
            "goals": [self.average_goals_left, self.average_goals_right]
        }
    # returns a list with all matches that lead to this match
    # usually, this method is called on a finals game and returns the complete brackets
    def resolveBrackets(self, session):
        """Recursively collect this match and all its predecessor matches."""
        resolved = [self]
        # best of 32? nothing more to resolve..
        if self.bof_round == 16:
            return resolved
        # get all (two) matches that lead to this match
        for team_id in [self.team_left_id, self.team_right_id]:
            parent_match = session.query(MatchResult)\
                .filter(MatchResult.tournament_id==self.tournament_id, MatchResult.bof_round==self.bof_round*2)\
                .filter((MatchResult.team_left_id==team_id) | (MatchResult.team_right_id==team_id))\
                .order_by(MatchResult.number_of_games.desc())\
                .first()
            assert parent_match is not None
            # Bug fix: the recursive call omitted the required `session`
            # argument, which raised a TypeError on any multi-round bracket.
            for previous_match in parent_match.resolveBrackets(session):
                resolved.append(previous_match)
        return resolved
class OddsData(db.Model):
    """Bookmaker odds for a team, taken from a named source on a given date."""
    __tablename__ = "odds_data"
    query = None
    id = db.Column(db.Integer, primary_key=True)
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    odds = db.Column(db.Float)
    date = db.Column(db.Date)
    source = db.Column(db.String(32))
    def __init__(self, team, odds, date, source):
        self.team_id = team.id
        self.odds = odds
        self.date = date
        self.source = source
    def __repr__(self):
        return "[odds " + self.source + str(self.team_id) + " " + str(self.date) + " " + str(self.odds) + "]"
|
walachey/football-worldcup-simulator
|
web/database_models.py
|
Python
|
mit
| 21,042
|
import os
import xmlrpc.client
from subprocess import Popen, PIPE
import shutil
# XML-RPC client used to query released versions on PyPI.
pypi = xmlrpc.client.ServerProxy('https://pypi.org')
# Directories (relative to the repo root) that may contain a setup.py to publish.
DIRECTORIES_TO_SEARCH_FORM = [
    os.path.join('.'),
    os.path.join('pyforms-web'),
    os.path.join('pyforms-web', 'orquestra'),
    os.path.join('pyforms-gui'),
    os.path.join('pyforms-terminal'),
]
CURRENT_DIRECTORY = os.getcwd()
# Side effect at import time: make sure the packaging toolchain is current.
Popen(['pip','install','--upgrade','setuptools','wheel','twine']).communicate()
def version_compare(a, b):
    """Compare two dotted version strings component by component.

    Returns -1 when *a* is the newer version, 1 when *b* is newer and 0
    when both are equal.  A version that extends the other with extra
    components (e.g. "1.0.1" vs "1.0") counts as newer.
    """
    parts_a = a.split('.')
    parts_b = b.split('.')
    for left, right in zip(parts_a, parts_b):
        left, right = int(left), int(right)
        if left != right:
            return -1 if left > right else 1
    if len(parts_a) != len(parts_b):
        return -1 if len(parts_a) > len(parts_b) else 1
    return 0
# For every candidate package directory: read the local version from its
# setup.py, compare it against the latest release on PyPI and, when the
# local copy is newer (or the package was never released), build and upload it.
for dir_name in DIRECTORIES_TO_SEARCH_FORM:
    dir_path = os.path.abspath(dir_name)
    print('---', dir_path)
    if not os.path.isdir(dir_path): continue
    setup_filepath = os.path.join(dir_path, 'setup.py')
    if not os.path.isfile(setup_filepath): continue
    os.chdir(dir_path)
    # Ask setup.py itself for the package version and name.
    version = Popen(["python", setup_filepath, '--version'], stdout=PIPE).stdout.read()
    version = version.strip().decode()
    package_name = Popen(["python", setup_filepath, '--name'], stdout=PIPE).stdout.read()
    package_name = package_name.strip().decode().replace(' ', '-')
    remote_version = pypi.package_releases(package_name)
    print( dir_name, version, remote_version )
    # version_compare returns -1 when the local version is newer.
    if len(remote_version)==0 or version_compare(version, remote_version[0])<0:
        print('UPLOADING PYPI')
        # Start from a clean dist/ so only the fresh artifacts get uploaded.
        if os.path.isdir('./dist'):
            shutil.rmtree('./dist')
        Popen(['python', 'setup.py', 'sdist', 'bdist_wheel']).communicate()
        Popen(['twine', 'upload', os.path.join('dist','*')]).communicate()
    os.chdir(CURRENT_DIRECTORY)
"""
def get_pypi_distribution(self, name):
new_version = self.pypi.package_releases(name)
if not new_version:
new_version = self.pypi.package_releases(name.capitalize())
if new_version is None: return new_version
new_version = new_version[0]
all_versions = self.pypi.package_releases(name, True)
data = self.pypi.release_data(name, new_version)
return new_version, all_versions, data.get('summary', '')
"""
|
UmSenhorQualquer/pyforms
|
utils/deploy-pypi.py
|
Python
|
mit
| 2,402
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""\
Implementation of the Str validator classes.
"""
from .validator import Validator
from .sequence import Sequence
from .check_default import CheckDefault
from .check_doc import CheckDoc
from .check_none import CheckNone
from .check_range import CheckMinLen, CheckMaxLen
from .check_scalar import CheckStr
from .check_choice import CheckChoice
from .check_sequence import CheckList, CheckTuple
__author__ = "Simone Campagna"
__copyright__ = 'Copyright (c) 2015 Simone Campagna'
__license__ = 'Apache License Version 2.0'
__all__ = [
'Str',
'StrChoice',
'StrList',
'StrTuple',
]
class Str(Validator):
    """Validator for a str scalar option."""
    # Checks run in order: None handling, default, type, length bounds, doc.
    __checks__ = (CheckNone, CheckDefault, CheckStr, CheckMinLen, CheckMaxLen, CheckDoc)
class StrChoice(Validator):
    """Validator for a str choice option (value restricted to a fixed set)."""
    __checks__ = (CheckNone, CheckDefault, CheckStr, CheckChoice, CheckDoc)
class StrList(Sequence):
    """Validator for a str list option."""
    # Length bounds here apply to the list, not to the individual strings.
    __checks__ = (CheckNone, CheckDefault, CheckList, CheckMinLen, CheckMaxLen, CheckDoc)
    ITEM_VALIDATOR_CLASS = Str
class StrTuple(Sequence):
    """Validator for a str tuple option."""
    # Length bounds here apply to the tuple, not to the individual strings.
    __checks__ = (CheckNone, CheckDefault, CheckTuple, CheckMinLen, CheckMaxLen, CheckDoc)
    ITEM_VALIDATOR_CLASS = Str
|
simone-campagna/daikon
|
zirkon/validator/str_validators.py
|
Python
|
apache-2.0
| 1,901
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-07 16:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make `division`, `seats_contested` and
    `seats_total` on Election nullable."""

    dependencies = [("elections", "0013_election_group")]
    operations = [
        migrations.AlterField(
            model_name="election",
            name="division",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="organisations.OrganisationDivision",
            ),
        ),
        migrations.AlterField(
            model_name="election",
            name="seats_contested",
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name="election",
            name="seats_total",
            field=models.IntegerField(null=True),
        ),
    ]
|
DemocracyClub/EveryElection
|
every_election/apps/elections/migrations/0014_auto_20170107_1605.py
|
Python
|
bsd-3-clause
| 939
|
import json
from datetime import datetime, timedelta
from functools import lru_cache
from logging import getLogger
import boto3
from mypy_boto3_pricing import PricingClient
from sdcm.utils.cloud_monitor.common import InstanceLifecycle
LOGGER = getLogger(__name__)
# TODO: get all prices in __init__
class AWSPricing:
    """Hourly price lookups for AWS EC2 instances (on-demand and spot)."""
    def __init__(self):
        # All Price List API queries go through us-east-1 (the API is global).
        self.pricing_client: PricingClient = boto3.client('pricing', region_name='us-east-1')
    @lru_cache(maxsize=None)
    def get_on_demand_instance_price(self, region_name: str, instance_type: str):
        """Return the on-demand USD/hour price for *instance_type* in *region_name*.

        Results are cached per (region, type); raises AssertionError when
        the Price List API returns no match.
        """
        # The Price List API filters by human-readable location names,
        # so map region codes to them.
        regions_names_map = {
            'us-east-2': 'US East (Ohio)',
            'us-east-1': 'US East (N. Virginia)',
            'us-west-1': 'US West (N. California)',
            'us-west-2': 'US West (Oregon)',
            'ap-south-1': 'Asia Pacific (Mumbai)',
            'ap-northeast-3': 'Asia Pacific (Osaka)',
            'ap-northeast-2': 'Asia Pacific (Seoul)',
            'ap-southeast-1': 'Asia Pacific (Singapore)',
            'ap-southeast-2': 'Asia Pacific (Sydney)',
            'ap-northeast-1': 'Asia Pacific (Tokyo)',
            'ca-central-1': 'Canada (Central)',
            'eu-central-1': 'EU (Frankfurt)',
            'eu-west-1': 'EU (Ireland)',
            'eu-west-2': 'EU (London)',
            'eu-west-3': 'EU (Paris)',
            'eu-north-1': 'EU (Stockholm)',
            'sa-east-1': 'South America (Sao Paulo)'
        }
        response = self.pricing_client.get_products(
            ServiceCode='AmazonEC2',
            Filters=[
                {'Type': 'TERM_MATCH', 'Field': 'operatingSystem', 'Value': 'Linux'},
                {'Type': 'TERM_MATCH', 'Field': 'instanceType', 'Value': instance_type},
                {'Type': 'TERM_MATCH', 'Field': 'preInstalledSw', 'Value': 'NA'},
                {'Type': 'TERM_MATCH', 'Field': 'tenancy', 'Value': 'Shared'},
                {'Type': 'TERM_MATCH', 'Field': 'capacitystatus', 'Value': 'Used'},
                {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': regions_names_map[region_name]}
            ],
            MaxResults=10
        )
        assert response['PriceList'], "failed to get price for {instance_type} in {region_name}".format(
            region_name=region_name, instance_type=instance_type)
        # PriceList entries are JSON strings; dig out the first OnDemand
        # term's price dimension and its USD rate.
        price = response['PriceList'][0]
        price_dimensions = next(iter(json.loads(price)['terms']['OnDemand'].values()))['priceDimensions']
        instance_price = next(iter(price_dimensions.values()))['pricePerUnit']['USD']
        return float(instance_price)
    @staticmethod
    @lru_cache(maxsize=None)
    def get_spot_instance_price(region_name, instance_type):
        """currently doesn't take AZ into consideration"""
        client = boto3.client('ec2', region_name=region_name)
        # Look back three hours of spot price history for Linux instances.
        result = client.describe_spot_price_history(InstanceTypes=[instance_type],
                                                    ProductDescriptions=['Linux/UNIX (Amazon VPC)', 'Linux/UNIX'],
                                                    StartTime=datetime.now() - timedelta(hours=3),
                                                    EndTime=datetime.now())
        prices = result['SpotPriceHistory']
        if prices:
            # average between different AZs
            all_prices = [float(p['SpotPrice']) for p in prices]
            return sum(all_prices) / len(all_prices)
        else:
            LOGGER.warning("Spot price not found for '%s' in '%s':\n%s", instance_type, region_name, result)
            return 0
    def get_instance_price(self, region, instance_type, state, lifecycle):
        """Return the hourly price of an instance, or 0 for non-running states.

        Raises a generic Exception for lifecycles other than on-demand/spot.
        """
        if state == "running":
            if instance_type.startswith("i4"):
                LOGGER.warning("AWSPricing: i4 instance type is not generally available")
                return 0
            if lifecycle == InstanceLifecycle.ON_DEMAND:
                return self.get_on_demand_instance_price(region_name=region, instance_type=instance_type)
            if lifecycle == InstanceLifecycle.SPOT:
                spot_price = self.get_spot_instance_price(region_name=region, instance_type=instance_type)
                return spot_price
            else:
                raise Exception("Unsupported instance lifecycle")
        else:
            # TODO: calculate EBS price
            return 0
class GCEPricing:  # pylint: disable=too-few-public-methods
    """Hourly price lookups for GCE machine types, based on a static table."""
    # TODO: use https://github.com/googleapis/python-billing
    # Static USD/hour table keyed by lifecycle then machine type; see the
    # inline comments for which region each section is based on.
    prices = {
        InstanceLifecycle.ON_DEMAND: {
            # based on us-east1
            "n1-standard-1": 0.0475,
            "n1-standard-2": 0.0950,
            "n1-standard-4": 0.1900,
            "n1-standard-8": 0.3800,
            "n1-standard-16": 0.7600,
            "n1-standard-32": 1.5200,
            "n1-standard-64": 3.0400,
            "n1-standard-96": 4.5600,
            "n2-standard-2": 0.0971,
            "n2-standard-4": 0.1942,
            "n2-standard-8": 0.3885,
            "n2-standard-16": 0.7769,
            "n2-standard-32": 1.5539,
            "n2-standard-48": 2.3308,
            "n2-standard-64": 3.1078,
            "n2-standard-80": 3.8847,
            "n2-highmem-2": 0.1310,
            "n2-highmem-4": 0.2620,
            "n2-highmem-8": 0.5241,
            "n2-highmem-16": 1.0481,
            "n2-highmem-32": 2.0962,
            "n2-highmem-48": 3.1443,
            "n2-highmem-64": 4.1924,
            "n2-highmem-80": 5.2406,
            "n2-highcpu-2": 0.0717,
            "n2-highcpu-4": 0.1434,
            "n2-highcpu-8": 0.2868,
            "n2-highcpu-16": 0.5736,
            "n2-highcpu-32": 1.1471,
            "n2-highcpu-48": 1.7207,
            "n2-highcpu-64": 2.2943,
            "n2-highcpu-80": 2.8678,
            "e2-standard-2": 0.06701,
            "e2-standard-4": 0.13402,
            "e2-standard-8": 0.26805,
            "e2-standard-16": 0.53609,
            "e2-micro": 0.00838,
            "e2-small": 0.01675,
            "e2-medium": 0.03351,
            "f1-micro": 0.0076,
            "g1-small": 0.0257,
            "m1-ultramem-40": 6.3039,
            "m1-ultramem-80": 12.6078,
            "m1-ultramem-160": 25.2156,
            "m1-megamem-96": 10.6740,
            "n1-highmem-2": 0.1184,
            "n1-highmem-4": 0.2368,
            "n1-highmem-8": 0.4736,
            "n1-highmem-16": 0.9472,
            "n1-highmem-32": 1.8944,
            "n1-highmem-64": 3.7888,
            "n1-highmem-96": 5.6832,
            "c2-standard-4": 0.2088,
            "c2-standard-8": 0.4176,
            "c2-standard-16": 0.8352,
            "c2-standard-30": 1.5660,
            "c2-standard-60": 3.1321,
            # based on us-central1
            "n2d-standard-2": 0.0845,
            "n2d-standard-4": 0.1690,
            "n2d-standard-8": 0.3380,
            "n2d-standard-16": 0.6759,
            "n2d-standard-32": 1.3519,
            "n2d-standard-48": 2.0278,
            "n2d-standard-64": 2.7038,
            "n2d-standard-80": 3.3797,
            "n2d-standard-96": 4.0556,
            "n2d-standard-128": 5.4075,
            "n2d-standard-224": 9.4632,
            # special instance type, evaluation quota on us-central1
            "m2-ultramem-208": 42.186,
            "m2-ultramem-416": 84.371,
        },
        InstanceLifecycle.SPOT: {
            "n1-standard-1": 0.0100,
            "n1-standard-2": 0.0200,
            "n1-standard-4": 0.0400,
            "n1-standard-8": 0.0800,
            "n1-standard-16": 0.1600,
            "n1-standard-32": 0.3200,
            "n1-standard-64": 0.6400,
            "n1-standard-96": 0.9600,
            "n2-standard-2": 0.0235,
            "n2-standard-4": 0.0470,
            "n2-standard-8": 0.0940,
            "n2-standard-16": 0.1880,
            "n2-standard-32": 0.3760,
            "n2-standard-48": 0.5640,
            "n2-standard-64": 0.7520,
            "n2-standard-80": 0.9400,
            "n2-highmem-2": 0.0317,
            "n2-highmem-4": 0.0634,
            "n2-highmem-8": 0.1268,
            "n2-highmem-16": 0.2536,
            "n2-highmem-32": 0.5073,
            "n2-highmem-48": 0.7609,
            "n2-highmem-64": 1.0145,
            "n2-highmem-80": 1.2681,
            "n2-highcpu-2": 0.0173,
            "n2-highcpu-4": 0.0347,
            "n2-highcpu-8": 0.0694,
            "n2-highcpu-16": 0.1388,
            "n2-highcpu-32": 0.2776,
            "n2-highcpu-48": 0.4164,
            "n2-highcpu-64": 0.5552,
            "n2-highcpu-80": 0.6940,
            "e2-standard-2": 0.02010,
            "e2-standard-4": 0.04021,
            "e2-standard-8": 0.08041,
            "e2-standard-16": 0.16083,
            "e2-micro": 0.00251,
            "e2-small": 0.00503,
            "e2-medium": 0.01005,
            "f1-micro": 0.0035,
            "g1-small": 0.0070,
            "m1-ultramem-40": 1.3311,
            "m1-ultramem-80": 2.6622,
            "m1-ultramem-160": 5.3244,
            "m1-megamem-96": 2.2600,
            "n1-highmem-2": 0.0250,
            "n1-highmem-4": 0.0500,
            "n1-highmem-8": 0.1000,
            "n1-highmem-16": 0.2000,
            "n1-highmem-32": 0.4000,
            "n1-highmem-64": 0.8000,
            "n1-highmem-96": 1.2000,
            "c2-standard-4": 0.0505,
            "c2-standard-8": 0.1011,
            "c2-standard-16": 0.2021,
            "c2-standard-30": 0.3790,
            "c2-standard-60": 0.7579,
            # based on us-central1
            "n2d-standard-2": 0.0204,
            "n2d-standard-4": 0.0409,
            "n2d-standard-8": 0.0818,
            "n2d-standard-16": 0.1636,
            "n2d-standard-32": 0.3271,
            "n2d-standard-48": 0.4907,
            "n2d-standard-64": 0.6543,
            "n2d-standard-80": 0.8178,
            "n2d-standard-96": 0.9814,
            "n2d-standard-128": 1.3085,
            "n2d-standard-224": 2.2900,
        },
    }
    def get_instance_price(self, region, instance_type, state, lifecycle):  # pylint: disable=unused-argument
        """Using us-east1 to estimate"""
        if state == "running":
            # Unknown machine types price as 0 with a warning rather than failing.
            price = self.prices[lifecycle].get(instance_type, 0)
            if price == 0:
                LOGGER.warning("No price for %s", instance_type)
            return price
        else:
            # calculate disk price
            return 0
|
scylladb/scylla-cluster-tests
|
sdcm/utils/pricing.py
|
Python
|
agpl-3.0
| 10,284
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2008 Zikzakmedia S.L. (http://zikzakmedia.com)
# All Rights Reserved.Jordi Esteve <jesteve@zikzakmedia.com>
# AvanzOSC, Avanzed Open Source Consulting
# Copyright (C) 2011-2012 Iker Coranti (www.avanzosc.com). All Rights Reserved
# Copyright (C) 2013 Akretion Ltda ME (www.akretion.com) All Rights Reserved
# Renato Lima <renato.lima@akretion.com.br>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: metadata read by the module loader.
{
    'name': 'Account Payment Extension',
    'version': '1.1.1',
    'author': 'Zikzakmedia SL',
    'category': 'Accounting & Finance',
    'website': 'www.zikzakmedia.com',
    'license': 'AGPL-3',
    'description': """
Account payment extension.
==========================
This module extends the account_payment module with a lot of features:
----------------------------------------------------------------------
* Definition of payment types (cash, bank transfer, automatical bank transfer, ...). The payment type has a translatable name and note that can be shown in the invoices.
* Two default payment types for partners (client and supplier).
* Automatic selection of payment type in invoices. Now an invoice can have a payment term (30 days, 30/60 days, ...) and a payment type (cash, bank transfer, ...).
* A default check field in partner bank accounts. The default partner bank accounts are selected in invoices and payments.
* New menu/tree/forms to see payments to receive and payments to pay.
* The payments show tree editable fields: Due date, bank account and a check field (for example to write down if a bank check in paper support has been received).
* Two types of payment orders: Payable payment orders (from supplier invoices) and receivable payment orders (from client invoices). So we can make payment orders to receive the payments of our client invoices. Each payment order type has its own sequence.
* The payment orders allow negative payment amounts. So we can have payment orders for supplier invoices (pay money) and refund supplier invoices (return or receive money). Or for client invoices (receive money) and refund client invoices (return or pay money).
* Payment orders: Selected invoices are filtered by payment type, the second message communication can be set at the same time for several invoices.
Based on previous work of Pablo Rocandio & Zikzakmedia (version for 4.2).
""",
    'depends': [
        'base',
        'account',
        'account_payment',
    ],
    # XML/CSV data files loaded on install, in order.
    'data': [
        'security/ir.model.access.csv',
        'wizard/account_payment_order_view.xml',
        'account_payment_extension_view.xml',
        'account_payment_sequence.xml',
        'account_payment_view.xml',
        'account_invoice_view.xml',
        'res_partner_view.xml',
    ],
    'demo': [],
    'test': [],
    'installable': True,
    'auto_install': False,
}
|
MarcosCommunity/odoo
|
comunity_modules/account_payment_extension/__openerp__.py
|
Python
|
agpl-3.0
| 3,717
|
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
# from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from wtforms import TextField, HiddenField, ValidationError, RadioField, BooleanField, SubmitField
from wtforms.validators import Required
import gevent.monkey
from gevent.pywsgi import WSGIServer
gevent.monkey.patch_all()
from inquire import inquire
class ExampleForm(Form):
    """Single-field form for submitting a question from the index page."""
    # Required free-text question field.
    question = TextField('Question', description='', validators=[Required()])
    submit_button = SubmitField('Go')
def create_app(configfile=None):
    """Build and configure the Flask application.

    ``configfile`` is accepted for Flask-Appconfig-style factories but is
    currently unused.
    """
    app = Flask(__name__)
    Bootstrap(app)
    # NOTE: in a real app this should come from configuration, not be hardcoded.
    app.config['SECRET_KEY'] = 'devkeyqwerasdf'

    @app.route('/', methods=('GET', 'POST'))
    def index():
        if request.method != 'POST':
            # Plain GET: show the question form.
            return render_template('index.html', form=ExampleForm())
        question = request.form['question']
        print(question)
        answer = inquire.answer_question(question)
        return render_template('answer.html', answer=answer, question=question)

    return app
# create main callable
app = create_app()
if __name__ == '__main__':
    # Serve with gevent's WSGI server rather than Flask's built-in dev server.
    http_server = WSGIServer(('127.0.0.1', 9191), app)
    print("starting server on port 9191")
    http_server.serve_forever()
    # app.run(debug=True, host='0.0.0.0', port=9191)
|
jcelliott/inquire
|
app.py
|
Python
|
mit
| 1,788
|
#!/usr/bin/python
# coding=utf-8
import hashlib
import os
import re
import subprocess
import sys
import tempfile
from datetime import datetime
from gtts import gTTS
# Read the AGI environment (agi_* headers) from stdin until the blank line
# that terminates the header block.
env = {}  # bug fix: 'env' was never initialised, so the first valid header raised NameError
while 1:
    line = sys.stdin.readline().strip()
    if line == '':
        break
    # bug fix: split only on the first ':' -- AGI values such as
    # 'agi_request: agi://host' legitimately contain colons.
    key, data = line.split(':', 1)
    if key[:4] != 'agi_':
        # skip input that doesn't begin with agi_
        sys.stderr.write("Did not work!\n")
        sys.stderr.flush()
        continue
    key = key.strip()
    data = data.strip()
    if key != '':
        env[key] = data
def _speak_espeak(text):
    """Render *text* to an 8 kHz wav with espeak + sox.

    Returns the base file name (without extension); the playable file is
    ``<base>.wav``.
    """
    # bug fix: tempfile has no 'named_temporary_file' attribute -- the correct
    # API is NamedTemporaryFile. delete=False keeps the reserved name usable
    # after the handle is garbage-collected.
    base_file_name = tempfile.NamedTemporaryFile(delete=False).name
    raw_file_name = tempfile.NamedTemporaryFile(delete=False).name + '-raw.wav'
    subprocess.call(['espeak', text, '-vbrazil-mbrola-4', '-g0.5', '-p60', '-s130', '-w', raw_file_name])
    # Resample to 8 kHz, the rate telephony playback expects.
    subprocess.call(['sox', raw_file_name, base_file_name + '.wav', 'rate', '8k'])
    os.remove(raw_file_name)
    return base_file_name
def _speak_gtts(text):
    """Synthesize *text* with Google TTS (pt-br) and return the mp3 path.

    Results are cached in /tmp keyed by the SHA-224 of the utf-8 encoded
    text, so each phrase is only synthesized (and amplified) once.
    """
    try:
        # Py2-era normalisation: if decode succeeds the text is usable as-is,
        # otherwise re-encode to utf-8 bytes before hashing.
        text.decode('utf-8')
    except:
        text = text.encode('utf-8')
    digest = '/tmp/' + hashlib.sha224(text).hexdigest()
    file_name = digest + '.mp3'
    if os.path.isfile(file_name):
        # cache hit: reuse the previously generated file
        return file_name
    raw_file_name = digest + '-raw.mp3'
    tts = gTTS(text=text, lang='pt-br')
    tts.save(raw_file_name)
    # 'lame --scale 10' boosts the volume of the raw synthesis output.
    subprocess.call(['lame', '--scale', '10', raw_file_name, file_name])
    os.remove(raw_file_name)
    return file_name
def busy(timeout):
    """Run the AGI 'Busy' application with *timeout* and return the result digit."""
    # bug fix: the original placed '%timeout' inside the string literal, so
    # Asterisk received the literal text '%timeout' instead of the value.
    sys.stdout.write("EXEC Busy %s\n" % timeout)
    sys.stdout.flush()
    sys.stderr.write("EXEC Busy %s\n" % timeout)
    sys.stderr.flush()
    line = sys.stdin.readline()
    result = line.strip()
    # AGI reports the digit as its ASCII code; '- 48' maps it back to 0-9.
    return int(checkresult(result)) - 48
def checkresult(params):
    """Parse an AGI response line.

    Returns the captured ``result=`` value (as a string) for a 200 reply,
    -1 for a 200 reply without a result field, and -2 for anything else.
    Every outcome is traced on stderr.
    """
    sys.stderr.write("checkresult: %s\n" % params)
    params = params.rstrip()
    if not re.search('^200', params):
        sys.stderr.write("FAIL (unexpected result '%s')\n" % params)
        sys.stderr.flush()
        return -2
    match = re.search('result=(\d+)', params)
    if match:
        value = match.group(1)
        sys.stderr.write("PASS (%s)\n" % value)
        sys.stderr.flush()
        return value
    sys.stderr.write("FAIL ('%s')\n" % params)
    sys.stderr.flush()
    return -1
def hangup():
    """Hang up the current channel via AGI 'EXEC Hangup'."""
    # bug fix: AGI commands must be newline-terminated; without '\n' Asterisk
    # never executes the command and the readline() below blocks forever.
    sys.stdout.write("EXEC Hangup\n")
    sys.stdout.flush()
    sys.stderr.write("EXEC Hangup\n")
    sys.stderr.flush()
    line = sys.stdin.readline()
    result = line.strip()
    return int(checkresult(result)) - 48
def read_digit(timeout):
    """Wait up to *timeout* ms for one DTMF digit; return it as an int.

    AGI's WAIT FOR DIGIT reports the pressed key as its ASCII code, hence
    the ``- 48`` conversion back to the numeric digit.
    """
    sys.stdout.write("WAIT FOR DIGIT %s\n" %timeout )
    sys.stdout.flush()
    sys.stderr.write("WAIT FOR DIGIT %s\n" %timeout )
    sys.stderr.flush()
    line = sys.stdin.readline()
    sys.stderr.write('wait_for_digit line: %s\n' % line)
    result = line.strip()
    return int(checkresult(result)) - 48
def record(filepath):
    """Start call recording with MixMonitor, writing to *filepath*."""
    # bug fix: the AGI command must end with '\n'; otherwise Asterisk never
    # executes it and the readline() below blocks.
    sys.stdout.write("EXEC MixMonitor " + filepath + "\n")
    sys.stdout.flush()
    sys.stderr.write("MixMonitor(wav, " + filepath +", mb)\n")
    sys.stderr.flush()
    line = sys.stdin.readline()
    result = line.strip()
    return int(checkresult(result)) - 48
def speak(text):
    """Speak *text* to the caller: Google TTS first, espeak as fallback."""
    try:
        file_name = _speak_gtts(text)
        sys.stdout.write("EXEC MP3Player %s\n" % file_name)
    except:
        # gTTS needs network access; on any failure fall back to local espeak
        # output (a wav played with PLAYBACK instead of MP3Player).
        print(sys.exc_info())
        file_name = _speak_espeak(text)
        sys.stdout.write("EXEC PLAYBACK %s\n" % file_name)
    sys.stdout.flush()
    result = sys.stdin.readline().strip()
    return checkresult(result)
def transfer(tech, dest):
    """Dial *dest* on channel technology *tech* (e.g. SIP/100) via AGI DIAL."""
    sys.stdout.write("EXEC DIAL %s/%s\n" % (tech,dest))
    sys.stdout.flush()
    result = sys.stdin.readline().strip()
    checkresult(result)
    # NOTE(review): monitor() is not defined anywhere in this module, so this
    # call raises NameError when reached -- confirm which helper was intended
    # (possibly record()).
    monitor()
def wait_exten(timeout):
    """Run the AGI 'WaitExten' application and return the result digit."""
    # bug fix: the %-formatting was inside the string literal, so Asterisk
    # received the text '%timeout' instead of the value.
    sys.stdout.write("EXEC WaitExten %s\n" % timeout)
    sys.stdout.flush()
    sys.stderr.write("EXEC WaitExten %s\n" % timeout)
    sys.stderr.flush()
    line = sys.stdin.readline()
    result = line.strip()
    return int(checkresult(result)) - 48
def write_digit(digit, timeout, duration):
    """Send a DTMF *digit* via AGI SendDTMF with optional timing arguments.

    NOTE(review): the argument formats differ between branches ('%s/%s' vs
    '%s %s %s'), and the timeout-is-None branch formats *duration* into the
    same slot the timeout occupies elsewhere -- confirm against the SendDTMF
    syntax of the targeted Asterisk version.
    """
    if timeout is None and duration is None:
        # digit only, use application defaults
        sys.stdout.write("EXEC SendDTMF %s\n" % digit )
        sys.stdout.flush()
    elif duration is None:
        sys.stdout.write("EXEC SendDTMF %s/%s\n" % (digit, timeout) )
        sys.stdout.flush()
    elif timeout is None:
        sys.stdout.write("EXEC SendDTMF %s/%s\n" % (digit, duration) )
        sys.stdout.flush()
    else:
        sys.stdout.write("EXEC SendDTMF %s %s %s\n" % (digit, timeout, duration) )
        sys.stdout.flush()
    # stderr trace; may legitimately print 'None' for duration in some branches
    sys.stderr.write("EXEC SendDTMF %s/%s\n" % (digit, duration))
    sys.stderr.flush()
    line = sys.stdin.readline()
    result = line.strip()
    return int(checkresult(result)) - 48
|
lucascudo/pytherisk
|
pytherisk.py
|
Python
|
gpl-3.0
| 4,654
|
# -*- coding: utf-8 -*-
from functools import update_wrapper
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves.urllib.parse import urljoin
from cms import constants
from cms import __version__
__all__ = ['get_cms_setting']
class VERIFIED: pass  # need a unique identifier for CMS_LANGUAGES: sentinel key marking an already-validated dict
def _load_from_file(module_path):
    """
    Load a python module from its absolute filesystem path

    Returns None when *module_path* is falsy.
    NOTE(review): the ``imp`` module is deprecated (removed in Python 3.12);
    migrate to importlib once the supported Python range allows it.
    """
    from imp import load_module, PY_SOURCE
    imported = None
    if module_path:
        with open(module_path, 'r') as openfile:
            imported = load_module("mod", openfile, module_path, ('imported', 'r', PY_SOURCE))
    return imported
def default(name):
    """Decorator factory: prefer the Django setting *name* when it exists,
    otherwise call the wrapped zero-argument function for the default value."""
    def decorator(wrapped):
        def wrapper():
            if hasattr(settings, name):
                return getattr(settings, name)
            return wrapped()
        update_wrapper(wrapper, wrapped)
        # bug fix: the original returned ``wrapped``, discarding the wrapper it
        # had just built -- user-supplied setting overrides were silently
        # ignored.
        return wrapper
    return decorator
# Baseline values for every simple CMS_* setting; get_cms_setting() falls back
# to these when the Django settings module does not override them.
DEFAULTS = {
    'TEMPLATE_INHERITANCE': True,
    'DEFAULT_X_FRAME_OPTIONS': constants.X_FRAME_OPTIONS_INHERIT,
    'TOOLBAR_SIMPLE_STRUCTURE_MODE': True,
    'PLACEHOLDER_CONF': {},
    'PERMISSION': False,
    # Whether to use raw ID lookups for users when PERMISSION is True
    'RAW_ID_USERS': False,
    'PUBLIC_FOR': 'all',
    'APPHOOKS': [],
    'TOOLBARS': [],
    'SITE_CHOICES_CACHE_KEY': 'CMS:site_choices',
    'PAGE_CHOICES_CACHE_KEY': 'CMS:page_choices',
    'MEDIA_PATH': 'cms/',
    'PAGE_MEDIA_PATH': 'cms_page_media/',
    'TITLE_CHARACTER': '+',
    'PAGE_CACHE': True,
    'PLACEHOLDER_CACHE': True,
    'PLUGIN_CACHE': True,
    # Cache prefix includes the CMS version so upgrades invalidate old entries.
    'CACHE_PREFIX': 'cms_{}_'.format(__version__),
    'PLUGIN_PROCESSORS': [],
    'PLUGIN_CONTEXT_PROCESSORS': [],
    'UNIHANDECODE_VERSION': None,
    'UNIHANDECODE_DECODERS': ['ja', 'zh', 'kr', 'vn', 'diacritic'],
    'UNIHANDECODE_DEFAULT_DECODER': 'diacritic',
    'TOOLBAR_ANONYMOUS_ON': True,
    'TOOLBAR_URL__EDIT_ON': 'edit',
    'TOOLBAR_URL__EDIT_OFF': 'edit_off',
    'TOOLBAR_URL__BUILD': 'structure',
    'TOOLBAR_URL__DISABLE': 'toolbar_off',
    'ADMIN_NAMESPACE': 'admin',
    'APP_NAME': None,
    'TOOLBAR_HIDE': False,
    'INTERNAL_IPS': [],
    'REQUEST_IP_RESOLVER': 'cms.utils.request_ip_resolvers.default_request_ip_resolver',
    'PAGE_WIZARD_DEFAULT_TEMPLATE': constants.TEMPLATE_INHERITANCE_MAGIC,
    'PAGE_WIZARD_CONTENT_PLUGIN': 'TextPlugin',
    'PAGE_WIZARD_CONTENT_PLUGIN_BODY': 'body',
    'PAGE_WIZARD_CONTENT_PLACEHOLDER': None,  # Use first placeholder it finds.
}
def get_cache_durations():
    """
    Returns the setting: CMS_CACHE_DURATIONS or the defaults.
    """
    fallback = {
        'menus': 60 * 60,
        'content': 60,
        'permissions': 60 * 60,
    }
    return getattr(settings, 'CMS_CACHE_DURATIONS', fallback)
@default('CMS_MEDIA_ROOT')
def get_media_root():
    # Filesystem root for CMS media: MEDIA_ROOT + MEDIA_PATH unless overridden.
    return os.path.join(settings.MEDIA_ROOT, get_cms_setting('MEDIA_PATH'))

@default('CMS_MEDIA_URL')
def get_media_url():
    # Public URL prefix for CMS media, derived from MEDIA_URL.
    return urljoin(settings.MEDIA_URL, get_cms_setting('MEDIA_PATH'))

@default('CMS_TOOLBAR_URL__EDIT_ON')
def get_toolbar_url__edit_on():
    # Query-string value that switches the toolbar into edit mode.
    return get_cms_setting('TOOLBAR_URL__EDIT_ON')

@default('CMS_TOOLBAR_URL__EDIT_OFF')
def get_toolbar_url__edit_off():
    # Query-string value that switches edit mode off.
    return get_cms_setting('TOOLBAR_URL__EDIT_OFF')

@default('CMS_TOOLBAR_URL__BUILD')
def get_toolbar_url__structure():
    # Query-string value that opens structure (build) mode.
    return get_cms_setting('TOOLBAR_URL__BUILD')

@default('CMS_TOOLBAR_URL__DISABLE')
def get_toolbar_url__disable():
    # Query-string value that disables the toolbar entirely.
    return get_cms_setting('TOOLBAR_URL__DISABLE')
def get_templates():
    """Return the (template path, verbose name) choices available to pages.

    Sources, in order of precedence: CMS_TEMPLATES_DIR (a directory scanned
    for templates, optionally described by an ``__init__.py`` with a
    TEMPLATES dict), then the CMS_TEMPLATES setting. The inheritance magic
    entry is appended when TEMPLATE_INHERITANCE is enabled.
    """
    if getattr(settings, 'CMS_TEMPLATES_DIR', False):
        tpldir = getattr(settings, 'CMS_TEMPLATES_DIR', False)
        # CMS_TEMPLATES_DIR can either be a string pointing to the templates directory
        # or a dictionary holding 'site: template dir' entries
        if isinstance(tpldir, dict):
            tpldir = tpldir[settings.SITE_ID]
        # We must extract the relative path of CMS_TEMPLATES_DIR to the nearest
        # valid templates directory. Here we mimic what the filesystem and
        # app_directories template loaders do
        prefix = ''
        # Relative to TEMPLATE['DIRS'] for filesystem loader
        path = [template['DIRS'][0] for template in settings.TEMPLATES]
        for basedir in path:
            if tpldir.find(basedir) == 0:
                prefix = tpldir.replace(basedir + os.sep, '')
                break
        # Relative to 'templates' directory that app_directory scans
        if not prefix:
            components = tpldir.split(os.sep)
            try:
                prefix = os.path.join(*components[components.index('templates') + 1:])
            except ValueError:
                # If templates is not found we use the directory name as prefix
                # and hope for the best
                prefix = os.path.basename(tpldir)
        config_path = os.path.join(tpldir, '__init__.py')
        # Try to load templates list and names from the template module
        # If module file is not present skip configuration and just dump the filenames as templates
        if os.path.isfile(config_path):
            template_module = _load_from_file(config_path)
            templates = [(os.path.join(prefix, data[0].strip()), data[1]) for data in template_module.TEMPLATES.items()]
        else:
            templates = list((os.path.join(prefix, tpl), tpl) for tpl in os.listdir(tpldir))
    else:
        templates = list(getattr(settings, 'CMS_TEMPLATES', []))
    if get_cms_setting('TEMPLATE_INHERITANCE'):
        templates.append((constants.TEMPLATE_INHERITANCE_MAGIC, _('Inherit the template of the nearest ancestor')))
    return templates
def _ensure_languages_settings(languages):
    """Validate and normalise a CMS_LANGUAGES dict in place.

    Fills per-language defaults ('public', 'redirect_on_fallback',
    'hide_untranslated'), computes missing 'fallbacks' lists from the site's
    public languages, and marks the dict with the VERIFIED sentinel so it is
    only validated once. Raises ImproperlyConfigured on structural errors.
    """
    valid_language_keys = ['code', 'name', 'fallbacks', 'hide_untranslated', 'redirect_on_fallback', 'public']
    required_language_keys = ['code', 'name']
    simple_defaults = ['public', 'redirect_on_fallback', 'hide_untranslated']
    if not isinstance(languages, dict):
        raise ImproperlyConfigured(
            "CMS_LANGUAGES must be a dictionary with site IDs and 'default'"
            " as keys. Please check the format.")
    defaults = languages.pop('default', {})
    default_fallbacks = defaults.get('fallbacks')
    needs_fallbacks = []
    for key in defaults:
        if key not in valid_language_keys:
            raise ImproperlyConfigured("CMS_LANGUAGES has an invalid property in the default properties: %s" % key)
    for key in simple_defaults:
        if key not in defaults:
            defaults[key] = True
    for site, language_list in languages.items():
        # hash(int) == int, so this rejects any non-integer-like key.
        if site != hash(site):
            raise ImproperlyConfigured(
                "CMS_LANGUAGES can only be filled with integers (site IDs) and 'default'"
                " for default values. %s is not a valid key." % site)
        for language_object in language_list:
            for required_key in required_language_keys:
                if required_key not in language_object:
                    # bug fix: the message previously interpolated the stale
                    # loop variable ``key`` instead of ``required_key``.
                    raise ImproperlyConfigured("CMS_LANGUAGES has a language which is missing the required key %r "
                                               "in site %r" % (required_key, site))
            language_code = language_object['code']
            for key in language_object:
                if key not in valid_language_keys:
                    raise ImproperlyConfigured(
                        "CMS_LANGUAGES has invalid key %r in language %r in site %r" % (key, language_code, site)
                    )
            if 'fallbacks' not in language_object:
                if default_fallbacks:
                    language_object['fallbacks'] = default_fallbacks
                else:
                    # defer: fallbacks depend on the site's full language list
                    needs_fallbacks.append((site, language_object))
            for key in simple_defaults:
                if key not in language_object:
                    language_object[key] = defaults[key]
    site_fallbacks = {}
    for site, language_object in needs_fallbacks:
        if site not in site_fallbacks:
            site_fallbacks[site] = [lang['code'] for lang in languages[site] if lang['public']]
        language_object['fallbacks'] = [lang_code for lang_code in site_fallbacks[site] if
                                        lang_code != language_object['code']]
    languages['default'] = defaults
    languages[VERIFIED] = True  # this will be busted by @override_settings and cause a re-check
    return languages
def get_languages():
    """Return the validated CMS_LANGUAGES configuration.

    Falls back to LANGUAGE_CODE / LANGUAGES when CMS_LANGUAGES is unset, and
    skips re-validation when the dict already carries the VERIFIED sentinel.
    """
    if settings.SITE_ID != hash(settings.SITE_ID):
        raise ImproperlyConfigured(
            "SITE_ID must be an integer"
        )
    if not settings.USE_I18N:
        # i18n disabled: expose just the single LANGUAGE_CODE for this site.
        return _ensure_languages_settings(
            {settings.SITE_ID: [{'code': settings.LANGUAGE_CODE, 'name': settings.LANGUAGE_CODE}]})
    if settings.LANGUAGE_CODE not in dict(settings.LANGUAGES):
        raise ImproperlyConfigured(
            'LANGUAGE_CODE "%s" must have a matching entry in LANGUAGES' % settings.LANGUAGE_CODE
        )
    languages = getattr(settings, 'CMS_LANGUAGES', {
        settings.SITE_ID: [{'code': code, 'name': _(name)} for code, name in settings.LANGUAGES]
    })
    if VERIFIED in languages:
        # already normalised by a previous call
        return languages
    return _ensure_languages_settings(languages)
def get_unihandecode_host():
    """Return CMS_UNIHANDECODE_HOST normalised to end with a slash; an unset
    or empty value is returned unchanged."""
    host = getattr(settings, 'CMS_UNIHANDECODE_HOST', None)
    if host and not host.endswith('/'):
        host += '/'
    return host
# Settings whose values are computed by a function rather than read directly;
# get_cms_setting() dispatches here before falling back to DEFAULTS.
COMPLEX = {
    'CACHE_DURATIONS': get_cache_durations,
    'MEDIA_ROOT': get_media_root,
    'MEDIA_URL': get_media_url,
    # complex because not prefixed by CMS_
    'TEMPLATES': get_templates,
    'LANGUAGES': get_languages,
    'UNIHANDECODE_HOST': get_unihandecode_host,
    'CMS_TOOLBAR_URL__EDIT_ON': get_toolbar_url__edit_on,
    'CMS_TOOLBAR_URL__EDIT_OFF': get_toolbar_url__edit_off,
    'CMS_TOOLBAR_URL__BUILD': get_toolbar_url__structure,
    'CMS_TOOLBAR_URL__DISABLE': get_toolbar_url__disable,
}
def get_cms_setting(name):
    """Return the effective CMS setting *name*: computed values via COMPLEX,
    otherwise the ``CMS_``-prefixed Django setting, falling back to DEFAULTS."""
    if name in COMPLEX:
        return COMPLEX[name]()
    return getattr(settings, 'CMS_%s' % name, DEFAULTS[name])
def get_site_id(site):
    """Coerce *site* (a Site instance, an int, or an int-like string) to a
    site id, defaulting to ``settings.SITE_ID`` when coercion fails."""
    from django.contrib.sites.models import Site
    if isinstance(site, Site):
        return site.id
    try:
        return int(site)
    except (TypeError, ValueError):
        # not int-like (e.g. None or arbitrary object): use the configured site
        return settings.SITE_ID
|
czpython/django-cms
|
cms/utils/conf.py
|
Python
|
bsd-3-clause
| 10,405
|
import sys
from antlr4 import *
from antlr4.InputStream import InputStream
from lib.MySQL.MySQLParser import MySQLParser
from lib.MySQL.MySQLListener import MySQLListener
class SQLLoader(MySQLListener):
    """ANTLR parse-tree listener that collects the parts of a MySQL
    statement while the tree is walked: the select list, table aliases,
    the raw WHERE expression, and the statement kind (select/insert/
    delete/update)."""

    def __init__(self):
        self.sql = {}          # reserved for assembled SQL output (currently unused)
        self.alias = dict()    # table alias -> table name; key 'None' when unaliased
        self.where = list()    # raw text of the WHERE clause expression
        self.select = list()   # (qualifier, column) pairs from the select list
        self.test = None
        self.data = None  # This will be insert, delete, select

    def getSQL(self):
        """Return the (currently unused) parsed-SQL dictionary."""
        return self.sql

    def getWhere(self, ctx):
        """Return the raw text of the WHERE clause context.

        NOTE(review): the temp_* locals below are leftovers of an unfinished
        structured-predicate extraction (the exploratory tree walk was
        commented out); only the flat clause text is produced today.
        """
        temp_table1 = None
        temp_table2 = None
        temp_col1 = None
        temp_col2 = None
        temp_relation = None
        expression = list()
        expression = ctx.getText()
        return expression

    def visitChild(self, ctx, branch):
        # Convenience wrapper around the ANTLR child accessor.
        return ctx.getChild(branch)

    def getSelect(self,ctx):
        """Collect (qualifier, column) pairs from a displayed-column context.

        Children are walked in steps of 2 to skip the comma separators.
        """
        for i in range(0,ctx.getChildCount(),2):
            gen1 = self.visitChild(ctx,i)
            self.select.insert(-1, (gen1.getChild(0).getText(), gen1.getChild(2).getText()))

    def getFrom(self,ctx):
        """Record table aliases from the FROM clause.

        Tables without an alias are stored under the key 'None'.
        """
        for i in range(0,ctx.getChildCount(),2):
            gen1 = self.visitChild(ctx, i)
            if gen1.getChild(0).getChild(0).getChildCount() > 1:
                # table followed by an alias token
                self.alias['{}'.format(gen1.getChild(0).getChild(0).getChild(1).getText())] = gen1.getChild(0).getChild(0).getChild(0).getText()
            else:
                self.alias['{}'.format(None)] = gen1.getChild(0).getChild(0).getChild(0).getText()

    def enterDisplayed_column(self, ctx:MySQLParser.Displayed_columnContext):
        # Listener hook: harvest the select list.
        self.getSelect(ctx)

    def enterTable_references(self, ctx:MySQLParser.Table_referencesContext):
        # Listener hook: harvest table names and aliases.
        self.getFrom(ctx)

    def enterWhere_clause(self, ctx:MySQLParser.Where_clauseContext):
        # Listener hook: capture the WHERE expression text.
        self.where = self.getWhere(ctx)

    def enterData_manipulation_statements(self, ctx:MySQLParser.Data_manipulation_statementsContext):
        # Listener hook: classify the DML statement by its child context type.
        if type(ctx.getChild(0)).__name__ == "Select_statementContext":
            self.data = "select"
        elif type(ctx.getChild(0)).__name__ == "Insert_statementsContext":
            self.data = "insert"
        elif type(ctx.getChild(0)).__name__ == "Delete_statementsContext":
            self.data = "delete"
        else:
            self.data = "update"
|
RoryAndrews/Python-Parallel-DB
|
lib/SQLLoader.py
|
Python
|
mit
| 4,415
|
import json
# Council district number -> (member name, borough) for the districts
# participating in the 2016 cycle; used to filter and annotate the geojson.
districts_for_2016 = {
    11: ('Andrew Cohen', 'Bronx'),
    22: ('Costa Constantinides', 'Queens'),
    36: ('Robert Cornegy', 'Brooklyn'),
    35: ('Laurie Cumbo', 'Brooklyn'),
    30: ('Elizabeth Crowley', 'Queens'),
    40: ('Mathieu Eugene', 'Brooklyn'),
    21: ('Julissa Ferreras', 'Queens'),
    44: ('David Greenfield', 'Brooklyn'),
    3: ('Corey Johnson', 'Manhattan'),
    5: ('Ben Kallos', 'Manhattan'),
    29: ('Karen Koslowitz', 'Queens'),
    39: ('Brad Lander', 'Brooklyn'),
    33: ('Steve Levin', 'Brooklyn'),
    7: ('Mark Levine', 'Manhattan'),
    8: ('Melissa Mark-Viverito', 'Manhattan_Bronx'),
    38: ('Carlos Menchaca', 'Brooklyn'),
    27: ('Daneek Miller', 'Queens'),
    34: ('Antonio Reynoso', 'Brooklyn_Queens'),
    31: ('Donovan Richards', 'Queens'),
    10: ('Ydanis Rodriguez', 'Manhattan'),
    6: ('Helen Rosenthal', 'Manhattan'),
    15: ('Ritchie Torres', 'Bronx'),
    47: ('Mark Treyger', 'Brooklyn'),
    32: ('Eric Ulrich', 'Queens'),
    19: ('Paul Vallone', 'Queens'),
    26: ('Jimmy Van Bramer', 'Queens'),
    45: ('Jumaane Williams', 'Brooklyn'),
}

# Copy every top-level attribute except the feature list, then keep only the
# participating districts, annotating each with its member and borough.
with open('alldistricts.geojson') as jsonfile:
    indata = json.load(jsonfile)
outdata = {attr:indata[attr] for attr in indata if attr != 'features'}
outdata['features'] = []
for infeature in indata['features']:
    district_id = infeature['properties']['CounDist']
    if district_id in districts_for_2016:
        # copy so the input feature dict is left untouched
        outfeature = infeature.copy()
        outfeature['properties']['CounPerson'] = districts_for_2016[district_id][0]
        outfeature['properties']['Borough'] = districts_for_2016[district_id][1]
        outdata['features'].append(outfeature)
with open('districts.geojson', 'w') as jsonfile:
    json.dump(outdata, jsonfile)
|
poepublic/shareabouts-pbnyc
|
data/modifydistricts1516.py
|
Python
|
gpl-3.0
| 1,764
|
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from dateutil.parser import parse
import time
log = CPLog(__name__)
class NZBClub(NZBProvider, RSS):
    """NZB search provider backed by nzbclub.com's RSS feed."""

    urls = {
        'search': 'http://www.nzbclub.com/nzbfeed.aspx?%s',
    }

    # Minimum delay between HTTP requests to the site.
    http_time_between_calls = 4 #seconds

    def _searchOnTitle(self, title, movie, quality, results):
        """Query the RSS feed for title/year/quality and append result dicts."""
        q = '"%s %s" %s' % (title, movie['library']['year'], quality.get('identifier'))
        params = tryUrlencode({
            'q': q,
            'ig': '1',
            'rpp': 200,
            'st': 1,
            'sp': 1,
            'ns': 1,
        })
        nzbs = self.getRSSData(self.urls['search'] % params)
        for nzb in nzbs:
            # The release id is embedded in the /nzb_view/<id>/ detail link.
            nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
            enclosure = self.getElement(nzb, "enclosure").attrib
            size = enclosure['length']
            date = self.getTextElement(nzb, "pubDate")

            def extra_check(item):
                # Reject releases whose detail page flags known-bad archives.
                full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)
                for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
                    if ignored in full_description:
                        log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
                        return False
                return True

            results.append({
                'id': nzbclub_id,
                'name': toUnicode(self.getTextElement(nzb, "title")),
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': tryInt(size) / 1024 / 1024,
                'url': enclosure['url'].replace(' ', '_'),
                'detail_url': self.getTextElement(nzb, "link"),
                'get_more_info': self.getMoreInfo,
                'extra_check': extra_check
            })

    def getMoreInfo(self, item):
        """Fill item['description'] from the cached detail page's NFO block."""
        full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        html = BeautifulSoup(full_description)
        nfo_pre = html.find('pre', attrs = {'class':'nfo'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''
        item['description'] = description
        return item

    def extraCheck(self, item):
        # Legacy variant of the per-result extra_check closure above.
        full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        if 'ARCHIVE inside ARCHIVE' in full_description:
            log.info('Wrong: Seems to be passworded files: %s', item['name'])
            return False
        return True
|
jayme-github/CouchPotatoServer
|
couchpotato/core/providers/nzb/nzbclub/main.py
|
Python
|
gpl-3.0
| 2,892
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module is a special module to define functions or other resources
which need to be imported outside of openstack_dashboard.api.nova
(like cinder.py) to avoid cyclic imports.
"""
from django.conf import settings
from glanceclient import exc as glance_exceptions
from novaclient import api_versions
from novaclient import client as nova_client
from horizon import exceptions as horizon_exceptions
from horizon.utils import memoized
from openstack_dashboard.api import base
from openstack_dashboard.api import glance
from openstack_dashboard.api import microversions
from openstack_dashboard.contrib.developer.profiler import api as profiler
# Supported compute versions
VERSIONS = base.APIVersionManager("compute", preferred_version=2)
VERSIONS.load_supported_version(1.1, {"client": nova_client, "version": 1.1})
VERSIONS.load_supported_version(2, {"client": nova_client, "version": 2})

# TLS behaviour for novaclient connections, taken from Django settings.
INSECURE = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
CACERT = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
class Server(base.APIResourceWrapper):
    """Simple wrapper around novaclient.server.Server.

    Preserves the request info so image name can later be retrieved.
    """
    _attrs = ['addresses', 'attrs', 'id', 'image', 'links', 'description',
              'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
              'image_name', 'VirtualInterfaces', 'flavor', 'key_name', 'fault',
              'tenant_id', 'user_id', 'created', 'locked',
              'OS-EXT-STS:power_state', 'OS-EXT-STS:task_state',
              'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-SRV-ATTR:host',
              'OS-EXT-AZ:availability_zone', 'OS-DCF:diskConfig']

    def __init__(self, apiresource, request):
        super(Server, self).__init__(apiresource)
        # kept so image_name can lazily query Glance with the caller's auth
        self.request = request

    # TODO(gabriel): deprecate making a call to Glance as a fallback.
    @property
    def image_name(self):
        # Image name from the embedded image data, falling back to a Glance
        # lookup whose result is cached on the image dict. Returns None when
        # no image is set or the lookup fails.
        if not self.image:
            return None
        elif hasattr(self.image, 'name'):
            return self.image.name
        elif 'name' in self.image:
            return self.image['name']
        else:
            try:
                image = glance.image_get(self.request, self.image['id'])
                self.image['name'] = image.name
                return image.name
            except (glance_exceptions.ClientException,
                    horizon_exceptions.ServiceCatalogException):
                self.image['name'] = None
                return None

    @property
    def internal_name(self):
        # Hypervisor-side instance name (extension attribute).
        return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")

    @property
    def availability_zone(self):
        return getattr(self, 'OS-EXT-AZ:availability_zone', "")

    @property
    def host_server(self):
        # Compute host the instance runs on (extension attribute).
        return getattr(self, 'OS-EXT-SRV-ATTR:host', '')
@memoized.memoized
def get_microversion(request, features):
    """Return the nova API microversion supporting *features*, constrained by
    the server's advertised min/max version range (memoized per request)."""
    client = novaclient(request)
    min_ver, max_ver = api_versions._get_server_version_range(client)
    return (microversions.get_microversion_for_features(
        'nova', features, api_versions.APIVersion, min_ver, max_ver))
def get_auth_params_from_request(request):
    """Extracts properties needed by novaclient call from the request object.

    These will be used to memoize the calls to novaclient.
    """
    user = request.user
    return (
        user.username,
        user.token.id,
        user.tenant_id,
        user.token.project.get('domain_id'),
        base.url_for(request, 'compute'),
        base.url_for(request, 'identity'),
    )
@memoized.memoized
def cached_novaclient(request, version=None):
    """Build (and memoize) a novaclient bound to the request's token/project.

    When *version* is None the globally active compute API version is used.
    """
    (
        username,
        token_id,
        project_id,
        project_domain_id,
        nova_url,
        auth_url
    ) = get_auth_params_from_request(request)
    if version is None:
        version = VERSIONS.get_active_version()['version']
    c = nova_client.Client(version,
                           username,
                           token_id,
                           project_id=project_id,
                           project_domain_id=project_domain_id,
                           auth_url=auth_url,
                           insecure=INSECURE,
                           cacert=CACERT,
                           http_log_debug=settings.DEBUG,
                           auth_token=token_id,
                           endpoint_override=nova_url)
    return c
def novaclient(request, version=None):
    """Return a (memoized) novaclient for *request*.

    APIVersion objects are normalised to their string form so the memoization
    key stays stable.
    """
    requested = version
    if isinstance(requested, api_versions.APIVersion):
        requested = requested.get_string()
    return cached_novaclient(request, requested)
def get_novaclient_with_instance_desc(request):
    # Client pinned at a microversion that supports instance descriptions.
    microversion = get_microversion(request, "instance_description")
    return novaclient(request, version=microversion)
@profiler.trace
def server_get(request, instance_id):
    """Fetch a single server by id, wrapped for dashboard consumption."""
    return Server(get_novaclient_with_instance_desc(request).servers.get(
        instance_id), request)
|
NeCTAR-RC/horizon
|
openstack_dashboard/api/_nova.py
|
Python
|
apache-2.0
| 5,498
|
'''
Created on May 31, 2014
@author: Fenriswolf
'''
import logging
import inspect
import re
from collections import OrderedDict
from tornado.web import Application, RequestHandler, HTTPError
app_routing_log = logging.getLogger("tornado.application.routing")
class RoutingApplication(Application):
    """Tornado Application with a decorator-based URL routing registry.

    Rules registered through :meth:`expose` are collected in ``handler_map``
    ({full_class_name: {http_method: [(url_rule, func_name), ...]}}) and
    flushed into tornado's routing table by :meth:`setRouteHandlers`.
    """

    def __init__(self, handlers=None, default_host="", transforms=None, wsgi=False, **settings):
        Application.__init__(self, handlers, default_host, transforms, wsgi, **settings)
        self.handler_map = OrderedDict()

    def expose(self, rule='', methods=['GET'], kwargs=None, name=None):
        """
        A decorator that is used to register a given URL rule.

        When *rule* is empty, the rule is derived from the enclosing class
        and function name ('index' maps to the bare class name).
        """
        def decorator(func, *args, **kwargs):
            func_name = func.__name__
            # Determine the enclosing class/module from the call stack, since
            # the decorator runs while the class body is being evaluated.
            frm = inspect.stack()[1]
            class_name = frm[3]
            module_name = frm[0].f_back.f_globals["__name__"]
            full_class_name = module_name + '.' + class_name
            for method in methods:
                func_rule = rule if rule else None
                if not func_rule:
                    if func_name == 'index':
                        func_rule = class_name
                    else:
                        func_rule = class_name + '/' + func_name
                func_rule = r'/%s(.*)(/?)' % func_rule
                # bug fix: the original did `handler_map[cls][method] += ...`
                # for an already-known class, which raised KeyError the first
                # time a new HTTP method was registered for that class.
                # Nested setdefault handles both levels safely.
                self.handler_map.setdefault(full_class_name, {}).setdefault(method, []).append(
                    (func_rule, func_name))
                app_routing_log.info("register %s %s to %s.%s" % (method, func_rule, full_class_name, func_name))
            return func
        return decorator

    def setRouteHandlers(self):
        """Register every collected rule with tornado's router."""
        handlers = [(rule[0], full_class_name)
                    for full_class_name, methods in self.handler_map.items()
                    for rules in methods.values()
                    for rule in rules]
        self.add_handlers(".*$", handlers)
class RequestRoutingHandler(RequestHandler):
    """Request handler that resolves the target method from the rules
    registered on the owning :class:`RoutingApplication`."""

    def _get_func_name(self):
        """Return the name of the handler method whose rule matches the
        request path; raise a 404 ``HTTPError`` when nothing matches."""
        key = '%s.%s' % (self.__module__, self.__class__.__name__)
        by_method = self.application.handler_map.get(key, {})
        for rule, func_name in by_method.get(self.request.method, []):
            if rule and func_name and re.match(rule, self.request.path):
                return func_name
        raise HTTPError(404, "")

    def _execute_method(self):
        """Dispatch the request to the method chosen by _get_func_name."""
        if self._finished:
            return
        handler = getattr(self, self._get_func_name())
        self._when_complete(handler(*self.path_args, **self.path_kwargs),
                            self._execute_finish)
|
whitepyro/debian_server_setup
|
tornado/routing.py
|
Python
|
gpl-3.0
| 2,844
|
import re
import ast
import subprocess
# CV.list holds a Python-literal description (list of dicts) of every CV
# entry; ast.literal_eval parses it without executing arbitrary code.
with open("CV.list","r",encoding="utf-8") as f:
    CV = ast.literal_eval(f.read())
# Static page header with the stylesheet and scripts inlined into it.
# NOTE(review): these open() calls never close their file objects —
# acceptable for a short-lived build script, but worth tidying.
CV_header = open("CV_header.html",encoding='utf-8').read().format(
    css=open("CV.css",encoding="utf-8").read(),
    js=open("CV.js",encoding="utf-8").read(),
    email=open("email.js",encoding="utf-8").read()
)
CV_footer='''
</div>
</body>
</html>
'''
diploma_template = '''
<section class="diploma" id="{anchor}" data-shortname="{short_name}">
<span class="caret-up-down fa fa-caret-square-o-up"></span>
<span class="caret-up-down fa fa-caret-square-o-down"></span>
<div class="diploma_content">
<h3>{canonical_name}</h3>
<p class="date">{when}</p>
<div style="clear:both;"></div>
<div class="abstract">{abstract}</div>
<div class="full_text">
{text}
</div>
</div>
</section>
'''
skill_template='''
<section class="skill" id="{anchor}" data-shortname="{short_name}">
<span class="caret-up-down fa fa-caret-square-o-up"></span>
<span class="caret-up-down fa fa-caret-square-o-down"></span>
<div class="skill_content">
<h3>{canonical_name}</h3>
<div style="clear:both;"></div>
<div class="abstract">{abstract}</div>
<div class="full_text">
{text}
</div>
</div>
</section>
'''
keyword_template=r'''<span class="keywords"><a href="{link_target}" data-target="{link_target}">{decoration}{link_text}</a>{other_keywords}</span>'''
class CV_item:
    """One CV entry (diploma, experience, skill or external ressource).

    Besides holding the entry's data, a CV_item knows how to scan its own
    text for other entries' keywords (building the cross-reference graph
    via ``links_to``) and how to rewrite those keywords as HTML links.
    """

    def __init__(self, d):
        '''Create a CV_item from the dictionary d'''
        self.type = d["type"]  # One of "diploma", "experience", "skill" or "external_ressource"
        self.names = set([d["name"]])
        self.canonical_name = d["name"]
        try:
            self.short_name = d["shortname"]
        except KeyError:
            self.short_name = self.canonical_name
        try:
            self.names = self.names.union(d["synonyms"])
        except KeyError:
            pass
        # Sort key: explicit "sort_key" wins, else the year ending the
        # "when" field, else 0 (entries are later sorted newest-first).
        try:
            self.sort_key = int(d["sort_key"])
        except KeyError:
            try:
                self.sort_key = int(d["when"][-4:])
            except KeyError:
                self.sort_key = 0
        try:
            self.when = d["when"]
        except KeyError:
            pass
        try:
            self.text = d["text"]
        except KeyError:
            self.text = "<p>FIXME</p>"
            if self.type != "external_ressource":
                print("Empty text")
        else:
            # Wrap the raw text and turn each newline into a paragraph break.
            self.text = "<p>"+self.text+"</p>"
            self.text = self.text.replace("\n","</p>\n<p>")
        try:
            self.abstract = d["abstract"]
        except KeyError:
            if self.type != "external_ressource":
                self.abstract = "<p>FIXME</p>"
                print("Empty abstract")
        # URL-safe anchor derived from the short name.
        self.anchor = self.short_name.lower().replace(' ','').replace("'","").replace(',','').replace('/','').replace('+','p')
        try:
            self.target = d["target"] #for type external_ressource
        except KeyError:
            self.target = '#'+self.anchor
        self.links_to = set() #Set of items that talk about self
        return

    def answers_to(self, name):
        """True when *name* matches (case-insensitively) one of our names."""
        return name.lower() in map(lambda x:x.lower(), self.names)

    def talked_about_in(self, item):
        """Record that *item*'s text mentions self."""
        self.links_to.add(item)

    def scan_text(self):
        '''Look for keywords in the text field and refer self to the corresponding items.'''
        for kw in [x for x in KEYWORDS if not self.answers_to(x)]:
            #Without \W, keywords like 'C' would match 'c' in words like cocaine
            #[^\w'+] is to avoid matching "C'est" in french and "C++"
            #s\W is to match the plural forms
            # BUG FIX: the leading group used to be ($|\W); '$' only
            # matches at the very end of the string, so a keyword at the
            # start of the text was never detected.  linkify_text()
            # already used the correct (^|\W) anchor.
            if re.search(r'(^|\W)'+re.escape(kw)+r"([^\w'\+]|s\W)", self.text, flags=re.IGNORECASE):
                item_self_talks_about = find_CV_item(kw)
                item_self_talks_about.talked_about_in(self)

    def replacement_text(self, kw_text):
        '''Return the HTML code to write in lieu of kw_text when found in the text of self'''
        if self.answers_to(kw_text):
            return kw_text #No links for e.g. "Python" or a synonym of it in the section about Python
        kw_item = find_CV_item(kw_text)
        # External ressources get a small "external link" icon.
        if kw_item.type == "external_ressource":
            decoration = '<small><span class="fa fa-external-link"></span></small>'
        else:
            decoration = ""
        # Also list every other item that mentions this keyword.
        other_keywords_links = ""
        for other_item in [x for x in kw_item.links_to if not self.answers_to(x.canonical_name)]:
            other_keywords_links += r'<a href="{target}" data-target="{target}" class="list-group-item">{text}</a>'.format(
                target = other_item.target,
                text = other_item.short_name)
        if other_keywords_links:
            other_keywords_links = '<span class="keyword_list list_group">'+ other_keywords_links + '</span>'
        return keyword_template.format(
            link_target=kw_item.target,
            link_text=kw_text,
            other_keywords=other_keywords_links,
            decoration=decoration)

    def remove_overlapping_spans(self, edits):
        """Drop the shorter of any two overlapping edits in place.

        Keeps the longest keyword when one keyword is contained in
        another's span.  Returns the (mutated) list.
        """
        index = 0
        while index < len(edits)-1:
            previous = edits[index]
            current = edits[index+1]
            if previous[0][1] > current[0][0]: #overlapping spans
                if previous[0][1] - previous[0][0] > current[0][1] - current[0][0]:
                    #previous is longer
                    edits.remove(current)
                else:
                    edits.remove(previous)
            else:
                index+=1
        return edits

    def apply_edit_list(self, text, edits):
        '''Return text after modifications specified in edits have been applied'''
        if not edits:
            return text
        if len(edits) > 1:
            edits.sort(key=lambda x:x[0][0]) #Sorting by beginning of span
            if any([x[0][1] > y[0][0] for x,y in zip(edits[:-1],edits[1:])]):
                edits = self.remove_overlapping_spans(edits)
            assert(all([x[0][1] < y[0][0] for x,y in zip(edits[:-1],edits[1:])])) #No overlapping spans
        # Splice the replacements in, copying untouched text between spans.
        dot = 0
        modified_text = ""
        for span,replacement_text in edits:
            modified_text += text[dot:span[0]]
            modified_text += replacement_text
            dot = span[1]
        modified_text += text[dot:]
        return modified_text

    def linkify_text(self):
        '''Transform keywords from the text into links.'''
        edit_list = [] #The text must be modified at once, in the end, and not during the loop.
        #Otherwise, keywords will appear in the HTML formating (e.g. href="#objective-c"), and be replaced
        #during a future iteration of the for loop.
        #So we maintain an edit list of pairs [span,replacement_text]
        for kw in KEYWORDS:
            match = re.search(r'(^|\W)('+re.escape(kw)+r")([^\w'\+]|s\W)",
                              self.text,
                              flags=re.IGNORECASE)
            if match:
                # Trim the surrounding context groups off the matched span.
                corrected_span = [match.span()[0]+len(match.group(1)),
                                  match.span()[1]-len(match.group(3))]
                edit_list.append([corrected_span, self.replacement_text(match.group(2))])
        self.text = self.apply_edit_list(self.text,edit_list)
def find_CV_item(name):
    """Return the unique CV_item answering to *name*.

    Could be refactored away by making CV_item hashable and CV_items a
    dict — but the hash function would have to handle synonyms.
    """
    matches = [item for item in CV_items if item.answers_to(name)]
    assert(len(matches) == 1)
    return matches[0]
# ---- Build the in-memory model and cross-link every entry. ----
CV_items = [CV_item(x) for x in CV]
KEYWORDS = set()
for x in CV_items:
    KEYWORDS = KEYWORDS.union(x.names)
# First pass records who mentions whom; second pass rewrites the texts.
for x in CV_items:
    x.scan_text()
for x in CV_items:
    x.linkify_text()
# ---- Partition entries by type, most recent first. ----
diplomas = [x for x in CV_items if x.type == "diploma"]
diplomas.sort(key=lambda x:x.sort_key)
diplomas.reverse()
skills = [x for x in CV_items if x.type == "skill"]
skills.sort(key=lambda x:x.sort_key)
skills.reverse()
experiences = [x for x in CV_items if x.type == "experience"]
experiences.sort(key=lambda x:x.sort_key)
experiences.reverse()
#Graph generation using the dot language
edges = {}
nodes = {}
for kw in KEYWORDS:
    to_item = find_CV_item(kw)
    nodes['"'+to_item.short_name+'"']=to_item.anchor
    # Each item mentioning the keyword contributes one (weighted) edge.
    for target in map(lambda x:x.canonical_name,to_item.links_to):
        from_item = find_CV_item(target)
        try:
            edges['"'+from_item.short_name+'"->"'+to_item.short_name+'"'] += 1
        except KeyError:
            edges['"'+from_item.short_name+'"->"'+to_item.short_name+'"'] = 1
with open('CV.dot','w',encoding='utf-8') as f:
    f.write('digraph G {')
    for node in nodes.keys():
        f.write(node+' [href="index.html#'+nodes[node]+'", id="svg_'+nodes[node]+'" target="_top"];\n')
    for edge in edges.keys():
        f.write(edge+' [weight='+str(edges[edge])+'];\n')
    f.write("}")
# ---- Emit the final HTML page, embedding the graphviz-rendered SVG. ----
with open('index.html','w',encoding='utf-8') as f:
    f.write(CV_header)
    f.write('<h2 id="education">Formation, diplômes et récompenses</h2><div style="clear:both;"></div>')
    f.write("\n".join([diploma_template.format(**x.__dict__) for x in diplomas]))
    f.write('<h2 id="skills">Compétences</h2><div style="clear:both;"></div>')
    f.write("\n".join([skill_template.format(**x.__dict__) for x in skills]))
    f.write('<h2 id="experience">Expérience</h2><div style="clear:both;"></div>')
    f.write("\n".join([diploma_template.format(**x.__dict__) for x in experiences]))
    f.write('<h2 id="graph">Sous forme graphique...</h2><div style="clear:both;"></div>')
    # NOTE(review): shell=True is safe here (fixed command string), but
    # requires graphviz's `dot` to be on PATH.
    f.write(str(subprocess.Popen("dot CV.dot -T svg", shell=True, stdout=subprocess.PIPE).stdout.read(),encoding="utf-8"))
    f.write(CV_footer)
|
edouardklein/RelationalResume
|
CV.py
|
Python
|
gpl-3.0
| 9,728
|
from . import models
def update_votes(vote, obj, created=False):
    """Apply a single vote to *obj* and persist it.

    An up-vote increments ``obj.votes`` by one; anything else decrements
    it.  ``created`` is accepted for signal compatibility but unused.
    """
    if vote == models.Vote.UP:
        delta = 1
    else:
        delta = -1
    obj.votes += delta
    obj.save()
|
TheRedLady/codebook
|
codebook/posts/utils.py
|
Python
|
gpl-3.0
| 148
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import logging
import json
from flask import Flask
from flask_restful import Api
from resources.org_api import OrganizationApi
from resources.form_api import FormApi
from resources.filled_form_api import FilledFormApi
from resources.user_api import UserApi, UserFrontPageApi
from resources.place_api import PlaceApi
from resources.form_relationships import FilledFormsByOrgApi, FormsByOrgApi, FilledFormByUserInOrgApi
from resources.place_relationships import PlacesByOrgApi
from resources.org_user_relationships import InviteUserToOrg, AddUserToOrg, GetAllWorkersForOrg, RemoveUserFromOrg, GetAllOrgsForWorker
from resources.clockin_api import ClockInApi
from resources.blobstore_api import BlobstoreURLGen, PhotoApi, PhotoUploadHandler
from google.appengine.api import users
from google.appengine.ext import ndb
from flask_cors import CORS, cross_origin
app = Flask(__name__)
# NOTE(review): debug mode and wide-open CORS are enabled unconditionally —
# fine for development, should be gated off for production.
app.debug = True
CORS(app)
api = Api(app)
# SECURITY NOTE(review): OAuth client credentials are hard-coded in source;
# they belong in configuration or a secret store, not in the repository.
clientId = '692772929154-2nasht9k1q88nm15mekm0s6evt1pjin2.apps.googleusercontent.com'
client_secret = 'ayrXnhy-BmSXafYo-5Iei93I'
@app.route('/rest/')
def hello():
    """Trivial liveness endpoint."""
    greeting = 'Hello World!'
    return greeting
@app.route('/rest/auth2')
def auth_test():
    """Debug endpoint: show the current GAE user (nickname plus a logout
    URL) or a login URL when nobody is signed in."""
    user = users.get_current_user()
    if user:
        # Signed in: report a (nickname, logout_url) tuple.
        greeting = (user.nickname(), users.create_logout_url('/'))
    else:
        greeting = users.create_login_url('/')
    return '{}'.format(greeting)
@app.route('/rest/auth')
def auth():
    """Classify the signed-in GAE user as 'organization', 'user' or 'none'.

    Returns 401 with a login URL when nobody is signed in.
    """
    user = users.get_current_user()
    if user:
        # Organization and User entities both key on the GAE user id, so
        # probe the datastore for each kind in turn.
        org_key = ndb.Key('Organization', user.user_id())
        org = org_key.get()
        if org is not None:
            return json.dumps({'id': user.user_id(), 'account': 'organization', 'email': user.email()})
        user_key = ndb.Key('User', user.user_id())
        user_entity = user_key.get()
        if user_entity is not None:
            return json.dumps({'id': user.user_id(), 'account': 'user', 'email': user.email()})
        else:
            # Signed in, but not yet registered as either account kind.
            return json.dumps({'id': user.user_id(), 'account': 'none', 'email': user.email()})
    else:
        login_url = users.create_login_url('/signup')
        # NOTE: Python 2 print statement — this app targets GAE standard/py2.
        print login_url
        return json.dumps({'error': 'not signed in', 'login_url': login_url}), 401
@app.errorhandler(500)
def server_error(e):
    """Last-resort 500 handler: log the stacktrace, return a terse body."""
    # Log the error and stacktrace.
    logging.exception('An error occurred during a request.')
    return 'An internal error occurred.', 500
# [END app]
api.add_resource(OrganizationApi, '/rest/org/<string:id>', '/rest/org')
api.add_resource(FormApi, '/rest/form/<string:parent_id>/<string:id>', '/rest/form/<string:parent_id>')
api.add_resource(PlaceApi, '/rest/place/<string:parent_id>/<string:id>', '/rest/place/<string:parent_id>')
api.add_resource(FilledFormApi, '/rest/filledform/<string:parent_id>/<string:id>', '/rest/filledform/<string:parent_id>')
api.add_resource(ClockInApi, '/rest/clockin/<string:user_id>', '/rest/clockin/<string:user_id>/<string:org_id>/<string:place_id>')
api.add_resource(UserApi, '/rest/user/<string:id>', '/rest/user')
api.add_resource(FilledFormsByOrgApi, '/rest/filledform/org/<string:id>')
api.add_resource(FormsByOrgApi, '/rest/form/org/<string:id>')
api.add_resource(PlacesByOrgApi, '/rest/place/org/<string:id>')
api.add_resource(FilledFormByUserInOrgApi, '/rest/filledform/org/<string:id>/user/<string:user_id>')
api.add_resource(InviteUserToOrg, '/rest/invite/<string:org_id>/<string:user_email>')
api.add_resource(AddUserToOrg, '/rest/org/add/worker/<string:org_id>/<string:user_id>')
api.add_resource(GetAllWorkersForOrg, '/rest/org/workers/<string:org_id>')
api.add_resource(RemoveUserFromOrg, '/rest/org/remove/worker/<string:org_id>/<string:user_id>')
api.add_resource(GetAllOrgsForWorker, '/rest/user/orgs/<string:user_id>')
api.add_resource(BlobstoreURLGen, '/rest/blobstore/url')
api.add_resource(PhotoUploadHandler, '/rest/upload_photo')
api.add_resource(PhotoApi, '/rest/photo/<string:blob_id>')
api.add_resource(UserFrontPageApi, '/rest/user/front_page/<string:id>')
|
jtovar2/demo_app
|
backend/main.py
|
Python
|
mit
| 4,621
|
import random
from population import Population
from individual import Individual
class Algorithm(object):
    """Drives one genetic-algorithm run over a Population.

    Holds the GA hyper-parameters (mutation rate, tournament size,
    elitism) and produces successive generations via :meth:`evolve`.
    """

    def __init__(self, populationObject, mutationRate, tournamentSize, elitismFlag):
        self.population = populationObject
        self.mutation = mutationRate      # probability a child is replaced by a random individual
        self.elitism = elitismFlag        # carry the fittest individual over unchanged?
        self.tourSize = tournamentSize    # tournament-selection sample size
        # With elitism, one slot of the next generation is reserved for
        # the current fittest individual.
        self.start = 1 if elitismFlag else 0

    def evolve(self):
        """Create the next generation of the population.

        Returns a `Population` object of the same size as the current one.
        """
        p_ = Population(self.population.size, False)
        # BUG FIX: the original reserved a slot for the elite individual
        # (the range started at 1) but never actually copied it into the
        # new generation, silently shrinking the population by one per
        # generation whenever elitism was enabled.
        elite = [self.population.fittest()] if self.elitism else []
        offspring = [self.mutate(self.crossover(self.select(), self.select()))
                     for _ in range(self.start, self.population.size)]
        p_.individuals = elite + offspring
        p_.updateFitnesses()
        return p_

    def mutate(self, indv):
        """Mutate a given individual based on the mutationRate.

        Returns an `Individual` object: a brand-new random individual when
        the mutation fires, otherwise the input unchanged.
        """
        if random.random() <= self.mutation:
            indv = Individual().generateIndividual()
        return indv

    def crossover(self, parent1, parent2):
        """Crossover two individuals to generate a child individual.

        Returns an `Individual` whose phrase is parent1's prefix glued to
        parent2's suffix at a uniformly random split point.
        """
        child = Individual()
        # BUG FIX: the original looped over every character position,
        # overwriting child.phrase each iteration, so only the final
        # random split ever took effect.  A single uniformly random split
        # is distribution-equivalent and O(1) instead of O(n).
        # TODO: Experiment with other ways of combining strings
        split = random.randint(0, len(Individual.targetPhrase()))
        child.phrase = parent1.phrase[:split] + parent2.phrase[split:]
        return child

    def select(self):
        """Select an individual for crossover via tournament selection.

        Returns an `Individual` object: the fittest of `tourSize` members
        sampled (with replacement) from the population.
        """
        # TODO: Make this faster to improve speed of algorithm
        tournament = Population(self.tourSize, False)
        tournament.individuals = [random.choice(self.population.individuals)
                                  for _ in range(self.tourSize)]
        return tournament.fittest()
|
bluerama/string-evolve
|
stringevolve/algorithm.py
|
Python
|
mit
| 1,984
|
# Copyright (c) 2011 NTT.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from nova import log as logging
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# Tables to alter
#
networks = Table('networks', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('injected', Boolean(create_constraint=True, name=None)),
Column('cidr',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('netmask',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('bridge',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('gateway',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('broadcast',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('dns',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('vlan', Integer()),
Column('vpn_public_address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('vpn_public_port', Integer()),
Column('vpn_private_address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('dhcp_start',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('cidr_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('ra_server', String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column(
'label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('network_id',
Integer(),
ForeignKey('networks.id'),
nullable=True),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=True),
Column('allocated', Boolean(create_constraint=True, name=None)),
Column('leased', Boolean(create_constraint=True, name=None)),
Column('reserved', Boolean(create_constraint=True, name=None)),
Column("addressV6", String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column("netmaskV6", String(length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column("gatewayV6", String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
)
#
# New Tables
#
# None
#
# Columns to add to existing tables
#
networks_netmask_v6 = Column(
'netmask_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
def upgrade(migrate_engine):
    """Migration 012: move per-fixed-ip IPv6 data up to the network level
    (FlatManager IPv6 support)."""
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    # Alter column name: ra_server becomes gateway_v6 to match the
    # *_v6 naming of the other IPv6 columns.
    networks.c.ra_server.alter(name='gateway_v6')
    # Add new column to existing table
    networks.create_column(networks_netmask_v6)
    # drop existing columns from table — the v6 address/netmask/gateway
    # are now tracked on the network, not on each fixed ip.
    fixed_ips.c.addressV6.drop()
    fixed_ips.c.netmaskV6.drop()
    fixed_ips.c.gatewayV6.drop()
|
termie/pupa
|
nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
|
Python
|
apache-2.0
| 6,625
|
# the maximum age of an event, in seconds, before it is published to the LRS
PUBLISH_MAX_WAIT_TIME = 60
# the number of statements to publish per batch
PUBLISH_MAX_PAYLOAD = 10
# lrs credentials
# NOTE(review): placeholder values — real deployments must override these,
# preferably from the environment rather than a tracked settings file.
LRS_ENDPOINT = 'https://lrs.adlnet.gov/xAPI/'
LRS_USERNAME = 'fakeuser'
LRS_PASSWORD = 'fakepassword'
|
daafgo/Edx_bridge
|
xapi-bridge/settings.py
|
Python
|
apache-2.0
| 300
|
from office365.entity import Entity
class OutlookCategory(Entity):
    """Represents a category by which a user can group Outlook items such
    as messages and events.

    The user defines categories in a master list, and can apply one or more
    of these user-defined categories to an item."""
    pass
|
vgrem/Office365-REST-Python-Client
|
office365/outlook/outlook_category.py
|
Python
|
mit
| 303
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lasboundary.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.parameters.ParameterString import ParameterString
from sextante.outputs.OutputVector import OutputVector
from sextante.lidar.lastools.LasToolsUtils import LasToolsUtils
from sextante.parameters.ParameterBoolean import ParameterBoolean
from sextante.parameters.ParameterNumber import ParameterNumber
from sextante.lidar.lastools.LasToolsAlgorithm import LasToolsAlgorithm
from sextante.parameters.ParameterFile import ParameterFile
class lasboundary(LasToolsAlgorithm):
    """SEXTANTE wrapper around the lastools ``lasboundary.exe`` tool, which
    computes a (possibly concave) boundary polygon for a LAS point cloud."""

    INPUT = "INPUT"
    OUTPUT = "OUTPUT"
    CONCAVITY = "CONCAVITY"
    DISJOINT = "DISJOINT"
    HOLES = "HOLES"

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, inputs and outputs."""
        self.name = "lasboundary"
        self.group = "Tools"
        self.addParameter(ParameterFile(lasboundary.INPUT, "Input las layer"))
        self.addParameter(ParameterNumber(lasboundary.CONCAVITY,
                                          "Concavity threshold", 0, None, 50.0))
        self.addParameter(ParameterBoolean(lasboundary.HOLES,
                                           "Compute also interior holes", False))
        self.addParameter(ParameterBoolean(lasboundary.DISJOINT,
                                           "Compute disjoint hull", False))
        self.addOutput(OutputVector(lasboundary.OUTPUT, "Output boundary layer"))
        self.addCommonParameters()

    def processAlgorithm(self, progress):
        """Assemble the lasboundary.exe command line and run it."""
        exe = os.path.join(LasToolsUtils.LasToolsPath(), "bin", "lasboundary.exe")
        commands = [exe,
                    "-i", self.getParameterValue(lasboundary.INPUT),
                    "-o", self.getOutputValue(lasboundary.OUTPUT),
                    "-concavity",
                    str(self.getParameterValue(lasboundary.CONCAVITY))]
        if self.getParameterValue(lasboundary.HOLES):
            commands.append("-holes")
        if self.getParameterValue(lasboundary.DISJOINT):
            commands.append("-disjoint")
        self.addCommonParameterValuesToCommand(commands)
        LasToolsUtils.runLasTools(commands, progress)
|
innotechsoftware/Quantum-GIS
|
python/plugins/sextante/lidar/lastools/lasboundary.py
|
Python
|
gpl-2.0
| 3,125
|
import os
import logging
import logging.handlers
PATH = os.path.abspath(os.path.dirname(__file__))
def spawn_logger(cfg, logger_name):
    """Return an INFO-level logger writing to logs/<cfg>.<logger_name>.log.

    :param cfg: Current config name (becomes part of the log file name)
    :param logger_name: Logger name, currently 'error' and 'info'
    :return: the configured :class:`logging.Logger`
    """
    log_path = os.path.join(PATH, 'logs/%s.%s.log' % (cfg, logger_name))
    file_handler = logging.handlers.RotatingFileHandler(log_path,
                                                        backupCount=5)
    file_handler.setFormatter(logging.Formatter(u'[%(asctime)s] %(message)s'))
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    return logger
def get_variable(cfg, name, default, false_condition, fail_message):
    """Read attribute *name* from *cfg*, falling back to *default*.

    Raises ``RuntimeError(fail_message)`` when ``false_condition(value)``
    holds for the resolved value; otherwise returns it.
    """
    value = getattr(cfg, name, default)
    if false_condition(value):
        raise RuntimeError(fail_message)
    return value
|
mayflower/geordi
|
utils.py
|
Python
|
gpl-3.0
| 854
|
# Nimble Storage, Inc. (c) 2013-2014
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from unittest import mock
from oslo_utils import uuidutils
from six.moves import http_client
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder.objects import volume as obj_volume
from cinder.objects import volume_type
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.volume.drivers.hpe import nimble
from cinder.volume import volume_types
from cinder.volume import volume_utils
NIMBLE_CLIENT = 'cinder.volume.drivers.hpe.nimble.NimbleRestAPIExecutor'
NIMBLE_URLLIB2 = 'cinder.volume.drivers.hpe.nimble.requests'
NIMBLE_RANDOM = 'cinder.volume.drivers.hpe.nimble.random'
NIMBLE_ISCSI_DRIVER = 'cinder.volume.drivers.hpe.nimble.NimbleISCSIDriver'
NIMBLE_FC_DRIVER = 'cinder.volume.drivers.hpe.nimble.NimbleFCDriver'
DRIVER_VERSION = '4.2.0'
nimble.DEFAULT_SLEEP = 0
FAKE_POSITIVE_LOGIN_RESPONSE_1 = '2c20aad78a220ed1dae21dcd6f9446f5'
FAKE_POSITIVE_LOGIN_RESPONSE_2 = '2c20aad78a220ed1dae21dcd6f9446ff'
FAKE_POSITIVE_HEADERS = {'X-Auth-Token': FAKE_POSITIVE_LOGIN_RESPONSE_1}
FAKE_POSITIVE_NETCONFIG_RESPONSE = {
'role': 'active',
'subnet_list': [{'network': '172.18.212.0',
'discovery_ip': '172.18.108.21',
'type': 'data',
'allow_iscsi': True,
'label': 'data1',
'allow_group': True,
'vlan_id': 0}],
'array_list': [{'nic_list': [{'subnet_label': 'data1',
'tagged': False,
'data_ip': '172.18.212.82',
'name': 'eth3'}]}],
'name': 'test-array'}
FAKE_NEGATIVE_NETCONFIG_RESPONSE = exception.VolumeDriverException(
"Session expired")
FAKE_CREATE_VOLUME_POSITIVE_RESPONSE = {
'clone': False,
'name': "testvolume"}
FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_ENCRYPTION = {
'clone': False,
'name': "testvolume-encryption"}
FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERF_POLICY = {
'clone': False,
'name': "testvolume-perf-policy"}
FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_MULTI_INITIATOR = {
'clone': False,
'name': "testvolume-multi-initiator"}
FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_DEDUPE = {
'clone': False,
'name': "testvolume-dedupe"}
FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_QOS = {
'clone': False,
'name': "testvolume-qos"}
FAKE_EXTRA_SPECS = {'multiattach': '<is> True',
'nimble:iops-limit': '1024'}
FAKE_GET_VOL_INFO_RESPONSE = {'name': 'testvolume',
'clone': False,
'target_name': 'iqn.test',
'online': True,
'agent_type': 'openstack'}
FAKE_GET_VOL_INFO_RESPONSE_MANAGE = {'name': 'testvolume',
'agent_type': 'none',
'online': False,
'target_name': 'iqn.test'}
FAKE_GET_VOL_INFO_ONLINE = {'name': 'testvolume',
'size': 2048,
'online': True,
'agent_type': 'none'}
FAKE_GET_VOL_INFO_RETYPE = {'name': 'testvolume',
'size': 2048,
'online': True,
'agent_type': 'none',
'pool_id': 'none',
'pool_name': 'none'}
FAKE_GET_VOL_INFO_BACKUP_RESPONSE = {'name': 'testvolume',
'clone': True,
'target_name': 'iqn.test',
'online': False,
'agent_type': 'openstack',
'parent_vol_id': 'volume-' +
fake.VOLUME2_ID,
'base_snap_id': 'test-backup-snap'}
FAKE_GET_SNAP_INFO_BACKUP_RESPONSE = {
'description': "backup-vol-" + fake.VOLUME2_ID,
'name': 'test-backup-snap',
'id': fake.SNAPSHOT_ID,
'vol_id': fake.VOLUME_ID,
'volume_name': 'volume-' + fake.VOLUME_ID}
FAKE_POSITIVE_GROUP_CONFIG_RESPONSE = {
'name': 'group-test',
'version_current': '0.0.0.0',
'access_protocol_list': ['iscsi']}
FAKE_LOGIN_POST_RESPONSE = {
'data': {'session_token': FAKE_POSITIVE_LOGIN_RESPONSE_1}}
FAKE_EXTEND_VOLUME_PARAMS = {'data': {'size': 5120,
'reserve': 0,
'warn_level': 80,
'limit': 100,
'snap_limit': sys.maxsize}}
FAKE_IGROUP_LIST_RESPONSE = [
{'iscsi_initiators': [{'iqn': 'test-initiator1'}],
'name': 'test-igrp1'},
{'iscsi_initiators': [{'iqn': 'test-initiator2'}],
'name': 'test-igrp2'}]
FAKE_IGROUP_LIST_RESPONSE_FC = [
{'fc_initiators': [{'wwpn': '10:00:00:00:00:00:00:00'}],
'name': 'test-igrp1'},
{'fc_initiators': [{'wwpn': '10:00:00:00:00:00:00:00'},
{'wwpn': '10:00:00:00:00:00:00:01'}],
'name': 'test-igrp2'}]
FAKE_GET_VOL_INFO_REVERT = {'name': 'testvolume',
'id': fake.VOLUME_ID,
'clone': False,
'target_name': 'iqn.test',
'online': True,
'agent_type': 'openstack',
'last_snap': {'snap_id': fake.SNAPSHOT_ID}}
FAKE_SNAP_INFO_REVERT = {'name': 'testsnap',
'id': fake.SNAPSHOT2_ID}
FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE = exception.VolumeBackendAPIException(
"Volume testvolume not found")
FAKE_VOLUME_INFO_NEGATIVE_RESPONSE = exception.VolumeBackendAPIException(
"Volume testvolume not found")
FAKE_CREATE_VOLUME_NEGATIVE_ENCRYPTION = exception.VolumeBackendAPIException(
"Volume testvolume-encryption not found")
FAKE_CREATE_VOLUME_NEGATIVE_PERFPOLICY = exception.VolumeBackendAPIException(
"Volume testvolume-perfpolicy not found")
FAKE_CREATE_VOLUME_NEGATIVE_DEDUPE = exception.VolumeBackendAPIException(
"The specified pool is not capable of hosting deduplicated volumes")
FAKE_CREATE_VOLUME_NEGATIVE_QOS = exception.VolumeBackendAPIException(
"Please set valid IOPS limitin the range [256, 4294967294]")
FAKE_VOLUME_RESTORE_NEGATIVE_RESPONSE = exception.VolumeBackendAPIException(
"No recent Snapshot found")
FAKE_POSITIVE_GROUP_INFO_RESPONSE = {
'version_current': '3.0.0.0',
'group_target_enabled': False,
'name': 'group-nimble',
'usage_valid': True,
'usable_capacity_bytes': 8016883089408,
'free_space': 101111111901}
FAKE_GET_VOL_INFO_RESPONSE = {'name': 'testvolume-cg',
'clone': False,
'target_name': 'iqn.test',
'online': True,
'agent_type': 'openstack'}
FAKE_EXTRA_SPECS_CG = {'consistent_group_snapshot_enabled': "<is> False"}
FAKE_VOLUME_TYPE = {'extra_specs': FAKE_EXTRA_SPECS_CG}
SRC_CG_VOLUME_ID = 'bd21d11b-c765-4c68-896c-6b07f63cfcb6'
SRC_CG_VOLUME_NAME = 'volume-' + SRC_CG_VOLUME_ID
volume_src_cg = {'name': SRC_CG_VOLUME_NAME,
'id': SRC_CG_VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': 'FAKE_CINDER_HOST',
'volume_type': None,
'volume_type_id': None}
VOLUME_TYPE_ID_CG = 'd03338a9-9115-48a3-8dfc-44444444444'
VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
admin_context = context.get_admin_context()
VOLUME_NAME = 'volume-' + VOLUME_ID
FAKE_GROUP = fake_group.fake_group_obj(
admin_context, id=fake.GROUP_ID, status='available')
volume_cg = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'provider_location': 12,
'size': 2,
'host': 'FAKE_CINDER_HOST',
'volume_type': 'cg_type',
'volume_type_id': VOLUME_TYPE_ID_CG}
FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_CG = {
'clone': False,
'name': "testvolume-cg"}
FAKE_GET_VOLID_INFO_RESPONSE = {'vol_id': fake.VOLUME_ID}
FAKE_GET_VOLCOLL_INFO_RESPONSE = {'volcoll_id': fake.VOLUME2_ID}
FAKE_ASSOCIATE_VOLCOLL_INFO_RESPONSE = {'vol_id': fake.VOLUME_ID,
'volcoll_id': fake.VOLUME2_ID}
FAKE_GENERIC_POSITIVE_RESPONSE = ""
FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE = "Object has a clone"
FAKE_TYPE_ID = fake.VOLUME_TYPE_ID
FAKE_TYPE_ID_NEW = fake.VOLUME_TYPE2_ID
FAKE_POOL_ID = fake.GROUP_ID
FAKE_PERFORMANCE_POLICY_ID = fake.OBJECT_ID
NIMBLE_MANAGEMENT_IP = "10.18.108.55"
NIMBLE_SAN_LOGIN = "nimble"
NIMBLE_SAN_PASS = "nimble_pass"
SRC_CONSIS_GROUP_ID = '7d7dfa02-ac6e-48cb-96af-8a0cd3008d47'
FAKE_SRC_GROUP = fake_group.fake_group_obj(
admin_context, id = SRC_CONSIS_GROUP_ID, status = 'available')
def create_configuration(username, password, ip_address,
                         pool_name=None, subnet_label=None,
                         thin_provision=True):
    """Build a mocked backend configuration object for driver tests.

    Returns a ``mock.Mock`` whose attributes mirror the Nimble driver's
    configuration options; ``safe_get()`` always resolves to 'NIMBLE'.
    """
    config = mock.Mock()
    settings = {
        'san_login': username,
        'san_password': password,
        'san_ip': ip_address,
        'san_thin_provision': thin_provision,
        'nimble_pool_name': pool_name,
        'nimble_subnet_label': subnet_label,
    }
    for option, value in settings.items():
        setattr(config, option, value)
    # Any option fetched through safe_get() yields the backend name.
    config.safe_get.return_value = 'NIMBLE'
    return config
class NimbleDriverBaseTestCase(test.TestCase):
    """Base Class for the NimbleDriver Tests."""
    def setUp(self):
        super(NimbleDriverBaseTestCase, self).setUp()
        # Populated by the client_mock_decorator* helpers below before each
        # wrapped test method runs.
        self.mock_client_service = None
        self.mock_client_class = None
        self.driver = None
    @staticmethod
    def client_mock_decorator(configuration):
        # Decorator factory: installs a MagicMock REST client, constructs a
        # NimbleISCSIDriver with the given configuration, performs
        # do_setup()/login(), then invokes the wrapped test method.
        def client_mock_wrapper(func):
            def inner_client_mock(
                    self, mock_client_class, mock_urllib2, *args, **kwargs):
                self.mock_client_class = mock_client_class
                self.mock_client_service = mock.MagicMock(name='Client')
                self.mock_client_class.return_value = self.mock_client_service
                self.driver = nimble.NimbleISCSIDriver(
                    configuration=configuration)
                # NOTE(review): the first assignment is immediately
                # overwritten, so mock_urllib2.post's return value is never
                # actually configured -- confirm whether that was intended.
                mock_login_response = mock_urllib2.post.return_value
                mock_login_response = mock.MagicMock()
                mock_login_response.status_code.return_value = http_client.OK
                mock_login_response.json.return_value = (
                    FAKE_LOGIN_POST_RESPONSE)
                self.driver.do_setup(context.get_admin_context())
                self.driver.APIExecutor.login()
                func(self, *args, **kwargs)
            return inner_client_mock
        return client_mock_wrapper
    @staticmethod
    def client_mock_decorator_fc(configuration):
        # Same wiring as client_mock_decorator, but builds a NimbleFCDriver.
        def client_mock_wrapper(func):
            def inner_client_mock(
                    self, mock_client_class, mock_urllib2, *args, **kwargs):
                self.mock_client_class = mock_client_class
                self.mock_client_service = mock.MagicMock(name='Client')
                self.mock_client_class.return_value = (
                    self.mock_client_service)
                self.driver = nimble.NimbleFCDriver(
                    configuration=configuration)
                # NOTE(review): same immediately-overwritten assignment as in
                # client_mock_decorator above -- confirm intent.
                mock_login_response = mock_urllib2.post.return_value
                mock_login_response = mock.MagicMock()
                mock_login_response.status_code.return_value = http_client.OK
                mock_login_response.json.return_value = (
                    FAKE_LOGIN_POST_RESPONSE)
                self.driver.do_setup(context.get_admin_context())
                self.driver.APIExecutor.login()
                func(self, *args, **kwargs)
            return inner_client_mock
        return client_mock_wrapper
    @staticmethod
    def client_mock_decorator_nimble_api(username, password, ip, verify):
        # Variant that exercises NimbleRestAPIExecutor directly (no driver);
        # note it does not call login() before running the test.
        def client_mock_wrapper(func):
            def inner_client_mock(
                    self, mock_client_class, mock_urllib2, *args, **kwargs):
                self.mock_client_class = mock_client_class
                self.mock_client_service = mock.MagicMock(name='Client')
                self.mock_client_class.return_value = (
                    self.mock_client_service)
                self.driver = nimble.NimbleRestAPIExecutor(
                    username=username, password=password, ip=ip, verify=verify)
                mock_login_response = mock_urllib2.post.return_value
                mock_login_response = mock.MagicMock()
                mock_login_response.status_code.return_value = http_client.OK
                mock_login_response.json.return_value = (
                    FAKE_LOGIN_POST_RESPONSE)
                func(self, *args, **kwargs)
            return inner_client_mock
        return client_mock_wrapper
class NimbleDriverLoginTestCase(NimbleDriverBaseTestCase):
    """Tests the do_setup/login flow of the driver."""
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        "nimble", "nimble_pass", "10.18.108.55", 'default', '*'))
    def test_do_setup_positive(self):
        # The decorator already ran do_setup()/login(); just verify the
        # login call reached the mocked client.
        expected_call_list = [mock.call.login()]
        self.mock_client_service.assert_has_calls(expected_call_list)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_expire_session_id(self):
        expected_call_list = [mock.call.login()]
        self.mock_client_service.assert_has_calls(expected_call_list)
        # A GET after setup should trigger a re-login before the request.
        self.driver.APIExecutor.get("groups")
        expected_call_list = [mock.call.get_group_info(),
                              mock.call.login(),
                              mock.call.get("groups")]
        # assertEqual(expected, actual) -- argument order now matches the
        # convention used by the other tests in this file.
        self.assertEqual(
            expected_call_list,
            self.mock_client_service.method_calls)
class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
    """Tests volume related APIs."""
    # Every test installs a mocked REST client through the decorators
    # defined on NimbleDriverBaseTestCase, so no real array is contacted.
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:encryption': 'yes'}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    def test_create_volume_positive(self):
        # create_volume must surface the iSCSI portal + IQN from netconfig
        # as the provider_location.
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.assertEqual({
            'provider_location': '172.18.108.21:3260 iqn.test',
            'provider_auth': None},
            self.driver.create_volume({'name': 'testvolume',
                                       'size': 1,
                                       'volume_type_id': None,
                                       'display_name': '',
                                       'display_description': ''}))
        self.mock_client_service.create_vol.assert_called_once_with(
            {'name': 'testvolume',
             'size': 1,
             'volume_type_id': None,
             'display_name': '',
             'display_description': ''},
            'default',
            False,
            'iSCSI',
            False)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:encryption': 'yes'}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    def test_create_volume_with_unicode(self):
        # Same as the positive case but with a unicode display_name.
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.assertEqual({
            'provider_location': '172.18.108.21:3260 iqn.test',
            'provider_auth': None},
            self.driver.create_volume({'name': 'testvolume',
                                       'size': 1,
                                       'volume_type_id': None,
                                       'display_name': u'unicode_name',
                                       'display_description': ''}))
        self.mock_client_service.create_vol.assert_called_once_with(
            {'name': 'testvolume',
             'size': 1,
             'volume_type_id': None,
             'display_name': u'unicode_name',
             'display_description': ''},
            'default',
            False,
            'iSCSI',
            False)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:encryption': 'yes',
                           'multiattach': 'false'}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_encryption_positive(self):
        self.mock_client_service._execute_create_vol.return_value = (
            FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_ENCRYPTION)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        volume = {'name': 'testvolume-encryption',
                  'size': 1,
                  'volume_type_id': FAKE_TYPE_ID,
                  'display_name': '',
                  'display_description': ''}
        self.assertEqual({
            'provider_location': '172.18.108.21:3260 iqn.test',
            'provider_auth': None},
            self.driver.create_volume(volume))
        self.mock_client_service.create_vol.assert_called_once_with(
            {'name': 'testvolume-encryption',
             'size': 1,
             'volume_type_id': FAKE_TYPE_ID,
             'display_name': '',
             'display_description': '',
             },
            'default',
            False,
            'iSCSI',
            False)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'VMware ESX',
                           'nimble:encryption': 'no',
                           'multiattach': 'false'}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_perfpolicy_positive(self):
        self.mock_client_service._execute_create_vol.return_value = (
            FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERF_POLICY)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.assertEqual(
            {'provider_location': '172.18.108.21:3260 iqn.test',
             'provider_auth': None},
            self.driver.create_volume({'name': 'testvolume-perfpolicy',
                                       'size': 1,
                                       'volume_type_id': FAKE_TYPE_ID,
                                       'display_name': '',
                                       'display_description': ''}))
        self.mock_client_service.create_vol.assert_called_once_with(
            {'name': 'testvolume-perfpolicy',
             'size': 1,
             'volume_type_id': FAKE_TYPE_ID,
             'display_name': '',
             'display_description': '',
             },
            'default',
            False,
            'iSCSI',
            False)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:encryption': 'no',
                           'multiattach': 'true'}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_multi_initiator_positive(self):
        self.mock_client_service._execute_create_vol.return_value = (
            FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_MULTI_INITIATOR)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.assertEqual(
            {'provider_location': '172.18.108.21:3260 iqn.test',
             'provider_auth': None},
            self.driver.create_volume({'name': 'testvolume-multi-initiator',
                                       'size': 1,
                                       'volume_type_id': FAKE_TYPE_ID,
                                       'display_name': '',
                                       'display_description': ''}))
        self.mock_client_service.create_vol.assert_called_once_with(
            {'name': 'testvolume-multi-initiator',
             'size': 1,
             'volume_type_id': FAKE_TYPE_ID,
             'display_name': '',
             'display_description': '',
             },
            'default',
            False,
            'iSCSI',
            False)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:encryption': 'no',
                           'nimble:dedupe': 'true'}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_dedupe_positive(self):
        self.mock_client_service._execute_create_vol.return_value = (
            FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_DEDUPE)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.assertEqual(
            {'provider_location': '172.18.108.21:3260 iqn.test',
             'provider_auth': None},
            self.driver.create_volume({'name': 'testvolume-dedupe',
                                       'size': 1,
                                       'volume_type_id': FAKE_TYPE_ID,
                                       'display_name': '',
                                       'display_description': ''}))
        self.mock_client_service.create_vol.assert_called_once_with(
            {'name': 'testvolume-dedupe',
             'size': 1,
             'volume_type_id': FAKE_TYPE_ID,
             'display_name': '',
             'display_description': '',
             },
            'default',
            False,
            'iSCSI',
            False)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:iops-limit': '1024'}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_qos_positive(self):
        self.mock_client_service._execute_create_vol.return_value = (
            FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_QOS)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.assertEqual(
            {'provider_location': '172.18.108.21:3260 iqn.test',
             'provider_auth': None},
            self.driver.create_volume({'name': 'testvolume-qos',
                                       'size': 1,
                                       'volume_type_id': FAKE_TYPE_ID,
                                       'display_name': '',
                                       'display_description': ''}))
        self.mock_client_service.create_vol.assert_called_once_with(
            {'name': 'testvolume-qos',
             'size': 1,
             'volume_type_id': FAKE_TYPE_ID,
             'display_name': '',
             'display_description': '',
             },
            'default',
            False,
            'iSCSI',
            False)
    # Failure paths: get_vol_info is given a module-level side_effect
    # (presumably an exception fixture -- defined earlier in this file) and
    # the driver must raise VolumeBackendAPIException.
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:encryption': 'no',
                           'multiattach': 'false'}))
    def test_create_volume_negative(self):
        self.mock_client_service.get_vol_info.side_effect = (
            FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume,
            {'name': 'testvolume',
             'size': 1,
             'volume_type_id': FAKE_TYPE_ID,
             'display_name': '',
             'display_description': ''})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_encryption_negative(self):
        self.mock_client_service.get_vol_info.side_effect = (
            FAKE_CREATE_VOLUME_NEGATIVE_ENCRYPTION)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume,
            {'name': 'testvolume-encryption',
             'size': 1,
             'volume_type_id': None,
             'display_name': '',
             'display_description': ''})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_perfpolicy_negative(self):
        self.mock_client_service.get_vol_info.side_effect = (
            FAKE_CREATE_VOLUME_NEGATIVE_PERFPOLICY)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume,
            {'name': 'testvolume-perfpolicy',
             'size': 1,
             'volume_type_id': None,
             'display_name': '',
             'display_description': ''})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_dedupe_negative(self):
        self.mock_client_service.get_vol_info.side_effect = (
            FAKE_CREATE_VOLUME_NEGATIVE_DEDUPE)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume,
            {'name': 'testvolume-dedupe',
             'size': 1,
             'volume_type_id': None,
             'display_name': '',
             'display_description': ''})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:iops-limit': '200'}))
    def test_create_volume_qos_negative(self):
        self.mock_client_service.get_vol_info.side_effect = (
            FAKE_CREATE_VOLUME_NEGATIVE_QOS)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume,
            {'name': 'testvolume-qos',
             'size': 1,
             'volume_type_id': None,
             'display_name': '',
             'display_description': ''})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock(
        return_value=['', '']))
    def test_delete_volume(self):
        # Normal delete: the volume is taken offline and then removed.
        self.mock_client_service.online_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.delete_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.driver.delete_volume({'name': 'testvolume'})
        expected_calls = [mock.call.online_vol(
            'testvolume', False),
            mock.call.delete_vol('testvolume')]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock(
        return_value=['', '']))
    def test_delete_volume_with_clone(self):
        # Per the expected call list, delete_vol is attempted three times
        # before the driver gives up, puts the volume back online and
        # raises VolumeIsBusy.
        self.mock_client_service.delete_vol.side_effect = \
            nimble.NimbleAPIException(FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE)
        self.assertRaises(
            exception.VolumeIsBusy,
            self.driver.delete_volume,
            {'name': 'testvolume'})
        expected_calls = [
            mock.call.login(),
            mock.call.online_vol('testvolume', False),
            mock.call.delete_vol('testvolume'),
            mock.call.delete_vol('testvolume'),
            mock.call.delete_vol('testvolume'),
            mock.call.online_vol('testvolume', True)]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock(
        return_value=['test-backup-snap', 'volume-' + fake.VOLUME_ID]))
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host')
    def test_delete_volume_with_backup(self, mock_volume_list):
        # When the volume is a backup clone, the parent backup snapshot is
        # taken offline and deleted after the volume itself.
        mock_volume_list.return_value = []
        self.mock_client_service.online_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.delete_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.online_snap.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.delete_snap.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.driver.delete_volume({'name': 'testvolume'})
        expected_calls = [mock.call.online_vol(
            'testvolume', False),
            mock.call.delete_vol('testvolume'),
            mock.call.online_snap('volume-' + fake.VOLUME_ID,
                                  False,
                                  'test-backup-snap'),
            mock.call.delete_snap('volume-' + fake.VOLUME_ID,
                                  'test-backup-snap')]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_extend_volume(self):
        self.mock_client_service.edit_vol.return_value = (
            FAKE_CREATE_VOLUME_POSITIVE_RESPONSE)
        self.driver.extend_volume({'name': 'testvolume'}, 5)
        self.mock_client_service.edit_vol.assert_called_once_with(
            'testvolume', FAKE_EXTEND_VOLUME_PARAMS)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID,
                                 return_value={
                                     'nimble:perfpol-name': 'default',
                                     'nimble:encryption': 'yes',
                                     'multiattach': False,
                                     'nimble:iops-limit': '1024'}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*', False))
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host')
    @mock.patch(NIMBLE_RANDOM)
    def test_create_cloned_volume(self, mock_random, mock_volume_list):
        # random.sample is pinned so the generated clone-snapshot name is
        # deterministic for the call assertions below.
        mock_random.sample.return_value = fake.VOLUME_ID
        mock_volume_list.return_value = []
        self.mock_client_service.snap_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.clone_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        volume = obj_volume.Volume(context.get_admin_context(),
                                   id=fake.VOLUME_ID,
                                   size=5.0,
                                   _name_id=None,
                                   display_name='',
                                   volume_type_id=FAKE_TYPE_ID
                                   )
        src_volume = obj_volume.Volume(context.get_admin_context(),
                                       id=fake.VOLUME2_ID,
                                       _name_id=None,
                                       size=5.0)
        self.assertEqual({
            'provider_location': '172.18.108.21:3260 iqn.test',
            'provider_auth': None},
            self.driver.create_cloned_volume(volume, src_volume))
        expected_calls = [mock.call.snap_vol(
            {'volume_name': "volume-" + fake.VOLUME2_ID,
             'name': 'openstack-clone-volume-' + fake.VOLUME_ID + "-" +
                     fake.VOLUME_ID,
             'volume_size': src_volume['size'],
             'display_name': volume['display_name'],
             'display_description': ''}),
            mock.call.clone_vol(volume,
                                {'volume_name': "volume-" + fake.VOLUME2_ID,
                                 'name': 'openstack-clone-volume-' +
                                         fake.VOLUME_ID + "-" +
                                         fake.VOLUME_ID,
                                 'volume_size': src_volume['size'],
                                 'display_name': volume['display_name'],
                                 'display_description': ''},
                                True, False, 'iSCSI', 'default')]
        self.mock_client_service.assert_has_calls(expected_calls)
    # manage/unmanage flows for pre-existing array volumes.
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_manage_volume_positive(self):
        # Managing renames the array volume, tags it with the openstack
        # agent_type and brings it online.
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE_MANAGE)
        self.mock_client_service.online_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.edit_vol.return_value = (
            FAKE_CREATE_VOLUME_POSITIVE_RESPONSE)
        self.assertEqual({
            'provider_location': '172.18.108.21:3260 iqn.test',
            'provider_auth': None},
            self.driver.manage_existing({'name': 'volume-abcdef',
                                         'id': fake.VOLUME_ID,
                                         'agent_type': None},
                                        {'source-name': 'test-vol'}))
        expected_calls = [mock.call.edit_vol(
            'test-vol', {'data': {'agent_type': 'openstack',
                                  'name': 'volume-abcdef'}}),
            mock.call.online_vol('volume-abcdef', True)]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_manage_volume_which_is_online(self):
        # An already-online array volume cannot be managed.
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_ONLINE)
        self.assertRaises(
            exception.InvalidVolume,
            self.driver.manage_existing,
            {'name': 'volume-abcdef'},
            {'source-name': 'test-vol'})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_manage_volume_get_size(self):
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_ONLINE)
        size = self.driver.manage_existing_get_size(
            {'name': 'volume-abcdef'}, {'source-name': 'test-vol'})
        self.assertEqual(2, size)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_manage_volume_with_improper_ref(self):
        # Only 'source-name' references are accepted.
        self.assertRaises(
            exception.ManageExistingInvalidReference,
            self.driver.manage_existing,
            {'name': 'volume-abcdef'},
            {'source-id': 'test-vol'})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_manage_volume_with_nonexistant_volume(self):
        self.mock_client_service.get_vol_info.side_effect = (
            FAKE_VOLUME_INFO_NEGATIVE_RESPONSE)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.manage_existing,
            {'name': 'volume-abcdef'},
            {'source-name': 'test-vol'})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_manage_volume_with_wrong_agent_type(self):
        # FAKE_GET_VOL_INFO_RESPONSE reports agent_type 'openstack', i.e.
        # the volume is already managed.
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.assertRaises(
            exception.ManageExistingAlreadyManaged,
            self.driver.manage_existing,
            {'id': 'abcdef', 'name': 'volume-abcdef'},
            {'source-name': 'test-vol'})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_unmanage_volume_positive(self):
        # Unmanaging clears the agent_type and takes the volume offline.
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.edit_vol.return_value = (
            FAKE_CREATE_VOLUME_POSITIVE_RESPONSE)
        self.driver.unmanage({'name': 'volume-abcdef'})
        expected_calls = [
            mock.call.edit_vol(
                'volume-abcdef',
                {'data': {'agent_type': 'none'}}),
            mock.call.online_vol('volume-abcdef', False)]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_unmanage_with_invalid_volume(self):
        self.mock_client_service.get_vol_info.side_effect = (
            FAKE_VOLUME_INFO_NEGATIVE_RESPONSE)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.unmanage,
            {'name': 'volume-abcdef'}
        )
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_unmanage_with_invalid_agent_type(self):
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_ONLINE)
        self.assertRaises(
            exception.InvalidVolume,
            self.driver.unmanage,
            {'name': 'volume-abcdef'}
        )
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type',
                       mock.Mock(type_id=FAKE_TYPE_ID_NEW,
                                 return_value={
                                     'id': FAKE_TYPE_ID_NEW,
                                     'extra_specs':
                                     {'nimble:perfpol-name': 'default',
                                      'nimble:encryption': 'yes',
                                      'multiattach': False,
                                      'nimble:iops-limit': '1024'}}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_retype(self):
        # retype() returns (migrated, model_update); here the driver
        # handles the retype itself with no model update.
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_ONLINE)
        retype, update = self.driver.retype(None, FAKE_GET_VOL_INFO_ONLINE,
                                            volume_types.get_volume_type(
                                                None,
                                                FAKE_TYPE_ID_NEW),
                                            None, None)
        self.assertTrue(retype)
        self.assertIsNone(update)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_ISCSI_DRIVER)
    @mock.patch.object(nimble.NimbleRestAPIExecutor, 'login')
    @mock.patch.object(nimble.NimbleRestAPIExecutor,
                       'get_performance_policy_id')
    @mock.patch.object(nimble.NimbleRestAPIExecutor, 'get_pool_info')
    @mock.patch.object(nimble.NimbleRestAPIExecutor, 'get_folder_id')
    @NimbleDriverBaseTestCase.client_mock_decorator_nimble_api(
        'nimble', 'nimble_pass', '10.18.108.55', 'False')
    def test_nimble_extraspecs_retype(self, mock_folder,
                                      mock_pool, mock_perf_id,
                                      mock_login):
        mock_folder.return_value = None
        mock_pool.return_value = None
        mock_perf_id.return_value = None
        mock_login.return_value = None
        data = self.driver.get_valid_nimble_extraspecs(
            FAKE_EXTRA_SPECS,
            FAKE_GET_VOL_INFO_RETYPE)
        self.assertTrue(data['multi_initiator'])
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_get_volume_stats(self):
        # Capacity figures derive from FAKE_POSITIVE_GROUP_INFO_RESPONSE.
        self.mock_client_service.get_group_info.return_value = (
            FAKE_POSITIVE_GROUP_INFO_RESPONSE)
        expected_res = {'driver_version': DRIVER_VERSION,
                        'vendor_name': 'Nimble',
                        'volume_backend_name': 'NIMBLE',
                        'storage_protocol': 'iSCSI',
                        'pools': [{'pool_name': 'NIMBLE',
                                   'total_capacity_gb': 7466.30419921875,
                                   'free_capacity_gb': 94.16706105787307,
                                   'reserved_percentage': 0,
                                   'QoS_support': False,
                                   'multiattach': True,
                                   'consistent_group_snapshot_enabled': True}]}
        self.assertEqual(
            expected_res,
            self.driver.get_volume_stats(refresh=True))
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_is_volume_backup_clone(self):
        # Returns (parent snapshot name, parent volume name) when the
        # volume was cloned from a backup snapshot.
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_BACKUP_RESPONSE)
        self.mock_client_service.get_snap_info_by_id.return_value = (
            FAKE_GET_SNAP_INFO_BACKUP_RESPONSE)
        self.mock_client_service.get_snap_info_detail.return_value = (
            FAKE_GET_SNAP_INFO_BACKUP_RESPONSE)
        self.mock_client_service.get_volume_name.return_value = (
            'volume-' + fake.VOLUME2_ID)
        volume = obj_volume.Volume(context.get_admin_context(),
                                   id=fake.VOLUME_ID,
                                   _name_id=None)
        self.assertEqual(("test-backup-snap", "volume-" + fake.VOLUME2_ID),
                         self.driver.is_volume_backup_clone(volume))
        expected_calls = [
            mock.call.get_vol_info('volume-' + fake.VOLUME_ID),
            mock.call.get_snap_info_by_id('test-backup-snap',
                                          'volume-' + fake.VOLUME2_ID)
        ]
        self.mock_client_service.assert_has_calls(expected_calls)
class NimbleDriverSnapshotTestCase(NimbleDriverBaseTestCase):
    """Tests snapshot related api's."""
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_snapshot(self):
        # create_snapshot() should forward the snapshot dict unchanged to
        # the backend snap_vol API.
        self.mock_client_service.snap_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.driver.create_snapshot(
            {'volume_name': 'testvolume',
             'name': 'testvolume-snap1',
             'display_name': ''})
        self.mock_client_service.snap_vol.assert_called_once_with(
            {'volume_name': 'testvolume',
             'name': 'testvolume-snap1',
             'display_name': ''})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_delete_snapshot(self):
        # delete_snapshot() must offline the snapshot before deleting it,
        # in that order.
        self.mock_client_service.online_snap.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.delete_snap.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.driver.delete_snapshot(
            {'volume_name': 'testvolume',
             'name': 'testvolume-snap1'})
        expected_calls = [mock.call.online_snap(
            'testvolume', False, 'testvolume-snap1'),
            mock.call.delete_snap('testvolume',
                                  'testvolume-snap1')]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       mock.Mock(type_id=FAKE_TYPE_ID, return_value={
                           'nimble:perfpol-name': 'default',
                           'nimble:encryption': 'yes',
                           'multiattach': False}))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_create_volume_from_snapshot(self):
        # Cloning a 1 GB snapshot into a 2 GB volume: the driver clones
        # first, then grows the clone via edit_vol (2 -> 2048, presumably
        # GiB to MiB -- consistent with the expected call below).
        self.mock_client_service.clone_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_RESPONSE)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.assertEqual({
            'provider_location': '172.18.108.21:3260 iqn.test',
            'provider_auth': None},
            self.driver.create_volume_from_snapshot(
                {'name': 'clone-testvolume',
                 'size': 2,
                 'volume_type_id': FAKE_TYPE_ID},
                {'volume_name': 'testvolume',
                 'name': 'testvolume-snap1',
                 'volume_size': 1}))
        expected_calls = [
            mock.call.clone_vol(
                {'name': 'clone-testvolume',
                 'volume_type_id': FAKE_TYPE_ID,
                 'size': 2},
                {'volume_name': 'testvolume',
                 'name': 'testvolume-snap1',
                 'volume_size': 1},
                False,
                False,
                'iSCSI',
                'default'),
            mock.call.edit_vol('clone-testvolume',
                               {'data': {'size': 2048,
                                         'snap_limit': sys.maxsize,
                                         'warn_level': 80,
                                         'reserve': 0,
                                         'limit': 100}})]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_revert_to_snapshot(self):
        # Revert sequence: offline the volume, restore the base snapshot,
        # then bring the volume back online.
        self.mock_client_service.online_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.volume_restore.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_REVERT)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.mock_client_service.get_snap_info.return_value = (
            FAKE_SNAP_INFO_REVERT)
        ctx = context.get_admin_context()
        self.driver.revert_to_snapshot(ctx,
                                       {'id': fake.VOLUME_ID,
                                        'size': 1,
                                        'name': 'testvolume'},
                                       {'id': fake.SNAPSHOT2_ID,
                                        'name': 'testsnap',
                                        'volume_id': fake.VOLUME_ID})
        expected_calls = [mock.call.online_vol('testvolume', False),
                          mock.call.volume_restore('testvolume',
                          {'data': {'id': fake.VOLUME_ID,
                                    'base_snap_id': fake.SNAPSHOT2_ID}}),
                          mock.call.online_vol('testvolume', True)]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_revert_to_snapshot_negative(self):
        # A backend failure from volume_restore (side_effect) must surface
        # as VolumeBackendAPIException.
        self.mock_client_service.online_vol.return_value = (
            FAKE_GENERIC_POSITIVE_RESPONSE)
        self.mock_client_service.volume_restore.side_effect = (
            FAKE_VOLUME_RESTORE_NEGATIVE_RESPONSE)
        self.mock_client_service.get_vol_info.return_value = (
            FAKE_GET_VOL_INFO_REVERT)
        self.mock_client_service.get_netconfig.return_value = (
            FAKE_POSITIVE_NETCONFIG_RESPONSE)
        self.mock_client_service.get_snap_info.return_value = (
            FAKE_SNAP_INFO_REVERT)
        ctx = context.get_admin_context()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.revert_to_snapshot, ctx,
                          {'id': fake.VOLUME_ID,
                           'size': 1,
                           'name': 'testvolume'},
                          {'id': fake.SNAPSHOT_ID,
                           'name': 'testsnap',
                           'volume_id': fake.VOLUME_ID})
class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase):
    """Tests Connection related api's."""
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_initialize_connection_igroup_exist(self):
        # iSCSI attach when an igroup for the initiator already exists:
        # the returned connection info is built from provider_location
        # ('<portal> <iqn>').
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        expected_res = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_discovered': False,
                'discard': True,
                'volume_id': 12,
                'target_iqn': '13',
                'target_lun': 0,
                'target_portal': '12'}}
        self.assertEqual(
            expected_res,
            self.driver.initialize_connection(
                {'name': 'test-volume',
                 'provider_location': '12 13',
                 'id': 12},
                {'initiator': 'test-initiator1'}))
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_ISCSI_DRIVER + '._get_data_ips')
    @mock.patch(NIMBLE_ISCSI_DRIVER + ".get_lun_number")
    @mock.patch(NIMBLE_ISCSI_DRIVER + '._get_gst_for_group')
    def test_initialize_connection_group_scoped_target(self, mock_gst_name,
                                                       mock_lun_number,
                                                       mock_data_ips):
        # Group-scoped target: one iqn/lun entry is returned per data IP.
        mock_data_ips.return_value = ['12', '13']
        mock_lun_number.return_value = 0
        mock_gst_name.return_value = "group_target_name"
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        expected_res = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_discovered': False,
                'discard': True,
                'volume_id': 12,
                'target_iqns': ['group_target_name', 'group_target_name'],
                'target_luns': [0, 0],
                'target_portals': ['12', '13']}}
        self.assertEqual(
            expected_res,
            self.driver.initialize_connection(
                {'name': 'test-volume',
                 'provider_location': '12 group_target_name',
                 'id': 12},
                {'initiator': 'test-initiator1'}))
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_initialize_connection_live_migration(self):
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        expected_res = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_discovered': False,
                'discard': True,
                'volume_id': 12,
                'target_iqn': '13',
                'target_lun': 0,
                'target_portal': '12'}}
        self.assertEqual(
            expected_res,
            self.driver.initialize_connection(
                {'name': 'test-volume',
                 'provider_location': '12 13',
                 'id': 12},
                {'initiator': 'test-initiator1'}))
        self.driver.initialize_connection(
            {'name': 'test-volume',
             'provider_location': '12 13',
             'id': 12},
            {'initiator': 'test-initiator1'})
        # 2 or more calls to initialize connection and add_acl for live
        # migration to work
        expected_calls = [
            mock.call.get_initiator_grp_list(),
            mock.call.add_acl({'name': 'test-volume',
                               'provider_location': '12 13',
                               'id': 12},
                              'test-igrp1'),
            mock.call.get_initiator_grp_list(),
            mock.call.add_acl({'name': 'test-volume',
                               'provider_location': '12 13',
                               'id': 12},
                              'test-igrp1')]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_FC_DRIVER + ".get_lun_number")
    @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array")
    def test_initialize_connection_fc_igroup_exist(self, mock_wwpns,
                                                   mock_lun_number):
        # FC attach with an existing igroup: returns target WWNs and an
        # initiator->target map for the zone manager.
        mock_lun_number.return_value = 13
        mock_wwpns.return_value = ["1111111111111101"]
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE_FC)
        expected_res = {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'target_lun': 13,
                'target_discovered': True,
                'discard': True,
                'target_wwn': ["1111111111111101"],
                'initiator_target_map': {'1000000000000000':
                                         ['1111111111111101']}}}
        self.assertEqual(
            expected_res,
            self.driver.initialize_connection(
                {'name': 'test-volume',
                 'provider_location': 'array1',
                 'id': 12},
                {'initiator': 'test-initiator1',
                 'wwpns': ['1000000000000000']}))
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_RANDOM)
    def test_initialize_connection_igroup_not_exist(self, mock_random):
        # No igroup for 'test-initiator3': the driver creates one with a
        # random suffix (random is mocked for determinism).
        mock_random.sample.return_value = 'abcdefghijkl'
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        expected_res = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_discovered': False,
                'discard': True,
                'target_lun': 0,
                'volume_id': 12,
                'target_iqn': '13',
                'target_portal': '12'}}
        self.assertEqual(
            expected_res,
            self.driver.initialize_connection(
                {'name': 'test-volume',
                 'provider_location': '12 13',
                 'id': 12},
                {'initiator': 'test-initiator3'}))
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array")
    @mock.patch(NIMBLE_FC_DRIVER + ".get_lun_number")
    @mock.patch(NIMBLE_RANDOM)
    def test_initialize_connection_fc_igroup_not_exist(self, mock_random,
                                                       mock_lun_number,
                                                       mock_wwpns):
        # FC attach with no pre-existing igroup: an 'openstack-<random>'
        # igroup is created and the initiator WWPN is added to it.
        mock_random.sample.return_value = 'abcdefghijkl'
        mock_lun_number.return_value = 13
        mock_wwpns.return_value = ["1111111111111101"]
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE_FC)
        expected_res = {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'target_lun': 13,
                'target_discovered': True,
                'discard': True,
                'target_wwn': ["1111111111111101"],
                'initiator_target_map': {'1000000000000000':
                                         ['1111111111111101']}}}
        self.driver._create_igroup_for_initiator("test-initiator3",
                                                 [1111111111111101])
        self.assertEqual(
            expected_res,
            self.driver.initialize_connection(
                {'name': 'test-volume',
                 'provider_location': 'array1',
                 'id': 12},
                {'initiator': 'test-initiator3',
                 'wwpns': ['1000000000000000']}))
        expected_calls = [mock.call.create_initiator_group_fc(
            'openstack-abcdefghijkl'),
            mock.call.add_initiator_to_igroup_fc('openstack-abcdefghijkl',
                                                 1111111111111101)]
        self.mock_client_service.assert_has_calls(expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_terminate_connection_positive(self):
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        ctx = context.get_admin_context()
        volume = fake_volume.fake_volume_obj(
            ctx, name='test-volume',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, multiattach=False)
        self.driver.terminate_connection(
            volume,
            {'initiator': 'test-initiator1'})
        expected_calls = [mock.call._get_igroupname_for_initiator(
            'test-initiator1'),
            mock.call.remove_acl({'name': 'test-volume'},
                                 'test-igrp1')]
        # NOTE(review): assert_has_calls() is being passed the mock's own
        # method_calls as the ``calls`` argument, which pushes
        # expected_calls into ``any_order`` -- the assertion is therefore
        # vacuous and expected_calls is never actually verified.
        # TODO: confirm intent and rewrite as
        # self.mock_client_service.assert_has_calls(expected_calls)
        # (note expected_calls also mixes in a driver-internal call).
        self.mock_client_service.assert_has_calls(
            self.mock_client_service.method_calls,
            expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_terminate_connection_without_connector(self):
        # connector=None means force-detach: all ACLs should be removed.
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        self.driver.terminate_connection(
            {'name': 'test-volume',
             'provider_location': '12 13',
             'id': 12},
            None)
        expected_calls = [mock.call._get_igroupname_for_initiator(
            'test-initiator1'),
            mock.call.remove_all_acls({'name': 'test-volume'})]
        # NOTE(review): same vacuous assert_has_calls pattern as in
        # test_terminate_connection_positive above.
        self.mock_client_service.assert_has_calls(
            self.mock_client_service.method_calls,
            expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array")
    def test_terminate_connection_positive_fc(self, mock_wwpns):
        mock_wwpns.return_value = ["1111111111111101"]
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE_FC)
        ctx = context.get_admin_context()
        volume = fake_volume.fake_volume_obj(
            ctx, name='test-volume',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=14, multiattach=False)
        self.driver.terminate_connection(
            volume,
            {'initiator': 'test-initiator1',
             'wwpns': ['1000000000000000']})
        expected_calls = [
            mock.call.get_igroupname_for_initiator_fc(
                "10:00:00:00:00:00:00:00"),
            mock.call.remove_acl({'name': 'test-volume'},
                                 'test-igrp1')]
        # NOTE(review): same vacuous assert_has_calls pattern as in
        # test_terminate_connection_positive above.
        self.mock_client_service.assert_has_calls(
            self.mock_client_service.method_calls,
            expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_terminate_connection_negative(self):
        # Unknown initiator ('test-initiator3' has no igroup) must raise
        # VolumeDriverException.
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        ctx = context.get_admin_context()
        volume = fake_volume.fake_volume_obj(
            ctx, name='test-volume',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, multiattach=False)
        self.assertRaises(
            exception.VolumeDriverException,
            self.driver.terminate_connection,
            volume,
            {'initiator': 'test-initiator3'})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array")
    def test_terminate_connection_negative_fc(self, mock_wwpns):
        # FC variant of the unknown-initiator failure path.
        mock_wwpns.return_value = ["1111111111111101"]
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE_FC)
        ctx = context.get_admin_context()
        volume = fake_volume.fake_volume_obj(
            ctx, name='test-volume',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, multiattach=False)
        self.assertRaises(
            exception.VolumeDriverException,
            self.driver.terminate_connection,
            volume,
            {'initiator': 'test-initiator3',
             'wwpns': ['1000000000000010']})
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_terminate_connection_multiattach(self):
        # Two live attachments on a multiattach volume: the ACL must NOT
        # be removed while another attachment remains.
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        ctx = context.get_admin_context()
        att_1 = fake_volume.volume_attachment_ovo(
            ctx, id=uuidutils.generate_uuid())
        att_2 = fake_volume.volume_attachment_ovo(
            ctx, id=uuidutils.generate_uuid())
        volume = fake_volume.fake_volume_obj(
            ctx, name='test-volume',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, multiattach=True)
        volume.volume_attachment.objects = [att_1, att_2]
        self.driver.terminate_connection(
            volume,
            {'initiator': 'test-initiator1'})
        self.mock_client_service.remove_acl.assert_not_called()
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_terminate_connection_multiattach_complete(self):
        # Last remaining attachment on a multiattach volume: the ACL is
        # removed.
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE)
        ctx = context.get_admin_context()
        att_1 = fake_volume.volume_attachment_ovo(
            ctx, id=uuidutils.generate_uuid())
        volume = fake_volume.fake_volume_obj(
            ctx, name='test-volume',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, multiattach=True)
        volume.volume_attachment.objects = [att_1]
        self.driver.terminate_connection(
            volume,
            {'initiator': 'test-initiator1'})
        expected_calls = [mock.call._get_igroupname_for_initiator(
            'test-initiator1'),
            mock.call.remove_acl({'name': 'test-volume'},
                                 'test-igrp1')]
        # NOTE(review): same vacuous assert_has_calls pattern as in
        # test_terminate_connection_positive above.
        self.mock_client_service.assert_has_calls(
            self.mock_client_service.method_calls,
            expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array")
    def test_terminate_connection_multiattach_fc(self, mock_wwpns):
        # FC variant: ACL retained while a second attachment exists.
        mock_wwpns.return_value = ["1111111111111101"]
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE_FC)
        ctx = context.get_admin_context()
        att_1 = fake_volume.volume_attachment_ovo(
            ctx, id=uuidutils.generate_uuid())
        att_2 = fake_volume.volume_attachment_ovo(
            ctx, id=uuidutils.generate_uuid())
        volume = fake_volume.fake_volume_obj(
            ctx, name='test-volume',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, multiattach=True)
        volume.volume_attachment.objects = [att_1, att_2]
        self.driver.terminate_connection(
            volume,
            {'initiator': 'test-initiator1',
             'wwpns': ['1000000000000000']})
        self.mock_client_service.remove_acl.assert_not_called()
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array")
    def test_terminate_connection_multiattach_complete_fc(self, mock_wwpns):
        # FC variant: last attachment gone, ACL removed.
        mock_wwpns.return_value = ["1111111111111101"]
        self.mock_client_service.get_initiator_grp_list.return_value = (
            FAKE_IGROUP_LIST_RESPONSE_FC)
        ctx = context.get_admin_context()
        att_1 = fake_volume.volume_attachment_ovo(
            ctx, id=uuidutils.generate_uuid())
        volume = fake_volume.fake_volume_obj(
            ctx, name='test-volume',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, multiattach=True)
        volume.volume_attachment.objects = [att_1]
        self.driver.terminate_connection(
            volume,
            {'initiator': 'test-initiator1',
             'wwpns': ['1000000000000000']})
        expected_calls = [
            mock.call.get_igroupname_for_initiator_fc(
                "10:00:00:00:00:00:00:00"),
            mock.call.remove_acl({'name': 'test-volume'},
                                 'test-igrp1')]
        # NOTE(review): same vacuous assert_has_calls pattern as in
        # test_terminate_connection_positive above.
        self.mock_client_service.assert_has_calls(
            self.mock_client_service.method_calls,
            expected_calls)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_create_group_positive(self, mock_is_cg):
        # Creating a consistency group succeeds and reports AVAILABLE.
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        self.group = fake_group.fake_group_obj(
            ctx, id = fake.GROUP_ID)
        model_update = self.driver.create_group(ctx, self.group)
        self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'])
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_create_generic_group(self, mock_is_cg):
        # Non-CG (generic) groups are not supported by the driver.
        mock_is_cg.return_value = False
        ctx = context.get_admin_context()
        self.group = fake_group.fake_group_obj(
            ctx, id=fake.GROUP_ID, status='available')
        self.assertRaises(
            NotImplementedError,
            self.driver.create_group,
            ctx, self.group
        )
        mock_is_cg.assert_called_once_with(self.group)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_delete_generic_group(self, mock_is_cg):
        # Deleting a non-CG group is likewise unsupported.
        mock_is_cg.return_value = False
        ctx = context.get_admin_context()
        group = mock.MagicMock()
        volumes = [fake_volume.fake_volume_obj(None)]
        self.assertRaises(
            NotImplementedError,
            self.driver.delete_group,
            ctx, group, volumes
        )
        mock_is_cg.assert_called_once()
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    def test_delete_group_positive(self, mock_get_specs, mock_is_cg):
        # Deleting a CG deletes the backing volume collection.
        mock_get_specs.return_value = '<is> True'
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        group = mock.MagicMock()
        volumes = [fake_volume.fake_volume_obj(None)]
        self.driver.delete_group(ctx, group, volumes)
        self.mock_client_service.delete_volcoll.assert_called_once()
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_update_group(self, mock_is_cg):
        # Updating a non-CG group is unsupported.
        mock_is_cg.return_value = False
        group = mock.MagicMock()
        ctx = context.get_admin_context()
        self.assertRaises(
            NotImplementedError,
            self.driver.update_group,
            ctx, group
        )
        mock_is_cg.assert_called_once_with(group)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    @mock.patch(NIMBLE_ISCSI_DRIVER + '.is_volume_group_snap_type')
    def test_update_group_positive(self, vol_gs_enable,
                                   mock_get_specs, mock_is_cg):
        # Adding/removing volumes from a CG succeeds and reports AVAILABLE.
        mock_get_specs.return_value = '<is> True'
        mock_is_cg.return_value = True
        self.mock_client_service.get_volume_id_by_name.return_value = (
            FAKE_GET_VOLID_INFO_RESPONSE)
        self.mock_client_service.get_volcoll_id_by_name.return_value = (
            FAKE_GET_VOLCOLL_INFO_RESPONSE)
        self.mock_client_service.associate_volcoll.return_value = (
            FAKE_GET_SNAP_INFO_BACKUP_RESPONSE)
        ctx = context.get_admin_context()
        group = mock.MagicMock()
        volume1 = fake_volume.fake_volume_obj(
            ctx, name='testvolume-cg1',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, consistency_group_snapshot_enabled=True)
        addvollist = [volume1]
        remvollist = [volume1]
        model_update = self.driver.update_group(
            ctx,
            group,
            addvollist,
            remvollist
        )
        self.assertEqual(fields.GroupStatus.AVAILABLE,
                         model_update[0]['status'])
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_create_group_from_src(self, mock_is_cg):
        # Cloning a non-CG group from a source is unsupported.
        mock_is_cg.return_value = False
        group = mock.MagicMock()
        ctx = context.get_admin_context()
        volumes = [fake_volume.fake_volume_obj(None)]
        self.assertRaises(
            NotImplementedError,
            self.driver.create_group_from_src,
            ctx, group, volumes
        )
        mock_is_cg.assert_called_once_with(group)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    @mock.patch(NIMBLE_ISCSI_DRIVER + ".create_cloned_volume")
    def test_create_group_from_src_positive(self, mock_clone,
                                            mock_get_specs,
                                            mock_is_cg):
        # Cloning a CG from a source group clones each member volume and
        # associates the clones with the new volume collection.
        source_volume = volume_src_cg
        volume = volume_cg
        volume['source_volid'] = source_volume['id']
        volume['display_name'] = "cg-volume"
        source_volume['display_name'] = "source-volume"
        mock_get_specs.return_value = '<is> True'
        mock_clone.return_value = volume['name']
        mock_is_cg.return_value = True
        self.driver.create_group_from_src(
            context.get_admin_context(), FAKE_GROUP,
            [volume], source_group=FAKE_SRC_GROUP,
            source_vols=[source_volume])
        self.mock_client_service.associate_volcoll.assert_called_once()
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    def test_create_group_snapshot_positive(self, mock_get_specs, mock_is_cg):
        # A CG snapshot maps to a backend snapshot-collection create.
        mock_get_specs.return_value = '<is> True'
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        group_snapshot = mock.MagicMock()
        snapshots = [fake_snapshot.fake_snapshot_obj(None)]
        self.driver.create_group_snapshot(
            ctx,
            group_snapshot,
            snapshots
        )
        self.mock_client_service.snapcoll_create.assert_called_once()
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_delete_generic_group_snapshot(self, mock_is_cg):
        # Deleting a non-CG group snapshot is unsupported.
        mock_is_cg.return_value = False
        group_snapshot = mock.MagicMock()
        snapshots = [fake_snapshot.fake_snapshot_obj(None)]
        ctx = context.get_admin_context()
        self.assertRaises(
            NotImplementedError,
            self.driver.delete_group_snapshot,
            ctx, group_snapshot, snapshots
        )
        mock_is_cg.assert_called_once_with(group_snapshot)
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    def test_delete_group_snapshot_positive(self, mock_get_specs, mock_is_cg):
        # Deleting a CG snapshot deletes the backend snapshot collection.
        mock_get_specs.return_value = '<is> True'
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        group_snapshot = mock.MagicMock()
        snapshots = [mock.Mock()]
        self.driver.delete_group_snapshot(
            ctx,
            group_snapshot,
            snapshots
        )
        self.mock_client_service.snapcoll_delete.assert_called_once()
    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_create_group_negative(self, mock_is_cg):
        # A CG whose volume type disables consistent group snapshots must
        # be rejected with InvalidInput.
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        self.vol_type = volume_type.VolumeType(
            name='volume_type',
            extra_specs=
            {'consistent_group_snapshot_enabled': '<is> False'})
        FAKE_GROUP.volume_types = volume_type.VolumeTypeList(
            objects=[self.vol_type])
        self.assertRaises(exception.InvalidInput,
                          self.driver.create_group, ctx, FAKE_GROUP)
|
mahak/cinder
|
cinder/tests/unit/volume/drivers/hpe/test_nimble.py
|
Python
|
apache-2.0
| 89,138
|
import subprocess
import sys
import os
from itertools import tee
def geojson_extent(FILE_NAME):
    """Get the geographic extent of a GeoJSON file in WGS84.

    Shells out to ``ogrinfo`` and parses its "Extent:" line.

    :param FILE_NAME: path to a file readable by ogrinfo
    :returns: [xmin, ymin, xmax, ymax] as floats
    :raises OSError: if ogrinfo produced no extent information
    """
    # TODO: Make grep Python level for interop
    assert os.path.isfile(FILE_NAME)
    # NOTE: shell=True with an unescaped path — quote FILE_NAME (or use a
    # list argv without the shell) if paths may contain shell metacharacters.
    command = 'ogrinfo -ro -so -al {} | grep "Extent:"'.format(FILE_NAME)
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    lines = iter(process.stdout.readline, '')
    # BUG FIX: tee() returns a *tuple* of two independent iterators. The
    # original code bound the whole tuple to lines_sum, so
    # sum(1 for _ in lines_sum) always counted the two tee objects (== 2)
    # and the empty-output check below could never fire.
    lines, lines_sum = tee(lines)  # We need to copy the iterator!
    if sum(1 for _ in lines_sum) == 0:
        raise OSError("There was no extent information for this file")
    for line in lines:
        # e.g. 'Extent: (xmin, ymin) - (xmax, ymax)' -> four floats
        extent = line.rstrip()
        extent = extent.replace("Extent: ", "")
        extent = extent.replace("(", "").replace(")", "").replace(" - ", ", ")
        extent = [float(x) for x in extent.split(",")]
    return extent
if __name__ == '__main__':
    ## Check all our variables are in order
    if len(sys.argv) > 1:
        FILE_NAME = sys.argv[1]
        EXTENT = geojson_extent(FILE_NAME)
        # FIX: use print as a function so the script parses on Python 3 as
        # well as Python 2 (a single parenthesized argument is valid in both).
        print(EXTENT)
    else:
        raise ValueError("FILE_NAME not defined")
|
joykuotw/tiler
|
tiler/tiler-scripts/geojson_extent.py
|
Python
|
mit
| 1,110
|
#!/usr/bin/env python
from cookielib import CookieJar
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.web.client import Agent, CookieAgent
from treq.client import HTTPClient
from txsocksx.http import SOCKS5Agent
class ClientFactory(object):
    """Factory that builds treq HTTPClient instances.

    Instantiating ``ClientFactory(...)`` does not return a ClientFactory:
    ``__new__`` constructs and returns an ``HTTPClient`` wired with the
    requested capabilities (cookies, SOCKS5 proxying, Tor).
    """

    def __new__(cls, cookies=None, socks=None, tor=None,
                host="127.0.0.1", port=9050):
        """
        Here we will check for some flags to be set. The calling instance can
        decide what sort of HTTPClient object it wants to have returned by
        specifying the capabilities that are required. For each set flag the
        according wrapper will be applied to the standard agent.

        :param cookies: wrap the agent with a CookieAgent/CookieJar
        :param socks: route through a SOCKS5 proxy at host:port
        :param tor: route through a local Tor SOCKS proxy (overrides socks)
        :param host: SOCKS proxy host (used when socks is set, tor is not)
        :param port: SOCKS proxy port (used when socks is set, tor is not)
        :returns: a configured treq HTTPClient
        """
        # TODO: add section to read tor configuration from config file
        tor_ip = "127.0.0.1"
        tor_port = 9050
        # BUG FIX: the first argument of __new__ is the *class*; the original
        # assigned ``self.agent = ...``, which silently created shared
        # class-level state mutated on every call. A plain local suffices.
        #
        # The proxy decision must happen before cookie wrapping, since the
        # base agent is what initialises the connection (CookieAgent can't).
        if tor or socks:
            if tor:
                proxy_endpoint = TCP4ClientEndpoint(reactor, tor_ip, tor_port)
            else:
                proxy_endpoint = TCP4ClientEndpoint(reactor, host, port)
            agent = SOCKS5Agent(reactor, proxyEndpoint=proxy_endpoint)
        else:
            agent = Agent(reactor)
        if cookies:  # the user wants to use cookies as well
            agent = CookieAgent(agent, CookieJar())
        return HTTPClient(agent)
|
luceatnobis/chan_archiver
|
chan/client_factory.py
|
Python
|
gpl-3.0
| 1,625
|
#!/usr/bin/env python
import random
import sys
import django_includes
from qurkexp.estimation.runs import load_run
from qurkexp.estimation.models import *
from qurkexp.hitlayer.models import HitLayer
def initialize_run(run_name, dataset, estimate_vals, num_batches, batch_size, disp_style, assignments, price):
    """Create an ExpRun for *dataset* plus one RunVal row per estimate value.

    Returns the newly created ExpRun.
    """
    experiment = EstExp.objects.get(name=dataset)
    new_run = ExpRun.objects.create(
        name=run_name, exp=experiment, num_batches=num_batches,
        batch_size=batch_size, display_style=disp_style,
        assignments=assignments, price=price)
    for estimate_val in estimate_vals:
        RunVal.objects.create(run=new_run, val=estimate_val)
    return new_run
def create_batches(run):
    """Populate *run* with num_batches batches of batch_size random items."""
    candidate_items = list(ExpItem.objects.filter(exp__runs=run).distinct())
    for _ in range(run.num_batches):
        chosen = random.sample(candidate_items, run.batch_size)
        new_batch = RunBatch.objects.create(run=run)
        new_batch.items_ptrs = chosen
        new_batch.save()
def post_batches(run):
    # Post one Mechanical Turk HIT per batch of the run, pointing workers at
    # the estimation-counts page for that batch.
    title = "Identify various properties of images, text, or audio"
    desc = "You will be presented with several items (shapes, photos, videos, text, audio). Below the items, you will see some questions regarding particular properties of those items (like color, or shape)."
    for b in run.batches.all():
        url = "/estimate/counts/%d/" % (b.id)
        # The returned HIT id is not needed here; the HitLayer records the
        # job itself (the original bound it to an unused local).
        HitLayer.get_instance().create_job(url,
                                           ('estimate_counts', [b.id]),
                                           desc = desc,
                                           title = title,
                                           price = run.price,
                                           nassignments = run.assignments)
if __name__ == "__main__":
    # Usage: generate_run.py <run_name>. The run definition (dataset, batch
    # sizing, pricing, ...) is looked up by name via load_run.
    if len(sys.argv) != 2:
        raise Exception("arguments: [run_name from run.py]")
    args = load_run(sys.argv[1])
    run = initialize_run(*args)
    create_batches(run)
    post_batches(run)
|
marcua/qurk_experiments
|
qurkexp/estimation/generate_run.py
|
Python
|
bsd-3-clause
| 1,917
|
# -*- coding: utf-8 -*-
# © 2016 LasLabs Inc.
# License GPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
# Odoo addon manifest for the Medical Physician module (Odoo 10.0 series).
{
    'name': 'Medical Physician',
    'version': '10.0.1.0.0',
    'author': "LasLabs, Odoo Community Association (OCA)",
    'category': 'Medical',
    # Modules that must be installed before this one.
    'depends': [
        'medical',
        'medical_center',
        'product',
    ],
    "website": "https://laslabs.com",
    "license": "GPL-3",
    # Data files loaded on install/upgrade; order matters (views before
    # menus, access rules with the records they secure).
    "data": [
        'views/medical_physician_view.xml',
        'views/medical_specialty_view.xml',
        'views/medical_menu.xml',
        'security/ir.model.access.csv',
        'wizard/medical_physician_unavailable_view.xml',
        'data/ir_sequence_data.xml',
        'data/medical_specialties.xml',
    ],
    # Demo data, only loaded in databases created with demo data enabled.
    'demo': [
        'demo/medical_physician.xml',
    ],
    "application": False,
    'installable': True,
}
|
laslabs/vertical-medical
|
medical_physician/__manifest__.py
|
Python
|
agpl-3.0
| 845
|
#!/usr/bin/env python
from fundamentals.adv_python.context_management.cm_examples import time_print
"""
Problem:
A child is running up a staircase with n steps, and can hop either 1 step, 2 steps or 3 steps
at a time. Implement a method to count mow many possible ways the child can run up the stairs.
"""
class NWaysUpStairs(object):
    """
    Count the ways a child can climb n stairs hopping 1, 2 or 3 steps
    at a time.

    nways_rec is the plain exponential recursion; nways_dp memoizes the
    identical recurrence for a dynamic-programming speedup.
    """

    @staticmethod
    def nways_rec(n):
        # Overshooting contributes nothing; landing exactly is one way.
        if n < 0:
            return 0
        if n == 0:
            return 1
        return sum(NWaysUpStairs.nways_rec(n - hop) for hop in (3, 2, 1))

    @staticmethod
    def _nways_dp(n, memoized):
        # memoized[i] caches the answer for i stairs; entries are appended
        # in order, so the list length tells us what is already computed.
        if n < 0:
            return 0
        if len(memoized) > n:
            return memoized[n]
        ways = (NWaysUpStairs._nways_dp(n - 3, memoized)
                + NWaysUpStairs._nways_dp(n - 2, memoized)
                + NWaysUpStairs._nways_dp(n - 1, memoized))
        memoized.append(ways)
        return ways

    @staticmethod
    def nways_dp(n):
        # Seed the cache with the single way to stand still on step 0.
        return NWaysUpStairs._nways_dp(n, [1])
def main():
    # Time both implementations on the same input to contrast the
    # exponential recursion with the memoized version.
    # NOTE: Python 2 syntax (print statements).
    with time_print("recursive nways"):
        print NWaysUpStairs.nways_rec(20)
    with time_print("dynamic programming nways"):
        print NWaysUpStairs.nways_dp(20)
if __name__ == "__main__":
    main()
|
davjohnst/fundamentals
|
fundamentals/dynamic_programming/num_ways_up_stairs.py
|
Python
|
apache-2.0
| 1,743
|
#! /usr/bin/env python3
"""Test script for the dbm.open function based on testdumbdbm.py"""
import os
import unittest
import glob
import test.support
# Skip tests if dbm module doesn't exist.
dbm = test.support.import_module('dbm')
_fname = test.support.TESTFN
#
# Iterates over every database module supported by dbm currently available,
# setting dbm to use each in turn, and yielding that module
#
def dbm_iterator():
    """Yield every dbm backend module that can be imported, registering
    each one in dbm._modules along the way."""
    for backend in dbm._names:
        try:
            module = __import__(backend, fromlist=['open'])
        except ImportError:
            continue
        dbm._modules[backend] = module
        yield module
#
# Clean up all scratch databases we might have created during testing
#
def delete_files():
    """Remove every scratch database file created during testing.

    The backend decides the precise on-disk name(s), so glob for anything
    whose name starts with _fname.
    """
    for leftover in glob.glob(_fname + "*"):
        test.support.unlink(leftover)
class AnyDBMTestCase(unittest.TestCase):
    """Backend-agnostic dbm tests.

    Concrete subclasses are generated in test_main() with a `module`
    attribute naming the backend under test; setUp installs it as the
    default dbm module.
    """
    # Reference key/value fixture written into each scratch database.
    _dict = {'0': b'',
             'a': b'Python:',
             'b': b'Programming',
             'c': b'the',
             'd': b'way',
             'f': b'Guido',
             'g': b'intended',
             }

    def init_db(self):
        # Create a fresh database containing the fixture data.
        f = dbm.open(_fname, 'n')
        for k in self._dict:
            f[k.encode("ascii")] = self._dict[k]
        f.close()

    def keys_helper(self, f):
        # Assert the database keys match the fixture; return them sorted.
        keys = sorted(k.decode("ascii") for k in f.keys())
        dkeys = sorted(self._dict.keys())
        self.assertEqual(keys, dkeys)
        return keys

    def test_error(self):
        self.assertTrue(issubclass(self.module.error, IOError))

    def test_anydbm_not_existing(self):
        self.assertRaises(dbm.error, dbm.open, _fname)

    def test_anydbm_creation(self):
        f = dbm.open(_fname, 'c')
        self.assertEqual(list(f.keys()), [])
        for key in self._dict:
            f[key.encode("ascii")] = self._dict[key]
        self.read_helper(f)
        f.close()

    def test_anydbm_modification(self):
        self.init_db()
        f = dbm.open(_fname, 'c')
        self._dict['g'] = f[b'g'] = b"indented"
        self.read_helper(f)
        f.close()

    def test_anydbm_read(self):
        self.init_db()
        f = dbm.open(_fname, 'r')
        self.read_helper(f)
        f.close()

    def test_anydbm_keys(self):
        self.init_db()
        f = dbm.open(_fname, 'r')
        keys = self.keys_helper(f)
        f.close()

    def test_anydbm_access(self):
        self.init_db()
        f = dbm.open(_fname, 'r')
        key = "a".encode("ascii")
        self.assertIn(key, f)
        # Was a bare `assert(...)`, which is stripped under `python -O`;
        # use the unittest assertion so the check always runs and reports.
        self.assertEqual(f[key], b"Python:")
        f.close()

    def read_helper(self, f):
        # Verify every fixture value round-trips through the database.
        keys = self.keys_helper(f)
        for key in self._dict:
            self.assertEqual(self._dict[key], f[key.encode("ascii")])

    def tearDown(self):
        delete_files()

    def setUp(self):
        # Make the backend under test the default for plain dbm.open calls.
        dbm._defaultmod = self.module
        delete_files()
class WhichDBTestCase(unittest.TestCase):
    """Checks that dbm.whichdb recognizes databases written by each backend."""
    # Actual test methods are added to namespace after class definition.
    def __init__(self, *args):
        # Plain delegation; kept as-is (adds nothing over the inherited one).
        unittest.TestCase.__init__(self, *args)
    def test_whichdb(self):
        for module in dbm_iterator():
            # Check whether whichdb correctly guesses module name
            # for databases opened with "module" module.
            # Try with empty files first
            name = module.__name__
            if name == 'dbm.dumb':
                continue # whichdb can't support dbm.dumb
            test.support.unlink(_fname)
            f = module.open(_fname, 'c')
            f.close()
            self.assertEqual(name, dbm.whichdb(_fname))
            # Now add a key
            f = module.open(_fname, 'w')
            f[b"1"] = b"1"
            # and test that we can find it
            self.assertIn(b"1", f)
            # and read it
            self.assertTrue(f[b"1"] == b"1")
            f.close()
            self.assertEqual(name, dbm.whichdb(_fname))
    def tearDown(self):
        delete_files()
    def setUp(self):
        # Start every test from an empty scratch database.
        delete_files()
        self.filename = test.support.TESTFN
        self.d = dbm.open(self.filename, 'c')
        self.d.close()
    def test_keys(self):
        # Exercise keys()/membership/lookup on the default backend.
        self.d = dbm.open(self.filename, 'c')
        self.assertEqual(self.d.keys(), [])
        a = [(b'a', b'b'), (b'12345678910', b'019237410982340912840198242')]
        for k, v in a:
            self.d[k] = v
        self.assertEqual(sorted(self.d.keys()), sorted(k for (k, v) in a))
        for k, v in a:
            self.assertIn(k, self.d)
            self.assertEqual(self.d[k], v)
        self.assertNotIn(b'xxx', self.d)
        self.assertRaises(KeyError, lambda: self.d[b'xxx'])
        self.d.close()
def test_main():
    # Run WhichDBTestCase plus one dynamically built AnyDBMTestCase subclass
    # per importable dbm backend; the subclass's `module` attribute selects
    # the backend exercised by that copy of the tests.
    classes = [WhichDBTestCase]
    for mod in dbm_iterator():
        classes.append(type("TestCase-" + mod.__name__, (AnyDBMTestCase,),
                            {'module': mod}))
    test.support.run_unittest(*classes)
if __name__ == "__main__":
    test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.2/Lib/test/test_dbm.py
|
Python
|
mit
| 4,980
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import tempfile
import os
from pathlib import Path
import getpass
import shutil
import pytest
from warnings import warn
from testr.test_helper import on_head_network, has_sybase
from .. import report
user = getpass.getuser()
# Probe for Sybase VV database access; any failure (missing Ska.DBI package,
# no account, no network) simply disables the tests that need it.
try:
    import Ska.DBI
    with Ska.DBI.DBI(server='sqlsao', dbi='sybase', user=user, database='axafvv') as db:
        HAS_SYBASE_ACCESS = True
except Exception:
    HAS_SYBASE_ACCESS = False
# If the user should have access, warn about the issue.
if (on_head_network() and not has_sybase() and
    Path(os.environ['SKA'], 'data', 'aspect_authorization',
         f'sqlsao-axafvv-{user}').exists()):
    warn("On HEAD but no sybase access. Run test from production environment.")
# True when the mica starcheck archive exists on disk.
HAS_SC_ARCHIVE = os.path.exists(report.starcheck.FILES['data_root'])
@pytest.mark.skipif('not HAS_SYBASE_ACCESS', reason='Report test requires Sybase VV access')
@pytest.mark.skipif('not HAS_SC_ARCHIVE', reason='Report test requires mica starcheck archive')
def test_write_reports():
    """
    Make a report and database
    """
    tempdir = tempfile.mkdtemp()
    # report.py only creates a fresh table when the server file does not
    # already exist, so reserve a unique name and immediately delete it.
    handle, server_file = tempfile.mkstemp(dir=tempdir, suffix='.db3')
    os.unlink(server_file)
    report.REPORT_ROOT = tempdir
    report.REPORT_SERVER = server_file
    for obsid in (20001, 15175, 54778):
        report.main(obsid)
    # Clean up the scratch database and report tree.
    os.unlink(server_file)
    shutil.rmtree(tempdir)
|
sot/mica
|
mica/report/tests/test_write_report.py
|
Python
|
bsd-3-clause
| 1,557
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import testtools
from testtools.matchers import Equals
from unittest.mock import patch
import snapcraft
from snapcraft.internal.project_loader import grammar
import snapcraft.internal.project_loader.grammar._on as on
import snapcraft.internal.project_loader.grammar._to as to
import snapcraft.internal.project_loader.grammar._compound as compound
from . import GrammarBaseTestCase
class CompoundStatementGrammarTestCase(GrammarBaseTestCase):
    """Scenario-driven checks of compound 'on .../to ...' grammar evaluation.

    Each scenario supplies an 'on' clause, a 'to' clause, a body, optional
    else-bodies, the mocked host architecture, and the package set expected
    from processing the statement (target arch is fixed to armhf below).
    """
    scenarios = [
        (
            "on amd64",
            {
                "on": "on amd64",
                "to": "to armhf",
                "body": ["foo"],
                "else_bodies": [],
                "host_arch": "x86_64",
                "expected_packages": {"foo"},
            },
        ),
        (
            "on i386",
            {
                "on": "on amd64",
                "to": "to armhf",
                "body": ["foo"],
                "else_bodies": [],
                "host_arch": "i686",
                "expected_packages": set(),
            },
        ),
        (
            "ignored else",
            {
                "on": "on amd64",
                "to": "to armhf",
                "body": ["foo"],
                "else_bodies": [["bar"]],
                "host_arch": "x86_64",
                "expected_packages": {"foo"},
            },
        ),
        (
            "used else",
            {
                "on": "on amd64",
                "to": "to i386",
                "body": ["foo"],
                "else_bodies": [["bar"]],
                "host_arch": "i686",
                "expected_packages": {"bar"},
            },
        ),
        (
            "third else ignored",
            {
                "on": "on amd64",
                "to": "to i386",
                "body": ["foo"],
                "else_bodies": [["bar"], ["baz"]],
                "host_arch": "i686",
                "expected_packages": {"bar"},
            },
        ),
        (
            "third else followed",
            {
                "on": "on amd64",
                "to": "to i386",
                "body": ["foo"],
                "else_bodies": [[{"on armhf": ["bar"]}], ["baz"]],
                "host_arch": "i686",
                "expected_packages": {"baz"},
            },
        ),
        (
            "nested amd64",
            {
                "on": "on amd64",
                "to": "to armhf",
                "body": [{"on amd64": ["foo"]}, {"on i386": ["bar"]}],
                "else_bodies": [],
                "host_arch": "x86_64",
                "expected_packages": {"foo"},
            },
        ),
        (
            "nested i386",
            {
                "on": "on i386",
                "to": "to armhf",
                "body": [{"on amd64": ["foo"]}, {"on i386": ["bar"]}],
                "else_bodies": [],
                "host_arch": "i686",
                "expected_packages": {"bar"},
            },
        ),
        (
            "nested body ignored else",
            {
                "on": "on amd64",
                "to": "to armhf",
                "body": [{"on amd64": ["foo"]}, {"else": ["bar"]}],
                "else_bodies": [],
                "host_arch": "x86_64",
                "expected_packages": {"foo"},
            },
        ),
        (
            "nested body used else",
            {
                "on": "on i386",
                "to": "to armhf",
                "body": [{"on amd64": ["foo"]}, {"else": ["bar"]}],
                "else_bodies": [],
                "host_arch": "i686",
                "expected_packages": {"bar"},
            },
        ),
        (
            "nested else ignored else",
            {
                "on": "on armhf",
                "to": "to i386",
                "body": ["foo"],
                "else_bodies": [[{"on amd64": ["bar"]}, {"else": ["baz"]}]],
                "host_arch": "x86_64",
                "expected_packages": {"bar"},
            },
        ),
        (
            "nested else used else",
            {
                "on": "on armhf",
                "to": "to i386",
                "body": ["foo"],
                "else_bodies": [[{"on amd64": ["bar"]}, {"else": ["baz"]}]],
                "host_arch": "i686",
                "expected_packages": {"baz"},
            },
        ),
        (
            "with hyphen",
            {
                "on": "on other-arch",
                "to": "to yet-another-arch",
                "body": ["foo"],
                "else_bodies": [],
                "host_arch": "x86_64",
                "expected_packages": set(),
            },
        ),
        (
            "multiple selectors",
            {
                "on": "on amd64,i386",
                "to": "to armhf,arm64",
                "body": ["foo"],
                "else_bodies": [],
                "host_arch": "x86_64",
                "expected_packages": set(),
            },
        ),
    ]

    @patch("platform.architecture")
    @patch("platform.machine")
    def test_compound_statement_grammar(
        self, platform_machine_mock, platform_architecture_mock
    ):
        # Mock the host platform so the scenario controls the "on" arch;
        # the target ("to") arch is pinned to armhf via ProjectOptions.
        platform_machine_mock.return_value = self.host_arch
        platform_architecture_mock.return_value = ("64bit", "ELF")
        processor = grammar.GrammarProcessor(
            None, snapcraft.ProjectOptions(target_deb_arch="armhf"), self.checker
        )
        statements = [
            on.OnStatement(on=self.on, body=None, processor=processor),
            to.ToStatement(to=self.to, body=None, processor=processor),
        ]
        statement = compound.CompoundStatement(
            statements=statements, body=self.body, processor=processor
        )
        for else_body in self.else_bodies:
            statement.add_else(else_body)
        self.assertThat(statement.process(), Equals(self.expected_packages))
class CompoundStatementInvalidGrammarTestCase(GrammarBaseTestCase):
    """Scenarios whose 'on'/'to' clauses are syntactically invalid and must
    raise the stated syntax error when processed."""
    scenarios = [
        (
            "spaces in on selectors",
            {
                "on": "on amd64, ubuntu",
                "to": "to i386",
                "body": ["foo"],
                "else_bodies": [],
                "expected_exception": grammar.errors.OnStatementSyntaxError,
                "expected_message": ".*not a valid 'on' clause.*spaces are not allowed in the "
                "selectors.*",
            },
        ),
        (
            "spaces in to selectors",
            {
                "on": "on amd64,ubuntu",
                "to": "to i386, armhf",
                "body": ["foo"],
                "else_bodies": [],
                "expected_exception": grammar.errors.ToStatementSyntaxError,
                "expected_message": ".*not a valid 'to' clause.*spaces are not allowed in the "
                "selectors.*",
            },
        ),
    ]

    def test_on_statement_invalid_grammar(self):
        # Building or processing the statement must raise the scenario's
        # exception with a message matching the scenario's regex.
        with testtools.ExpectedException(
            self.expected_exception, self.expected_message
        ):
            processor = grammar.GrammarProcessor(
                None, snapcraft.ProjectOptions(target_deb_arch="armhf"), self.checker
            )
            statements = [
                on.OnStatement(on=self.on, body=None, processor=processor),
                to.ToStatement(to=self.to, body=None, processor=processor),
            ]
            statement = compound.CompoundStatement(
                statements=statements, body=self.body, processor=processor
            )
            for else_body in self.else_bodies:
                statement.add_else(else_body)
            statement.process()
|
sergiusens/snapcraft
|
tests/unit/project_loader/grammar/test_compound_statement.py
|
Python
|
gpl-3.0
| 8,322
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
import shutil
import os
import filecmp
class FileHelper(dpHelper):
    """Filesystem helpers: removal, writing and pairwise file comparison."""

    def remove(self, files_or_dirs=None, files=None, dirs=None):
        """Delete the given files and directories.

        files_or_dirs -- path(s) classified automatically as file or dir
        files         -- path(s) removed with os.remove
        dirs          -- path(s) removed recursively with shutil.rmtree

        Returns the list of paths actually removed. Missing paths are
        silently skipped.
        """
        # At most two of the three arguments may be supplied together
        # (original guard preserved).
        assert not files_or_dirs or not files or not dirs

        files_or_dirs = files_or_dirs or []
        files = files or []
        dirs = dirs or []

        # Normalize scalars to sequences, then copy into fresh lists so we
        # never mutate caller-owned arguments and can always append.
        # (The original wrapped scalars in TUPLES and then called .append
        # on them, raising AttributeError.)
        if not isinstance(files_or_dirs, (list, tuple)):
            files_or_dirs = (files_or_dirs, )
        files = list(files) if isinstance(files, (list, tuple)) else [files]
        dirs = list(dirs) if isinstance(dirs, (list, tuple)) else [dirs]

        for path in files_or_dirs:
            if os.path.isdir(path):
                dirs.append(path)
            elif os.path.isfile(path):
                files.append(path)

        removed = []
        for path in files:
            if os.path.isfile(path):
                os.remove(path)
                removed.append(path)
        for path in dirs:
            if os.path.isdir(path):
                shutil.rmtree(path)
                removed.append(path)
        return removed

    def write(self, path, content, mode='w'):
        """Write `content` to `path` (mode defaults to text truncate-write)."""
        with open(path, mode) as fp:
            fp.write(content)

    def compare(self, *files):
        """Compare the given files pairwise with filecmp.cmp.

        Returns None when called without arguments, False on any mismatch
        or error (errors are logged), True otherwise.
        NOTE(review): filecmp.cmp defaults to shallow=True (os.stat based),
        so identical metadata may pass without a byte comparison — confirm
        this is the intended semantics.
        """
        if not files:
            return None

        def _pairs():
            # Adjacent pairs: (f0, f1), (f1, f2), ...
            for i in range(len(files) - 1):
                yield files[i], files[i + 1]

        try:
            for first, second in _pairs():
                if not filecmp.cmp(first, second):
                    return False
        except Exception as e:
            self.logging.exception(e)
            return False
        return True
|
why2pac/dp-tornado
|
dp_tornado/helper/io/file/__init__.py
|
Python
|
mit
| 1,651
|
#!/usr/bin/python
# NOTE: Python 2 tutorial script (print statements, cmp()); will not run
# under Python 3.
# Lists are enclosed in brackets ([]) and their elements
# and size can be changed, while tuples are enclosed in parentheses
# ( () ) and cannot be updated. Tuples can be thought of as
# read-only lists.
aTuple = ( 'abcd', 786, 2.23, 'John', 70.2)
bTuple = ( 123, 'Vien')
print aTuple # Print complete tuple
print aTuple[0] # Print first element of the tuple
print aTuple[1:3] # Print elements starting from 2nd till 3rd
print aTuple[2:] # Print elements starting from 3rd element
print bTuple # Print the second tuple
print aTuple + bTuple # Print concatenated tuples
val_tuple = ('abcd', 786, 2.23, 'John', 70.2 )
val_list = ['abcd', 786, 2.23, 'John', 70.2]
#
print "\nPrint the list and tuple before change: "
print val_tuple
print val_list
# Uncommenting the next line raises TypeError: tuples are immutable.
#val_tuple[2] = 100.2
val_list [2] = 100.2
print "\nPrint the list and tuple after change"
print val_tuple
print val_list
print "=================================================="
tuple1 = ('physics', 'schematic', 1997, 2000)
tuple2 = (1,2, 3, 4, 5, 6, 7)
print "tuple1[0]: ", tuple1[0]
print "tuple2[1:5]: ", tuple2[1:5]
print "=================================================="
print "Updating Tuples"
print "Tuples are immutable which means you can not update or change the value of tuples element."
print "You can able to take portions of existing tuples to creatr new tuples as the following example demonstrates"
tuple1 = (12, 34, 56)
tuple2 = ('abc', 'xyz')
# Following action is not valid for tuples
#tuple1[0] = 100
# so let's create a new tuple as follows
tuple3 = tuple1 + tuple2
print "Tuple3 is: ", tuple3
print "=================================================="
print "Delete Tuple Elements"
print "Removing individual tuple elements is not possible. There is, of course, nothing"
print "wrong with ptting together another tuple with the undesired element discarded"
tup = ('physics', 'schemistry', 1997, 2000)
print "The tuple is: ", tup
del tup
print "After deleting tup: "
# Uncommenting the next line raises NameError: tup no longer exists.
#print tup
print "=================================================="
print "cmp(tuple1, tuple2): Compares elements of both tuples"
tuple1, tuple2 = (123, 'xyz'), (456, 'abc')
print "The tuple1 is: ", tuple1
print "The tuple2 is: ", tuple2
print "cmp(tuple1, tuple2): ", cmp(tuple1, tuple2)
print "cmp(tuple2, tuple1): ", cmp(tuple2, tuple1)
tuple3 = tuple2 + (678, )
print "The tuple3 is: ", tuple3
print "cmp(tuple2, tuple3): ", cmp(tuple2, tuple3)
print "=================================================="
print "len(tuple): Given the total length of the tuple"
tuple1, tuple2 = (123, 'zara', 'xyz'), (456, 'abc')
print "The Tuple1 is: ", tuple1
print "The Tuple2 is: ", tuple2
print "First tuple length is: ", len(tuple1)
print "Second tuple length is: ", len(tuple2)
print "=================================================="
print "max(tuple) or min(tuple): Returns item from the tuple with max or min value"
tuple1, tuple2 = (123, 'xyz', 'zara', 'abc'), (456, 700, 200)
print "The tuple1 is: ", tuple1
print "The tuple2 is: ", tuple2
print "Max value element of tupple1 is : ", max(tuple1)
print "Max value element of tupple2 is : ", max(tuple2)
print "Min value element of tupple1 is : ", min(tuple1)
print "Min value element of tupple2 is : ", min(tuple2)
print "=================================================="
print "tuple(seq): converts a list into tuple"
aList = [123, 'xyz', 'zara', 'abc']
aTuple = tuple(aList)
print "The aList is: ", aList
print "The aTuple is: ", aTuple
del aList[1]
print"The aList after delete aList[1]: ", aList
|
HuuHoangNguyen/Python_learning
|
Tuples.py
|
Python
|
mit
| 3,657
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils.html import escape
from horizon.workflows import views
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.networks import workflows
INDEX_URL = reverse('horizon:project:networks:index')
def form_data_subnet(subnet,
                     name=None, cidr=None, ip_version=None,
                     gateway_ip='', enable_dhcp=None,
                     allocation_pools=None,
                     dns_nameservers=None,
                     host_routes=None):
    """Build the POST form dict for a subnet, defaulting every field that
    was not overridden to the corresponding attribute of `subnet`."""
    def pick(override, fallback):
        # None means "not overridden"; fall back to the subnet's value.
        return fallback if override is None else override

    data = {
        'subnet_name': pick(name, subnet.name),
        'cidr': pick(cidr, subnet.cidr),
        'ip_version': pick(ip_version, subnet.ip_version),
    }
    # For the gateway, '' (the default) means "use the subnet's gateway",
    # while an explicit None selects the no-gateway checkbox.
    if gateway_ip == '':
        gateway_ip = subnet.gateway_ip
    data['gateway_ip'] = gateway_ip or ''
    data['no_gateway'] = gateway_ip is None
    data['enable_dhcp'] = pick(enable_dhcp, subnet.enable_dhcp)
    data['allocation_pools'] = _str_allocation_pools(
        pick(allocation_pools, subnet.allocation_pools))
    data['dns_nameservers'] = _str_dns_nameservers(
        pick(dns_nameservers, subnet.dns_nameservers))
    data['host_routes'] = _str_host_routes(
        pick(host_routes, subnet.host_routes))
    return data
def form_data_no_subnet():
    """Form data for creating a network with the subnet step left blank."""
    data = dict.fromkeys(
        ('subnet_name', 'cidr', 'gateway_ip',
         'allocation_pools', 'dns_nameservers', 'host_routes'), '')
    data.update(ip_version=4, no_gateway=False, enable_dhcp=True)
    return data
def _str_allocation_pools(allocation_pools):
if isinstance(allocation_pools, str):
return allocation_pools
return '\n'.join(['%s,%s' % (pool['start'], pool['end'])
for pool in allocation_pools])
def _str_dns_nameservers(dns_nameservers):
if isinstance(dns_nameservers, str):
return dns_nameservers
return '\n'.join(dns_nameservers)
def _str_host_routes(host_routes):
if isinstance(host_routes, str):
return host_routes
return '\n'.join(['%s,%s' % (route['destination'], route['nexthop'])
for route in host_routes])
class NetworkTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_list',)})
def test_index(self):
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
@test.create_stubs({api.neutron: ('network_list',)})
def test_index_network_list_exception(self):
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
self.assertEqual(len(res.context['networks_table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail(self):
self._test_network_detail()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_with_mac_learning(self):
self._test_network_detail(mac_learning=True)
def _test_network_detail(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_network_exception(self):
self._test_network_detail_network_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_network_exception_with_mac_learning(self):
self._test_network_detail_network_exception(mac_learning=True)
def _test_network_detail_network_exception(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndRaise(self.exceptions.neutron)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:detail', args=[network_id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_subnet_exception(self):
self._test_network_detail_subnet_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_subnet_exception_with_mac_learning(self):
self._test_network_detail_subnet_exception(mac_learning=True)
def _test_network_detail_subnet_exception(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertEqual(len(subnets), 0)
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_port_exception(self):
self._test_network_detail_port_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_port_exception_with_mac_learning(self):
self._test_network_detail_port_exception(mac_learning=True)
def _test_network_detail_port_exception(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertEqual(len(ports), 0)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_get(self,
test_with_profile=False):
if test_with_profile:
net_profiles = self.net_profiles.list()
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:create')
res = self.client.get(url)
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.CreateNetwork.name)
expected_objs = ['<CreateNetworkInfo: createnetworkinfoaction>',
'<CreateSubnetInfo: createsubnetinfoaction>',
'<CreateSubnetDetail: createsubnetdetailaction>']
self.assertQuerysetEqual(workflow.steps, expected_objs)
@override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_get_with_profile(self):
self.test_network_create_get(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post(self,
test_with_profile=False):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_profile(self):
self.test_network_create_post(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'subnet_create',
'profile_list',)})
def test_network_create_post_with_subnet(self,
test_with_profile=False):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_w_profile(self):
self.test_network_create_post_with_subnet(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post_network_exception(self,
test_with_profile=False):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_nw_create_post_nw_exception_w_profile(self):
        """Re-run the network-exception test with Cisco profile support on."""
        self.test_network_create_post_network_exception(
            test_with_profile=True)
    @test.create_stubs({api.neutron: ('network_create',
                                      'profile_list')})
    def test_network_create_post_with_subnet_network_exception(
            self,
            test_with_profile=False,
    ):
        """Create network+subnet where network_create fails: no subnet_create
        call is expected and the view redirects back to the index page.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_nw_create_post_w_subnet_nw_exception_w_profile(self):
        """Re-run the subnet+network-exception test with profile support."""
        self.test_network_create_post_with_subnet_network_exception(
            test_with_profile=True)
    @test.create_stubs({api.neutron: ('network_create',
                                      'network_delete',
                                      'subnet_create',
                                      'profile_list')})
    def test_network_create_post_with_subnet_subnet_exception(
            self,
            test_with_profile=False,
    ):
        """Create network+subnet where subnet_create fails: the workflow is
        expected to roll back by deleting the just-created network.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndReturn(network)
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp)\
            .AndRaise(self.exceptions.neutron)
        # Rollback: the network created above must be deleted again.
        api.neutron.network_delete(IsA(http.HttpRequest),
                                   network.id)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_nw_create_post_w_subnet_subnet_exception_w_profile(self):
        """Re-run the subnet-exception rollback test with profile support."""
        self.test_network_create_post_with_subnet_subnet_exception(
            test_with_profile=True)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_nocidr(self,
                                                    test_with_profile=False):
        """Submitting 'Create Subnet' with an empty CIDR must be rejected
        by form validation before any network API call is made.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr='',
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertContains(res, escape('Specify "Network Address" or '
                                        'clear "Create Subnet" checkbox.'))
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_nw_create_post_w_subnet_no_cidr_w_profile(self):
        """Re-run the empty-CIDR validation test with profile support."""
        self.test_network_create_post_with_subnet_nocidr(
            test_with_profile=True)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_cidr_without_mask(
            self,
            test_with_profile=False,
    ):
        """A CIDR without a prefix length defaults to /32 and must be
        rejected as too small.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr='10.0.0.0',
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        expected_msg = "The subnet in the Network Address is too small (/32)."
        self.assertContains(res, expected_msg)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_nw_create_post_w_subnet_cidr_without_mask_w_profile(self):
        """Re-run the mask-less CIDR validation test with profile support."""
        self.test_network_create_post_with_subnet_cidr_without_mask(
            test_with_profile=True)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_cidr_inconsistent(
            self,
            test_with_profile=False,
    ):
        """An IPv6 CIDR combined with the subnet's (IPv4) IP version must be
        rejected as inconsistent.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        # dummy IPv6 address
        cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr=cidr,
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        expected_msg = 'Network Address and IP version are inconsistent.'
        self.assertContains(res, expected_msg)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_network_create_post_with_subnet_cidr_inconsistent_w_profile(self):
        """Re-run the CIDR/IP-version mismatch test with profile support."""
        self.test_network_create_post_with_subnet_cidr_inconsistent(
            test_with_profile=True)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_gw_inconsistent(
            self,
            test_with_profile=False,
    ):
        """An IPv6 gateway combined with the subnet's (IPv4) IP version must
        be rejected as inconsistent.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_network_create_post_with_subnet_gw_inconsistent_w_profile(self):
        """Re-run the gateway/IP-version mismatch test with profile support."""
        self.test_network_create_post_with_subnet_gw_inconsistent(
            test_with_profile=True)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_network_update_get(self):
        """GET of the network update form renders the update template."""
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndReturn(network)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:update', args=[network.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/networks/update.html')
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_network_update_get_exception(self):
        """GET of the update form redirects to the index when network_get
        raises.
        """
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:update', args=[network.id])
        res = self.client.get(url)
        redir_url = INDEX_URL
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_update',
                                      'network_get',)})
    def test_network_update_post(self):
        """Successful network update POST redirects to the index page."""
        network = self.networks.first()
        api.neutron.network_update(IsA(http.HttpRequest), network.id,
                                   name=network.name,
                                   admin_state_up=network.admin_state_up)\
            .AndReturn(network)
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndReturn(network)
        self.mox.ReplayAll()
        form_data = {'network_id': network.id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'tenant_id': network.tenant_id}
        url = reverse('horizon:project:networks:update', args=[network.id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_update',
                                      'network_get',)})
    def test_network_update_post_exception(self):
        """A failing network_update still redirects back to the index."""
        network = self.networks.first()
        api.neutron.network_update(IsA(http.HttpRequest), network.id,
                                   name=network.name,
                                   admin_state_up=network.admin_state_up)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndReturn(network)
        self.mox.ReplayAll()
        form_data = {'network_id': network.id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'tenant_id': network.tenant_id}
        url = reverse('horizon:project:networks:update', args=[network.id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_list',
                                      'subnet_list',
                                      'network_delete')})
    def test_delete_network_no_subnet(self):
        """Deleting a network with no subnets calls network_delete only."""
        network = self.networks.first()
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=network.tenant_id,
                                 shared=False)\
            .AndReturn([network])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True)\
            .AndReturn([])
        # No subnets on the network, so no subnet_delete is expected.
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
            .AndReturn([])
        api.neutron.network_delete(IsA(http.HttpRequest), network.id)
        self.mox.ReplayAll()
        form_data = {'action': 'networks__delete__%s' % network.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_list',
                                      'subnet_list',
                                      'network_delete',
                                      'subnet_delete')})
    def test_delete_network_with_subnet(self):
        """Deleting a network deletes its subnet first, then the network."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=network.tenant_id,
                                 shared=False)\
            .AndReturn([network])
        api.neutron.network_list(IsA(http.HttpRequest), shared=True)\
            .AndReturn([])
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
            .AndReturn([subnet])
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
        api.neutron.network_delete(IsA(http.HttpRequest), network.id)
        self.mox.ReplayAll()
        form_data = {'action': 'networks__delete__%s' % network.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_list',
                                      'subnet_list',
                                      'network_delete',
                                      'subnet_delete')})
    def test_delete_network_exception(self):
        """A failing network_delete still redirects back to the index."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=network.tenant_id,
                                 shared=False)\
            .AndReturn([network])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True)\
            .AndReturn([])
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
            .AndReturn([subnet])
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
        api.neutron.network_delete(IsA(http.HttpRequest), network.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'action': 'networks__delete__%s' % network.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
class NetworkSubnetTests(test.TestCase):
    @test.create_stubs({api.neutron: ('subnet_get',)})
    def test_subnet_detail(self):
        """Subnet detail page renders the detail template with the subnet."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(self.subnets.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:subnets:detail',
                      args=[subnet.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/networks/subnets/detail.html')
        self.assertEqual(res.context['subnet'].id, subnet.id)
    @test.create_stubs({api.neutron: ('subnet_get',)})
    def test_subnet_detail_exception(self):
        """Subnet detail redirects to the index when subnet_get raises."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:subnets:detail',
                      args=[subnet.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_get(self):
        """GET of the add-subnet workflow renders the workflow template."""
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:addsubnet',
                      args=[network.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post(self):
        """Successful subnet create POST redirects to the network detail."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_with_additional_attributes(self):
        """Subnet create passes DNS nameservers and host routes through to
        the subnet_create API call.
        """
        network = self.networks.list()[1]
        subnet = self.subnets.list()[1]
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools,
                                  dns_nameservers=subnet.dns_nameservers,
                                  host_routes=subnet.host_routes)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_with_additional_attributes_no_gateway(self):
        """Disabling the gateway results in gateway_ip=None being passed to
        subnet_create.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=None,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet, gateway_ip=None)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_network_exception(self):
        """If network_get raises, the add-subnet POST redirects to the
        index without calling subnet_create.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_subnet_exception(self):
        """A failing subnet_create redirects back to the network detail."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_cidr_inconsistent(self):
        """An IPv6 CIDR with an IPv4 subnet version is a form error on the
        add-subnet workflow.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        # dummy IPv6 address
        cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
        form_data = form_data_subnet(subnet, cidr=cidr,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        expected_msg = 'Network Address and IP version are inconsistent.'
        self.assertFormErrors(res, 1, expected_msg)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_gw_inconsistent(self):
        """An IPv6 gateway with an IPv4 subnet version is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_start_only(self):
        """An allocation pool with only a start address is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # Start only allocation_pools
        allocation_pools = '10.0.0.2'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start and end addresses must be specified '
                            '(value=%s)' % allocation_pools)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_three_entries(self):
        """An allocation pool with three comma-separated entries is
        rejected; exactly start,end is required.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # pool with three entries
        allocation_pools = '10.0.0.2,10.0.0.3,10.0.0.4'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start and end addresses must be specified '
                            '(value=%s)' % allocation_pools)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_invalid_address(self):
        """A non-IP end address in an allocation pool is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # end address is not a valid IP address
        allocation_pools = '10.0.0.2,invalid_address'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'allocation_pools: Invalid IP address '
                            '(value=%s)' % allocation_pools.split(',')[1])
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_ip_network(self):
        """A CIDR (not a plain address) as a pool start is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # start address is CIDR
        allocation_pools = '10.0.0.2/24,10.0.0.5'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'allocation_pools: Invalid IP address '
                            '(value=%s)' % allocation_pools.split(',')[0])
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_start_larger_than_end(self):
        """A pool whose start address exceeds its end address is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # start address is larger than end address
        allocation_pools = '10.0.0.254,10.0.0.2'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start address is larger than end address '
                            '(value=%s)' % allocation_pools)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_nameservers(self):
        """A non-IP DNS nameserver entry is rejected on subnet create."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # invalid DNS server address
        dns_nameservers = ['192.168.0.2', 'invalid_address']
        form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'dns_nameservers: Invalid IP address '
                            '(value=%s)' % dns_nameservers[1])
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_destination_only(self):
        """A host route with only a destination (no nexthop) is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # Start only host_route
        host_routes = '192.168.0.0/24'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_three_entries(self):
        """A host route with three comma-separated entries is rejected;
        exactly destination,nexthop is required.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # host_route with three entries
        host_routes = 'aaaa,bbbb,cccc'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_invalid_destination(self):
        """An invalid destination CIDR (/64 on IPv4) in a host route is
        rejected.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # invalid destination network
        host_routes = '172.16.0.0/64,10.0.0.253'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[0])
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_nexthop_ip_network(self):
        """A CIDR (not a plain address) as a host-route nexthop is
        rejected.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # nexthop is not an IP address
        host_routes = '172.16.0.0/24,10.0.0.253/24'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[1])
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post(self):
        """Successful subnet update POST redirects to the network detail."""
        subnet = self.subnets.first()
        # subnet_get is called twice by the update workflow (form init +
        # handle), hence two recorded expectations.
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_with_gateway_ip(self):
        """Subnet update passes a changed gateway_ip through to the API."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        gateway_ip = '10.0.0.100'
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  gateway_ip=gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     gateway_ip=gateway_ip,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_no_gateway(self):
        """Disabling the gateway on update passes gateway_ip=None to the
        API.
        """
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  gateway_ip=None,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     gateway_ip=None,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_with_additional_attributes(self):
        """Subnet update passes DNS nameservers / host routes and a changed
        enable_dhcp flag through to the API.
        """
        subnet = self.subnets.list()[1]
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  enable_dhcp=False,
                                  dns_nameservers=subnet.dns_nameservers,
                                  host_routes=subnet.host_routes)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     enable_dhcp=False)
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_gw_inconsistent(self):
        """An IPv6 gateway on an IPv4 subnet is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_nameservers(self):
        """A non-IP DNS nameserver entry is rejected on subnet update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # invalid DNS server address
        dns_nameservers = ['192.168.0.2', 'invalid_address']
        form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'dns_nameservers: Invalid IP address '
                            '(value=%s)' % dns_nameservers[1])
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_routes_destination_only(self):
        """A host route with only a destination is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # Start only host_route
        host_routes = '192.168.0.0/24'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_routes_three_entries(self):
        """A host route with three entries is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # host_route with three entries
        host_routes = 'aaaa,bbbb,cccc'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_routes_invalid_destination(self):
        """An invalid destination CIDR in a host route is rejected on
        update.
        """
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # invalid destination network
        host_routes = '172.16.0.0/64,10.0.0.253'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[0])
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_routes_nexthop_ip_network(self):
        """A CIDR as a host-route nexthop is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # nexthop is not an IP address
        host_routes = '172.16.0.0/24,10.0.0.253/24'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[1])
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_subnet_delete(self):
        """Deleting a subnet succeeds without the mac-learning extension."""
        self._test_subnet_delete()
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_subnet_delete_with_mac_learning(self):
        """Deleting a subnet succeeds when mac-learning is supported."""
        self._test_subnet_delete(mac_learning=True)
    def _test_subnet_delete(self, mac_learning=False):
        """Shared body: record the expected API calls, then POST the
        table's delete action and expect a redirect back to the detail page.
        """
        subnet = self.subnets.first()
        network_id = subnet.network_id
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        # Table actions are posted as 'tablename__action__rowid'.
        form_data = {'action': 'subnets__delete__%s' % subnet.id}
        url = reverse('horizon:project:networks:detail',
                      args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_subnet_delete_exception(self):
        """A neutron error during delete still redirects to the detail page."""
        self._test_subnet_delete_exception()
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_subnet_delete_exception_with_mac_learning(self):
        """Same as above but with the mac-learning extension enabled."""
        self._test_subnet_delete_exception(mac_learning=True)
    def _test_subnet_delete_exception(self, mac_learning=False):
        """Shared body: subnet_delete raises, and the view is expected to
        handle the error and redirect back to the network detail page.
        """
        subnet = self.subnets.first()
        network_id = subnet.network_id
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        form_data = {'action': 'subnets__delete__%s' % subnet.id}
        url = reverse('horizon:project:networks:detail',
                      args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
class NetworkPortTests(test.TestCase):
    """Tests for the port detail and port update views.

    Most scenarios run twice: once with and once without the neutron
    'mac-learning' extension, via a mac_learning flag on a shared helper.
    """
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_detail(self):
        self._test_port_detail()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_detail_with_mac_learning(self):
        self._test_port_detail(mac_learning=True)
    def _test_port_detail(self, mac_learning=False):
        """GET the port detail page and check template and context."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(self.ports.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:ports:detail',
                                      args=[port.id]))
        self.assertTemplateUsed(res, 'project/networks/ports/detail.html')
        self.assertEqual(res.context['port'].id, port.id)
    @test.create_stubs({api.neutron: ('port_get',)})
    def test_port_detail_exception(self):
        """A neutron error on port_get redirects to the index page."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:ports:detail',
                                      args=[port.id]))
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get(self):
        self._test_port_update_get()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get_with_mac_learning(self):
        self._test_port_update_get(mac_learning=True)
    def _test_port_update_get(self, mac_learning=False):
        """GET the port edit form and check the template used."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest),
                             port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/networks/ports/update.html')
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post(self):
        self._test_port_update_post()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_with_mac_learning(self):
        self._test_port_update_post(mac_learning=True)
    def _test_port_update_post(self, mac_learning=False):
        """POST the port edit form and expect a redirect to network detail.

        When mac_learning is True, the update must also forward the
        mac_learning_enabled flag to port_update.
        """
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        extension_kwargs = {}
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                **extension_kwargs)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up}
        if mac_learning:
            form_data['mac_state'] = True
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception(self):
        self._test_port_update_post_exception()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception_with_mac_learning(self):
        self._test_port_update_post_exception(mac_learning=True)
    def _test_port_update_post_exception(self, mac_learning=False):
        """A neutron error on port_update still redirects to network detail."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        extension_kwargs = {}
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                **extension_kwargs)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up}
        if mac_learning:
            form_data['mac_state'] = True
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
|
JioCloud/horizon
|
openstack_dashboard/dashboards/project/networks/tests.py
|
Python
|
apache-2.0
| 73,135
|
import urllib
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test.client import Client, RequestFactory
from django.utils import simplejson
from django.utils.importlib import import_module
from mock import patch
from social_auth.views import complete
class DumbResponse(object):
    """Canned stand-in for the response object urllib2.urlopen() returns."""

    def __init__(self, data_str, url=None):
        # Keep the payload and (optional) originating URL exactly as given.
        self.data_str, self.url = data_str, url

    def read(self):
        """Return the entire canned payload, mimicking file-like read()."""
        return self.data_str
class NoBackendError(Exception):
    """Raised when a client attempts to log in with an unsupported backend."""
class SocialClient(Client):
    """
    Test client to login/register a user
    Does so by mocking api posts/responses.
    Only supports facebook.
    """
    # NOTE(review): the docstring says "only facebook", but the backends
    # table below also carries google and linkedin response fixtures.
    @patch('social_auth.backends.facebook.FacebookAuth.enabled')
    @patch('social_auth.utils.urlopen')
    def login(self, user, mock_urlopen, mock_facebook_enabled, backend='facebook'):
        """
        Login or Register a facebook user.
        If the user has never logged in then they get registered and logged in.
        If the user has already registered, then they are logged in.
        user: dict
        backend: 'facebook'
        example user:
        {
            'first_name': 'Django',
            'last_name': 'Reinhardt',
            'verified': True,
            'name': 'Django Reinhardt',
            'locale': 'en_US',
            'hometown': {
                'id': '12345678',
                'name': 'Any Town, Any State'
            },
            'expires': '4812',
            'updated_time': '2012-01-29T19:27:32+0000',
            'access_token': 'dummyToken',
            'link': 'http://www.facebook.com/profile.php?id=1234',
            'location': {
                'id': '108659242498155',
                'name': 'Chicago, Illinois'
            },
            'gender': 'male',
            'timezone': -6,
            'id': '1234',
            'email': 'user@domain.com'
        }
        """
        token = 'dummyToken'
        # Per-backend sequence of canned HTTP bodies that urlopen() will
        # return, in order: token exchange first, then profile data.
        backends = {
            'facebook': (
                urllib.urlencode({
                    'access_token': token,
                    'expires': 3600,
                }),
                simplejson.dumps(user),
            ),
            'google': (
                simplejson.dumps({
                    "access_token": token,
                    "token_type": "Bearer",
                    "expires_in": 3600,
                }),
                simplejson.dumps(user),
            ),
            'linkedin': (
                # OAuth1 flow: request token, access token, then profile XML.
                urllib.urlencode({
                    'oauth_token': token,
                    'oauth_token_secret': token,
                    'oauth_callback_confirmed': 'true',
                    'xoauth_request_auth_url': (
                        'https://api.linkedin.com/uas/oauth/authorize'),
                    'oauth_expires_in': 3600,
                }),
                urllib.urlencode({
                    'oauth_token': token,
                    'oauth_token_secret': token,
                    'oauth_expires_in': 3600,
                    'oauth_authorization_expires_in': 3600,
                }),
                (('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
                  '<person>\n'
                  '  <id>{id}</id>\n'
                  '  <email-address>{email}</email-address>\n'
                  '  <first-name>{first_name}</first-name>\n'
                  '  <last-name>{last_name}</last-name>\n'
                  '</person>\n').format(**user)),
            ),
        }
        if backend not in backends:
            raise NoBackendError("%s is not supported" % backend)
        """
        mock out urlopen
        """
        # Each successive urlopen() call pops the next canned response.
        mock_urlopen.side_effect = [
            DumbResponse(r) for r in backends[backend]
        ]
        # make it work when no FACEBOOK_APP_ID declared
        mock_facebook_enabled.return_value = True
        factory = RequestFactory()
        request = factory.post('', {'code': 'dummy',
                                    'redirect_state': 'dummy'})
        # Attach a session: reuse this client's session if one exists,
        # otherwise create a fresh store from the configured engine.
        engine = import_module(settings.SESSION_ENGINE)
        if self.session:
            request.session = self.session
        else:
            request.session = engine.SessionStore()
        request.user = AnonymousUser()
        request.session['facebook_state'] = 'dummy'
        # make it happen.
        redirect = complete(request, backend)
        request.session.save()
        # Set the cookie for this session.
        session_cookie = settings.SESSION_COOKIE_NAME
        self.cookies[session_cookie] = request.session.session_key
        cookie_data = {
            'max-age': None,
            'path': '/',
            'domain': settings.SESSION_COOKIE_DOMAIN,
            'secure': settings.SESSION_COOKIE_SECURE or None,
            'expires': None,
        }
        self.cookies[session_cookie].update(cookie_data)
        return True
|
krvss/django-social-auth
|
social_auth/tests/client.py
|
Python
|
bsd-3-clause
| 5,005
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from pdc_client.test_helpers import CLITestCase
from pdc_client.runner import Runner
from copy import deepcopy
class ComposeTreeLocationsTestCase(CLITestCase):
    """CLI tests for the compose-tree-locations commands.

    Each test registers fake PDC API endpoints, runs the CLI through
    ``self.runner``, compares output to a fixture file, and asserts the
    exact (method, payload) calls recorded against each endpoint.
    """
    def setUp(self):
        self.runner = Runner()
        self.runner.setup()
        # Canonical detail record reused by the detail/update tests.
        self.compose_tree_location_detail = {"arch": "x86_64",
                                             "compose": "Awesome-product-7.0-0",
                                             "variant": "Server",
                                             "location": "NAY",
                                             "scheme": "http",
                                             "synced_content": "debug",
                                             "url": "http://example.com"}
    def _setup_list(self, api):
        # 30 records forces the client to paginate (two GET calls below).
        api.add_endpoint('compose-tree-locations', 'GET', [
            {"arch": "x86_64",
             "compose": "Awesome-product-7.0-{0}".format(x),
             "variant": "Server",
             "location": "NAY",
             "scheme": "http",
             "synced_content": "debug",
             "url": "http://example.com"}
            for x in range(30)
        ])
    def _setup_detail(self, api):
        # GET returns the original record; PATCH returns an updated copy.
        obj = {"arch": "x86_64",
               "compose": "Awesome-product-7.0-0",
               "variant": "Server",
               "location": "NAY",
               "scheme": "http",
               "synced_content": "debug",
               "url": "http://example.com"}
        api.add_endpoint('compose-tree-locations/Awesome-product-7.0-0/Server/x86_64/NAY/https',
                         'GET', obj)
        # PATCH test result to passed
        obj_update = deepcopy(obj)
        obj_update["scheme"] = "https"
        obj_update["url"] = "https://example1.com"
        obj_update["synced_content"] = "source"
        api.add_endpoint('compose-tree-locations/Awesome-product-7.0-0/Server/x86_64/NAY/https',
                         'PATCH', obj_update)
    def test_list(self, api):
        """Listing paginates: one GET per page."""
        self._setup_list(api)
        with self.expect_output('list_multi_page.txt'):
            self.runner.run(['compose-tree-locations', 'list'])
        self.assertEqual(api.calls['compose-tree-locations'],
                         [('GET', {'page': 1}),
                          ('GET', {'page': 2})])
    def test_info(self, api):
        """'info' issues a single GET against the detail endpoint."""
        self._setup_detail(api)
        with self.expect_output('detail.txt'):
            self.runner.run(['compose-tree-locations', 'info', 'Awesome-product-7.0-0', 'Server',
                             'x86_64', 'NAY', 'https'])
        self.assertEqual(
            api.calls['compose-tree-locations/Awesome-product-7.0-0/Server/x86_64/NAY/https'],
            [('GET', {})])
    def test_info_json(self, api):
        """Same as test_info but with --json output."""
        self._setup_detail(api)
        with self.expect_output('detail.json', parse_json=True):
            self.runner.run(['--json', 'compose-tree-locations', 'info', 'Awesome-product-7.0-0', 'Server',
                             'x86_64', 'NAY', 'https'])
        self.assertEqual(
            api.calls['compose-tree-locations/Awesome-product-7.0-0/Server/x86_64/NAY/https'],
            [('GET', {})])
    def test_update(self, api):
        """'update' sends a PATCH with only the changed fields."""
        self._setup_detail(api)
        with self.expect_output('detail_for_patch.txt'):
            self.runner.run(['compose-tree-locations', 'update', 'Awesome-product-7.0-0', 'Server', 'x86_64', 'NAY',
                             'https', '--scheme', 'http', '--synced-content', 'source',
                             '--url', 'https://example1.com'])
        self.assertEqual(api.calls, {'compose-tree-locations/Awesome-product-7.0-0/Server/x86_64/NAY/https':
                                     [('PATCH', {'scheme': 'http',
                                                 'synced_content': ['source'],
                                                 'url': 'https://example1.com'})]})
    def test_create(self, api):
        """'create' POSTs the full record; synced_content is a list."""
        obj = {'arch': 'x86_64',
               'compose': 'Awesome-product-7.0-0',
               'variant': 'Server',
               'location': 'NAY',
               'scheme': 'http',
               'synced_content': 'debug',
               'url': 'http://example.com'}
        api.add_endpoint('compose-tree-locations', 'POST', obj)
        with self.expect_output('detail.txt'):
            self.runner.run(['compose-tree-locations', 'create', '--compose', 'Awesome-product-7.0-0', '--variant',
                             'Server', '--arch', 'x86_64', '--location', 'NAY', '--scheme', 'http', '--synced-content',
                             'debug', '--url', 'http://example.com'])
        self.assertEqual(api.calls, {'compose-tree-locations': [('POST', {'compose': 'Awesome-product-7.0-0',
                                                                          'variant': 'Server', 'arch': 'x86_64',
                                                                          'location': 'NAY', 'scheme': 'http',
                                                                          'synced_content': ['debug'],
                                                                          'url': 'http://example.com'})]})
    def test_create_multi_synced_contents(self, api):
        """--synced-content accepts multiple values, all sent in one POST."""
        obj = {'arch': 'x86_64',
               'compose': 'Awesome-product-7.0-0',
               'variant': 'Server',
               'location': 'NAY',
               'scheme': 'http',
               'synced_content': 'debug source binary',
               'url': 'http://example.com'}
        api.add_endpoint('compose-tree-locations', 'POST', obj)
        with self.expect_output('detail_multi_synced_contents.txt'):
            self.runner.run(['compose-tree-locations', 'create', '--compose', 'Awesome-product-7.0-0', '--variant',
                             'Server', '--arch', 'x86_64', '--location', 'NAY', '--scheme', 'http', '--synced-content',
                             'debug', 'source', 'binary', '--url', 'http://example.com'])
        self.assertEqual(api.calls, {'compose-tree-locations': [('POST', {'compose': 'Awesome-product-7.0-0',
                                                                          'variant': 'Server', 'arch': 'x86_64',
                                                                          'location': 'NAY', 'scheme': 'http',
                                                                          'synced_content': ['debug', 'source', 'binary'],
                                                                          'url': 'http://example.com'})]})
    def test_delete(self, api):
        """'delete' issues a DELETE and prints nothing."""
        api.add_endpoint('compose-tree-locations/Awesome-product-7.0-0/Server/x86_64/NAY/https',
                         'DELETE', None)
        with self.expect_output('empty.txt'):
            self.runner.run(['compose-tree-locations', 'delete', 'Awesome-product-7.0-0', 'Server', 'x86_64', 'NAY',
                             'https'])
        self.assertEqual(api.calls, {'compose-tree-locations/Awesome-product-7.0-0/Server/x86_64/NAY/https':
                                     [('DELETE', {})]})
|
product-definition-center/pdc-client
|
tests/compose_tree_locations/tests.py
|
Python
|
mit
| 7,158
|
#!/usr/bin/env python
# coding=utf-8
#
# s3dir.py
# Extensions to `s3up.py` to recursively upload a directory to S3.
# Copyright (c) 2010-2012 Mike Tigas <mike@tig.as>
# http://mike.tig.as/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import traceback
import os
import s3up
from boto.s3.connection import S3Connection
from socket import setdefaulttimeout
setdefaulttimeout(100.0)
def update_irc():
    # Mirror the local #opennews IRC log directory into the
    # 'opennews-irc.yu8.in' S3 bucket (no remote prefix).
    upload_dir("/home/ubuntu/logbot/logs/#opennews",'','opennews-irc.yu8.in', 'http://opennews-irc.yu8.in/')
def upload_dir(local_dir,remote_dir,bucket,bucket_url=None):
    # Recursively upload every file under local_dir to the given S3 bucket.
    # remote_dir is an optional key prefix; bucket_url, when given, is used
    # for the printed link instead of the default s3.amazonaws.com URL.
    # NOTE: this file is Python 2 (print statements, urllib-era deps).
    for root, dirs, files in os.walk(local_dir):
        for f in files:
            fullfile = os.path.join(root, f).strip()
            # Derive the S3 key by stripping the local directory prefix.
            remotefile = fullfile.replace(local_dir,'').strip()
            if remote_dir:
                remotefile = remote_dir+"/"+remotefile
            if remotefile[0] == "/":
                remotefile = remotefile[1:]
            # Skip VCS metadata, editor droppings and compiled Python files.
            if (remotefile.find('.svn') == -1) and \
               (remotefile.find('.svn-base') == -1) and \
               (remotefile.find('.DS_Store') == -1) and \
               (remotefile.find('.pyo') == -1) and \
               (remotefile.find('.pyc') == -1):
                # cache_time=0 disables caching headers; objects are private.
                s3up.upload_file(fullfile, bucket, remotefile, cache_time=0, policy="private")
                if not bucket_url:
                    print "https://s3.amazonaws.com/%s/%s" % (bucket,remotefile)
                else:
                    print "%s%s" % (bucket_url,remotefile)
def main(args):
    # Entry point: args are ignored; always syncs the IRC logs.
    update_irc()
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except Exception, e:  # Python 2 syntax; print traceback and exit non-zero
        sys.stderr.write('\n')
        traceback.print_exc(file=sys.stderr)
        sys.stderr.write('\n')
        print sys.argv[1:]
        sys.exit(1)
|
mtigas/logbot
|
s3dir.py
|
Python
|
gpl-2.0
| 2,810
|
from __future__ import print_function
import MySQLdb
import tweepy
import json
import AppConstants
class StreamListener(tweepy.StreamListener):
    """Tweepy stream listener that records each tweet into AppConstants.

    Side effects: every received status overwrites AppConstants.username
    and AppConstants.tweet and echoes the pair to stdout.
    """

    def on_connect(self):
        """Log a message once the streaming connection is established."""
        print("You're connected to the streaming server.")

    def on_error(self, status_code):
        """Log the error code and return False to stop the stream."""
        print('Error: ' + repr(status_code))
        return False

    def on_data(self, data):
        """Parse one raw streaming payload and print 'user :<TAB>tweet'."""
        all_data = json.loads(data)
        AppConstants.username = all_data['user']['screen_name'].encode('utf-8')
        AppConstants.tweet = all_data['text'].encode('utf-8')
        # BUG FIX: the original had a bare ``print`` statement followed by a
        # discarded expression on the next line, so the username/tweet line
        # was never actually printed.
        print(AppConstants.username + ' :' + '\t' + AppConstants.tweet + '\n')
# Authenticate against the Twitter API with the app credentials.
auth1 = tweepy.OAuthHandler(AppConstants.ckey, AppConstants.csecret)
auth1.set_access_token(AppConstants.atoken, AppConstants.asecret)
# Open a MySQL connection; the cursor is created but never used below.
conn = MySQLdb.connect('localhost', 'root', 'tech', 'twitter')
c = conn.cursor()
# Start streaming tweets that match the configured query terms.
l = StreamListener(api=tweepy.API(wait_on_rate_limit=True))
streamer = tweepy.Stream(auth=auth1, listener=l)
streamer.filter(track=AppConstants.queries)
|
shiskr/nltk
|
twitterstream.py
|
Python
|
gpl-3.0
| 1,003
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import functools
import os
import re

# FIX: ``import mox`` appeared twice in the original import run; the
# duplicate is removed and the third-party module is grouped separately.
import mox
from oslo.config import cfg
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests import fake_processutils
import nova.tests.image.fake as fake_image
from nova.tests import matchers
from nova.tests.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
IMAGE_RAMDISK = '3'
IMAGE_RAW = '4'
IMAGE_VHD = '5'
IMAGE_ISO = '6'
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
'image_meta': {'name': 'fakemachine', 'size': 0,
'disk_format': 'ami',
'container_format': 'ami'},
},
IMAGE_KERNEL: {
'image_meta': {'name': 'fakekernel', 'size': 0,
'disk_format': 'aki',
'container_format': 'aki'},
},
IMAGE_RAMDISK: {
'image_meta': {'name': 'fakeramdisk', 'size': 0,
'disk_format': 'ari',
'container_format': 'ari'},
},
IMAGE_RAW: {
'image_meta': {'name': 'fakeraw', 'size': 0,
'disk_format': 'raw',
'container_format': 'bare'},
},
IMAGE_VHD: {
'image_meta': {'name': 'fakevhd', 'size': 0,
'disk_format': 'vhd',
'container_format': 'ovf'},
},
IMAGE_ISO: {
'image_meta': {'name': 'fakeiso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare'},
},
}
def set_image_fixtures():
    """Register every IMAGE_FIXTURES entry with a fresh fake image service.

    Note: each fixture's 'image_meta' dict is annotated in place with its
    image id before being handed to the fake service.
    """
    image_service = fake_image.FakeImageService()
    image_service.images.clear()
    for image_id, fixture in IMAGE_FIXTURES.items():
        meta = fixture['image_meta']
        meta['id'] = image_id
        image_service.create(None, meta)
def get_fake_device_info():
    """Build a fake ``block_device_info`` dict for XenAPI driver tests.

    Returns a dict shaped like the structure the compute manager passes to
    the virt driver: one iSCSI volume mounted at 'vda', a root device name,
    and empty ephemerals/swap.
    """
    # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
    # can be removed from the dict when LP bug #1087308 is fixed
    fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
    fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
    # FIX: the local was previously named ``fake``, which shadowed the
    # ``nova.virt.fake`` module imported at the top of this file.
    fake_device_info = {'block_device_mapping':
            [{'connection_info': {'driver_volume_type': 'iscsi',
                                  'data': {'sr_uuid': 'falseSR',
                                           'introduce_sr_keys': ['sr_type'],
                                           'sr_type': 'iscsi',
                                           'vdi_uuid': fake_vdi_uuid,
                                           'target_discovered': False,
                                           'target_iqn': 'foo_iqn:foo_volid',
                                           'target_portal': 'localhost:3260',
                                           'volume_id': 'foo_volid',
                                           'target_lun': 1,
                                           'auth_password': 'my-p@55w0rd',
                                           'auth_username': 'johndoe',
                                           'auth_method': u'CHAP'}, },
              'mount_device': 'vda',
              'delete_on_termination': False}, ],
            'root_device_name': '/dev/sda',
            'ephemerals': [],
            'swap': None, }
    return fake_device_info
def stub_vm_utils_with_vdi_attached_here(function):
    """
    vm_utils.with_vdi_attached_here needs to be stubbed out because it
    calls down to the filesystem to attach a vdi. This provides a
    decorator to handle that.

    Both vm_utils.vdi_attached_here and the fake image service's download
    are monkey-patched for the duration of the wrapped test and restored
    in a finally block, even if the test raises.
    """
    @functools.wraps(function)
    def decorated_function(self, *args, **kwargs):
        @contextlib.contextmanager
        def fake_vdi_attached_here(*args, **kwargs):
            # Yield a dummy device name instead of really attaching a VDI.
            fake_dev = 'fakedev'
            yield fake_dev
        def fake_image_download(*args, **kwargs):
            pass
        # Save the originals so they can be restored afterwards.
        orig_vdi_attached_here = vm_utils.vdi_attached_here
        orig_image_download = fake_image._FakeImageService.download
        try:
            vm_utils.vdi_attached_here = fake_vdi_attached_here
            fake_image._FakeImageService.download = fake_image_download
            return function(self, *args, **kwargs)
        finally:
            fake_image._FakeImageService.download = orig_image_download
            vm_utils.vdi_attached_here = orig_vdi_attached_here
    return decorated_function
def create_instance_with_system_metadata(context, instance_values):
    """Create an instance whose system_metadata carries its flavor info.

    Looks up the flavor named by instance_values['instance_type_id'],
    serializes it into the system_metadata field, and persists the
    instance via the db API.
    """
    flavor = db.flavor_get(context, instance_values['instance_type_id'])
    instance_values['system_metadata'] = flavors.save_flavor_info({}, flavor)
    return db.instance_create(context, instance_values)
class XenAPIVolumeTestCase(stubs.XenAPITestBase):
    """Unit tests for Volume operations."""
    def setUp(self):
        super(XenAPIVolumeTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.flags(disable_process_locking=True,
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass')
        db_fakes.stub_out_db_instance_api(self.stubs)
        # Template values used when creating test instances.
        self.instance_values = {'id': 1,
                  'project_id': self.user_id,
                  'user_id': 'fake',
                  'image_ref': 1,
                  'kernel_id': 2,
                  'ramdisk_id': 3,
                  'root_gb': 20,
                  'instance_type_id': '3',  # m1.large
                  'os_type': 'linux',
                  'architecture': 'x86-64'}
    def _create_volume(self, size=0):
        """Create a volume object."""
        vol = {}
        vol['size'] = size
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['host'] = 'localhost'
        vol['availability_zone'] = CONF.default_availability_zone
        vol['status'] = "creating"
        vol['attach_status'] = "detached"
        return db.volume_create(self.context, vol)
    @staticmethod
    def _make_connection_data():
        # Minimal iSCSI connection 'data' payload used across tests.
        return {
            'volume_id': 1,
            'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
            'target_portal': '127.0.0.1:3260,fake',
            'target_lun': None,
            'auth_method': 'CHAP',
            'auth_username': 'username',
            'auth_password': 'password',
        }
    @classmethod
    def _make_connection_info(cls):
        # Full connection_info dict as handed to the virt driver.
        return {
            'driver_volume_type': 'iscsi',
            'data': cls._make_connection_data()
        }
    def test_mountpoint_to_number(self):
        """Device names map to device numbers; unknown names yield -1."""
        cases = {
            'sda': 0,
            'sdp': 15,
            'hda': 0,
            'hdp': 15,
            'vda': 0,
            'xvda': 0,
            '0': 0,
            '10': 10,
            'vdq': -1,
            'sdq': -1,
            'hdq': -1,
            'xvdq': -1,
        }
        for (input, expected) in cases.iteritems():
            actual = volume_utils.mountpoint_to_number(input)
            self.assertEqual(actual, expected,
                    '%s yielded %s, not %s' % (input, actual, expected))
    def test_parse_volume_info_parsing_auth_details(self):
        """CHAP credentials are extracted into chapuser/chappassword."""
        result = volume_utils.parse_volume_info(
          self._make_connection_data())
        self.assertEquals('username', result['chapuser'])
        self.assertEquals('password', result['chappassword'])
    def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
        """A malformed mountpoint raises StorageError."""
        self.assertRaises(
            volume_utils.StorageError,
            volume_utils.get_device_number,
            'dev/sd')
    def test_attach_volume(self):
        # This shows how to test Ops classes' methods.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        instance = db.instance_create(self.context, self.instance_values)
        vm = xenapi_fake.create_vm(instance['name'], 'Running')
        result = conn.attach_volume(self._make_connection_info(),
                                    instance, '/dev/sdc')
        # check that the VM has a VBD attached to it
        # Get XenAPI record for VBD
        vbds = xenapi_fake.get_all('VBD')
        vbd = xenapi_fake.get_record('VBD', vbds[0])
        vm_ref = vbd['VM']
        self.assertEqual(vm_ref, vm)
    def test_attach_volume_raise_exception(self):
        # This shows how to test when exceptions are raised.
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForVolumeFailedTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        # An unknown driver_volume_type must surface VolumeDriverNotFound.
        self.assertRaises(exception.VolumeDriverNotFound,
                          conn.attach_volume,
                          {'driver_volume_type': 'nonexist'},
                          instance,
                          '/dev/sdc')
class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
    def setUp(self):
        """Wire up the fake XenAPI session, fake image service and the
        stubbed-out helpers shared by all VM-operation tests.
        """
        super(XenAPIVMTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.network = importutils.import_object(CONF.network_manager)
        self.flags(disable_process_locking=True,
                   instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',)
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', CONF.flat_network_bridge)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        stubs.stub_out_vm_methods(self.stubs)
        fake_processutils.stub_out_processutils_execute(self.stubs)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        fake_image.stub_out_image_service(self.stubs)
        set_image_fixtures()
        stubs.stubout_image_service_download(self.stubs)
        stubs.stubout_stream_disk(self.stubs)
        # Metadata injection touches the filesystem; make it a no-op.
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
                       fake_inject_instance_metadata)
        # Replace the real VDI copy with a cheap create_vdi call.
        def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
            name_label = "fakenamelabel"
            disk_type = "fakedisktype"
            virtual_size = 777
            return vm_utils.create_vdi(
                    session, sr_ref, instance, name_label, disk_type,
                    virtual_size)
        self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
    def tearDown(self):
        # Reset the shared fake image service between tests.
        fake_image.FakeImageService_reset()
        super(XenAPIVMTestCase, self).tearDown()
    def test_init_host(self):
        """init_host must detach only VBDs belonging to real instance VDIs.

        vbd0 (local root disk) and vbd2 (merely instance-like name, no
        nova_instance_uuid tag) survive; vbd1 (tagged VDI) is cleaned up.
        """
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())
        vm = vm_utils._get_this_vm_ref(session)
        # Local root disk
        vdi0 = xenapi_fake.create_vdi('compute', None)
        vbd0 = xenapi_fake.create_vbd(vm, vdi0)
        # Instance VDI
        vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
                other_config={'nova_instance_uuid': 'aaaa'})
        vbd1 = xenapi_fake.create_vbd(vm, vdi1)
        # Only looks like instance VDI
        vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
        vbd2 = xenapi_fake.create_vbd(vm, vdi2)
        self.conn.init_host(None)
        self.assertEquals(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
    def test_list_instances_0(self):
        """A fresh host lists no instances."""
        instances = self.conn.list_instances()
        self.assertEquals(instances, [])
    def test_list_instance_uuids_0(self):
        """A fresh host lists no instance uuids."""
        instance_uuids = self.conn.list_instance_uuids()
        self.assertEquals(instance_uuids, [])
    def test_list_instance_uuids(self):
        """All created instances' uuids are reported, order-insensitively."""
        uuids = []
        for x in xrange(1, 4):
            instance = self._create_instance(x)
            uuids.append(instance['uuid'])
        instance_uuids = self.conn.list_instance_uuids()
        self.assertEqual(len(uuids), len(instance_uuids))
        self.assertEqual(set(uuids), set(instance_uuids))
    def test_get_rrd_server(self):
        """The RRD server scheme/address come from the connection URL."""
        self.flags(xenapi_connection_url='myscheme://myaddress/')
        server_info = vm_utils._get_rrd_server()
        self.assertEqual(server_info[0], 'myscheme')
        self.assertEqual(server_info[1], 'myaddress')
    def test_get_diagnostics(self):
        """get_diagnostics parses the host RRD XML into the expected
        metric dictionary (canned fixture in vm_rrd.xml).
        """
        def fake_get_rrd(host, vm_uuid):
            # Serve the checked-in RRD fixture with whitespace stripped,
            # as _get_rrd would return it from the host.
            path = os.path.dirname(os.path.realpath(__file__))
            with open(os.path.join(path, 'vm_rrd.xml')) as f:
                return re.sub(r'\s', '', f.read())
        self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
        fake_diagnostics = {
            'vbd_xvdb_write': '0.0',
            'memory_target': '4294967296.0000',
            'memory_internal_free': '1415564.0000',
            'memory': '4294967296.0000',
            'vbd_xvda_write': '0.0',
            'cpu0': '0.0042',
            'vif_0_tx': '287.4134',
            'vbd_xvda_read': '0.0',
            'vif_0_rx': '1816.0144',
            'vif_2_rx': '0.0',
            'vif_2_tx': '0.0',
            'vbd_xvdb_read': '0.0',
            'last_update': '1328795567',
        }
        instance = self._create_instance()
        expected = self.conn.get_diagnostics(instance)
        self.assertThat(fake_diagnostics, matchers.DictMatches(expected))
    def test_get_vnc_console(self):
        """get_vnc_console points at the console of the running VM."""
        instance = self._create_instance()
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = vm_utils.lookup(session, instance['name'])
        console = conn.get_vnc_console(instance)
        # Note(sulo): We dont care about session id in test
        # they will always differ so strip that out
        actual_path = console['internal_access_path'].split('&')[0]
        expected_path = "/console?ref=%s" % str(vm_ref)
        self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_for_rescue(self):
instance = self._create_instance()
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
'Running')
# Set instance state to rescued
instance['vm_state'] = 'rescued'
console = conn.get_vnc_console(instance)
# Note(sulo): We dont care about session id in test
# they will always differ so strip that out
actual_path = console['internal_access_path'].split('&')[0]
expected_path = "/console?ref=%s" % str(rescue_vm)
self.assertEqual(expected_path, actual_path)
    def test_get_vnc_console_instance_not_ready(self):
        """Requesting a console for a building (not yet created) instance
        raises InstanceNotFound.
        """
        instance = {}
        # set instance name and state
        instance['name'] = 'fake-instance'
        instance['uuid'] = '00000000-0000-0000-0000-000000000000'
        instance['vm_state'] = 'building'
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InstanceNotFound,
                          conn.get_vnc_console, instance)
    def test_get_vnc_console_rescue_not_ready(self):
        """A rescued instance whose '-rescue' VM does not exist yet
        raises InstanceNotReady.
        """
        instance = {}
        instance['name'] = 'fake-rescue'
        instance['uuid'] = '00000000-0000-0000-0000-000000000001'
        instance['vm_state'] = 'rescued'
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InstanceNotReady,
                          conn.get_vnc_console, instance)
    def test_instance_snapshot_fails_with_no_primary_vdi(self):
        """Snapshot raises NovaException when the root VBD cannot be
        identified (its userdevice is bogus).
        """
        def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
                           vbd_type='disk', read_only=False, bootable=False,
                           osvol=False):
            # Force 'userdevice' to a non-numeric value so the primary
            # (root) VDI lookup fails downstream.
            vbd_rec = {'VM': vm_ref,
               'VDI': vdi_ref,
               'userdevice': 'fake',
               'currently_attached': False}
            vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
            xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
            return vbd_ref
        self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
        stubs.stubout_instance_snapshot(self.stubs)
        # Stubbing out firewall driver as previous stub sets alters
        # xml rpc result parsing
        stubs.stubout_firewall_driver(self.stubs, self.conn)
        instance = self._create_instance()
        image_id = "my_snapshot_id"
        self.assertRaises(exception.NovaException, self.conn.snapshot,
                          self.context, instance, image_id,
                          lambda *args, **kwargs: None)
def test_instance_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
image_id = "my_snapshot_id"
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
# xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
self.fake_upload_called = False
def fake_image_upload(_self, ctx, session, inst, vdi_uuids,
img_id):
self.fake_upload_called = True
self.assertEqual(ctx, self.context)
self.assertEqual(inst, instance)
self.assertTrue(isinstance(vdi_uuids, list))
self.assertEqual(img_id, image_id)
self.stubs.Set(glance.GlanceStore, 'upload_image',
fake_image_upload)
self.conn.snapshot(self.context, instance, image_id,
func_call_matcher.call)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, [instance['name']])
# Ensure task states changed in correct order
self.assertIsNone(func_call_matcher.match())
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
self.assertTrue(self.fake_upload_called)
    def create_vm_record(self, conn, os_type, name):
        """Fetch the Nova and XenAPI views of the (single) spawned VM and
        store them on self.vm_info / self.vm for later checks.
        """
        instances = conn.list_instances()
        self.assertEquals(instances, [name])
        # Get Nova record for VM
        vm_info = conn.get_info({'name': name})
        # Get XenAPI record for VM
        vms = [rec for ref, rec
               in xenapi_fake.get_all_records('VM').iteritems()
               if not rec['is_control_domain']]
        vm = vms[0]
        self.vm_info = vm_info
        self.vm = vm
def check_vm_record(self, conn, instance_type_id, check_injection):
instance_type = db.flavor_get(conn, instance_type_id)
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
vcpu_weight = instance_type['vcpu_weight']
self.assertEquals(self.vm_info['max_mem'], mem_kib)
self.assertEquals(self.vm_info['mem'], mem_kib)
self.assertEquals(self.vm['memory_static_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
if vcpu_weight == None:
self.assertEquals(self.vm['VCPUs_params'], {})
else:
self.assertEquals(self.vm['VCPUs_params'],
{'weight': str(vcpu_weight)})
# Check that the VM is running according to Nova
self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEquals(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
self.assertFalse('vm-data/hostname' in xenstore_data)
key = 'vm-data/networking/DEADBEEF0001'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEquals(tcpip_data,
{'broadcast': '192.168.1.255',
'dns': ['192.168.1.4', '192.168.1.3'],
'gateway': '192.168.1.1',
'gateway_v6': '2001:db8:0:1::1',
'ip6s': [{'enabled': '1',
'ip': '2001:db8:0:1::1',
'netmask': 64,
'gateway': '2001:db8:0:1::1'}],
'ips': [{'enabled': '1',
'ip': '192.168.1.100',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'},
{'enabled': '1',
'ip': '192.168.1.101',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'}],
'label': 'test1',
'mac': 'DE:AD:BE:EF:00:01'})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], '')
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
    def check_vm_params_for_linux(self):
        """Verify self.vm is a PV guest booted via pygrub with no
        kernel/ramdisk and no HVM settings.
        """
        self.assertEquals(self.vm['platform']['nx'], 'false')
        self.assertEquals(self.vm['PV_args'], '')
        self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
        # check that these are not set
        self.assertEquals(self.vm['PV_kernel'], '')
        self.assertEquals(self.vm['PV_ramdisk'], '')
        self.assertEquals(self.vm['HVM_boot_params'], {})
        self.assertEquals(self.vm['HVM_boot_policy'], '')
    def check_vm_params_for_linux_with_external_kernel(self):
        """Verify self.vm is a PV guest using an external kernel/ramdisk
        (PV_kernel/PV_ramdisk populated) and no HVM settings.
        """
        self.assertEquals(self.vm['platform']['nx'], 'false')
        self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
        self.assertNotEquals(self.vm['PV_kernel'], '')
        self.assertNotEquals(self.vm['PV_ramdisk'], '')
        # check that these are not set
        self.assertEquals(self.vm['HVM_boot_params'], {})
        self.assertEquals(self.vm['HVM_boot_policy'], '')
    def _list_vdis(self):
        """Return all VDI refs known to the (fake) XenAPI server."""
        url = CONF.xenapi_connection_url
        username = CONF.xenapi_connection_username
        password = CONF.xenapi_connection_password
        session = xenapi_conn.XenAPISession(url, username, password,
                                            fake.FakeVirtAPI())
        return session.call_xenapi('VDI.get_all')
    def _list_vms(self):
        """Return all VM refs known to the (fake) XenAPI server."""
        url = CONF.xenapi_connection_url
        username = CONF.xenapi_connection_username
        password = CONF.xenapi_connection_password
        session = xenapi_conn.XenAPISession(url, username, password,
                                            fake.FakeVirtAPI())
        return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if 'image-id' not in vdi_rec['other_config']:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                    instance_type_id="3", os_type="linux",
                    hostname="test", architecture="x86-64", instance_id=1,
                    injected_files=None, check_injection=False,
                    create_record=True, empty_dns=False,
                    block_device_info=None,
                    key_data=None):
        """Spawn a test instance through the driver, then verify the
        resulting Nova and XenAPI records via create_vm_record /
        check_vm_record.

        :param image_ref: glance image id (key into IMAGE_FIXTURES) or None
        :param create_record: if False, reuse the DB record for instance_id
        :param empty_dns: strip the DNS list from the fake network info
        :param check_injection: forwarded to check_vm_record
        """
        if injected_files is None:
            injected_files = []
        # Fake out inject_instance_metadata
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
                       fake_inject_instance_metadata)
        if create_record:
            instance_values = {'id': instance_id,
                               'project_id': self.project_id,
                               'user_id': self.user_id,
                               'image_ref': image_ref,
                               'kernel_id': kernel_id,
                               'ramdisk_id': ramdisk_id,
                               'root_gb': 20,
                               'instance_type_id': instance_type_id,
                               'os_type': os_type,
                               'hostname': hostname,
                               'key_data': key_data,
                               'architecture': architecture}
            instance = create_instance_with_system_metadata(self.context,
                                                            instance_values)
        else:
            instance = db.instance_get(self.context, instance_id)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        if empty_dns:
            # NOTE(tr3buchet): this is a terrible way to do this...
            network_info[0]['network']['subnets'][0]['dns'] = []
        image_meta = {}
        if image_ref:
            image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
        self.conn.spawn(self.context, instance, image_meta, injected_files,
                        'herp', network_info, block_device_info)
        self.create_vm_record(self.conn, os_type, instance['name'])
        self.check_vm_record(self.conn, instance_type_id, check_injection)
        self.assertTrue(instance['os_type'])
        self.assertTrue(instance['architecture'])
    def test_spawn_empty_dns(self):
        # Test spawning with an empty dns list.
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         empty_dns=True)
        self.check_vm_params_for_linux()
    def test_spawn_not_enough_memory(self):
        """Spawning a flavor larger than host free memory raises
        InsufficientFreeMemory.
        """
        self.assertRaises(exception.InsufficientFreeMemory,
                          self._test_spawn,
                          '1', 2, 3, "4")  # m1.xlarge
    def test_spawn_fail_cleanup_1(self):
        """Simulates an error while downloading an image.

        Verifies that the VM and VDIs created are properly cleaned up.
        """
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_2(self):
        """Simulates an error while creating VM record.

        Verifies that the VM and VDIs created are properly cleaned up.
        """
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_create_vm(self.stubs)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_3(self):
        """Simulates an error while attaching disks.

        Verifies that the VM and VDIs created are properly cleaned up.
        """
        stubs.stubout_attach_disks(self.stubs)
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_raw_glance(self):
        """Spawn from a raw glance image; raw images boot as HVM guests."""
        self._test_spawn(IMAGE_RAW, None, None)
        self.check_vm_params_for_windows()
    def test_spawn_vhd_glance_linux(self):
        """Spawn a Linux guest from a VHD glance image (PV boot)."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.check_vm_params_for_linux()
    def test_spawn_vhd_glance_windows(self):
        """Spawn a Windows guest from a VHD glance image (HVM boot)."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="windows", architecture="i386",
                         instance_type_id=5)
        self.check_vm_params_for_windows()
    def test_spawn_iso_glance(self):
        """Spawn a Windows guest from an ISO glance image (HVM boot)."""
        self._test_spawn(IMAGE_ISO, None, None,
                         os_type="windows", architecture="i386")
        self.check_vm_params_for_windows()
    def test_spawn_glance(self):
        """Spawn a machine image with separate kernel and ramdisk images
        and verify the resulting PV-with-external-kernel VM params.
        """
        def fake_fetch_disk_image(context, session, instance, name_label,
                                  image_id, image_type):
            # Fake a successful image download by creating an empty VDI
            # of the right role instead of streaming real bits.
            sr_ref = vm_utils.safe_find_sr(session)
            image_type_str = vm_utils.ImageType.to_string(image_type)
            vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
                    name_label, image_type_str, "20")
            vdi_role = vm_utils.ImageType.get_role(image_type)
            vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
            return {vdi_role: dict(uuid=vdi_uuid, file=None)}
        self.stubs.Set(vm_utils, '_fetch_disk_image',
                       fake_fetch_disk_image)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK)
        self.check_vm_params_for_linux_with_external_kernel()
    def test_spawn_boot_from_volume_no_image_meta(self):
        """Boot-from-volume spawn with no glance image at all."""
        dev_info = get_fake_device_info()
        self._test_spawn(None, None, None,
                         block_device_info=dev_info)
    def test_spawn_boot_from_volume_with_image_meta(self):
        # NOTE(review): despite its name this test passes image_ref=None,
        # so no image_meta reaches spawn — it is currently identical to
        # test_spawn_boot_from_volume_no_image_meta. TODO: confirm intent
        # and pass a real image ref here.
        dev_info = get_fake_device_info()
        self._test_spawn(None, None, None,
                         block_device_info=dev_info)
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
input = kwargs.get('process_input', None)
self.assertNotEqual(input, None)
config = [line.strip() for line in input.split("\n")]
# Find the start of eth0 configuration and check it
index = config.index('auto eth0')
self.assertEquals(config[index + 1:index + 8], [
'iface eth0 inet static',
'address 192.168.1.100',
'netmask 255.255.255.0',
'broadcast 192.168.1.255',
'gateway 192.168.1.1',
'dns-nameservers 192.168.1.3 192.168.1.4',
''])
self._tee_executed = True
return '', ''
def _readlink_handler(cmd_parts, **kwargs):
return os.path.realpath(cmd_parts[2]), ''
fake_processutils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
(r'readlink -nm.*', _readlink_handler),
])
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
    def test_spawn_netinject_xenstore(self):
        """When a guest agent is detected in the image, network config is
        injected via xenstore and the file-based 'tee' path must NOT run.
        """
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
        self._tee_executed = False
        def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
            # When mounting, create real files under the mountpoint to simulate
            # files in the mounted filesystem
            # mount point will be the last item of the command list
            self._tmpdir = cmd[len(cmd) - 1]
            LOG.debug(_('Creating files in %s to simulate guest agent'),
                      self._tmpdir)
            os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
            # Touch the file using open
            open(os.path.join(self._tmpdir, 'usr', 'sbin',
                'xe-update-networking'), 'w').close()
            return '', ''
        def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
            LOG.debug(_('Removing simulated guest agent files in %s'),
                      self._tmpdir)
            os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
                'xe-update-networking'))
            os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
            os.rmdir(os.path.join(self._tmpdir, 'usr'))
            return '', ''
        def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
            self._tee_executed = True
            return '', ''
        fake_processutils.fake_execute_set_repliers([
            (r'mount', _mount_handler),
            (r'umount', _umount_handler),
            (r'tee.*interfaces', _tee_handler)])
        self._test_spawn('1', 2, 3, check_injection=True)
        # tee must not run in this case, where an injection-capable
        # guest agent is detected
        self.assertFalse(self._tee_executed)
    def test_spawn_injects_auto_disk_config_to_xenstore(self):
        """spawn must call inject_auto_disk_config for the new instance."""
        instance = self._create_instance(spawn=False)
        self.mox.StubOutWithMock(self.conn._vmops, 'inject_auto_disk_config')
        self.conn._vmops.inject_auto_disk_config(instance, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conn.spawn(self.context, instance,
                        IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
    def test_spawn_vlanmanager(self):
        """Spawn an instance whose networking is provided by VlanManager."""
        self.flags(network_manager='nova.network.manager.VlanManager',
                   vlan_interface='fake0')
        def dummy(*args, **kwargs):
            pass
        # VIF creation is irrelevant to this test; stub it out.
        self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
        # Reset network table
        xenapi_fake.reset_table('network')
        # Instance id = 2 will use vlan network (see db/fakes.py)
        ctxt = self.context.elevated()
        instance = self._create_instance(2, False)
        networks = self.network.db.network_get_all(ctxt)
        for network in networks:
            self.network.set_network_host(ctxt, network)
        self.network.allocate_for_instance(ctxt,
                          instance_id=2,
                          instance_uuid='00000000-0000-0000-0000-000000000002',
                          host=CONF.host,
                          vpn=None,
                          rxtx_factor=3,
                          project_id=self.project_id,
                          macs=None)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         instance_id=2,
                         create_record=False)
        # TODO(salvatore-orlando): a complete test here would require
        # a check for making sure the bridge for the VM's VIF is
        # consistent with bridge specified in nova db
    def test_spawn_with_network_qos(self):
        """Every VIF of a spawned instance gets a ratelimit QoS whose kbps
        derives from the network's rxtx factor.
        """
        self._create_instance()
        for vif_ref in xenapi_fake.get_all('VIF'):
            vif_rec = xenapi_fake.get_record('VIF', vif_ref)
            self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
            self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
                              str(3 * 10 * 1024))
    def test_spawn_ssh_key_injection(self):
        # Test spawning with key_data on an instance.  Should use
        # agent file injection.
        self.flags(xenapi_use_agent_default=True)
        actual_injected_files = []
        def fake_inject_file(self, method, args):
            # Record each (path, contents) pair the agent is asked to write.
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        def fake_encrypt_text(sshkey, new_pass):
            # RSA keys also get pushed through ssh_encrypt_text for the
            # set-admin-password path; verify the key reaches it.
            self.assertEqual("ssh-rsa fake_keydata", sshkey)
            return "fake"
        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
        expected_data = ('\n# The following ssh key was injected by '
                         'Nova\nssh-rsa fake_keydata\n')
        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-rsa fake_keydata')
        self.assertEquals(actual_injected_files, injected_files)
    def test_spawn_ssh_key_injection_non_rsa(self):
        # Test spawning with key_data on an instance.  Should use
        # agent file injection.
        self.flags(xenapi_use_agent_default=True)
        actual_injected_files = []
        def fake_inject_file(self, method, args):
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        def fake_encrypt_text(sshkey, new_pass):
            # Non-RSA keys must never reach ssh_encrypt_text.
            raise NotImplementedError("Should not be called")
        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
        expected_data = ('\n# The following ssh key was injected by '
                         'Nova\nssh-dsa fake_keydata\n')
        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-dsa fake_keydata')
        self.assertEquals(actual_injected_files, injected_files)
    def test_spawn_injected_files(self):
        # Test spawning with injected_files.
        self.flags(xenapi_use_agent_default=True)
        actual_injected_files = []
        def fake_inject_file(self, method, args):
            # Record each (path, contents) pair the agent is asked to write.
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        injected_files = [('/tmp/foo', 'foobar')]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         injected_files=injected_files)
        self.check_vm_params_for_linux()
        self.assertEquals(actual_injected_files, injected_files)
def test_spawn_agent_upgrade(self):
self.flags(xenapi_use_agent_default=True)
actual_injected_files = []
def fake_agent_build(_self, *args):
return {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf"}
self.stubs.Set(self.conn.virtapi, 'agent_build_get_by_triple',
fake_agent_build)
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
def test_spawn_agent_upgrade_fails_silently(self):
self.flags(xenapi_use_agent_default=True)
actual_injected_files = []
def fake_agent_build(_self, *args):
return {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf"}
self.stubs.Set(self.conn.virtapi, 'agent_build_get_by_triple',
fake_agent_build)
def fake_agent_update(self, method, args):
raise xenapi_fake.Failure(["fake_error"])
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_agentupdate', fake_agent_update)
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
def _test_spawn_fails_with(self, trigger, expected_exception):
self.flags(xenapi_use_agent_default=True)
self.flags(agent_version_timeout=0)
actual_injected_files = []
def fake_agent_version(self, method, args):
raise xenapi_fake.Failure([trigger])
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_version', fake_agent_version)
self.assertRaises(expected_exception, self._test_spawn,
IMAGE_VHD, None, None, os_type="linux", architecture="x86-64")
    def test_spawn_fails_with_agent_timeout(self):
        """A TIMEOUT reply from the agent maps to AgentTimeout."""
        self._test_spawn_fails_with("TIMEOUT:fake", exception.AgentTimeout)
    def test_spawn_fails_with_agent_not_implemented(self):
        """A NOT IMPLEMENTED reply maps to AgentNotImplemented."""
        self._test_spawn_fails_with("NOT IMPLEMENTED:fake",
                                    exception.AgentNotImplemented)
    def test_spawn_fails_with_agent_error(self):
        """Any other agent failure maps to the generic AgentError."""
        self._test_spawn_fails_with("fake_error", exception.AgentError)
def test_spawn_fails_with_agent_bad_return(self):
self.flags(xenapi_use_agent_default=True)
self.flags(agent_version_timeout=0)
actual_injected_files = []
def fake_agent_version(self, method, args):
return xenapi_fake.as_json(returncode='-1', message='fake')
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_version', fake_agent_version)
self.assertRaises(exception.AgentError, self._test_spawn,
IMAGE_VHD, None, None, os_type="linux", architecture="x86-64")
def test_spawn_fails_agent_not_implemented(self):
# Test spawning with injected_files.
self.flags(xenapi_use_agent_default=True)
self.flags(agent_version_timeout=0)
actual_injected_files = []
def fake_agent_version(self, method, args):
raise xenapi_fake.Failure(["NOT IMPLEMENTED:fake"])
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_version', fake_agent_version)
self.assertRaises(exception.AgentNotImplemented, self._test_spawn,
IMAGE_VHD, None, None, os_type="linux", architecture="x86-64")
    def test_rescue(self):
        """rescue() builds a '-rescue' VM that reuses the instance's root
        disk but does NOT attach its swap disk.
        """
        instance = self._create_instance()
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())
        vm_ref = vm_utils.lookup(session, instance['name'])
        swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
        root_vdi_ref = xenapi_fake.create_vdi('root', None)
        xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1)
        xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        conn.rescue(self.context, instance, [], image_meta, '')
        vm = xenapi_fake.get_record('VM', vm_ref)
        rescue_name = "%s-rescue" % vm["name_label"]
        rescue_ref = vm_utils.lookup(session, rescue_name)
        rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
        vdi_uuids = []
        for vbd_uuid in rescue_vm["VBDs"]:
            vdi_uuids.append(xenapi_fake.get_record('VBD', vbd_uuid)["VDI"])
        # The swap VDI must not be attached to the rescue VM.
        self.assertTrue("swap" not in vdi_uuids)
    def test_unrescue(self):
        """unrescue() succeeds when a '-rescue' VM exists for the instance."""
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Unrescue expects the original instance to be powered off
        conn.power_off(instance)
        rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
                                          'Running')
        conn.unrescue(instance, None)
    def test_unrescue_not_in_rescue(self):
        """unrescue() of an instance with no rescue VM raises
        InstanceNotInRescueMode.
        """
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Ensure that it will not unrescue a non-rescued instance.
        self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
                          instance, None)
    def test_finish_revert_migration(self):
        """The driver delegates finish_revert_migration to its VMOps."""
        instance = self._create_instance()
        class VMOpsMock():
            # Minimal stand-in for VMOps that records the delegation.
            def __init__(self):
                self.finish_revert_migration_called = False
            def finish_revert_migration(self, instance, block_info,
                                        power_on):
                self.finish_revert_migration_called = True
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn._vmops = VMOpsMock()
        conn.finish_revert_migration(instance, None)
        self.assertTrue(conn._vmops.finish_revert_migration_called)
    def test_reboot_hard(self):
        """A HARD reboot of a running instance completes without error."""
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.reboot(self.context, instance, None, "HARD")
    def test_poll_rebooting_instances(self):
        """poll_rebooting_instances issues a compute-API reboot for each
        instance handed to it.
        """
        self.mox.StubOutWithMock(compute_api.API, 'reboot')
        compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg())
        self.mox.ReplayAll()
        instance = self._create_instance()
        instances = [instance]
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.poll_rebooting_instances(60, instances)
    def test_reboot_soft(self):
        """A SOFT reboot of a running instance completes without error."""
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.reboot(self.context, instance, None, "SOFT")
    def test_reboot_halted(self):
        """Rebooting a Halted VM starts it: power state ends up Running."""
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())
        instance = self._create_instance(spawn=False)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(instance['name'], 'Halted')
        conn.reboot(self.context, instance, None, "SOFT")
        vm_ref = vm_utils.lookup(session, instance['name'])
        vm = xenapi_fake.get_record('VM', vm_ref)
        self.assertEquals(vm['power_state'], 'Running')
    def test_reboot_unknown_state(self):
        """Rebooting a VM in an unrecognized power state raises a
        XenAPI Failure.
        """
        instance = self._create_instance(spawn=False)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(instance['name'], 'Unknown')
        self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
                          instance, None, "SOFT")
    def test_reboot_rescued(self):
        """Rebooting a rescued instance looks up the VM with the
        check_rescue flag set (third positional arg True).
        """
        instance = self._create_instance()
        instance['vm_state'] = vm_states.RESCUED
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        real_result = vm_utils.lookup(conn._session, instance['name'])
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(conn._session, instance['name'],
                        True).AndReturn(real_result)
        self.mox.ReplayAll()
        conn.reboot(self.context, instance, None, "SOFT")
    def test_get_console_output_succeeds(self):
        """The driver forwards get_console_output to VMOps and returns
        its result unchanged.
        """
        def fake_get_console_output(instance):
            self.assertEqual("instance", instance)
            return "console_log"
        self.stubs.Set(self.conn._vmops, 'get_console_output',
                       fake_get_console_output)
        self.assertEqual(self.conn.get_console_output("instance"),
                         "console_log")
    def _test_maintenance_mode(self, find_host, find_aggregate):
        """Drive host_maintenance_mode with stubbed host/aggregate lookup.

        :param find_host: whether _host_find should succeed ('bar') or
                          raise NoValidHost
        :param find_aggregate: whether the host belongs to an aggregate
        """
        real_call_xenapi = self.conn._session.call_xenapi
        instance = self._create_instance(spawn=True)
        api_calls = {}
        # Record all the xenapi calls, and return a fake list of hosts
        # for the host.get_all call
        def fake_call_xenapi(method, *args):
            api_calls[method] = args
            if method == 'host.get_all':
                return ['foo', 'bar', 'baz']
            return real_call_xenapi(method, *args)
        self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
        def fake_aggregate_get(context, host, key):
            if find_aggregate:
                return [{'fake': 'aggregate'}]
            else:
                return []
        self.stubs.Set(self.conn.virtapi, 'aggregate_get_by_host',
                       fake_aggregate_get)
        def fake_host_find(context, session, src, dst):
            if find_host:
                return 'bar'
            else:
                raise exception.NoValidHost("I saw this one coming...")
        self.stubs.Set(host, '_host_find', fake_host_find)
        result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
        self.assertEqual(result, 'on_maintenance')
        # We expect the VM.pool_migrate call to have been called to
        # migrate our instance to the 'bar' host
        # NOTE(review): these three assertTrue calls pass a second
        # positional argument, which unittest treats as the failure
        # *message*, not an expected value — so they only check
        # truthiness. Tightening them to assertEqual may expose real
        # mismatches; confirm expected values before changing.
        expected = (instance['uuid'], 'bar', {})
        self.assertTrue(api_calls.get('VM.pool_migrate'), expected)
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertTrue(instance['vm_state'], vm_states.ACTIVE)
        self.assertTrue(instance['task_state'], task_states.MIGRATING)
    def test_maintenance_mode(self):
        """Maintenance mode succeeds when host and aggregate are found."""
        self._test_maintenance_mode(True, True)
    def test_maintenance_mode_no_host(self):
        """No migration target host available -> NoValidHost."""
        self.assertRaises(exception.NoValidHost,
                          self._test_maintenance_mode, False, True)
    def test_maintenance_mode_no_aggregate(self):
        """Host not in any aggregate -> NotFound."""
        self.assertRaises(exception.NotFound,
                          self._test_maintenance_mode, True, False)
    def test_uuid_find(self):
        """host._uuid_find maps an instance display name (built from
        instance_name_template) back to the matching instance uuid.
        """
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        fake_inst = fake_instance.fake_db_instance(id=123)
        fake_inst2 = fake_instance.fake_db_instance(id=456)
        db.instance_get_all_by_host(self.context, fake_inst['host'],
                                    columns_to_join=None
                                    ).AndReturn([fake_inst, fake_inst2])
        self.mox.ReplayAll()
        expected_name = CONF.instance_name_template % fake_inst['id']
        inst_uuid = host._uuid_find(self.context, fake_inst['host'],
                                    expected_name)
        self.assertEqual(inst_uuid, fake_inst['uuid'])
    def test_session_virtapi(self):
        """On a slave host, _get_host_uuid must consult the virtapi's
        aggregate_get_by_host (here stubbed to record the call and raise).
        """
        was = {'called': False}
        def fake_aggregate_get_by_host(self, *args, **kwargs):
            was['called'] = True
            raise Exception()
        self.stubs.Set(self.conn._session._virtapi, "aggregate_get_by_host",
                       fake_aggregate_get_by_host)
        self.stubs.Set(self.conn._session, "is_slave", True)
        # The raised Exception is expected; we only care that the
        # virtapi method was reached.
        try:
            self.conn._session._get_host_uuid()
        except Exception:
            pass
        self.assertTrue(was['called'])
    def test_per_instance_usage_running(self):
        """Running (and paused) instances are reported with their
        flavor's memory_mb in get_per_instance_usage.
        """
        instance = self._create_instance(spawn=True)
        instance_type = flavors.get_flavor(3)
        expected = {instance['uuid']: {'memory_mb': instance_type['memory_mb'],
                                       'uuid': instance['uuid']}}
        actual = self.conn.get_per_instance_usage()
        self.assertEqual(expected, actual)
        # Paused instances still consume resources:
        self.conn.pause(instance)
        actual = self.conn.get_per_instance_usage()
        self.assertEqual(expected, actual)
    def test_per_instance_usage_suspended(self):
        # Suspended instances do not consume memory:
        instance = self._create_instance(spawn=True)
        self.conn.suspend(instance)
        actual = self.conn.get_per_instance_usage()
        self.assertEqual({}, actual)
    def test_per_instance_usage_halted(self):
        """Powered-off instances do not appear in per-instance usage."""
        instance = self._create_instance(spawn=True)
        self.conn.power_off(instance)
        actual = self.conn.get_per_instance_usage()
        self.assertEqual({}, actual)
    def _create_instance(self, instance_id=1, spawn=True):
        """Creates and spawns a test instance.

        Returns the instance record; when *spawn* is True the instance
        is also spawned through the driver with fake network info.
        """
        instance_values = {
            'id': instance_id,
            'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
            'display_name': 'host-%d' % instance_id,
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 20,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'vm_mode': 'hvm',
            'architecture': 'x86-64'}
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        if spawn:
            self.conn.spawn(self.context, instance, image_meta, [], 'herp',
                            network_info)
        return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
    """Unit tests for Diffie-Hellman code."""
    def setUp(self):
        super(XenAPIDiffieHellmanTestCase, self).setUp()
        self.alice = agent.SimpleDH()
        self.bob = agent.SimpleDH()

    def test_shared(self):
        # Both parties must derive the same shared secret from the
        # other party's public key.
        alice_pub = self.alice.get_public()
        bob_pub = self.bob.get_public()
        alice_shared = self.alice.compute_shared(bob_pub)
        bob_shared = self.bob.compute_shared(alice_pub)
        self.assertEquals(alice_shared, bob_shared)

    def _test_encryption(self, message):
        """Round-trip *message* through encrypt/decrypt and verify it."""
        enc = self.alice.encrypt(message)
        # Ciphertext must not carry a trailing newline.
        self.assertFalse(enc.endswith('\n'))
        dec = self.bob.decrypt(enc)
        self.assertEquals(dec, message)

    def test_encrypt_simple_message(self):
        self._test_encryption('This is a simple message.')

    def test_encrypt_message_with_newlines_at_end(self):
        self._test_encryption('This message has a newline at the end.\n')

    def test_encrypt_many_newlines_at_end(self):
        self._test_encryption('Message with lotsa newlines.\n\n\n')

    def test_encrypt_newlines_inside_message(self):
        self._test_encryption('Message\nwith\ninterior\nnewlines.')

    def test_encrypt_with_leading_newlines(self):
        self._test_encryption('\n\nMessage with leading newlines.')

    def test_encrypt_really_long_message(self):
        # String multiplication replaces a hand-rolled join over
        # xrange; it builds the identical 4096-character message.
        self._test_encryption('abcd' * 1024)
class XenAPIMigrateInstance(stubs.XenAPITestBase):
    """Unit test for verifying migration-related actions."""

    def setUp(self):
        super(XenAPIMigrateInstance, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', CONF.flat_network_bridge)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': None,
                                'ramdisk_id': None,
                                'root_gb': 5,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        migration_values = {
            'source_compute': 'nova-compute',
            'dest_compute': 'nova-compute',
            'dest_host': '10.127.5.114',
            'status': 'post-migrating',
            'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
            'old_instance_type_id': 5,
            'new_instance_type_id': 1
        }
        self.migration = db.migration_create(
            context.get_admin_context(), migration_values)

        fake_processutils.stub_out_processutils_execute(self.stubs)
        stubs.stub_out_migration_methods(self.stubs)
        stubs.stubout_get_this_vm_uuid(self.stubs)

        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
                       fake_inject_instance_metadata)

    def test_resize_xenserver_6(self):
        # On XenServer 6.x an offline VDI_resize is used.
        instance = db.instance_create(self.context, self.instance_values)
        called = {'resize': False}

        def fake_vdi_resize(*args, **kwargs):
            called['resize'] = True

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize", fake_vdi_resize)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(6, 0, 0),
                              product_brand='XenServer')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
        vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
        conn._vmops._resize_instance(instance,
                                     {'uuid': vdi_uuid, 'ref': vdi_ref})
        self.assertEqual(called['resize'], True)

    def test_resize_xcp(self):
        # XCP also uses offline VDI_resize.
        instance = db.instance_create(self.context, self.instance_values)
        called = {'resize': False}

        def fake_vdi_resize(*args, **kwargs):
            called['resize'] = True

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize", fake_vdi_resize)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(1, 4, 99),
                              product_brand='XCP')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
        vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
        conn._vmops._resize_instance(instance,
                                     {'uuid': vdi_uuid, 'ref': vdi_ref})
        self.assertEqual(called['resize'], True)

    def test_migrate_disk_and_power_off(self):
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        instance_type = db.flavor_get_by_name(self.context, 'm1.large')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', instance_type, None)

    def test_migrate_disk_and_power_off_passes_exceptions(self):
        # Errors while migrating the VHD must bubble up as
        # MigrationError instead of being swallowed.
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        instance_type = db.flavor_get_by_name(self.context, 'm1.large')

        def fake_raise(*args, **kwargs):
            raise exception.MigrationError(reason='test failure')
        self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', instance_type, None)

    def _test_revert_migrate(self, power_on):
        """finish_migration then finish_revert_migration; the VM is only
        started when *power_on* is True.
        """
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        self.called = False
        self.fake_vm_start_called = False
        self.fake_finish_revert_migration_called = False

        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True

        def fake_vdi_resize(*args, **kwargs):
            self.called = True

        def fake_finish_revert_migration(*args, **kwargs):
            self.fake_finish_revert_migration_called = True

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
                       fake_finish_revert_migration)

        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        base = xenapi_fake.create_vdi('hurr', 'fake')
        base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
        cow = xenapi_fake.create_vdi('durr', 'fake')
        cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy=base_uuid, cow=cow_uuid),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, power_on)

        conn.finish_revert_migration(instance, network_info)
        self.assertEqual(self.fake_finish_revert_migration_called, True)

    def test_revert_migrate_power_on(self):
        self._test_revert_migrate(True)

    def test_revert_migrate_power_off(self):
        self._test_revert_migrate(False)

    def _test_finish_migrate(self, power_on):
        """finish_migration resizes the VDI and honours *power_on*."""
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        self.called = False
        self.fake_vm_start_called = False

        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True

        def fake_vdi_resize(*args, **kwargs):
            self.called = True

        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)

        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, power_on)

    def test_finish_migrate_power_on(self):
        self._test_finish_migrate(True)

    def test_finish_migrate_power_off(self):
        self._test_finish_migrate(False)

    def test_finish_migrate_no_local_storage(self):
        # A flavor with root_gb=0 must not trigger a VDI resize.
        tiny_type = flavors.get_flavor_by_name('m1.tiny')
        tiny_type_id = tiny_type['id']
        self.instance_values.update({'instance_type_id': tiny_type_id,
                                     'root_gb': 0})
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)

        def fake_vdi_resize(*args, **kwargs):
            raise Exception("This shouldn't be called")

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True)

    def test_finish_migrate_no_resize_vdi(self):
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)

        def fake_vdi_resize(*args, **kwargs):
            raise Exception("This shouldn't be called")

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)

        # Resize instance would be determined by the compute call
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=False)

    @stub_vm_utils_with_vdi_attached_here
    def test_migrate_too_many_partitions_no_resize_down(self):
        # More than one partition makes a resize-down unsafe.
        instance_values = self.instance_values
        instance_values['root_gb'] = 40
        instance = db.instance_create(self.context, instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        instance_type = db.flavor_get_by_name(self.context, 'm1.small')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_get_partitions(partition):
            return [(1, 2, 3, 4), (1, 2, 3, 4)]

        self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)

        self.assertRaises(exception.InstanceFaultRollback,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', instance_type, None)

    @stub_vm_utils_with_vdi_attached_here
    def test_migrate_bad_fs_type_no_resize_down(self):
        # Only resizable filesystems (e.g. ext3/ext4) allow resize-down.
        instance_values = self.instance_values
        instance_values['root_gb'] = 40
        instance = db.instance_create(self.context, instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        instance_type = db.flavor_get_by_name(self.context, 'm1.small')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_get_partitions(partition):
            return [(1, 2, 3, "ext2")]

        self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)

        self.assertRaises(exception.InstanceFaultRollback,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', instance_type, None)

    def test_migrate_rollback_when_resize_down_fs_fails(self):
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # NOTE: named 'ops' (not 'vmops') so the local does not shadow
        # the vmops module used elsewhere in this class.
        ops = conn._vmops
        virtapi = ops._virtapi

        self.mox.StubOutWithMock(ops, '_resize_ensure_vm_is_shutdown')
        self.mox.StubOutWithMock(ops, '_apply_orig_vm_name_label')
        self.mox.StubOutWithMock(vm_utils, 'resize_disk')
        self.mox.StubOutWithMock(ops, '_migrate_vhd')
        self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
        self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
        self.mox.StubOutWithMock(ops, '_restore_orig_vm_and_cleanup_orphan')
        self.mox.StubOutWithMock(virtapi, 'instance_update')

        instance = {'auto_disk_config': True, 'uuid': 'uuid'}
        vm_ref = "vm_ref"
        dest = "dest"
        instance_type = "type"
        sr_path = "sr_path"

        # Expected call sequence: shutdown, relabel, resize, then the
        # VHD migration fails and the rollback path runs.
        virtapi.instance_update(self.context, 'uuid', {'progress': 20.0})
        ops._resize_ensure_vm_is_shutdown(instance, vm_ref)
        ops._apply_orig_vm_name_label(instance, vm_ref)
        old_vdi_ref = "old_ref"
        vm_utils.get_vdi_for_vm_safely(ops._session, vm_ref).AndReturn(
            (old_vdi_ref, None))
        virtapi.instance_update(self.context, 'uuid', {'progress': 40.0})
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"
        vm_utils.resize_disk(ops._session, instance, old_vdi_ref,
                             instance_type).AndReturn(
                                 (new_vdi_ref, new_vdi_uuid))
        virtapi.instance_update(self.context, 'uuid', {'progress': 60.0})
        ops._migrate_vhd(instance, new_vdi_uuid, dest,
                         sr_path, 0).AndRaise(
                             exception.ResizeError(reason="asdf"))

        vm_utils.destroy_vdi(ops._session, new_vdi_ref)
        ops._restore_orig_vm_and_cleanup_orphan(instance, None)

        self.mox.ReplayAll()

        self.assertRaises(exception.InstanceFaultRollback,
                          ops._migrate_disk_resizing_down, self.context,
                          instance, dest, instance_type, vm_ref, sr_path)

    def test_resize_ensure_vm_is_shutdown_cleanly(self):
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        ops = conn._vmops
        # 'fake_inst' avoids shadowing the fake_instance module.
        fake_inst = {'uuid': 'uuid'}

        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')

        # Clean shutdown succeeds: no hard shutdown expected.
        vm_utils.is_vm_shutdown(ops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(ops._session, fake_inst,
                                   "ref").AndReturn(True)

        self.mox.ReplayAll()

        ops._resize_ensure_vm_is_shutdown(fake_inst, "ref")

    def test_resize_ensure_vm_is_shutdown_forced(self):
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        ops = conn._vmops
        fake_inst = {'uuid': 'uuid'}

        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')

        # Clean shutdown fails, so a hard shutdown is forced.
        vm_utils.is_vm_shutdown(ops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(ops._session, fake_inst,
                                   "ref").AndReturn(False)
        vm_utils.hard_shutdown_vm(ops._session, fake_inst,
                                  "ref").AndReturn(True)

        self.mox.ReplayAll()

        ops._resize_ensure_vm_is_shutdown(fake_inst, "ref")

    def test_resize_ensure_vm_is_shutdown_fails(self):
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        ops = conn._vmops
        fake_inst = {'uuid': 'uuid'}

        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')

        # Both shutdown strategies fail: expect ResizeError.
        vm_utils.is_vm_shutdown(ops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(ops._session, fake_inst,
                                   "ref").AndReturn(False)
        vm_utils.hard_shutdown_vm(ops._session, fake_inst,
                                  "ref").AndReturn(False)

        self.mox.ReplayAll()

        self.assertRaises(exception.ResizeError,
            ops._resize_ensure_vm_is_shutdown, fake_inst, "ref")

    def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        ops = conn._vmops
        fake_inst = {'uuid': 'uuid'}

        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')

        # Already shut down: neither shutdown helper may be called.
        vm_utils.is_vm_shutdown(ops._session, "ref").AndReturn(True)

        self.mox.ReplayAll()

        ops._resize_ensure_vm_is_shutdown(fake_inst, "ref")
class XenAPIImageTypeTestCase(test.TestCase):
    """Test ImageType class."""

    def test_to_string(self):
        # A type id converts to its corresponding type string.
        type_string = vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL)
        self.assertEquals(type_string, vm_utils.ImageType.KERNEL_STR)

    def _assert_role(self, expected_role, image_type_id):
        """Assert that *image_type_id* maps to *expected_role*."""
        actual_role = vm_utils.ImageType.get_role(image_type_id)
        self.assertEquals(expected_role, actual_role)

    def test_get_image_role_kernel(self):
        self._assert_role('kernel', vm_utils.ImageType.KERNEL)

    def test_get_image_role_ramdisk(self):
        self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)

    def test_get_image_role_disk(self):
        self._assert_role('root', vm_utils.ImageType.DISK)

    def test_get_image_role_disk_raw(self):
        self._assert_role('root', vm_utils.ImageType.DISK_RAW)

    def test_get_image_role_disk_vhd(self):
        self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
    """Unit tests for code that detects the ImageType."""

    def assert_disk_type(self, image_meta, expected_disk_type):
        """Assert that *image_meta* is detected as *expected_disk_type*."""
        detected = vm_utils.determine_disk_image_type(image_meta)
        self.assertEqual(expected_disk_type, detected)

    def test_machine(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'ami'},
                              vm_utils.ImageType.DISK)

    def test_raw(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'raw'},
                              vm_utils.ImageType.DISK_RAW)

    def test_vhd(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'vhd'},
                              vm_utils.ImageType.DISK_VHD)

    def test_none(self):
        # No image metadata at all yields no disk type.
        self.assert_disk_type(None, None)
class XenAPIDetermineIsPVTestCase(test.TestCase):
    """Unit tests for code that detects the PV status based on ImageType."""

    def assert_pv_status(self, disk_image_type, os_type, expected_pv_status):
        """Assert determine_is_pv's verdict for the given type combo."""
        # Neither the session nor the VDI ref is consulted for these
        # image type / os_type combinations, so pass None for both.
        actual = vm_utils.determine_is_pv(None, None,
                                          disk_image_type, os_type)
        self.assertEqual(expected_pv_status, actual)

    def test_windows_vhd(self):
        self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'windows', False)

    def test_linux_vhd(self):
        self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'linux', True)

    def test_raw(self):
        self.assert_pv_status(vm_utils.ImageType.DISK_RAW, 'linux', False)

    def test_disk(self):
        self.assert_pv_status(vm_utils.ImageType.DISK, None, True)

    def test_iso(self):
        self.assert_pv_status(vm_utils.ImageType.DISK_ISO, None, False)

    def test_none(self):
        self.assert_pv_status(None, None, False)
class CompareVersionTestCase(test.TestCase):
    """Tests for vmops.cmp_version ordering semantics."""

    def test_less_than(self):
        # Test that cmp_version compares a as less than b.
        result = vmops.cmp_version('1.2.3.4', '1.2.3.5')
        self.assertTrue(result < 0)

    def test_greater_than(self):
        # Test that cmp_version compares a as greater than b.
        result = vmops.cmp_version('1.2.3.5', '1.2.3.4')
        self.assertTrue(result > 0)

    def test_equal(self):
        # Test that cmp_version compares a as equal to b.
        result = vmops.cmp_version('1.2.3.4', '1.2.3.4')
        self.assertTrue(result == 0)

    def test_non_lexical(self):
        # Components compare numerically, not as strings ('10' > '4').
        result = vmops.cmp_version('1.2.3.10', '1.2.3.4')
        self.assertTrue(result > 0)

    def test_length(self):
        # When prefixes match, the shorter version compares as smaller.
        result = vmops.cmp_version('1.2.3', '1.2.3.4')
        self.assertTrue(result < 0)
class XenAPIHostTestCase(stubs.XenAPITestBase):
    """Tests HostState, which holds metrics from XenServer that get
    reported back to the Schedulers.
    """
    def setUp(self):
        super(XenAPIHostTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    def test_host_state(self):
        # Values below come from the fake session's canned host record.
        stats = self.conn.get_host_stats()
        self.assertEquals(stats['disk_total'], 40000)
        self.assertEquals(stats['disk_used'], 20000)
        self.assertEquals(stats['host_memory_total'], 10)
        self.assertEquals(stats['host_memory_overhead'], 20)
        self.assertEquals(stats['host_memory_free'], 30)
        self.assertEquals(stats['host_memory_free_computed'], 40)
        self.assertEquals(stats['hypervisor_hostname'], 'fake-xenhost')
    def test_host_state_missing_sr(self):
        # A missing storage repository must propagate, not be masked.
        def fake_safe_find_sr(session):
            raise exception.StorageRepositoryNotFound('not there')
        self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
        self.assertRaises(exception.StorageRepositoryNotFound,
                          self.conn.get_host_stats)
    def _test_host_action(self, method, action, expected=None):
        # Helper: run a host action; the fake session echoes the
        # expected result string back.
        result = method('host', action)
        if not expected:
            expected = action
        self.assertEqual(result, expected)
    def test_host_reboot(self):
        self._test_host_action(self.conn.host_power_action, 'reboot')
    def test_host_shutdown(self):
        self._test_host_action(self.conn.host_power_action, 'shutdown')
    def test_host_startup(self):
        # XenAPI cannot power a host back on from the driver.
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'host', 'startup')
    def test_host_maintenance_on(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               True, 'on_maintenance')
    def test_host_maintenance_off(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               False, 'off_maintenance')
    def test_set_enable_host_enable(self):
        self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
    def test_set_enable_host_disable(self):
        self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
    def test_get_host_uptime(self):
        result = self.conn.get_host_uptime('host')
        self.assertEqual(result, 'fake uptime')
    def test_supported_instances_is_included_in_host_state(self):
        stats = self.conn.get_host_stats()
        self.assertTrue('supported_instances' in stats)
    def test_supported_instances_is_calculated_by_to_supported_instances(self):
        # get_host_stats must delegate to host.to_supported_instances;
        # the fake host record carries no capabilities (None).
        def to_supported_instances(somedata):
            self.assertEquals(None, somedata)
            return "SOMERETURNVALUE"
        self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
        stats = self.conn.get_host_stats()
        self.assertEquals("SOMERETURNVALUE", stats['supported_instances'])
class ToSupportedInstancesTestCase(test.TestCase):
    """Tests for host.to_supported_instances capability parsing."""

    def test_default_return_value(self):
        self.assertEquals([], host.to_supported_instances(None))

    def test_return_value(self):
        expected = [('x86_64', 'xapi', 'xen')]
        self.assertEquals(expected,
                          host.to_supported_instances([u'xen-3.0-x86_64']))

    def test_invalid_values_do_not_break(self):
        # Unparseable entries are skipped rather than raising.
        expected = [('x86_64', 'xapi', 'xen')]
        self.assertEquals(expected,
                          host.to_supported_instances([u'xen-3.0-x86_64',
                                                       'spam']))

    def test_multiple_values(self):
        expected = [('x86_64', 'xapi', 'xen'),
                    ('x86_32', 'xapi', 'hvm')]
        self.assertEquals(
            expected,
            host.to_supported_instances([u'xen-3.0-x86_64',
                                         'hvm-3.0-x86_32']))
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
    """Tests that the root partition is only resized when the instance
    is flagged auto_disk_config and the virt-layer fail-safes pass.
    """

    def setUp(self):
        super(XenAPIAutoDiskConfigTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': 2,
                                'ramdisk_id': 3,
                                'root_gb': 20,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False):
            pass

        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertIsPartitionCalled(self, called):
        """Attach the root disk and assert whether the partition/fs
        resize helper was invoked.
        """
        marker = {"partition_called": False}

        def fake_resize_part_and_fs(dev, start, old, new):
            marker["partition_called"] = True
        self.stubs.Set(vm_utils, "_resize_part_and_fs",
                       fake_resize_part_and_fs)

        # NOTE: a RequestContext local previously created here was
        # unused and has been removed.
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())
        disk_image_type = vm_utils.ImageType.DISK_VHD
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')

        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}

        self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
                                       vdis, disk_image_type)

        self.assertEqual(marker["partition_called"], called)

    def test_instance_not_auto_disk_config(self):
        """Should not partition unless instance is marked as
        auto_disk_config.
        """
        self.instance_values['auto_disk_config'] = False
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            # Two partitions trip the fail-safe: resizing is skipped.
            return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_passes_fail_safes(self):
        """Should partition if instance is marked as auto_disk_config=True and
        virt-layer specific fail-safe checks pass.
        """
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4')]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(True)
class XenAPIGenerateLocal(stubs.XenAPITestBase):
    """Test generating of local disks, like swap and ephemeral."""

    def setUp(self):
        super(XenAPIGenerateLocal, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': 2,
                                'ramdisk_id': 3,
                                'root_gb': 20,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False, empty=False, unpluggable=True):
            return session.call_xenapi('VBD.create', {'VM': vm_ref,
                                                      'VDI': vdi_ref})

        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertCalled(self, instance,
                     disk_image_type=vm_utils.ImageType.DISK_VHD):
        """Attach disks for *instance* and assert the stubbed
        generator (swap/ephemeral/iso) was invoked.
        """
        # NOTE: a RequestContext local previously created here was
        # unused and has been removed.
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']

        vdi_key = 'root'
        if disk_image_type == vm_utils.ImageType.DISK_ISO:
            vdi_key = 'iso'
        vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}

        self.called = False
        self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
                                       vdis, disk_image_type)
        self.assertTrue(self.called)

    def test_generate_swap(self):
        # Test swap disk generation.
        instance_values = dict(self.instance_values, instance_type_id=5)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_swap(*args, **kwargs):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)

        self.assertCalled(instance)

    def test_generate_ephemeral(self):
        # Test ephemeral disk generation.
        instance_values = dict(self.instance_values, instance_type_id=4)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_ephemeral(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)

        self.assertCalled(instance)

    def test_generate_iso_blank_root_disk(self):
        # Booting from an ISO without kernel/ramdisk generates a blank
        # root disk alongside the ISO.
        instance_values = dict(self.instance_values, instance_type_id=4)
        instance_values.pop('kernel_id')
        instance_values.pop('ramdisk_id')
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_ephemeral(*args):
            pass
        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)

        def fake_generate_iso(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
                       fake_generate_iso)

        self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
    """Tests bandwidth counter collection per-instance, per-VIF."""
    # Two fake VMs with domids 12 and 42; _vifmap maps the VIF device
    # number to its MAC address.
    FAKE_VMS = {'test1:ref': dict(name_label='test1',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='12',
                                  _vifmap={'0': "a:b:c:d...",
                                           '1': "e:f:12:q..."}),
                'test2:ref': dict(name_label='test2',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='42',
                                  _vifmap={'0': "a:3:c:d...",
                                           '1': "e:f:42:q..."}),
                }
    def setUp(self):
        super(XenAPIBWCountersTestCase, self).setUp()
        self.stubs.Set(vm_utils, 'list_vms',
                       XenAPIBWCountersTestCase._fake_list_vms)
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def _fake_get_vif_device_map(vm_rec):
            return vm_rec['_vifmap']
        self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
                       _fake_get_vif_device_map)
    @classmethod
    def _fake_list_vms(cls, session):
        return cls.FAKE_VMS.iteritems()
    @classmethod
    def _fake_fetch_bandwidth_mt(cls, session):
        # Simulates XenServer returning no bandwidth data at all.
        return {}
    @classmethod
    def _fake_fetch_bandwidth(cls, session):
        # Keyed by domid, then by VIF device number.
        return {'42':
                    {'0': {'bw_in': 21024, 'bw_out': 22048},
                     '1': {'bw_in': 231337, 'bw_out': 221212121}},
                '12':
                    {'0': {'bw_in': 1024, 'bw_out': 2048},
                     '1': {'bw_in': 31337, 'bw_out': 21212121}},
                }
    def test_get_all_bw_counters(self):
        instances = [dict(name='test1', uuid='1-2-3'),
                     dict(name='test2', uuid='4-5-6')]
        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       XenAPIBWCountersTestCase._fake_fetch_bandwidth)
        result = self.conn.get_all_bw_counters(instances)
        # Two VMs x two VIFs each = four counter entries.
        self.assertEqual(len(result), 4)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="a:b:c:d...",
                           bw_in=1024,
                           bw_out=2048), result)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="e:f:12:q...",
                           bw_in=31337,
                           bw_out=21212121), result)
        self.assertIn(dict(uuid='4-5-6',
                           mac_address="a:3:c:d...",
                           bw_in=21024,
                           bw_out=22048), result)
        self.assertIn(dict(uuid='4-5-6',
                           mac_address="e:f:42:q...",
                           bw_in=231337,
                           bw_out=221212121), result)
    def test_get_all_bw_counters_in_failure_case(self):
        """Test that get_all_bw_counters returns an empty list when
        no data returned from Xenserver.  c.f. bug #910045.
        """
        instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       XenAPIBWCountersTestCase._fake_fetch_bandwidth_mt)
        result = self.conn.get_all_bw_counters(instances)
        self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.virt.test_libvirt.IPTablesFirewallDriverTestCase share a lot of
# code. Consider abstracting common code in a base class for firewall driver
# testing.
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
    """Tests the Dom0 iptables firewall driver."""
    # Canned iptables-save output used as the "current" ruleset that
    # the firewall driver reads, filters and rewrites.
    _in_rules = [
      '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
      '*nat',
      ':PREROUTING ACCEPT [1170:189210]',
      ':INPUT ACCEPT [844:71028]',
      ':OUTPUT ACCEPT [5149:405186]',
      ':POSTROUTING ACCEPT [5063:386098]',
      '# Completed on Mon Dec  6 11:54:13 2010',
      '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
      '*mangle',
      ':INPUT ACCEPT [969615:281627771]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [915599:63811649]',
      ':nova-block-ipv4 - [0:0]',
      '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
      '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
      ',ESTABLISHED -j ACCEPT ',
      '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -o virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      '[0:0] -A FORWARD -i virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      'COMMIT',
      '# Completed on Mon Dec  6 11:54:13 2010',
      '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
      '*filter',
      ':INPUT ACCEPT [969615:281627771]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [915599:63811649]',
      ':nova-block-ipv4 - [0:0]',
      '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
      '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
      ',ESTABLISHED -j ACCEPT ',
      '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -o virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      '[0:0] -A FORWARD -i virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      'COMMIT',
      '# Completed on Mon Dec  6 11:54:13 2010',
    ]
    # Minimal ip6tables-save output for the IPv6 filter table.
    _in6_filter_rules = [
      '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
      '*filter',
      ':INPUT ACCEPT [349155:75810423]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [349256:75777230]',
      'COMMIT',
      '# Completed on Tue Jan 18 23:47:56 2011',
    ]
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(CONF.network_manager)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertTrue(rule in self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
' --icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self.stubs,
1, spectacular=True)
from nova.compute import utils as compute_utils
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
network_info = network_model.legacy()
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
_get_instance_nw_info = fake_network.fake_get_instance_nw_info
network_info = _get_instance_nw_info(self.stubs,
networks_count,
ipv4_addr_per_network)
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEquals(ipv4_network_rules, rules)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instances[instance_ref['id']] = instance_ref
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
#validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly."
"The rule for UDP acceptance is missing")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: as in libvirt tests
# peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
    """Unit tests for testing we find the right SR."""
    def _make_session(self):
        # Shared helper: stub the XenAPI session and build a real
        # XenAPISession against the fakes.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        return xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                         fake.FakeVirtAPI())
    def test_safe_find_sr_raise_exception(self):
        """A nonsense matching filter raises StorageRepositoryNotFound."""
        self.flags(sr_matching_filter='yadayadayada')
        session = self._make_session()
        self.assertRaises(exception.StorageRepositoryNotFound,
                          vm_utils.safe_find_sr, session)
    def test_safe_find_sr_local_storage(self):
        """The default local-storage SR is found via the i18n-key filter."""
        self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
        session = self._make_session()
        # This test is only guaranteed if there is one host in the pool
        self.assertEqual(len(xenapi_fake.get_all('host')), 1)
        host_ref = xenapi_fake.get_all('host')[0]
        # Walk the PBDs plugged into our host and remember the SR that is
        # tagged as local storage.
        local_sr = None
        for pbd_ref in xenapi_fake.get_all('PBD'):
            pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
            if pbd_rec['host'] == host_ref:
                sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
                if sr_rec['other_config']['i18n-key'] == 'local-storage':
                    local_sr = pbd_rec['SR']
        found = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, found)
    def test_safe_find_sr_by_other_criteria(self):
        """A custom other-config filter selects the matching SR."""
        self.flags(sr_matching_filter='other-config:my_fake_sr=true')
        session = self._make_session()
        host_ref = xenapi_fake.get_all('host')[0]
        local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
                                         type='lvm',
                                         other_config={'my_fake_sr': 'true'},
                                         host_ref=host_ref)
        found = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, found)
    def test_safe_find_sr_default(self):
        """The default-sr:true filter returns the pool's default SR."""
        self.flags(sr_matching_filter='default-sr:true')
        session = self._make_session()
        pool_ref = session.call_xenapi('pool.get_all')[0]
        found = vm_utils.safe_find_sr(session)
        self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
                         found)
def _create_service_entries(context, values=None):
    """Create nova-compute service records for a zone -> hosts mapping.

    :param context: request context used for the DB calls
    :param values: dict mapping availability-zone name to a list of host
        names; when None a default two-zone/three-host topology is used
    :returns: the mapping that was used, so callers can iterate it
    """
    # NOTE: the previous signature used a mutable dict literal as the
    # default argument, which is shared across calls; rebuild it per call.
    if values is None:
        values = {'avail_zone1': ['fake_host1', 'fake_host2'],
                  'avail_zone2': ['fake_host3']}
    # .items() works on both Python 2 and 3, unlike the old .iteritems().
    for avail_zone, hosts in values.items():
        for host in hosts:
            db.service_create(context,
                              {'host': host,
                               'binary': 'nova-compute',
                               'topic': 'compute',
                               'report_count': 0})
    return values
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
    """Unit tests for aggregate operations."""
    def setUp(self):
        """Build a XenAPI driver, compute manager and a seed aggregate."""
        super(XenAPIAggregateTestCase, self).setUp()
        self.flags(xenapi_connection_url='http://test_url',
                   xenapi_connection_username='test_user',
                   xenapi_connection_password='test_pass',
                   instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host',
                   compute_driver='xenapi.XenAPIDriver',
                   default_availability_zone='avail_zone1')
        self.flags(use_local=True, group='conductor')
        host_ref = xenapi_fake.get_all('host')[0]
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.compute = importutils.import_object(CONF.compute_manager)
        self.api = compute_api.AggregateAPI()
        values = {'name': 'test_aggr',
                  'metadata': {'availability_zone': 'test_zone',
                               pool_states.POOL_FLAG: 'XenAPI'}}
        self.aggr = db.aggregate_create(self.context, values)
        # Metadata describing an ACTIVE XenAPI pool mastered by 'host'.
        self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
                              'master_compute': 'host',
                              'availability_zone': 'fake_zone',
                              pool_states.KEY: pool_states.ACTIVE,
                              'host': xenapi_fake.get_record('host',
                                                             host_ref)['uuid']}
    def test_pool_add_to_aggregate_called_by_driver(self):
        """Driver.add_to_aggregate delegates to the pool with all args."""
        calls = []
        def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
            self.assertEquals("CONTEXT", context)
            self.assertEquals("AGGREGATE", aggregate)
            self.assertEquals("HOST", host)
            self.assertEquals("SLAVEINFO", slave_info)
            calls.append(pool_add_to_aggregate)
        self.stubs.Set(self.conn._pool,
                       "add_to_aggregate",
                       pool_add_to_aggregate)
        self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
                                   slave_info="SLAVEINFO")
        self.assertTrue(pool_add_to_aggregate in calls)
    def test_pool_remove_from_aggregate_called_by_driver(self):
        """Driver.remove_from_aggregate delegates to the pool with all args."""
        calls = []
        def pool_remove_from_aggregate(context, aggregate, host,
                                       slave_info=None):
            self.assertEquals("CONTEXT", context)
            self.assertEquals("AGGREGATE", aggregate)
            self.assertEquals("HOST", host)
            self.assertEquals("SLAVEINFO", slave_info)
            calls.append(pool_remove_from_aggregate)
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       pool_remove_from_aggregate)
        self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
                                        slave_info="SLAVEINFO")
        self.assertTrue(pool_remove_from_aggregate in calls)
    def test_add_to_aggregate_for_first_host_sets_metadata(self):
        """Adding the first host initializes the pool and sets metadata."""
        def fake_init_pool(id, name):
            fake_init_pool.called = True
        self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
        aggregate = self._aggregate_setup()
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate['id'])
        self.assertTrue(fake_init_pool.called)
        self.assertThat(self.fake_metadata,
                        matchers.DictMatches(result['metadetails']))
    def test_join_slave(self):
        """_join_slave is invoked when the request reaches the master."""
        def fake_join_slave(id, compute_uuid, host, url, user, password):
            fake_join_slave.called = True
        self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
                                         dict(compute_uuid='fake_uuid',
                                         url='fake_url',
                                         user='fake_user',
                                         passwd='fake_pass',
                                         xenhost_uuid='fake_uuid'))
        self.assertTrue(fake_join_slave.called)
    def test_add_to_aggregate_first_host(self):
        """Adding the first host renames the XenServer pool."""
        def fake_pool_set_name_label(self, session, pool_ref, name):
            fake_pool_set_name_label.called = True
        self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
                       fake_pool_set_name_label)
        self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
        values = {"name": 'fake_aggregate',
                  'metadata': {'availability_zone': 'fake_zone'}}
        result = db.aggregate_create(self.context, values)
        metadata = {'availability_zone': 'fake_zone',
                    pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.CREATED}
        db.aggregate_metadata_add(self.context, result['id'], metadata)
        db.aggregate_host_add(self.context, result['id'], "host")
        aggregate = db.aggregate_get(self.context, result['id'])
        self.assertEqual(["host"], aggregate['hosts'])
        self.assertEqual(metadata, aggregate['metadetails'])
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        self.assertTrue(fake_pool_set_name_label.called)
    def test_remove_from_aggregate_called(self):
        """Driver.remove_from_aggregate calls through to the pool."""
        def fake_remove_from_aggregate(context, aggregate, host):
            fake_remove_from_aggregate.called = True
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       fake_remove_from_aggregate)
        self.conn.remove_from_aggregate(None, None, None)
        self.assertTrue(fake_remove_from_aggregate.called)
    def test_remove_from_empty_aggregate(self):
        """Removing a host from an empty aggregate is invalid."""
        result = self._aggregate_setup()
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn._pool.remove_from_aggregate,
                          self.context, result, "test_host")
    def test_remove_slave(self):
        """Removing a slave host ejects it from the pool."""
        def fake_eject_slave(id, compute_uuid, host_uuid):
            fake_eject_slave.called = True
        self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
        self.fake_metadata['host2'] = 'fake_host2_uuid'
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
        self.assertTrue(fake_eject_slave.called)
    def test_remove_master_solo(self):
        """Removing the last (master) host clears the pool metadata."""
        def fake_clear_pool(id):
            fake_clear_pool.called = True
        self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
        aggregate = self._aggregate_setup(metadata=self.fake_metadata)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate['id'])
        self.assertTrue(fake_clear_pool.called)
        self.assertThat({'availability_zone': 'fake_zone',
                pool_states.POOL_FLAG: 'XenAPI',
                pool_states.KEY: pool_states.ACTIVE},
                matchers.DictMatches(result['metadetails']))
    def test_remote_master_non_empty_pool(self):
        """Removing the master while slaves remain is invalid."""
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn._pool.remove_from_aggregate,
                          self.context, aggregate, "host")
    def _aggregate_setup(self, aggr_name='fake_aggregate',
                         aggr_zone='fake_zone',
                         aggr_state=pool_states.CREATED,
                         hosts=None, metadata=None):
        """Create an aggregate in ``aggr_state`` with the given hosts.

        :param hosts: host names to add; defaults to ['host'].  A None
            sentinel replaces the old mutable-list default argument, which
            was shared across calls.
        :param metadata: extra metadata to attach, if any
        :returns: the freshly loaded aggregate record
        """
        hosts = ['host'] if hosts is None else hosts
        values = {"name": aggr_name}
        result = db.aggregate_create(self.context, values,
                metadata={'availability_zone': aggr_zone})
        pool_flag = {pool_states.POOL_FLAG: "XenAPI",
                     pool_states.KEY: aggr_state}
        db.aggregate_metadata_add(self.context, result['id'], pool_flag)
        for host in hosts:
            db.aggregate_host_add(self.context, result['id'], host)
        if metadata:
            db.aggregate_metadata_add(self.context, result['id'], metadata)
        return db.aggregate_get(self.context, result['id'])
    def test_add_host_to_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateAction is raised when adding host while
        aggregate is not ready.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.add_to_aggregate, self.context,
                          aggregate, 'host')
    def test_add_host_to_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        deleted.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.add_to_aggregate, self.context,
                          aggregate, 'fake_host')
    def test_add_host_to_aggregate_invalid_error_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        in error.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.add_to_aggregate, self.context,
                          aggregate, 'fake_host')
    def test_remove_host_from_aggregate_error(self):
        """A host can be removed and the aggregate stays ACTIVE."""
        values = _create_service_entries(self.context)
        fake_zone = values.keys()[0]
        aggr = self.api.create_aggregate(self.context,
                                         'fake_aggregate', fake_zone)
        # let's mock the fact that the aggregate is ready!
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.ACTIVE}
        db.aggregate_metadata_add(self.context, aggr['id'], metadata)
        for host in values[fake_zone]:
            aggr = self.api.add_host_to_aggregate(self.context,
                                                  aggr['id'], host)
        # NOTE: the original code built an unused 'operational_state' dict
        # here and never applied it, so the aggregate was never actually put
        # in the error state; the dead assignment has been removed.
        expected = self.api.remove_host_from_aggregate(self.context,
                                                       aggr['id'],
                                                       values[fake_zone][0])
        self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
        self.assertEqual(expected['metadata'][pool_states.KEY],
                         pool_states.ACTIVE)
    def test_remove_host_from_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        deleted.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')
    def test_remove_host_from_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        changing.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')
    def test_add_aggregate_host_raise_err(self):
        """A driver failure during add rolls the aggregate into ERROR."""
        def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
            raise exception.AggregateError(
                    aggregate_id='', action='', reason='')
        self.stubs.Set(self.compute.driver, "add_to_aggregate",
                       fake_driver_add_to_aggregate)
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.ACTIVE}
        db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
        db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
        self.assertRaises(exception.AggregateError,
                          self.compute.add_aggregate_host,
                          self.context, "fake_host",
                          aggregate=jsonutils.to_primitive(self.aggr))
        excepted = db.aggregate_get(self.context, self.aggr['id'])
        self.assertEqual(excepted['metadetails'][pool_states.KEY],
                pool_states.ERROR)
        self.assertEqual(excepted['hosts'], [])
class MockComputeAPI(object):
    """Records compute RPC API calls so tests can assert on them later.

    Each recorded entry is a tuple of (bound method, *call arguments),
    appended to ``_mock_calls`` in call order.
    """
    def __init__(self):
        self._mock_calls = []
    def add_aggregate_host(self, ctxt, aggregate,
                           host_param, host, slave_info):
        entry = (self.add_aggregate_host, ctxt, aggregate,
                 host_param, host, slave_info)
        self._mock_calls.append(entry)
    def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
                              host, slave_info):
        entry = (self.remove_aggregate_host, ctxt, aggregate_id,
                 host_param, host, slave_info)
        self._mock_calls.append(entry)
class StubDependencies(object):
    """Stub dependencies for ResourcePool.

    Overrides the collaborators a ResourcePool relies on: a recording
    compute RPC API, pool-membership checks, pool metadata and slave info.
    """
    def __init__(self):
        self.compute_rpcapi = MockComputeAPI()
    def _is_hv_pool(self, *_ignore):
        # Every aggregate counts as a hypervisor pool in these tests.
        return True
    def _get_metadata(self, *_ignore):
        metadata = {'master_compute': 'master'}
        metadata[pool_states.KEY] = {}
        return metadata
    def _create_slave_info(self, *ignore):
        return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
    """A ResourcePool whose collaborators come from StubDependencies.

    StubDependencies precedes pool.ResourcePool in the MRO, so its stubbed
    methods and recording compute RPC API shadow the real implementations.
    """
class HypervisorPoolTestCase(test.TestCase):
    """Verify a slave delegates pool membership changes to its master."""
    # Minimal aggregate record whose metadata names 'master' as the pool
    # master compute node.
    fake_aggregate = {
        'id': 98,
        'hosts': [],
        'metadetails': {
            'master_compute': 'master',
            pool_states.POOL_FLAG: {},
            pool_states.KEY: {}
        }
    }
    def test_slave_asks_master_to_add_slave_to_pool(self):
        """add_to_aggregate on a slave RPCs add_aggregate_host to master."""
        slave = ResourcePoolWithStubs()
        slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
        rpcapi = slave.compute_rpcapi
        expected = (rpcapi.add_aggregate_host,
                    "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
                    "slave", "master", "SLAVE_INFO")
        self.assertIn(expected, rpcapi._mock_calls)
    def test_slave_asks_master_to_remove_slave_from_pool(self):
        """remove_from_aggregate on a slave RPCs remove_aggregate_host."""
        slave = ResourcePoolWithStubs()
        slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
        rpcapi = slave.compute_rpcapi
        expected = (rpcapi.remove_aggregate_host,
                    "CONTEXT", 98, "slave", "master", "SLAVE_INFO")
        self.assertIn(expected, rpcapi._mock_calls)
class SwapXapiHostTestCase(test.TestCase):
    """Tests for pool.swap_xapi_host, which rewrites the host in a URL."""
    def test_swapping(self):
        """Host is replaced while port and path are preserved."""
        swapped = pool.swap_xapi_host(
            "http://someserver:8765/somepath", 'otherserver')
        self.assertEquals("http://otherserver:8765/somepath", swapped)
    def test_no_port(self):
        """Host is replaced when the URL carries no port."""
        swapped = pool.swap_xapi_host(
            "http://someserver/somepath", 'otherserver')
        self.assertEquals("http://otherserver/somepath", swapped)
    def test_no_path(self):
        """Host is replaced when the URL has neither port nor path."""
        swapped = pool.swap_xapi_host("http://someserver", 'otherserver')
        self.assertEquals("http://otherserver", swapped)
class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
"""Unit tests for live_migration."""
def setUp(self):
super(XenAPILiveMigrateTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
db_fakes.stub_out_db_instance_api(self.stubs)
self.context = context.get_admin_context()
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
fake_live_migrate.called = True
self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
self.conn.live_migration(None, None, None, None, None)
self.assertTrue(fake_live_migrate.called)
def test_pre_live_migration(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.pre_live_migration(None, None, None, None, None)
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.post_live_migration_at_destination(None, None, None, None)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
expected = {'block_migration': True,
'migrate_data': {
'migrate_send_data': "fake_migrate_data",
'destination_sr_ref': 'asdf'
}
}
result = self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'},
{}, {},
True, False)
self.assertEqual(expected, result)
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def _add_default_live_migrate_stubs(self, conn):
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return []
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
self.stubs.Set(conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return "true"
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return {'returncode': 'error', 'message': 'Plugin not found'}
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context, {'host': 'host'},
{})
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context,
{'host': 'host'},
dest_check_data)
def test_check_can_live_migrate_works(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
class fake_aggregate:
def __init__(self):
self.metadetails = {"host": "test_host_uuid"}
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [fake_aggregate()]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'}, False, False)
def test_check_can_live_migrate_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
class fake_aggregate:
def __init__(self):
self.metadetails = {"dest_other": "test_host_uuid"}
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [fake_aggregate()]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'}, None, None)
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def post_method(context, instance, destination_hostname,
block_migration):
post_method.called = True
self.conn.live_migration(self.conn, None, None, post_method, None)
self.assertTrue(post_method.called, "post_method.called")
    def test_live_migration_on_failure(self):
        """A failing session call re-raises and triggers the recover
        callback."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)
        # Force the low-level XenAPI call to blow up mid-migration.
        def fake_call_xenapi(*args):
            raise NotImplementedError()
        self.stubs.Set(self.conn._vmops._session, "call_xenapi",
                       fake_call_xenapi)
        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        self.assertRaises(NotImplementedError, self.conn.live_migration,
                          self.conn, None, None, None, recover_method)
        self.assertTrue(recover_method.called, "recover_method.called")
    def test_live_migration_calls_post_migration(self):
        """Block migration with migrate data still runs the post callback."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        def post_method(context, instance, destination_hostname,
                        block_migration):
            post_method.called = True
        # pass block_migration = True and migrate data
        migrate_data = {"destination_sr_ref": "foo",
                        "migrate_send_data": "bar"}
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 True, migrate_data)
        self.assertTrue(post_method.called, "post_method.called")
    def test_live_migration_block_cleans_srs(self):
        """Block migration forgets attached iSCSI SRs after a successful
        migrate."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        def fake_get_iscsi_srs(context, instance):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)
        # Record the cleanup call instead of touching a real SR.
        def fake_forget_sr(context, instance):
            fake_forget_sr.called = True
        self.stubs.Set(volume_utils, "forget_sr",
                       fake_forget_sr)
        def post_method(context, instance, destination_hostname,
                        block_migration):
            post_method.called = True
        migrate_data = {"destination_sr_ref": "foo",
                        "migrate_send_data": "bar"}
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 True, migrate_data)
        self.assertTrue(post_method.called, "post_method.called")
        self.assertTrue(fake_forget_sr.called, "forget_sr.called")
    def test_live_migration_with_block_migration_raises_invalid_param(self):
        """block_migration=True without migrate data raises
        InvalidParameterValue and still calls the recover callback."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # pass block_migration = True and no migrate data
        self.assertRaises(exception.InvalidParameterValue,
                          self.conn.live_migration, self.conn,
                          None, None, None, recover_method, True, None)
        self.assertTrue(recover_method.called, "recover_method.called")
    def test_live_migration_with_block_migration_fails_migrate_send(self):
        """A failing migrate_send surfaces as MigrationError and triggers
        the recover callback."""
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # pass block_migration = True and migrate data
        migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
        self.assertRaises(exception.MigrationError,
                          self.conn.live_migration, self.conn,
                          None, None, None, recover_method, True, migrate_data)
        self.assertTrue(recover_method.called, "recover_method.called")
    def test_live_migrate_block_migration_xapi_call_parameters(self):
        """VM.migrate_send must receive the migrate data and the VDI map
        generated for the destination SR."""
        fake_vdi_map = object()
        class Session(xenapi_fake.SessionBase):
            # 'self_' keeps the fake session's receiver distinct from the
            # test case's 'self' used inside the assertions below.
            def VM_migrate_send(self_, session, vmref, migrate_data, islive,
                                vdi_map, vif_map, options):
                self.assertEquals('SOMEDATA', migrate_data)
                self.assertEquals(fake_vdi_map, vdi_map)
        stubs.stubout_session(self.stubs, Session)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(conn)
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            return fake_vdi_map
        self.stubs.Set(conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        def dummy_callback(*args, **kwargs):
            pass
        conn.live_migration(
            self.context, instance_ref=dict(name='ignore'), dest=None,
            post_method=dummy_callback, recover_method=dummy_callback,
            block_migration="SOMEDATA",
            migrate_data=dict(migrate_send_data='SOMEDATA',
                              destination_sr_ref="TARGET_SR_OPAQUE_REF"))
    def test_generate_vdi_map(self):
        """_generate_vdi_map maps every VDI on the source SR to the
        destination SR ref."""
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = "fake_vm_ref"
        def fake_find_sr(_session):
            self.assertEquals(conn._session, _session)
            return "source_sr_ref"
        self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
        def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
            self.assertEquals(conn._session, _session)
            self.assertEquals(vm_ref, _vm_ref)
            self.assertEquals("source_sr_ref", _sr_ref)
            return ["vdi0", "vdi1"]
        self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
                       fake_get_instance_vdis_for_sr)
        result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
        self.assertEquals({"vdi0": "dest_sr_ref",
                           "vdi1": "dest_sr_ref"}, result)
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
    """Exercise injection of instance metadata into the XenStore.

    setUp replaces all XenStore accessors with fakes that record writes in
    self.xenstore, a dict with 'persist' (param xenstore) and 'ephem'
    (live xenstore) sub-dicts, so assertions can inspect exactly what
    would have been written.
    """
    def setUp(self):
        super(XenAPIInjectMetadataTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.xenstore = dict(persist={}, ephem={})
        # Fakes below mirror the VMOps xenstore helpers; each asserts it
        # was handed the expected instance/vm_ref before recording.
        def fake_get_vm_opaque_ref(inst, instance):
            self.assertEqual(instance, {'uuid': 'fake'})
            return 'vm_ref'
        def fake_add_to_param_xenstore(inst, vm_ref, key, val):
            self.assertEqual(vm_ref, 'vm_ref')
            self.xenstore['persist'][key] = val
        def fake_remove_from_param_xenstore(inst, vm_ref, key):
            self.assertEqual(vm_ref, 'vm_ref')
            if key in self.xenstore['persist']:
                del self.xenstore['persist'][key]
        def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            self.xenstore['ephem'][path] = jsonutils.dumps(value)
        def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            if path in self.xenstore['ephem']:
                del self.xenstore['ephem'][path]
        self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
                       fake_get_vm_opaque_ref)
        self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
                       fake_add_to_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
                       fake_remove_from_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
                       fake_write_to_xenstore)
        self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
                       fake_delete_from_xenstore)
    def test_inject_instance_metadata(self):
        """User metadata lands under vm-data/user-metadata (keys sanitized,
        values JSON-encoded); system_metadata is never written."""
        # Add some system_metadata to ensure it doesn't get added
        # to xenstore
        instance = dict(metadata=[{'key': 'a', 'value': 1},
                                  {'key': 'b', 'value': 2},
                                  {'key': 'c', 'value': 3},
                                  # Check xenstore key sanitizing
                                  {'key': 'hi.there', 'value': 4},
                                  {'key': 'hi!t.e/e', 'value': 5}],
                        # Check xenstore key sanitizing
                        system_metadata=[{'key': 'sys_a', 'value': 1},
                                         {'key': 'sys_b', 'value': 2},
                                         {'key': 'sys_c', 'value': 3}],
                        uuid='fake')
        self.conn._vmops.inject_instance_metadata(instance, 'vm_ref')
        self.assertEqual(self.xenstore, {
                'persist': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '2',
                    'vm-data/user-metadata/c': '3',
                    'vm-data/user-metadata/hi_there': '4',
                    'vm-data/user-metadata/hi_t_e_e': '5',
                    },
                'ephem': {},
                })
    def test_change_instance_metadata_add(self):
        """A '+' diff entry adds the key to both persist and ephem stores."""
        # Test XenStore key sanitizing here, too.
        diff = {'test.key': ['+', 4]}
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata(instance, diff)
        self.assertEqual(self.xenstore, {
                'persist': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '2',
                    'vm-data/user-metadata/c': '3',
                    'vm-data/user-metadata/test_key': '4',
                    },
                'ephem': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '2',
                    'vm-data/user-metadata/c': '3',
                    'vm-data/user-metadata/test_key': '4',
                    },
                })
    def test_change_instance_metadata_update(self):
        """A '+' diff entry for an existing key overwrites it in both
        stores."""
        diff = dict(b=['+', 4])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata(instance, diff)
        self.assertEqual(self.xenstore, {
                'persist': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '4',
                    'vm-data/user-metadata/c': '3',
                    },
                'ephem': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '4',
                    'vm-data/user-metadata/c': '3',
                    },
                })
    def test_change_instance_metadata_delete(self):
        """A '-' diff entry removes the key from both stores."""
        diff = dict(b=['-'])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata(instance, diff)
        self.assertEqual(self.xenstore, {
                'persist': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/c': '3',
                    },
                'ephem': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/c': '3',
                    },
                })
class XenAPISessionTestCase(test.TestCase):
    """Tests for XenAPISession product version/brand detection."""
    def _get_mock_xapisession(self, software_version):
        """Return a XenAPISession whose constructor is skipped and whose
        _get_software_version returns the given canned dict."""
        class XcpXapiSession(xenapi_conn.XenAPISession):
            def __init__(_ignore):
                "Skip the superclass's dirty init"
            def _get_software_version(_ignore):
                return software_version
        return XcpXapiSession()
    def test_get_product_version_product_brand_does_not_fail(self):
        """XCP-style version dicts lack product_* keys; expect
        (None, None) rather than an exception."""
        session = self._get_mock_xapisession({
            'build_number': '0',
            'date': '2012-08-03',
            'hostname': 'komainu',
            'linux': '3.2.0-27-generic',
            'network_backend': 'bridge',
            'platform_name': 'XCP_Kronos',
            'platform_version': '1.6.0',
            'xapi': '1.3',
            'xen': '4.1.2',
            'xencenter_max': '1.10',
            'xencenter_min': '1.10'
        })
        self.assertEquals(
            (None, None),
            session._get_product_version_and_brand()
        )
    def test_get_product_version_product_brand_xs_6(self):
        """XenServer 6 reports its brand and a parsed version tuple."""
        session = self._get_mock_xapisession({
            'product_brand': 'XenServer',
            'product_version': '6.0.50'
        })
        self.assertEquals(
            ((6, 0, 50), 'XenServer'),
            session._get_product_version_and_brand()
        )
|
plumgrid/plumgrid-nova
|
nova/tests/virt/xenapi/test_xenapi.py
|
Python
|
apache-2.0
| 157,476
|
# coding: utf-8
###############################################################################
# Module Written for OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://www.vauxoo.com>).
# All Rights Reserved
############# Credits #########################################################
# Coded by: Yanina Aular <yani@vauxoo.com>
# Planned by: Rafael Silva <rsilvam@vauxoo.com>
# Audited by: Humberto Arocha <hbto@vauxoo.com>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from . import model
from . import wizard
|
suhe/odoo
|
res-addons/hr_job_positions_extended/__init__.py
|
Python
|
gpl-3.0
| 1,365
|
import os
import urllib
import urllib.parse
import logging
import re
import asyncio
from bs4 import BeautifulSoup
from ..config import config
from ..toolbox import dateutils
from ..toolbox.net import download
from .base import SearcherBase, SearchResult, SearcherError
from .easynews_config import config as modconfig
__all__ = ['Searcher', 'modconfig']
log = logging.getLogger('stagehand.searchers.easynews')
class Searcher(SearcherBase):
    """Searcher plugin for the Easynews "global5" HTTP search interface.

    Issues authenticated queries against members.easynews.com, parses the
    returned RSS into SearchResult objects, and supplies the credentials
    the HTTP retriever needs to fetch a result.
    """
    NAME = 'easynews'
    PRINTABLE_NAME = 'Easynews Global Search'
    TYPE = 'http'

    DEFAULT_URL_GLOBAL5 = 'https://members.easynews.com/global5/index.html?gps={keywords}&sbj={subject}&from=&ns=&fil=&fex=&vc=&ac=&fty[]=VIDEO&s1=nsubject&s1d=%2B&s2=nrfile&s2d=%2B&s3=dsize&s3d=%2B&pby=500&u=1&svL=&d1={date}&d1t=&d2=&d2t=&b1={size}&b1t=&b2=&b2t=&px1={res}&px1t=&px2=&px2t=&fps1=&fps1t=&fps2=&fps2t=&bps1=&bps1t=&bps2=&bps2t=&hz1=&hz1t=&hz2=&hz2t=&rn1=&rn1t=&rn2=&rn2t=&fly=2&pno=1&sS=5'

    @asyncio.coroutine
    def _search_global5(self, title, codes, size, date, res):
        """Fetch one global5 RSS result page and return its body.

        Raises ValueError when credentials are not configured and
        SearcherError on a non-200 HTTP response.
        """
        if not modconfig.username or not modconfig.password:
            raise ValueError('Configuration lacks username and/or password')
        # Debug shortcut (disabled via 'if 0'): reuse a cached result page.
        # BUG FIX: the original called the Python 2 file() builtin, which
        # does not exist in Python 3 and would raise NameError if this
        # path were ever enabled; use open() instead.
        if 0 and os.path.exists('result.rss'):
            print('Using cached result.rss')
            with open('result.rss') as cached:
                return cached.read()
        url = modconfig.url or Searcher.DEFAULT_URL_GLOBAL5
        url = url.format(keywords=urllib.parse.quote_plus(title), subject=codes,
                         date=urllib.parse.quote_plus(date), size=size, res=res)
        status, rss = yield from download(url, retry=modconfig.retries,
                                          auth=(modconfig.username, modconfig.password))
        if status != 200:
            # TODO: handle status codes like 401 (unauth)
            raise SearcherError('HTTP status not ok (%d)' % status)
        #open('result.rss', 'w').write(rss)
        return rss

    @asyncio.coroutine
    def _search(self, series, episodes, date, min_size, quality):
        """Search Easynews for the given episodes of *series*.

        Returns a dict mapping None -> list of SearchResult parsed from
        the RSS enclosures.
        """
        title = series.cfg.search_string or series.name
        # Strip problem characters from the title, and substitute alternative apostrophe
        title = self.clean_title(title, apostrophe=Searcher.CLEAN_APOSTROPHE_REGEXP)
        size = '%dM' % (min_size / 1048576) if min_size else '100M'
        # XXX: easynews doesn't support HEVC so remove resolution filtering.
        # res = '1x540' if quality == 'HD' else ''
        res = ''
        results = []
        # Query in batches of 10 episodes to keep the subject regexp short.
        for i in range(0, len(episodes), 10):
            batch = episodes[i:i+10]
            codelist = [code for episode in batch \
                             for code in self._get_episode_codes_regexp_list([episode])]
            codes = '|'.join(codelist)
            log.debug('searching for %d episodes, minimum size %s and res %s, keywords=%s subject=%s',
                      len(batch), size, res or 'any', title, codes)
            rss = yield from self._search_global5(title, codes, size, date or '', res)
            soup = BeautifulSoup(rss, 'html.parser')
            for item in soup.find_all('item'):
                result = SearchResult(self)
                urlpath = urllib.parse.urlparse(item.enclosure['url']).path
                result.filename = urllib.parse.unquote(os.path.split(urlpath)[-1])
                result.size = self._parse_hsize(item.enclosure['length'])
                result.date = dateutils.from_rfc822(item.pubdate.contents[0])
                result.subject = ''.join(item.title.contents)
                result.url = item.enclosure['url']
                # TODO: parse out newsgroup
                results.append(result)
        return {None: results}

    @asyncio.coroutine
    def _get_retriever_data(self, search_result):
        """Return the kwargs the HTTP retriever needs to fetch the result."""
        return {
            'url': search_result.url,
            'username': modconfig.username,
            'password': modconfig.password,
            'retry': modconfig.retries
        }

    def _check_results_equal(self, a, b):
        """Return True if two results refer to the same underlying file."""
        try:
            # Easynews URLs contain hashes of the file, which is a convenient
            # value to compare, because it means that even different URLs can
            # end up being the same file.
            a_hash = re.search(r'/([0-9a-f]{32,})', a.url).group(1)
            b_hash = re.search(r'/([0-9a-f]{32,})', b.url).group(1)
            return a_hash == b_hash
        except AttributeError:
            # Wasn't able to find hash in URL, so compare the URLs directly.
            return a.url == b.url
def enable(manager):
    """Hook run by the web interface when this plugin flips from disabled
    to enabled.

    The HTTP retriever this searcher relies on is always active, so no
    extra setup is required here.
    """
    return None
def get_config_template(manager):
    """Return the filesystem path of this searcher's settings template,
    located under the manager's data directory."""
    data_dir = manager.paths.data
    return os.path.join(data_dir, 'web', 'settings', 'easynews.tmpl')
|
jtackaberry/stagehand
|
stagehand/searchers/easynews.py
|
Python
|
mit
| 4,873
|
#!/usr/bin/env python3
#
# mmgen = Multi-Mode GENerator, a command-line cryptocurrency wallet
# Copyright (C)2013-2022 The MMGen Project <mmgen@tuta.io>
# Licensed under the GNU General Public License, Version 3:
# https://www.gnu.org/licenses
# Public project repositories:
# https://github.com/mmgen/mmgen
# https://gitlab.com/mmgen/mmgen
"""
wallet.incog_base: incognito wallet base class
"""
from ..globalvars import g
from ..opts import opt
from ..seed import Seed
from ..util import msg,vmsg,qmsg,die,make_chksum_8,keypress_confirm
from .enc import wallet
import mmgen.crypto as crypto
class wallet(wallet):
    """Incognito wallet base class.

    The encrypted seed is wrapped in an outer AES-CTR layer keyed from the
    passphrase salted with the IV, so the stored data is indistinguishable
    from random noise.  The IV's checksum serves as the "Incog ID" used to
    identify the wallet data later.
    """
    _msg = {
        'check_incog_id': """
  Check the generated Incog ID above against your records.  If it doesn't
  match, then your incognito data is incorrect or corrupted.
            """,
        'record_incog_id': """
  Make a record of the Incog ID but keep it secret.  You will use it to
  identify your incog wallet data in the future.
            """,
        'decrypt_params': " {} hash preset"
    }
    def _make_iv_chksum(self,s):
        # First 8 hex digits of SHA256(s), uppercased -- the Incog ID.
        from hashlib import sha256
        return sha256(s).hexdigest()[:8].upper()
    def _get_incog_data_len(self,seed_len):
        # Total serialized size: IV + salt + (checksum unless old format)
        # + raw seed bytes.
        return (
            crypto.aesctr_iv_len
            + crypto.salt_len
            + (0 if opt.old_incog_fmt else crypto.hincog_chk_len)
            + seed_len//8 )
    def _incog_data_size_chk(self):
        # valid sizes: 56, 64, 72
        dlen = len(self.fmt_data)
        seed_len = opt.seed_len or Seed.dfl_len
        valid_dlen = self._get_incog_data_len(seed_len)
        if dlen == valid_dlen:
            return True
        else:
            if opt.old_incog_fmt:
                msg('WARNING: old-style incognito format requested. Are you sure this is correct?')
            msg(f'Invalid incognito data size ({dlen} bytes) for this seed length ({seed_len} bits)')
            msg(f'Valid data size for this seed length: {valid_dlen} bytes')
            for sl in Seed.lens:
                if dlen == self._get_incog_data_len(sl):
                    # 'die' comes from ..util (see module imports).
                    die(1,f'Valid seed length for this data size: {sl} bits')
            msg(f'This data size ({dlen} bytes) is invalid for all available seed lengths')
            return False
    def _encrypt (self):
        # Encrypt the seed, then build the IV, wrapper key and Incog ID.
        self._get_first_pw_and_hp_and_encrypt_seed()
        if opt.old_incog_fmt:
            die(1,'Writing old-format incognito wallets is unsupported')
        d = self.ssdata
        d.iv = crypto.get_random( crypto.aesctr_iv_len )
        d.iv_id = self._make_iv_chksum(d.iv)
        msg(f'New Incog Wallet ID: {d.iv_id}')
        qmsg('Make a record of this value')
        vmsg('\n ' + self.msg['record_incog_id'].strip()+'\n')
        d.salt = crypto.get_random( crypto.salt_len )
        seed_key = crypto.make_key(
            passwd = d.passwd,
            salt = d.salt,
            hash_preset = d.hash_preset,
            desc = 'incog wallet key' )
        from hashlib import sha256
        # 8-byte seed checksum stored in front of the seed (new format).
        chk = sha256(self.seed.data).digest()[:8]
        d.enc_seed = crypto.encrypt_seed(
            data = chk + self.seed.data,
            key = seed_key )
        # IV is used BOTH to initialize counter and to salt password!
        d.wrapper_key = crypto.make_key(
            passwd = d.passwd,
            salt = d.iv,
            hash_preset = d.hash_preset,
            desc = 'incog wrapper key' )
        d.key_id = make_chksum_8(d.wrapper_key)
        vmsg(f'Key ID: {d.key_id}')
        d.target_data_len = self._get_incog_data_len(self.seed.bitlen)
    def _format(self):
        # Serialized layout: IV || AES-CTR(salt || encrypted seed).
        d = self.ssdata
        self.fmt_data = d.iv + crypto.encrypt_data(
            data = d.salt + d.enc_seed,
            key = d.wrapper_key,
            iv = d.iv,
            desc = self.desc )
    def _filename(self):
        # e.g. 'FC9A2E31-ABCD1234-0123ABCD[256,3].mmincog'
        s = self.seed
        d = self.ssdata
        return '{}-{}-{}[{},{}]{x}.{}'.format(
            s.fn_stem,
            d.key_id,
            d.iv_id,
            s.bitlen,
            d.hash_preset,
            self.ext,
            x='-α' if g.debug_utf8 else '')
    def _deformat(self):
        # Split IV from payload and report the Incog ID for verification.
        if not self._incog_data_size_chk():
            return False
        d = self.ssdata
        d.iv = self.fmt_data[0:crypto.aesctr_iv_len]
        d.incog_id = self._make_iv_chksum(d.iv)
        d.enc_incog_data = self.fmt_data[crypto.aesctr_iv_len:]
        msg(f'Incog Wallet ID: {d.incog_id}')
        qmsg('Check this value against your records')
        vmsg('\n ' + self.msg['check_incog_id'].strip()+'\n')
        return True
    def _verify_seed_newfmt(self,data):
        # New format: leading 8-byte SHA256 checksum authenticates the seed.
        chk,seed = data[:8],data[8:]
        from hashlib import sha256
        if sha256(seed).digest()[:8] == chk:
            qmsg('Passphrase{} are correct'.format( self.msg['decrypt_params'].format('and') ))
            return seed
        else:
            msg('Incorrect passphrase{}'.format( self.msg['decrypt_params'].format('or') ))
            return False
    def _verify_seed_oldfmt(self,seed):
        # Old format has no checksum; the user must confirm the Seed ID.
        m = f'Seed ID: {make_chksum_8(seed)}. Is the Seed ID correct?'
        if keypress_confirm(m, True):
            return seed
        else:
            return False
    def _decrypt(self):
        d = self.ssdata
        self._get_hash_preset(add_desc=d.incog_id)
        d.passwd = self._get_passphrase(add_desc=d.incog_id)
        # IV is used BOTH to initialize counter and to salt password!
        wrapper_key = crypto.make_key(
            passwd = d.passwd,
            salt = d.iv,
            hash_preset = d.hash_preset,
            desc = 'wrapper key' )
        dd = crypto.decrypt_data(
            enc_data = d.enc_incog_data,
            key = wrapper_key,
            iv = d.iv,
            desc = 'incog data' )
        d.salt = dd[0:crypto.salt_len]
        d.enc_seed = dd[crypto.salt_len:]
        seed_key = crypto.make_key(
            passwd = d.passwd,
            salt = d.salt,
            hash_preset = d.hash_preset,
            desc = 'main key' )
        qmsg(f'Key ID: {make_chksum_8(seed_key)}')
        verify_seed_func = getattr( self, '_verify_seed_'+ ('oldfmt' if opt.old_incog_fmt else 'newfmt') )
        seed = verify_seed_func(
            crypto.decrypt_seed(
                enc_seed = d.enc_seed,
                key = seed_key,
                seed_id = '',
                key_id = '' ))
        if seed:
            self.seed = Seed(seed)
            msg(f'Seed ID: {self.seed.sid}')
            return True
        else:
            return False
|
mmgen/mmgen
|
mmgen/wallet/incog_base.py
|
Python
|
gpl-3.0
| 5,589
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: re-adds the Visit.improvementissues M2M
    # (removed in 0092) going through the VisitImprovement intermediary
    # model.  Do not edit by hand beyond comments.
    dependencies = [
        ('visit', '0092_remove_visit_improvementissues'),
    ]
    operations = [
        migrations.AddField(
            model_name='visit',
            name='improvementissues',
            field=models.ManyToManyField(related_name='visits', through='visit.VisitImprovement', to='visit.ImprovementIssue'),
        ),
    ]
|
koebbe/homeworks
|
visit/migrations/0093_visit_improvementissues.py
|
Python
|
mit
| 497
|
import xmlrpclib
import urlparse
from model import Builder, Build
class BuildBotSystem(object):
    """Thin client for a BuildBot master's XML-RPC interface.

    The given URL is normalized to '<scheme>://<host>/xmlrpc' and wrapped
    in an xmlrpclib.ServerProxy stored on self.server.
    """
    def __init__(self, url):
        try:
            scheme, loc, _, _, _ = urlparse.urlsplit(url, scheme='http')
            url = '%s://%s/xmlrpc'%(scheme, loc)
            self.server = xmlrpclib.ServerProxy(url)
        # 'as e' instead of the legacy ', e' comma form: identical on
        # Python 2.6+ and also forward-compatible.
        except Exception as e:
            raise ValueError('Invalid BuildBot XML-RPC server %s: %s'%(url, e))

    def getAllBuildsInInterval(self, start, stop):
        """Return raw build tuples completed between start and stop."""
        return self.server.getAllBuildsInInterval(start, stop)

    def getBuilder(self, name):
        """Return a Builder holding up to the 5 most recent builds of
        *name*; stops early at the first build that cannot be fetched."""
        builds = []
        for i in range(1, 5+1):
            try:
                builds.append(Build(self.server.getBuild(name, -i)))
            except Exception:
                # BUG FIX: the original logged via self.env.log here, but
                # no 'env' attribute is ever set on this class, so a
                # missing build raised AttributeError instead of simply
                # ending the fetch loop.
                break
        return Builder(name, builds, [])

    def getAllBuilders(self):
        """Return the list of builder names known to the master."""
        return self.server.getAllBuilders()
|
eunchong/build
|
third_party/buildbot_8_4p1/contrib/trac/bbwatcher/api.py
|
Python
|
bsd-3-clause
| 804
|
# encoding: utf-8
from __future__ import print_function
import os
import shutil
import tempfile
from operator import itemgetter
import py
import pytest
from compose.config import config
from compose.config.errors import ConfigurationError
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
from tests import unittest
def make_service_dict(name, service_dict, working_dir, filename=None):
    """
    Test helper: resolve 'extends' for a single service definition and
    return the fully processed service dictionary.
    """
    service_config = config.ServiceConfig(
        working_dir=working_dir,
        filename=filename,
        name=name,
        config=service_dict)
    resolved = config.ServiceExtendsResolver(service_config).run()
    return config.process_service(resolved)
def service_sort(services):
    """Return *services* ordered (stably) by their 'name' entry."""
    return sorted(services, key=lambda service: service['name'])
def build_config_details(contents, working_dir, filename):
    """Wrap raw config *contents* in a single-file ConfigDetails suitable
    for passing to config.load."""
    config_file = config.ConfigFile(filename, contents)
    return config.ConfigDetails(working_dir, [config_file])
class ConfigTest(unittest.TestCase):
    def test_load(self):
        """config.load returns one processed dict per service, with
        environment lists normalized to mappings."""
        service_dicts = config.load(
            build_config_details(
                {
                    'foo': {'image': 'busybox'},
                    'bar': {'image': 'busybox', 'environment': ['FOO=1']},
                },
                'tests/fixtures/extends',
                'common.yml'
            )
        )
        self.assertEqual(
            service_sort(service_dicts),
            service_sort([
                {
                    'name': 'bar',
                    'image': 'busybox',
                    'environment': {'FOO': '1'},
                },
                {
                    'name': 'foo',
                    'image': 'busybox',
                }
            ])
        )
    def test_load_throws_error_when_not_dict(self):
        """A bare string where a service mapping is expected is rejected."""
        with self.assertRaises(ConfigurationError):
            config.load(
                build_config_details(
                    {'web': 'busybox:latest'},
                    'working_dir',
                    'filename.yml'
                )
            )
    def test_config_invalid_service_names(self):
        """Service names containing forbidden characters raise
        ConfigurationError naming the offending service."""
        for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
            with pytest.raises(ConfigurationError) as exc:
                config.load(build_config_details(
                    {invalid_name: {'image': 'busybox'}},
                    'working_dir',
                    'filename.yml'))
            assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly()
    def test_load_with_invalid_field_name(self):
        """An unsupported service option reports both the option and the
        file it came from."""
        config_details = build_config_details(
            {'web': {'image': 'busybox', 'name': 'bogus'}},
            'working_dir',
            'filename.yml')
        with pytest.raises(ConfigurationError) as exc:
            config.load(config_details)
        error_msg = "Unsupported config option for 'web' service: 'name'"
        assert error_msg in exc.exconly()
        assert "Validation failed in file 'filename.yml'" in exc.exconly()
    def test_load_invalid_service_definition(self):
        """A non-mapping service definition yields a descriptive error."""
        config_details = build_config_details(
            {'web': 'wrong'},
            'working_dir',
            'filename.yml')
        with pytest.raises(ConfigurationError) as exc:
            config.load(config_details)
        error_msg = "service 'web' doesn't have any configuration options"
        assert error_msg in exc.exconly()
    def test_config_integer_service_name_raise_validation_error(self):
        """Non-string (integer) service names are rejected with a hint to
        quote them."""
        expected_error_msg = ("In file 'filename.yml' service name: 1 needs to "
                              "be a string, eg '1'")
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {1: {'image': 'busybox'}},
                    'working_dir',
                    'filename.yml'
                )
            )
    @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
    def test_load_with_multiple_files(self):
        """An override file's options are merged on top of the base file's
        service definitions."""
        base_file = config.ConfigFile(
            'base.yaml',
            {
                'web': {
                    'image': 'example/web',
                    'links': ['db'],
                },
                'db': {
                    'image': 'example/db',
                },
            })
        override_file = config.ConfigFile(
            'override.yaml',
            {
                'web': {
                    'build': '/',
                    'volumes': ['/home/user/project:/code'],
                },
            })
        details = config.ConfigDetails('.', [base_file, override_file])
        service_dicts = config.load(details)
        expected = [
            {
                'name': 'web',
                'build': '/',
                'links': ['db'],
                'volumes': ['/home/user/project:/code'],
            },
            {
                'name': 'db',
                'image': 'example/db',
            },
        ]
        self.assertEqual(service_sort(service_dicts), service_sort(expected))
    def test_load_with_multiple_files_and_empty_override(self):
        """An empty (None) override file is reported as a malformed
        top-level object."""
        base_file = config.ConfigFile(
            'base.yml',
            {'web': {'image': 'example/web'}})
        override_file = config.ConfigFile('override.yml', None)
        details = config.ConfigDetails('.', [base_file, override_file])
        with pytest.raises(ConfigurationError) as exc:
            config.load(details)
        error_msg = "Top level object in 'override.yml' needs to be an object"
        assert error_msg in exc.exconly()
    def test_load_with_multiple_files_and_empty_base(self):
        """An empty (None) base file is reported as a malformed top-level
        object."""
        base_file = config.ConfigFile('base.yml', None)
        override_file = config.ConfigFile(
            'override.yml',
            {'web': {'image': 'example/web'}})
        details = config.ConfigDetails('.', [base_file, override_file])
        with pytest.raises(ConfigurationError) as exc:
            config.load(details)
        assert "Top level object in 'base.yml' needs to be an object" in exc.exconly()
    def test_load_with_multiple_files_and_extends_in_override_file(self):
        """'extends' inside an override file is resolved relative to the
        current working directory and merged with the base service."""
        base_file = config.ConfigFile(
            'base.yaml',
            {
                'web': {'image': 'example/web'},
            })
        override_file = config.ConfigFile(
            'override.yaml',
            {
                'web': {
                    'extends': {
                        'file': 'common.yml',
                        'service': 'base',
                    },
                    'volumes': ['/home/user/project:/code'],
                },
            })
        details = config.ConfigDetails('.', [base_file, override_file])
        tmpdir = py.test.ensuretemp('config_test')
        self.addCleanup(tmpdir.remove)
        # The extended service lives in a real file on disk.
        tmpdir.join('common.yml').write("""
base:
  labels: ['label=one']
""")
        with tmpdir.as_cwd():
            service_dicts = config.load(details)
        expected = [
            {
                'name': 'web',
                'image': 'example/web',
                'volumes': ['/home/user/project:/code'],
                'labels': {'label': 'one'},
            },
        ]
        self.assertEqual(service_sort(service_dicts), service_sort(expected))
    def test_load_with_multiple_files_and_invalid_override(self):
        """An invalid service in the override file names that file in the
        error message."""
        base_file = config.ConfigFile(
            'base.yaml',
            {'web': {'image': 'example/web'}})
        override_file = config.ConfigFile(
            'override.yaml',
            {'bogus': 'thing'})
        details = config.ConfigDetails('.', [base_file, override_file])
        with pytest.raises(ConfigurationError) as exc:
            config.load(details)
        assert "service 'bogus' doesn't have any configuration" in exc.exconly()
        assert "In file 'override.yaml'" in exc.exconly()
    def test_config_valid_service_names(self):
        """Names made of word characters, dots and dashes are accepted."""
        for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
            services = config.load(
                build_config_details(
                    {valid_name: {'image': 'busybox'}},
                    'tests/fixtures/extends',
                    'common.yml'))
            assert services[0]['name'] == valid_name
def test_config_invalid_ports_format_validation(self):
expected_error_msg = "Service 'web' configuration key 'ports' contains an invalid type"
with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
for invalid_ports in [{"1": "8000"}, False, 0, "8000", 8000, ["8000", "8000"]]:
config.load(
build_config_details(
{'web': {'image': 'busybox', 'ports': invalid_ports}},
'working_dir',
'filename.yml'
)
)
    def test_config_valid_ports_format_validation(self):
        """Well-formed 'ports' values (strings, ints, ranges) load without
        raising."""
        valid_ports = [["8000", "9000"], ["8000/8050"], ["8000"], [8000], ["49153-49154:3002-3003"]]
        for ports in valid_ports:
            config.load(
                build_config_details(
                    {'web': {'image': 'busybox', 'ports': ports}},
                    'working_dir',
                    'filename.yml'
                )
            )
    def test_config_hint(self):
        """A misspelled option produces a did-you-mean suggestion."""
        expected_error_msg = "(did you mean 'privileged'?)"
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {
                        'foo': {'image': 'busybox', 'privilige': 'something'},
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )
    def test_invalid_config_build_and_image_specified(self):
        """'image' and 'build' are mutually exclusive for a service."""
        expected_error_msg = "Service 'foo' has both an image and build path specified."
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {
                        'foo': {'image': 'busybox', 'build': '.'},
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )
    def test_invalid_config_type_should_be_an_array(self):
        """'links' given as a bare string is rejected; an array is
        required."""
        expected_error_msg = "Service 'foo' configuration key 'links' contains an invalid type, it should be an array"
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {
                        'foo': {'image': 'busybox', 'links': 'an_link'},
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )
    def test_invalid_config_not_a_dictionary(self):
        """A top-level list instead of a mapping is rejected."""
        expected_error_msg = ("Top level object in 'filename.yml' needs to be "
                              "an object.")
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    ['foo', 'lol'],
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )
    def test_invalid_config_not_unique_items(self):
        """Duplicate entries in a unique-items list ('devices') are
        rejected."""
        expected_error_msg = "has non-unique elements"
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {
                        'web': {'build': '.', 'devices': ['/dev/foo:/dev/foo', '/dev/foo:/dev/foo']}
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )
    def test_invalid_list_of_strings_format(self):
        """Non-string items in a list-of-strings option ('command') are
        rejected, naming the offending item."""
        expected_error_msg = "Service 'web' configuration key 'command' contains 1"
        expected_error_msg += ", which is an invalid type, it should be a string"
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {
                        'web': {'build': '.', 'command': [1]}
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )
    def test_config_image_and_dockerfile_raise_validation_error(self):
        """'image' together with 'dockerfile' is rejected."""
        expected_error_msg = "Service 'web' has both an image and alternate Dockerfile."
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {'web': {'image': 'busybox', 'dockerfile': 'Dockerfile.alt'}},
                    'working_dir',
                    'filename.yml'
                )
            )
    def test_config_extra_hosts_string_raises_validation_error(self):
        """'extra_hosts' given as a bare string is rejected."""
        expected_error_msg = "Service 'web' configuration key 'extra_hosts' contains an invalid type"
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {'web': {
                        'image': 'busybox',
                        'extra_hosts': 'somehost:162.242.195.82'
                    }},
                    'working_dir',
                    'filename.yml'
                )
            )
def test_config_extra_hosts_list_of_dicts_validation_error(self):
expected_error_msg = "key 'extra_hosts' contains {'somehost': '162.242.195.82'}, which is an invalid type, it should be a string"
with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
config.load(
build_config_details(
{'web': {
'image': 'busybox',
'extra_hosts': [
{'somehost': '162.242.195.82'},
{'otherhost': '50.31.209.229'}
]
}},
'working_dir',
'filename.yml'
)
)
def test_config_ulimits_invalid_keys_validation_error(self):
expected = ("Service 'web' configuration key 'ulimits' 'nofile' contains "
"unsupported option: 'not_soft_or_hard'")
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details(
{
'web': {
'image': 'busybox',
'ulimits': {
'nofile': {
"not_soft_or_hard": 100,
"soft": 10000,
"hard": 20000,
}
}
}
},
'working_dir',
'filename.yml'))
assert expected in exc.exconly()
def test_config_ulimits_required_keys_validation_error(self):
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details(
{
'web': {
'image': 'busybox',
'ulimits': {'nofile': {"soft": 10000}}
}
},
'working_dir',
'filename.yml'))
assert "Service 'web' configuration key 'ulimits' 'nofile'" in exc.exconly()
assert "'hard' is a required property" in exc.exconly()
def test_config_ulimits_soft_greater_than_hard_error(self):
expected = "cannot contain a 'soft' value higher than 'hard' value"
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details(
{
'web': {
'image': 'busybox',
'ulimits': {
'nofile': {"soft": 10000, "hard": 1000}
}
}
},
'working_dir',
'filename.yml'))
assert expected in exc.exconly()
def test_valid_config_which_allows_two_type_definitions(self):
expose_values = [["8000"], [8000]]
for expose in expose_values:
service = config.load(
build_config_details(
{'web': {
'image': 'busybox',
'expose': expose
}},
'working_dir',
'filename.yml'
)
)
self.assertEqual(service[0]['expose'], expose)
def test_valid_config_oneof_string_or_list(self):
entrypoint_values = [["sh"], "sh"]
for entrypoint in entrypoint_values:
service = config.load(
build_config_details(
{'web': {
'image': 'busybox',
'entrypoint': entrypoint
}},
'working_dir',
'filename.yml'
)
)
self.assertEqual(service[0]['entrypoint'], entrypoint)
    @mock.patch('compose.config.validation.log')
    def test_logs_warning_for_boolean_in_environment(self, mock_logging):
        """A boolean environment value is accepted but triggers a warning."""
        expected_warning_msg = "There is a boolean value in the 'environment' key."
        config.load(
            build_config_details(
                {'web': {
                    'image': 'busybox',
                    'environment': {'SHOW_STUFF': True}
                }},
                'working_dir',
                'filename.yml'
            )
        )
        # The warning text is only required to appear somewhere in the
        # first positional argument of the (last) warn() call.
        self.assertTrue(mock_logging.warn.called)
        self.assertTrue(expected_warning_msg in mock_logging.warn.call_args[0][0])
def test_config_valid_environment_dict_key_contains_dashes(self):
services = config.load(
build_config_details(
{'web': {
'image': 'busybox',
'environment': {'SPRING_JPA_HIBERNATE_DDL-AUTO': 'none'}
}},
'working_dir',
'filename.yml'
)
)
self.assertEqual(services[0]['environment']['SPRING_JPA_HIBERNATE_DDL-AUTO'], 'none')
    def test_load_yaml_with_yaml_error(self):
        """A YAML syntax error is reported with its line/column position."""
        tmpdir = py.test.ensuretemp('invalid_yaml_test')
        self.addCleanup(tmpdir.remove)
        invalid_yaml_file = tmpdir.join('docker-compose.yml')
        # 'ok: what' after 'this is bogus:' is invalid YAML (two colons in
        # one mapping entry).  NOTE(review): the indentation inside this
        # literal must line up with the 'line 3, column 32' assertion below.
        invalid_yaml_file.write("""
            web:
              this is bogus: ok: what
        """)
        with pytest.raises(ConfigurationError) as exc:
            config.load_yaml(str(invalid_yaml_file))
        assert 'line 3, column 32' in exc.exconly()
class InterpolationTest(unittest.TestCase):
    """Environment-variable interpolation (${VAR}) in config files."""

    @mock.patch.dict(os.environ)
    def test_config_file_with_environment_variable(self):
        """${VAR} references are substituted from os.environ."""
        os.environ.update(
            IMAGE="busybox",
            HOST_PORT="80",
            LABEL_VALUE="myvalue",
        )

        service_dicts = config.load(
            config.find('tests/fixtures/environment-interpolation', None),
        )

        self.assertEqual(service_dicts, [
            {
                'name': 'web',
                'image': 'busybox',
                'ports': ['80:8000'],
                'labels': {'mylabel': 'myvalue'},
                'hostname': 'host-',
                'command': '${ESCAPED}',
            }
        ])

    @mock.patch.dict(os.environ)
    def test_unset_variable_produces_warning(self):
        """Each unset variable is warned about once, even if referenced twice."""
        os.environ.pop('FOO', None)
        os.environ.pop('BAR', None)

        config_details = build_config_details(
            {
                'web': {
                    'image': '${FOO}',
                    'command': '${BAR}',
                    'container_name': '${BAR}',
                },
            },
            '.',
            None,
        )

        with mock.patch('compose.config.interpolation.log') as log:
            config.load(config_details)

            self.assertEqual(2, log.warn.call_count)
            warnings = sorted(args[0][0] for args in log.warn.call_args_list)
            self.assertIn('BAR', warnings[0])
            self.assertIn('FOO', warnings[1])

    @mock.patch.dict(os.environ)
    def test_invalid_interpolation(self):
        """A malformed interpolation expression raises with full context."""
        with self.assertRaises(config.ConfigurationError) as cm:
            config.load(
                build_config_details(
                    {'web': {'image': '${'}},
                    'working_dir',
                    'filename.yml'
                )
            )

        self.assertIn('Invalid', cm.exception.msg)
        self.assertIn('for "image" option', cm.exception.msg)
        self.assertIn('in service "web"', cm.exception.msg)
        self.assertIn('"${"', cm.exception.msg)

    def test_empty_environment_key_allowed(self):
        """An explicitly empty environment value is preserved as ''."""
        service_dict = config.load(
            build_config_details(
                {
                    'web': {
                        'build': '.',
                        'environment': {
                            'POSTGRES_PASSWORD': ''
                        },
                    },
                },
                '.',
                None,
            )
        )[0]
        # assertEquals is a deprecated alias; assertEqual works on both
        # Python 2.7 and 3.x.
        self.assertEqual(service_dict['environment']['POSTGRES_PASSWORD'], '')
class VolumeConfigTest(unittest.TestCase):
    """Resolution of volume host paths: env vars, '~', and relative paths."""

    def test_no_binding(self):
        """A bare container path (anonymous volume) is left untouched."""
        d = make_service_dict('foo', {'build': '.', 'volumes': ['/data']}, working_dir='.')
        self.assertEqual(d['volumes'], ['/data'])

    @mock.patch.dict(os.environ)
    def test_volume_binding_with_environment_variable(self):
        """${VAR} in the host half of a binding is interpolated."""
        os.environ['VOLUME_PATH'] = '/host/path'
        d = config.load(
            build_config_details(
                {'foo': {'build': '.', 'volumes': ['${VOLUME_PATH}:/container/path']}},
                '.',
                None,
            )
        )[0]
        self.assertEqual(d['volumes'], ['/host/path:/container/path'])

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
    @mock.patch.dict(os.environ)
    def test_volume_binding_with_home(self):
        """'~' on the host side expands to $HOME."""
        os.environ['HOME'] = '/home/user'
        d = make_service_dict('foo', {'build': '.', 'volumes': ['~:/container/path']}, working_dir='.')
        self.assertEqual(d['volumes'], ['/home/user:/container/path'])

    def test_name_does_not_expand(self):
        """Named volumes are not treated as paths and are not expanded."""
        d = make_service_dict('foo', {'build': '.', 'volumes': ['mydatavolume:/data']}, working_dir='.')
        self.assertEqual(d['volumes'], ['mydatavolume:/data'])

    def test_absolute_posix_path_does_not_expand(self):
        d = make_service_dict('foo', {'build': '.', 'volumes': ['/var/lib/data:/data']}, working_dir='.')
        self.assertEqual(d['volumes'], ['/var/lib/data:/data'])

    def test_absolute_windows_path_does_not_expand(self):
        d = make_service_dict('foo', {'build': '.', 'volumes': ['c:\\data:/data']}, working_dir='.')
        self.assertEqual(d['volumes'], ['c:\\data:/data'])

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
    def test_relative_path_does_expand_posix(self):
        """Relative host paths resolve against the working directory."""
        d = make_service_dict('foo', {'build': '.', 'volumes': ['./data:/data']}, working_dir='/home/me/myproject')
        self.assertEqual(d['volumes'], ['/home/me/myproject/data:/data'])

        d = make_service_dict('foo', {'build': '.', 'volumes': ['.:/data']}, working_dir='/home/me/myproject')
        self.assertEqual(d['volumes'], ['/home/me/myproject:/data'])

        d = make_service_dict('foo', {'build': '.', 'volumes': ['../otherproject:/data']}, working_dir='/home/me/myproject')
        self.assertEqual(d['volumes'], ['/home/me/otherproject:/data'])

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows paths')
    def test_relative_path_does_expand_windows(self):
        d = make_service_dict('foo', {'build': '.', 'volumes': ['./data:/data']}, working_dir='c:\\Users\\me\\myproject')
        self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject\\data:/data'])

        d = make_service_dict('foo', {'build': '.', 'volumes': ['.:/data']}, working_dir='c:\\Users\\me\\myproject')
        self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject:/data'])

        d = make_service_dict('foo', {'build': '.', 'volumes': ['../otherproject:/data']}, working_dir='c:\\Users\\me\\myproject')
        self.assertEqual(d['volumes'], ['c:\\Users\\me\\otherproject:/data'])

    @mock.patch.dict(os.environ)
    def test_home_directory_with_driver_does_not_expand(self):
        """With a volume_driver set, '~' is passed through to the driver."""
        os.environ['NAME'] = 'surprise!'
        d = make_service_dict('foo', {
            'build': '.',
            'volumes': ['~:/data'],
            'volume_driver': 'foodriver',
        }, working_dir='.')
        self.assertEqual(d['volumes'], ['~:/data'])

    def test_volume_path_with_non_ascii_directory(self):
        """Non-ASCII host paths survive resolution unchanged."""
        volume = u'/Füü/data:/data'
        container_path = config.resolve_volume_path(".", volume)
        self.assertEqual(container_path, volume)
class MergePathMappingTest(object):
    """Shared merge behaviour for path-mapping options.

    Mixed into concrete TestCases which override config_name() with the
    option key under test (e.g. 'volumes', 'devices').  Overrides replace
    entries with the same container path; others are unioned.
    """

    def config_name(self):
        return ""

    def test_empty(self):
        self.assertNotIn(self.config_name(), config.merge_service_dicts({}, {}))

    def test_no_override(self):
        name = self.config_name()
        merged = config.merge_service_dicts({name: ['/foo:/code', '/data']}, {})
        self.assertEqual(set(merged[name]), set(['/foo:/code', '/data']))

    def test_no_base(self):
        name = self.config_name()
        merged = config.merge_service_dicts({}, {name: ['/bar:/code']})
        self.assertEqual(set(merged[name]), set(['/bar:/code']))

    def test_override_explicit_path(self):
        name = self.config_name()
        merged = config.merge_service_dicts(
            {name: ['/foo:/code', '/data']},
            {name: ['/bar:/code']})
        self.assertEqual(set(merged[name]), set(['/bar:/code', '/data']))

    def test_add_explicit_path(self):
        name = self.config_name()
        merged = config.merge_service_dicts(
            {name: ['/foo:/code', '/data']},
            {name: ['/bar:/code', '/quux:/data']})
        self.assertEqual(set(merged[name]), set(['/bar:/code', '/quux:/data']))

    def test_remove_explicit_path(self):
        name = self.config_name()
        merged = config.merge_service_dicts(
            {name: ['/foo:/code', '/quux:/data']},
            {name: ['/bar:/code', '/data']})
        self.assertEqual(set(merged[name]), set(['/bar:/code', '/data']))
class MergeVolumesTest(unittest.TestCase, MergePathMappingTest):
    # Runs the shared path-mapping merge cases against the 'volumes' key.
    def config_name(self):
        return 'volumes'
class MergeDevicesTest(unittest.TestCase, MergePathMappingTest):
    # Runs the shared path-mapping merge cases against the 'devices' key.
    def config_name(self):
        return 'devices'
class BuildOrImageMergeTest(unittest.TestCase):
    """Merging of the mutually-exclusive 'build' / 'image' keys."""

    def test_merge_build_or_image_no_override(self):
        merge = config.merge_service_dicts
        self.assertEqual(merge({'build': '.'}, {}), {'build': '.'})
        self.assertEqual(merge({'image': 'redis'}, {}), {'image': 'redis'})

    def test_merge_build_or_image_override_with_same(self):
        merge = config.merge_service_dicts
        self.assertEqual(
            merge({'build': '.'}, {'build': './web'}),
            {'build': './web'})
        self.assertEqual(
            merge({'image': 'redis'}, {'image': 'postgres'}),
            {'image': 'postgres'})

    def test_merge_build_or_image_override_with_other(self):
        # Overriding with the other key replaces the original entirely.
        merge = config.merge_service_dicts
        self.assertEqual(
            merge({'build': '.'}, {'image': 'redis'}),
            {'image': 'redis'})
        self.assertEqual(
            merge({'image': 'redis'}, {'build': '.'}),
            {'build': '.'})
class MergeListsTest(unittest.TestCase):
    """Merging plain list options ('ports') behaves as a set union."""

    def test_empty(self):
        self.assertNotIn('ports', config.merge_service_dicts({}, {}))

    def test_no_override(self):
        merged = config.merge_service_dicts({'ports': ['10:8000', '9000']}, {})
        self.assertEqual(set(merged['ports']), set(['10:8000', '9000']))

    def test_no_base(self):
        merged = config.merge_service_dicts({}, {'ports': ['10:8000', '9000']})
        self.assertEqual(set(merged['ports']), set(['10:8000', '9000']))

    def test_add_item(self):
        merged = config.merge_service_dicts(
            {'ports': ['10:8000', '9000']},
            {'ports': ['20:8000']})
        self.assertEqual(
            set(merged['ports']), set(['10:8000', '9000', '20:8000']))
class MergeStringsOrListsTest(unittest.TestCase):
    """Merging options ('dns') that accept either a string or a list."""

    def test_no_override(self):
        merged = config.merge_service_dicts({'dns': '8.8.8.8'}, {})
        self.assertEqual(set(merged['dns']), set(['8.8.8.8']))

    def test_no_base(self):
        merged = config.merge_service_dicts({}, {'dns': '8.8.8.8'})
        self.assertEqual(set(merged['dns']), set(['8.8.8.8']))

    def test_add_string(self):
        merged = config.merge_service_dicts(
            {'dns': ['8.8.8.8']}, {'dns': '9.9.9.9'})
        self.assertEqual(set(merged['dns']), set(['8.8.8.8', '9.9.9.9']))

    def test_add_list(self):
        merged = config.merge_service_dicts(
            {'dns': '8.8.8.8'}, {'dns': ['9.9.9.9']})
        self.assertEqual(set(merged['dns']), set(['8.8.8.8', '9.9.9.9']))
class MergeLabelsTest(unittest.TestCase):
    """Merging of 'labels': lists are parsed to dicts, override wins per key."""

    def test_empty(self):
        service_dict = config.merge_service_dicts({}, {})
        self.assertNotIn('labels', service_dict)

    def test_no_override(self):
        # A bare label name ('bar') parses to an empty-string value.
        service_dict = config.merge_service_dicts(
            make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'),
            make_service_dict('foo', {'build': '.'}, 'tests/'),
        )
        self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': ''})

    def test_no_base(self):
        service_dict = config.merge_service_dicts(
            make_service_dict('foo', {'build': '.'}, 'tests/'),
            make_service_dict('foo', {'build': '.', 'labels': ['foo=2']}, 'tests/'),
        )
        self.assertEqual(service_dict['labels'], {'foo': '2'})

    def test_override_explicit_value(self):
        service_dict = config.merge_service_dicts(
            make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'),
            make_service_dict('foo', {'build': '.', 'labels': ['foo=2']}, 'tests/'),
        )
        self.assertEqual(service_dict['labels'], {'foo': '2', 'bar': ''})

    def test_add_explicit_value(self):
        service_dict = config.merge_service_dicts(
            make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'),
            make_service_dict('foo', {'build': '.', 'labels': ['bar=2']}, 'tests/'),
        )
        self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': '2'})

    def test_remove_explicit_value(self):
        # Overriding 'bar=2' with bare 'bar' resets its value to ''.
        service_dict = config.merge_service_dicts(
            make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar=2']}, 'tests/'),
            make_service_dict('foo', {'build': '.', 'labels': ['bar']}, 'tests/'),
        )
        self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': ''})
class MemoryOptionsTest(unittest.TestCase):
    """Validation of 'mem_limit' / 'memswap_limit' interaction."""

    def test_validation_fails_with_just_memswap_limit(self):
        """
        When you set a 'memswap_limit' it is invalid config unless you also set
        a mem_limit
        """
        expected_error_msg = (
            "Service 'foo' configuration key 'memswap_limit' is invalid: when "
            "defining 'memswap_limit' you must set 'mem_limit' as well"
        )
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {
                        'foo': {'image': 'busybox', 'memswap_limit': 2000000},
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )

    def test_validation_with_correct_memswap_values(self):
        """Both limits set as integers passes validation."""
        service_dict = config.load(
            build_config_details(
                {'foo': {'image': 'busybox', 'mem_limit': 1000000, 'memswap_limit': 2000000}},
                'tests/fixtures/extends',
                'common.yml'
            )
        )
        self.assertEqual(service_dict[0]['memswap_limit'], 2000000)

    def test_memswap_can_be_a_string(self):
        """Limits may also be unit-suffixed strings like '1G' / '512M'."""
        service_dict = config.load(
            build_config_details(
                {'foo': {'image': 'busybox', 'mem_limit': "1G", 'memswap_limit': "512M"}},
                'tests/fixtures/extends',
                'common.yml'
            )
        )
        self.assertEqual(service_dict[0]['memswap_limit'], "512M")
class EnvTest(unittest.TestCase):
    """Parsing and resolution of 'environment' and 'env_file' options."""

    def test_parse_environment_as_list(self):
        """KEY=VALUE strings are split on the first '='; trailing '=' gives ''."""
        environment = [
            'NORMAL=F1',
            'CONTAINS_EQUALS=F=2',
            'TRAILING_EQUALS=',
        ]
        self.assertEqual(
            config.parse_environment(environment),
            {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''},
        )

    def test_parse_environment_as_dict(self):
        """A dict form is passed through unchanged (None values preserved)."""
        environment = {
            'NORMAL': 'F1',
            'CONTAINS_EQUALS': 'F=2',
            'TRAILING_EQUALS': None,
        }
        self.assertEqual(config.parse_environment(environment), environment)

    def test_parse_environment_invalid(self):
        """A bare string (not list/dict) is rejected."""
        with self.assertRaises(ConfigurationError):
            config.parse_environment('a=b')

    def test_parse_environment_empty(self):
        self.assertEqual(config.parse_environment(None), {})

    @mock.patch.dict(os.environ)
    def test_resolve_environment(self):
        """None values pull from os.environ; missing vars resolve to ''."""
        os.environ['FILE_DEF'] = 'E1'
        os.environ['FILE_DEF_EMPTY'] = 'E2'
        os.environ['ENV_DEF'] = 'E3'

        service_dict = make_service_dict(
            'foo', {
                'build': '.',
                'environment': {
                    'FILE_DEF': 'F1',
                    'FILE_DEF_EMPTY': '',
                    'ENV_DEF': None,
                    'NO_DEF': None
                },
            },
            'tests/'
        )

        self.assertEqual(
            service_dict['environment'],
            {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''},
        )

    def test_env_from_file(self):
        service_dict = make_service_dict(
            'foo',
            {'build': '.', 'env_file': 'one.env'},
            'tests/fixtures/env',
        )
        self.assertEqual(
            service_dict['environment'],
            {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'},
        )

    def test_env_from_multiple_files(self):
        """Later env files override earlier ones (two.env wins for FOO)."""
        service_dict = make_service_dict(
            'foo',
            {'build': '.', 'env_file': ['one.env', 'two.env']},
            'tests/fixtures/env',
        )
        self.assertEqual(
            service_dict['environment'],
            {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'},
        )

    def test_env_nonexistent_file(self):
        options = {'env_file': 'nonexistent.env'}
        self.assertRaises(
            ConfigurationError,
            lambda: make_service_dict('foo', options, 'tests/fixtures/env'),
        )

    @mock.patch.dict(os.environ)
    def test_resolve_environment_from_file(self):
        """Values left blank in an env file resolve from os.environ."""
        os.environ['FILE_DEF'] = 'E1'
        os.environ['FILE_DEF_EMPTY'] = 'E2'
        os.environ['ENV_DEF'] = 'E3'
        service_dict = make_service_dict(
            'foo',
            {'build': '.', 'env_file': 'resolve.env'},
            'tests/fixtures/env',
        )
        self.assertEqual(
            service_dict['environment'],
            {
                'FILE_DEF': u'bär',
                'FILE_DEF_EMPTY': '',
                'ENV_DEF': 'E3',
                'NO_DEF': ''
            },
        )

    @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
    @mock.patch.dict(os.environ)
    def test_resolve_path(self):
        """Env vars are interpolated inside volume path bindings."""
        os.environ['HOSTENV'] = '/tmp'
        os.environ['CONTAINERENV'] = '/host/tmp'

        service_dict = config.load(
            build_config_details(
                {'foo': {'build': '.', 'volumes': ['$HOSTENV:$CONTAINERENV']}},
                "tests/fixtures/env",
                None,
            )
        )[0]
        self.assertEqual(set(service_dict['volumes']), set(['/tmp:/host/tmp']))

        service_dict = config.load(
            build_config_details(
                {'foo': {'build': '.', 'volumes': ['/opt${HOSTENV}:/opt${CONTAINERENV}']}},
                "tests/fixtures/env",
                None,
            )
        )[0]
        self.assertEqual(set(service_dict['volumes']), set(['/opt/tmp:/opt/host/tmp']))
def load_from_filename(filename):
    """Load service dicts from a single compose file path."""
    config_details = config.find('.', [filename])
    return config.load(config_details)
class ExtendsTest(unittest.TestCase):
    """Behaviour and validation of the 'extends' option.

    Uses fixture files under tests/fixtures/extends.  The only code change
    in this revision is replacing the deprecated ``assertEquals`` alias
    with ``assertEqual`` (available on both Python 2.7 and 3.x).
    """

    def test_extends(self):
        service_dicts = load_from_filename('tests/fixtures/extends/docker-compose.yml')

        self.assertEqual(service_sort(service_dicts), service_sort([
            {
                'name': 'mydb',
                'image': 'busybox',
                'command': 'top',
            },
            {
                'name': 'myweb',
                'image': 'busybox',
                'command': 'top',
                'links': ['mydb:db'],
                'environment': {
                    "FOO": "1",
                    "BAR": "2",
                    "BAZ": "2",
                },
            }
        ]))

    def test_nested(self):
        """A service extending a service that itself extends another."""
        service_dicts = load_from_filename('tests/fixtures/extends/nested.yml')

        self.assertEqual(service_dicts, [
            {
                'name': 'myweb',
                'image': 'busybox',
                'command': '/bin/true',
                'environment': {
                    "FOO": "2",
                    "BAR": "2",
                },
            },
        ])

    def test_self_referencing_file(self):
        """
        We specify a 'file' key that is the filename we're already in.
        """
        service_dicts = load_from_filename('tests/fixtures/extends/specify-file-as-self.yml')
        self.assertEqual(service_sort(service_dicts), service_sort([
            {
                'environment':
                {
                    'YEP': '1', 'BAR': '1', 'BAZ': '3'
                },
                'image': 'busybox',
                'name': 'myweb'
            },
            {
                'environment':
                {'YEP': '1'},
                'image': 'busybox',
                'name': 'otherweb'
            },
            {
                'environment':
                {'YEP': '1', 'BAZ': '3'},
                'image': 'busybox',
                'name': 'web'
            }
        ]))

    def test_circular(self):
        """A cycle of extends references is detected and reported with its trail."""
        with pytest.raises(config.CircularReference) as exc:
            load_from_filename('tests/fixtures/extends/circle-1.yml')

        path = [
            (os.path.basename(filename), service_name)
            for (filename, service_name) in exc.value.trail
        ]
        expected = [
            ('circle-1.yml', 'web'),
            ('circle-2.yml', 'other'),
            ('circle-1.yml', 'web'),
        ]
        self.assertEqual(path, expected)

    def test_extends_validation_empty_dictionary(self):
        with self.assertRaisesRegexp(ConfigurationError, 'service'):
            config.load(
                build_config_details(
                    {
                        'web': {'image': 'busybox', 'extends': {}},
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )

    def test_extends_validation_missing_service_key(self):
        with self.assertRaisesRegexp(ConfigurationError, "'service' is a required property"):
            config.load(
                build_config_details(
                    {
                        'web': {'image': 'busybox', 'extends': {'file': 'common.yml'}},
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )

    def test_extends_validation_invalid_key(self):
        expected_error_msg = (
            "Service 'web' configuration key 'extends' "
            "contains unsupported option: 'rogue_key'"
        )
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {
                        'web': {
                            'image': 'busybox',
                            'extends': {
                                'file': 'common.yml',
                                'service': 'web',
                                'rogue_key': 'is not allowed'
                            }
                        },
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )

    def test_extends_validation_sub_property_key(self):
        expected_error_msg = (
            "Service 'web' configuration key 'extends' 'file' contains 1, "
            "which is an invalid type, it should be a string"
        )
        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            config.load(
                build_config_details(
                    {
                        'web': {
                            'image': 'busybox',
                            'extends': {
                                'file': 1,
                                'service': 'web',
                            }
                        },
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )

    def test_extends_validation_no_file_key_no_filename_set(self):
        dictionary = {'extends': {'service': 'web'}}

        def load_config():
            return make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends')

        self.assertRaisesRegexp(ConfigurationError, 'file', load_config)

    def test_extends_validation_valid_config(self):
        service = config.load(
            build_config_details(
                {
                    'web': {'image': 'busybox', 'extends': {'service': 'web', 'file': 'common.yml'}},
                },
                'tests/fixtures/extends',
                'common.yml'
            )
        )

        self.assertEqual(len(service), 1)
        self.assertIsInstance(service[0], dict)
        self.assertEqual(service[0]['command'], "/bin/true")

    def test_extended_service_with_invalid_config(self):
        expected_error_msg = "Service 'myweb' has neither an image nor a build path specified"

        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            load_from_filename('tests/fixtures/extends/service-with-invalid-schema.yml')

    def test_extended_service_with_valid_config(self):
        service = load_from_filename('tests/fixtures/extends/service-with-valid-composite-extends.yml')
        self.assertEqual(service[0]['command'], "top")

    def test_extends_file_defaults_to_self(self):
        """
        Test not specifying a file in our extends options that the
        config is valid and correctly extends from itself.
        """
        service_dicts = load_from_filename('tests/fixtures/extends/no-file-specified.yml')
        self.assertEqual(service_sort(service_dicts), service_sort([
            {
                'name': 'myweb',
                'image': 'busybox',
                'environment': {
                    "BAR": "1",
                    "BAZ": "3",
                }
            },
            {
                'name': 'web',
                'image': 'busybox',
                'environment': {
                    "BAZ": "3",
                }
            }
        ]))

    def test_invalid_links_in_extended_service(self):
        expected_error_msg = "services with 'links' cannot be extended"

        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            load_from_filename('tests/fixtures/extends/invalid-links.yml')

    def test_invalid_volumes_from_in_extended_service(self):
        expected_error_msg = "services with 'volumes_from' cannot be extended"

        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            load_from_filename('tests/fixtures/extends/invalid-volumes.yml')

    def test_invalid_net_in_extended_service(self):
        expected_error_msg = "services with 'net: container' cannot be extended"

        with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
            load_from_filename('tests/fixtures/extends/invalid-net.yml')

    @mock.patch.dict(os.environ)
    def test_valid_interpolation_in_extended_service(self):
        os.environ.update(
            HOSTNAME_VALUE="penguin",
        )
        expected_interpolated_value = "host-penguin"

        service_dicts = load_from_filename('tests/fixtures/extends/valid-interpolation.yml')
        for service in service_dicts:
            self.assertTrue(service['hostname'], expected_interpolated_value)

    @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
    def test_volume_path(self):
        dicts = load_from_filename('tests/fixtures/volume-path/docker-compose.yml')

        paths = [
            '%s:/foo' % os.path.abspath('tests/fixtures/volume-path/common/foo'),
            '%s:/bar' % os.path.abspath('tests/fixtures/volume-path/bar'),
        ]

        self.assertEqual(set(dicts[0]['volumes']), set(paths))

    def test_parent_build_path_dne(self):
        child = load_from_filename('tests/fixtures/extends/nonexistent-path-child.yml')

        self.assertEqual(child, [
            {
                'name': 'dnechild',
                'image': 'busybox',
                'command': '/bin/true',
                'environment': {
                    "FOO": "1",
                    "BAR": "2",
                },
            },
        ])

    def test_load_throws_error_when_base_service_does_not_exist(self):
        err_msg = r'''Cannot extend service 'foo' in .*: Service not found'''
        with self.assertRaisesRegexp(ConfigurationError, err_msg):
            load_from_filename('tests/fixtures/extends/nonexistent-service.yml')

    def test_partial_service_config_in_extends_is_still_valid(self):
        dicts = load_from_filename('tests/fixtures/extends/valid-common-config.yml')
        self.assertEqual(dicts[0]['environment'], {'FOO': '1'})

    def test_extended_service_with_verbose_and_shorthand_way(self):
        services = load_from_filename('tests/fixtures/extends/verbose-and-shorthand.yml')
        self.assertEqual(service_sort(services), service_sort([
            {
                'name': 'base',
                'image': 'busybox',
                'environment': {'BAR': '1'},
            },
            {
                'name': 'verbose',
                'image': 'busybox',
                'environment': {'BAR': '1', 'FOO': '1'},
            },
            {
                'name': 'shorthand',
                'image': 'busybox',
                'environment': {'BAR': '1', 'FOO': '2'},
            },
        ]))
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
class ExpandPathTest(unittest.TestCase):
    """config.expand_path() for relative, absolute and '~' paths."""

    working_dir = '/home/user/somedir'

    def test_expand_path_normal(self):
        expanded = config.expand_path(self.working_dir, 'myfile')
        self.assertEqual(expanded, self.working_dir + '/' + 'myfile')

    def test_expand_path_absolute(self):
        abs_path = '/home/user/otherdir/somefile'
        self.assertEqual(config.expand_path(self.working_dir, abs_path), abs_path)

    def test_expand_path_with_tilde(self):
        with mock.patch.dict(os.environ):
            os.environ['HOME'] = user_path = '/home/user/'
            expanded = config.expand_path(self.working_dir, '~/otherdir/somefile')
        self.assertEqual(expanded, user_path + 'otherdir/somefile')
class VolumePathTest(unittest.TestCase):
    """split_path_mapping() handling of Windows drive-letter host paths."""

    @pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive')
    def test_split_path_mapping_with_windows_path(self):
        host_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config"
        windows_volume_path = host_path + ":/opt/connect/config:ro"
        self.assertEqual(
            config.split_path_mapping(windows_volume_path),
            ("/opt/connect/config:ro", host_path))
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
class BuildPathTest(unittest.TestCase):
    """Resolution of the 'build' context path.

    Code change: the deprecated ``assertEquals`` alias is replaced with
    ``assertEqual`` (available on both Python 2.7 and 3.x).
    """

    def setUp(self):
        self.abs_context_path = os.path.join(os.getcwd(), 'tests/fixtures/build-ctx')

    def test_nonexistent_path(self):
        """A build path that does not exist is a configuration error."""
        with self.assertRaises(ConfigurationError):
            config.load(
                build_config_details(
                    {
                        'foo': {'build': 'nonexistent.path'},
                    },
                    'working_dir',
                    'filename.yml'
                )
            )

    def test_relative_path(self):
        """Relative build paths resolve against the config file's directory."""
        relative_build_path = '../build-ctx/'
        service_dict = make_service_dict(
            'relpath',
            {'build': relative_build_path},
            working_dir='tests/fixtures/build-path'
        )
        self.assertEqual(service_dict['build'], self.abs_context_path)

    def test_absolute_path(self):
        service_dict = make_service_dict(
            'abspath',
            {'build': self.abs_context_path},
            working_dir='tests/fixtures/build-path'
        )
        self.assertEqual(service_dict['build'], self.abs_context_path)

    def test_from_file(self):
        service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml')
        self.assertEqual(service_dict, [{'name': 'foo', 'build': self.abs_context_path}])
class GetDefaultConfigFilesTestCase(unittest.TestCase):
    """Precedence order of the default compose/fig config filenames."""

    files = [
        'docker-compose.yml',
        'docker-compose.yaml',
        'fig.yml',
        'fig.yaml',
    ]

    def test_get_config_path_default_file_in_basedir(self):
        for index, filename in enumerate(self.files):
            candidates = self.files[index:]
            self.assertEqual(
                filename, get_config_filename_for_files(candidates))
        with self.assertRaises(config.ComposeFileNotFound):
            get_config_filename_for_files([])

    def test_get_config_path_default_file_in_parent_dir(self):
        """Test with files placed in the subdir"""

        def get_config_in_subdir(files):
            return get_config_filename_for_files(files, subdir=True)

        for index, filename in enumerate(self.files):
            self.assertEqual(filename, get_config_in_subdir(self.files[index:]))
        with self.assertRaises(config.ComposeFileNotFound):
            get_config_in_subdir([])
def get_config_filename_for_files(filenames, subdir=None):
    """Create empty files with the given names in a temp dir and return the
    basename of the default config file compose picks there.

    With subdir=True the search starts from a fresh subdirectory, so the
    files live in the parent of the search directory.
    """
    project_dir = tempfile.mkdtemp()
    try:
        for name in filenames:
            with open(os.path.join(project_dir, name), 'w') as f:
                f.write('')
        base_dir = tempfile.mkdtemp(dir=project_dir) if subdir else project_dir
        filename, = config.get_default_config_files(base_dir)
        return os.path.basename(filename)
    finally:
        shutil.rmtree(project_dir)
|
alexandrev/compose
|
tests/unit/config/config_test.py
|
Python
|
apache-2.0
| 52,302
|
# Problem:
# Read n, then n floating-point numbers.  Keep separate sum / min / max
# aggregates for the numbers at odd 1-based positions and at even 1-based
# positions.  When there is no min/max (no numbers at that parity), print
# "No" for it.
import math

num_of_loops = int(input())

odd_sum = 0
odd_min = math.inf
odd_max = -math.inf
even_sum = 0
even_min = math.inf
even_max = -math.inf

for position in range(1, num_of_loops + 1):
    new_num = float(input())
    if position % 2 != 0:  # odd 1-based position
        odd_sum += new_num
        if new_num < odd_min:
            odd_min = new_num
        if new_num > odd_max:
            odd_max = new_num
    else:  # even 1-based position
        even_sum += new_num
        if new_num < even_min:
            even_min = new_num
        if new_num > even_max:
            even_max = new_num

# A float sum of exactly zero would print as "0.0"; the zero branch prints
# a plain "0" instead.  Every output line except the last carries a
# trailing comma (the original OddSum lines were missing it, unlike the
# matching EvenSum lines).
if odd_sum == 0:
    print("OddSum = 0,")
else:
    print(f"OddSum = {odd_sum},")

if odd_min != math.inf:
    print(f"OddMin = {odd_min},")
else:
    print("OddMin = No,")

if odd_max != -math.inf:
    print(f"OddMax = {odd_max},")
else:
    print("OddMax = No,")

if even_sum == 0:
    print("EvenSum = 0,")
else:
    print(f"EvenSum = {even_sum},")

if even_min != math.inf:
    print(f"EvenMin = {even_min},")
else:
    print("EvenMin = No,")

if even_max != -math.inf:
    print(f"EvenMax = {even_max}")
else:
    print("EvenMax = No")
|
YaniLozanov/Software-University
|
Python/PyCharm/05.Simple Loops/11.Even or Odd positions.py
|
Python
|
mit
| 1,371
|
# encoding=utf-8
'''Item queue management and processing.'''
import abc
import contextlib
import gettext
import logging
import os
import itertools
from trollius import From, Return
import trollius
from wpull.backport.logging import BraceMessage as __
from wpull.database.base import NotFound
from wpull.hook import HookableMixin, HookDisconnected
from wpull.item import Status, URLItem
from wpull.url import parse_url_or_log
import wpull.string
_logger = logging.getLogger(__name__)
_ = gettext.gettext
class BaseEngine(object):
'''Base engine producer-consumer.'''
POISON_PILL = object()
ITEM_PRIORITY = 1
POISON_PRIORITY = 0
    def __init__(self):
        super().__init__()
        # Target number of concurrent workers (name-mangled; exposed via
        # the _concurrent property and changed via _set_concurrent()).
        self.__concurrent = 1
        # True while the engine loop should keep running.
        self._running = False
        # Items awaiting a worker.  Poison pills use POISON_PRIORITY (0),
        # which sorts ahead of ITEM_PRIORITY (1) items.
        self._item_queue = trollius.PriorityQueue()
        # One token per item handed out; join() blocks until workers have
        # called task_done() for all of them.
        self._token_queue = trollius.JoinableQueue()
        # Limits the producer to one fetched-but-unclaimed item at a time.
        self._item_get_semaphore = trollius.BoundedSemaphore(value=1)
        self._producer_task = None
        # Set of currently-running worker tasks.
        self._worker_tasks = set()
    @property
    def _concurrent(self):
        '''Get concurrency value.'''
        # Read-only view of the name-mangled __concurrent attribute.
        return self.__concurrent
    @trollius.coroutine
    def _run_workers(self):
        '''Run the consumers.

        Spawns the producer task, keeps the worker pool topped up to the
        configured concurrency, and reaps workers as they finish.  Returns
        once the engine has been stopped and all tasks have wound down.

        Coroutine.
        '''
        self._running = True
        self._producer_task = trollius.async(self._run_producer_wrapper())
        worker_tasks = self._worker_tasks

        while self._running:
            # Top up the pool to the current concurrency level.
            while len(worker_tasks) < self.__concurrent:
                worker_task = trollius.async(self._run_worker())
                worker_tasks.add(worker_task)

            wait_coroutine = trollius.wait(
                worker_tasks, return_when=trollius.FIRST_COMPLETED)
            done_tasks = (yield From(wait_coroutine))[0]

            for task in done_tasks:
                # result() re-raises any exception raised in the worker.
                task.result()
                worker_tasks.remove(task)

        _logger.debug('Exited workers loop.')

        if worker_tasks:
            _logger.debug('Waiting for workers to stop.')
            yield From(trollius.wait(worker_tasks))

        _logger.debug('Waiting for producer to stop.')

        if self._item_get_semaphore.locked():
            # The producer fetched item(s) that no worker will process;
            # report them as discarded and unblock the producer.
            _logger.warning(__(
                gettext.ngettext(
                    'Discarding {num} unprocessed item.',
                    'Discarding {num} unprocessed items.',
                    self._token_queue.qsize()
                ),
                num=self._token_queue.qsize()
            ))
            self._item_get_semaphore.release()

        yield From(self._producer_task)
    @trollius.coroutine
    def _run_producer_wrapper(self):
        '''Run the producer, if exception, stop engine.'''
        try:
            yield From(self._run_producer())
        except Exception as error:
            # StopIteration is how (trollius-era) coroutines signal a plain
            # return; it is not a failure.
            if not isinstance(error, StopIteration):
                # Stop the workers so the producer exception will be handled
                _logger.error('Producer died.')
                self._stop()

            raise
@trollius.coroutine
def _run_producer(self):
'''Run the producer.
Coroutine.
'''
while self._running:
_logger.debug('Get item from source')
item = yield From(self._get_item())
# FIXME: accessing protected unfinished_tasks
if item is None and self._token_queue._unfinished_tasks == 0:
_logger.debug('Producer stopping.')
self._stop()
elif item is None:
_logger.debug(
__('Producer waiting for {0} workers to finish up.',
len(self._worker_tasks)))
yield From(self._token_queue.join())
else:
yield From(self._item_get_semaphore.acquire())
self._token_queue.put_nowait(None)
yield From(self._item_queue.put((self.ITEM_PRIORITY, item)))
@trollius.coroutine
def _run_worker(self):
'''Run a single consumer.
Coroutine.
'''
_logger.debug('Worker start.')
while True:
priority, item = yield From(self._item_queue.get())
if item == self.POISON_PILL:
_logger.debug('Worker quitting.')
return
else:
_logger.debug(__('Processing item {0}.', item))
self._item_get_semaphore.release()
self._token_queue.get_nowait()
yield From(self._process_item(item))
self._token_queue.task_done()
if os.environ.get('OBJGRAPH_DEBUG'):
import gc
import objgraph
gc.collect()
objgraph.show_most_common_types(25)
if os.environ.get('FILE_LEAK_DEBUG'):
import subprocess
output = subprocess.check_output(
['lsof', '-p', str(os.getpid()), '-n'])
for line in output.decode('ascii', 'replace').split('\n'):
if 'REG' in line and \
(os.getcwd() in line or '/tmp/' in line):
print('FILELEAK', line)
def _set_concurrent(self, new_num):
'''Set concurrency level.'''
if self._running:
assert new_num >= 0, \
'No negative concurrency pls. Got {}.'.format(new_num)
change = new_num - self.__concurrent
if change < 0:
for dummy in range(abs(change)):
_logger.debug('Put poison pill for less workers.')
self._item_queue.put_nowait(
(self.POISON_PRIORITY, self.POISON_PILL))
elif change > 0:
_logger.debug('Put 1 poison pill to trigger more workers.')
self._item_queue.put_nowait(
(self.POISON_PRIORITY, self.POISON_PILL))
self.__concurrent = new_num
def _stop(self):
'''Gracefully stop.'''
if self._running:
self._running = False
for dummy in range(len(self._worker_tasks)):
_logger.debug('Put poison pill.')
self._item_queue.put_nowait(
(self.POISON_PRIORITY, self.POISON_PILL))
@abc.abstractmethod
@trollius.coroutine
def _get_item(self):
'''Get an item.
Coroutine.
'''
@abc.abstractmethod
@trollius.coroutine
def _process_item(self, item):
'''Process an item.
Coroutine.
'''
class Engine(BaseEngine, HookableMixin):
    '''Manages and processes item.
    Args:
        url_table (:class:`.database.BaseURLTable`): A table of URLs to
            be processed.
        processor (:class:`.processor.BaseProcessor`): A processor that
            will do things to finish an item.
        statistics (:class:`.stats.Statistics`): Information needed to
            compute the exit status.
        concurrent (int): The number of items to process at once.
        ignore_exceptions(bool): Whether to ignore exceptions.
        resource_monitor (:class:`.app.resource_monitor`): Use resource
            monitor to pause processing items if resources exceeded.
    The engine is described like the following:
    1. Get an "todo" item from the table. If none, skip to step 4.
    2. Ask the processor to process the item.
    3. Go to step 1.
    4. Get an "error" item from the table. If none, skip to step 7.
    5. Ask the processor to process the item.
    6. Go to step 4.
    7. Stop.
    In the context of Wpull, URLs are the central part of items.
    '''
    def __init__(self, url_table, processor, statistics,
                 concurrent=1, ignore_exceptions=False, resource_monitor=None):
        super().__init__()
        self._url_table = url_table
        self._processor = processor
        self._statistics = statistics
        self._ignore_exceptions = ignore_exceptions
        self._resource_monitor = resource_monitor
        # NOTE(review): only assigned here; appears unused in this class.
        self._num_worker_busy = 0
        self._set_concurrent(concurrent)
        self.register_hook('engine_run')
    @property
    def concurrent(self):
        '''The concurrency value.'''
        return self._concurrent
    def set_concurrent(self, value):
        '''Set concurrency value.'''
        self._set_concurrent(value)
    @trollius.coroutine
    def __call__(self):
        '''Run the engine.
        This function will clear any items marked as in-progress, start up
        the workers, and loop until a stop is requested.
        Returns:
            int: An integer describing the exit status.
        .. seealso:: :class:`.errors.ExitStatus`
        '''
        try:
            self.call_hook('engine_run')
        except HookDisconnected:
            # No script hooked 'engine_run'; that is fine.
            pass
        self._release_in_progress()
        yield From(self._run_workers())
    def _release_in_progress(self):
        '''Release any items in progress.

        Items left checked-out by a previous (crashed) run are returned
        to the table so they can be retried.
        '''
        _logger.debug('Release in-progress.')
        self._url_table.release()
    @trollius.coroutine
    def _get_item(self):
        # Producer callback: fetch the next URL record (or None).
        with self._maybe_ignore_exceptions():
            return self._get_next_url_record()
    def _get_next_url_record(self):
        '''Return the next available URL from the URL table.
        This function will return items marked as "todo" and then items
        marked as "error". As a consequence, items experiencing errors will
        be done last.
        Returns:
            :class:`.item.URLRecord`.
        '''
        _logger.debug('Get next URL todo.')
        try:
            url_record = self._url_table.check_out(Status.todo)
        except NotFound:
            url_record = None
        if not url_record:
            # No "todo" items left; fall back to retrying errored ones.
            try:
                _logger.debug('Get next URL error.')
                url_record = self._url_table.check_out(Status.error)
            except NotFound:
                url_record = None
        _logger.debug(__('Return record {0}.', url_record))
        return url_record
    @trollius.coroutine
    def _process_item(self, url_record):
        '''Process given item.
        Coroutine.
        '''
        with self._maybe_ignore_exceptions():
            # Pause first if disk/memory is low, then do the real work.
            yield From(self._check_resource_monitor())
            yield From(self._process_url_item(url_record))
    @trollius.coroutine
    def _check_resource_monitor(self):
        # Blocks (sleeping in 60 s intervals) while the resource monitor
        # reports low disk space or memory; warns every 15th check.
        if not self._resource_monitor:
            return
        for counter in itertools.count():
            resource_info = self._resource_monitor.check()
            if not resource_info:
                if counter:
                    # We were paused at least once; announce recovery.
                    _logger.info(_('Situation cleared.'))
                break
            if counter % 15 == 0:
                if resource_info.path:
                    _logger.warning(__(
                        _('Low disk space on {path} ({size} free).'),
                        path=resource_info.path,
                        size=wpull.string.format_size(resource_info.free)
                    ))
                else:
                    _logger.warning(__(
                        _('Low memory ({size} free).'),
                        size=wpull.string.format_size(resource_info.free)
                    ))
                _logger.warning(_('Waiting for operator to clear situation.'))
            yield From(trollius.sleep(60))
    @trollius.coroutine
    def _process_url_item(self, url_record):
        '''Process an item.
        Args:
            url_item (:class:`.database.URLRecord`): The item to process.
        This function calls :meth:`.processor.BaseProcessor.process`.
        Coroutine.
        '''
        assert url_record
        url_info = parse_url_or_log(url_record.url)
        if not url_info:
            # Unparseable URL: mark it skipped and move on.
            url_item = URLItem(self._url_table, None, url_record)
            url_item.skip()
            return
        url_item = URLItem(self._url_table, url_info, url_record)
        _logger.debug(__('Begin session for {0} {1}.',
                         url_record, url_item.url_info))
        yield From(self._processor.process(url_item))
        # The processor must have marked the item done/skipped/errored.
        assert url_item.is_processed
        self._statistics.mark_done(url_info)
        if self._statistics.is_quota_exceeded:
            _logger.debug('Stopping due to quota.')
            self.stop()
        _logger.debug(__('End session for {0} {1}.',
                         url_item.url_record, url_item.url_info))
    def stop(self):
        '''Stop the engine.'''
        _logger.debug(__('Stopping'))
        self._stop()
    @contextlib.contextmanager
    def _maybe_ignore_exceptions(self):
        '''Catch all exceptions and maybe ignore them.

        When ``ignore_exceptions`` is set, exceptions are logged and
        swallowed so a single bad item cannot kill the crawl.
        '''
        if self._ignore_exceptions:
            try:
                yield
            except Exception as error:
                if not isinstance(error, StopIteration):
                    _logger.exception('Ignored exception. Program unstable!')
                else:
                    raise
        else:
            yield
|
bright-sparks/wpull
|
wpull/engine.py
|
Python
|
gpl-3.0
| 13,002
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_file_copy
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Copy a file to a remote NXOS device.
description:
- This module supports two different workflows for copying a file
to flash (or bootflash) on NXOS devices. Files can either be (1) pushed
from the Ansible controller to the device or (2) pulled from a remote SCP
file server to the device. File copies are initiated from the NXOS
device to the remote SCP server. This module only supports the
use of connection C(network_cli) or C(Cli) transport with connection C(local).
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOS 7.0(3)I2(5), 7.0(3)I4(6), 7.0(3)I5(3),
7.0(3)I6(1), 7.0(3)I7(3), 6.0(2)A8(8), 7.0(3)F3(4), 7.3(0)D1(1),
8.3(0)
- When pushing files (file_pull is False) to the NXOS device,
feature scp-server must be enabled.
- When pulling files (file_pull is True) to the NXOS device,
feature scp-server is not required.
- When pulling files (file_pull is True) to the NXOS device,
no transfer will take place if the file is already present.
- Check mode will tell you if the file would be copied.
requirements:
- paramiko (required when file_pull is False)
- SCPClient (required when file_pull is False)
- pexpect (required when file_pull is True)
options:
local_file:
description:
- When (file_pull is False) this is the path to the local file on the Ansible controller.
The local directory must exist.
- When (file_pull is True) this is the file name used on the NXOS device.
remote_file:
description:
- When (file_pull is False) this is the remote file path on the NXOS device.
If omitted, the name of the local file will be used.
The remote directory must exist.
- When (file_pull is True) this is the full path to the file on the remote SCP
server to be copied to the NXOS device.
file_system:
description:
- The remote file system of the device. If omitted,
devices that support a I(file_system) parameter will use
their default values.
default: "bootflash:"
connect_ssh_port:
description:
- SSH port to connect to server during transfer of file
default: 22
version_added: "2.5"
file_pull:
description:
- When (False) file is copied from the Ansible controller to the NXOS device.
- When (True) file is copied from a remote SCP server to the NXOS device.
In this mode, the file copy is initiated from the NXOS device.
- If the file is already present on the device it will be overwritten and
therefore the operation is NOT idempotent.
type: bool
default: False
version_added: "2.7"
file_pull_timeout:
description:
- Use this parameter to set timeout in seconds, when transferring
large files or when the network is slow.
default: 300
version_added: "2.7"
remote_scp_server:
description:
- The remote scp server address which is used to pull the file.
This is required if file_pull is True.
version_added: "2.7"
remote_scp_server_user:
description:
- The remote scp server username which is used to pull the file.
This is required if file_pull is True.
version_added: "2.7"
remote_scp_server_password:
description:
- The remote scp server password which is used to pull the file.
This is required if file_pull is True.
version_added: "2.7"
'''
# Usage examples rendered by ansible-doc.  The first task pushes a file
# from the controller; the second pulls one from an SCP server.
# Fixed: the pull-mode task previously repeated the 'nxos_file_copy:' key,
# which is invalid YAML.
EXAMPLES = '''
# File copy from ansible controller to nxos device
- name: "copy from server to device"
  nxos_file_copy:
    local_file: "./test_file.txt"
    remote_file: "test_file.txt"

# Initiate file copy from the nxos device to transfer file from an SCP server back to the nxos device
- name: "initiate file copy from device"
  nxos_file_copy:
    file_pull: True
    local_file: "xyz"
    remote_file: "/mydir/abc"
    remote_scp_server: "192.168.0.1"
    remote_scp_server_user: "myUser"
    remote_scp_server_password: "myPassword"
'''
RETURN = '''
transfer_status:
description: Whether a file was transferred. "No Transfer" or "Sent".
If file_pull is successful, it is set to "Received".
returned: success
type: string
sample: 'Sent'
local_file:
description: The path of the local file.
returned: success
type: string
sample: '/path/to/local/file'
remote_file:
description: The path of the remote file.
returned: success
type: string
sample: '/path/to/remote/file'
'''
import os
import re
import time
import traceback
from ansible.module_utils.network.nxos.nxos import run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_text
# Optional third-party libraries.  Which ones are required depends on the
# transfer direction (push needs paramiko + scp; pull needs pexpect);
# availability is verified in main() so a helpful error can be reported.
try:
    import paramiko
    HAS_PARAMIKO = True
except ImportError:
    HAS_PARAMIKO = False
try:
    from scp import SCPClient
    HAS_SCP = True
except ImportError:
    HAS_SCP = False
try:
    import pexpect
    HAS_PEXPECT = True
except ImportError:
    HAS_PEXPECT = False
def remote_file_exists(module, dst, file_system='bootflash:'):
    """Return True if *dst* is present on the device's *file_system*.

    Runs ``dir`` on the device and checks the output for the
    'No such file' marker.
    """
    listing = run_commands(
        module, {'command': 'dir {0}/{1}'.format(file_system, dst),
                 'output': 'text'})[0]
    return 'No such file' not in listing
def verify_remote_file_exists(module, dst, file_system='bootflash:'):
    """Return the size of *dst* as listed by ``dir``, or 0 if absent.

    The size is the first whitespace-separated token of the ``dir``
    output line (returned as a string when the file exists).
    """
    listing = run_commands(
        module, {'command': 'dir {0}/{1}'.format(file_system, dst),
                 'output': 'text'})[0]
    if 'No such file' in listing:
        return 0
    return listing.split()[0].strip()
def local_file_exists(module):
    """Return True if the configured ``local_file`` exists on the controller."""
    local_path = module.params['local_file']
    return os.path.isfile(local_path)
def get_flash_size(module):
    """Return the number of free bytes on the configured file system.

    Fails the module with a clear message when the ``dir`` output does
    not contain the expected 'bytes free' marker (the previous code
    crashed with AttributeError on ``None.group``).
    """
    command = 'dir {0}'.format(module.params['file_system'])
    body = run_commands(module, {'command': command, 'output': 'text'})[0]
    match = re.search(r'(\d+) bytes free', body)
    if match is None:
        module.fail_json(
            msg='Unable to determine free space on filesystem {0}'.format(
                module.params['file_system']))
    return int(match.group(1))
def enough_space(module):
    """Return True if the device has room for the configured local file."""
    local_size = os.path.getsize(module.params['local_file'])
    return local_size <= get_flash_size(module)
def transfer_file_to_device(module, dest):
    """SCP ``local_file`` from the controller to *dest* on the device.

    Fails the module when there is not enough space on the device or
    when a failed transfer cannot be verified by size.  Returns True on
    success.

    Fixes over the original: the bare ``except:`` (which also swallowed
    SystemExit/KeyboardInterrupt) is narrowed to ``except Exception``,
    and the SCP/SSH sessions are closed in a ``finally`` block so they
    are released even when ``fail_json()`` raises.
    """
    file_size = os.path.getsize(module.params['local_file'])
    if not enough_space(module):
        module.fail_json(msg='Could not transfer file. Not enough space on device.')
    # Connection parameters come from the standard nxos provider options.
    hostname = module.params['host']
    username = module.params['username']
    password = module.params['password']
    port = module.params['connect_ssh_port']
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(
        hostname=hostname,
        username=username,
        password=password,
        port=port)
    full_remote_path = '{0}{1}'.format(module.params['file_system'], dest)
    scp = SCPClient(ssh.get_transport())
    try:
        scp.put(module.params['local_file'], full_remote_path)
    except Exception:
        # The transfer may have completed despite a client-side error;
        # give the device a moment, then compare the remote file size.
        time.sleep(10)
        temp_size = verify_remote_file_exists(
            module, dest, file_system=module.params['file_system'])
        if int(temp_size) != int(file_size):
            module.fail_json(msg='Could not transfer file. There was an error '
                                 'during transfer. Please make sure remote '
                                 'permissions are set.', temp_size=temp_size,
                             file_size=file_size)
    finally:
        # Always release the sessions, even when fail_json() exits.
        scp.close()
        ssh.close()
    return True
def copy_file_from_remote(module, local, file_system='bootflash:'):
    """Drive the device's own ``copy scp:`` command to pull a file.

    Opens an interactive SSH session to the device with pexpect, issues
    ``copy scp://...`` over the management VRF and answers the CLI
    prompts (host-key confirmation, overwrite confirmation, passwords).
    Fails the module on timeout, auth failure, missing remote file, or
    a full file system.
    """
    hostname = module.params['host']
    username = module.params['username']
    password = module.params['password']
    port = module.params['connect_ssh_port']
    try:
        child = pexpect.spawn('ssh ' + username + '@' + hostname + ' -p' + str(port))
        # response could be unknown host addition or Password
        index = child.expect(['yes', '(?i)Password'])
        if index == 0:
            child.sendline('yes')
            child.expect('(?i)Password')
        child.sendline(password)
        child.expect('#')
        # 'vrf management' is required so the device reaches the SCP
        # server over its management network.
        command = ('copy scp://' + module.params['remote_scp_server_user'] +
                   '@' + module.params['remote_scp_server'] + module.params['remote_file'] +
                   ' ' + file_system + local + ' vrf management')
        child.sendline(command)
        # response could be remote host connection time out,
        # there is already an existing file with the same name,
        # unknown host addition or password
        index = child.expect(['timed out', 'existing', 'yes', '(?i)password'], timeout=180)
        if index == 0:
            module.fail_json(msg='Timeout occured due to remote scp server not responding')
        elif index == 1:
            # Confirm overwriting the existing file.
            child.sendline('y')
            # response could be unknown host addition or Password
            sub_index = child.expect(['yes', '(?i)password'])
            if sub_index == 0:
                child.sendline('yes')
                child.expect('(?i)password')
        elif index == 2:
            child.sendline('yes')
            child.expect('(?i)password')
        child.sendline(module.params['remote_scp_server_password'])
        fpt = module.params['file_pull_timeout']
        # response could be that there is no space left on device,
        # permission denied due to wrong user/password,
        # remote file non-existent or success
        index = child.expect(['No space', 'Permission denied', 'No such file', '#'], timeout=fpt)
        if index == 0:
            module.fail_json(msg='File copy failed due to no space left on the device')
        elif index == 1:
            module.fail_json(msg='Username/Password for remote scp server is wrong')
        elif index == 2:
            module.fail_json(msg='File copy failed due to remote file not present')
    except pexpect.ExceptionPexpect as e:
        module.fail_json(msg='%s' % to_native(e), exception=traceback.format_exc())
    child.close()
def main():
    """Module entry point.

    Builds the argument spec, verifies the optional libraries required
    for the chosen direction, then dispatches on ``file_pull``: when
    True the device pulls the file from a remote SCP server; when False
    the file is pushed from the controller to the device.
    """
    argument_spec = dict(
        local_file=dict(type='str'),
        remote_file=dict(type='str'),
        file_system=dict(required=False, default='bootflash:'),
        connect_ssh_port=dict(required=False, type='int', default=22),
        file_pull=dict(type='bool', default=False),
        file_pull_timeout=dict(type='int', default=300),
        remote_scp_server=dict(type='str'),
        remote_scp_server_user=dict(type='str'),
        remote_scp_server_password=dict(no_log=True),
    )
    argument_spec.update(nxos_argument_spec)
    # Pull mode needs the SCP-server details; push mode needs a local file.
    required_if = [("file_pull", True, ["remote_file", "remote_scp_server"]),
                   ("file_pull", False, ["local_file"])]
    required_together = [['remote_scp_server',
                          'remote_scp_server_user',
                          'remote_scp_server_password']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           required_together=required_together,
                           supports_check_mode=True)
    file_pull = module.params['file_pull']
    # Fail early with actionable messages if optional libraries for the
    # selected direction are missing.
    if file_pull:
        if not HAS_PEXPECT:
            module.fail_json(
                msg='library pexpect is required when file_pull is True but does not appear to be '
                    'installed. It can be installed using `pip install pexpect`'
            )
    else:
        if not HAS_PARAMIKO:
            module.fail_json(
                msg='library paramiko is required when file_pull is False but does not appear to be '
                    'installed. It can be installed using `pip install paramiko`'
            )
        if not HAS_SCP:
            module.fail_json(
                msg='library scp is required when file_pull is False but does not appear to be '
                    'installed. It can be installed using `pip install scp`'
            )
    warnings = list()
    check_args(module, warnings)
    results = dict(changed=False, warnings=warnings)
    local_file = module.params['local_file']
    remote_file = module.params['remote_file']
    file_system = module.params['file_system']
    results['transfer_status'] = 'No Transfer'
    results['file_system'] = file_system
    if file_pull:
        # Pull mode: device copies the file from the SCP server; the
        # local name defaults to the remote file's basename.
        src = remote_file.split('/')[-1]
        local = local_file or src
        if not module.check_mode:
            copy_file_from_remote(module, local, file_system=file_system)
            results['transfer_status'] = 'Received'
        results['changed'] = True
        results['remote_file'] = src
        results['local_file'] = local
    else:
        # Push mode: only transfer when the file is not already present.
        if not local_file_exists(module):
            module.fail_json(msg="Local file {0} not found".format(local_file))
        dest = remote_file or os.path.basename(local_file)
        remote_exists = remote_file_exists(module, dest, file_system=file_system)
        if not remote_exists:
            results['changed'] = True
            file_exists = False
        else:
            file_exists = True
        if not module.check_mode and not file_exists:
            transfer_file_to_device(module, dest)
            results['transfer_status'] = 'Sent'
        results['local_file'] = local_file
        if remote_file is None:
            remote_file = os.path.basename(local_file)
        results['remote_file'] = remote_file
    module.exit_json(**results)
if __name__ == '__main__':
    main()
|
hryamzik/ansible
|
lib/ansible/modules/network/nxos/nxos_file_copy.py
|
Python
|
gpl-3.0
| 14,439
|
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras import backend as K
from deep_qa.layers.time_distributed_embedding import TimeDistributedEmbedding
from ..common.test_case import DeepQaTestCase
class TestTimeDistributedEmbeddings(DeepQaTestCase):
    def test_time_distributed_embedding_masking(self):
        """The layer's mask should be 0 exactly where the input ids are 0."""
        indices = Input(shape=(2, 3), dtype='int32')
        embedding_layer = TimeDistributedEmbedding(input_dim=3, output_dim=5, mask_zero=True)
        embedding_layer(indices)  # A call to the layer is required to define the mask.
        mask_fn = K.function([indices], [embedding_layer.get_output_mask_at(0)])
        sample = numpy.asarray([[[1, 0, 2], [0, 2, 1]]])
        computed_mask = mask_fn([sample])[0]
        expected_mask = numpy.asarray([[1, 0, 1], [0, 1, 1]], dtype='int8')
        assert numpy.all(computed_mask == expected_mask)
|
RTHMaK/RPGOne
|
deep_qa-master/tests/layers/time_distributed_embedding_test.py
|
Python
|
apache-2.0
| 899
|
#!/usr/bin/env python3
import pytablereader
import xlsxwriter
import simplesqlite
def main():
    """Build a sample Excel workbook, load its sheets into SQLite, dump them."""
    file_path = "sample_data.xlsx"

    def write_table(worksheet, table):
        # Write each cell of ``table`` at its (row, column) position.
        for row_idx, row in enumerate(table):
            for col_idx, item in enumerate(row):
                worksheet.write(row_idx, col_idx, item)

    # --- create the sample data file ---
    workbook = xlsxwriter.Workbook(file_path)
    write_table(workbook.add_worksheet("samplesheet1"), [
        ["", "", "", ""],
        ["", "a", "b", "c"],
        ["", 1, 1.1, "a"],
        ["", 2, 2.2, "bb"],
        ["", 3, 3.3, "cc"],
    ])
    workbook.add_worksheet("samplesheet2")  # deliberately left empty
    write_table(workbook.add_worksheet("samplesheet3"), [
        ["", "", ""],
        ["", "", ""],
        ["aa", "ab", "ac"],
        [1, "hoge", "a"],
        [2, "", "bb"],
        [3, "foo", ""],
    ])
    workbook.close()

    # --- load every sheet into a SQLite database ---
    con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
    for table_data in pytablereader.ExcelTableFileLoader(file_path).load():
        con.create_table_from_tabledata(table_data)

    # --- dump the created tables ---
    for table_name in con.fetch_table_names():
        print("table: " + table_name)
        print(con.fetch_attr_names(table_name))
        result = con.select(select="*", table_name=table_name)
        for record in result.fetchall():
            print(record)
        print()
if __name__ == "__main__":
    main()
|
thombashi/SimpleSQLite
|
sample/create_table/sample_create_table_from_excel.py
|
Python
|
mit
| 1,573
|
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.services.catalogs.catalog_item import CatalogItem
from cfme.automate.service_dialogs import ServiceDialog
from cfme.services.catalogs.catalog import Catalog
from cfme.services.catalogs.service_catalogs import ServiceCatalogs
from cfme.automate.explorer import Domain, Namespace, Class, Method, Instance
pytestmark = [
pytest.mark.usefixtures("logged_in"),
pytest.mark.long_running,
pytest.mark.ignore_stream("upstream"),
pytest.mark.meta(server_roles="+automate")
]
item_name = fauxfactory.gen_alphanumeric()
METHOD_TORSO = """
# Method for logging
def log(level, message)
@method = 'Service Dialog Provider Select'
$evm.log(level, "#{@method} - #{message}")
end
# Start Here
log(:info, " - Listing Root Object Attributes:") if @debug
$evm.root.attributes.sort.each { |k, v| $evm.log('info', "#{@method} - \t#{k}: #{v}") if @debug }
log(:info, "===========================================") if @debug
dialog_field = $evm.object
dialog_field['data_type'] = 'string'
dialog_field['required'] = 'true'
dialog_field['sort_by'] = 'value'
dialog_field["values"] = [[1, "one"], [2, "two"], [10, "ten"], [50, "fifty"]]
"""
@pytest.yield_fixture(scope="function")
def dialog(copy_instance, create_method):
    """Yield a service dialog containing one dynamic drop-down element.

    Depends on ``copy_instance``/``create_method`` so the automate method
    backing the dynamic element exists first.

    Fixed: removed the dead ``dialog = "dialog_" + ...`` string assignment
    that was immediately overwritten by the ServiceDialog instance.
    """
    element_data = {
        'ele_label': "ele_" + fauxfactory.gen_alphanumeric(),
        'ele_name': fauxfactory.gen_alphanumeric(),
        'ele_desc': fauxfactory.gen_alphanumeric(),
        'choose_type': "Drop Down List",
        'dynamic_chkbox': True
    }
    dialog = ServiceDialog(label="dialog_" + fauxfactory.gen_alphanumeric(),
                           description="my dialog", submit=True, cancel=True,
                           tab_label="tab_" + fauxfactory.gen_alphanumeric(),
                           tab_desc="my tab desc",
                           box_label="box_" + fauxfactory.gen_alphanumeric(),
                           box_desc="my box desc")
    dialog.create(element_data)
    yield dialog
@pytest.yield_fixture(scope="function")
def catalog():
    """Yield a freshly created catalog with a randomized name."""
    new_catalog = Catalog(name="cat_" + fauxfactory.gen_alphanumeric(),
                          description="my catalog")
    new_catalog.create()
    yield new_catalog
@pytest.fixture(scope="function")
def copy_domain(request):
    """Create the 'new_domain' automate domain; delete it on teardown."""
    domain = Domain(name="new_domain", enabled=True)
    domain.create()

    def _cleanup():
        # Only delete if the test did not already remove it.
        if domain.exists():
            domain.delete()

    request.addfinalizer(_cleanup)
    return domain
@pytest.fixture(scope="function")
def create_method(request, copy_domain):
    """Create the InspectMe automate method inside the copied domain."""
    system_namespace = Namespace(name="System", parent=copy_domain)
    request_class = Class(name="Request", namespace=system_namespace)
    method = Method(name="InspectMe", data=METHOD_TORSO, cls=request_class)
    method.create()
    return method
@pytest.fixture(scope="function")
def copy_instance(request, copy_domain):
    """Copy the stock InspectMe instance from the locked ManageIQ domain."""
    locked_domain = Domain(name="ManageIQ (Locked)", enabled=True)
    system_namespace = Namespace(name="System", parent=locked_domain)
    request_class = Class(name="Request", namespace=system_namespace)
    Instance(name="InspectMe", cls=request_class).copy_to(copy_domain)
@pytest.mark.tier(3)
@pytest.mark.meta(blockers=1219950)
def test_dynamicdropdown_dialog(dialog, catalog):
    """Order a generic catalog item whose dialog uses a dynamic drop-down."""
    catalog_item = CatalogItem(
        item_type="Generic",
        name=fauxfactory.gen_alphanumeric(),
        description="my catalog",
        display_in=True,
        catalog=catalog.name,
        dialog=dialog.label)
    catalog_item.create()
    ServiceCatalogs("service_name").order(catalog_item.catalog, catalog_item)
|
lehinevych/cfme_tests
|
cfme/tests/services/test_dynamicdd_dialogelement.py
|
Python
|
gpl-2.0
| 3,838
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.