import traceback
from ckan.lib.helpers import json
from ckanext.harvest.model import HarvestObject, HarvestObjectExtra
from ckanext.harvest.harvesters import HarvesterBase
from ckanext.geocat.utils import search_utils, csw_processor, ogdch_map_utils, csw_mapping # noqa
from ckanext.geocat.utils.vocabulary_utils import \
(VALID_TERMS_OF_USE, DEFAULT_TERMS_OF_USE)
from ckan.logic.schema import default_update_package_schema,\
default_create_package_schema
from ckan.lib.navl.validators import ignore
import ckan.plugins.toolkit as tk
from ckan import model
from ckan.model import Session
import uuid
import logging
log = logging.getLogger(__name__)
DEFAULT_PERMA_LINK_URL = 'https://www.geocat.ch/geonetwork/srv/ger/md.viewer#/full_view/' # noqa
DEFAULT_PERMA_LINK_LABEL = 'geocat.ch Permalink'
HARVEST_USER = 'harvest'
class GeocatHarvester(HarvesterBase):
'''
The harvester for geocat
'''
def info(self):
return {
'name': 'geocat_harvester',
'title': 'Geocat harvester',
'description': (
'Harvests metadata from geocat (CSW)'
),
'form_config_interface': 'Text'
}
def validate_config(self, config):
if not config:
return config
try:
config_obj = json.loads(config)
except Exception as e:
raise ValueError(
                'Configuration could not be parsed. An error {} occurred'
.format(e)
)
if 'delete_missing_datasets' in config_obj:
if not isinstance(config_obj['delete_missing_datasets'], bool):
                raise ValueError('delete_missing_datasets must be boolean')
if 'rights' in config_obj:
            if config_obj['rights'] not in VALID_TERMS_OF_USE:
raise ValueError('{} is not valid as terms of use'
.format(config_obj['rights']))
return config
def _set_config(self, config_str, harvest_source_id):
if config_str:
self.config = json.loads(config_str)
else:
self.config = {}
self.config['rights'] = self.config.get('rights', DEFAULT_TERMS_OF_USE)
        if self.config['rights'] not in VALID_TERMS_OF_USE:
self.config['rights'] = DEFAULT_TERMS_OF_USE
self.config['delete_missing_datasets'] = \
self.config.get('delete_missing_datasets', False)
self.config['geocat_perma_link_label'] = \
tk.config.get('ckanext.geocat.permalink_title',
DEFAULT_PERMA_LINK_LABEL)
self.config['geocat_perma_link_url'] = \
self.config.get('geocat_perma_link_url',
tk.config.get('geocat_perma_link_url',
DEFAULT_PERMA_LINK_URL))
self.config['legal_basis_url'] = \
self.config.get('legal_basis_url', None)
organization_slug = \
search_utils.get_organization_slug_for_harvest_source(
harvest_source_id)
self.config['organization'] = organization_slug
log.debug('Using config: %r' % self.config)
def gather_stage(self, harvest_job):
log.debug('In GeocatHarvester gather_stage')
self._set_config(harvest_job.source.config, harvest_job.source.id)
csw_url = harvest_job.source.url
try:
csw_data = csw_processor.GeocatCatalogueServiceWeb(url=csw_url)
gathered_geocat_identifiers = csw_data.get_geocat_id_from_csw()
except Exception as e:
self._save_gather_error(
'Unable to get content for URL: %s: %s / %s'
% (csw_url, str(e), traceback.format_exc()),
harvest_job
)
return []
existing_dataset_infos = \
search_utils.get_dataset_infos_for_organization(
organization_name=self.config['organization'],
harvest_source_id=harvest_job.source_id,
)
gathered_ogdch_identifiers = \
[ogdch_map_utils.map_geocat_to_ogdch_identifier(
geocat_identifier=geocat_identifier,
organization_slug=self.config['organization'])
for geocat_identifier in gathered_geocat_identifiers]
all_ogdch_identifiers = \
set(gathered_ogdch_identifiers + existing_dataset_infos.keys())
packages_to_delete = search_utils.get_packages_to_delete(
existing_dataset_infos=existing_dataset_infos,
gathered_ogdch_identifiers=gathered_ogdch_identifiers,
)
csw_map = csw_mapping.GeoMetadataMapping(
organization_slug=self.config['organization'],
geocat_perma_link=self.config['geocat_perma_link_url'],
geocat_perma_label=self.config['geocat_perma_link_label'],
legal_basis_url=self.config['legal_basis_url'],
default_rights=self.config['rights'],
valid_identifiers=all_ogdch_identifiers,
)
harvest_obj_ids = self.map_geocat_dataset(
csw_data,
csw_map,
gathered_geocat_identifiers,
gathered_ogdch_identifiers,
harvest_job)
log.debug('IDs: %r' % harvest_obj_ids)
if self.config['delete_missing_datasets']:
delete_harvest_object_ids = \
self.delete_geocat_ids(
harvest_job,
harvest_obj_ids,
packages_to_delete
)
harvest_obj_ids.extend(delete_harvest_object_ids)
return harvest_obj_ids
def delete_geocat_ids(self,
harvest_job,
harvest_obj_ids,
packages_to_delete):
delete_harvest_obj_ids = []
for package_info in packages_to_delete:
obj = HarvestObject(
guid=package_info[1].name,
job=harvest_job,
extras=[HarvestObjectExtra(key='import_action',
value='delete')])
obj.save()
delete_harvest_obj_ids.append(obj.id)
return delete_harvest_obj_ids
def map_geocat_dataset(self,
csw_data,
csw_map,
gathered_geocat_identifiers,
gathered_ogdch_identifiers,
harvest_job):
mapped_harvest_obj_ids = []
for geocat_id in gathered_geocat_identifiers:
ogdch_identifier = ogdch_map_utils.map_geocat_to_ogdch_identifier(
geocat_identifier=geocat_id,
organization_slug=self.config['organization'])
if ogdch_identifier in gathered_ogdch_identifiers:
try:
csw_record_as_string = csw_data.get_record_by_id(geocat_id)
except Exception as e:
self._save_gather_error(
                    'Error when reading csw record from source: %s %r / %s'
% (ogdch_identifier, e, traceback.format_exc()),
harvest_job)
continue
try:
dataset_dict = csw_map.get_metadata(csw_record_as_string,
geocat_id)
except Exception as e:
self._save_gather_error(
'Error when mapping csw data to dcat: %s %r / %s'
% (ogdch_identifier, e, traceback.format_exc()),
harvest_job)
continue
try:
harvest_obj = \
HarvestObject(guid=ogdch_identifier,
job=harvest_job,
content=json.dumps(dataset_dict))
harvest_obj.save()
except Exception as e:
self._save_gather_error(
                    'Error when processing dataset: %s %r / %s'
% (ogdch_identifier, e, traceback.format_exc()),
harvest_job)
continue
else:
mapped_harvest_obj_ids.append(harvest_obj.id)
return mapped_harvest_obj_ids
def fetch_stage(self, harvest_object):
return True
def import_stage(self, harvest_object): # noqa
log.debug('In GeocatHarvester import_stage')
if not harvest_object:
log.error('No harvest object received')
self._save_object_error(
'No harvest object received',
harvest_object
)
return False
import_action = \
search_utils.get_value_from_object_extra(harvest_object.extras,
'import_action')
        if import_action == 'delete':
log.debug('import action: %s' % import_action)
harvest_object.current = False
return self._delete_dataset({'id': harvest_object.guid})
if harvest_object.content is None:
self._save_object_error('Empty content for object %s' %
harvest_object.id,
harvest_object, 'Import')
return False
try:
pkg_dict = json.loads(harvest_object.content)
except ValueError:
self._save_object_error('Could not parse content for object {0}'
.format(harvest_object.id), harvest_object, 'Import') # noqa
return False
pkg_info = \
search_utils.find_package_for_identifier(harvest_object.guid)
context = {
'ignore_auth': True,
'user': HARVEST_USER,
}
try:
if pkg_info:
# Change default schema to ignore lists of dicts, which
# are stored in the '__junk' field
schema = default_update_package_schema()
context['schema'] = schema
schema['__junk'] = [ignore]
pkg_dict['name'] = pkg_info.name
pkg_dict['id'] = pkg_info.package_id
search_utils.map_resources_to_ids(pkg_dict, pkg_info)
updated_pkg = \
tk.get_action('package_update')(context, pkg_dict)
harvest_object.current = True
harvest_object.package_id = updated_pkg['id']
harvest_object.save()
log.debug("Updated PKG: %s" % updated_pkg)
else:
flat_title = _derive_flat_title(pkg_dict['title'])
if not flat_title:
self._save_object_error(
'Unable to derive name from title %s'
% pkg_dict['title'], harvest_object, 'Import')
return False
pkg_dict['name'] = self._gen_new_name(flat_title)
schema = default_create_package_schema()
context['schema'] = schema
schema['__junk'] = [ignore]
log.debug("No package found, create a new one!")
# generate an id to reference it in the harvest_object
pkg_dict['id'] = unicode(uuid.uuid4())
log.info('Package with GUID %s does not exist, '
'let\'s create it' % harvest_object.guid)
harvest_object.current = True
harvest_object.package_id = pkg_dict['id']
harvest_object.add()
model.Session.execute(
'SET CONSTRAINTS harvest_object_package_id_fkey DEFERRED')
model.Session.flush()
created_pkg = \
tk.get_action('package_create')(context, pkg_dict)
log.debug("Created PKG: %s" % created_pkg)
Session.commit()
return True
except Exception as e:
self._save_object_error(
('Exception in import stage: %r / %s'
% (e, traceback.format_exc())), harvest_object)
return False
def _create_new_context(self):
# get the site user
site_user = tk.get_action('get_site_user')(
{'model': model, 'ignore_auth': True}, {})
context = {
'model': model,
'session': Session,
'user': site_user['name'],
}
return context
def _delete_dataset(self, package_dict):
log.debug('deleting dataset %s' % package_dict['id'])
context = self._create_new_context()
tk.get_action('dataset_purge')(
context.copy(),
package_dict
)
return True
def _get_geocat_permalink_relation(self, geocat_pkg_id):
return {'url': self.config['geocat_perma_link_url'] + geocat_pkg_id,
'label': self.config['geocat_perma_link_label']}
class GeocatConfigError(Exception):
pass
def _derive_flat_title(title_dict):
"""localizes language dict if no language is specified"""
return title_dict.get('de') or title_dict.get('fr') or title_dict.get('en') or title_dict.get('it') or "" # noqa
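
# A minimal sketch of a harvest source configuration accepted by
# GeocatHarvester.validate_config above. The keys mirror those read in
# _set_config; the values are illustrative assumptions ('rights' must be one
# of VALID_TERMS_OF_USE, and legal_basis_url is a hypothetical URL).
EXAMPLE_GEOCAT_HARVESTER_CONFIG = json.dumps({
    'delete_missing_datasets': True,  # must be a boolean
    'rights': 'NonCommercialAllowed-CommercialAllowed-ReferenceRequired',
    'geocat_perma_link_url': DEFAULT_PERMA_LINK_URL,
    'legal_basis_url': 'https://example.org/legal-basis',  # hypothetical
})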
|
{
'name': 'l10n_ar_account_check_sale',
'version': '1.0',
    'summary': 'Sale of third-party checks',
    'description': """
Checks
==================================
Sale of third-party checks.
""",
'author': 'OPENPYME S.R.L.',
'website': 'http://www.openpyme.com.ar',
'category': 'Accounting',
'depends': [
'l10n_ar_account_check',
],
'data': [
'data/sold_check_data.xml',
'views/account_third_check_view.xml',
'views/account_sold_check_view.xml',
'wizard/wizard_sell_check_view.xml',
'security/ir.model.access.csv',
'data/security.xml',
],
'active': False,
'application': True,
'installable': True,
}
|
from ddd.logic.learning_unit.builder.effective_class_identity_builder import EffectiveClassIdentityBuilder
from ddd.logic.learning_unit.commands import GetEffectiveClassCommand
from ddd.logic.learning_unit.domain.model.effective_class import EffectiveClass
from ddd.logic.learning_unit.repository.i_effective_class import IEffectiveClassRepository
def get_effective_class(
cmd: 'GetEffectiveClassCommand',
effective_class_repository: 'IEffectiveClassRepository'
) -> 'EffectiveClass':
effective_class_identity = EffectiveClassIdentityBuilder.build_from_code_and_learning_unit_identity_data(
class_code=cmd.class_code,
learning_unit_code=cmd.learning_unit_code,
learning_unit_year=cmd.learning_unit_year
)
return effective_class_repository.get(entity_id=effective_class_identity)
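
# A minimal usage sketch (names assumed): GetEffectiveClassCommand's
# constructor arguments are inferred from the attributes read above, and the
# repository is any concrete IEffectiveClassRepository implementation.
#
#     cmd = GetEffectiveClassCommand(
#         class_code='A',                  # hypothetical values
#         learning_unit_code='LDROI1001',
#         learning_unit_year=2021,
#     )
#     effective_class = get_effective_class(cmd, EffectiveClassRepository())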
|
import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.raw_module import RawDescriptor
from .x_module import XModule
from xblock.core import Integer, Scope, String, List, Float, Boolean
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor
from collections import namedtuple
from .fields import Date, Timedelta
import textwrap
log = logging.getLogger("mitx.courseware")
V1_SETTINGS_ATTRIBUTES = [
"display_name", "max_attempts", "graded", "accept_file_upload",
"skip_spelling_checks", "due", "graceperiod", "weight", "min_to_calibrate",
"max_to_calibrate", "peer_grader_count", "required_peer_grading",
]
V1_STUDENT_ATTRIBUTES = ["current_task_number", "task_states", "state",
"student_attempts", "ready_to_reset", "old_task_states"]
V1_ATTRIBUTES = V1_SETTINGS_ATTRIBUTES + V1_STUDENT_ATTRIBUTES
VersionTuple = namedtuple('VersionTuple', ['descriptor', 'module', 'settings_attributes', 'student_attributes'])
VERSION_TUPLES = {
1: VersionTuple(CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module, V1_SETTINGS_ATTRIBUTES,
V1_STUDENT_ATTRIBUTES),
}
DEFAULT_VERSION = 1
DEFAULT_DATA = textwrap.dedent("""\
<combinedopenended>
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/></task>
<task>
<openended min_score_to_attempt="4" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
<task>
<openended min_score_to_attempt="9" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
""")
class VersionInteger(Integer):
"""
A model type that converts from strings to integers when reading from json.
    Also checks that the requested version exists, falling back to the default.
"""
def from_json(self, value):
try:
value = int(value)
if value not in VERSION_TUPLES:
version_error_string = "Could not find version {0}, using version {1} instead"
log.error(version_error_string.format(value, DEFAULT_VERSION))
value = DEFAULT_VERSION
        except (ValueError, TypeError):
value = DEFAULT_VERSION
return value
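
# Behaviour sketch of VersionInteger.from_json, following the logic above:
# "1" -> 1; "99" -> DEFAULT_VERSION (after logging the unknown version);
# "not-a-number" -> DEFAULT_VERSION via the except branch.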
class CombinedOpenEndedFields(object):
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
default="Open Response Assessment",
scope=Scope.settings
)
current_task_number = Integer(
help="Current task that the student is on.",
default=0,
scope=Scope.user_state
)
old_task_states = List(
help=("A list of lists of state dictionaries for student states that are saved."
"This field is only populated if the instructor changes tasks after"
"the module is created and students have attempted it (for example changes a self assessed problem to "
"self and peer assessed."),
scope = Scope.user_state
)
task_states = List(
help="List of state dictionaries of each task within this module.",
scope=Scope.user_state
)
state = String(
help="Which step within the current task that the student is on.",
default="initial",
scope=Scope.user_state
)
graded = Boolean(
display_name="Graded",
help='Defines whether the student gets credit for grading this problem.',
default=False,
scope=Scope.settings
)
student_attempts = Integer(
help="Number of attempts taken by the student on this problem",
default=0,
scope=Scope.user_state
)
ready_to_reset = Boolean(
help="If the problem is ready to be reset or not.",
default=False,
scope=Scope.user_state
)
max_attempts = Integer(
display_name="Maximum Attempts",
help="The number of times the student can try to answer this problem.",
default=1,
scope=Scope.settings,
values={"min": 1 }
)
accept_file_upload = Boolean(
display_name="Allow File Uploads",
help="Whether or not the student can submit files as a response.",
default=False,
scope=Scope.settings
)
skip_spelling_checks = Boolean(
display_name="Disable Quality Filter",
help="If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed.",
default=False,
scope=Scope.settings
)
due = Date(
help="Date that this problem is due by",
scope=Scope.settings
)
graceperiod = Timedelta(
help="Amount of time after the due date that submissions will be accepted",
scope=Scope.settings
)
version = VersionInteger(help="Current version number", default=DEFAULT_VERSION, scope=Scope.settings)
data = String(help="XML data for the problem", scope=Scope.content,
default=DEFAULT_DATA)
weight = Float(
display_name="Problem Weight",
help="Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.",
scope=Scope.settings,
values={"min": 0, "step": ".1"},
default=1
)
min_to_calibrate = Integer(
display_name="Minimum Peer Grading Calibrations",
help="The minimum number of calibration essays each student will need to complete for peer grading.",
default=3,
scope=Scope.settings,
values={"min": 1, "max": 20, "step": "1"}
)
max_to_calibrate = Integer(
display_name="Maximum Peer Grading Calibrations",
help="The maximum number of calibration essays each student will need to complete for peer grading.",
default=6,
scope=Scope.settings,
values={"min": 1, "max": 20, "step": "1"}
)
peer_grader_count = Integer(
display_name="Peer Graders per Response",
help="The number of peers who will grade each submission.",
default=3,
scope=Scope.settings,
values={"min": 1, "step": "1", "max": 5}
)
required_peer_grading = Integer(
display_name="Required Peer Grading",
help="The number of other students each student making a submission will have to grade.",
default=3,
scope=Scope.settings,
values={"min": 1, "step": "1", "max": 5}
)
markdown = String(
help="Markdown source of this module",
default=textwrap.dedent("""\
[prompt]
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
    Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
[prompt]
[rubric]
+ Ideas
- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
+ Content
- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
- Includes little information and few or no details. Explores only one or two facets of the topic.
- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
+ Organization
- Ideas organized illogically, transitions weak, and response difficult to follow.
- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
+ Style
- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
+ Voice
- Demonstrates language and tone that may be inappropriate to task and reader.
- Demonstrates an attempt to adjust language and tone to task and reader.
- Demonstrates effective adjustment of language and tone to task and reader.
[rubric]
[tasks]
(Self), ({4-12}AI), ({9-12}Peer)
[tasks]
"""),
scope=Scope.settings
)
class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
    It transitions between problems, and supports arbitrary ordering.
Each combined open ended module contains one or multiple "child" modules.
Child modules track their own state, and can transition between states. They also implement get_html and
handle_ajax.
    The combined open ended module transitions between child modules as appropriate, tracks its own state, and passes
    ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem).
ajax actions implemented by all children are:
'save_answer' -- Saves the student answer
'save_assessment' -- Saves the student assessment (or external grader assessment)
'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc)
ajax actions implemented by combined open ended module are:
'reset' -- resets the whole combined open ended module and returns to the first child module
'next_problem' -- moves to the next child module
'get_results' -- gets results from a given child module
Types of children. Task is synonymous with child module, so each combined open ended module
incorporates multiple children (tasks):
openendedmodule
selfassessmentmodule
CombinedOpenEndedModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
"""
STATE_VERSION = 1
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
INTERMEDIATE_DONE = 'intermediate_done'
DONE = 'done'
icon_class = 'problem'
js = {
'coffee':
[
resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
]
}
js_module_name = "CombinedOpenEnded"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
def __init__(self, *args, **kwargs):
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block.
See DEFAULT_DATA for a sample.
"""
XModule.__init__(self, *args, **kwargs)
self.system.set('location', self.location)
if self.task_states is None:
self.task_states = []
if self.old_task_states is None:
self.old_task_states = []
version_tuple = VERSION_TUPLES[self.version]
self.student_attributes = version_tuple.student_attributes
self.settings_attributes = version_tuple.settings_attributes
attributes = self.student_attributes + self.settings_attributes
static_data = {}
instance_state = {k: getattr(self, k) for k in attributes}
self.child_descriptor = version_tuple.descriptor(self.system)
self.child_definition = version_tuple.descriptor.definition_from_xml(etree.fromstring(self.data), self.system)
self.child_module = version_tuple.module(self.system, self.location, self.child_definition, self.child_descriptor,
instance_state=instance_state, static_data=static_data,
attributes=attributes)
self.save_instance_data()
def get_html(self):
self.save_instance_data()
return_value = self.child_module.get_html()
return return_value
def handle_ajax(self, dispatch, data):
self.save_instance_data()
return_value = self.child_module.handle_ajax(dispatch, data)
self.save_instance_data()
return return_value
def get_instance_state(self):
return self.child_module.get_instance_state()
def get_score(self):
return self.child_module.get_score()
def max_score(self):
return self.child_module.max_score()
def get_progress(self):
return self.child_module.get_progress()
@property
def due_date(self):
return self.child_module.due_date
def save_instance_data(self):
for attribute in self.student_attributes:
setattr(self, attribute, getattr(self.child_module, attribute))
class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
"""
Module for adding combined open ended questions
"""
mako_template = "widgets/open-ended-edit.html"
module_class = CombinedOpenEndedModule
has_score = True
always_recalculate_grades = True
template_dir_name = "combinedopenended"
#Specify whether or not to pass in S3 interface
needs_s3_interface = True
#Specify whether or not to pass in open ended interface
needs_open_ended_interface = True
metadata_attributes = RawDescriptor.metadata_attributes
js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/edit.coffee')]}
js_module_name = "OpenEndedMarkdownEditingDescriptor"
css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/combinedopenended/edit.scss')]}
metadata_translations = {
'is_graded': 'graded',
'attempts': 'max_attempts',
}
def get_context(self):
_context = RawDescriptor.get_context(self)
_context.update({'markdown': self.markdown,
'enable_markdown': self.markdown is not None})
return _context
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(CombinedOpenEndedDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([CombinedOpenEndedDescriptor.due, CombinedOpenEndedDescriptor.graceperiod,
CombinedOpenEndedDescriptor.markdown, CombinedOpenEndedDescriptor.version])
return non_editable_fields
|
from copy import deepcopy
from xml.sax.saxutils import escape
from lxml import etree as ElementTree
from odoo import SUPERUSER_ID, api
def _merge_views(env, xmlids):
old_view_ids = env["ir.ui.view"].search(
[("key", "in", xmlids), ("active", "=", True)]
)
# Get only the edited version of the views (if has it)
old_view_ids_edited = old_view_ids.filtered("website_id")
old_view_ids_edited_keys = old_view_ids_edited.mapped("key")
views_to_discard = env["ir.ui.view"]
for old_view in old_view_ids:
if not old_view.website_id and old_view.key in old_view_ids_edited_keys:
views_to_discard |= old_view
old_view_ids -= views_to_discard
new_website_page = env.ref("website_legal_page.legal_page_page")
new_view_id = env.ref("website_legal_page.legal_page")
# 'Dolly' separator element
separator = ElementTree.fromstring(
"<div class='s_hr text-left pt32 pb32' data-name='Separator'>"
+ "<hr class='s_hr_1px s_hr_solid border-600 w-100 mx-auto'/></div>"
)
# Replace new content with the old one per website
website_ids = old_view_ids.mapped("website_id")
for website_id in website_ids:
new_xml = ElementTree.fromstring(new_view_id.arch)
table_content_list = new_xml.xpath("//div[@id='section_list']/ul")[0]
sections_content = new_xml.xpath("//div[@id='section_content']")[0]
has_views_edited = any(
old_view_ids_edited.filtered(lambda x: x.website_id == website_id)
)
# Remove 'IS A SAMPLE' alert
if has_views_edited:
alert = new_xml.xpath(
"//section[@data-name='Title']//div[@data-name='Alert']"
)[0]
alert.find("..").remove(alert)
# Remove unused content
for child in table_content_list.getchildren():
table_content_list.remove(child)
for child in sections_content.getchildren():
sections_content.remove(child)
views_done = env["ir.ui.view"]
for old_view_id in old_view_ids:
if old_view_id.website_id != website_id:
continue
anchor_name = old_view_id.key.split(".")[1]
# Insert item in table content list
list_item = ElementTree.fromstring(
"<li><p><a href='#{}'>{}</a></p></li>".format(
anchor_name, escape(old_view_id.name)
)
)
table_content_list.append(list_item)
# Insert section content
old_xml = ElementTree.fromstring(old_view_id.arch)
old_content = old_xml.xpath("//div[@id='wrap']")[0]
sections_content.append(deepcopy(separator))
sections_content.append(
ElementTree.fromstring(
"<a class='legal_anchor' id='%s'/>" % anchor_name
)
)
            for child in old_content.getchildren():
                sections_content.append(child)
views_done |= old_view_id
old_view_ids -= views_done
# Create a new page with the changes
view_id = env["ir.ui.view"].create(
{
"arch": ElementTree.tostring(new_xml, encoding="unicode"),
"website_id": website_id.id,
"key": new_view_id.key,
"name": new_view_id.name,
"type": "qweb",
}
)
env["website.page"].create(
{
"name": new_website_page.name,
"url": new_website_page.url,
"view_id": view_id.id,
"is_published": True,
"website_id": website_id.id,
"website_indexed": True,
"website_published": True,
}
)
def post_init_hook(cr, registry):
with api.Environment.manage():
env = api.Environment(cr, SUPERUSER_ID, {})
is_website_sale_installed = (
env["ir.module.module"].search_count(
[("name", "=", "website_sale"), ("state", "=", "installed")]
)
> 0
)
if is_website_sale_installed:
_merge_views(env, ["website_sale.terms"])
|
'''Test cases for QtMultimediaWidgets'''
import unittest
from helper import UsesQApplication
from PySide2.QtMultimediaWidgets import QGraphicsVideoItem, QVideoWidget
from PySide2.QtWidgets import QGraphicsScene, QGraphicsView, QVBoxLayout, QWidget
from PySide2.QtCore import QTimer
class MyWidget(QWidget):
def __init__(self):
QWidget.__init__(self)
layout = QVBoxLayout(self)
layout.addWidget(QVideoWidget())
graphicsScene = QGraphicsScene()
graphicsView = QGraphicsView(graphicsScene)
graphicsScene.addItem(QGraphicsVideoItem())
layout.addWidget(graphicsView)
class QMultimediaWidgetsTest(UsesQApplication):
def testMultimediaWidgets(self):
w = MyWidget()
w.show()
        QTimer.singleShot(100, self.app.quit)  # static method, returns None; no reference to keep
self.app.exec_()
if __name__ == '__main__':
unittest.main()
|
'''Simplified reimplementation of the gpioset tool in Python.'''
import gpiod
import sys
if __name__ == '__main__':
if len(sys.argv) < 3:
raise TypeError('usage: gpioset.py <gpiochip> <offset1>=<value1> ...')
with gpiod.Chip(sys.argv[1]) as chip:
offsets = []
values = []
for arg in sys.argv[2:]:
arg = arg.split('=')
offsets.append(int(arg[0]))
values.append(int(arg[1]))
lines = chip.get_lines(offsets)
lines.request(consumer=sys.argv[0], type=gpiod.LINE_REQ_DIR_OUT)
lines.set_values(values)
input()
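
# Example invocation (chip name and offsets are assumptions about the target
# board); the requested lines are held until Enter is pressed, since input()
# blocks:
#
#     python3 gpioset.py gpiochip0 17=1 27=0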
|
"""LDAP protocol proxy server"""
from twisted.internet import reactor, defer
from ldaptor.protocols.ldap import ldapserver, ldapconnector, ldapclient
from ldaptor.protocols import pureldap
class Proxy(ldapserver.BaseLDAPServer):
protocol = ldapclient.LDAPClient
client = None
    unbound = False
def __init__(self, config):
"""
Initialize the object.
@param config: The configuration.
@type config: ldaptor.interfaces.ILDAPConfig
"""
ldapserver.BaseLDAPServer.__init__(self)
        self.config = config
        # keep the pending-request queue per instance rather than shared
        # at class level
        self.waitingConnect = []
def _whenConnected(self, fn, *a, **kw):
if self.client is None:
d = defer.Deferred()
self.waitingConnect.append((d, fn, a, kw))
return d
else:
return defer.maybeDeferred(fn, *a, **kw)
def _cbConnectionMade(self, proto):
self.client = proto
while self.waitingConnect:
d, fn, a, kw = self.waitingConnect.pop(0)
d2 = defer.maybeDeferred(fn, *a, **kw)
d2.chainDeferred(d)
def _clientQueue(self, request, controls, reply):
# TODO controls
if request.needs_answer:
d = self.client.send_multiResponse(request, self._gotResponse, reply)
# TODO handle d errbacks
else:
self.client.send_noResponse(request)
def _gotResponse(self, response, reply):
reply(response)
# TODO this is ugly
return isinstance(response, (
pureldap.LDAPSearchResultDone,
pureldap.LDAPBindResponse,
))
def _failConnection(self, reason):
#TODO self.loseConnection()
return reason # TODO
def connectionMade(self):
clientCreator = ldapconnector.LDAPClientCreator(
reactor, self.protocol)
d = clientCreator.connect(
dn='',
overrides=self.config.getServiceLocationOverrides())
d.addCallback(self._cbConnectionMade)
d.addErrback(self._failConnection)
ldapserver.BaseLDAPServer.connectionMade(self)
def connectionLost(self, reason):
assert self.client is not None
if self.client.connected:
if not self.unbound:
self.client.unbind()
self.unbound = True
else:
self.client.transport.loseConnection()
self.client = None
ldapserver.BaseLDAPServer.connectionLost(self, reason)
def _handleUnknown(self, request, controls, reply):
self._whenConnected(self._clientQueue, request, controls, reply)
return None
def handleUnknown(self, request, controls, reply):
d = defer.succeed(request)
d.addCallback(self._handleUnknown, controls, reply)
return d
def handle_LDAPUnbindRequest(self, request, controls, reply):
self.unbound = True
self.handleUnknown(request, controls, reply)
if __name__ == '__main__':
"""
Demonstration LDAP proxy; passes all requests to localhost:389.
"""
    from twisted.internet import protocol
    from twisted.python import log
    from ldaptor import config as ldapconfig
    import sys
    log.startLogging(sys.stderr)
    factory = protocol.ServerFactory()
    # Proxy takes an ILDAPConfig, not an 'overrides' keyword; LDAPConfig's
    # serviceLocationOverrides (assumed API) routes the empty base DN to
    # localhost:389.
    factory.protocol = lambda: Proxy(ldapconfig.LDAPConfig(
        serviceLocationOverrides={'': ('localhost', 389)}))
reactor.listenTCP(10389, factory)
reactor.run()
|
import sys
import os
import subprocess as ssubprocess
_p = None
def start_syslog():
global _p
with open(os.devnull, 'w') as devnull:
_p = ssubprocess.Popen(
['logger', '-p', 'daemon.notice', '-t', 'sshuttle'],
stdin=ssubprocess.PIPE,
stdout=devnull,
stderr=devnull
)
def close_stdin():
sys.stdin.close()
def stdout_to_syslog():
sys.stdout.flush()
os.dup2(_p.stdin.fileno(), sys.stdout.fileno())
def stderr_to_syslog():
sys.stderr.flush()
os.dup2(_p.stdin.fileno(), sys.stderr.fileno())
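
# A minimal usage sketch: spawn the logger process first, then redirect the
# standard streams into it.
#
#     start_syslog()
#     close_stdin()
#     stdout_to_syslog()
#     stderr_to_syslog()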
|
import urllib2
import re
import os
import platform

# NOTE: test, which, getOutputFromCmdline, nativeMouseClick, snooze, QPoint,
# Qt and QtQuickConstants are free names assumed to be injected by the
# surrounding (Squish-style) GUI test framework.
JIRA_URL='https://bugreports.qt-project.org/browse'
class JIRA:
__instance__ = None
# Helper class
class Bug:
CREATOR = 'QTCREATORBUG'
SIMULATOR = 'QTSIM'
SDK = 'QTSDK'
QT = 'QTBUG'
QT_QUICKCOMPONENTS = 'QTCOMPONENTS'
# constructor of JIRA
def __init__(self, number, bugType=Bug.CREATOR):
        if JIRA.__instance__ is None:
JIRA.__instance__ = JIRA.__impl(number, bugType)
JIRA.__dict__['_JIRA__instance__'] = JIRA.__instance__
else:
JIRA.__instance__._bugType = bugType
JIRA.__instance__._number = number
JIRA.__instance__.__fetchStatusAndResolutionFromJira__()
    # overridden to make it possible to use JIRA just like the
# underlying implementation (__impl)
def __getattr__(self, attr):
return getattr(self.__instance__, attr)
    # overridden to make it possible to use JIRA just like the
# underlying implementation (__impl)
def __setattr__(self, attr, value):
return setattr(self.__instance__, attr, value)
# function to get an instance of the singleton
@staticmethod
def getInstance():
if '_JIRA__instance__' in JIRA.__dict__:
return JIRA.__instance__
else:
return JIRA.__impl(0, Bug.CREATOR)
# function to check if the given bug is open or not
@staticmethod
def isBugStillOpen(number, bugType=Bug.CREATOR):
tmpJIRA = JIRA(number, bugType)
return tmpJIRA.isOpen()
# function similar to performWorkaroundForBug - but it will execute the
# workaround (function) only if the bug is still open
# returns True if the workaround function has been executed, False otherwise
@staticmethod
def performWorkaroundIfStillOpen(number, bugType=Bug.CREATOR, *args):
if JIRA.isBugStillOpen(number, bugType):
return JIRA.performWorkaroundForBug(number, bugType, *args)
else:
test.warning("Bug is closed... skipping workaround!",
"You should remove potential code inside performWorkaroundForBug()")
return False
# function that performs the workaround (function) for the given bug
# if the function needs additional arguments pass them as 3rd parameter
@staticmethod
def performWorkaroundForBug(number, bugType=Bug.CREATOR, *args):
functionToCall = JIRA.getInstance().__bugs__.get("%s-%d" % (bugType, number), None)
if functionToCall:
test.warning("Using workaround for %s-%d" % (bugType, number))
functionToCall(*args)
return True
else:
JIRA.getInstance()._exitFatal_(bugType, number)
return False
# implementation of JIRA singleton
class __impl:
# constructor of __impl
def __init__(self, number, bugType):
self._number = number
self._bugType = bugType
self._localOnly = os.getenv("SYSTEST_JIRA_NO_LOOKUP")=="1"
self.__initBugDict__()
self.__fetchStatusAndResolutionFromJira__()
# function to retrieve the status of the current bug
def getStatus(self):
return self._status
# function to retrieve the resolution of the current bug
def getResolution(self):
return self._resolution
# this function checks the resolution of the given bug
# and returns True if the bug can still be assumed as 'Open' and False otherwise
def isOpen(self):
# handle special cases
            if self._resolution is None:
return True
if self._resolution in ('Duplicate', 'Moved', 'Incomplete', 'Cannot Reproduce', 'Invalid'):
test.warning("Resolution of bug is '%s' - assuming 'Open' for now." % self._resolution,
"Please check the bugreport manually and update this test.")
return True
return self._resolution != 'Done'
# this function tries to fetch the status and resolution from JIRA for the given bug
# if this isn't possible or the lookup is disabled it does only check the internal
# dict whether a function for the given bug is deposited or not
def __fetchStatusAndResolutionFromJira__(self):
global JIRA_URL
data = None
if not self._localOnly:
try:
bugReport = urllib2.urlopen('%s/%s-%d' % (JIRA_URL, self._bugType, self._number))
data = bugReport.read()
                except Exception:
data = self.__tryExternalTools__()
                    if data is None:
test.warning("Sorry, ssl module missing - cannot fetch data via HTTPS",
"Try to install the ssl module by yourself, or set the python "
"path inside SQUISHDIR/etc/paths.ini to use a python version with "
"ssl support OR install wget or curl to get rid of this warning!")
self._localOnly = True
            if data is None:
if '%s-%d' % (self._bugType, self._number) in self.__bugs__:
test.warning("Using internal dict - bug status could have changed already",
"Please check manually!")
self._status = None
self._resolution = None
return
else:
test.fatal("No workaround function deposited for %s-%d" % (self._bugType, self._number))
self._resolution = 'Done'
return
else:
data = data.replace("\r", "").replace("\n", "")
                resPattern = re.compile(r'<span\s+id="resolution-val".*?>(?P<resolution>.*?)</span>')
                statPattern = re.compile(r'<span\s+id="status-val".*?>(.*?<img.*?>)?(?P<status>.*?)</span>')
status = statPattern.search(data)
resolution = resPattern.search(data)
if status:
self._status = status.group("status").strip()
else:
test.fatal("FATAL: Cannot get status of bugreport %s-%d" % (self._bugType, self._number),
"Looks like JIRA has changed.... Please verify!")
self._status = None
if resolution:
self._resolution = resolution.group("resolution").strip()
else:
test.fatal("FATAL: Cannot get resolution of bugreport %s-%d" % (self._bugType, self._number),
"Looks like JIRA has changed.... Please verify!")
self._resolution = None
# simple helper function - used as fallback if python has no ssl support
# tries to find curl or wget in PATH and fetches data with it instead of
# using urllib2
def __tryExternalTools__(self):
global JIRA_URL
cmdAndArgs = { 'curl':'-k', 'wget':'-qO-' }
for call in cmdAndArgs:
prog = which(call)
if prog:
return getOutputFromCmdline('"%s" %s %s/%s-%d' % (prog, cmdAndArgs[call], JIRA_URL, self._bugType, self._number))
return None
# this function initializes the bug dict for localOnly usage and
# for later lookup which function to call for which bug
# ALWAYS update this dict when adding a new function for a workaround!
def __initBugDict__(self):
self.__bugs__= {
'QTCREATORBUG-6853':self._workaroundCreator6853_,
'QTCREATORBUG-6918':self._workaroundCreator_MacEditorFocus_,
'QTCREATORBUG-6953':self._workaroundCreator_MacEditorFocus_,
'QTCREATORBUG-6994':self._workaroundCreator6994_,
'QTCREATORBUG-7002':self._workaroundCreator7002_
}
# helper function - will be called if no workaround for the requested bug is deposited
def _exitFatal_(self, bugType, number):
test.fatal("No workaround found for bug %s-%d" % (bugType, number))
def _workaroundCreator6994_(self, *args):
if args[0] in ('Mobile Qt Application', 'Qt Gui Application', 'Qt Custom Designer Widget'):
args[1].remove('Harmattan')
test.xverify(False, "Removed Harmattan from expected targets.")
def _workaroundCreator6853_(self, *args):
if "Release" in args[0] and platform.system() == "Linux":
snooze(1)
def _workaroundCreator_MacEditorFocus_(self, *args):
editor = args[0]
nativeMouseClick(editor.mapToGlobal(QPoint(50, 50)).x, editor.mapToGlobal(QPoint(50, 50)).y, Qt.LeftButton)
def _workaroundCreator7002_(self, *args):
if platform.system() in ("Linux", "Darwin"):
result = args[0]
result.append(QtQuickConstants.Targets.EMBEDDED_LINUX)
|
from sys import platform, exec_prefix
from distutils.core import setup, Extension
if platform == "win32":
libmaxent_name = 'libmaxent'
extra_compile_args = [
"-DWIN32",
"-DPYTHON_MODULE",
"-DHAVE_FORTRAN=1",
"-DBOOST_DISABLE_THREADS",
"-DBOOST_DISABLE_ASSERTS",
"/GR",
]
data_files = [('Lib/site-packages/maxent' ,
['stlport_vc7146.dll',
'libifcoremd.dll',
'libmmd.dll']),
]
opt_lib = []
else: # unix
libmaxent_name = 'maxent'
extra_compile_args = [
"-DNDEBUG",
"-DPYTHON_MODULE",
"-DBOOST_DISABLE_THREADS",
]
data_files = []
# various options detected from running ../configure
opt_lib = []
opt_lib_path = []
ac_cv_lib_z_main = "@ac_cv_lib_z_main@"
if ac_cv_lib_z_main == 'yes':
opt_lib.append('z')
fclibs = "/usr/lib/x86_64-linux-gnu/libboost_chrono.a"
opt_lib_path.append("/usr/lib/x86_64-linux-gnu/")
opt_lib.append('boost_chrono')
opt_lib.append('boost_timer')
# if fclibs != '':
# for s in fclibs.split():
# if s[:2] == '-L':
# opt_lib_path.append(s[2:])
# elif s[:2] == '-l':
# opt_lib.append(s[2:])
# else:
#             raise ValueError('unknown FCLIBS item: %s' % s)
setup(name = "maxent",
version = "version-devel",
author = "Le Zhang",
author_email = "ejoy@users.sourceforge.net",
url = "http://homepages.inf.ed.ac.uk/lzhang10/maxent_toolkit.html",
description = "A Maximum Entropy Modeling toolkit in python",
long_description = """Maxent is a powerful, flexible, and easy-to-use
Maximum Entropy Modeling library for Python. The core engine is written in C++
with speed and portability in mind.
The win32 version of this module was compiled with MSVC7.1, Intel Fortran 8.0,
STLPort 4.6.
""",
license = "LGPL",
packages = ['maxent'],
ext_modules=[
Extension("maxent._cmaxent",
["maxent_wrap.cxx"],
include_dirs=[
"../src",
],
library_dirs=[
"../build/src",
] + opt_lib_path,
libraries = [libmaxent_name] + opt_lib,
extra_compile_args = extra_compile_args,
)
],
data_files = data_files,
)
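
# Typical build/install flow for this distutils extension (assumed to be run
# after ../configure has generated the substituted values above):
#
#     python setup.py build
#     python setup.py install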
|
import unittest
import re
import time
import sys
import liblo
def approx(a, b, e = 0.0002):
return abs(a - b) < e
def matchHost(host, regex):
r = re.compile(regex)
    return r.match(host) is not None
class Arguments:
def __init__(self, path, args, types, src, data):
self.path = path
self.args = args
self.types = types
self.src = src
self.data = data
class ServerTestCaseBase(unittest.TestCase):
def setUp(self):
self.cb = None
def callback(self, path, args, types, src, data):
self.cb = Arguments(path, args, types, src, data)
def callback_dict(self, path, args, types, src, data):
        if self.cb is None:
self.cb = { }
self.cb[path] = Arguments(path, args, types, src, data)
class ServerTestCase(ServerTestCaseBase):
def setUp(self):
ServerTestCaseBase.setUp(self)
self.server = liblo.Server('1234')
def tearDown(self):
del self.server
def testPort(self):
assert self.server.get_port() == 1234
def testURL(self):
        assert matchHost(self.server.get_url(), r'osc\.udp://.*:1234/')
def testSendInt(self):
self.server.add_method('/foo', 'i', self.callback, "data")
self.server.send('1234', '/foo', 123)
assert self.server.recv() == True
assert self.cb.path == '/foo'
assert self.cb.args[0] == 123
assert self.cb.types == 'i'
assert self.cb.data == "data"
        assert matchHost(self.cb.src.get_url(), r'osc\.udp://.*:1234/')
def testSendBlob(self):
self.server.add_method('/blob', 'b', self.callback)
self.server.send('1234', '/blob', [4, 8, 15, 16, 23, 42])
assert self.server.recv() == True
if sys.hexversion < 0x03000000:
assert list(self.cb.args[0]) == [4, 8, 15, 16, 23, 42]
else:
assert self.cb.args[0] == b'\x04\x08\x0f\x10\x17\x2a'
def testSendVarious(self):
self.server.add_method('/blah', 'ihfdscb', self.callback)
if sys.hexversion < 0x03000000:
self.server.send(1234, '/blah', 123, 2**42, 123.456, 666.666, "hello", ('c', 'x'), (12, 34, 56))
else:
self.server.send(1234, '/blah', 123, ('h', 2**42), 123.456, 666.666, "hello", ('c', 'x'), (12, 34, 56))
assert self.server.recv() == True
assert self.cb.types == 'ihfdscb'
assert len(self.cb.args) == len(self.cb.types)
assert self.cb.args[0] == 123
assert self.cb.args[1] == 2**42
assert approx(self.cb.args[2], 123.456)
assert approx(self.cb.args[3], 666.666)
assert self.cb.args[4] == "hello"
assert self.cb.args[5] == 'x'
if sys.hexversion < 0x03000000:
assert list(self.cb.args[6]) == [12, 34, 56]
else:
assert self.cb.args[6] == b'\x0c\x22\x38'
def testSendOthers(self):
self.server.add_method('/blubb', 'tmSTFNI', self.callback)
self.server.send(1234, '/blubb', ('t', 666666.666), ('m', (1, 2, 3, 4)), ('S', 'foo'), True, ('F',), None, ('I',))
assert self.server.recv() == True
assert self.cb.types == 'tmSTFNI'
assert approx(self.cb.args[0], 666666.666)
assert self.cb.args[1] == (1, 2, 3, 4)
assert self.cb.args[2] == 'foo'
assert self.cb.args[3] == True
assert self.cb.args[4] == False
assert self.cb.args[5] == None
assert self.cb.args[6] == float('inf')
def testSendMessage(self):
self.server.add_method('/blah', 'is', self.callback)
m = liblo.Message('/blah', 42, 'foo')
self.server.send(1234, m)
assert self.server.recv() == True
assert self.cb.types == 'is'
assert self.cb.args[0] == 42
assert self.cb.args[1] == 'foo'
def testSendBundle(self):
self.server.add_method('/foo', 'i', self.callback_dict)
self.server.add_method('/bar', 's', self.callback_dict)
self.server.send(1234, liblo.Bundle(
liblo.Message('/foo', 123),
liblo.Message('/bar', "blubb")
))
assert self.server.recv(100) == True
assert self.cb['/foo'].args[0] == 123
assert self.cb['/bar'].args[0] == "blubb"
def testSendTimestamped(self):
self.server.add_method('/blubb', 'i', self.callback)
d = 1.23
t1 = time.time()
b = liblo.Bundle(liblo.time() + d)
b.add('/blubb', 42)
self.server.send(1234, b)
while not self.cb:
self.server.recv(1)
t2 = time.time()
assert approx(t2 - t1, d, 0.01)
def testSendInvalid(self):
try:
self.server.send(1234, '/blubb', ('x', 'y'))
except TypeError as e:
pass
else:
assert False
def testRecvTimeout(self):
t1 = time.time()
assert self.server.recv(500) == False
t2 = time.time()
assert t2 - t1 < 0.666
def testRecvImmediate(self):
t1 = time.time()
assert self.server.recv(0) == False
t2 = time.time()
assert t2 - t1 < 0.01
class ServerCreationTestCase(unittest.TestCase):
def testNoPermission(self):
try:
s = liblo.Server('22')
except liblo.ServerError as e:
pass
else:
assert False
def testRandomPort(self):
s = liblo.Server()
assert 1024 <= s.get_port() <= 65535
def testPort(self):
s = liblo.Server(1234)
t = liblo.Server('5678')
assert s.port == 1234
assert t.port == 5678
        assert matchHost(s.url, r'osc\.udp://.*:1234/')
def testPortProto(self):
s = liblo.Server(1234, liblo.TCP)
        assert matchHost(s.url, r'osc\.tcp://.*:1234/')
class ServerTCPTestCase(ServerTestCaseBase):
def setUp(self):
ServerTestCaseBase.setUp(self)
self.server = liblo.Server('1234', liblo.TCP)
def tearDown(self):
del self.server
def testSendReceive(self):
self.server.add_method('/foo', 'i', self.callback)
liblo.send(self.server.url, '/foo', 123)
assert self.server.recv() == True
assert self.cb.path == '/foo'
assert self.cb.args[0] == 123
assert self.cb.types == 'i'
def testNotReachable(self):
try:
self.server.send('osc.tcp://192.168.23.42:4711', '/foo', 23, 42)
except IOError:
pass
else:
assert False
class ServerThreadTestCase(ServerTestCaseBase):
def setUp(self):
ServerTestCaseBase.setUp(self)
self.server = liblo.ServerThread('1234')
def tearDown(self):
del self.server
def testSendAndReceive(self):
self.server.add_method('/foo', 'i', self.callback)
self.server.send('1234', '/foo', 42)
self.server.start()
time.sleep(0.2)
self.server.stop()
assert self.cb.args[0] == 42
class DecoratorTestCase(unittest.TestCase):
class TestServer(liblo.Server):
def __init__(self):
liblo.Server.__init__(self, 1234)
@liblo.make_method('/foo', 'ibm')
def foo_cb(self, path, args, types, src, data):
self.cb = Arguments(path, args, types, src, data)
def setUp(self):
self.server = self.TestServer()
def tearDown(self):
del self.server
def testSendReceive(self):
liblo.send(1234, '/foo', 42, ('b', [4, 8, 15, 16, 23, 42]), ('m', (6, 6, 6, 0)))
assert self.server.recv() == True
assert self.server.cb.path == '/foo'
assert len(self.server.cb.args) == 3
class AddressTestCase(unittest.TestCase):
def testPort(self):
a = liblo.Address(1234)
b = liblo.Address('5678')
assert a.port == 1234
assert b.port == 5678
assert a.url == 'osc.udp://localhost:1234/'
def testUrl(self):
a = liblo.Address('osc.udp://foo:1234/')
assert a.url == 'osc.udp://foo:1234/'
assert a.hostname == 'foo'
assert a.port == 1234
assert a.protocol == liblo.UDP
def testHostPort(self):
a = liblo.Address('foo', 1234)
assert a.url == 'osc.udp://foo:1234/'
def testHostPortProto(self):
a = liblo.Address('foo', 1234, liblo.TCP)
assert a.url == 'osc.tcp://foo:1234/'
if __name__ == "__main__":
unittest.main()
|
'''
Often used utility functions
Copyright 2020 by Massimo Del Fedele
'''
import sys
import uno
from com.sun.star.beans import PropertyValue
from datetime import date
import calendar
import PyPDF2
'''
SOME USEFUL NOTES

The window containing the current document (or component):
    desktop.CurrentFrame.ContainerWindow
Nothing changes if a non-modal dialog is open:
it ALWAYS returns the document's frame.

desktop.ContainerWindow returns None -- not sure what it is for.

To get the top windows there is the toolkit...
    tk = ctx.ServiceManager.createInstanceWithContext("com.sun.star.awt.Toolkit", ctx)
    tk.getTopWindowCount()     returns the number of top windows
    tk.getTopWindow(i)         returns one top window from the list
    tk.getActiveTopWindow()    returns the active top window

For the active top window to be active it must, indeed, have the focus.
When debugging, for example, the active window is likely to be None.

So the problem ALWAYS remains of how to center a dialog on the current component.
If no dialog is running, a newly created dialog takes the ContainerWindow as its parent
(supposedly...) and is positioned relative to it.
If a dialog is open and another one is opened from its event handler, the latter takes
the former as its parent, and is therefore positioned relative to it and not to the main screen.

So we need a way to find the size OF THE PARENT WINDOW of a dialog in order to position it.
The UnoControlDialog object gives access to the XWindowPeer (which is of no use here), to the
XView (which gives the size of the dialog but NOT of its parent...), and to the UnoControlDialogModel,
whose 'DesktopAsParent' property ONLY tells whether the dialog is modal (False) or non-modal (True).

The only solution that comes to mind is to try tk.ActiveTopWindow and, if it is None,
fall back to the desktop's window.
'''
def getComponentContext():
'''
Get current application's component context
'''
try:
if __global_context__ is not None:
return __global_context__
return uno.getComponentContext()
except Exception:
return uno.getComponentContext()
def getDesktop():
'''
Get current application's LibreOffice desktop
'''
ctx = getComponentContext()
return ctx.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", ctx)
def getDocument():
'''
Get active document
'''
desktop = getDesktop()
# try to activate current frame
    # needed sometimes because UNO doesn't find the correct window
# when debugging.
try:
desktop.getCurrentFrame().activate()
except Exception:
pass
return desktop.getCurrentComponent()
def getServiceManager():
'''
Gets the service manager
'''
return getComponentContext().ServiceManager
def createUnoService(serv):
'''
create an UNO service
'''
return getComponentContext().getServiceManager().createInstance(serv)
def MRI(target):
ctx = getComponentContext()
mri = ctx.ServiceManager.createInstanceWithContext("mytools.Mri", ctx)
mri.inspect(target)
def isLeenoDocument():
'''
check if current document is a LeenO document
'''
try:
return getDocument().getSheets().hasByName('S2')
except Exception:
return False
def DisableDocumentRefresh(oDoc):
'''
    Disable document refresh to speed up procedures
'''
oDoc.lockControllers()
oDoc.addActionLock()
def EnableDocumentRefresh(oDoc):
'''
    Re-enable document refresh
'''
oDoc.removeActionLock()
oDoc.unlockControllers()
def getGlobalVar(name):
if type(__builtins__) == type(sys):
bDict = __builtins__.__dict__
else:
bDict = __builtins__
return bDict.get('LEENO_GLOBAL_' + name)
def setGlobalVar(name, value):
if type(__builtins__) == type(sys):
bDict = __builtins__.__dict__
else:
bDict = __builtins__
bDict['LEENO_GLOBAL_' + name] = value
def initGlobalVars(dict):
if type(__builtins__) == type(sys):
bDict = __builtins__.__dict__
else:
bDict = __builtins__
for key, value in dict.items():
bDict['LEENO_GLOBAL_' + key] = value
def dictToProperties(values, unoAny=False):
'''
convert a dictionary in a tuple of UNO properties
if unoAny is True, return the result in an UNO Any variable
otherwise use a python tuple
'''
ps = tuple([PropertyValue(Name=n, Value=v) for n, v in values.items()])
if unoAny:
ps = uno.Any('[]com.sun.star.beans.PropertyValue', ps)
return ps
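
# A minimal usage sketch: UNO calls that take a sequence of PropertyValue can
# be fed from a plain dict ('Hidden' is a standard MediaDescriptor property):
#
#     props = dictToProperties({'Hidden': True})
#     doc = getDesktop().loadComponentFromURL(
#         'private:factory/scalc', '_blank', 0, props)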
def daysInMonth(dat):
    '''
    returns the number of days in the month of date dat
    '''
    # building date(year, month + 1, dat.day) breaks for days missing from
    # the next month (e.g. Jan 31 -> Feb 31); calendar handles this directly
    return calendar.monthrange(dat.year, dat.month)[1]
def firstWeekDay(dat):
'''
returns first week day in month from dat
monday is 0
'''
return calendar.weekday(dat.year, dat.month, 1)
DAYNAMES = ['Lun', 'Mar', 'Mer', 'Gio', 'Ven', 'Sab', 'Dom']
MONTHNAMES = [
'Gennaio', 'Febbraio', 'Marzo', 'Aprile',
'Maggio', 'Giugno', 'Luglio', 'Agosto',
'Settembre', 'Ottobre', 'Novembre', 'Dicembre'
]
def date2String(dat, fmt = 0):
'''
    convert a date to a string
fmt = 0 25 Febbraio 2020
fmt = 1 25/2/2020
fmt = 2 25-02-2020
fmt = 3 25.02.2020
'''
d = dat.day
m = dat.month
if m < 10:
ms = '0' + str(m)
else:
ms = str(m)
y = dat.year
if fmt == 1:
return str(d) + '/' + ms + '/' + str(y)
elif fmt == 2:
return str(d) + '-' + ms + '-' + str(y)
elif fmt == 3:
return str(d) + '.' + ms + '.' + str(y)
else:
return str(d) + ' ' + MONTHNAMES[m - 1] + ' ' + str(y)
def string2Date(s):
if '.' in s:
sp = s.split('.')
elif '/' in s:
sp = s.split('/')
elif '-' in s:
sp = s.split('-')
else:
return date.today()
if len(sp) != 3:
        raise ValueError('invalid date string: ' + s)
day = int(sp[0])
month = int(sp[1])
year = int(sp[2])
return date(day=day, month=month, year=year)
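# e.g. string2Date('25.02.2020') -> date(2020, 2, 25); a string with no known
# separator falls back to today's date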
def countPdfPages(path):
'''
Returns the number of pages in a PDF document
using external PyPDF2 module
'''
with open(path, 'rb') as f:
pdf = PyPDF2.PdfFileReader(f)
return pdf.getNumPages()
def replacePatternWithField(oTxt, pattern, oField):
'''
Replaces a string pattern in a Text object
(for example '[PATTERN]') with the given field
'''
# pattern may be there many times...
repl = False
pos = oTxt.String.find(pattern)
while pos >= 0:
#create a cursor
cursor = oTxt.createTextCursor()
# use it to select the pattern
cursor.collapseToStart()
cursor.goRight(pos, False)
cursor.goRight(len(pattern), True)
# remove the pattern from text
cursor.String = ''
# insert the field at cursor's position
cursor.collapseToStart()
oTxt.insertTextContent(cursor, oField, False)
# next occurrence of pattern
pos = oTxt.String.find(pattern)
repl = True
return repl
|
import os, sys
def open_in_browser(link):
browser = os.environ.get('BROWSER', 'firefox')
child = os.fork()
if child == 0:
# We are the child
try:
os.spawnlp(os.P_NOWAIT, browser, browser, link)
os._exit(0)
        except Exception as ex:
            print("Error", ex, file=sys.stderr)
os._exit(1)
os.waitpid(child, 0)
|
inp = 0
outp = 1
parameters = dict() # parameterizable cell
properties = {'Device ID': ' 0x01', 'Channel [0/1]': ' 0', 'name': 'epos_areadBlk'} # for netlisting
iconSource = 'AD'
views = {'icon':iconSource}
|
import json
import maps
import traceback
from requests import get
from requests import post
from requests import put
from tendrl.commons.utils import log_utils as logger
from tendrl.monitoring_integration.grafana import constants
from tendrl.monitoring_integration.grafana import exceptions
from tendrl.monitoring_integration.grafana import utils
def _post_datasource(datasource_json):
config = maps.NamedDict(NS.config.data)
if utils.port_open(config.grafana_port, config.grafana_host):
resp = post(
"http://{}:{}/api/datasources".format(
config.grafana_host,
config.grafana_port
),
headers=constants.HEADERS,
auth=config.credentials,
data=datasource_json
)
else:
raise exceptions.ConnectionFailedException
return resp
def form_datasource_json():
config = maps.NamedDict(NS.config.data)
url = "http://" + str(config.datasource_host) + ":" \
+ str(config.datasource_port)
datasource_json = (
{'name': config.datasource_name,
'type': config.datasource_type,
'url': url,
'access': config.access,
'basicAuth': config.basicAuth,
'isDefault': config.isDefault
}
)
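    # Illustrative result (actual values come from NS.config.data), e.g.:
    #   {'name': 'Graphite', 'type': 'graphite', 'url': 'http://host:2003',
    #    'access': 'proxy', 'basicAuth': False, 'isDefault': True}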
return datasource_json
def create_datasource():
try:
datasource_json = form_datasource_json()
response = _post_datasource(json.dumps(datasource_json))
return response
    except exceptions.ConnectionFailedException:
        # print_stack() only prints to stderr and returns None; format_exc()
        # captures the traceback text for the log message
        logger.log("error", NS.get("publisher_id", None),
                   {'message': traceback.format_exc()})
        raise
def get_data_source():
config = maps.NamedDict(NS.config.data)
if utils.port_open(config.grafana_port, config.grafana_host):
resp = get(
"http://{}:{}/api/datasources/id/{}".format(
config.grafana_host,
config.grafana_port,
config.datasource_name
),
auth=config.credentials
)
else:
raise exceptions.ConnectionFailedException
return resp
def update_datasource(datasource_id):
try:
config = maps.NamedDict(NS.config.data)
datasource_json = form_datasource_json()
datasource_str = json.dumps(datasource_json)
if utils.port_open(config.grafana_port, config.grafana_host):
response = put(
"http://{}:{}/api/datasources/{}".format(
config.grafana_host,
config.grafana_port,
datasource_id
),
headers=constants.HEADERS,
auth=config.credentials,
data=datasource_str
)
else:
raise exceptions.ConnectionFailedException
return response
except exceptions.ConnectionFailedException as ex:
logger.log("error", NS.get("publisher_id", None),
{'message': str(ex)})
raise ex
|
from spack import *
import glob
import os.path
class Xbraid(MakefilePackage):
"""XBraid: Parallel time integration with Multigrid"""
homepage = "https://computing.llnl.gov/projects/parallel-time-integration-multigrid/software"
url = "https://github.com/XBraid/xbraid/archive/v2.2.0.tar.gz"
version('2.2.0', sha256='082623b2ddcd2150b3ace65b96c1e00be637876ec6c94dc8fefda88743b35ba3')
depends_on('mpi')
def build(self, spec, prefix):
make('libbraid.a')
# XBraid doesn't have a real install target, so it has to be done
# manually
def install(self, spec, prefix):
# Install headers
mkdirp(prefix.include)
headers = glob.glob('*.h')
for f in headers:
install(f, join_path(prefix.include, os.path.basename(f)))
# Install library
mkdirp(prefix.lib)
library = 'libbraid.a'
install(library, join_path(prefix.lib, library))
# Install other material (e.g., examples, tests, docs)
mkdirp(prefix.share)
install('makefile.inc', prefix.share)
install_tree('examples', prefix.share.examples)
install_tree('drivers', prefix.share.drivers)
# TODO: Some of the scripts in 'test' are useful, even for
# users; some could be deleted from an installation because
# they're not useful to users
install_tree('test', prefix.share.test)
install_tree('user_utils', prefix.share.user_utils)
install_tree('docs', prefix.share.docs)
@property
def libs(self):
return find_libraries('libbraid', root=self.prefix,
shared=False, recursive=True)
|
import os
import six
from gi.repository import BlockDev as blockdev
from ..devicelibs import mdraid, raid
from .. import errors
from .. import util
from ..flags import flags
from ..storage_log import log_method_call
from .. import udev
from ..size import Size
import logging
log = logging.getLogger("blivet")
from .storage import StorageDevice
from .container import ContainerDevice
from .raid import RaidDevice
class MDRaidArrayDevice(ContainerDevice, RaidDevice):
""" An mdraid (Linux RAID) device. """
_type = "mdarray"
_packages = ["mdadm"]
_devDir = "/dev/md"
_formatClassName = property(lambda s: "mdmember")
_formatUUIDAttr = property(lambda s: "mdUuid")
def __init__(self, name, level=None, major=None, minor=None, size=None,
memberDevices=None, totalDevices=None,
uuid=None, fmt=None, exists=False, metadataVersion=None,
parents=None, sysfsPath=''):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
:keyword uuid: the device UUID
:type uuid: str
:keyword level: the device's RAID level
:type level: any valid RAID level descriptor
:keyword int memberDevices: the number of active member devices
:keyword int totalDevices: the total number of member devices
:keyword metadataVersion: the version of the device's md metadata
:type metadataVersion: str (eg: "0.90")
:keyword minor: the device minor (obsolete?)
:type minor: int
"""
# pylint: disable=unused-argument
# These attributes are used by _addParent, so they must be initialized
# prior to instantiating the superclass.
self._memberDevices = 0 # the number of active (non-spare) members
self._totalDevices = 0 # the total number of members
# avoid attribute-defined-outside-init pylint warning
self._level = None
super(MDRaidArrayDevice, self).__init__(name, fmt=fmt, uuid=uuid,
exists=exists, size=size,
parents=parents,
sysfsPath=sysfsPath)
try:
self.level = level
except errors.DeviceError as e:
# Could not set the level, so set loose the parents that were
# added in superclass constructor.
for dev in self.parents:
dev.removeChild()
raise e
self.uuid = uuid
self._totalDevices = util.numeric_type(totalDevices)
self.memberDevices = util.numeric_type(memberDevices)
self.chunkSize = mdraid.MD_CHUNK_SIZE
if not self.exists and not isinstance(metadataVersion, str):
self.metadataVersion = "default"
else:
self.metadataVersion = metadataVersion
if self.parents and self.parents[0].type == "mdcontainer" and self.type != "mdbiosraidarray":
raise errors.DeviceError("A device with mdcontainer member must be mdbiosraidarray.")
if self.exists and self.mdadmFormatUUID and not flags.testing:
# this is a hack to work around mdadm's insistence on giving
# really high minors to arrays it has no config entry for
with open("/etc/mdadm.conf", "a") as c:
c.write("ARRAY %s UUID=%s\n" % (self.path, self.mdadmFormatUUID))
@property
def mdadmFormatUUID(self):
""" This array's UUID, formatted for external use.
:returns: the array's UUID in mdadm format, if available
:rtype: str or NoneType
"""
formatted_uuid = None
if self.uuid is not None:
try:
formatted_uuid = blockdev.md.get_md_uuid(self.uuid)
except blockdev.MDRaidError:
pass
return formatted_uuid
@property
def level(self):
""" Return the raid level
:returns: raid level value
:rtype: an object that represents a RAID level
"""
return self._level
@property
def _levels(self):
""" Allowed RAID level for this type of device."""
return mdraid.RAID_levels
@level.setter
def level(self, value):
""" Set the RAID level and enforce restrictions based on it.
:param value: new raid level
:param type: object
:raises :class:`~.errors.DeviceError`: if value does not describe
a valid RAID level
:returns: None
"""
try:
level = self._getLevel(value, self._levels)
except ValueError as e:
raise errors.DeviceError(e)
self._level = level
@property
def createBitmap(self):
""" Whether or not a bitmap should be created on the array.
        If the array is sufficiently small, a bitmap yields no benefit.
If the array has no redundancy, a bitmap is just pointless.
"""
try:
return self.level.has_redundancy() and self.size >= Size(1000) and self.format.type != "swap"
except errors.RaidError:
# If has_redundancy() raises an exception then this device has
# a level for which the redundancy question is meaningless. In
# that case, creating a write-intent bitmap would be a meaningless
# action.
return False
def getSuperBlockSize(self, raw_array_size):
"""Estimate the superblock size for a member of an array,
given the total available memory for this array and raid level.
:param raw_array_size: total available for this array and level
:type raw_array_size: :class:`~.size.Size`
:returns: estimated superblock size
:rtype: :class:`~.size.Size`
"""
return blockdev.md.get_superblock_size(raw_array_size,
version=self.metadataVersion)
@property
def size(self):
"""Returns the actual or estimated size depending on whether or
not the array exists.
"""
if not self.exists or not self.mediaPresent:
try:
size = self.level.get_size([d.size for d in self.devices],
self.memberDevices,
self.chunkSize,
self.getSuperBlockSize)
except (blockdev.MDRaidError, errors.RaidError) as e:
log.info("could not calculate size of device %s for raid level %s: %s", self.name, self.level, e)
size = Size(0)
log.debug("non-existent RAID %s size == %s", self.level, size)
else:
size = self.currentSize
log.debug("existing RAID %s size == %s", self.level, size)
return size
def updateSize(self):
# pylint: disable=bad-super-call
super(ContainerDevice, self).updateSize()
@property
def description(self):
levelstr = self.level.nick if self.level.nick else self.level.name
return "MDRAID set (%s)" % levelstr
def __repr__(self):
s = StorageDevice.__repr__(self)
s += (" level = %(level)s spares = %(spares)s\n"
" members = %(memberDevices)s\n"
" total devices = %(totalDevices)s"
" metadata version = %(metadataVersion)s" %
{"level": self.level, "spares": self.spares,
"memberDevices": self.memberDevices,
"totalDevices": self.totalDevices,
"metadataVersion": self.metadataVersion})
return s
@property
def dict(self):
d = super(MDRaidArrayDevice, self).dict
d.update({"level": str(self.level),
"spares": self.spares, "memberDevices": self.memberDevices,
"totalDevices": self.totalDevices,
"metadataVersion": self.metadataVersion})
return d
@property
def mdadmConfEntry(self):
""" This array's mdadm.conf entry. """
uuid = self.mdadmFormatUUID
if self.memberDevices is None or not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
fmt = "ARRAY %s level=%s num-devices=%d UUID=%s\n"
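        # e.g. "ARRAY /dev/md0 level=raid1 num-devices=2 UUID=<uuid>\n" (illustrative)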
return fmt % (self.path, self.level, self.memberDevices, uuid)
@property
def totalDevices(self):
""" Total number of devices in the array, including spares. """
if not self.exists:
return self._totalDevices
else:
return len(self.parents)
def _getMemberDevices(self):
return self._memberDevices
def _setMemberDevices(self, number):
if not isinstance(number, six.integer_types):
raise ValueError("memberDevices must be an integer")
if not self.exists and number > self.totalDevices:
raise ValueError("memberDevices cannot be greater than totalDevices")
self._memberDevices = number
memberDevices = property(_getMemberDevices, _setMemberDevices,
doc="number of member devices")
def _getSpares(self):
spares = 0
if self.memberDevices is not None:
if self.totalDevices is not None and \
self.totalDevices > self.memberDevices:
spares = self.totalDevices - self.memberDevices
elif self.totalDevices is None:
spares = self.memberDevices
self._totalDevices = self.memberDevices
return spares
def _setSpares(self, spares):
max_spares = self.level.get_max_spares(len(self.parents))
if spares > max_spares:
log.debug("failed to set new spares value %d (max is %d)",
spares, max_spares)
raise errors.DeviceError("new spares value is too large")
if self.totalDevices > spares:
self.memberDevices = self.totalDevices - spares
spares = property(_getSpares, _setSpares)
def _addParent(self, member):
super(MDRaidArrayDevice, self)._addParent(member)
if self.status and member.format.exists:
# we always probe since the device may not be set up when we want
# information about it
self._size = self.currentSize
# These should be incremented when adding new member devices except
# during devicetree.populate. When detecting existing arrays we will
# have gotten these values from udev and will use them to determine
# whether we found all of the members, so we shouldn't change them in
# that case.
if not member.format.exists:
self._totalDevices += 1
self.memberDevices += 1
def _removeParent(self, member):
error_msg = self._validateParentRemoval(self.level, member)
if error_msg:
raise errors.DeviceError(error_msg)
super(MDRaidArrayDevice, self)._removeParent(member)
self.memberDevices -= 1
@property
def _trueStatusStrings(self):
""" Strings in state file for which status() should return True."""
return ("clean", "active", "active-idle", "readonly", "read-auto")
@property
def status(self):
""" This device's status.
For now, this should return a boolean:
True the device is open and ready for use
False the device is not open
"""
# check the status in sysfs
status = False
if not self.exists:
return status
if os.path.exists(self.path) and not self.sysfsPath:
# the array has been activated from outside of blivet
self.updateSysfsPath()
# make sure the active array is the one we expect
info = udev.get_device(self.sysfsPath)
uuid = udev.device_get_md_uuid(info)
if uuid and uuid != self.uuid:
log.warning("md array %s is active, but has UUID %s -- not %s",
self.path, uuid, self.uuid)
self.sysfsPath = ""
return status
state_file = "%s/md/array_state" % self.sysfsPath
        try:
            with open(state_file) as f:
                state = f.read().strip()
            if state in self._trueStatusStrings:
                status = True
        except IOError:
            status = False
return status
def memberStatus(self, member):
if not (self.status and member.status):
return
member_name = os.path.basename(member.sysfsPath)
path = "/sys/%s/md/dev-%s/state" % (self.sysfsPath, member_name)
        try:
            with open(path) as f:
                state = f.read().strip()
        except IOError:
            state = None
return state
@property
def degraded(self):
""" Return True if the array is running in degraded mode. """
rc = False
degraded_file = "%s/md/degraded" % self.sysfsPath
if os.access(degraded_file, os.R_OK):
            with open(degraded_file) as f:
                val = f.read().strip()
if val == "1":
rc = True
return rc
@property
def members(self):
""" Returns this array's members.
:rtype: list of :class:`StorageDevice`
"""
return list(self.parents)
@property
def complete(self):
""" An MDRaidArrayDevice is complete if it has at least as many
component devices as its count of active devices.
"""
return (self.memberDevices <= len(self.members)) or not self.exists
@property
def devices(self):
""" Return a list of this array's member device instances. """
return self.parents
def _postSetup(self):
super(MDRaidArrayDevice, self)._postSetup()
self.updateSysfsPath()
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
disks = []
for member in self.devices:
member.setup(orig=orig)
disks.append(member.path)
blockdev.md.activate(self.path, members=disks, uuid=self.mdadmFormatUUID)
def _postTeardown(self, recursive=False):
super(MDRaidArrayDevice, self)._postTeardown(recursive=recursive)
        # mdadm reuses minors indiscriminately when there is no mdadm.conf, so
# we need to clear the sysfs path now so our status method continues to
# give valid results
self.sysfsPath = ''
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# We don't really care what the array's state is. If the device
# file exists, we want to deactivate it. mdraid has too many
# states.
if self.exists and os.path.exists(self.path):
blockdev.md.deactivate(self.path)
self._postTeardown(recursive=recursive)
def _postCreate(self):
# this is critical since our status method requires a valid sysfs path
self.exists = True # this is needed to run updateSysfsPath
self.updateSysfsPath()
StorageDevice._postCreate(self)
# update our uuid attribute with the new array's UUID
# XXX this won't work for containers since no UUID is reported for them
info = blockdev.md.detail(self.path)
self.uuid = info.uuid
for member in self.devices:
member.format.mdUuid = self.uuid
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
disks = [disk.path for disk in self.devices]
spares = len(self.devices) - self.memberDevices
level = None
if self.level:
level = str(self.level)
blockdev.md.create(self.path, level, disks, spares,
version=self.metadataVersion,
bitmap=self.createBitmap)
udev.settle()
def _remove(self, member):
self.setup()
# see if the device must be marked as failed before it can be removed
fail = (self.memberStatus(member) == "in_sync")
blockdev.md.remove(self.path, member.path, fail)
def _add(self, member):
""" Add a member device to an array.
:param str member: the member's path
:raises: blockdev.MDRaidError
"""
self.setup()
raid_devices = None
try:
if not self.level.has_redundancy():
if self.level is not raid.Linear:
raid_devices = int(blockdev.md.detail(self.name).raid_devices) + 1
except errors.RaidError:
pass
blockdev.md.add(self.path, member.path, raid_devs=raid_devices)
@property
def formatArgs(self):
formatArgs = []
if self.format.type == "ext2":
recommended_stride = self.level.get_recommended_stride(self.memberDevices)
if recommended_stride:
formatArgs = ['-R', 'stride=%d' % recommended_stride ]
return formatArgs
@property
def model(self):
return self.description
def dracutSetupArgs(self):
return set(["rd.md.uuid=%s" % self.mdadmFormatUUID])
def populateKSData(self, data):
if self.isDisk:
return
super(MDRaidArrayDevice, self).populateKSData(data)
data.level = self.level.name
data.spares = self.spares
data.members = ["raid.%d" % p.id for p in self.parents]
data.preexist = self.exists
data.device = self.name
class MDContainerDevice(MDRaidArrayDevice):
_type = "mdcontainer"
def __init__(self, name, **kwargs):
kwargs['level'] = raid.Container
super(MDContainerDevice, self).__init__(name, **kwargs)
@property
def _levels(self):
return mdraid.MDRaidLevels(["container"])
@property
def description(self):
return "BIOS RAID container"
@property
def mdadmConfEntry(self):
uuid = self.mdadmFormatUUID
if not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
return "ARRAY %s UUID=%s\n" % (self.path, uuid)
@property
def _trueStatusStrings(self):
return ("clean", "active", "active-idle", "readonly", "read-auto", "inactive")
def teardown(self, recursive=None):
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# Since BIOS RAID sets (containers in mdraid terminology) never change
# there is no need to stop them and later restart them. Not stopping
# (and thus also not starting) them also works around bug 523334
return
@property
def mediaPresent(self):
# Containers should not get any format handling done
# (the device node does not allow read / write calls)
return False
class MDBiosRaidArrayDevice(MDRaidArrayDevice):
_type = "mdbiosraidarray"
_formatClassName = property(lambda s: None)
_isDisk = True
_partitionable = True
def __init__(self, name, **kwargs):
super(MDBiosRaidArrayDevice, self).__init__(name, **kwargs)
# For container members probe size now, as we cannot determine it
        # when torn down.
self._size = self.currentSize
@property
def size(self):
# For container members return probed size, as we cannot determine it
        # when torn down.
return self._size
@property
def description(self):
levelstr = self.level.nick if self.level.nick else self.level.name
return "BIOS RAID set (%s)" % levelstr
@property
def mdadmConfEntry(self):
uuid = self.mdadmFormatUUID
if not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
return "ARRAY %s UUID=%s\n" % (self.path, uuid)
@property
def members(self):
# If the array is a BIOS RAID array then its unique parent
# is a container and its actual member devices are the
# container's parents.
return list(self.parents[0].parents)
def teardown(self, recursive=None):
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# Since BIOS RAID sets (containers in mdraid terminology) never change
# there is no need to stop them and later restart them. Not stopping
# (and thus also not starting) them also works around bug 523334
return
|
""" Data filter converting CSTBox v2 event logs to v3 format.
Usage: ./cbx-2to3.py < /path/to/input/file > /path/to/output/file
"""
__author__ = 'Eric Pascual - CSTB (eric.pascual@cstb.fr)'
import fileinput
import json
for line in fileinput.input():
ts, var_type, var_name, value, data = line.split('\t')
# next 3 lines are specific to Actility box at home files conversion
if var_name.startswith('home.'):
var_name = var_name[5:]
var_name = '.'.join((var_type, var_name))
data = data.strip().strip('{}')
if data:
pairs = data.split(',')
data = json.dumps(dict([(k.lower(), v) for k, v in (pair.split('=') for pair in pairs)]))
else:
data = "{}"
print('\t'.join((ts, var_type, var_name, value, data)))
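# Illustrative conversion (hypothetical input line, fields tab-separated):
#   1386000000  temperature  home.kitchen  21.5  {UNIT=celsius}
# becomes:
#   1386000000  temperature  temperature.kitchen  21.5  {"unit": "celsius"}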
|
from typing import Callable, Any
from ..model import MetaEvent, Event
from ..exceptions import PropertyStatechartError
__all__ = ['InternalEventListener', 'PropertyStatechartListener']
class InternalEventListener:
"""
Listener that filters and propagates internal events as external events.
"""
def __init__(self, callable: Callable[[Event], Any]) -> None:
self._callable = callable
def __call__(self, event: MetaEvent) -> None:
if event.name == 'event sent':
self._callable(Event(event.event.name, **event.event.data))
class PropertyStatechartListener:
"""
Listener that propagates meta-events to given property statechart, executes
the property statechart, and checks it.
"""
def __init__(self, interpreter) -> None:
self._interpreter = interpreter
def __call__(self, event: MetaEvent) -> None:
self._interpreter.queue(event)
self._interpreter.execute()
if self._interpreter.final:
raise PropertyStatechartError(self._interpreter)
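# Illustrative wiring (assumes sismic's Interpreter.attach()/queue() API):
#   interpreter.attach(InternalEventListener(other_interpreter.queue))
#   interpreter.attach(PropertyStatechartListener(property_interpreter))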
|
'''
Created on Jan 6, 2018
@author: consultit
'''
from panda3d.core import Filename
import sys, os
from subprocess import call
currdir = os.path.abspath(sys.path[0])
builddir = Filename.fromOsSpecific(os.path.join(currdir, '/ely/')).getFullpath()
elydir = Filename.fromOsSpecific(os.path.join(currdir, '/ely/')).getFullpath()
lpref = ''
mpref = ''
lsuff = '.so'
tools = 'libtools'
modules = ['ai', 'audio', 'control', 'physics']
if __name__ == '__main__':
# cwd
os.chdir(currdir + builddir)
# build 'tools'
libtools = lpref + tools + lsuff
print('building "' + libtools + '" ...')
toolsdir = '..' + elydir + tools
args = ['build.py', '--dir', toolsdir, '--clean']
call(['/usr/bin/python'] + args)
#print('installing "' + libtools + '" ...')
#args = [libtools, toolsdir]
#call(['/usr/bin/install'] + args)
# build modules
for module in modules:
modulelib = mpref + module + lsuff
print('building "' + modulelib + '" ...')
moduledir = '..' + elydir + module
args = ['build.py', '--dir', moduledir, '--libs', libtools, '--libs_src',
toolsdir, '--clean']
call(['/usr/bin/python'] + args)
#print('installing "' + modulelib + '" ...')
#args = [modulelib, moduledir]
#call(['/usr/bin/install'] + args)
|
""" Barcode Creation (PDF417)
"""
import os
basedir = os.path.split(__file__)[0]
bcdelib = os.path.join(basedir, 'psbcdelib.ps')
with open(bcdelib, 'r') as _lib_file:
    _lib_source = _lib_file.read()
class Barcode(object):
    __lib__ = _lib_source
@property
def ps(self):
raise NotImplementedError
@property
def eps(self):
raise NotImplementedError
|
import pytest
from forte.solvers import solver_factory, HF
def test_df_rhf():
"""Test DF-RHF on HF."""
ref_energy = -100.04775218911111
# define a molecule
xyz = """
H 0.0 0.0 0.0
F 0.0 0.0 1.0
"""
# create a molecular model
input = solver_factory(molecule=xyz, basis='cc-pVTZ', int_type='df')
# specify the electronic state
state = input.state(charge=0, multiplicity=1, sym='a1')
# create a HF object and run
hf = HF(input, state=state)
hf.run()
assert hf.value('hf energy') == pytest.approx(ref_energy, 1.0e-10)
def test_df_rhf_select_aux():
"""Test DF-RHF on HF."""
ref_energy = -100.04775602524956
# define a molecule
xyz = """
H 0.0 0.0 0.0
F 0.0 0.0 1.0
"""
# create a molecular model
input = solver_factory(molecule=xyz, int_type='df', basis='cc-pVTZ', scf_aux_basis='cc-pVQZ-JKFIT')
# specify the electronic state
state = input.state(charge=0, multiplicity=1, sym='a1')
# create a HF object and run
hf = HF(input, state=state)
hf.run()
assert hf.value('hf energy') == pytest.approx(ref_energy, 1.0e-10)
if __name__ == "__main__":
test_df_rhf()
test_df_rhf_select_aux()
|
import time
import sys
def sizeof_fmt(num, unit='B'):
# source: http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    for uprefix in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "{:3.2f} {}{}".format(num, uprefix, unit)
num /= 1024.0
return "{:3.2f} Yi{}".format(num, unit)
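# e.g. sizeof_fmt(1536) -> '1.50 KiB'; sizeof_fmt(3) -> '3.00 B'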
output = sys.stderr
progress_format = '{n} [{b}] {p:3.1f}% ({d}/{a}) {s}'
class FileTransferProgressBar(object):
# inspired by clint.textui.progress.Bar
def __init__(self, filesize, name='', width=32, empty_char=' ', filled_char='#', hide=None, speed_update=0.2,
bar_update=0.05, progress_format=progress_format):
self.name, self.filesize, self.width, self.ec, self.fc = name, filesize, width, empty_char, filled_char
self.speed_update, self.bar_update, self.progress_format = speed_update, bar_update, progress_format
if hide is None:
try:
self.hide = not output.isatty()
except AttributeError:
self.hide = True
else:
self.hide = hide
self.last_progress = 0
self.last_time = time.time()
self.last_speed_update = self.last_time
self.start_time = self.last_time
self.last_speed_progress = 0
self.last_speed = 0
self.max_bar_size = 0
def show(self, progress):
if time.time() - self.last_time > self.bar_update:
self.last_time = time.time()
self.last_progress = progress
if self.last_time - self.last_speed_update > self.speed_update:
                self.last_speed = (progress - self.last_speed_progress) / float(self.last_time - self.last_speed_update)
self.last_speed_update = self.last_time
self.last_speed_progress = progress
status = self.width * progress // self.filesize
percent = float(progress * 100) / self.filesize
bar = self.progress_format.format(n=self.name, b=self.fc * status + self.ec * (self.width - status),
p=percent, d=sizeof_fmt(progress), a=sizeof_fmt(self.filesize),
s=sizeof_fmt(self.last_speed) + '/s')
max_bar = self.max_bar_size
self.max_bar_size = max(len(bar), self.max_bar_size)
bar = bar + (' ' * (max_bar - len(bar))) + '\r' # workaround for ghosts
output.write(bar)
output.flush()
def done(self):
speed = self.filesize / float(time.time() - self.start_time)
bar = self.progress_format.format(n=self.name, b=self.fc * self.width, p=100, d=sizeof_fmt(self.filesize),
a=sizeof_fmt(self.filesize), s=sizeof_fmt(speed) + '/s')
max_bar = self.max_bar_size
self.max_bar_size = max(len(bar), self.max_bar_size)
bar = bar + (' ' * (max_bar - len(bar))) + '\r'
output.write(bar)
output.write('\n')
output.flush()
|
from ctypes import *
import math
lib = cdll.LoadLibrary("Z:\\Documents\\Projects\\SWMMOpenMIComponent\\Source\\SWMMOpenMIComponent\\bin\\Debug\\SWMMComponent.dll")
print(lib)
print("\n")
finp = b"Z:\\Documents\\Projects\\SWMMOpenMIComponent\\Source\\SWMMOpenMINoGlobalsPythonTest\\test.inp"
frpt = b"Z:\\Documents\\Projects\\SWMMOpenMIComponent\\Source\\SWMMOpenMINoGlobalsPythonTest\\test.rpt"
fout = b"Z:\\Documents\\Projects\\SWMMOpenMIComponent\\Source\\SWMMOpenMINoGlobalsPythonTest\\test.out"
project = lib.swmm_open(finp , frpt , fout)
print(project)
print("\n")
newHour = 0
oldHour = 0
theDay = 0
theHour = 0
elapsedTime = c_double()
if(lib.swmm_getErrorCode(project) == 0):
lib.swmm_start(project, 1)
if(lib.swmm_getErrorCode(project) == 0):
print("Simulating day: 0 Hour: 0")
print("\n")
while True:
lib.swmm_step(project, byref(elapsedTime))
newHour = elapsedTime.value * 24
if(newHour > oldHour):
theDay = int(elapsedTime.value)
temp = math.floor(elapsedTime.value)
temp = (elapsedTime.value - temp) * 24.0
theHour = int(temp)
#print("\b\b\b\b\b\b\b\b\b\b\b\b\b\b")
#print("\n")
                print("Hour", theHour, "Day", theDay, end=' \r')
#print("\n")
oldHour = newHour
if(elapsedTime.value <= 0 or not lib.swmm_getErrorCode(project) == 0):
break
lib.swmm_end(project)
lib.swmm_report(project)
lib.swmm_close(project)
|
import pytest
import importlib
from mpi4py import MPI
from spectralDNS import config, get_solver, solve
from TGMHD import initialize, regression_test, pi
comm = MPI.COMM_WORLD
if comm.Get_size() >= 4:
params = ('uniform_slab', 'nonuniform_slab',
'uniform_pencil', 'nonuniform_pencil')
else:
params = ('uniform', 'nonuniform')
@pytest.fixture(params=params)
def sol(request):
"""Check for uniform and non-uniform cube"""
pars = request.param.split('_')
mesh = pars[0]
mpi = 'slab'
if len(pars) == 2:
mpi = pars[1]
_args = ['--decomposition', mpi]
if mesh == 'uniform':
_args += ['--M', '4', '4', '4', '--L', '2*pi', '2*pi', '2*pi']
else:
_args += ['--M', '6', '5', '4', '--L', '6*pi', '4*pi', '2*pi']
_args += ['MHD']
return _args
def test_MHD(sol):
config.update(
{
'nu': 0.000625, # Viscosity
'dt': 0.01, # Time step
'T': 0.1, # End time
'eta': 0.01,
'L': [2*pi, 4*pi, 6*pi],
'M': [4, 5, 6],
'convection': 'Divergence'
}
)
solver = get_solver(regression_test=regression_test,
parse_args=sol)
context = solver.get_context()
initialize(**context)
solve(solver, context)
config.params.dealias = '3/2-rule'
initialize(**context)
solve(solver, context)
config.params.dealias = '2/3-rule'
config.params.optimization = 'cython'
importlib.reload(solver)
initialize(**context)
solve(solver, context)
config.params.write_result = 1
config.params.checkpoint = 1
    config.params.dt = 0.01
config.params.t = 0.0
config.params.tstep = 0
    config.params.T = 0.04
solver.regression_test = lambda c: None
solve(solver, context)
|
"""
Sample a specific geometry or set of geometries.
"""
import numpy as np
import nomad.core.glbl as glbl
import nomad.core.trajectory as trajectory
import nomad.core.log as log
def set_initial_coords(wfn):
"""Takes initial position and momentum from geometry specified in input"""
coords = glbl.properties['init_coords']
ndim = coords.shape[-1]
log.print_message('string',[' Initial coordinates taken from input file(s).\n'])
for coord in coords:
itraj = trajectory.Trajectory(glbl.properties['n_states'], ndim,
width=glbl.properties['crd_widths'],
mass=glbl.properties['crd_masses'],
parent=0, kecoef=glbl.modules['integrals'].kecoef)
# set position and momentum
itraj.update_x(np.array(coord[0]))
itraj.update_p(np.array(coord[1]))
# add a single trajectory specified by geometry.dat
wfn.add_trajectory(itraj)
|
"""
:copyright:
Wenjie Lei (lei@princeton.edu), 2016
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
from __future__ import (absolute_import, division, print_function) # NOQA
from .adjoint_source import calculate_adjsrc_on_stream # NOQA
from .adjoint_source import calculate_and_process_adjsrc_on_stream # NOQA
from .adjoint_source import calculate_adjsrc_on_trace # NOQA
from .adjoint_source import measure_adjoint_on_stream # NOQA
|
from common.db_sum import _metric_meta_db
'''get the data from table by name'''
def get_data_by_name(name, status=[1], other=0):
result = []
where = ''
if status:
status = ",".join([str(x) for x in status])
where += ' and status in ({}) '.format(status)
if other:
where += ' and id not in ({}) '.format(other)
sql = """
select * from t_chart_reports where name="{}" {};
""".format(name, where)
try:
result = _metric_meta_db.query(sql)
if result:
result = result[0]
    except Exception:
from traceback import print_exc
print_exc()
return result
'''get chart from table by ids'''
def get_data_by_ids(sids):
result = []
sids = [str(x) for x in sids]
sql = """
select * from t_chart_reports where id in ({});
""".format(",".join(sids))
try:
result = _metric_meta_db.query(sql)
    except Exception:
from traceback import print_exc
print_exc()
return result
'''get the data from table by id'''
def get_data_by_id(sid):
result = []
sql = """
select * from t_chart_reports where id={} and status=1;
""".format(int(sid))
try:
result = _metric_meta_db.query(sql)
if result:
result = result[0]
    except Exception:
from traceback import print_exc
print_exc()
return result
'''save data to chart table'''
def save(form):
hid = _metric_meta_db.insert('t_chart_reports', **form)
return hid
'''update chart table's data by id '''
def update(form):
_metric_meta_db.update('t_chart_reports', where="id={}".format(form['id']), **form)
return form['id']
'''get highchart_edit json'''
def get_chart(chart, data):
result = {}
if chart and data:
if chart.get('series', False):
first = data[0]
data = get_column_combine(data)
lens = len(first)
series = chart['series']
tmp_series = []
if series:
now_key = -1
for key, item in enumerate(series):
if key < lens - 1:
now_key = key
item['name'] = first[key + 1]
item['data'] = data[key]
tmp_series.append(item)
else:
break
template_series = series[-1]
for key, item in enumerate(first):
if key == 0:
continue
elif now_key < key - 1:
tmp = dict(template_series)
tmp['name'] = item
tmp['data'] = data[key - 1]
tmp['_colorIndex'] = key - 1
tmp['_symbolIndex'] = key - 1
tmp_series.append(tmp)
else:
tmp_series = series
chart['series'] = tmp_series
result = chart
return result
'''parse new data to highchart_edit json data'''
def get_column_combine(data):
result = []
if data:
lens = len(data[0])
if lens > 0:
            result = [[] for i in range(lens)]
for key, item in enumerate(data):
if key > 0:
for k, it in enumerate(item):
if k > 0:
if it == '':
result[k - 1].append([item[0], None])
else:
                            if isinstance(it, str):
try:
if r"." in it:
if r"," in it:
tmp = it.replace(",", "")
it = float(tmp)
else:
it = float(it)
elif r"," in it:
tmp = it.replace(",", "")
it = int(tmp)
else:
it = int(it)
                                except Exception:
from traceback import print_exc
print_exc()
result[k - 1].append([item[0], it])
return result
'''get the chart list'''
def get_chart_list(sid="", name="", fields=[], iscount=False, current=1, rowCount=20):
where = []
limit = ''
if sid:
if type(sid) != list:
sid = [sid]
where.append("""and id in ({})""".format(",".join(map(str, sid))))
if name:
where.append("""and name like "%{}%" """.format(name))
if rowCount:
stc = (int(current) - 1) * int(rowCount)
if not stc:
stc = 0
        limit = "limit {},{}".format(stc, rowCount)
content = "*"
orders = "order by id desc"
if iscount:
limit = ""
content = "count(*) as c"
orders = ""
elif fields:
content = ",".join(fields)
sql = """
select {} from t_chart_reports where status=1 {} {} {};
""".format(content, " ".join(where), orders, limit)
result = _metric_meta_db.query(sql)
if iscount:
if result:
return result[0]['c']
else:
return 0
else:
if result:
return result
else:
return []
|
from rapidsms.tests.scripted import TestScript
from apps.form.models import *
from apps.reporters.models import *
import apps.reporters.app as reporter_app
import apps.supply.app as supply_app
import apps.form.app as form_app
import apps.default.app as default_app
from app import App
from django.core.management.commands.dumpdata import Command
import time
import random
import os
from datetime import datetime
class TestApp (TestScript):
#apps = (reporter_app.App, App,form_app.App, supply_app.App, default_app.App )
apps = (reporter_app.App, App,form_app.App, supply_app.App )
# the test_backend script does the loading of the dummy backend that allows reporters
# to work properly in tests
fixtures = ['nigeria_llin', 'test_kano_locations', 'test_backend']
def setUp(self):
TestScript.setUp(self)
def testFixtures(self):
self._testKanoLocations()
self._testForms()
self._testRoles()
def testScript(self):
a = """
8005551219 > llin register 20 dl crummy user
8005551219 < Hello crummy! You are now registered as Distribution point team leader at KANO State.
"""
self.runScript(a)
# this should succeed because we just created him
reporters = Reporter.objects.all()
Reporter.objects.get(alias="cuser")
        kwargs = {"alias": "fail"}
        # make sure checking a non-existent user fails
        self.assertRaises(Reporter.DoesNotExist, Reporter.objects.get, **kwargs)
testRegistration = """
8005551212 > llin my status
8005551212 < Please register your phone with RapidSMS.
8005551212 > llin register 20 dl dummy user
8005551212 < Hello dummy! You are now registered as Distribution point team leader at KANO State.
8005551212 > llin my status
8005551212 < I think you are dummy user.
#duplicate submission
test_reg_dup > llin register 20 dl duplicate user
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
# this one should be a duplicate
test_reg_dup > llin register 20 dl duplicate user
test_reg_dup < Hello again duplicate! You are already registered as a Distribution point team leader at KANO State.
# but all of these should create a new registration
test_reg_dup > llin register 20 dl duplicate user withanothername
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 dl duplicate userlonger
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 dl duplicated user
test_reg_dup < Hello duplicated! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 sm duplicate user
test_reg_dup < Hello duplicate! You are now registered as Stock manager at KANO State.
test_reg_dup > llin register 2001 dl duplicate user
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at AJINGI LGA.
# case sensitivity
test_reg_2 > llin REGISTER 20 dl another user
test_reg_2 < Hello another! You are now registered as Distribution point team leader at KANO State.
# different name formats
test_reg_3 > llin register 20 dl onename
test_reg_3 < Hello onename! You are now registered as Distribution point team leader at KANO State.
# these fail
test_reg_4 > llin register 20 dl mister three names
test_reg_4 < Hello mister! You are now registered as Distribution point team leader at KANO State.
test_reg_5 > llin register 20 dl mister four name guy
test_reg_5 < Hello mister! You are now registered as Distribution point team leader at KANO State.
# some other spellings
test_reg_short > llin regstr 20 dl short user
test_reg_short < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_short_2 > llin regs 20 dl short user
test_reg_short_2 < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_short_3 > llin reg 20 dl short user
test_reg_short_3 < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_long > llin registered 20 dl long user
test_reg_long < Hello long! You are now registered as Distribution point team leader at KANO State.
# extra spaces
test_reg_8 > llin register 20 dl space guy
test_reg_8 < Hello space! You are now registered as Distribution point team leader at KANO State.
# new tests for more flexible roles
test_reg_dl > llin register 20 dl distribution leader
test_reg_dl < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_2 > llin register 20 ds distribution leader
test_reg_dl_2 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_3 > llin register 20 dm distribution leader
test_reg_dl_3 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_4 > llin register 20 dp distribution leader
test_reg_dl_4 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_lf > llin register 20 lf lga focal person
test_reg_lf < Hello lga! You are now registered as LGA focal person at KANO State.
test_reg_lf > llin register 20 lp lga focal person
test_reg_lf < Hello again lga! You are already registered as a LGA focal person at KANO State.
# alas, we're not perfect
test_reg_fail > llin rgstr 20 dl sorry guy
test_reg_fail < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testRegistrationErrors = """
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 45 DL hello world
12345 < Invalid form. 45 not in list of location codes
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 20 pp hello world
12345 < Invalid form. Unknown role code: pp
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 6803 AL hello world
12345 < Invalid form. 6803 not in list of location codes. Unknown role code: AL
12345 > llin my status
12345 < Please register your phone with RapidSMS.
"""
testKeyword= """
tkw_1 > llin register 20 dl keyword tester
tkw_1 < Hello keyword! You are now registered as Distribution point team leader at KANO State.
# base case
tkw_1 > llin nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# capitalize the domain
tkw_1 > LLIN nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# drop an L
tkw_1 > lin nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# mix the order - this is no longer supported
#tkw_1 > ILLn nets 2001 123 456 78 90
#tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
#tkw_1 > ilin nets 2001 123 456 78 90
#tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# ll anything works?
tkw_1 > ll nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
tkw_1 > llan nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# don't support w/o keyword
tkw_1 > nets 2001 123 456 78 90
# the default app to the rescue!
tkw_1 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testNets= """
8005551213 > llin register 2001 lf net guy
8005551213 < Hello net! You are now registered as LGA focal person at AJINGI LGA.
8005551213 > llin nets 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
8005551213 > llin nets 2001 123 456 78
8005551213 < Invalid form. The following fields are required: discrepancy
# test some of the different form prefix options
# case sensitivity
8005551213 > llin NETS 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# no s
8005551213 > llin net 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# really? this works?
8005551213 > llin Nt 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# something's gotta fail
8005551213 > llin n 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
8005551213 > llin bednets 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
8005551213 > llin ents 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testNetCards= """
8005551214 > llin register 200201 lf card guy
8005551214 < Hello card! You are now registered as LGA focal person at ALBASU CENTRAL Ward.
8005551214 > llin net cards 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin net cards 200201 123 456
8005551214 < Invalid form. The following fields are required: issued
# test some of the different form prefix options
# case sensitivity
8005551214 > llin NET CARDS 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# no s
8005551214 > llin net card 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# one word
8005551214 > llin netcards 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin netcard 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# he he
8005551214 > llin nt cd 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin ntcrds 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# something's gotta fail
8005551214 > llin cards 200201 123 456 78
8005551214 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testUnregisteredSubmissions = """
tus_1 > llin net cards 200201 123 456 78
tus_1 < Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78. Please register your phone
tus_1 > llin my status
tus_1 < Please register your phone with RapidSMS.
tus_2 > llin nets 2001 123 456 78 90
tus_2 < Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90. Please register your phone
tus_2 > llin my status
tus_2 < Please register your phone with RapidSMS.
"""
def testGenerateNetFixtures(self):
""" This isn't actually a test. It just takes advantage
of the test harness to spam a bunch of messages to the
nigeria app and spit out the data in a format that can
be sucked into a fixture. It should be moved to some
data generator at some point, but is being left here
        for laziness' sake """
# this is the number of net reports that will be generated
count = 0
# the sender will always be the same, for now
phone = "55555"
expected_actual_match_percent = .8
# allow specifying the minimum and maximum dates for message generation
min_date = datetime(2009,4,1)
max_date = datetime(2009,4,30)
min_time = time.mktime(min_date.timetuple())
max_time = time.mktime(max_date.timetuple())
# these are the locations that will be chosen. The actual
# location will be a distribution point under one of these
# wards
wards = [200101, 200102, 200103, 200104, 200105, 200106, 200107, 200108, 200109, 200110, 200201]
all_net_strings = []
for i in range(count):
# this first part generates a net form at a random DP
date = datetime.fromtimestamp(random.randint(min_time, max_time))
ward = Location.objects.get(code=random.choice(wards))
dp = random.choice(ward.children.all())
distributed = random.randint(50,500)
expected = random.randint(0,2000)
# create an actual amount based on the likelihood of match
if random.random() < expected_actual_match_percent:
actual = expected
else:
actual = random.randint(0,2000)
            discrepancy = random.randint(0, distributed // 5)
net_string = "%s@%s > llin nets %s %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), dp.code, distributed, expected, actual, discrepancy)
all_net_strings.append(net_string)
# the second part generates a net card form at a random MT
date = datetime.fromtimestamp(random.randint(min_time, max_time))
ward = Location.objects.get(code=random.choice(wards))
dp = random.choice(ward.children.all())
mt = random.choice(dp.children.all())
settlements = random.randint(3, 50)
people = random.randint(50, 600)
coupons = random.randint(50, 600)
net_card_string = "%s@%s > llin net cards %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), mt.code, settlements, people, coupons )
all_net_strings.append(net_card_string)
script = "\n".join(all_net_strings)
self.runScript(script)
dumpdata = Command()
filename = os.path.abspath(os.path.join(os.path.dirname(__file__),"fixtures/test_net_data.json"))
options = { "indent" : 2 }
datadump = dumpdata.handle("bednets", **options)
# uncomment these lines to save the fixture
def _testKanoLocations(self):
#TODO test for DPs and MTs
loc_types = LocationType.objects.all()
self.assertEqual(6, len(loc_types))
state = LocationType.objects.get(name="State")
lga = LocationType.objects.get(name="LGA")
ward = LocationType.objects.get(name="Ward")
locations = Location.objects.all()
# 1 state
self.assertEqual(1, len(locations.filter(type=state)))
# 44 lgas
self.assertEqual(44, len(locations.filter(type=lga)))
# 484 wards
self.assertEqual(484, len(locations.filter(type=ward)))
kano = locations.get(type=state)
self.assertEqual("KANO", kano.name)
self.assertEqual(44, len(kano.children.all()))
for lga in locations.filter(type=lga):
self.assertEqual(kano, lga.parent)
def _testForms(self):
forms = Form.objects.all()
self.assertEqual(5, len(forms))
for form_name in ["register", "issue", "receive", "nets", "netcards"]:
# this will throw an error if it doesn't exist
Form.objects.get(code__abbreviation=form_name)
def _testRoles(self):
# add this when we have a fixture for roles
roles = Role.objects.all()
self.assertEqual(4, len(roles))
for role_name in ["LGA focal person", "Ward supervisor", "Stock manager", "Distribution point team leader"]:
# this will throw an error if it doesn't exist
Role.objects.get(name=role_name)
|
'''
This file is part of GEAR_mc.
GEAR_mc is a fork of Jeremie Passerin's GEAR project.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com www.jeremiepasserin.com
Fork Author: Miquel Campos hello@miqueltd.com www.miqueltd.com
Date: 2013 / 08 / 16
'''
from gear.xsi import xsi, c, XSIMath, XSIFactory
import gear.xsi.utils as uti
import gear.xsi.transform as tra
def addCnsCurve(parent, name, centers, close=False, degree=1):
# convert collections to list
centers = [center for center in centers]
if degree == 3:
if len(centers) == 2:
centers.insert(0, centers[0])
centers.append(centers[-1])
elif len(centers) == 3:
centers.append(centers[-1])
points = []
for center in centers:
points.append(center.Kinematics.Global.Transform.PosX)
points.append(center.Kinematics.Global.Transform.PosY)
points.append(center.Kinematics.Global.Transform.PosZ)
points.append(1)
curve = parent.AddNurbsCurve(points, None, close, degree, c.siNonUniformParameterization, c.siSINurbs, name)
crv_geo = curve.ActivePrimitive.Geometry
for i, center in enumerate(centers):
cluster = crv_geo.AddCluster( c.siVertexCluster, "center_%s"%i, [i] )
xsi.ApplyOp( "ClusterCenter", cluster.FullName+";"+center.FullName, 0, 0, None, 2)
return curve
def addCurve(parent, name, points, close=False, degree=1, t=XSIMath.CreateTransform(), color=[0,0,0]):
curve = parent.AddNurbsCurve(points, None, close, degree, c.siNonUniformParameterization, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
def addCurve2(parent, name, points, ncp=[], kn=[], nkn=[], close=[], degree=[], t=XSIMath.CreateTransform(), color=[0,0,0]):
pointCount = len(ncp)
aPar = [c.siNonUniformParameterization for i in range(pointCount)]
curve = parent.AddNurbsCurveList2(pointCount, points, ncp, kn, nkn, close, degree, aPar, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
def addCurveFromPos(parent, name, positions, close=False, degree=1, knotsPara=c.siNonUniformParameterization, t=XSIMath.CreateTransform(), color=[0,0,0]):
points = []
for v in positions:
points.append(v.X)
points.append(v.Y)
points.append(v.Z)
points.append(1)
curve = parent.AddNurbsCurve(points, None, close, degree, knotsPara, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
def mergeCurves(curves):
points = []
ncp = []
kn = []
nkn = []
closed = []
degree = []
for curve in curves:
curve_matrix = curve.Kinematics.Global.Transform.Matrix4
for nurbscrv in curve.ActivePrimitive.Geometry.Curves:
ncp.append(nurbscrv.ControlPoints.Count)
kn.extend(nurbscrv.Knots.Array)
nkn.append(len(nurbscrv.Knots.Array))
closed.append(isClosed(nurbscrv))
degree.append(nurbscrv.Degree)
for point in nurbscrv.ControlPoints:
point_pos = point.Position
point_pos.MulByMatrix4InPlace(curve_matrix)
points.extend([point_pos.X, point_pos.Y,point_pos.Z, 1])
if len(ncp) > 1:
curve = addCurve2(xsi.ActiveSceneRoot, "curve", points, ncp, kn, nkn, closed, degree)
else:
curve = addCurve(xsi.ActiveSceneRoot, "curve", points, closed[0], degree[0])
return curve
def splitCurve(curve):
t = curve.Kinematics.Global.Transform
curves = [addCurve(curve.Parent, curve.Name+str(i), nurbscrv.ControlPoints.Array, isClosed(nurbscrv), nurbscrv.Degree, t) for i, nurbscrv in enumerate(curve.ActivePrimitive.Geometry.Curves)]
return curves
def isClosed(nurbscrv):
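    # XSI exposes no explicit 'closed' flag here; closedness is inferred from
    # the relationship between knot-array length and control-point count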
if nurbscrv.Degree == 3:
return not nurbscrv.ControlPoints.Count == (len(nurbscrv.Knots.Array)-2)
else:
return not nurbscrv.ControlPoints.Count == len(nurbscrv.Knots.Array)
def applyCurveResamplerOp(curve):
op = XSIFactory.CreateObject("gear_CurveResamplerOp")
op.AddIOPort(curve.ActivePrimitive)
op.Connect()
return op
def getGlobalPositionFromPercentage(percentage, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
position = crv_sub.EvaluatePositionFromPercentage(percentage)[0]
position = XSIMath.MapObjectPositionToWorldSpace(crv_tra, position)
return position
def getClosestU(position, crv, normalized=False):
crv_geo = crv.ActivePrimitive.Geometry
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
rtn = crv_geo.GetClosestCurvePosition2(pos)
crv_sub = crv_geo.Curves(rtn[0])
u = rtn[2]
if normalized:
u = crv_sub.GetNormalizedUFromU(u)
return u
def getClosestPercentage(position, crv):
crv_geo = crv.ActivePrimitive.Geometry
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
rtn = crv_geo.GetClosestCurvePosition2(pos)
crv_sub = crv_geo.Curves(rtn[0])
perc = crv_sub.GetPercentageFromU(rtn[2])
return perc
def getClosestGlobalTransform(position, crv, subcurve=0, tan_axis="x", upv_axis="y", normal=XSIMath.CreateVector3(0,1,0)):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
rtn = crv_geo.GetClosestCurvePosition2(pos)
u = rtn[2]
pos = rtn[3]
pos = XSIMath.MapObjectPositionToWorldSpace(crv_tra, pos)
tan = crv_sub.EvaluatePosition(u)[1]
r = crv_tra.Rotation
r.InvertInPlace()
tan.MulByRotationInPlace(r)
tan.AddInPlace(pos)
t = tra.getTransformLookingAt(pos, tan, normal, tan_axis+upv_axis, False)
return t
def getClosestGlobalPosition(position, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
pos = crv_geo.GetClosestCurvePosition2(pos)[3]
pos = XSIMath.MapObjectPositionToWorldSpace(crv_tra, pos)
return pos
def getClosestGlobalTangent(position, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
u = crv_geo.GetClosestCurvePosition2(pos)[2]
tan = crv_sub.EvaluatePosition(u)[1]
tan.MulByRotationInPlace(crv_tra.Rotation)
return tan
def getClosestGlobalNormal(position, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
u = crv_geo.GetClosestCurvePosition2(pos)[2]
nor = crv_sub.EvaluatePosition(u)[2]
nor.MulByRotationInPlace(crv_tra.Rotation)
return nor
def getClosestGlobalBiNormal(position, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
u = crv_geo.GetClosestCurvePosition2(pos)[2]
    binormal = crv_sub.EvaluatePosition(u)[3]
    binormal.MulByRotationInPlace(crv_tra.Rotation)
    return binormal
def getGlobalPointPosition(index, crv):
crv_geo = crv.ActivePrimitive.Geometry
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapObjectPositionToWorldSpace(crv_tra, crv_geo.Points(index).Position)
return pos
|
from test_methods import TestBaseFeedlyClass
|
"""
Created on 2013-12-16
@author: readon
@copyright: reserved
@note: CustomWidget example for mvp
"""
from gi.repository import Gtk
from gi.repository import GObject
class CustomEntry(Gtk.Entry):
"""
custom widget inherit from gtkentry.
"""
def __init__(self):
Gtk.Entry.__init__(self)
print "this is a custom widget loading"
GObject.type_register(CustomEntry)
|
import hashlib
import io
import struct
BLOCK_LENGTH = 1024 * 1024
try:
file_types = (file, io.IOBase)
except NameError:
file_types = (io.IOBase,)
def read_int(stream, length):
try:
return struct.unpack('<I', stream.read(length))[0]
except Exception:
return None
class HashedBlockIO(io.BytesIO):
"""
The data is stored in hashed blocks. Each block consists of a block index (4
bytes), the hash (32 bytes) and the block length (4 bytes), followed by the
block data. The block index starts counting at 0. The block hash is a
SHA-256 hash of the block data. A block has a maximum length of
BLOCK_LENGTH, but can be shorter.
Provide an I/O stream containing the hashed block data as the `block_stream`
argument when creating a HashedBlockIO. Alternatively the `bytes`
argument can be used to hand over data as a string/bytearray/etc. The data
is verified upon initialization and an IOError is raised when a hash does
not match.
HashedBlockIO is a subclass of io.BytesIO. The inherited read, seek, ...
functions shall be used to access the verified data.
"""
def __init__(self, block_stream=None, bytes=None):
io.BytesIO.__init__(self)
input_stream = None
if block_stream is not None:
if not (isinstance(block_stream, io.IOBase) or isinstance(block_stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
input_stream = block_stream
elif bytes is not None:
input_stream = io.BytesIO(bytes)
if input_stream is not None:
self.read_block_stream(input_stream)
def read_block_stream(self, block_stream):
"""
Read the whole block stream into the self-BytesIO.
"""
if not (isinstance(block_stream, io.IOBase) or isinstance(block_stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
while True:
data = self._next_block(block_stream)
if not self.write(data):
break
self.seek(0)
def _next_block(self, block_stream):
"""
Read the next block and verify the data.
Raises an IOError if the hash does not match.
"""
        index = read_int(block_stream, 4)
        bhash = block_stream.read(32)
        length = read_int(block_stream, 4)
        if not length:
            # the terminating block (or a truncated stream) has zero length
            return bytes()
        data = block_stream.read(length)
        if hashlib.sha256(data).digest() == bhash:
            return data
        raise IOError('Block hash mismatch error.')
def write_block_stream(self, stream, block_length=BLOCK_LENGTH):
"""
Write all data in this buffer, starting at stream position 0, formatted
in hashed blocks to the given `stream`.
For example, writing data from one file into another as hashed blocks::
# create new hashed block io without input stream or data
hb = HashedBlockIO()
# read from a file, write into the empty hb
with open('sample.dat', 'rb') as infile:
hb.write(infile.read())
# write from the hb into a new file
            with open('hb_sample.dat', 'wb') as outfile:
hb.write_block_stream(outfile)
"""
if not (isinstance(stream, io.IOBase) or isinstance(stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
index = 0
self.seek(0)
while True:
data = self.read(block_length)
if data:
stream.write(struct.pack('<I', index))
stream.write(hashlib.sha256(data).digest())
stream.write(struct.pack('<I', len(data)))
stream.write(data)
index += 1
else:
stream.write(struct.pack('<I', index))
                stream.write(b'\x00' * 32)
stream.write(struct.pack('<I', 0))
break
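# A minimal round-trip sketch (an illustrative addition, not part of the
# original module): pack a payload into hashed blocks in memory, then read
# it back through the verifying constructor.
if __name__ == '__main__':
    payload = b'some example payload'
    hb = HashedBlockIO()
    hb.write(payload)
    packed = io.BytesIO()
    hb.write_block_stream(packed)
    packed.seek(0)
    assert HashedBlockIO(block_stream=packed).read() == payload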
|
from odoo import models, fields, api
class AccessGroups(models.Model):
_inherit = 'muk_security.access_groups'
#----------------------------------------------------------
# Database
#----------------------------------------------------------
directories = fields.Many2many(
comodel_name='muk_dms.directory',
relation='muk_dms_directory_groups_rel',
string="Directories",
column1='gid',
column2='aid',
readonly=True)
count_directories = fields.Integer(
compute='_compute_count_directories',
string="Count Directories")
#----------------------------------------------------------
# Read, View
#----------------------------------------------------------
@api.depends('directories')
def _compute_count_directories(self):
for record in self:
record.count_directories = len(record.directories)
|
"""
-----------------------------------------------------------------------------
This source file is part of OSTIS (Open Semantic Technology for Intelligent Systems)
For the latest info, see http://www.ostis.net
Copyright (c) 2010 OSTIS
OSTIS is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OSTIS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with OSTIS. If not, see <http://www.gnu.org/licenses/>.
-----------------------------------------------------------------------------
"""
'''
Created on Oct 2, 2009
@author: Denis Koronchik
'''
import pm
import time
import threading
import sys, traceback
class Processor(threading.Thread):
    def __init__(self, params=None):
        """Constructor
        @param params: dictionary with parameters to start processor module
        Available parameters:
            repo_path - path to repository folder
        @type params: dict
        """
        threading.Thread.__init__(self)
        self.stopped = False
        self.finished = False
        self.started = False
        self.__repo_path = '.'
        if params and 'repo_path' in params:
            self.__repo_path = params['repo_path']
        self.start()
def run(self):
try:
pm.do_init(False, True, self.__repo_path, False)
pm.do_dedicated(False)
except:
print "Error:", sys.exc_info()[0]
traceback.print_exc(file=sys.stdout)
return
self.started = True
        while not self.stopped:
pm.do_step()
time.sleep(0.01)
pm.libsc_deinit()
self.finished = True
    def stop(self):
        self.stopped = True
class Callback(pm.sc_event_multi):
def __init__(self):
pm.sc_event_multi.__init__(self)
self.__disown__()
def activate(self, wait_type, params, len):
print str(params)
class TestOp(pm.ScOperationActSetMember):
def __init__(self, aset):
pm.ScOperationActSetMember.__init__(self, "Test", aset)
def activateImpl(self, arc, el):
print "Hello"
Processor({'repo_path': '../repo/fs_repo'})
|
import logging
from odoo import api, conf
from odoo.tests.common import HttpCase, tagged
_logger = logging.getLogger(__name__)
@tagged("post_install", "-at_install")
class TestProductTmplImage(HttpCase):
def _get_original_image_url(self, px=1024):
return "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1e/Gullfoss%2C_an_iconic_waterfall_of_Iceland.jpg/{}px-Gullfoss%2C_an_iconic_waterfall_of_Iceland.jpg".format(
px
)
def _get_odoo_image_url(self, model, record_id, field):
return "/web/image?model={}&id={}&field={}".format(model, record_id, field)
def test_getting_product_variant_image_fields_urls(self):
assert (
"ir_attachment_url" in conf.server_wide_modules
), "ir_attachment_url is not in server_wide_modules. Please add it via --load parameter"
env = api.Environment(self.registry.test_cr, self.uid, {})
env["ir.config_parameter"].set_param("ir_attachment_url.storage", "url")
product_tmpl = env["product.template"].create(
{
"name": "Test template",
"image": self._get_original_image_url(1024),
"image_medium": self._get_original_image_url(128),
"image_small": self._get_original_image_url(64),
}
)
product_product = env["product.product"].create(
{
"name": "Test product",
"image": False,
"image_medium": False,
"image_small": False,
"product_tmpl_id": product_tmpl.id,
}
)
odoo_image_url = self._get_odoo_image_url(
"product.product", product_product.id, "image"
)
odoo_image_medium_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_medium"
)
odoo_image_small_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_small"
)
product_tmpl_image_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image", product_tmpl
)
product_tmpl_image_medium_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image_medium", product_tmpl
)
product_tmpl_image_small_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image_small", product_tmpl
)
self.assertTrue(product_tmpl_image_attachment)
self.assertTrue(product_tmpl_image_medium_attachment)
self.assertTrue(product_tmpl_image_small_attachment)
self.authenticate("demo", "demo")
self.assertEqual(
self.url_open(odoo_image_url).url, product_tmpl_image_attachment.url
)
self.assertEqual(
self.url_open(odoo_image_medium_url).url,
product_tmpl_image_medium_attachment.url,
)
self.assertEqual(
self.url_open(odoo_image_small_url).url,
product_tmpl_image_small_attachment.url,
)
|
from optparse import OptionParser
import sys
class CLI(object):
color = {
"PINK": "",
"BLUE": "",
"CYAN": "",
"GREEN": "",
"YELLOW": "",
"RED": "",
"END": "",
}
@staticmethod
def show_colors():
CLI.color = {
"PINK": "\033[35m",
"BLUE": "\033[34m",
"CYAN": "\033[36m",
"GREEN": "\033[32m",
"YELLOW": "\033[33m",
"RED": "\033[31m",
"END": "\033[0m",
}
def __init__(self):
self.__config_parser()
def __config_parser(self):
self.__parser = OptionParser(usage="usage: %prog [options] start")
self.__parser.add_option("-c", "--config",
dest="config_file",
default="compressor.yaml",
help="Use a specific config file. If not provided, will search for 'compressor.yaml' in the current directory.")
self.__parser.add_option("-s", "--sync",
dest="sync",
action="store_true",
default=False,
help="Sync files with S3")
self.__parser.add_option("-v", "--version",
action="store_true",
dest="compressor_version",
default=False,
help="Displays compressor version and exit.")
self.__parser.add_option("--color",
action="store_true",
dest="show_colors",
default=False,
help="Output with beautiful colors.")
self.__parser.add_option("--prefix",
dest="prefix",
default="min",
help="Use prefix in output js and css.")
def get_parser(self):
return self.__parser
def parse(self):
return self.__parser.parse_args()
def error_and_exit(self, msg):
self.msg("[ERROR] %s\n" % msg, "RED")
sys.exit(1)
def info_and_exit(self, msg):
self.msg("%s\n" % msg, "BLUE")
sys.exit(0)
def msg(self, msg, color="CYAN"):
print "%s%s%s" % (self.color[color], msg, self.color["END"])
|
from django.db import models
class Channel(models.Model):
channel_id = models.CharField(max_length=50, unique=True)
channel_name = models.CharField(max_length=50, null=True, blank=True)
rtmp_url = models.CharField(max_length=100, null=True, blank=True)
active = models.IntegerField(null=True, blank=True)
start = models.IntegerField(null=True, blank=True)
PID = models.IntegerField(null=True, blank=True)
PGID = models.IntegerField(null=True, blank=True)
client_ip = models.CharField(max_length=50, null=True, blank=True)
sort = models.IntegerField(null=False, blank=True, default=0)
class Meta:
managed = False
db_table = 'channel'
verbose_name = '频道'
verbose_name_plural = '频道管理'
def __str__(self):
return self.channel_name + '(' + self.channel_id + ')'
class Program(models.Model):
channel = models.ForeignKey(Channel, to_field='channel_id', null=True)
start_time = models.DateTimeField(auto_now_add=False, null=True, blank=True)
end_time = models.DateTimeField(auto_now_add=False, null=True, blank=True)
url = models.CharField(max_length=50, null=True, blank=True)
title = models.CharField(max_length=50, null=True, blank=True)
finished = models.IntegerField(null=True, blank=True, default=0)
event_id = models.IntegerField(null=True, blank=True)
class Meta:
managed = False
db_table = 'program'
verbose_name = '节目'
verbose_name_plural = '节目管理'
def __str__(self):
return str(self.channel) + ':' + self.title
|
from __future__ import annotations
from decimal import Decimal
from typing import (
Any,
Mapping,
Sequence,
)
import uuid
from pprint import pprint
import pytest
from ai.backend.common.docker import ImageRef
from ai.backend.common.types import (
AccessKey, AgentId, KernelId,
ResourceSlot, SessionTypes,
)
from ai.backend.manager.scheduler import PendingSession, ExistingSession, AgentContext
from ai.backend.manager.scheduler.dispatcher import load_scheduler
from ai.backend.manager.scheduler.fifo import FIFOSlotScheduler, LIFOSlotScheduler
from ai.backend.manager.scheduler.drf import DRFScheduler
from ai.backend.manager.scheduler.mof import MOFScheduler
def test_load_intrinsic():
assert isinstance(load_scheduler('fifo', {}), FIFOSlotScheduler)
assert isinstance(load_scheduler('lifo', {}), LIFOSlotScheduler)
assert isinstance(load_scheduler('drf', {}), DRFScheduler)
assert isinstance(load_scheduler('mof', {}), MOFScheduler)
example_group_id = uuid.uuid4()
example_total_capacity = ResourceSlot({'cpu': '4.0', 'mem': '4096'})
@pytest.fixture
def example_agents():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
'rocm.devices': Decimal('2'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
]
@pytest.fixture
def example_mixed_agents():
return [
AgentContext(
agent_id=AgentId('i-gpu'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
}),
),
AgentContext(
agent_id=AgentId('i-cpu'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
}),
),
]
@pytest.fixture
def example_agents_first_one_assigned():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('2.0'),
'rocm.devices': Decimal('1'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('2.0'),
'rocm.devices': Decimal('1'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
]
@pytest.fixture
def example_agents_no_valid():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
'rocm.devices': Decimal('2'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
),
]
pending_kernel_ids: Sequence[KernelId] = [
KernelId(uuid.uuid4()) for _ in range(3)
]
existing_kernel_ids: Sequence[KernelId] = [
KernelId(uuid.uuid4()) for _ in range(3)
]
_common_dummy_for_pending_session: Mapping[str, Any] = dict(
    image_ref=ImageRef('lablup/python:3.6-ubuntu18.04'),
domain_name='default',
group_id=example_group_id,
resource_policy={},
resource_opts={},
mounts=[],
mount_map={},
environ={},
bootstrap_script=None,
startup_command=None,
internal_data=None,
preopen_ports=[],
)
_common_dummy_for_existing_session: Mapping[str, Any] = dict(
    image_ref=ImageRef('lablup/python:3.6-ubuntu18.04'),
domain_name='default',
group_id=example_group_id,
)
@pytest.fixture
def example_pending_sessions():
    # lower indices are enqueued first.
return [
PendingSession( # rocm
kernel_id=pending_kernel_ids[0],
access_key=AccessKey('user01'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('1'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
PendingSession( # cuda
kernel_id=pending_kernel_ids[1],
access_key=AccessKey('user02'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('0.5'),
'rocm.devices': Decimal('0'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
PendingSession( # cpu-only
kernel_id=pending_kernel_ids[2],
access_key=AccessKey('user03'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
]
@pytest.fixture
def example_existing_sessions():
return [
ExistingSession(
kernel_id=existing_kernel_ids[0],
access_key=AccessKey('user01'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('1'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
ExistingSession(
kernel_id=existing_kernel_ids[1],
access_key=AccessKey('user02'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('0.5'),
'rocm.devices': Decimal('0'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
ExistingSession(
kernel_id=existing_kernel_ids[2],
access_key=AccessKey('user03'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
]
def _find_and_pop_picked_session(pending_sessions, picked_session_id):
for picked_idx, pending_sess in enumerate(pending_sessions):
if pending_sess.kernel_id == picked_session_id:
break
else:
# no matching entry for picked session?
raise RuntimeError('should not reach here')
return pending_sessions.pop(picked_idx)
def test_fifo_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = FIFOSlotScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == AgentId('i-001')
def test_lifo_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = LIFOSlotScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[2].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == 'i-001'
def test_fifo_scheduler_favor_cpu_for_requests_without_accelerators(
example_mixed_agents,
example_pending_sessions,
):
scheduler = FIFOSlotScheduler({})
for idx in range(3):
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
[])
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_mixed_agents, picked_session)
if idx == 0:
# example_mixed_agents do not have any agent with ROCM accelerators.
assert agent_id is None
elif idx == 1:
assert agent_id == AgentId('i-gpu')
elif idx == 2:
# It should favor the CPU-only agent if the requested slots
# do not include accelerators.
assert agent_id == AgentId('i-cpu')
def test_lifo_scheduler_favor_cpu_for_requests_without_accelerators(
example_mixed_agents,
example_pending_sessions,
):
    # Check the reverse case with the LIFO scheduler.
    # The results must be the same.
scheduler = LIFOSlotScheduler({})
for idx in range(3):
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
[])
assert picked_session_id == example_pending_sessions[-1].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_mixed_agents, picked_session)
if idx == 2:
# example_mixed_agents do not have any agent with ROCM accelerators.
assert agent_id is None
elif idx == 1:
assert agent_id == AgentId('i-gpu')
elif idx == 0:
# It should favor the CPU-only agent if the requested slots
# do not include accelerators.
assert agent_id == AgentId('i-cpu')
def test_drf_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = DRFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
pprint(example_pending_sessions)
assert picked_session_id == example_pending_sessions[1].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == 'i-001'
def test_mof_scheduler_first_assign(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == 'i-001'
def test_mof_scheduler_second_assign(example_agents_first_one_assigned, example_pending_sessions,
example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(
example_agents_first_one_assigned, picked_session)
assert agent_id == 'i-101'
def test_mof_scheduler_no_valid_agent(example_agents_no_valid, example_pending_sessions,
example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents_no_valid, picked_session)
assert agent_id is None
|
"""Statistics analyzer for HotShot."""
import profile
import pstats
import hotshot.log
from hotshot.log import ENTER, EXIT
def load(filename):
return StatsLoader(filename).load()
class StatsLoader:
def __init__(self, logfn):
self._logfn = logfn
self._code = {}
self._stack = []
self.pop_frame = self._stack.pop
def load(self):
p = Profile()
p.get_time = _brokentimer
log = hotshot.log.LogReader(self._logfn)
taccum = 0
for event in log:
what, (filename, lineno, funcname), tdelta = event
if tdelta > 0:
taccum += tdelta
if what == ENTER:
frame = self.new_frame(filename, lineno, funcname)
p.trace_dispatch_call(frame, taccum * 1e-06)
taccum = 0
elif what == EXIT:
frame = self.pop_frame()
p.trace_dispatch_return(frame, taccum * 1e-06)
taccum = 0
return pstats.Stats(p)
def new_frame(self, *args):
try:
code = self._code[args]
except KeyError:
code = FakeCode(*args)
self._code[args] = code
if self._stack:
back = self._stack[-1]
else:
back = None
frame = FakeFrame(code, back)
self._stack.append(frame)
return frame
class Profile(profile.Profile):
def simulate_cmd_complete(self):
pass
class FakeCode:
def __init__(self, filename, firstlineno, funcname):
self.co_filename = filename
self.co_firstlineno = firstlineno
self.co_name = self.__name__ = funcname
class FakeFrame:
def __init__(self, code, back):
self.f_back = back
self.f_code = code
def _brokentimer():
    raise RuntimeError('this timer should not be called')
|
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec']
import codecs
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
QP = 1
BASE64 = 2
SHORTEST = 3
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
CHARSETS = {
    'iso-8859-1': (QP, QP, None),
    'iso-8859-2': (QP, QP, None),
    'iso-8859-3': (QP, QP, None),
    'iso-8859-4': (QP, QP, None),
    'iso-8859-9': (QP, QP, None),
    'iso-8859-10': (QP, QP, None),
    'iso-8859-13': (QP, QP, None),
    'iso-8859-14': (QP, QP, None),
    'iso-8859-15': (QP, QP, None),
    'iso-8859-16': (QP, QP, None),
    'windows-1252': (QP, QP, None),
    'viscii': (QP, QP, None),
    'us-ascii': (None, None, None),
    'big5': (BASE64, BASE64, None),
    'gb2312': (BASE64, BASE64, None),
    'euc-jp': (BASE64, None, 'iso-2022-jp'),
    'shift_jis': (BASE64, None, 'iso-2022-jp'),
    'iso-2022-jp': (BASE64, None, None),
    'koi8-r': (BASE64, BASE64, None),
    'utf-8': (SHORTEST, BASE64, 'utf-8'),
    '8bit': (None, BASE64, 'utf-8'),
    }
ALIASES = {'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'latin_10': 'iso-8859-16',
'latin-10': 'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii'
}
CODEC_MAP = {'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
'us-ascii': None
}
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc is either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (
header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
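# Example (illustrative only, with a made-up charset name): register a
# charset that should be quoted-printable in both headers and bodies,
# give it an alias, and map it onto an existing Python codec.
#
#     add_charset('x-example', QP, QP, 'x-example')
#     add_alias('example', 'x-example')
#     add_codec('x-example', 'latin_1')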
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
module expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
    output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower().encode('ascii')
if not (input_charset in ALIASES or input_charset in CHARSETS):
try:
input_charset = codecs.lookup(input_charset).name
except LookupError:
pass
self.input_charset = ALIASES.get(input_charset, input_charset)
henc, benc, conv = CHARSETS.get(self.input_charset, (
SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
self.input_codec = CODEC_MAP.get(self.input_charset, self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset, self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
        if self.body_encoding == QP:
            return 'quoted-printable'
        elif self.body_encoding == BASE64:
            return 'base64'
        return encode_7or8bit
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec != self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
        if isinstance(s, unicode) or self.input_codec is None:
            return s
        try:
            return unicode(s, self.input_codec, 'replace')
        except LookupError:
            return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
        if to_output:
            codec = self.output_codec
        else:
            codec = self.input_codec
        if not isinstance(ustr, unicode) or codec is None:
            return ustr
        try:
            return ustr.encode(codec, 'replace')
        except LookupError:
            return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
        if self.header_encoding == BASE64:
            return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == QP:
            return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == SHORTEST:
            lenb64 = email.base64mime.base64_len(s)
            lenqp = email.quoprimime.header_quopri_len(s)
            return min(lenb64, lenqp) + len(cset) + MISC_LEN
        return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
        if self.header_encoding == BASE64:
            return email.base64mime.header_encode(s, cset)
        elif self.header_encoding == QP:
            return email.quoprimime.header_encode(s, cset, maxlinelen=None)
        elif self.header_encoding == SHORTEST:
            lenb64 = email.base64mime.base64_len(s)
            lenqp = email.quoprimime.header_quopri_len(s)
            if lenb64 < lenqp:
                return email.base64mime.header_encode(s, cset)
            return email.quoprimime.header_encode(s, cset, maxlinelen=None)
        return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
        if self.body_encoding is BASE64:
            return email.base64mime.body_encode(s)
        elif self.body_encoding is QP:
            return email.quoprimime.body_encode(s)
        return s
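# A short usage sketch (an illustrative addition): common aliases resolve to
# their canonical names, which in turn determine the transfer encodings.
if __name__ == '__main__':
    c = Charset('latin_1')
    assert str(c) == 'iso-8859-1'
    assert c.get_body_encoding() == 'quoted-printable'
    assert Charset('utf-8').get_body_encoding() == 'base64'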
|
import sys
from ivy import hooks, site, templates
try:
import jinja2
except ImportError:
jinja2 = None
env = None
if jinja2:
# Initialize our Jinja environment on the 'init' event hook.
@hooks.register('init')
def init():
# Initialize a template loader.
settings = {
'loader': jinja2.FileSystemLoader(site.theme('templates'))
}
# Check the site's config file for any custom settings.
settings.update(site.config.get('jinja', {}))
# Initialize an Environment instance.
global env
env = jinja2.Environment(**settings)
# Register our template engine callback for files with a .jinja extension.
@templates.register('jinja')
def callback(page, filename):
try:
template = env.get_template(filename)
return template.render(page)
except jinja2.TemplateError as err:
msg = "------------------------\n"
msg += " Jinja Template Error \n"
msg += "------------------------\n\n"
msg += " Template: %s\n" % filename
msg += " Page: %s\n\n" % page['filepath']
msg += " %s: %s" % (err.__class__.__name__, err)
if err.__context__:
cause = err.__context__
msg += "\n\n The following cause was reported:\n\n"
msg += " %s: %s" % (cause.__class__.__name__, cause)
sys.exit(msg)
|
import random
def turn(board, symbol):
    # Pick random coordinates until an empty square ('#') is found.
    # `getboard` is assumed to be provided by the surrounding game code.
    while True:
        x = random.randrange(8)
        y = random.randrange(8)
        if getboard(board, x, y) == '#':
            return (x, y)
|
import re
import json
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
ExtractorError,
find_xpath_attr,
unified_strdate,
determine_ext,
get_element_by_id,
compat_str,
)
class ArteTvIE(InfoExtractor):
_VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html'
_LIVEWEB_URL = r'(?:http://)?liveweb.arte.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
_LIVE_URL = r'index-[0-9]+\.html$'
IE_NAME = u'arte.tv'
@classmethod
def suitable(cls, url):
return any(re.match(regex, url) for regex in (cls._VIDEOS_URL, cls._LIVEWEB_URL))
# TODO implement Live Stream
# from ..utils import compat_urllib_parse
# def extractLiveStream(self, url):
# video_lang = url.split('/')[-4]
# info = self.grep_webpage(
# url,
# r'src="(.*?/videothek_js.*?\.js)',
# 0,
# [
# (1, 'url', u'Invalid URL: %s' % url)
# ]
# )
# http_host = url.split('/')[2]
# next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
# info = self.grep_webpage(
# next_url,
# r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
# '(http://.*?\.swf).*?' +
# '(rtmp://.*?)\'',
# re.DOTALL,
# [
# (1, 'path', u'could not extract video path: %s' % url),
# (2, 'player', u'could not extract video player: %s' % url),
# (3, 'url', u'could not extract video url: %s' % url)
# ]
# )
# video_url = u'%s/%s' % (info.get('url'), info.get('path'))
def _real_extract(self, url):
mobj = re.match(self._VIDEOS_URL, url)
if mobj is not None:
id = mobj.group('id')
lang = mobj.group('lang')
return self._extract_video(url, id, lang)
mobj = re.match(self._LIVEWEB_URL, url)
if mobj is not None:
name = mobj.group('name')
lang = mobj.group('lang')
return self._extract_liveweb(url, name, lang)
if re.search(self._LIVE_URL, url) is not None:
raise ExtractorError(u'Arte live streams are not yet supported, sorry')
# self.extractLiveStream(url)
# return
def _extract_video(self, url, video_id, lang):
"""Extract from videos.arte.tv"""
ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
config_xml_url = config_node.attrib['ref']
config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')
video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
def _key(m):
quality = m.group('quality')
if quality == 'hd':
return 2
else:
return 1
# We pick the best quality
video_urls = sorted(video_urls, key=_key)
video_url = list(video_urls)[-1].group('url')
title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
config_xml, 'thumbnail')
return {'id': video_id,
'title': title,
'thumbnail': thumbnail,
'url': video_url,
'ext': 'flv',
}
def _extract_liveweb(self, url, name, lang):
"""Extract form http://liveweb.arte.tv/"""
webpage = self._download_webpage(url, name)
video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
config_xml = self._download_webpage('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
video_id, u'Downloading information')
config_doc = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
event_doc = config_doc.find('event')
url_node = event_doc.find('video').find('urlHd')
if url_node is None:
url_node = event_doc.find('urlSd')
return {'id': video_id,
'title': event_doc.find('name%s' % lang.capitalize()).text,
'url': url_node.text.replace('MP4', 'mp4'),
'ext': 'flv',
'thumbnail': self._og_search_thumbnail(webpage),
}
class ArteTVPlus7IE(InfoExtractor):
IE_NAME = u'arte.tv:+7'
_VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
@classmethod
def _extract_url_info(cls, url):
mobj = re.match(cls._VALID_URL, url)
lang = mobj.group('lang')
# This is not a real id, it can be for example AJT for the news
# http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
video_id = mobj.group('id')
return video_id, lang
def _real_extract(self, url):
video_id, lang = self._extract_url_info(url)
webpage = self._download_webpage(url, video_id)
return self._extract_from_webpage(webpage, video_id, lang)
def _extract_from_webpage(self, webpage, video_id, lang):
json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')
json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
self.report_extraction(video_id)
info = json.loads(json_info)
player_info = info['videoJsonPlayer']
info_dict = {
'id': player_info['VID'],
'title': player_info['VTI'],
'description': player_info.get('VDE'),
'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}
all_formats = player_info['VSR'].values()
# Some formats use the m3u8 protocol
all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
def _match_lang(f):
if f.get('versionCode') is None:
return True
# Return true if that format is in the language of the url
if lang == 'fr':
l = 'F'
elif lang == 'de':
l = 'A'
regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
return any(re.match(r, f['versionCode']) for r in regexes)
# Some formats may not be in the same language as the url
formats = filter(_match_lang, all_formats)
formats = list(formats) # in python3 filter returns an iterator
if not formats:
# Some videos are only available in the 'Originalversion'
# they aren't tagged as being in French or German
if all(f['versionCode'] == 'VO' for f in all_formats):
formats = all_formats
else:
raise ExtractorError(u'The formats list is empty')
if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
def sort_key(f):
return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
else:
def sort_key(f):
return (
# Sort first by quality
int(f.get('height',-1)),
int(f.get('bitrate',-1)),
# The original version with subtitles has lower relevance
re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
# The version with sourds/mal subtitles has also lower relevance
re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
)
formats = sorted(formats, key=sort_key)
def _format(format_info):
quality = ''
height = format_info.get('height')
if height is not None:
quality = compat_str(height)
bitrate = format_info.get('bitrate')
if bitrate is not None:
quality += '-%d' % bitrate
if format_info.get('versionCode') is not None:
format_id = u'%s-%s' % (quality, format_info['versionCode'])
else:
format_id = quality
info = {
'format_id': format_id,
'format_note': format_info.get('versionLibelle'),
'width': format_info.get('width'),
'height': height,
}
if format_info['mediaType'] == u'rtmp':
info['url'] = format_info['streamer']
info['play_path'] = 'mp4:' + format_info['url']
info['ext'] = 'flv'
else:
info['url'] = format_info['url']
info['ext'] = determine_ext(info['url'])
return info
info_dict['formats'] = [_format(f) for f in formats]
return info_dict
class ArteTVCreativeIE(ArteTVPlus7IE):
IE_NAME = u'arte.tv:creative'
_VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'
_TEST = {
u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
u'file': u'050489-002.mp4',
u'info_dict': {
u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
},
}
class ArteTVFutureIE(ArteTVPlus7IE):
IE_NAME = u'arte.tv:future'
_VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'
_TEST = {
u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
u'file': u'050940-003.mp4',
u'info_dict': {
u'title': u'Les champignons au secours de la planète',
},
}
def _real_extract(self, url):
anchor_id, lang = self._extract_url_info(url)
webpage = self._download_webpage(url, anchor_id)
row = get_element_by_id(anchor_id, webpage)
return self._extract_from_webpage(row, anchor_id, lang)
|
import sys
line = sys.stdin.readline()  # skip the header
line = sys.stdin.readline()
groups = {}  # maps each key (first column) to the set of values seen with it
while line:
    v = line.split()
    if v[0] not in groups:
        groups[v[0]] = set()
    groups[v[0]].add(v[1])
    line = sys.stdin.readline()
# order the keys by how many distinct values they carry (ascending)
s = [k for (_, k) in sorted([(len(v), k) for (k, v) in groups.items()])]
print ' '.join(reversed(s))
# print the matrix of pairwise intersection sizes
for i in s:
    print i,
    for j in reversed(s):
        print len(groups[i].intersection(groups[j])),
    print
|
"""TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda : None)
func()
class BaseTestSuite(object):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return '<%s tests=%s>' % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self:
cases += test.countTestCases()
return cases
def addTest(self, test):
if not hasattr(test, '__call__'):
raise TypeError('{} is not callable'.format(repr(test)))
if isinstance(test, type) and issubclass(test, (
case.TestCase, TestSuite)):
raise TypeError('TestCases and TestSuites must be instantiated before passing them to addTest()')
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, basestring):
raise TypeError('tests must be an iterable of tests, not a string')
for test in tests:
self.addTest(test)
def run(self, result):
for test in self:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for test in self:
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False):
continue
if not debug:
test(result)
else:
test.debug()
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self.run(debug, True)
    def _handleClassSetUp(self, test, result):
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, '__unittest_skip__', False):
            return
        try:
            currentClass._classSetupFailed = False
        except TypeError:
            pass
        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                try:
                    setUpClass()
                except Exception as e:
                    if isinstance(result, _DebugResult):
                        raise
                    currentClass._classSetupFailed = True
                    className = util.strclass(currentClass)
                    errorName = 'setUpClass (%s)' % className
                    self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
    def _handleModuleFixture(self, test, result):
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return
        self._handleModuleTearDown(result)
        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                try:
                    setUpModule()
                except Exception as e:
                    if isinstance(result, _DebugResult):
                        raise
                    result._moduleSetUpFailed = True
                    errorName = 'setUpModule (%s)' % currentModule
                    self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
    def _handleModuleTearDown(self, result):
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            return
        try:
            module = sys.modules[previousModule]
        except KeyError:
            return
        tearDownModule = getattr(module, 'tearDownModule', None)
        if tearDownModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                try:
                    tearDownModule()
                except Exception as e:
                    if isinstance(result, _DebugResult):
                        raise
                    errorName = 'tearDownModule (%s)' % previousModule
                    self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
    def _tearDownPreviousClass(self, test, result):
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, '__unittest_skip__', False):
            return
        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                try:
                    tearDownClass()
                except Exception as e:
                    if isinstance(result, _DebugResult):
                        raise
                    className = util.strclass(previousClass)
                    errorName = 'tearDownClass (%s)' % className
                    self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return '<ErrorHolder description=%r>' % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"""A crude way to tell apart testcases and suites with duck-typing"""
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"""Used by the TestSuite to hold previous class when running in debug."""
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
|
from google.analytics import data_v1beta
async def sample_batch_run_pivot_reports():
# Create a client
client = data_v1beta.BetaAnalyticsDataAsyncClient()
# Initialize request argument(s)
request = data_v1beta.BatchRunPivotReportsRequest(
)
# Make the request
response = await client.batch_run_pivot_reports(request=request)
# Handle the response
print(response)
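# A minimal way to drive the coroutine above (an assumption, not part of the
# generated sample): asyncio.run() creates an event loop and awaits it.
import asyncio

if __name__ == "__main__":
    asyncio.run(sample_batch_run_pivot_reports())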
|
"""
This module, debugging.py, will contain code related to debugging (such as printing error messages).
"""
class MyException(Exception):
"""
Just something useful to have to throw some of my own custom exception.
"""
pass
class ParameterException(Exception):
"""
A custom exception for when a function receives bad parameter data.
"""
def __init__(self, message):
super(ParameterException, self).__init__(message)
class AbstractMethodNotImplementedException(Exception):
"""
A custom exception for when a function gets called that hasn't been set in a child class.
"""
    def __init__(self, message):
super(AbstractMethodNotImplementedException, self).__init__(message)
def raise_exception(exception, message):
raise exception(message)
TCP_LOCAL_HOST = 'tcp://127.0.0.1:'
LOCAL_HOST = '127.0.0.1'
NEXUS_DEV_RECEIVE_PORT = 40000
NEXUS_DEV_MANUAL_COMMUNICATION_PORT = 40001
NEXUS_DEV_AUTOMATED_COMMUNICATION_PORT = 40002
starting_port = NEXUS_DEV_AUTOMATED_COMMUNICATION_PORT + 1
def get_a_free_port():
global starting_port
# We can assume ports are free because ports above 30000 have been sealed off.
# TODO: THIS WILL BREAK WHEN MORE THAN DEV EXISTS.
starting_port += 1
return starting_port - 1
class TextColors:
HEADER = '\033[95m'
OK_BLUE = '\033[94m'
OK_GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def print_text_with_color(text, color, end=None):
if end is None:
print(color + text + TextColors.ENDC + '\n')
else:
        print(color + text + TextColors.ENDC, end=end)
def terminate(termination_message=''):
    if termination_message == '':
        print_text_with_color('Program termination has been initiated, goodbye!', TextColors.FAIL)
else:
print_text_with_color(termination_message, TextColors.WARNING, '')
if not termination_message.endswith('.'):
print_text_with_color('. The program will now terminate.', TextColors.FAIL)
else:
print_text_with_color(' The program will now terminate.', TextColors.FAIL)
exit()
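# Minimal demo of the helpers above (illustrative only, not part of the
# original module): print one sample line per color constant.
if __name__ == '__main__':
    for _name in ('HEADER', 'OK_BLUE', 'OK_GREEN', 'WARNING', 'FAIL'):
        print_text_with_color('sample %s text' % _name, getattr(TextColors, _name))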
|
"""Tests for the kraken sensor platform."""
from datetime import timedelta
from unittest.mock import patch
from pykrakenapi.pykrakenapi import KrakenAPIError
from homeassistant.components.kraken.const import (
CONF_TRACKED_ASSET_PAIRS,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TRACKED_ASSET_PAIR,
DOMAIN,
)
from homeassistant.const import CONF_SCAN_INTERVAL, EVENT_HOMEASSISTANT_START
import homeassistant.util.dt as dt_util
from .const import (
MISSING_PAIR_TICKER_INFORMATION_RESPONSE,
MISSING_PAIR_TRADEABLE_ASSET_PAIR_RESPONSE,
TICKER_INFORMATION_RESPONSE,
TRADEABLE_ASSET_PAIR_RESPONSE,
)
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_sensor(hass):
"""Test that sensor has a value."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"pykrakenapi.KrakenAPI.get_tradable_asset_pairs",
return_value=TRADEABLE_ASSET_PAIR_RESPONSE,
), patch(
"pykrakenapi.KrakenAPI.get_ticker_information",
return_value=TICKER_INFORMATION_RESPONSE,
):
entry = MockConfigEntry(
domain=DOMAIN,
unique_id="0123456789",
options={
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_TRACKED_ASSET_PAIRS: [
"ADA/XBT",
"ADA/ETH",
"XBT/EUR",
"XBT/GBP",
"XBT/USD",
"XBT/JPY",
],
},
)
entry.add_to_hass(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
# Pre-create registry entries for disabled by default sensors
        for sensor_suffix in (
            "ask_volume",
            "last_trade_closed",
            "bid_volume",
            "volume_today",
            "volume_last_24h",
            "volume_weighted_average_today",
            "volume_weighted_average_last_24h",
            "number_of_trades_today",
            "number_of_trades_last_24h",
            "low_last_24h",
            "high_last_24h",
            "opening_price_today",
        ):
            registry.async_get_or_create(
                "sensor",
                DOMAIN,
                f"xbt_usd_{sensor_suffix}",
                suggested_object_id=f"xbt_usd_{sensor_suffix}",
                disabled_by=None,
            )
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
xbt_usd_sensor = hass.states.get("sensor.xbt_usd_ask")
assert xbt_usd_sensor.state == "0.0003494"
assert xbt_usd_sensor.attributes["icon"] == "mdi:currency-usd"
xbt_eur_sensor = hass.states.get("sensor.xbt_eur_ask")
assert xbt_eur_sensor.state == "0.0003494"
assert xbt_eur_sensor.attributes["icon"] == "mdi:currency-eur"
ada_xbt_sensor = hass.states.get("sensor.ada_xbt_ask")
assert ada_xbt_sensor.state == "0.0003494"
assert ada_xbt_sensor.attributes["icon"] == "mdi:currency-btc"
xbt_jpy_sensor = hass.states.get("sensor.xbt_jpy_ask")
assert xbt_jpy_sensor.state == "0.0003494"
assert xbt_jpy_sensor.attributes["icon"] == "mdi:currency-jpy"
xbt_gbp_sensor = hass.states.get("sensor.xbt_gbp_ask")
assert xbt_gbp_sensor.state == "0.0003494"
assert xbt_gbp_sensor.attributes["icon"] == "mdi:currency-gbp"
ada_eth_sensor = hass.states.get("sensor.ada_eth_ask")
assert ada_eth_sensor.state == "0.0003494"
assert ada_eth_sensor.attributes["icon"] == "mdi:cash"
xbt_usd_ask_volume = hass.states.get("sensor.xbt_usd_ask_volume")
assert xbt_usd_ask_volume.state == "15949"
xbt_usd_last_trade_closed = hass.states.get("sensor.xbt_usd_last_trade_closed")
assert xbt_usd_last_trade_closed.state == "0.0003478"
xbt_usd_bid_volume = hass.states.get("sensor.xbt_usd_bid_volume")
assert xbt_usd_bid_volume.state == "20792"
xbt_usd_volume_today = hass.states.get("sensor.xbt_usd_volume_today")
assert xbt_usd_volume_today.state == "146300.24906838"
xbt_usd_volume_last_24h = hass.states.get("sensor.xbt_usd_volume_last_24h")
assert xbt_usd_volume_last_24h.state == "253478.04715403"
xbt_usd_volume_weighted_average_today = hass.states.get(
"sensor.xbt_usd_volume_weighted_average_today"
)
assert xbt_usd_volume_weighted_average_today.state == "0.000348573"
xbt_usd_volume_weighted_average_last_24h = hass.states.get(
"sensor.xbt_usd_volume_weighted_average_last_24h"
)
assert xbt_usd_volume_weighted_average_last_24h.state == "0.000344881"
xbt_usd_number_of_trades_today = hass.states.get(
"sensor.xbt_usd_number_of_trades_today"
)
assert xbt_usd_number_of_trades_today.state == "82"
xbt_usd_number_of_trades_last_24h = hass.states.get(
"sensor.xbt_usd_number_of_trades_last_24h"
)
assert xbt_usd_number_of_trades_last_24h.state == "128"
xbt_usd_low_last_24h = hass.states.get("sensor.xbt_usd_low_last_24h")
assert xbt_usd_low_last_24h.state == "0.0003446"
xbt_usd_high_last_24h = hass.states.get("sensor.xbt_usd_high_last_24h")
assert xbt_usd_high_last_24h.state == "0.0003521"
xbt_usd_opening_price_today = hass.states.get(
"sensor.xbt_usd_opening_price_today"
)
assert xbt_usd_opening_price_today.state == "0.0003513"
async def test_missing_pair_marks_sensor_unavailable(hass):
"""Test that a missing tradable asset pair marks the sensor unavailable."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"pykrakenapi.KrakenAPI.get_tradable_asset_pairs",
return_value=TRADEABLE_ASSET_PAIR_RESPONSE,
) as tradeable_asset_pairs_mock, patch(
"pykrakenapi.KrakenAPI.get_ticker_information",
return_value=TICKER_INFORMATION_RESPONSE,
    ) as ticker_information_mock:
entry = MockConfigEntry(
domain=DOMAIN,
options={
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_TRACKED_ASSET_PAIRS: [DEFAULT_TRACKED_ASSET_PAIR],
},
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.xbt_usd_ask")
assert sensor.state == "0.0003494"
tradeable_asset_pairs_mock.return_value = (
MISSING_PAIR_TRADEABLE_ASSET_PAIR_RESPONSE
)
        ticker_information_mock.side_effect = KrakenAPIError(
"EQuery:Unknown asset pair"
)
async_fire_time_changed(
hass, utcnow + timedelta(seconds=DEFAULT_SCAN_INTERVAL * 2)
)
await hass.async_block_till_done()
        ticker_information_mock.side_effect = None
        ticker_information_mock.return_value = MISSING_PAIR_TICKER_INFORMATION_RESPONSE
async_fire_time_changed(
hass, utcnow + timedelta(seconds=DEFAULT_SCAN_INTERVAL * 2)
)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.xbt_usd_ask")
assert sensor.state == "unavailable"
|
"""
pluginconf.d configuration file - Files
=======================================
Shared mappers for parsing and extracting data from
``/etc/yum/pluginconf.d/*.conf`` files. Parsers contained
in this module are:
PluginConfD - files ``/etc/yum/pluginconf.d/*.conf``
---------------------------------------------------
PluginConfDIni - files ``/etc/yum/pluginconf.d/*.conf``
-------------------------------------------------------
"""
from insights.core import IniConfigFile, LegacyItemAccess, Parser
from insights.core.plugins import parser
from insights.parsers import get_active_lines
from insights.specs import Specs
from insights.util import deprecated
@parser(Specs.pluginconf_d)
class PluginConfD(LegacyItemAccess, Parser):
"""
.. warning::
This parser is deprecated, please use
:py:class:`insights.parsers.pluginconf_d.PluginConfDIni` instead
Class to parse configuration file under ``pluginconf.d``
Sample configuration::
[main]
enabled = 0
gpgcheck = 1
timeout = 120
# You can specify options per channel, e.g.:
#
#[rhel-i386-server-5]
#enabled = 1
#
#[some-unsigned-custom-channel]
#gpgcheck = 0
"""
def parse_content(self, content):
deprecated(PluginConfD, "Deprecated. Use 'PluginConfDIni' instead.")
plugin_dict = {}
section_dict = {}
key = None
for line in get_active_lines(content):
if line.startswith('['):
section_dict = {}
plugin_dict[line[1:-1]] = section_dict
elif '=' in line:
key, _, value = line.partition("=")
key = key.strip()
section_dict[key] = value.strip()
else:
if key:
section_dict[key] = ','.join([section_dict[key], line])
self.data = plugin_dict
def __iter__(self):
for sec in self.data:
yield sec
@parser(Specs.pluginconf_d)
class PluginConfDIni(IniConfigFile):
"""
Read yum plugin config files, in INI format, using the standard INI file
parser class.
Sample configuration::
[main]
enabled = 0
gpgcheck = 1
timeout = 120
# You can specify options per channel, e.g.:
#
#[rhel-i386-server-5]
#enabled = 1
#
#[some-unsigned-custom-channel]
#gpgcheck = 0
[test]
test_multiline_config = http://example.com/repos/test/
http://mirror_example.com/repos/test/
Examples:
>>> type(conf)
<class 'insights.parsers.pluginconf_d.PluginConfDIni'>
>>> conf.sections()
['main', 'test']
>>> conf.has_option('main', 'gpgcheck')
True
>>> conf.get("main", "enabled")
'0'
>>> conf.getint("main", "timeout")
120
>>> conf.getboolean("main", "enabled")
False
>>> conf.get("test", "test_multiline_config")
'http://example.com/repos/test/ http://mirror_example.com/repos/test/'
"""
pass
|
from cobra.core.loading import get_model
from cobra.core import json
class UserConfig(object):
default_config = {
'guide.task.participant': '1',
'guide.document.share': '1',
'guide.customer.share': '1',
'guide.workflow.operation': '1',
'guide.workflow.createform': '1',
'order.task.search': 'default',
'order.task.searchDirection': 'DESC',
'portal.workdyna': 'subordinates-task',
        'system.menu.display': '',
'viewState.task': 'list',
'guide.biaoge.showintro': '1',
'workreport.push.set': '1',
'agenda.push.set': '1'
}
def __init__(self, user):
self.__user_config = self.__build_user_config(user)
def __build_user_config(self, user):
UserOption = get_model('option', 'UserOption')
u_c = {}
for k, v in self.default_config.items():
u_c[k] = UserOption.objects.get_value(user, None, k, v)
return u_c
def to_python(self):
configs = []
for k, v in self.__user_config.items():
m = {
'configKey': k,
'configValue': v
}
configs.append(m)
return configs
def to_json(self):
return json.dumps(self.to_python())
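# Illustrative usage sketch (assumes a configured Django project and a
# ``user`` model instance; not runnable standalone):
#
#     config = UserConfig(user)
#     config.to_python()  # [{'configKey': ..., 'configValue': ...}, ...]
#     config.to_json()    # the same list serialized as a JSON string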
|
""" Example code about how to run raw_file_io
python3 -m vispek.examples.run_raw_file_io \
--in_path /Users/huaminli/Downloads/data \
--out_path /Users/huaminli/Desktop/vispek/data
"""
import argparse
from vispek.lib.io.raw_file_io import RawFileIO
def run_file_io(args):
my_file_io = RawFileIO(args.in_path, args.out_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Example code about how to run raw_file_io')
parser.add_argument(
        '--in_path', type=str,
        help='absolute path to the directory that contains raw csv files')
parser.add_argument(
        '--out_path', type=str,
        help='absolute path to the directory that contains ' +
             'preprocessed files')
args = parser.parse_args()
print(args.in_path)
print(args.out_path)
run_file_io(args)
|
import datetime
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql import JSONB
from pns.app import app, db
class SerializationMixin():
"""serialization mixin for sqlalchemy model object
"""
def to_dict(self, *exceptions, **extra_payload):
"""get dict representation of the object
:param list exceptions: a list to discard from dict
:param dict extra_payload: new parameters to add to dict
"""
_dict = ({c.name: getattr(self, c.name) for c in self.__table__.columns
if c.name not in exceptions})
_dict.update(**extra_payload)
return _dict
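# Illustrative call (assumes a mapped instance ``user``; values are
# hypothetical):
#     user.to_dict('pns_id', role='admin')
# returns the column dict without 'pns_id', extended with {'role': 'admin'}.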
subscriptions = db.Table('subscriptions',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
db.Column('channel_id', db.Integer, db.ForeignKey('channel.id'), nullable=False),
UniqueConstraint('user_id', 'channel_id'))
channel_devices = db.Table('channel_devices',
db.Column('channel_id', db.Integer, db.ForeignKey('channel.id'), nullable=False),
db.Column('device_id', db.Integer, db.ForeignKey('device.id'), nullable=False),
UniqueConstraint('channel_id', 'device_id'))
class User(db.Model, SerializationMixin):
"""user resource
"""
id = db.Column(db.Integer, primary_key=True)
# pns_id is a unique identifier for easy third-party integration (email, citizen id etc.)
pns_id = db.Column(db.String(255), unique=True, nullable=False)
subscriptions = db.relationship('Channel',
secondary=subscriptions,
lazy='dynamic',
backref=db.backref('subscribers', lazy='dynamic'))
devices = db.relationship('Device', backref='user', lazy='dynamic',
cascade='all, delete, delete-orphan')
created_at = db.Column(db.DateTime, default=datetime.datetime.now)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def __repr__(self):
return '<User %r>' % self.id
class Channel(db.Model, SerializationMixin):
"""channel resource
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), unique=True, nullable=False)
description = db.Column(db.Text)
devices = db.relationship('Device',
secondary=channel_devices,
lazy='dynamic',
backref=db.backref('channels', lazy='dynamic'))
alerts = db.relationship('Alert', backref='channel', lazy='dynamic',
cascade='all, delete, delete-orphan')
created_at = db.Column(db.DateTime, default=datetime.datetime.now)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def subscribe_user(self, user):
try:
self.subscribers.append(user)
for device in user.devices.all():
self.devices.append(device)
db.session.add(self)
db.session.commit()
except Exception as ex:
db.session.rollback()
app.logger.exception(ex)
return False
return True
def unsubscribe_user(self, user):
try:
self.subscribers.remove(user)
for device in user.devices.all():
self.devices.remove(device)
db.session.commit()
except Exception as ex:
db.session.rollback()
app.logger.exception(ex)
return False
return True
def __repr__(self):
return '<Channel %r>' % self.id
class Alert(db.Model, SerializationMixin):
"""alert resource
"""
id = db.Column(db.Integer, primary_key=True)
channel_id = db.Column(db.Integer, db.ForeignKey('channel.id'), index=True)
payload = db.Column(JSONB, nullable=False)
created_at = db.Column(db.DateTime, default=datetime.datetime.now)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def __repr__(self):
return '<Alert %r>' % self.id
class Device(db.Model, SerializationMixin):
"""device resource
"""
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True, nullable=False)
platform = db.Column(db.String(10), index=True, nullable=False)
platform_id = db.Column(db.Text, unique=True, nullable=False)
mobile_app_id = db.Column(db.Text, index=True)
mobile_app_ver = db.Column(db.Integer, index=True)
mute = db.Column(db.Boolean, default=False, nullable=False)
created_at = db.Column(db.DateTime, default=datetime.datetime.now)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def subscribe_to_channels(self):
"""subscribe new device to existing channels
"""
try:
for channel in self.user.subscriptions.all():
channel.devices.append(self)
db.session.add(self.user)
db.session.commit()
except Exception as ex:
db.session.rollback()
app.logger.exception(ex)
return False
return True
def __repr__(self):
return '<Device %r>' % self.id
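# Illustrative subscription flow (assumes an application context and a
# database session; names and values are hypothetical):
#     user = User(pns_id='alice@example.com')
#     channel = Channel(name='alerts')
#     db.session.add_all([user, channel])
#     db.session.commit()
#     channel.subscribe_user(user)  # True on success, False after rollback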
if __name__ == '__main__':
db.create_all()
|
import ditagen.dita
from ditagen.dtdgen import Particle as Particle
from ditagen.dtdgen import Choice as Choice
from ditagen.dtdgen import Name as Name
from ditagen.dtdgen import Seq as Seq
from ditagen.dtdgen import Attribute as Attribute
from ditagen.dtdgen import Param as Param
from ditagen.dtdgen import ParameterEntity as ParameterEntity
OPTIONAL = Particle.Occurrences.OPTIONAL
ZERO_OR_MORE = Particle.Occurrences.ZERO_OR_MORE
class TopicElement(ditagen.dita.DitaElement):
"""Topic element."""
name = u"topic"
cls = u"- topic/topic "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("body"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ConceptElement(ditagen.dita.DitaElement):
"""Concept element."""
name = u"concept"
cls = u"- topic/topic concept/concept "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("conbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class TaskElement(ditagen.dita.DitaElement):
"""Task element."""
name = u"task"
cls = u"- topic/topic task/task "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("taskbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ReferenceElement(ditagen.dita.DitaElement):
"""Reference element."""
name = u"reference"
cls = u"- topic/topic reference/reference "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("refbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class GlossentryElement(ditagen.dita.DitaElement):
"""Glossary entry element."""
name = u"glossentry"
cls = u"- topic/topic concept/concept glossentry/glossentry "
model = Seq([
Choice(ParameterEntity("glossterm")),
Choice(ParameterEntity("glossdef"), OPTIONAL),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("glossBody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class GlossgroupElement(ditagen.dita.DitaElement):
"""Glossary group element."""
name = u"glossgroup"
cls = u"- topic/topic concept/concept glossgroup/glossgroup "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class LearningBaseElement(ditagen.dita.DitaElement):
"""Learning Base element."""
name = u"learningBase"
cls = u"- topic/topic learningBase/learningBase "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningBasebody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class LearningAssessmentElement(ditagen.dita.DitaElement):
"""Learning Assessment element."""
name = u"learningAssessment"
cls = u"- topic/topic learningBase/learningBase learningAssessment/learningAssessment "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningAssessmentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningOverviewElement(ditagen.dita.DitaElement):
"""Learning Overview element."""
name = u"learningOverview"
cls = u"- topic/topic learningBase/learningBase learningOverview/learningOverview "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningOverviewbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningPlanElement(ditagen.dita.DitaElement):
"""Learning Plan element."""
name = u"learningPlan"
cls = u"- topic/topic learningBase/learningBase learningPlan/learningPlan "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningPlanbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningSummaryElement(ditagen.dita.DitaElement):
"""Learning Summary element."""
name = u"learningSummary"
cls = u"- topic/topic learningBase/learningBase learningSummary/learningSummary "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningSummarybody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningContentElement(ditagen.dita.DitaElement):
"""Learning Content element."""
name = u"learningContent"
cls = u"- topic/topic learningBase/learningBase learningContent/learningContent "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningContentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class SubjectSchemeElement(ditagen.dita.DitaElement):
"""Subject scheme element."""
name = u"subjectScheme"
cls = u"- map/map subjectScheme/subjectScheme "
model = Seq([
Choice(ParameterEntity("title"), OPTIONAL),
Choice(ParameterEntity("topicmeta"), OPTIONAL),
Choice([
ParameterEntity("anchor"),
ParameterEntity("data.elements.incl"),
ParameterEntity("enumerationdef"),
ParameterEntity("hasInstance"),
ParameterEntity("hasKind"),
ParameterEntity("hasNarrower"),
ParameterEntity("hasPart"),
ParameterEntity("hasRelated"),
ParameterEntity("navref"),
ParameterEntity("relatedSubjects"),
ParameterEntity("reltable"),
ParameterEntity("schemeref"),
ParameterEntity("subjectdef"),
ParameterEntity("subjectHead"),
ParameterEntity("subjectRelTable"),
ParameterEntity("topicref")
], ZERO_OR_MORE)
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class MapElement(ditagen.dita.DitaElement):
"""Map element."""
name = u"map"
cls = u"- map/map "
model = Seq([
Choice(ParameterEntity("title"), OPTIONAL),
Choice(ParameterEntity("topicmeta"), OPTIONAL),
Choice([
ParameterEntity("anchor"),
ParameterEntity("data.elements.incl"),
ParameterEntity("navref"),
ParameterEntity("reltable"),
ParameterEntity("topicref")
], ZERO_OR_MORE)
])
attrs = [
Attribute("title", "CDATA", "#IMPLIED"),
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class BookMapElement(ditagen.dita.DitaElement):
"""BookMap element."""
name = u"bookmap"
cls = u"- map/map bookmap/bookmap "
model = Seq([
Choice([Choice(ParameterEntity("title")), Choice(ParameterEntity("booktitle"))], OPTIONAL),
Choice(ParameterEntity("bookmeta"), OPTIONAL),
Choice(ParameterEntity("frontmatter"), OPTIONAL),
Choice(ParameterEntity("chapter"), ZERO_OR_MORE),
Choice(ParameterEntity("part"), ZERO_OR_MORE),
Choice([Choice(ParameterEntity("appendices"), OPTIONAL), Choice(ParameterEntity("appendix"), ZERO_OR_MORE)]),
Choice(ParameterEntity("backmatter"), OPTIONAL),
Choice(ParameterEntity("reltable"), ZERO_OR_MORE)
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class TopicType(ditagen.dita.Type):
"""Topic topic type."""
id = u"topic"
file = u"base/dtd/topic" # the .dtd file is at technicalContent
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Topic//EN"
title = u"Topic"
parent = None
root = TopicElement()
class ConceptType(TopicType):
"""Concept topic type."""
id = u"concept"
file = u"technicalContent/dtd/concept"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Concept//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Concept//EN"
title = u"Concept"
parent = TopicType()
root = ConceptElement()
class TaskType(TopicType):
"""Task topic type."""
id = u"task"
file = u"technicalContent/dtd/task"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task//EN"
title = u"Task"
parent = TopicType()
root = TaskElement()
def __init__(self):
super(TaskType, self).__init__()
#self.required_domains = [StrictTaskbodyConstraints]
class GeneralTaskType(ditagen.dita.ShellType):
"""General Task topic type."""
def __init__(self):
super(GeneralTaskType, self).__init__(u"generalTask", u"General Task", TaskType())
#self.parent.required_domains = []
class ReferenceType(TopicType):
"""Reference topic type."""
id = u"reference"
file = u"technicalContent/dtd/reference"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Reference//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Reference//EN"
title = u"Reference"
parent = TopicType()
root = ReferenceElement()
class MapType(ditagen.dita.Type):
"""Map topic type."""
id = u"map"
file = u"base/dtd/map" # the .dtd file is at technicalContent
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map//EN"
title = u"Map"
parent = None
root = MapElement()
class BookMapType(MapType):
"""BookMap topic type."""
id = u"bookmap"
file = u"bookmap/dtd/bookmap"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 BookMap//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 BookMap//EN"
title = u"BookMap"
parent = MapType()
root = BookMapElement()
class GlossentryType(ConceptType):
"""Glossary entry topic type."""
id = u"glossentry"
file = u"technicalContent/dtd/glossentry"
pi_entity = u"-//OASIS//ENTITIES DITA Glossary Entry//EN"
pi_module = u"-//OASIS//ELEMENTS DITA Glossary Entry//EN"
title = u"Glossary Entry"
parent = ConceptType()
root = GlossentryElement()
class GlossgroupType(ConceptType):
"""Glossary group topic type."""
id = u"glossgroup"
file = u"technicalContent/dtd/glossgroup"
pi_entity = u"-//OASIS//ENTITIES DITA Glossary Group//EN"
pi_module = u"-//OASIS//ELEMENTS DITA Glossary Group//EN"
title = u"Glossary Group"
parent = ConceptType()
root = GlossgroupElement()
class MachineryTaskType(ditagen.dita.ShellType):
"""Machinery Task topic type."""
def __init__(self):
super(MachineryTaskType, self).__init__(u"machineryTask", u"Machinery Task", TaskType(), file=u"machineryIndustry/dtd/machineryTask")
#self.parent.required_domains = [MachineryTaskbodyConstraints]
class LearningBaseType(TopicType):
"""Learning Base topic type."""
id = u"learningBase"
file = u"learning/dtd/learningBase"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Base//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Base//EN"
title = u"Learning Base"
parent = TopicType()
root = LearningBaseElement()
class LearningAssessmentType(LearningBaseType):
"""Learning Assessment topic type."""
id = u"learningAssessment"
file = u"learning/dtd/learningAssessment"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Assessment//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Assessment//EN"
title = u"Learning Assessment"
parent = LearningBaseType()
root = LearningAssessmentElement()
class LearningOverviewType(LearningBaseType):
"""Learning Overview topic type."""
id = u"learningOverview"
file = u"learning/dtd/learningOverview"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Overview//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Overview//EN"
title = u"Learning Overview"
parent = LearningBaseType()
root = LearningOverviewElement()
class LearningPlanType(LearningBaseType):
"""Learning Plan topic type."""
id = u"learningPlan"
file = u"learning/dtd/learningPlan"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Plan//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Plan//EN"
title = u"Learning Plan"
parent = LearningBaseType()
root = LearningPlanElement()
class LearningSummaryType(LearningBaseType):
"""Learning Summary topic type."""
id = u"learningSummary"
file = u"learning/dtd/learningSummary"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Summary//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Summary//EN"
title = u"Learning Summary"
parent = LearningBaseType()
root = LearningSummaryElement()
class LearningContentType(LearningBaseType):
"""Learning Content topic type."""
id = u"learningContent"
file = u"learning/dtd/learningContent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Content//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Content//EN"
title = u"Learning Content"
parent = LearningBaseType()
root = LearningContentElement()
def __init__(self):
super(LearningContentType, self).__init__()
self.required_types = [TaskType, ConceptType, ReferenceType, LearningSummaryType, LearningAssessmentType]
class LearningMapType(ditagen.dita.ShellType):
"""Learning Map topic type."""
def __init__(self):
super(LearningMapType, self).__init__(u"learningMap", u"Learning Map", MapType(), file=u"learning/dtd/learningMap")
#self.parent.required_domains = []
class LearningBookMapType(ditagen.dita.ShellType):
"""Learning BookMap topic type."""
def __init__(self):
super(LearningBookMapType, self).__init__(u"learningBookmap", u"Learning BookMap", BookMapType(), file=u"learning/dtd/learningBookmap")
#self.parent.required_domains = []
class ClassificationMapType(ditagen.dita.ShellType):
"""Classification Map topic type."""
def __init__(self):
super(ClassificationMapType, self).__init__(u"classifyMap", u"Classification Map", MapType(), file=u"subjectScheme/dtd/classifyMap")
#self.parent.required_domains = []
class SubjectSchemeType(MapType):
"""Subject Scheme Map topic type."""
id = u"subjectScheme"
file = u"subjectScheme/dtd/subjectScheme"
title = u"Subject Scheme Map"
parent = MapType()
root = SubjectSchemeElement()
class Constraints(ditagen.dita.DomainBase):
"""Base class for constraints."""
pi_suffix = u" Constraint"
elements = []
att_id = None
def get_file_name(self, extension):
return self.file + self.file_suffix + "." + extension
class AttributeDomain(ditagen.dita.DomainBase):
"""Base class for attribute domains."""
pi_suffix = u" Attribute Domain"
#elements = []
attributes = []
def get_file_name(self, extension):
return self.file + self.file_suffix + "." + extension
class UiDomain(ditagen.dita.Domain):
"""User interface domain."""
id = u"ui-d"
si_module = u"technicalContent/dtd/uiDomain.mod"
si_entity = u"technicalContent/dtd/uiDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 User Interface Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 User Interface Domain//EN"
title = u"User Interface"
elements = [u"pre", u"keyword", u"ph"]
parent = [TopicType]
class HiDomain(ditagen.dita.Domain):
"""Hilight domain."""
id = u"hi-d"
si_module = u"base/dtd/highlightDomain.mod"
si_entity = u"base/dtd/highlightDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Highlight Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Highlight Domain//EN"
title = u"Highlight"
elements = [u"ph"]
parent = [TopicType]
class PrDomain(ditagen.dita.Domain):
"""Programmign domain."""
id = u"pr-d"
si_module = u"technicalContent/dtd/programmingDomain.mod"
si_entity = u"technicalContent/dtd/programmingDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Programming Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Programming Domain//EN"
title = u"Programming"
elements = [u"pre", u"keyword", u"ph", u"fig", u"dl"]
parent = [TopicType]
class SwDomain(ditagen.dita.Domain):
"""Software development domain."""
id = u"sw-d"
si_module = u"technicalContent/dtd/softwareDomain.mod"
si_entity = u"technicalContent/dtd/softwareDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Software Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Software Domain//EN"
title = u"Software"
elements = [u"pre", u"keyword", u"ph"]
parent = [TopicType]
class UtDomain(ditagen.dita.Domain):
"""Utilities domain."""
id = u"ut-d"
si_module = u"base/dtd/utilitiesDomain.mod"
si_entity = u"base/dtd/utilitiesDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Utilities Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Utilities Domain//EN"
title = u"Utilities"
elements = [u"fig"]
parent = [TopicType]
class IndexingDomain(ditagen.dita.Domain):
"""Indexing domain."""
id = u"indexing-d"
si_module = u"base/dtd/indexingDomain.mod"
si_entity = u"base/dtd/indexingDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Indexing Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Indexing Domain//EN"
title = u"Indexing"
elements = [u"index-base"]
parent = [TopicType, MapType]
class LearningDomain(ditagen.dita.Domain):
"""Learning domain."""
id = u"learning-d"
si_module = u"learning/dtd/learningDomain.mod"
si_entity = u"learning/dtd/learningDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Domain//EN"
title = u"Learning"
elements = [u"note", u"fig"]
# XXX: This builds on
parent = [TopicType]
required_domains = [UtDomain]
class LearningMetaDomain(ditagen.dita.Domain):
"""Learning metadata domain."""
id = u"learningmeta-d"
si_module = u"learning/dtd/learningMetadataDomain.mod"
si_entity = u"learning/dtd/learningMetadataDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Metadata Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Metadata Domain//EN"
title = u"Learning Metadata"
elements = [u"metadata"]
parent = [TopicType]
class LearningMapDomain(ditagen.dita.Domain):
"""Learning map domain."""
id = u"learningmap-d"
si_module = u"learning/dtd/learningMapDomain.mod"
si_entity = u"learning/dtd/learningMapDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Map Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Map Domain//EN"
title = u"Learning Map"
elements = [u"topicref"]
parent = [MapType]
class TaskRequirementsDomain(ditagen.dita.Domain):
"""Task requirements domain."""
id = u"taskreq-d"
si_module = u"technicalContent/dtd/taskreqDomain.mod"
si_entity = u"technicalContent/dtd/taskreqDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task Requirements Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task Requirements Domain//EN"
title = u"Machine Industry Task"
elements = [u"prereq", u"postreq"]
parent = [TaskType]
class HazardStatementDomain(ditagen.dita.Domain):
"""Hazard statement domain."""
id = u"hazard-d"
si_module = u"base/dtd/hazardstatementDomain.mod"
si_entity = u"base/dtd/hazardstatementDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Hazard Statement Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Hazard Statement Domain//EN"
title = u"Hazard Statement"
elements = [u"note"]
parent = [TopicType]
class MapGroupDomain(ditagen.dita.Domain):
"""Map group domain."""
id = u"mapgroup-d"
si_module = u"base/dtd/mapGroup.mod"
si_entity = u"base/dtd/mapGroup.ent" # This is an exception to DITA's naming scheme
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map Group Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map Group Domain//EN"
title = u"Map Group"
elements = [u"topicref"]
parent = [MapType]
class AbbreviatedFormDomain(ditagen.dita.Domain):
"""Abbreviated form domain."""
id = u"abbrev-d"
si_module = u"technicalContent/dtd/abbreviateDomain.mod"
si_entity = u"technicalContent/dtd/abbreviateDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Abbreviated Form Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Abbreviated Form Domain//EN"
title = u"Abbreviated Form"
elements = [u"term"]
parent = [TopicType]
class XNALDomain(ditagen.dita.Domain):
"""XNAL domain."""
id = u"xnal-d"
si_module = u"xnal/dtd/xnalDomain.mod"
si_entity = u"xnal/dtd/xnalDomain.ent"
title = u"XNAL"
elements = [u"author"]
parent = [MapType]
class UserDelayedResolutionDomain(ditagen.dita.Domain):
"""User delayed resolution domain."""
id = u"delay-d"
si_module = u"base/dtd/delayResolutionDomain.mod"
si_entity = u"base/dtd/delayResolutionDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Delayed Resolution Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Delayed Resolution Domain//EN"
title = u"Delayed Resolution"
elements = [u"keywords"]
parent = [TopicType, MapType]
class ClassifyDomain(ditagen.dita.Domain):
"""Classify domain."""
id = u"classify-d"
si_module = u"subjectScheme/dtd/classifyDomain.mod"
si_entity = u"subjectScheme/dtd/classifyDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Classification Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Classification Domain//EN"
title = u"Map Subject Classification"
elements = [u"topicref", u"reltable"]
parent = [TopicType, MapType]
class GlossaryReferenceDomain(ditagen.dita.Domain):
"""Glossary reference domain."""
id = u"glossref-d"
si_module = u"technicalContent/dtd/glossrefDomain.mod"
si_entity = u"technicalContent/dtd/glossrefDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Glossary Reference Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Glossary Reference Domain//EN"
title = u"Glossary Reference"
elements = [u"topicref"]
parent = [MapType]
class StrictTaskbodyConstraints(Constraints):
"""Strict taskbody constraints."""
id = u"strictTaskbody-c"
si_module = u"technicalContent/dtd/strictTaskbodyConstraint.mod"
si_entity = u"technicalContent/dtd/strictTaskbodyConstraint.ent"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Strict Taskbody Constraint//EN"
title = u"Strict Taskbody"
parent = [TaskType]
att_id = u"taskbody"
class MachineryTaskbodyConstraints(Constraints):
"""Machinery taskbody constraints."""
id = u"machineryTaskbody-c"
si_module = u"machineryIndustry/dtd/machineryTaskbodyConstraint.mod"
si_entity = u"machineryIndustry/dtd/machineryTaskbodyConstraint.ent"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Machinery Taskbody Constraint//EN"
title = u"Machinery Taskbody"
parent = [TaskType]
att_id = u"taskbody"
TopicType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
ConceptType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
TaskType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain, StrictTaskbodyConstraints]
GeneralTaskType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
ReferenceType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
MachineryTaskType.default_domains = [TaskRequirementsDomain, HazardStatementDomain, HiDomain, UtDomain, IndexingDomain, PrDomain, SwDomain, UiDomain, MachineryTaskbodyConstraints]
MapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, GlossaryReferenceDomain]
BookMapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, XNALDomain]
ClassificationMapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, ClassifyDomain]
SubjectSchemeType.default_domains = [MapGroupDomain]
LearningAssessmentType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningBookMapType.default_domains = [LearningMapDomain, LearningMetaDomain, MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, XNALDomain]
LearningContentType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningMapType.default_domains = [LearningMapDomain, LearningMetaDomain, MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain]
LearningOverviewType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningPlanType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningSummaryType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
GlossentryType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
GlossgroupType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('webcore', '0016_profile_emails'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='emails',
),
]
|
from distutils.core import setup
PKGLIST = ['gearman_geodis']
setup(name='gearman-geodis',
version='1.0.0',
description='Geolocation Gearman worker powered by Geodis',
author_email='engineering@shazamteam.com',
license='Apache License, Version 2.0',
packages=PKGLIST,
scripts=['gearman_geodis/geodis_worker.py', 'gearman_geodis/gearman_geodisd.py', 'gearman_geodis/stdin_geodis_worker.py'],
data_files=[('/etc/sysconfig/',['support/gearman_geodis.sysconfig']),
('/etc/init.d/',['support/gearman_geodis'])]
)
|
from distutils.core import setup
from src import __version__
setup(
name="irma.common",
version=__version__,
author="Quarkslab",
author_email="irma@quarkslab.com",
description="The common component of the IRMA software",
packages=["irma.common",
"irma.common.base",
"irma.common.utils",
"irma.common.configuration",
"irma.common.ftp",
"irma.common.plugins"],
package_dir={"irma.common": "src",
"irma.common.utils": "src/utils",
"irma.common.base": "src/base",
"irma.common.plugins": "src/plugins"},
namespace_packages=["irma"]
)
|
import hashlib
import random
from rest_framework import serializers
from sita.users.models import User
from sita.subscriptions.models import Subscription
from sita.utils.refresh_token import create_token
from hashlib import md5
from datetime import datetime, timedelta
import pytz
class LoginSerializer(serializers.Serializer):
"""
Serializer for user login
"""
email = serializers.EmailField(
required=True
)
password = serializers.CharField(
required=True
)
    device_os = serializers.ChoiceField(
        required=False,
        choices=['ANDROID', 'IOS']
    )
    device_token = serializers.CharField(
        required=False,
        max_length=254
    )
def validate(self, data):
"""
Validation email, password and active status
"""
try:
user = User.objects.get(email__exact=data.get('email'))
except User.DoesNotExist:
raise serializers.ValidationError({"email":"invalid credentials"})
if not user.check_password(data.get('password')):
raise serializers.ValidationError({"email":"invalid credentials"})
if data.get("device_os") or data.get("device_token"):
if not data.get("device_os") or not data.get("device_token"):
raise serializers.ValidationError(
{"device_token":"Don`t send device OS or device token"})
if not user.is_active:
raise serializers.ValidationError(
{"email":"The user is not actived"}
)
return data
def get_user(self, data):
"""
return user object
"""
return User.objects.get(email__exact=data.get('email'))
class SignUpSerializer(serializers.Serializer):
""""""
TYPE_OS = (
('1', 'IOS'),
('2', 'ANDROID')
)
email = serializers.EmailField(
max_length=254,
required=True
)
password = serializers.CharField(
max_length=100,
required=True
)
time_zone = serializers.CharField(
max_length=100,
required=True
)
    name = serializers.CharField(
        required=False,
        max_length=100
    )
phone = serializers.CharField(
required=False,
max_length=10
)
    device_os = serializers.ChoiceField(
        required=False,
        choices=['ANDROID', 'IOS']
    )
    device_token = serializers.CharField(
        required=False,
        max_length=254
    )
conekta_card = serializers.CharField(
max_length=254,
required=False
)
    subscription_id = serializers.IntegerField(
        required=False
    )
def validate(self, data):
if data.get("device_os") or data.get("device_token"):
if not data.get("device_os") or not data.get("device_token"):
raise serializers.ValidationError(
{"device_token":"Don`t send device OS or device token"})
if data.get("conekta_card"):
if not data.get("phone") or not data.get("name") or not data.get("subscription_id"):
                raise serializers.ValidationError(
                    {"conekta_card":
                        "If conekta_card is sent, phone, name and "
                        "subscription_id are required"})
try:
subscription = Subscription.objects.get(id=data.get('subscription_id'))
except Subscription.DoesNotExist:
raise serializers.ValidationError(
{"subscription_id":"That subscription don't exists"}
)
try:
user = User.objects.get(email__exact=data.get('email'))
            raise serializers.ValidationError(
                {"email": "A user with this email already exists"}
            )
except User.DoesNotExist:
pass
try:
datetime.now(pytz.timezone(data.get("time_zone")))
except pytz.UnknownTimeZoneError:
raise serializers.ValidationError(
{"time_zone":"The time zone is not correct"}
)
return data
class LoginResponseSerializer(object):
"""
    Serializer used to return the proper token when the user has successfully
logged in.
"""
def __init__(self):
pass
    def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
class RecoveryPasswordSerializer(serializers.Serializer):
"""
Serializer for user recovery password
"""
email = serializers.EmailField(
required=True
)
def validate(self, data):
"""
Validation email and active status
"""
try:
user = User.objects.get(email__exact=data.get('email'))
except User.DoesNotExist:
raise serializers.ValidationError("invalid credentials")
if not user.is_active:
raise serializers.ValidationError(
{"email":"The user is not actived"}
)
return data
def generate_recovery_token(self, data):
""" Generate code to recovery password. """
user = User.objects.get(email__exact=data.get('email'))
email = user.email
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
if isinstance(email, unicode):
email = email.encode('utf-8')
key = hashlib.sha1(salt + email).hexdigest()
user.reset_pass_code = key
user.save()
return True
class ResetPasswordWithCodeSerializer(serializers.Serializer):
"""
Serializer for user login
"""
password = serializers.CharField(
required=True
)
    password_confirm = serializers.CharField(
required=True
)
recovery_code = serializers.CharField(
required=True
)
def validate(self, data):
"""
Validation email, password and active status
"""
try:
user = User.objects.get(reset_pass_code=data.get('recovery_code'))
except User.DoesNotExist:
            raise serializers.ValidationError(
                {"recovery_code": "Recovery code does not exist"})
        if not data.get('password') == data.get('password_confirm'):
            raise serializers.ValidationError(
                {"password_confirm":
                    "Password and confirm password do not match"})
return data
def update_password(self, data):
"""
Change password
"""
user = User.objects.get(reset_pass_code=data.get('recovery_code'))
user.reset_pass_code = None
user.set_password(data.get('password'))
user.save()
return True
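# Illustrative login flow (assumes a configured Django project; the values
# are hypothetical):
#     serializer = LoginSerializer(data={'email': 'a@b.com', 'password': 's3cret'})
#     if serializer.is_valid():
#         user = serializer.get_user(serializer.validated_data)
#         token = LoginResponseSerializer().get_token(user)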
|
import cProfile
from scipy.stats import norm
def profile(func):
def profiled_func(*args, **kwargs):
p = cProfile.Profile()
try:
p.enable()
result = func(*args, **kwargs)
p.disable()
return result
finally:
p.print_stats()
return profiled_func
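# Minimal usage sketch for the decorator above (illustrative; the function
# name and workload are hypothetical). Profile stats are printed even when
# the wrapped call raises, because of the try/finally.
@profile
def _sum_of_squares(n=100000):
    return sum(i * i for i in range(n))
# Calling _sum_of_squares() prints cProfile stats to stdout, then returns the sum.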
def print_models(func):
def printed_func(*args, **kwargs):
model = func(*args, **kwargs)
cv_keys = ('mean_test_score', 'std_test_score', 'params')
for r, _ in enumerate(model.cv_results_['mean_test_score']):
print("%0.3f +/- %0.2f %r" % (model.cv_results_[cv_keys[0]][r],
model.cv_results_[cv_keys[1]][r] / 2.0,
model.cv_results_[cv_keys[2]][r]))
print('Best parameters: %s' % model.best_params_)
print('Best accuracy: %.2f' % model.best_score_)
return model
return printed_func
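# Hypothetical usage sketch (assumes scikit-learn, which the ``cv_results_``
# convention above already implies): the wrapped function must return a
# fitted search object such as GridSearchCV.
@print_models
def _fit_demo_model():
    # Imports kept local so the sketch adds no hard module dependency.
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import GridSearchCV
    X, y = load_iris(return_X_y=True)
    search = GridSearchCV(LogisticRegression(max_iter=200), {'C': [0.1, 1.0]}, cv=3)
    return search.fit(X, y)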
def mov_to_win_percent(u, m=11, offset=0):
u = u + offset
return 1 - norm.cdf(0.5, loc=u, scale=m) + .5 * (norm.cdf(0.5, loc=u, scale=m) - norm.cdf(-0.5, loc=u, scale=m))
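# Quick sanity check for the helper above (illustrative): a 3-point average
# margin of victory with the default scale m=11 maps to roughly a 61% win
# probability.
if __name__ == '__main__':
    print(round(mov_to_win_percent(3.0), 3))  # ~0.608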
|
import sys
import numpy as np
from normalization import tokenize
from helpers import ahash
class KerasVectorizer():
'''
Convert list of documents to numpy array for input into Keras model
'''
def __init__(self, n_features=100000, maxlen=None, maxper=100, hash_function=ahash):
self.maxlen = maxlen
self.maxper = maxper
self.n_features = n_features
self.hash_function = hash_function
def _exact_hash(self, word, n_features):
return self.token_lookup.get(word, 0)
def fit_transform(self, raw_documents, y=None, suffix='', verbose=True):
if verbose:
print >> sys.stderr, 'splitting raw documents'
# Some way to print progress?
tokens = map(self._split_function, raw_documents)
if self.maxlen:
maxlen = self.maxlen
else:
maxlen = int(np.percentile(map(len, tokens), self.maxper))
self.maxlen = maxlen
X = np.zeros((len(tokens), maxlen))
for i,t in enumerate(tokens):
if verbose:
if not i % 10000:
print >> sys.stderr, 'processed %d tokens' % i
if len(t) > 0:
X[i,-len(t):] = map(lambda x: self.hash_function(x + suffix, self.n_features), t[:maxlen])
return X
class KerasCharacterVectorizer(KerasVectorizer):
'''
Split a string into characters
'''
def _split_function(self, doc):
return list(doc)
class KerasTokenVectorizer(KerasVectorizer):
'''
    Split a string into words.
'''
def _split_function(self, doc):
return tokenize(doc, keep_punctuation=True)
class KerasPretokenizedVectorizer(KerasVectorizer):
def _split_function(self, doc):
return doc
'''
from keras_vectorizer import KerasTokenVectorizer, KerasCharacterVectorizer
ktv = KerasTokenVectorizer()
ktv.fit_transform(['this is a test'])
ktv.fit_transform(['this is a test', 'this is a another test'])
ktv = KerasTokenVectorizer(maxlen=2)
ktv.fit_transform(['this is a test', 'this is a another test'])
kcv = KerasCharacterVectorizer()
kcv.fit_transform(['something', 'else'])
'''
|
from ansible.module_utils.basic import AnsibleModule
import git
import itertools
import multiprocessing
import os
import signal
import time
DOCUMENTATION = """
---
module: git_requirements
short_description: Module to run a multithreaded git clone
options:
repo_info:
description:
- List of repo information dictionaries containing at
a minimum a key entry "src" with the source git URL
to clone for each repo. In these dictionaries, one
can further specify:
"path" - destination clone location
"version" - git version to checkout
"refspec" - git refspec to checkout
"depth" - clone depth level
"force" - require git clone uses "--force"
default_path:
description:
      Default git clone path (str) in case not
      specified on an individual repo basis in
      repo_info. Not required.
default_version:
description:
Default git version (str) in case not
specified on an individual repo basis in
repo_info. Defaults to "master". Not
required.
default_refspec:
description:
Default git repo refspec (str) in case not
specified on an individual repo basis in
repo_info. Defaults to "". Not required.
default_depth:
description:
Default clone depth (int) in case not specified
on an individual repo basis. Defaults to 10.
Not required.
retries:
description:
Integer number of retries allowed in case of git
clone failure. Defaults to 1. Not required.
delay:
description:
Integer time delay (seconds) between git clone
retries in case of failure. Defaults to 0. Not
required.
force:
description:
Boolean. Apply --force flags to git clones wherever
possible. Defaults to False. Not required.
core_multiplier:
description:
Integer multiplier on the number of cores
present on the machine to use for
multithreading. For example, on a 2 core
machine, a multiplier of 4 would use 8
threads. Defaults to 4. Not required.
"""
EXAMPLES = r"""
- name: Clone repos
git_requirements:
repo_info: "[{'src':'https://github.com/ansible/',
'name': 'ansible'
'dest': '/etc/opt/ansible'}]"
"""
def init_signal():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def check_out_version(repo, version, pull=False, force=False,
refspec=None, tag=False, depth=10):
try:
repo.git.fetch(tags=tag, force=force, refspec=refspec, depth=depth)
except Exception as e:
return ["Failed to fetch %s\n%s" % (repo.working_dir, str(e))]
try:
repo.git.checkout(version, force=force)
except Exception as e:
return [
"Failed to check out version %s for %s\n%s" %
(version, repo.working_dir, str(e))]
if repo.is_dirty(untracked_files=True) and force:
try:
repo.git.clean(force=force)
except Exception as e:
return [
"Failed to clean up repository% s\n%s" %
(repo.working_dir, str(e))]
if pull:
try:
repo.git.pull(force=force, refspec=refspec, depth=depth)
except Exception as e:
return ["Failed to pull repo %s\n%s" % (repo.working_dir, str(e))]
return []
def pull_wrapper(info):
role_info = info
retries = info[1]["retries"]
delay = info[1]["delay"]
for i in range(retries):
success = pull_role(role_info)
if success:
return True
else:
time.sleep(delay)
info[2].append(["Role {0} failed after {1} retries\n".format(role_info[0],
retries)])
return False
def pull_role(info):
role, config, failures = info
required_version = role["version"]
version_hash = False
if 'version' in role:
        # If the version is the length of a hash then treat it as one
if len(required_version) == 40:
version_hash = True
def get_repo(dest):
try:
return git.Repo(dest)
except Exception:
failtxt = "Role in {0} is broken/not a git repo.".format(
role["dest"])
failtxt += "Please delete or fix it manually"
failures.append(failtxt)
return False
# if repo exists
if os.path.exists(role["dest"]):
repo = get_repo(role["dest"])
if not repo:
return False # go to next role
repo_url = list(repo.remote().urls)[0]
if repo_url != role["src"]:
repo.remote().set_url(role["src"])
# if they want master then fetch, checkout and pull to stay at latest
# master
if required_version == "master":
fail = check_out_version(repo, required_version, pull=True,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
        # If we have a hash then check out that exact commit
elif version_hash:
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
else:
# describe can fail in some cases so be careful:
try:
current_version = repo.git.describe(tags=True)
except Exception:
current_version = ""
if current_version == required_version and not config["force"]:
fail = []
pass
else:
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"],
tag=True)
else:
try:
# If we have a hash id then treat this a little differently
if version_hash:
git.Repo.clone_from(role["src"], role["dest"],
branch='master',
no_single_branch=True,
depth=role["depth"])
repo = get_repo(role["dest"])
if not repo:
return False # go to next role
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
else:
git.Repo.clone_from(role["src"], role["dest"],
branch=required_version,
depth=role["depth"],
no_single_branch=True)
fail = []
except Exception as e:
        fail = ['Failed cloning repo %s\n%s' % (role["dest"], str(e))]
if fail == []:
return True
else:
failures.append(fail)
return False
def set_default(dictionary, key, defaults):
if key not in dictionary.keys():
dictionary[key] = defaults[key]
def main():
# Define variables
failures = multiprocessing.Manager().list()
# Data we can pass in to the module
fields = {
"repo_info": {"required": True, "type": "list"},
"default_path": {"required": True,
"type": "str"},
"default_version": {"required": False,
"type": "str",
"default": "master"},
"default_refspec": {"required": False,
"type": "str",
"default": None},
"default_depth": {"required": False,
"type": "int",
"default": 10},
"retries": {"required": False,
"type": "int",
"default": 1},
"delay": {"required": False,
"type": "int",
"default": 0},
"force": {"required": False,
"type": "bool",
"default": False},
"core_multiplier": {"required": False,
"type": "int",
"default": 4},
}
# Pull in module fields and pass into variables
module = AnsibleModule(argument_spec=fields)
git_repos = module.params['repo_info']
defaults = {
"path": module.params["default_path"],
"depth": module.params["default_depth"],
"version": module.params["default_version"],
"refspec": module.params["default_refspec"]
}
config = {
"retries": module.params["retries"],
"delay": module.params["delay"],
"force": module.params["force"],
"core_multiplier": module.params["core_multiplier"]
}
# Set up defaults
for repo in git_repos:
for key in ["path", "refspec", "version", "depth"]:
set_default(repo, key, defaults)
if "name" not in repo.keys():
repo["name"] = os.path.basename(repo["src"])
repo["dest"] = os.path.join(repo["path"], repo["name"])
core_count = multiprocessing.cpu_count() * config["core_multiplier"]
# Load up process and pass in interrupt and core process count
p = multiprocessing.Pool(core_count, init_signal)
clone_success = p.map(pull_wrapper, zip(git_repos,
itertools.repeat(config),
itertools.repeat(failures)),
chunksize=1)
p.close()
success = all(i for i in clone_success)
if success:
module.exit_json(msg=str(git_repos), changed=True)
else:
module.fail_json(msg=("Module failed"), meta=failures)
if __name__ == '__main__':
main()
|
import configparser
import io
import os
import subprocess
from rally.common import logging
from rally.utils import encodeutils
LOG = logging.getLogger(__name__)
def check_output(*args, **kwargs):
"""Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The difference between check_output from subprocess package and this
function:
* Additional arguments:
- "msg_on_err" argument. It is a message that should be written in case
of error. Reduces a number of try...except blocks
- "debug_output" argument(Defaults to True). Print or not output to
LOG.debug
* stderr is hardcoded to stdout
* In case of error, prints failed command and output to LOG.error
* Prints output to LOG.debug
"""
msg_on_err = kwargs.pop("msg_on_err", None)
debug_output = kwargs.pop("debug_output", True)
kwargs["stderr"] = subprocess.STDOUT
try:
output = subprocess.check_output(*args, **kwargs)
except subprocess.CalledProcessError as exc:
if msg_on_err:
LOG.error(msg_on_err)
LOG.error("Failed cmd: '%s'" % exc.cmd)
LOG.error("Error output: '%s'" % encodeutils.safe_decode(exc.output))
raise
output = encodeutils.safe_decode(output)
if output and debug_output:
LOG.debug("Subprocess output: '%s'" % output)
return output
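# Usage sketch (command and message are illustrative): on failure the
# wrapper logs the failed command and its output, then re-raises
# CalledProcessError.
#
# out = check_output(["ls", "-la"], msg_on_err="listing failed")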
def create_dir(dir_path):
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
return dir_path
def extend_configfile(extra_options, conf_path):
conf_object = configparser.ConfigParser()
conf_object.optionxform = str
conf_object.read(conf_path)
conf_object = add_extra_options(extra_options, conf_object)
with open(conf_path, "w") as configfile:
conf_object.write(configfile)
raw_conf = io.StringIO()
conf_object.write(raw_conf)
return raw_conf.getvalue()
def add_extra_options(extra_options, conf_object):
conf_object.optionxform = str
for section in extra_options:
if section not in (conf_object.sections() + ["DEFAULT"]):
conf_object.add_section(section)
for option, value in extra_options[section].items():
conf_object.set(section, option, value)
return conf_object
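# Minimal sketch of layering extra options onto an existing file
# (path and option values are hypothetical):
#
# raw = extend_configfile({"DEFAULT": {"debug": "True"}}, "/tmp/rally.conf")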
|
import pytest
import gen.template
from gen.template import (For, Replacement, Switch, Tokenizer, UnsetParameter,
parse_str)
just_text = "foo"
more_complex_text = "foo {"
def get_tokens(str):
return Tokenizer(str).tokens
def test_lex():
assert(get_tokens("foo") == [("blob", "foo"), ("eof", None)])
assert(get_tokens("{") == [('blob', '{'), ('eof', None)])
assert(get_tokens("{#") == [('blob', '{'), ('blob', '#'), ('eof', None)])
assert(get_tokens("{ foo ") == [
('blob', '{'), ('blob', ' foo '), ('eof', None)])
assert(get_tokens("{ foo {{{{ {{{{{ ") == [('blob', '{'), ('blob', ' foo '), (
'blob', '{{'), ('blob', ' '), ('blob', '{{'), ('blob', '{'), ('blob', ' '), ('eof', None)])
assert(get_tokens("{{ test }}") == [
('replacement', ('test', None)), ('eof', None)])
assert(get_tokens("{{ test | foo }}") == [
('replacement', ('test', 'foo')), ('eof', None)])
assert(get_tokens(" {{ test }}") == [
('blob', ' '), ('replacement', ('test', None)), ('eof', None)])
assert(get_tokens("{{ test }}}}") == [
('replacement', ('test', None)), ('blob', '}}'), ('eof', None)])
assert(get_tokens('{% switch foo %}{% case "as\\"df" %}foobar{% endswitch %}}}') == [
('switch', 'foo'),
('case', 'as"df'),
('blob', 'foobar'),
('endswitch', None),
('blob', '}}'),
('eof', None)])
assert(get_tokens('{% switch foo %} \n \r {% case "as\\"df" %}foobar{% endswitch %}}}') == [
('switch', 'foo'),
('blob', ' \n \r '),
('case', 'as"df'),
('blob', 'foobar'),
('endswitch', None),
('blob', '}}'),
('eof', None)])
assert(get_tokens("a{% switch foo %}{% case \"test\" %}{{ a | baz }}b{{ a | bar }}{% endswitch %}c{{ c | bar }}{{ a | foo }}") == [ # noqa
('blob', 'a'),
('switch', 'foo'),
('case', 'test'),
('replacement', ('a', 'baz')),
('blob', 'b'),
('replacement', ('a', 'bar')),
('endswitch', None),
('blob', 'c'),
('replacement', ('c', 'bar')),
('replacement', ('a', 'foo')),
('eof', None)
])
assert(get_tokens("{% for foo in bar %}{{ foo }}{% endfor %}") == [
('for', ('foo', 'bar')),
('replacement', ('foo', None)),
('endfor', None),
('eof', None)])
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test |}}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test| }}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test | }}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test }}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{test}}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test}}")
def test_parse():
assert(parse_str("a").ast == ["a"])
assert(parse_str("{{ a }}").ast == [Replacement(("a", None))])
assert(parse_str("a {{ a | foo }}{{ b }} c {{ d | bar }}").ast == [
"a ",
Replacement(("a", 'foo')),
Replacement(("b", None)),
" c ",
Replacement(("d", 'bar'))
])
assert(parse_str('{% switch foo %}{% case "as\\"df" %}foobar{% endswitch %}}}').ast ==
[Switch("foo", {'as"df': ["foobar"]}), '}}'])
assert(parse_str('{{ a }}b{{ c }}{% switch foo %} \n {% case "as\\"df" %}foobar{% endswitch %}}}').ast == [
Replacement(("a", None)),
"b",
Replacement(("c", None)),
Switch("foo", {'as"df': ["foobar"]}),
"}}"
])
# TODO(cmaloney): Add parse syntax error tests
assert parse_str("{% for foo in bar %}{{ foo }}{% endfor %}").ast == [For("foo", "bar", [Replacement('foo')])]
def test_get_variables():
assert(parse_str("a").get_scoped_arguments() ==
{'variables': set(), 'sub_scopes': dict()})
assert(parse_str("{{ a }}").get_scoped_arguments() ==
{'variables': {"a"}, 'sub_scopes': dict()})
assert(parse_str("{{ a | foo }}").get_scoped_arguments() ==
{'variables': {"a"}, 'sub_scopes': dict()})
assert(parse_str("a{{ a }}b{{ c }}").get_scoped_arguments() ==
{'variables': {"a", "c"}, 'sub_scopes': dict()})
assert(parse_str("a{{ a }}b{{ a }}c{{ c | baz }}").get_scoped_arguments() ==
{'variables': {"a", "c"}, 'sub_scopes': dict()})
assert(parse_str("a{{ a }}b{{ a | bar }}c{{ c }}").get_scoped_arguments() ==
{'variables': {"a", "c"}, 'sub_scopes': dict()})
assert(parse_str("{{ a }}{% switch b %}{% case \"c\" %}{{ d }}{% endswitch %}{{ e }}").get_scoped_arguments() == {
'variables': {'a', 'e'},
'sub_scopes': {
'b': {
'c': {
'variables': {'d'},
'sub_scopes': {}
}
}
}
})
assert (parse_str("{% for foo in bar %}{{ foo }}{{ bar }}{{ baz }}{% endfor %}").get_scoped_arguments() ==
{'variables': {'bar', 'baz'}, 'sub_scopes': dict()})
# TODO(cmaloney): Disallow reusing a for new variable as a general variable.
assert (parse_str("{% for foo in bar %}{{ foo }}{{ bar }}{{ baz }}{% endfor %}{{ foo }}").get_scoped_arguments() ==
{'variables': {'foo', 'bar', 'baz'}, 'sub_scopes': dict()})
def test_get_filters():
assert(parse_str("{{ a }}").get_filters() == set())
assert(parse_str("{{ a | foo }}").get_filters() == {"foo"})
assert(parse_str(
"a{{ a | baz }}b{{ a | bar }}c{{ c | bar }}").get_filters() == {"baz", "bar"})
assert(parse_str("a{% switch foo %}{% case \"test\" %}{{ a | baz }}b{{ a | bar }}{% endswitch %}c{{ c | bar }}{{ a | foo }}").get_filters() == {"foo", "baz", "bar"}) # noqa
assert parse_str("{% for foo in bar %}{{ foo | bang }}{% endfor %}").get_filters() == {'bang'}
def test_render():
assert(parse_str("a").render({}) == "a")
assert(parse_str("{{ a }}a{{ b }}").render({"a": "1", "b": "2"}) == "1a2")
assert(parse_str("{{ a | foo }}a{{ b }}").render(
{"a": "1", "b": "2"},
{'foo': lambda x: x + 'foo'}
) == "1fooa2")
with pytest.raises(UnsetParameter):
parse_str("{{ a }}a{{ b }}").render({"a": "1"})
with pytest.raises(UnsetParameter):
parse_str("{{ a }}").render({"c": "1"})
with pytest.raises(UnsetParameter):
parse_str("{{ a | foo }}").render({"a": "1"})
assert parse_str("{% for a in b %}{{ a }}{% endfor %}").render({"b": ['a', 'test']}) == "atest"
assert (parse_str("{% for a in b %}{{ a }}{% endfor %}else{{ a }}").render({"b": ['b', 't', 'c'], "a": "foo"}) ==
"btcelsefoo")
with pytest.raises(UnsetParameter):
parse_str("{% for a in b %}{{ a }}{% endfor %}else{{ a }}").render({"b": ['b', 't', 'c']})
|
"""Flask Blueprint adding login functionality to our app. Note that we expect
gluten model and db config to be handled elsewhere
"""
import sys
import traceback
from functools import partial, wraps
from flask import redirect, request, flash, session, abort, g, url_for
from flask.globals import LocalProxy, _lookup_app_object
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
from flask_dance.consumer import (
OAuth2ConsumerBlueprint,
oauth_authorized,
oauth_error
)
from gludb.utils import now_field
from .utils import app_logger
from .models import User
def set_user_session(user_id=None):
if not user_id:
user_id = ''
session['user_id'] = user_id
def get_user():
"""Return current user"""
user_id = session.get('user_id', '')
if not user_id:
return None # Not logged in
return User.find_one(user_id)
def require_login(func):
"""Simple decorator helper for requiring login on functions decorated with
flask route: make sure that it's LAST in the decorator list so that the
flask magic happens (see voice_testing for an example).
Important: we are assuming the blueprint endpoint auth.login exists
"""
@wraps(func)
def wrapper(*args, **kwrds):
try:
user = get_user()
if user:
setattr(g, 'user', user)
return func(*args, **kwrds)
else:
url = url_for('auth.login', redir=request.url)
return redirect(url)
        except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
log = app_logger()
log.warning("Unexpected error: %s", exc_value)
log.error(''.join(traceback.format_exception(
exc_type, exc_value, exc_traceback
)))
return abort(500)
return wrapper
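# Usage sketch (hypothetical view): keep require_login LAST in the
# decorator list so the flask route registers the wrapped function.
#
# @app.route('/dashboard')
# @require_login
# def dashboard():
#     return 'Hello %s' % g.user.name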
auth = OAuth2ConsumerBlueprint(
"auth",
__name__,
client_id=None, # Handled via app config
client_secret=None, # Handled via app config
scope=["profile", "email"],
base_url="https://www.googleapis.com/",
authorization_url="https://accounts.google.com/o/oauth2/auth",
token_url="https://accounts.google.com/o/oauth2/token",
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
authorization_url_params={},
session_class=None,
backend=None,
)
auth.from_config["client_id"] = "GOOGLE_OAUTH_CLIENT_ID"
auth.from_config["client_secret"] = "GOOGLE_OAUTH_CLIENT_SECRET"
@auth.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.google_oauth = auth.session
google_api = LocalProxy(partial(_lookup_app_object, "google_oauth"))
def login_fail(msg):
flash(msg, category="error")
app_logger().error(msg)
return False
@oauth_authorized.connect
def log_in_event(blueprint, token):
set_user_session() # Clear previous session
if not token:
return login_fail("Failed to log in")
resp = blueprint.session.get("/oauth2/v1/userinfo")
if not resp.ok:
return login_fail("Failed to login user!")
data = resp.json()
email = data.get('email', '')
if not email:
return login_fail("Google failed to supply an email address")
users = User.find_by_index('idx_email', email)
if users:
user = users[0]
else:
user = User(email=email)
# Update the user info and save the session info
user.name = data.get('name', email)
user.photo = data.get('picture', '/static/anonymous_person.png')
user.logins.append(now_field())
user.save()
set_user_session(user.id)
app_logger().info("Logged in user id %s, email %s" % (user.id, user.email))
@oauth_error.connect
def github_error(blueprint, error, error_description=None, error_uri=None):
login_fail("OAuth login failure: [%s] %s (uri=%s)" % (
error, error_description, error_uri
))
@auth.route('/logout')
def logout():
set_user_session()
redir_url = request.args.get("redir", None)
if not redir_url:
redir_url = '/'
return redirect(redir_url)
|
from .tables import (
BatchCreateRowsRequest,
BatchCreateRowsResponse,
BatchDeleteRowsRequest,
BatchUpdateRowsRequest,
BatchUpdateRowsResponse,
ColumnDescription,
CreateRowRequest,
DeleteRowRequest,
GetRowRequest,
GetTableRequest,
GetWorkspaceRequest,
LabeledItem,
ListRowsRequest,
ListRowsResponse,
ListTablesRequest,
ListTablesResponse,
ListWorkspacesRequest,
ListWorkspacesResponse,
LookupDetails,
RelationshipDetails,
Row,
Table,
UpdateRowRequest,
Workspace,
View,
)
__all__ = (
"BatchCreateRowsRequest",
"BatchCreateRowsResponse",
"BatchDeleteRowsRequest",
"BatchUpdateRowsRequest",
"BatchUpdateRowsResponse",
"ColumnDescription",
"CreateRowRequest",
"DeleteRowRequest",
"GetRowRequest",
"GetTableRequest",
"GetWorkspaceRequest",
"LabeledItem",
"ListRowsRequest",
"ListRowsResponse",
"ListTablesRequest",
"ListTablesResponse",
"ListWorkspacesRequest",
"ListWorkspacesResponse",
"LookupDetails",
"RelationshipDetails",
"Row",
"Table",
"UpdateRowRequest",
"Workspace",
"View",
)
|
import json
import re
import tg
import pkg_resources
import pylons
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pylons import c
from ming.orm import ThreadLocalORMSession
from datadiff.tools import assert_equal
from allura import model as M
from allura.lib import helpers as h
from allura.tests import decorators as td
from alluratest.controller import TestController
class _TestCase(TestController):
def setUp(self):
super(_TestCase, self).setUp()
self.setup_with_tools()
@td.with_git
def setup_with_tools(self):
h.set_context('test', 'src-git', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgegit', 'tests/data')
c.app.repo.fs_path = repo_dir
c.app.repo.status = 'ready'
c.app.repo.name = 'testgit.git'
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src-git', neighborhood='Projects')
c.app.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
class TestRootController(_TestCase):
def test_index(self):
resp = self.app.get('/src-git/').follow().follow()
assert 'git://' in resp
def test_index_empty(self):
self.app.get('/git/')
def test_commit_browser(self):
resp = self.app.get('/src-git/commit_browser')
def test_commit_browser_data(self):
resp = self.app.get('/src-git/commit_browser_data')
        data = json.loads(resp.body)
assert data['max_row'] == 3
assert data['next_column'] == 1
assert_equal(data['built_tree']['df30427c488aeab84b2352bdf88a3b19223f9d7a'],
{u'url': u'/p/test/src-git/ci/df30427c488aeab84b2352bdf88a3b19223f9d7a/',
u'oid': u'df30427c488aeab84b2352bdf88a3b19223f9d7a',
u'column': 0,
u'parents': [u'6a45885ae7347f1cac5103b0050cc1be6a1496c8'],
u'message': u'Add README', u'row': 1})
def test_log(self):
resp = self.app.get('/src-git/ref/master~/log/')
def test_tags(self):
resp = self.app.get('/src-git/ref/master~/tags/')
def _get_ci(self):
r = self.app.get('/src-git/ref/master:/')
resp = r.follow()
for tag in resp.html.findAll('a'):
if tag['href'].startswith('/p/test/src-git/ci/'):
return tag['href']
return None
def test_commit(self):
ci = self._get_ci()
resp = self.app.get(ci)
assert 'Rick' in resp, resp.showbrowser()
def test_feed(self):
assert 'Add README' in self.app.get('/feed')
def test_tree(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/')
assert len(resp.html.findAll('tr')) == 2, resp.showbrowser()
resp = self.app.get(ci + 'tree/')
assert 'README' in resp, resp.showbrowser()
links = [ a.get('href') for a in resp.html.findAll('a') ]
assert 'README' in links, resp.showbrowser()
assert 'README/' not in links, resp.showbrowser()
def test_tree_extra_params(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/?format=raw')
assert 'README' in resp, resp.showbrowser()
def test_file(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README')
assert 'README' in resp.html.find('h2', {'class':'dark title'}).contents[2]
content = str(resp.html.find('div', {'class':'clip grid-19'}))
assert 'This is readme' in content, content
assert '<span id="l1" class="code_block">' in resp
assert 'var hash = window.location.hash.substring(1);' in resp
def test_invalid_file(self):
ci = self._get_ci()
self.app.get(ci + 'tree/READMEz', status=404)
def test_diff(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README?diff=df30427c488aeab84b2352bdf88a3b19223f9d7a')
assert 'readme' in resp, resp.showbrowser()
assert '+++' in resp, resp.showbrowser()
def test_refresh(self):
notification = M.Notification.query.find(
dict(subject='[test:src-git] 4 new commits to test Git')).first()
domain = '.'.join(reversed(c.app.url[1:-1].split('/'))).replace('_', '-')
common_suffix = tg.config.get('forgemail.domain', '.sourceforge.net')
email = 'noreply@%s%s' % (domain, common_suffix)
assert email in notification['reply_to_address']
def test_file_force_display(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README?force=True')
content = str(resp.html.find('div', {'class':'clip grid-19'}))
assert re.search(r'<pre>.*This is readme', content), content
assert '</pre>' in content, content
class TestRestController(_TestCase):
def test_index(self):
self.app.get('/rest/p/test/src-git/', status=200)
def test_commits(self):
self.app.get('/rest/p/test/src-git/commits', status=200)
class TestFork(_TestCase):
def setUp(self):
super(TestFork, self).setUp()
to_project = M.Project.query.get(
shortname='test2', neighborhood_id=c.project.neighborhood_id)
r = self.app.post('/src-git/fork', params=dict(
project_id=str(to_project._id),
mount_point='code',
mount_label='Test forked repository'))
assert "{status: 'error'}" not in str(r.follow())
cloned_from = c.app.repo
with h.push_context('test2', 'code', neighborhood='Projects'):
c.app.repo.init_as_clone(
cloned_from.full_fs_path,
cloned_from.app.config.script_name(),
cloned_from.full_fs_path)
def _follow(self, r, **kw):
if r.status_int == 302:
print r.request.url
while r.status_int == 302:
print ' ==> 302 ==> %s' % r.location
r = r.follow(**kw)
return r
def _upstream_page(self, **kw):
r = self.app.get('/src-git/', **kw)
r = self._follow(r, **kw)
return r
def _fork_page(self, **kw):
r = self.app.get('/p/test2/code/', **kw)
r = self._follow(r, **kw)
return r
def _request_merge(self, **kw):
r = self.app.get('/p/test2/code/request_merge', **kw)
r = self._follow(r, **kw)
r = r.forms[0].submit()
r = self._follow(r, **kw)
mr_num = r.request.url.split('/')[-2]
assert mr_num.isdigit(), mr_num
return r, mr_num
def test_fork_form(self):
r = self.app.get('%sfork/' % c.app.repo.url())
assert '<input type="text" name="mount_point" value="test"/>' in r
assert '<input type="text" name="mount_label" value="test - Git"/>' in r
def test_fork_listed_in_parent(self):
assert 'Forks' in self._upstream_page()
def test_fork_display(self):
r = self._fork_page()
assert 'Clone of' in r
assert 'Test forked repository' in r
def test_fork_links_go_to_fork(self):
r = self._fork_page()
hrefs = ( a.get('href') for a in r.html('a') )
hrefs = ( href for href in hrefs if href and '/ci/' in href )
for href in hrefs:
assert href.startswith('/p/test2/code/'), href
def test_merge_request_visible_to_admin(self):
assert 'Request Merge' in self._fork_page()
def test_merge_request_invisible_to_non_admin(self):
assert 'Request Merge' not in self._fork_page(
extra_environ=dict(username='test-user'))
def test_merge_action_available_to_admin(self):
self.app.get('/p/test2/code/request_merge')
def test_merge_action_unavailable_to_non_admin(self):
self.app.get(
'/p/test2/code/request_merge',
status=403, extra_environ=dict(username='test-user'))
def test_merge_request_detail_view(self):
r, mr_num = self._request_merge()
assert 'would like you to merge' in r, r.showbrowser()
def test_merge_request_list_view(self):
r, mr_num = self._request_merge()
r = self.app.get('/p/test/src-git/merge-requests/')
assert 'href="%s/"' % mr_num in r, r
def test_merge_request_update_status(self):
r, mr_num = self._request_merge()
r = self.app.post('/p/test/src-git/merge-requests/%s/save' % mr_num,
params=dict(status='rejected')).follow()
assert 'Merge Request #%s: (rejected)' % mr_num in r, r
|
from oslo_config import cfg
from oslo_utils import importutils
import webob
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron import quota
from neutron.quota import resource_registry
from neutron import wsgi
RESOURCE_NAME = 'quota'
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
QUOTAS = quota.QUOTAS
DB_QUOTA_DRIVER = 'neutron.db.quota.driver.DbQuotaDriver'
EXTENDED_ATTRIBUTES_2_0 = {
RESOURCE_COLLECTION: {}
}
class QuotaSetsController(wsgi.Controller):
def __init__(self, plugin):
self._resource_name = RESOURCE_NAME
self._plugin = plugin
self._driver = importutils.import_class(
cfg.CONF.QUOTAS.quota_driver
)
self._update_extended_attributes = True
def _update_attributes(self):
for quota_resource in resource_registry.get_all_resources().keys():
attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
attr_dict[quota_resource] = {
'allow_post': False,
'allow_put': True,
'convert_to': attributes.convert_to_int,
'validate': {'type:range': [-1, const.DB_INTEGER_MAX_VALUE]},
'is_visible': True}
self._update_extended_attributes = False
def _get_quotas(self, request, tenant_id):
return self._driver.get_tenant_quotas(
request.context,
resource_registry.get_all_resources(),
tenant_id)
def create(self, request, body=None):
msg = _('POST requests are not supported on this resource.')
raise webob.exc.HTTPNotImplemented(msg)
def index(self, request):
context = request.context
self._check_admin(context)
return {self._resource_name + "s":
self._driver.get_all_quotas(
context, resource_registry.get_all_resources())}
def tenant(self, request):
"""Retrieve the tenant info in context."""
context = request.context
if not context.tenant_id:
raise n_exc.QuotaMissingTenant()
return {'tenant': {'tenant_id': context.tenant_id}}
def show(self, request, id):
if id != request.context.tenant_id:
self._check_admin(request.context,
reason=_("Only admin is authorized "
"to access quotas for another tenant"))
return {self._resource_name: self._get_quotas(request, id)}
def _check_admin(self, context,
reason=_("Only admin can view or configure quota")):
if not context.is_admin:
raise n_exc.AdminRequired(reason=reason)
def delete(self, request, id):
self._check_admin(request.context)
self._driver.delete_tenant_quota(request.context, id)
def update(self, request, id, body=None):
self._check_admin(request.context)
if self._update_extended_attributes:
self._update_attributes()
body = base.Controller.prepare_request_body(
request.context, body, False, self._resource_name,
EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION])
for key, value in body[self._resource_name].items():
self._driver.update_quota_limit(request.context, id, key, value)
return {self._resource_name: self._get_quotas(request, id)}
class Quotasv2(extensions.ExtensionDescriptor):
"""Quotas management support."""
@classmethod
def get_name(cls):
return "Quota management support"
@classmethod
def get_alias(cls):
return RESOURCE_COLLECTION
@classmethod
def get_description(cls):
description = 'Expose functions for quotas management'
if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER:
description += ' per tenant'
return description
@classmethod
def get_updated(cls):
return "2012-07-29T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
controller = resource.Resource(
QuotaSetsController(manager.NeutronManager.get_plugin()),
faults=base.FAULT_MAP)
return [extensions.ResourceExtension(
Quotasv2.get_alias(),
controller,
collection_actions={'tenant': 'GET'})]
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
|
from adt.util.prog import Prog
from adt.util.literal import Literal
from adt.util.expr import Expr
from adt.util.unary_op import Unary_op
from adt.util.binary_op import Binary_op
from adt.util.block import Block
from adt.util.context import Context
from adt.util.instr import Instr
from adt.types.bool import Bool
from adt.types.nat import Nat
from adt.types.char import Char
from adt.types.string import String
from adt.types.relative import Z
from adt.types.relative_list import List
from adt.types.map import Map
def test_eval_expr():
var = String('var')
lit1 = Literal.lit_nat(Nat(5))
lit2 = Literal.lit_nat(Nat(3))
context = Context.cons(c=Context.empty(), k=var, v=lit2)
expr = Expr.expr_lit(lit1)
# literal
assert Prog.eval_expr(expr=expr, context=context) == lit1
# variable
assert Prog.eval_expr(expr=Expr.expr_variable(var), context=context) == lit2
# unary operations
# o.not
lit_true = Literal.lit_bool(Bool.true())
lit_false = Literal.lit_bool(Bool.false())
op = Unary_op.o_not()
expr_bool = Expr.expr_unary(op=op, expr=Expr.expr_lit(lit_true))
assert Prog.eval_expr(expr=expr_bool, context=context) == lit_false
assert Prog.eval_expr(expr=expr_bool, context=context) != lit_true
# uSub
lit_z = Literal.lit_z(Z(2))
lit_z2 = Literal.lit_z(Z(-2))
op = Unary_op.uSub()
expr_z = Expr.expr_unary(op=op, expr=Expr.expr_lit(lit_z))
assert Prog.eval_expr(expr=expr_z, context=context) == lit_z2
# binary operations
# add
lit_z1 = Literal.lit_z(Z(2))
lit_z2 = Literal.lit_z(Z(-5))
lit_z3 = Literal.lit_z(Z(-3))
op = Binary_op.add()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z1), expr2=Expr.expr_lit(lit_z2))
assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# sub
lit_z3 = Literal.lit_z(Z(7))
op = Binary_op.sub()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z1), expr2=Expr.expr_lit(lit_z2))
assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# mult
lit_z3 = Literal.lit_z(Z(10))
op = Binary_op.mult()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z1), expr2=Expr.expr_lit(lit_z2))
# assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# div
lit_z3 = Literal.lit_z(Z(2))
op = Binary_op.div()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z2), expr2=Expr.expr_lit(lit_z1))
# assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# modulo
lit_z3 = Literal.lit_z(Z(1))
op = Binary_op.modulo()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z2), expr2=Expr.expr_lit(lit_z1))
# assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# and
op = Binary_op.o_and()
expr1 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_true), expr2=Expr.expr_lit(lit_true))
expr2 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_false), expr2=Expr.expr_lit(lit_true))
assert Prog.eval_expr(expr=expr1, context=context) == lit_true
assert Prog.eval_expr(expr=expr2, context=context) == lit_false
assert Prog.eval_expr(expr=expr2, context=context) != lit_true
# or
op = Binary_op.o_or()
expr1 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_false), expr2=Expr.expr_lit(lit_false))
expr2 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_false), expr2=Expr.expr_lit(lit_true))
assert Prog.eval_expr(expr=expr1, context=context) == lit_false
assert Prog.eval_expr(expr=expr2, context=context) == lit_true
assert Prog.eval_expr(expr=expr2, context=context) != lit_false
# xor
op = Binary_op.xor()
expr1 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_true), expr2=Expr.expr_lit(lit_true))
expr2 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_false), expr2=Expr.expr_lit(lit_true))
assert Prog.eval_expr(expr=expr1, context=context) == lit_false
assert Prog.eval_expr(expr=expr2, context=context) == lit_true
assert Prog.eval_expr(expr=expr2, context=context) != lit_false
|
import random
import time
import mock
from mox3 import mox
from os_xenapi.client import XenAPI
from nova.compute import utils as compute_utils
from nova import context
from nova import exception
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import vm_utils
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(TestGlanceStore, self).setUp()
self.store = glance.GlanceStore()
self.flags(api_servers=['http://localhost:9292'], group='glance')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.context = context.RequestContext(
'user', 'project', auth_token='foobar')
fake.reset()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.stubs.Set(
vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
self.instance = {'uuid': 'blah',
'system_metadata': [],
'auto_disk_config': True,
'os_type': 'default',
'xenapi_use_agent': 'true'}
def _get_params(self):
return {'image_id': 'fake_image_uuid',
'endpoint': 'http://localhost:9292',
'sr_path': '/fake/sr/path',
'api_version': 2,
'extra_headers': {'X-Auth-Token': 'foobar',
'X-Roles': '',
'X-Tenant-Id': 'project',
'X-User-Id': 'user',
'X-Identity-Status': 'Confirmed'}}
def _get_download_params(self):
params = self._get_params()
params['uuid_stack'] = ['uuid1']
return params
def test_download_image(self):
params = self._get_download_params()
self.stubs.Set(vm_utils, '_make_uuid_stack',
lambda *a, **kw: ['uuid1'])
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'download_vhd2',
**params)
self.mox.ReplayAll()
self.store.download_image(self.context, self.session,
self.instance, 'fake_image_uuid')
self.mox.VerifyAll()
@mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
@mock.patch.object(random, 'shuffle')
@mock.patch.object(time, 'sleep')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def test_download_image_retry(self, mock_fault, mock_sleep,
mock_shuffle, mock_make_uuid_stack):
params = self._get_download_params()
self.flags(num_retries=2, group='glance')
params.pop("endpoint")
calls = [mock.call('glance.py', 'download_vhd2',
endpoint='http://10.0.1.1:9292',
**params),
mock.call('glance.py', 'download_vhd2',
endpoint='http://10.0.0.1:9293',
**params)]
glance_api_servers = ['10.0.1.1:9292',
'http://10.0.0.1:9293']
self.flags(api_servers=glance_api_servers, group='glance')
with (mock.patch.object(self.session, 'call_plugin_serialized')
) as mock_call_plugin_serialized:
error_details = ["", "", "RetryableError", ""]
error = self.session.XenAPI.Failure(details=error_details)
mock_call_plugin_serialized.side_effect = [error, "success"]
self.store.download_image(self.context, self.session,
self.instance, 'fake_image_uuid')
mock_call_plugin_serialized.assert_has_calls(calls)
self.assertEqual(1, mock_fault.call_count)
def _get_upload_params(self, auto_disk_config=True,
expected_os_type='default'):
params = self._get_params()
params['vdi_uuids'] = ['fake_vdi_uuid']
params['properties'] = {'auto_disk_config': auto_disk_config,
'os_type': expected_os_type}
return params
def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
params = self._get_upload_params(auto_disk_config, expected_os_type)
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params)
self.mox.ReplayAll()
self.store.upload_image(self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image(self):
self._test_upload_image(True)
def test_upload_image_None_os_type(self):
self.instance['os_type'] = None
self._test_upload_image(True, 'linux')
def test_upload_image_no_os_type(self):
del self.instance['os_type']
self._test_upload_image(True, 'linux')
def test_upload_image_auto_config_disk_disabled(self):
sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
self.instance["system_metadata"] = sys_meta
self._test_upload_image("disabled")
def test_upload_image_raises_exception(self):
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(RuntimeError)
self.mox.ReplayAll()
self.assertRaises(RuntimeError, self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image_retries_then_raises_exception(self):
self.flags(num_retries=2, group='glance')
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.mox.StubOutWithMock(time, 'sleep')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
error_details = ["", "", "RetryableError", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(0.5)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(1)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
self.mox.ReplayAll()
self.assertRaises(exception.CouldNotUploadImage,
self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image_retries_on_signal_exception(self):
self.flags(num_retries=2, group='glance')
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.mox.StubOutWithMock(time, 'sleep')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
error_details = ["", "task signaled", "", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(0.5)
# Note(johngarbutt) XenServer 6.1 and later has this error
error_details = ["", "signal: SIGTERM", "", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(1)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params)
self.mox.ReplayAll()
self.store.upload_image(self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
|
"""
This module defines the Connection class.
"""
from __future__ import unicode_literals
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import requests
import logging
class Connection(object):
""" Creates a connection to Space Platform mimicking a GUI login.
This class is **not** thread-safe. It is up to the users of the class to
ensure thread safety. The ``rest.Space`` class uses this class for
supporting session-based connections to Junos Space. Thread-safety
requirements are met by that class.
"""
def __init__(self,
homeurl,
username=None,
password=None,
cert=None,
our_ip=None):
self._logger = logging.getLogger('root')
self.homeurl = homeurl + '/mainui'
self.authurl = homeurl + '/mainui/j_security_check'
self.session = None
if username is not None:
if password is None:
raise ValueError('password is mandatory along with username')
if cert is not None:
raise ValueError('You must provide only one of username+password or cert')
else:
if password is not None:
raise ValueError('password is valid only along with username')
if cert is None:
raise ValueError('You must provide one of username+password or cert')
self.username = username
self.password = password
self.our_ip = our_ip
self.cert = cert
self._logger.debug("Connection: Initiating login to %s", self.homeurl)
self.login()
def login(self):
""" Login to Space """
self.session = requests.Session()
sess = self.session
if self.our_ip is None:
resp = sess.get(self.homeurl, cert=self.cert, verify=False)
#self._logger.debug(resp.status_code)
#self._logger.debug(resp.headers)
#self._logger.debug(resp.text)
            # Extract the ipAddr and code variables embedded in the form validation code
ip_addr_start_idx = resp.text.find("var ipAddr = ")
if ip_addr_start_idx < 0:
self.check_login_status()
return
ip_addr_end_idx = resp.text.find("\n", ip_addr_start_idx)
ip_addr_line = resp.text[ip_addr_start_idx : ip_addr_end_idx]
ip_addr_items = ip_addr_line.split("=", 2)
ip_addr = ip_addr_items[1].strip("'; ").strip()
#codeStartIdx = r.text.find("var code = ", ip_addr_end_idx);
#codeEndIdx = r.text.find("\n", codeStartIdx);
#codeLine = r.text[codeStartIdx : codeEndIdx]
#codeItems = codeLine.split("=", 2);
            #code = codeItems[1].strip("'; ").strip();
#form_username = self.username + '%' + code + '@' + ip_addr;
else:
resp = sess.get(self.homeurl, cert=self.cert, verify=False)
ip_addr = self.our_ip
form_username = self.username + '@' + ip_addr
data = {
"j_screen_username" : self.username,
"j_username" : form_username,
"j_password" : self.password
}
self._logger.debug(data)
resp = sess.post(self.authurl, data=data, cert=self.cert, verify=False)
#self._logger.debug(resp.status_code)
#self._logger.debug(resp.headers)
#self._logger.debug(resp.text)
self.check_login_status()
def is_logged_in(self):
""" Checks if a login has been established """
return self.session is not None
def check_login_status(self):
""" Check login-status """
if not self.is_logged_in():
raise Exception("Not logged in")
resp = self.session.get(self.homeurl, verify=False)
ip_addr_start_idx = resp.text.find("var ipAddr = ")
if ip_addr_start_idx >= 0:
raise Exception("Not in a logged-in session.")
def get_session(self):
""" Return the HTTP session object """
if self.is_logged_in():
return self.session
else:
raise Exception("Not logged in")
def logout(self):
""" Logout from Space Server """
logout_url = self.homeurl + "/unsecured/logout.jsp"
resp = self.session.get(logout_url, verify=False)
#self._logger.debug(resp.status_code)
#self._logger.debug(resp.headers)
#self._logger.debug(resp.text)
if resp.status_code == 200:
self.session = None
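# Usage sketch (URL and credentials are placeholders):
#
# conn = Connection('https://space.example.net', username='super', password='s3cret')
# session = conn.get_session()  # requests.Session carrying the login cookies
# conn.logout()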
|
import Utils
from Utils import printe
class CommandBuilder(object):
def __init__(self, *command_args):
self.command_args = list(command_args)
def append(self, *args):
for arg in args:
if isinstance(arg, str):
self.command_args += [arg]
elif isinstance(arg, list) or isinstance(arg, tuple):
for sub_arg in arg:
self.append(sub_arg)
else:
printe('Error appending argument of unknown type: {}'.format(
str(type(arg))), terminate=True)
return self
def debug(self):
return Utils.debug(*self.command_args)
def run(self, replaceForeground=False):
return Utils.run(*self.command_args,
replaceForeground=replaceForeground)
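# Usage sketch (command is illustrative): arguments may be strings or
# nested lists/tuples, which append() flattens in order.
#
# CommandBuilder('rsync', ['-a', ('--delete',)], 'src/', 'dest/').run()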
|
import copy
import os
import re
import socket
import sys
import tempfile
from datetime import datetime
from subprocess import CalledProcessError
from subprocess import check_output, STDOUT
import termios
import json
import logging
from pprint import pformat
import yaml
from deepdiff import DeepDiff
LOCAL_IP_ENV = "MY_IP"
LOCAL_IPv6_ENV = "MY_IPv6"
logger = logging.getLogger(__name__)
ETCD_SCHEME = os.environ.get("ETCD_SCHEME", "http")
ETCD_CA = os.environ.get("ETCD_CA_CERT_FILE", "")
ETCD_CERT = os.environ.get("ETCD_CERT_FILE", "")
ETCD_KEY = os.environ.get("ETCD_KEY_FILE", "")
ETCD_HOSTNAME_SSL = "etcd-authority-ssl"
KUBECONFIG = "/home/user/certs/kubeconfig"
API_VERSION = 'projectcalico.org/v3'
ERROR_CONFLICT = "update conflict"
NOT_FOUND = "resource does not exist"
NOT_NAMESPACED = "is not namespaced"
SET_DEFAULT = "Cannot set"
NOT_SUPPORTED = "is not supported on"
KUBERNETES_NP = "kubernetes network policies must be managed through the kubernetes API"
NOT_LOCKED = "Datastore is not locked. Run the `calicoctl datastore migrate lock` command in order to begin migration."
NOT_KUBERNETES = "Invalid datastore type: etcdv3 to import to for datastore migration. Datastore type must be kubernetes"
NO_IPAM = "No IPAM resources specified in file"
class CalicoctlOutput:
"""
CalicoctlOutput contains the output from running a calicoctl command using
the calicoctl function below.
This class contains the command, output and error code (if it failed)
along with YAML/JSON decoded output if the output could be decoded.
"""
def __init__(self, command, output, error=None):
self.command = command
self.output = output
self.error = error
# Attempt to decode the output and store the output format.
self.decoded, self.decoded_format = decode_json_yaml(self.output)
def assert_data(self, data, format="yaml", text=None):
"""
Assert the decoded output from the calicoctl command matches the
supplied data and the expected decoder format.
Args:
data: The data to compare
format: The expected output format of the data.
text: (optional) Expected text in the command output.
"""
self.assert_no_error(text)
assert self.decoded is not None, "No value was decoded from calicoctl response."
if isinstance(data, str):
data, _ = decode_json_yaml(data)
assert data is not None, "String data did not decode"
if format is not None:
assert format == self.decoded_format, "Decoded format is different. " \
"expect %s; got %s" % (format, self.decoded_format)
# Copy and clean the decoded data to allow it to be comparable.
cleaned = clean_calico_data(self.decoded)
assert cmp(cleaned, data) == 0, \
"Items are not the same. Difference is:\n %s" % \
pformat(DeepDiff(cleaned, data), indent=2)
def assert_empty_list(self, kind, format="yaml", text=None):
"""
Assert the calicoctl command output an empty list of the specified
kind.
Args:
kind: The resource kind.
format: The expected output format of the data.
text: (optional) Expected text in the command output.
Returns:
"""
data = make_list(kind, [])
self.assert_data(data, format=format, text=text)
def assert_list(self, kind, items, format="yaml", text=None):
"""
Assert the calicoctl command output a list of the specified
kind.
Args:
kind: The resource kind.
items: A list of the items in the list.
format: The expected output format of the data.
text: (optional) Expected text in the command output.
Returns:
"""
data = make_list(kind, items)
self.assert_data(data, format=format, text=text)
def assert_error(self, text=None):
"""
Assert the calicoctl command exited with an error and did not panic
Args:
text: (optional) Expected text in the command output.
"""
assert self.error, "Expected error running command; \n" \
"command=" + self.command + "\noutput=" + self.output
assert not "panic" in self.output, "Exited with an error due to a panic"
self.assert_output_contains(text)
def assert_no_error(self, text=None):
"""
Assert the calicoctl command did not exit with an error code.
Args:
text: (optional) Expected text in the command output.
"""
assert not self.error, "Expected no error running command; \n" \
"command=" + self.command + "\noutput=" + self.output
# If text is supplied, assert it appears in the output
if text:
self.assert_output_contains(text)
def assert_output_equals(self, text):
"""
Assert the calicoctl command output is exactly the supplied text.
Args:
text: Expected text in the command output.
"""
if not text:
return
assert text == self.output, "Expected output to exactly match; \n" + \
"command=" + self.command + "\noutput=\n" + self.output + \
"\nexpected=\n" + text
def assert_output_equals_ignore_res_version(self, text):
"""
Assert the calicoctl command output is exactly the supplied text.
Args:
text: Expected text in the command output.
"""
if not text:
return
text = re.sub('resourceVersion: ".*?"', 'resourceVersion: "<ignored>"', text)
out = re.sub('resourceVersion: ".*?"', 'resourceVersion: "<ignored>"', self.output)
assert text == out, "Expected output to match after ignoring resource version; \n" + \
"command=" + self.command + "\noutput=\n" + out + \
"\nexpected=\n" + text
def assert_output_contains(self, text):
"""
Assert the calicoctl command output contains the supplied text.
Args:
text: Expected text in the command output.
"""
if not text:
return
assert text in self.output, "Expected text in output; \n" + \
"command=" + self.command + "\noutput=\n" + self.output + \
"\nexpected=\n" + text
def assert_output_not_contains(self, text):
"""
Assert the calicoctl command output does not contain the supplied text.
Args:
text: Expected text in the command output.
"""
if not text:
return
        assert text not in self.output, "Unexpected text in output; \n" + \
"command=" + self.command + "\noutput=\n" + self.output + \
"\nunexpected=\n" + text
def calicoctl(command, data=None, load_as_stdin=False, format="yaml", only_stdout=False, no_config=False, kdd=False, allowVersionMismatch=True):
"""
Convenience function for abstracting away calling the calicoctl
command.
:param command: The calicoctl command line parms as a single string.
:param data: Input data either as a string or a JSON serializable Python
object.
:param load_as_stdin: Load the input data through stdin rather than by
loading from file.
    :param format: Specify the format for loading the data.
    :param only_stdout: Return only the stdout.
    :param no_config: Run calicoctl without any datastore environment
        configured.
    :param kdd: Use the kubernetes datastore driver rather than etcdv3.
    :param allowVersionMismatch: Pass --allow-version-mismatch to calicoctl.
    :return: The output from the command with leading and trailing
    whitespace removed.
"""
# If input data is specified, save it to file in the required format.
if isinstance(data, str):
data, _ = decode_json_yaml(data)
assert data is not None, "String data did not decode"
if data is not None:
if format == "yaml":
writeyaml("/tmp/input-data", data)
else:
writejson("/tmp/input-data", data)
stdin = ''
option_file = ''
if data and load_as_stdin:
stdin = 'cat /tmp/input-data | '
option_file = ' -f -'
elif data and not load_as_stdin:
option_file = ' -f /tmp/input-data'
calicoctl_bin = os.environ.get("CALICOCTL", "/code/bin/calicoctl-linux-amd64")
if allowVersionMismatch:
calicoctl_bin += " --allow-version-mismatch"
if ETCD_SCHEME == "https":
etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL
else:
etcd_auth = "%s:2379" % get_ip()
# Export the environment, in case the command has multiple parts, e.g.
# use of | or ;
#
# Pass in all etcd params, the values will be empty if not set anyway
calicoctl_env_cmd = "export ETCD_ENDPOINTS=%s; " \
"export ETCD_CA_CERT_FILE=%s; " \
"export ETCD_CERT_FILE=%s; " \
"export ETCD_KEY_FILE=%s; " \
"export DATASTORE_TYPE=%s; %s %s" % \
(ETCD_SCHEME+"://"+etcd_auth, ETCD_CA, ETCD_CERT, ETCD_KEY,
"etcdv3", stdin, calicoctl_bin)
if kdd:
calicoctl_env_cmd = "export DATASTORE_TYPE=kubernetes; " \
"export KUBECONFIG=%s; %s %s" % \
(KUBECONFIG, stdin, calicoctl_bin)
    if no_config:
calicoctl_env_cmd = calicoctl_bin
full_cmd = calicoctl_env_cmd + " " + command + option_file
try:
output = log_and_run(full_cmd, stderr=(None if only_stdout else STDOUT))
return CalicoctlOutput(full_cmd, output)
except CalledProcessError as e:
return CalicoctlOutput(full_cmd, e.output, error=e.returncode)
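# Usage sketch (resource kind is illustrative):
#
# res = calicoctl("get bgppeers")
# res.assert_no_error()
# res.assert_empty_list("BGPPeer")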
def clean_calico_data(data, extra_keys_to_remove=None):
"""
Clean the data returned from a calicoctl get command to remove empty
structs, null values and non-configurable fields. This makes comparison
with the input data much simpler.
Args:
data: The data to clean.
extra_keys_to_remove: more keys to remove if needed.
Returns: The cleaned data.
"""
new = copy.deepcopy(data)
# Recursively delete empty structs / nil values and non-configurable
# fields.
def clean_elem(elem, extra_keys):
if isinstance(elem, list):
# Loop through each element in the list
for i in elem:
clean_elem(i, extra_keys)
if isinstance(elem, dict):
# Remove non-settable fields, and recursively clean each value of
# the dictionary, removing nil values or values that are empty
# dicts after cleaning.
del_keys = ['creationTimestamp', 'resourceVersion', 'uid']
if extra_keys is not None:
for extra_key in extra_keys:
del_keys.append(extra_key)
for k, v in elem.iteritems():
clean_elem(v, extra_keys)
if v is None or v == {}:
del_keys.append(k)
for k in del_keys:
if k in elem:
del(elem[k])
clean_elem(new, extra_keys_to_remove)
return new
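# For example (illustrative input), non-configurable keys and empty values
# are stripped:
#
# clean_calico_data({'metadata': {'name': 'p1', 'uid': 'x'}, 'spec': {}})
#   -> {'metadata': {'name': 'p1'}}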
def decode_json_yaml(value):
try:
decoded = json.loads(value)
# fix the python datetime back into isoformat with empty timezone information
decoded = find_and_format_creation_timestamp(decoded)
return decoded, "json"
except ValueError:
pass
try:
decoded = yaml.safe_load(value)
# fix the python datetime back into isoformat with empty timezone information
decoded = find_and_format_creation_timestamp(decoded)
return decoded, "yaml"
except yaml.YAMLError:
pass
return None, None
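# For example, decode_json_yaml('{"a": 1}') returns ({'a': 1}, "json"),
# decode_json_yaml('a: 1') returns ({'a': 1}, "yaml"), and input that
# neither parser accepts yields (None, None).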
def find_and_format_creation_timestamp(decoded):
if decoded:
if 'items' in decoded:
for i in xrange(len(decoded['items'])):
decoded['items'][i] = format_creation_timestamp(decoded['items'][i])
else:
decoded = format_creation_timestamp(decoded)
return decoded
def format_creation_timestamp(decoded):
if isinstance(decoded, dict) and 'metadata' in decoded and 'creationTimestamp' in decoded['metadata']:
if isinstance(decoded['metadata']['creationTimestamp'], datetime):
decoded['metadata']['creationTimestamp'] = decoded.get('metadata', {}). \
get('creationTimestamp', datetime.utcnow()).isoformat() + 'Z'
return decoded
def writeyaml(filename, data):
"""
Converts a python dict to yaml and outputs to a file.
:param filename: filename to write
:param data: dictionary to write out as yaml
"""
with open(filename, 'w') as f:
text = yaml.dump(data, default_flow_style=False)
logger.debug("Writing %s: \n%s" % (filename, truncate_for_log(text, 4000)))
f.write(text)
def writejson(filename, data):
"""
Converts a python dict to json and outputs to a file.
:param filename: filename to write
:param data: dictionary to write out as json
"""
with open(filename, 'w') as f:
text = json.dumps(data,
sort_keys=True,
indent=2,
separators=(',', ': '))
logger.debug("Writing %s: \n%s" % (filename, truncate_for_log(text, 4000)))
f.write(text)
def truncate_for_log(text, length):
    if len(text) <= length:
return text
return text[:length] + "... <truncated>"
def get_ip(v6=False):
"""
Return a string of the IP of the hosts interface.
Try to get the local IP from the environment variables. This allows
testers to specify the IP address in cases where there is more than one
configured IP address for the test system.
"""
env = LOCAL_IPv6_ENV if v6 else LOCAL_IP_ENV
ip = os.environ.get(env)
if not ip:
logger.debug("%s not set; try to auto detect IP.", env)
socket_type = socket.AF_INET6 if v6 else socket.AF_INET
s = socket.socket(socket_type, socket.SOCK_DGRAM)
remote_ip = "2001:4860:4860::8888" if v6 else "8.8.8.8"
s.connect((remote_ip, 0))
ip = s.getsockname()[0]
s.close()
else:
logger.debug("Got local IP from %s=%s", env, ip)
return ip
_term_settings = termios.tcgetattr(sys.stdin.fileno())
def log_and_run(command, raise_exception_on_failure=True, stderr=STDOUT):
def log_output(results):
        if results is None:
            logger.info(" # <no output>")
            return
lines = results.split("\n")
for line in lines:
logger.info(" # %s", line.rstrip())
try:
logger.info("%s", command)
try:
results = check_output(command, shell=True, stderr=stderr).rstrip()
finally:
# Restore terminal settings in case the command we ran manipulated
# them. Note: under concurrent access, this is still not a perfect
# solution since another thread's child process may break the
# settings again before we log below.
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, _term_settings)
log_output(results)
return results
except CalledProcessError as e:
# Wrap the original exception with one that gives a better error
# message (including command output).
logger.info(" # Return code: %s", e.returncode)
log_output(e.output)
if raise_exception_on_failure:
raise e
def curl_etcd(path, options=None, recursive=True, ip=None):
"""
Perform a curl to etcd, returning JSON decoded response.
:param path: The key path to query
:param options: Additional options to include in the curl
    :param recursive: Whether we want recursive query or not
    :param ip: The IP address of the etcd host, used when etcd is not
        running with SSL/TLS
    :return: The JSON decoded response.
"""
if options is None:
options = []
if ETCD_SCHEME == "https":
# Etcd is running with SSL/TLS, require key/certificates
rc = check_output(
"curl --cacert %s --cert %s --key %s "
"-sL https://%s:2379/v2/keys/%s?recursive=%s %s"
% (ETCD_CA, ETCD_CERT, ETCD_KEY, ETCD_HOSTNAME_SSL,
path, str(recursive).lower(), " ".join(options)),
shell=True)
else:
rc = check_output(
"curl -sL http://%s:2379/v2/keys/%s?recursive=%s %s"
% (ip, path, str(recursive).lower(), " ".join(options)),
shell=True)
logger.info("etcd RC: %s" % rc.strip())
return json.loads(rc.strip())
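# Illustrative call (hypothetical values): curl_etcd("calico", ip="10.0.0.1")
# runs roughly
#   curl -sL http://10.0.0.1:2379/v2/keys/calico?recursive=true
# and returns the decoded JSON response body.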
def wipe_etcd(ip):
# Delete /calico if it exists. This ensures each test has an empty data
# store at start of day.
curl_etcd("calico", options=["-XDELETE"], ip=ip)
# Disable Usage Reporting to usage.projectcalico.org
# We want to avoid polluting analytics data with unit test noise
curl_etcd("calico/v1/config/UsageReportingEnabled",
options=["-XPUT -d value=False"], ip=ip)
etcd_container_name = "calico-etcd"
tls_vars = ""
if ETCD_SCHEME == "https":
# Etcd is running with SSL/TLS, require key/certificates
etcd_container_name = "calico-etcd-ssl"
tls_vars = ("ETCDCTL_CACERT=/etc/calico/certs/ca.pem " +
"ETCDCTL_CERT=/etc/calico/certs/client.pem " +
"ETCDCTL_KEY=/etc/calico/certs/client-key.pem ")
check_output("docker exec " + etcd_container_name + " sh -c '" + tls_vars +
"ETCDCTL_API=3 etcdctl del --prefix /calico" +
"'", shell=True)
def make_list(kind, items):
"""
    Convert a list of resources into a single List resource type.
    Args:
        kind: The kind of the resources; "List" is appended if not already
            present in the kind string.
        items: A list of the resources to place in the List object.
    Returns:
        A dict representing the List resource.
"""
assert isinstance(items, list)
if "List" not in kind:
kind = kind + "List"
return {
'kind': kind,
'apiVersion': API_VERSION,
'items': items,
}
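# Illustrative result (hypothetical resources p1, p2):
#   make_list("NetworkPolicy", [p1, p2]) returns
#   {'kind': 'NetworkPolicyList', 'apiVersion': API_VERSION,
#    'items': [p1, p2]}
# while a kind already containing "List" is left unchanged.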
def name(data):
"""
Returns the name of the resource in the supplied data
Args:
data: A dictionary containing the resource.
Returns: The resource name.
"""
return data['metadata']['name']
def namespace(data):
"""
Returns the namespace of the resource in the supplied data
Args:
data: A dictionary containing the resource.
    Returns: The resource namespace.
"""
return data['metadata']['namespace']
def set_cluster_version(calico_version="", kdd=False):
"""
Set Calico version in ClusterInformation using the calico_version_helper go app.
Args:
calico_version: string with version to set
kdd: optional bool to indicate use of kubernetes datastore (default False)
Returns: The command output
"""
if ETCD_SCHEME == "https":
etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL
else:
etcd_auth = "%s:2379" % get_ip()
calico_helper_bin = "/code/tests/fv/helper/bin/calico_version_helper"
full_cmd = "export ETCD_ENDPOINTS=%s; " \
"export ETCD_CA_CERT_FILE=%s; " \
"export ETCD_CERT_FILE=%s; " \
"export ETCD_KEY_FILE=%s; " \
"export DATASTORE_TYPE=%s; %s" % \
(ETCD_SCHEME+"://"+etcd_auth, ETCD_CA, ETCD_CERT, ETCD_KEY,
"etcdv3", calico_helper_bin)
if kdd:
full_cmd = "export DATASTORE_TYPE=kubernetes; " \
"export KUBECONFIG=%s; %s" % \
(KUBECONFIG, calico_helper_bin)
if calico_version:
full_cmd += " -v " + calico_version
try:
output = log_and_run(full_cmd, stderr=STDOUT)
return CalicoctlOutput(full_cmd, output)
except CalledProcessError as e:
return CalicoctlOutput(full_cmd, e.output, error=e.returncode)
|
# Deployment notes: collect the static assets, then run the development
# server without its automatic static file handling:
#
#   python manage.py collectstatic
#   python manage.py runserver --nostatic
urlpatterns += patterns('',
(r'^static/suit/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.DJANGO_SUIT_TEMPLATE}),
)
urlpatterns += patterns('',
(r'^static/admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.DJANGO_ADMIN_TEMPLATE}),
)
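# Path constants referenced above as document roots; these typically live in
# settings.py: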
SITE_PATH = os.path.dirname(__file__)
REPO_ROOT = os.path.normpath(os.path.join(SITE_PATH, '..'))
MEDIA_ROOT = os.path.join(REPO_ROOT, 'public/media')
DJANGO_SUIT_TEMPLATE = os.path.join(REPO_ROOT, 'static/suit')
DJANGO_EDITOR = os.path.join(REPO_ROOT, 'static/django_summernote')
DJANGO_ADMIN_TEMPLATE = os.path.join(REPO_ROOT, 'static/admin')
|
from __future__ import absolute_import
from importlib import import_module
import logging
import os
import sys
import click
from colorlog import ColoredFormatter
logger = logging.getLogger(__name__)
def setup_logging(): # pragma: no cover
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = ColoredFormatter(
"%(log_color)s%(levelname)-8s%(reset)s %(asctime)s %(green)s%(name)s"
"%(reset)s %(message)s",
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'blue',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
}
)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def import_queue(location):
module, attr = location.rsplit('.', 1)
module = import_module(module)
queue = getattr(module, attr)
    if callable(queue):
queue = queue()
return queue
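# Illustrative resolution (hypothetical module): import_queue('config.q')
# imports the 'config' module and returns its 'q' attribute; if that
# attribute is callable (e.g. a factory function), it is called and the
# result is used as the queue.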
@click.command()
@click.option(
'--path', '-p',
help='Import path. By default, this is the current working directory.')
@click.option(
'--pid',
help='Write the process ID to the specified file.')
@click.argument(
'queue',
nargs=1,
required=True)
def main(path, pid, queue):
"""
Standalone PSQ worker.
The queue argument must be the full importable path to a psq.Queue
instance.
Example usage:
psqworker config.q
psqworker --path /opt/app queues.fast
"""
setup_logging()
if pid:
with open(os.path.expanduser(pid), "w") as f:
f.write(str(os.getpid()))
if not path:
path = os.getcwd()
sys.path.insert(0, path)
queue = import_queue(queue)
import psq
worker = psq.Worker(queue=queue)
worker.listen()
if __name__ == '__main__':
main()
|
import json
import tempfile
import fixtures
from lxml import etree
from oslo_config import cfg
import requests
import testtools
from testtools import content as test_content
from testtools import matchers
import urllib.parse as urlparse
from os_collect_config import cfn
from os_collect_config import collect
from os_collect_config import exc
META_DATA = {u'int1': 1,
u'strfoo': u'foo',
u'map_ab': {
u'a': 'apple',
u'b': 'banana',
}}
SOFTWARE_CONFIG_DATA = {
u'old-style': u'value',
u'deployments': [
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'group': 'Heat::Ungrouped',
u'name': 'dep-name1',
u'outputs': None,
u'options': None,
u'config': {
u'config1': 'value1'
}
},
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'group': 'os-apply-config',
u'name': 'dep-name2',
u'outputs': None,
u'options': None,
u'config': {
u'config2': 'value2'
}
},
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'name': 'dep-name3',
u'outputs': None,
u'options': None,
u'config': {
u'config3': 'value3'
}
},
{
u'inputs': [],
u'group': 'ignore_me',
u'name': 'ignore_me_name',
u'outputs': None,
u'options': None,
u'config': 'ignore_me_config'
}
]
}
SOFTWARE_CONFIG_IMPOSTER_DATA = {
u'old-style': u'value',
u'deployments': {
u"not": u"a list"
}
}
class FakeResponse(dict):
def __init__(self, text):
self.text = text
def raise_for_status(self):
pass
class FakeReqSession(object):
SESSION_META_DATA = META_DATA
def __init__(self, testcase, expected_netloc):
self._test = testcase
self._expected_netloc = expected_netloc
self.verify = False
def get(self, url, params, headers, verify=None, timeout=None):
self._test.addDetail('url', test_content.text_content(url))
url = urlparse.urlparse(url)
self._test.assertEqual(self._expected_netloc, url.netloc)
self._test.assertEqual('/v1/', url.path)
self._test.assertEqual('application/json',
headers['Content-Type'])
self._test.assertIn('SignatureVersion', params)
self._test.assertEqual('2', params['SignatureVersion'])
self._test.assertIn('Signature', params)
self._test.assertIn('Action', params)
self._test.assertEqual('DescribeStackResource',
params['Action'])
self._test.assertIn('LogicalResourceId', params)
self._test.assertEqual('foo', params['LogicalResourceId'])
self._test.assertEqual(10, timeout)
root = etree.Element('DescribeStackResourceResponse')
result = etree.SubElement(root, 'DescribeStackResourceResult')
detail = etree.SubElement(result, 'StackResourceDetail')
metadata = etree.SubElement(detail, 'Metadata')
metadata.text = json.dumps(self.SESSION_META_DATA)
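        # Remember that a verify argument was supplied; the CA certificate
        # test asserts on this flag.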
if verify is not None:
self.verify = True
return FakeResponse(etree.tostring(root))
class FakeRequests(object):
exceptions = requests.exceptions
def __init__(self, testcase, expected_netloc='192.0.2.1:8000'):
self._test = testcase
self._expected_netloc = expected_netloc
def Session(self):
return FakeReqSession(self._test, self._expected_netloc)
class FakeReqSessionSoftwareConfig(FakeReqSession):
SESSION_META_DATA = SOFTWARE_CONFIG_DATA
class FakeRequestsSoftwareConfig(FakeRequests):
FAKE_SESSION = FakeReqSessionSoftwareConfig
def Session(self):
return self.FAKE_SESSION(self._test, self._expected_netloc)
class FakeReqSessionConfigImposter(FakeReqSession):
SESSION_META_DATA = SOFTWARE_CONFIG_IMPOSTER_DATA
class FakeRequestsConfigImposter(FakeRequestsSoftwareConfig):
FAKE_SESSION = FakeReqSessionConfigImposter
class FakeFailRequests(object):
exceptions = requests.exceptions
class Session(object):
def get(self, url, params, headers, verify=None, timeout=None):
raise requests.exceptions.HTTPError(403, 'Forbidden')
class TestCfnBase(testtools.TestCase):
def setUp(self):
super(TestCfnBase, self).setUp()
self.log = self.useFixture(fixtures.FakeLogger())
self.useFixture(fixtures.NestedTempfile())
self.hint_file = tempfile.NamedTemporaryFile()
self.hint_file.write(u'http://192.0.2.1:8000'.encode('utf-8'))
self.hint_file.flush()
self.addCleanup(self.hint_file.close)
collect.setup_conf()
cfg.CONF.cfn.heat_metadata_hint = self.hint_file.name
cfg.CONF.cfn.metadata_url = None
cfg.CONF.cfn.path = ['foo.Metadata']
cfg.CONF.cfn.access_key_id = '0123456789ABCDEF'
cfg.CONF.cfn.secret_access_key = 'FEDCBA9876543210'
class TestCfn(TestCfnBase):
def test_collect_cfn(self):
cfn_md = cfn.Collector(requests_impl=FakeRequests(self)).collect()
self.assertThat(cfn_md, matchers.IsInstance(list))
self.assertEqual('cfn', cfn_md[0][0])
cfn_md = cfn_md[0][1]
for k in ('int1', 'strfoo', 'map_ab'):
self.assertIn(k, cfn_md)
self.assertEqual(cfn_md[k], META_DATA[k])
self.assertEqual('', self.log.output)
def test_collect_with_ca_cert(self):
cfn.CONF.cfn.ca_certificate = "foo"
collector = cfn.Collector(requests_impl=FakeRequests(self))
collector.collect()
self.assertTrue(collector._session.verify)
def test_collect_cfn_fail(self):
cfn_collect = cfn.Collector(requests_impl=FakeFailRequests)
self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)
self.assertIn('Forbidden', self.log.output)
def test_collect_cfn_no_path(self):
cfg.CONF.cfn.path = None
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('No path configured', self.log.output)
def test_collect_cfn_bad_path(self):
cfg.CONF.cfn.path = ['foo']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('Path not in format', self.log.output)
def test_collect_cfn_no_metadata_url(self):
cfg.CONF.cfn.heat_metadata_hint = None
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('No metadata_url configured', self.log.output)
def test_collect_cfn_missing_sub_path(self):
cfg.CONF.cfn.path = ['foo.Metadata.not_there']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)
self.assertIn('Sub-key not_there does not exist', self.log.output)
def test_collect_cfn_sub_path(self):
cfg.CONF.cfn.path = ['foo.Metadata.map_ab']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
content = cfn_collect.collect()
self.assertThat(content, matchers.IsInstance(list))
self.assertEqual('cfn', content[0][0])
content = content[0][1]
self.assertIn(u'b', content)
self.assertEqual(u'banana', content[u'b'])
def test_collect_cfn_metadata_url_overrides_hint(self):
cfg.CONF.cfn.metadata_url = 'http://127.0.1.1:8000/v1/'
cfn_collect = cfn.Collector(
requests_impl=FakeRequests(self,
expected_netloc='127.0.1.1:8000'))
cfn_collect.collect()
class TestCfnSoftwareConfig(TestCfnBase):
def test_collect_cfn_software_config(self):
cfn_md = cfn.Collector(
requests_impl=FakeRequestsSoftwareConfig(self)).collect()
self.assertThat(cfn_md, matchers.IsInstance(list))
self.assertEqual('cfn', cfn_md[0][0])
cfn_config = cfn_md[0][1]
self.assertThat(cfn_config, matchers.IsInstance(dict))
self.assertEqual(set(['old-style', 'deployments']),
set(cfn_config.keys()))
self.assertIn('deployments', cfn_config)
self.assertThat(cfn_config['deployments'], matchers.IsInstance(list))
self.assertEqual(4, len(cfn_config['deployments']))
deployment = cfn_config['deployments'][0]
self.assertIn('inputs', deployment)
self.assertThat(deployment['inputs'], matchers.IsInstance(list))
self.assertEqual(1, len(deployment['inputs']))
self.assertEqual('dep-name1', cfn_md[1][0])
self.assertEqual('value1', cfn_md[1][1]['config1'])
self.assertEqual('dep-name2', cfn_md[2][0])
self.assertEqual('value2', cfn_md[2][1]['config2'])
def test_collect_cfn_deployments_not_list(self):
cfn_md = cfn.Collector(
requests_impl=FakeRequestsConfigImposter(self)).collect()
self.assertEqual(1, len(cfn_md))
self.assertEqual('cfn', cfn_md[0][0])
self.assertIn('not', cfn_md[0][1]['deployments'])
self.assertEqual('a list', cfn_md[0][1]['deployments']['not'])
|
import streamcorpus as sc
import cuttsum.events
import cuttsum.corpora
from cuttsum.trecdata import SCChunkResource
from cuttsum.pipeline import ArticlesResource, DedupedArticlesResource
import os
import pandas as pd
from datetime import datetime
from collections import defaultdict
import matplotlib.pylab as plt
plt.style.use('ggplot')
pd.set_option('display.max_rows', 500)
pd.set_option('display.width', 200)
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
def format_int(x):
return locale.format("%d", x, grouping=True)
def epoch(dt):
return int((dt - datetime(1970, 1, 1)).total_seconds())
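# e.g. epoch(datetime(1970, 1, 2)) == 86400: seconds since the Unix epoch.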
chunk_res = SCChunkResource()
articles_res = ArticlesResource()
ded_articles_res = DedupedArticlesResource()
data = []
event2ids = defaultdict(set)
fltr_event2ids = defaultdict(set)
for event in cuttsum.events.get_events():
corpus = cuttsum.corpora.get_raw_corpus(event)
hours = event.list_event_hours()
hour2ded = defaultdict(int)
hour2ded_fltr = defaultdict(int)
ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8)
if ded_df is not None:
if event.query_num > 25:
for ids in ded_df["stream ids"].apply(eval).tolist():
for id1 in ids:
event2ids[event.fs_name()].add(id1)
for _, row in ded_df.iterrows():
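            # Bucket each deduped article by the hour containing its
            # earliest timestamp.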
dt = datetime.utcfromtimestamp(row["earliest"])
hour = datetime(dt.year, dt.month, dt.day, dt.hour)
hour2ded[hour] += 1
if row["match"] == True:
hour2ded_fltr[hour] += 1
hour2goose = defaultdict(int)
for hour in hours:
path = articles_res.get_chunk_path(event, "goose", hour, corpus)
if path is None:
continue
#print path
fname = os.path.split(path)[1]
num_goose = int(fname.split("-")[0])
hour2goose[hour] = num_goose
for hour in hours:
raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event)
num_raw_si = 0
for chunk in raw_chunks:
fname = os.path.split(chunk)[1]
num_raw_si += int(fname.split("-")[1])
#num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour))
data.append({
"event": event.query_id,
"title": event.title,
"hour": hour,
"raw articles": num_raw_si,
"goose articles": hour2goose[hour],
"deduped articles": hour2ded[hour],
"deduped match articles": hour2ded_fltr[hour],
})
for event in cuttsum.events.get_events():
if event.query_num < 26: continue
corpus = cuttsum.corpora.FilteredTS2015()
hours = event.list_event_hours()
hour2ded = defaultdict(int)
hour2ded_fltr = defaultdict(int)
ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8)
if ded_df is not None:
for ids in ded_df["stream ids"].apply(eval).tolist():
for id1 in ids:
fltr_event2ids[event.fs_name()].add(id1)
for _, row in ded_df.iterrows():
dt = datetime.utcfromtimestamp(row["earliest"])
hour = datetime(dt.year, dt.month, dt.day, dt.hour)
hour2ded[hour] += 1
if row["match"] == True:
hour2ded_fltr[hour] += 1
hour2goose = defaultdict(int)
for hour in hours:
path = articles_res.get_chunk_path(event, "goose", hour, corpus)
if path is None:
continue
print path
fname = os.path.split(path)[1]
num_goose = int(fname.split("-")[0])
hour2goose[hour] = num_goose
for hour in hours:
print hour
raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event)
num_raw_si = 0
for chunk in raw_chunks:
fname = os.path.split(chunk)[1]
#num_raw_si += int(fname.split("-")[1])
with sc.Chunk(path=chunk, mode="rb", message=corpus.sc_msg()) as c:
for si in c:
num_raw_si += 1
#num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour))
data.append({
"event": event.query_id + " (filtered)",
"title": event.title,
"hour": hour,
"raw articles": num_raw_si,
"goose articles": hour2goose[hour],
"deduped articles": hour2ded[hour],
"deduped match articles": hour2ded_fltr[hour],
})
df = pd.DataFrame(data)
cols = ["raw articles", "goose articles", "deduped articles",
"deduped match articles"]
df_sum = df.groupby("event")[cols].sum()
df_sum["raw articles"] = df_sum["raw articles"].apply(format_int)
df_sum["goose articles"] = df_sum["goose articles"].apply(format_int)
df_sum["deduped articles"] = df_sum["deduped articles"].apply(format_int)
df_sum["deduped match articles"] = df_sum["deduped match articles"].apply(format_int)
print df_sum
print
coverage = []
for event in cuttsum.events.get_events():
if event.query_num < 26: continue
isect = event2ids[event.fs_name()].intersection(fltr_event2ids[event.fs_name()])
n_isect = len(isect)
n_unfltr = max(len(event2ids[event.fs_name()]), 1)
n_fltr = max(len(fltr_event2ids[event.fs_name()]), 1)
print event.fs_name()
print n_isect, float(n_isect) / n_fltr, float(n_isect) / n_unfltr
coverage.append({
"event": event.query_id,
"intersection": n_isect,
"isect/n_2015F": float(n_isect) / n_fltr,
"isect/n_2014": float(n_isect) / n_unfltr,
})
df = pd.DataFrame(coverage)
df_u = df.mean()
df_u["event"] = "mean"
print pd.concat([df, df_u.to_frame().T]).set_index("event")
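# NOTE: the exit() below makes the remaining reporting and plotting code
# unreachable; drop it to regenerate article_count.tex and the plots/ PNGs.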
exit()
with open("article_count.tex", "w") as f:
f.write(df_sum.to_latex())
import os
if not os.path.exists("plots"):
os.makedirs("plots")
import cuttsum.judgements
ndf = cuttsum.judgements.get_merged_dataframe()
for (event, title), group in df.groupby(["event", "title"]):
matches = ndf[ndf["query id"] == event]
#fig = plt.figure()
group = group.set_index(["hour"])
#ax = group[["goose articles", "deduped articles", "deduped match articles"]].plot()
linex = epoch(group.index[10])
ax = plt.plot(group.index, group["goose articles"], label="goose")
ax = plt.plot(group.index, group["deduped articles"], label="dedupe")
ax = plt.plot(group.index, group["deduped match articles"], label="dedupe qmatch")
for nugget, ngroup in matches.groupby("nugget id"):
times = ngroup["update id"].apply(lambda x: datetime.utcfromtimestamp(int(x.split("-")[0])))
#ngroup = ngroup.sort("timestamp")
times.sort()
times = times.reset_index(drop=True)
if len(times) == 0: continue
plt.plot_date(
(times[0], times[0]),
(0, plt.ylim()[1]),
'--', color="black", linewidth=.5, alpha=.5)
plt.gcf().autofmt_xdate()
plt.gcf().suptitle(title)
plt.gcf().savefig(os.path.join("plots", "{}-stream.png".format(event)))
plt.close("all")
|
import netaddr
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
disk_config = 'AUTO'
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServersTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
cls.networks_client = cls.os.networks_client
cls.subnets_client = cls.os.subnets_client
@classmethod
def resource_setup(cls):
cls.set_validation_resources()
super(ServersTestJSON, cls).resource_setup()
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
cls.name = data_utils.rand_name(cls.__name__ + '-server')
cls.password = data_utils.rand_password()
disk_config = cls.disk_config
cls.server_initial = cls.create_test_server(
validatable=True,
wait_until='ACTIVE',
name=cls.name,
metadata=cls.meta,
accessIPv4=cls.accessIPv4,
accessIPv6=cls.accessIPv6,
disk_config=disk_config,
adminPass=cls.password)
cls.server = (cls.client.show_server(cls.server_initial['id'])
['server'])
def _create_net_subnet_ret_net_from_cidr(self, cidr):
name_net = data_utils.rand_name(self.__class__.__name__)
net = self.networks_client.create_network(name=name_net)
self.addCleanup(self.networks_client.delete_network,
net['network']['id'])
subnet = self.subnets_client.create_subnet(
network_id=net['network']['id'],
cidr=cidr,
ip_version=4)
self.addCleanup(self.subnets_client.delete_subnet,
subnet['subnet']['id'])
return net
@test.attr(type='smoke')
@test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
# NOTE(maurosr): See http://tools.ietf.org/html/rfc5952 (section 4)
# Here we compare directly with the canonicalized format.
self.assertEqual(self.server['accessIPv6'],
str(netaddr.IPAddress(self.accessIPv6)))
self.assertEqual(self.name, self.server['name'])
self.assertEqual(self.image_ref, self.server['image']['id'])
self.assertEqual(self.flavor_ref, self.server['flavor']['id'])
self.assertEqual(self.meta, self.server['metadata'])
@test.attr(type='smoke')
@test.idempotent_id('9a438d88-10c6-4bcd-8b5b-5b6e25e1346f')
def test_list_servers(self):
# The created server should be in the list of all servers
body = self.client.list_servers()
servers = body['servers']
        found = any(i['id'] == self.server['id'] for i in servers)
self.assertTrue(found)
@test.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
def test_list_servers_with_detail(self):
# The created server should be in the detailed list of all servers
body = self.client.list_servers(detail=True)
servers = body['servers']
        found = any(i['id'] == self.server['id'] for i in servers)
self.assertTrue(found)
@test.idempotent_id('cbc0f52f-05aa-492b-bdc1-84b575ca294b')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_verify_created_server_vcpus(self):
# Verify that the number of vcpus reported by the instance matches
# the amount stated by the flavor
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
linux_client = remote_client.RemoteClient(
self.get_server_ip(self.server),
self.ssh_user,
self.password,
self.validation_resources['keypair']['private_key'],
server=self.server,
servers_client=self.client)
self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
@test.idempotent_id('ac1ad47f-984b-4441-9274-c9079b7a0666')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_host_name_is_same_as_server_name(self):
# Verify the instance host name is the same as the server name
linux_client = remote_client.RemoteClient(
self.get_server_ip(self.server),
self.ssh_user,
self.password,
self.validation_resources['keypair']['private_key'],
server=self.server,
servers_client=self.client)
hostname = linux_client.get_hostname()
msg = ('Failed while verifying servername equals hostname. Expected '
'hostname "%s" but got "%s".' % (self.name, hostname))
self.assertEqual(self.name.lower(), hostname, msg)
@test.idempotent_id('ed20d3fb-9d1f-4329-b160-543fbd5d9811')
@testtools.skipUnless(
test.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
'ServerGroupAffinityFilter is not available.')
def test_create_server_with_scheduler_hint_group(self):
# Create a server with the scheduler hint "group".
group_id = self.create_test_server_group()['id']
hints = {'group': group_id}
server = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')
# Check a server is in the group
server_group = (self.server_groups_client.show_server_group(group_id)
['server_group'])
self.assertIn(server['id'], server_group['members'])
@test.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
@testtools.skipUnless(CONF.service_available.neutron,
'Neutron service must be available.')
def test_verify_multiple_nics_order(self):
# Verify that the networks order given at the server creation is
# preserved within the server.
net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
networks = [{'uuid': net1['network']['id']},
{'uuid': net2['network']['id']}]
server_multi_nics = self.create_test_server(
networks=networks, wait_until='ACTIVE')
# Cleanup server; this is needed in the test case because with the LIFO
# nature of the cleanups, if we don't delete the server first, the port
# will still be part of the subnet and we'll get a 409 from Neutron
# when trying to delete the subnet. The tear down in the base class
# will try to delete the server and get a 404 but it's ignored so
# we're OK.
def cleanup_server():
self.client.delete_server(server_multi_nics['id'])
waiters.wait_for_server_termination(self.client,
server_multi_nics['id'])
self.addCleanup(cleanup_server)
addresses = (self.client.list_addresses(server_multi_nics['id'])
['addresses'])
# We can't predict the ip addresses assigned to the server on networks.
# Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
        # other times ['19.80.0.3', '19.86.0.3']. So we check that the first
        # address is in the first network and that the second address is in
        # the second network.
addr = [addresses[net1['network']['name']][0]['addr'],
addresses[net2['network']['name']][0]['addr']]
networks = [netaddr.IPNetwork('19.80.0.0/24'),
netaddr.IPNetwork('19.86.0.0/24')]
for address, network in zip(addr, networks):
self.assertIn(address, network)
@test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
@testtools.skipUnless(CONF.service_available.neutron,
'Neutron service must be available.')
def test_verify_duplicate_network_nics(self):
# Verify that server creation does not fail when more than one nic
# is created on the same network.
net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
networks = [{'uuid': net1['network']['id']},
{'uuid': net2['network']['id']},
{'uuid': net1['network']['id']}]
server_multi_nics = self.create_test_server(
networks=networks, wait_until='ACTIVE')
def cleanup_server():
self.client.delete_server(server_multi_nics['id'])
waiters.wait_for_server_termination(self.client,
server_multi_nics['id'])
self.addCleanup(cleanup_server)
addresses = (self.client.list_addresses(server_multi_nics['id'])
['addresses'])
addr = [addresses[net1['network']['name']][0]['addr'],
addresses[net2['network']['name']][0]['addr'],
addresses[net1['network']['name']][1]['addr']]
networks = [netaddr.IPNetwork('19.80.0.0/24'),
netaddr.IPNetwork('19.86.0.0/24'),
netaddr.IPNetwork('19.80.0.0/24')]
for address, network in zip(addr, networks):
self.assertIn(address, network)
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
disk_config = 'AUTO'
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServersWithSpecificFlavorTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
cls.flavor_client = cls.os_adm.flavors_client
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
cls.set_validation_resources()
super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
@test.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
flavor_base = self.flavors_client.show_flavor(
self.flavor_ref)['flavor']
def create_flavor_with_ephemeral(ephem_disk):
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = flavor_base['ram']
vcpus = flavor_base['vcpus']
disk = flavor_base['disk']
if ephem_disk > 0:
# Create a flavor with ephemeral disk
flavor_name = data_utils.rand_name('eph_flavor')
flavor = self.flavor_client.create_flavor(
name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
id=flavor_with_eph_disk_id, ephemeral=ephem_disk)['flavor']
else:
# Create a flavor without ephemeral disk
flavor_name = data_utils.rand_name('no_eph_flavor')
flavor = self.flavor_client.create_flavor(
name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
id=flavor_with_eph_disk_id)['flavor']
self.addCleanup(flavor_clean_up, flavor['id'])
return flavor['id']
def flavor_clean_up(flavor_id):
self.flavor_client.delete_flavor(flavor_id)
self.flavor_client.wait_for_resource_deletion(flavor_id)
flavor_with_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=1)
flavor_no_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=0)
admin_pass = self.image_ssh_password
server_no_eph_disk = self.create_test_server(
validatable=True,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_no_eph_disk_id)
# Get partition number of server without ephemeral disk.
server_no_eph_disk = self.client.show_server(
server_no_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server_no_eph_disk),
self.ssh_user,
admin_pass,
self.validation_resources['keypair']['private_key'],
server=server_no_eph_disk,
servers_client=self.client)
partition_num = len(linux_client.get_partitions().split('\n'))
# Explicit server deletion necessary for Juno compatibility
self.client.delete_server(server_no_eph_disk['id'])
server_with_eph_disk = self.create_test_server(
validatable=True,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id)
server_with_eph_disk = self.client.show_server(
server_with_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server_with_eph_disk),
self.ssh_user,
admin_pass,
self.validation_resources['keypair']['private_key'],
server=server_with_eph_disk,
servers_client=self.client)
partition_num_emph = len(linux_client.get_partitions().split('\n'))
self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
disk_config = 'MANUAL'
@classmethod
def skip_checks(cls):
super(ServersTestManualDisk, cls).skip_checks()
if not CONF.compute_feature_enabled.disk_config:
msg = "DiskConfig extension not enabled."
raise cls.skipException(msg)
|
"""Tests for hparams_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from classifaedes import hparams_lib
import tensorflow.compat.v1 as tf
class HparamsLibTest(tf.test.TestCase):
def testIndentedSerialize(self):
"""Tests that our slightly customized serialization can be parsed.
hparams_lib._human_serialize() uses indented JSON to improve readability.
"""
hps1 = hparams_lib.defaults()
serialized = hparams_lib._human_serialize(hps1)
hps2 = hparams_lib.defaults()
hps2.parse_json(serialized)
self.assertDictEqual(hps1.values(), hps2.values())
if __name__ == '__main__':
tf.test.main()
|
import config
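# Minimal CGI redirect: a Status header and a Location header, followed by
# a blank line that terminates the header block.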
print 'Status: 302 Found'
print 'Location: http://' + config.custom_url
print ''
|
from paypalrestsdk import BillingAgreement, ResourceNotFound
import logging
BILLING_AGREEMENT_ID = "I-HT38K76XPMGJ"
try:
billing_agreement = BillingAgreement.find(BILLING_AGREEMENT_ID)
print("Billing Agreement [%s] has state %s" % (billing_agreement.id, billing_agreement.state))
suspend_note = {
"note": "Suspending the agreement"
}
if billing_agreement.suspend(suspend_note):
# Would expect state has changed to Suspended
billing_agreement = BillingAgreement.find(BILLING_AGREEMENT_ID)
print("Billing Agreement [%s] has state %s" % (billing_agreement.id, billing_agreement.state))
reactivate_note = {
"note": "Reactivating the agreement"
}
if billing_agreement.reactivate(reactivate_note):
# Would expect state has changed to Active
billing_agreement = BillingAgreement.find(BILLING_AGREEMENT_ID)
print("Billing Agreement [%s] has state %s" % (billing_agreement.id, billing_agreement.state))
else:
print(billing_agreement.error)
else:
print(billing_agreement.error)
except ResourceNotFound as error:
print("Billing Agreement Not Found")
|
"""Unit tests for the utility functions used by the placement API."""
import fixtures
from oslo_middleware import request_id
import webob
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement import util
from nova import objects
from nova import test
from nova.tests import uuidsentinel
class TestCheckAccept(test.NoDBTestCase):
"""Confirm behavior of util.check_accept."""
@staticmethod
@util.check_accept('application/json', 'application/vnd.openstack')
def handler(req):
"""Fake handler to test decorator."""
return True
def test_fail_no_match(self):
req = webob.Request.blank('/')
req.accept = 'text/plain'
error = self.assertRaises(webob.exc.HTTPNotAcceptable,
self.handler, req)
self.assertEqual(
'Only application/json, application/vnd.openstack is provided',
str(error))
def test_fail_complex_no_match(self):
req = webob.Request.blank('/')
req.accept = 'text/html;q=0.9,text/plain,application/vnd.aws;q=0.8'
error = self.assertRaises(webob.exc.HTTPNotAcceptable,
self.handler, req)
self.assertEqual(
'Only application/json, application/vnd.openstack is provided',
str(error))
def test_success_no_accept(self):
req = webob.Request.blank('/')
self.assertTrue(self.handler(req))
def test_success_simple_match(self):
req = webob.Request.blank('/')
req.accept = 'application/json'
self.assertTrue(self.handler(req))
def test_success_complex_any_match(self):
req = webob.Request.blank('/')
req.accept = 'application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
self.assertTrue(self.handler(req))
def test_success_complex_lower_quality_match(self):
req = webob.Request.blank('/')
req.accept = 'application/xml;q=0.9,application/vnd.openstack;q=0.8'
self.assertTrue(self.handler(req))
class TestExtractJSON(test.NoDBTestCase):
# Although the intent of this test class is not to test that
# schemas work, we may as well use a real one to ensure that
# behaviors are what we expect.
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"uuid": {"type": "string", "format": "uuid"}
},
"required": ["name"],
"additionalProperties": False
}
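    # Per this schema, {"name": "cow"} or {"name": "cow", "uuid": "<a uuid>"}
    # validates; a missing name, a non-string value, a malformed uuid, or any
    # extra property should be rejected.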
def test_not_json(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'I am a string',
self.schema)
self.assertIn('Malformed JSON', str(error))
def test_malformed_json(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"my bytes got left behind":}',
self.schema)
self.assertIn('Malformed JSON', str(error))
def test_schema_mismatch(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"a": "b"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_type_invalid(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": 1}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_format_checker(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": "hello", "uuid": "not a uuid"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
    def test_no_additional_properties(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": "hello", "cow": "moo"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_valid(self):
data = util.extract_json(
'{"name": "cow", '
'"uuid": "%s"}' % uuidsentinel.rp_uuid,
self.schema)
self.assertEqual('cow', data['name'])
self.assertEqual(uuidsentinel.rp_uuid, data['uuid'])
class TestJSONErrorFormatter(test.NoDBTestCase):
def setUp(self):
super(TestJSONErrorFormatter, self).setUp()
self.environ = {}
# TODO(jaypipes): Remove this when we get more than a single version
# in the placement API. The fact that we only had a single version was
# masking a bug in the utils code.
_versions = [
'1.0',
'1.1',
]
mod_str = 'nova.api.openstack.placement.microversion.VERSIONS'
self.useFixture(fixtures.MonkeyPatch(mod_str, _versions))
def test_status_to_int_code(self):
body = ''
status = '404 Not Found'
title = ''
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual(404, result['errors'][0]['status'])
def test_strip_body_tags(self):
body = '<h1>Big Error!</h1>'
status = '400 Bad Request'
title = ''
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual('Big Error!', result['errors'][0]['detail'])
def test_request_id_presence(self):
body = ''
status = '400 Bad Request'
title = ''
# no request id in environ, none in error
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('request_id', result['errors'][0])
# request id in environ, request id in error
self.environ[request_id.ENV_REQUEST_ID] = 'stub-id'
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual('stub-id', result['errors'][0]['request_id'])
def test_microversion_406_handling(self):
body = ''
status = '400 Bad Request'
title = ''
# Not a 406, no version info required.
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('max_version', result['errors'][0])
self.assertNotIn('min_version', result['errors'][0])
# A 406 but not because of microversions (microversion
# parsing was successful), no version info
# required.
status = '406 Not Acceptable'
version_obj = microversion.parse_version_string('2.3')
self.environ[microversion.MICROVERSION_ENVIRON] = version_obj
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('max_version', result['errors'][0])
self.assertNotIn('min_version', result['errors'][0])
# Microversion parsing failed, status is 406, send version info.
del self.environ[microversion.MICROVERSION_ENVIRON]
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual(microversion.max_version_string(),
result['errors'][0]['max_version'])
self.assertEqual(microversion.min_version_string(),
result['errors'][0]['min_version'])
class TestRequireContent(test.NoDBTestCase):
"""Confirm behavior of util.require_accept."""
@staticmethod
@util.require_content('application/json')
def handler(req):
"""Fake handler to test decorator."""
return True
def test_fail_no_content_type(self):
req = webob.Request.blank('/')
error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
self.handler, req)
self.assertEqual(
'The media type None is not supported, use application/json',
str(error))
def test_fail_wrong_content_type(self):
req = webob.Request.blank('/')
req.content_type = 'text/plain'
error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
self.handler, req)
self.assertEqual(
'The media type text/plain is not supported, use application/json',
str(error))
def test_success_content_type(self):
req = webob.Request.blank('/')
req.content_type = 'application/json'
self.assertTrue(self.handler(req))
class TestPlacementURLs(test.NoDBTestCase):
def setUp(self):
super(TestPlacementURLs, self).setUp()
self.resource_provider = objects.ResourceProvider(
name=uuidsentinel.rp_name,
uuid=uuidsentinel.rp_uuid)
def test_resource_provider_url(self):
environ = {}
expected_url = '/resource_providers/%s' % uuidsentinel.rp_uuid
self.assertEqual(expected_url, util.resource_provider_url(
environ, self.resource_provider))
def test_resource_provider_url_prefix(self):
# SCRIPT_NAME represents the mount point of a WSGI
# application when it is hosted at a path/prefix.
environ = {'SCRIPT_NAME': '/placement'}
expected_url = ('/placement/resource_providers/%s'
% uuidsentinel.rp_uuid)
self.assertEqual(expected_url, util.resource_provider_url(
environ, self.resource_provider))
def test_inventories_url(self):
environ = {}
expected_url = ('/resource_providers/%s/inventories'
% uuidsentinel.rp_uuid)
self.assertEqual(expected_url, util.inventory_url(
environ, self.resource_provider))
def test_inventory_url(self):
resource_class = 'DISK_GB'
environ = {}
expected_url = ('/resource_providers/%s/inventories/%s'
% (uuidsentinel.rp_uuid, resource_class))
self.assertEqual(expected_url, util.inventory_url(
environ, self.resource_provider, resource_class))
|
from .test_antivirus import AbstractTests
import modules.antivirus.avg.avg as module
import modules.antivirus.base as base
from mock import patch
from pathlib import Path
class TestAvg(AbstractTests.TestAntivirus):
name = "AVG AntiVirus Free (Linux)"
scan_path = Path("/usr/bin/avgscan")
scan_args = ('--heur', '--paranoid', '--arc', '--macrow', '--pwdw',
'--pup')
module = module.AVGAntiVirusFree
scan_clean_stdout = """AVG command line Anti-Virus scanner
Copyright (c) 2013 AVG Technologies CZ
Virus database version: 4793/15678
Virus database release date: Mon, 21 May 2018 13:00:00 +0000
Files scanned : 1(1)
Infections found : 0(0)
PUPs found : 0
Files healed : 0
Warnings reported : 0
Errors reported : 0
"""
scan_virus_retcode = 4
virusname = "EICAR_Test"
scan_virus_stdout = """AVG command line Anti-Virus scanner
Copyright (c) 2013 AVG Technologies CZ
Virus database version: 4793/15678
Virus database release date: Mon, 21 May 2018 13:00:00 +0000
eicar.com.txt Virus identified EICAR_Test
Files scanned : 1(1)
Infections found : 1(1)
PUPs found : 0
Files healed : 0
Warnings reported : 0
Errors reported : 0
"""
version = "13.0.3118"
virus_database_version = "4793/15678 (21 May 2018)"
version_stdout = """AVG command line controller
Copyright (c) 2013 AVG Technologies CZ
------ AVG status ------
AVG version : 13.0.3118
Components version : Aspam:3111, Cfg:3109, Cli:3115, Common:3110, Core:4793, Doc:3115, Ems:3111, Initd:3113, Lng:3112, Oad:3118, Other:3109, Scan:3115, Sched:3110, Update:3109
Last update : Tue, 22 May 2018 07:52:31 +0000
------ License status ------
License number : LUOTY-674PL-VRWOV-APYEG-ZXHMA-E
License version : 10
License type : FREE
License expires on :
Registered user :
Registered company :
------ WD status ------
Component State Restarts UpTime
Avid running 0 13 minute(s)
Oad running 0 13 minute(s)
Sched running 0 13 minute(s)
Tcpd running 0 13 minute(s)
Update stopped 0 -
------ Sched status ------
Task name Next runtime Last runtime
Virus update Tue, 22 May 2018 18:04:00 +0000 Tue, 22 May 2018 07:46:29 +0000
Program update - -
User counting Wed, 23 May 2018 07:46:29 +0000 Tue, 22 May 2018 07:46:29 +0000
------ Tcpd status ------
E-mails checked : 0
SPAM messages : 0
Phishing messages : 0
E-mails infected : 0
E-mails dropped : 0
------ Avid status ------
Virus database reload times : 0
Virus database version : 4793/15678
Virus database release date : Mon, 21 May 2018 13:00:00 +0000
Virus database shared in memory : yes
------ Oad status ------
Files scanned : 0(0)
Infections found : 0(0)
PUPs found : 0
Files healed : 0
Warnings reported : 0
Errors reported : 0
Operation successful.
""" # nopep8
@patch.object(base.AntivirusUnix, "locate")
@patch.object(base.AntivirusUnix, "locate_one")
@patch.object(base.AntivirusUnix, "run_cmd")
def setUp(self, m_run_cmd, m_locate_one, m_locate):
m_run_cmd.return_value = 0, self.version_stdout, ""
m_locate_one.return_value = self.scan_path
m_locate.return_value = self.database
super().setUp()
@patch.object(module, "locate_one")
@patch.object(base.AntivirusUnix, "run_cmd")
def test_get_virus_db_error(self, m_run_cmd, m_locate_one):
m_locate_one.return_value = self.scan_path
m_run_cmd.return_value = -1, self.version_stdout, ""
with self.assertRaises(RuntimeError):
self.plugin.get_virus_database_version()
@patch.object(module, "locate_one")
@patch.object(base.AntivirusUnix, "run_cmd")
def test_get_virus_db_no_version(self, m_run_cmd, m_locate_one):
m_locate_one.return_value = self.scan_path
wrong_stdout = "LOREM IPSUM"
m_run_cmd.return_value = 0, wrong_stdout, ""
with self.assertRaises(RuntimeError):
self.plugin.get_virus_database_version()
@patch.object(module, "locate_one")
@patch.object(base.AntivirusUnix, "run_cmd")
def test_get_virus_db_no_release(self, m_run_cmd, m_locate_one):
m_locate_one.return_value = self.scan_path
wrong_stdout = "Virus database version : 4793/15678"
m_run_cmd.return_value = 0, wrong_stdout, ""
version = self.plugin.get_virus_database_version()
        self.assertEqual(version, "4793/15678")
|
import unittest
import mock
class TestQuery(unittest.TestCase):
_PROJECT = 'PROJECT'
@staticmethod
def _get_target_class():
from google.cloud.datastore.query import Query
return Query
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _make_client(self):
return _Client(self._PROJECT)
def test_ctor_defaults(self):
client = self._make_client()
query = self._make_one(client)
self.assertIs(query._client, client)
self.assertEqual(query.project, client.project)
self.assertIsNone(query.kind)
self.assertEqual(query.namespace, client.namespace)
self.assertIsNone(query.ancestor)
self.assertEqual(query.filters, [])
self.assertEqual(query.projection, [])
self.assertEqual(query.order, [])
self.assertEqual(query.distinct_on, [])
def test_ctor_explicit(self):
from google.cloud.datastore.key import Key
_PROJECT = 'OTHER_PROJECT'
_KIND = 'KIND'
_NAMESPACE = 'OTHER_NAMESPACE'
client = self._make_client()
ancestor = Key('ANCESTOR', 123, project=_PROJECT)
FILTERS = [('foo', '=', 'Qux'), ('bar', '<', 17)]
PROJECTION = ['foo', 'bar', 'baz']
ORDER = ['foo', 'bar']
DISTINCT_ON = ['foo']
query = self._make_one(
client,
kind=_KIND,
project=_PROJECT,
namespace=_NAMESPACE,
ancestor=ancestor,
filters=FILTERS,
projection=PROJECTION,
order=ORDER,
distinct_on=DISTINCT_ON,
)
self.assertIs(query._client, client)
self.assertEqual(query.project, _PROJECT)
self.assertEqual(query.kind, _KIND)
self.assertEqual(query.namespace, _NAMESPACE)
self.assertEqual(query.ancestor.path, ancestor.path)
self.assertEqual(query.filters, FILTERS)
self.assertEqual(query.projection, PROJECTION)
self.assertEqual(query.order, ORDER)
self.assertEqual(query.distinct_on, DISTINCT_ON)
def test_ctor_bad_projection(self):
BAD_PROJECTION = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
projection=BAD_PROJECTION)
def test_ctor_bad_order(self):
BAD_ORDER = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
order=BAD_ORDER)
def test_ctor_bad_distinct_on(self):
BAD_DISTINCT_ON = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
distinct_on=BAD_DISTINCT_ON)
def test_ctor_bad_filters(self):
FILTERS_CANT_UNPACK = [('one', 'two')]
self.assertRaises(ValueError, self._make_one, self._make_client(),
filters=FILTERS_CANT_UNPACK)
def test_namespace_setter_w_non_string(self):
query = self._make_one(self._make_client())
def _assign(val):
query.namespace = val
self.assertRaises(ValueError, _assign, object())
def test_namespace_setter(self):
_NAMESPACE = 'OTHER_NAMESPACE'
query = self._make_one(self._make_client())
query.namespace = _NAMESPACE
self.assertEqual(query.namespace, _NAMESPACE)
def test_kind_setter_w_non_string(self):
query = self._make_one(self._make_client())
def _assign(val):
query.kind = val
self.assertRaises(TypeError, _assign, object())
def test_kind_setter_wo_existing(self):
_KIND = 'KIND'
query = self._make_one(self._make_client())
query.kind = _KIND
self.assertEqual(query.kind, _KIND)
def test_kind_setter_w_existing(self):
_KIND_BEFORE = 'KIND_BEFORE'
_KIND_AFTER = 'KIND_AFTER'
query = self._make_one(self._make_client(), kind=_KIND_BEFORE)
self.assertEqual(query.kind, _KIND_BEFORE)
query.kind = _KIND_AFTER
self.assertEqual(query.project, self._PROJECT)
self.assertEqual(query.kind, _KIND_AFTER)
def test_ancestor_setter_w_non_key(self):
query = self._make_one(self._make_client())
def _assign(val):
query.ancestor = val
self.assertRaises(TypeError, _assign, object())
self.assertRaises(TypeError, _assign, ['KIND', 'NAME'])
def test_ancestor_setter_w_key(self):
from google.cloud.datastore.key import Key
_NAME = u'NAME'
key = Key('KIND', 123, project=self._PROJECT)
query = self._make_one(self._make_client())
query.add_filter('name', '=', _NAME)
query.ancestor = key
self.assertEqual(query.ancestor.path, key.path)
def test_ancestor_deleter_w_key(self):
from google.cloud.datastore.key import Key
key = Key('KIND', 123, project=self._PROJECT)
query = self._make_one(client=self._make_client(), ancestor=key)
del query.ancestor
self.assertIsNone(query.ancestor)
def test_add_filter_setter_w_unknown_operator(self):
query = self._make_one(self._make_client())
self.assertRaises(ValueError, query.add_filter,
'firstname', '~~', 'John')
def test_add_filter_w_known_operator(self):
query = self._make_one(self._make_client())
query.add_filter('firstname', '=', u'John')
self.assertEqual(query.filters, [('firstname', '=', u'John')])
def test_add_filter_w_all_operators(self):
query = self._make_one(self._make_client())
query.add_filter('leq_prop', '<=', u'val1')
query.add_filter('geq_prop', '>=', u'val2')
query.add_filter('lt_prop', '<', u'val3')
query.add_filter('gt_prop', '>', u'val4')
query.add_filter('eq_prop', '=', u'val5')
self.assertEqual(len(query.filters), 5)
self.assertEqual(query.filters[0], ('leq_prop', '<=', u'val1'))
self.assertEqual(query.filters[1], ('geq_prop', '>=', u'val2'))
self.assertEqual(query.filters[2], ('lt_prop', '<', u'val3'))
self.assertEqual(query.filters[3], ('gt_prop', '>', u'val4'))
self.assertEqual(query.filters[4], ('eq_prop', '=', u'val5'))
def test_add_filter_w_known_operator_and_entity(self):
from google.cloud.datastore.entity import Entity
query = self._make_one(self._make_client())
other = Entity()
other['firstname'] = u'John'
other['lastname'] = u'Smith'
query.add_filter('other', '=', other)
self.assertEqual(query.filters, [('other', '=', other)])
def test_add_filter_w_whitespace_property_name(self):
query = self._make_one(self._make_client())
PROPERTY_NAME = ' property with lots of space '
query.add_filter(PROPERTY_NAME, '=', u'John')
self.assertEqual(query.filters, [(PROPERTY_NAME, '=', u'John')])
def test_add_filter___key__valid_key(self):
from google.cloud.datastore.key import Key
query = self._make_one(self._make_client())
key = Key('Foo', project=self._PROJECT)
query.add_filter('__key__', '=', key)
self.assertEqual(query.filters, [('__key__', '=', key)])
def test_filter___key__not_equal_operator(self):
from google.cloud.datastore.key import Key
key = Key('Foo', project=self._PROJECT)
query = self._make_one(self._make_client())
query.add_filter('__key__', '<', key)
self.assertEqual(query.filters, [('__key__', '<', key)])
def test_filter___key__invalid_value(self):
query = self._make_one(self._make_client())
self.assertRaises(ValueError, query.add_filter, '__key__', '=', None)
def test_projection_setter_empty(self):
query = self._make_one(self._make_client())
query.projection = []
self.assertEqual(query.projection, [])
def test_projection_setter_string(self):
query = self._make_one(self._make_client())
query.projection = 'field1'
self.assertEqual(query.projection, ['field1'])
def test_projection_setter_non_empty(self):
query = self._make_one(self._make_client())
query.projection = ['field1', 'field2']
self.assertEqual(query.projection, ['field1', 'field2'])
def test_projection_setter_multiple_calls(self):
_PROJECTION1 = ['field1', 'field2']
_PROJECTION2 = ['field3']
query = self._make_one(self._make_client())
query.projection = _PROJECTION1
self.assertEqual(query.projection, _PROJECTION1)
query.projection = _PROJECTION2
self.assertEqual(query.projection, _PROJECTION2)
def test_keys_only(self):
query = self._make_one(self._make_client())
query.keys_only()
self.assertEqual(query.projection, ['__key__'])
def test_key_filter_defaults(self):
from google.cloud.datastore.key import Key
client = self._make_client()
query = self._make_one(client)
self.assertEqual(query.filters, [])
key = Key('Kind', 1234, project='project')
query.key_filter(key)
self.assertEqual(query.filters, [('__key__', '=', key)])
def test_key_filter_explicit(self):
from google.cloud.datastore.key import Key
client = self._make_client()
query = self._make_one(client)
self.assertEqual(query.filters, [])
key = Key('Kind', 1234, project='project')
query.key_filter(key, operator='>')
self.assertEqual(query.filters, [('__key__', '>', key)])
def test_order_setter_empty(self):
query = self._make_one(self._make_client(), order=['foo', '-bar'])
query.order = []
self.assertEqual(query.order, [])
def test_order_setter_string(self):
query = self._make_one(self._make_client())
query.order = 'field'
self.assertEqual(query.order, ['field'])
def test_order_setter_single_item_list_desc(self):
query = self._make_one(self._make_client())
query.order = ['-field']
self.assertEqual(query.order, ['-field'])
def test_order_setter_multiple(self):
query = self._make_one(self._make_client())
query.order = ['foo', '-bar']
self.assertEqual(query.order, ['foo', '-bar'])
def test_distinct_on_setter_empty(self):
query = self._make_one(self._make_client(), distinct_on=['foo', 'bar'])
query.distinct_on = []
self.assertEqual(query.distinct_on, [])
def test_distinct_on_setter_string(self):
query = self._make_one(self._make_client())
query.distinct_on = 'field1'
self.assertEqual(query.distinct_on, ['field1'])
def test_distinct_on_setter_non_empty(self):
query = self._make_one(self._make_client())
query.distinct_on = ['field1', 'field2']
self.assertEqual(query.distinct_on, ['field1', 'field2'])
def test_distinct_on_multiple_calls(self):
_DISTINCT_ON1 = ['field1', 'field2']
_DISTINCT_ON2 = ['field3']
query = self._make_one(self._make_client())
query.distinct_on = _DISTINCT_ON1
self.assertEqual(query.distinct_on, _DISTINCT_ON1)
query.distinct_on = _DISTINCT_ON2
self.assertEqual(query.distinct_on, _DISTINCT_ON2)
def test_fetch_defaults_w_client_attr(self):
from google.cloud.datastore.query import Iterator
client = self._make_client()
query = self._make_one(client)
iterator = query.fetch()
self.assertIsInstance(iterator, Iterator)
self.assertIs(iterator._query, query)
self.assertIs(iterator.client, client)
self.assertIsNone(iterator.max_results)
self.assertEqual(iterator._offset, 0)
def test_fetch_w_explicit_client(self):
from google.cloud.datastore.query import Iterator
client = self._make_client()
other_client = self._make_client()
query = self._make_one(client)
iterator = query.fetch(limit=7, offset=8, client=other_client)
self.assertIsInstance(iterator, Iterator)
self.assertIs(iterator._query, query)
self.assertIs(iterator.client, other_client)
self.assertEqual(iterator.max_results, 7)
self.assertEqual(iterator._offset, 8)
class TestIterator(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.datastore.query import Iterator
return Iterator
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_defaults(self):
query = object()
client = object()
iterator = self._make_one(query, client)
self.assertFalse(iterator._started)
self.assertIs(iterator.client, client)
self.assertIsNotNone(iterator._item_to_value)
self.assertIsNone(iterator.max_results)
self.assertEqual(iterator.page_number, 0)
        self.assertIsNone(iterator.next_page_token)
self.assertEqual(iterator.num_results, 0)
self.assertIs(iterator._query, query)
self.assertIsNone(iterator._offset)
self.assertIsNone(iterator._end_cursor)
self.assertTrue(iterator._more_results)
def test_constructor_explicit(self):
query = object()
client = object()
limit = 43
offset = 9
start_cursor = b'8290\xff'
end_cursor = b'so20rc\ta'
iterator = self._make_one(
query, client, limit=limit, offset=offset,
start_cursor=start_cursor, end_cursor=end_cursor)
self.assertFalse(iterator._started)
self.assertIs(iterator.client, client)
self.assertIsNotNone(iterator._item_to_value)
self.assertEqual(iterator.max_results, limit)
self.assertEqual(iterator.page_number, 0)
self.assertEqual(iterator.next_page_token, start_cursor)
self.assertEqual(iterator.num_results, 0)
self.assertIs(iterator._query, query)
self.assertEqual(iterator._offset, offset)
self.assertEqual(iterator._end_cursor, end_cursor)
self.assertTrue(iterator._more_results)
def test__build_protobuf_empty(self):
from google.cloud.proto.datastore.v1 import query_pb2
from google.cloud.datastore.query import Query
client = _Client(None)
query = Query(client)
iterator = self._make_one(query, client)
pb = iterator._build_protobuf()
expected_pb = query_pb2.Query()
self.assertEqual(pb, expected_pb)
def test__build_protobuf_all_values(self):
from google.cloud.proto.datastore.v1 import query_pb2
from google.cloud.datastore.query import Query
client = _Client(None)
query = Query(client)
limit = 15
offset = 9
start_bytes = b'i\xb7\x1d'
start_cursor = 'abcd'
end_bytes = b'\xc3\x1c\xb3'
end_cursor = 'wxyz'
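        # The cursors are URL-safe base64 text which the iterator decodes, so
        # 'abcd' -> b'i\xb7\x1d' and 'wxyz' -> b'\xc3\x1c\xb3' (the byte
        # values defined as start_bytes/end_bytes above).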
iterator = self._make_one(
query, client, limit=limit, offset=offset,
start_cursor=start_cursor, end_cursor=end_cursor)
self.assertEqual(iterator.max_results, limit)
iterator.num_results = 4
iterator._skipped_results = 1
pb = iterator._build_protobuf()
expected_pb = query_pb2.Query(
start_cursor=start_bytes,
end_cursor=end_bytes,
offset=offset - iterator._skipped_results,
)
expected_pb.limit.value = limit - iterator.num_results
self.assertEqual(pb, expected_pb)
def test__process_query_results(self):
from google.cloud.proto.datastore.v1 import query_pb2
iterator = self._make_one(None, None,
end_cursor='abcd')
self.assertIsNotNone(iterator._end_cursor)
entity_pbs = [
_make_entity('Hello', 9998, 'PRAHJEKT'),
]
cursor_as_bytes = b'\x9ai\xe7'
cursor = b'mmnn'
skipped_results = 4
more_results_enum = query_pb2.QueryResultBatch.NOT_FINISHED
response_pb = _make_query_response(
entity_pbs, cursor_as_bytes, more_results_enum, skipped_results)
result = iterator._process_query_results(response_pb)
self.assertEqual(result, entity_pbs)
self.assertEqual(iterator._skipped_results, skipped_results)
self.assertEqual(iterator.next_page_token, cursor)
self.assertTrue(iterator._more_results)
def test__process_query_results_done(self):
from google.cloud.proto.datastore.v1 import query_pb2
iterator = self._make_one(None, None,
end_cursor='abcd')
self.assertIsNotNone(iterator._end_cursor)
entity_pbs = [
_make_entity('World', 1234, 'PROJECT'),
]
cursor_as_bytes = b''
skipped_results = 44
more_results_enum = query_pb2.QueryResultBatch.NO_MORE_RESULTS
response_pb = _make_query_response(
entity_pbs, cursor_as_bytes, more_results_enum, skipped_results)
result = iterator._process_query_results(response_pb)
self.assertEqual(result, entity_pbs)
self.assertEqual(iterator._skipped_results, skipped_results)
self.assertIsNone(iterator.next_page_token)
self.assertFalse(iterator._more_results)
def test__process_query_results_bad_enum(self):
iterator = self._make_one(None, None)
more_results_enum = 999
response_pb = _make_query_response(
[], b'', more_results_enum, 0)
with self.assertRaises(ValueError):
iterator._process_query_results(response_pb)
def _next_page_helper(self, txn_id=None):
from google.cloud.iterator import Page
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from google.cloud.datastore.query import Query
more_enum = query_pb2.QueryResultBatch.NOT_FINISHED
result = _make_query_response([], b'', more_enum, 0)
project = 'prujekt'
ds_api = _make_datastore_api(result)
if txn_id is None:
client = _Client(project, datastore_api=ds_api)
else:
transaction = mock.Mock(id=txn_id, spec=['id'])
client = _Client(
project, datastore_api=ds_api, transaction=transaction)
query = Query(client)
iterator = self._make_one(query, client)
page = iterator._next_page()
self.assertIsInstance(page, Page)
self.assertIs(page._parent, iterator)
partition_id = entity_pb2.PartitionId(project_id=project)
if txn_id is None:
read_options = datastore_pb2.ReadOptions()
else:
read_options = datastore_pb2.ReadOptions(transaction=txn_id)
empty_query = query_pb2.Query()
ds_api.run_query.assert_called_once_with(
project, partition_id, read_options, query=empty_query)
def test__next_page(self):
self._next_page_helper()
def test__next_page_in_transaction(self):
txn_id = b'1xo1md\xe2\x98\x83'
self._next_page_helper(txn_id)
def test__next_page_no_more(self):
from google.cloud.datastore.query import Query
ds_api = _make_datastore_api()
client = _Client(None, datastore_api=ds_api)
query = Query(client)
iterator = self._make_one(query, client)
iterator._more_results = False
page = iterator._next_page()
self.assertIsNone(page)
ds_api.run_query.assert_not_called()
class Test__item_to_entity(unittest.TestCase):
def _call_fut(self, iterator, entity_pb):
from google.cloud.datastore.query import _item_to_entity
return _item_to_entity(iterator, entity_pb)
def test_it(self):
entity_pb = mock.sentinel.entity_pb
patch = mock.patch(
'google.cloud.datastore.helpers.entity_from_protobuf')
with patch as entity_from_protobuf:
result = self._call_fut(None, entity_pb)
self.assertIs(result, entity_from_protobuf.return_value)
entity_from_protobuf.assert_called_once_with(entity_pb)
class Test__pb_from_query(unittest.TestCase):
def _call_fut(self, query):
from google.cloud.datastore.query import _pb_from_query
return _pb_from_query(query)
def test_empty(self):
from google.cloud.proto.datastore.v1 import query_pb2
pb = self._call_fut(_Query())
self.assertEqual(list(pb.projection), [])
self.assertEqual(list(pb.kind), [])
self.assertEqual(list(pb.order), [])
self.assertEqual(list(pb.distinct_on), [])
self.assertEqual(pb.filter.property_filter.property.name, '')
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.op,
query_pb2.CompositeFilter.OPERATOR_UNSPECIFIED)
self.assertEqual(list(cfilter.filters), [])
self.assertEqual(pb.start_cursor, b'')
self.assertEqual(pb.end_cursor, b'')
self.assertEqual(pb.limit.value, 0)
self.assertEqual(pb.offset, 0)
def test_projection(self):
pb = self._call_fut(_Query(projection=['a', 'b', 'c']))
self.assertEqual([item.property.name for item in pb.projection],
['a', 'b', 'c'])
def test_kind(self):
pb = self._call_fut(_Query(kind='KIND'))
self.assertEqual([item.name for item in pb.kind], ['KIND'])
def test_ancestor(self):
from google.cloud.datastore.key import Key
from google.cloud.proto.datastore.v1 import query_pb2
ancestor = Key('Ancestor', 123, project='PROJECT')
pb = self._call_fut(_Query(ancestor=ancestor))
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
self.assertEqual(len(cfilter.filters), 1)
pfilter = cfilter.filters[0].property_filter
self.assertEqual(pfilter.property.name, '__key__')
ancestor_pb = ancestor.to_protobuf()
self.assertEqual(pfilter.value.key_value, ancestor_pb)
def test_filter(self):
from google.cloud.proto.datastore.v1 import query_pb2
query = _Query(filters=[('name', '=', u'John')])
query.OPERATORS = {
'=': query_pb2.PropertyFilter.EQUAL,
}
pb = self._call_fut(query)
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
self.assertEqual(len(cfilter.filters), 1)
pfilter = cfilter.filters[0].property_filter
self.assertEqual(pfilter.property.name, 'name')
self.assertEqual(pfilter.value.string_value, u'John')
def test_filter_key(self):
from google.cloud.datastore.key import Key
from google.cloud.proto.datastore.v1 import query_pb2
key = Key('Kind', 123, project='PROJECT')
query = _Query(filters=[('__key__', '=', key)])
query.OPERATORS = {
'=': query_pb2.PropertyFilter.EQUAL,
}
pb = self._call_fut(query)
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
self.assertEqual(len(cfilter.filters), 1)
pfilter = cfilter.filters[0].property_filter
self.assertEqual(pfilter.property.name, '__key__')
key_pb = key.to_protobuf()
self.assertEqual(pfilter.value.key_value, key_pb)
def test_order(self):
from google.cloud.proto.datastore.v1 import query_pb2
pb = self._call_fut(_Query(order=['a', '-b', 'c']))
self.assertEqual([item.property.name for item in pb.order],
['a', 'b', 'c'])
self.assertEqual([item.direction for item in pb.order],
[query_pb2.PropertyOrder.ASCENDING,
query_pb2.PropertyOrder.DESCENDING,
query_pb2.PropertyOrder.ASCENDING])
def test_distinct_on(self):
pb = self._call_fut(_Query(distinct_on=['a', 'b', 'c']))
self.assertEqual([item.name for item in pb.distinct_on],
['a', 'b', 'c'])
class _Query(object):
def __init__(self,
client=object(),
kind=None,
project=None,
namespace=None,
ancestor=None,
filters=(),
projection=(),
order=(),
distinct_on=()):
self._client = client
self.kind = kind
self.project = project
self.namespace = namespace
self.ancestor = ancestor
self.filters = filters
self.projection = projection
self.order = order
self.distinct_on = distinct_on
class _Client(object):
def __init__(self, project, datastore_api=None, namespace=None,
transaction=None):
self.project = project
self._datastore_api = datastore_api
self.namespace = namespace
self._transaction = transaction
@property
def current_transaction(self):
return self._transaction
def _make_entity(kind, id_, project):
from google.cloud.proto.datastore.v1 import entity_pb2
key = entity_pb2.Key()
key.partition_id.project_id = project
elem = key.path.add()
elem.kind = kind
elem.id = id_
return entity_pb2.Entity(key=key)
def _make_query_response(
entity_pbs, cursor_as_bytes, more_results_enum, skipped_results):
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import query_pb2
return datastore_pb2.RunQueryResponse(
batch=query_pb2.QueryResultBatch(
skipped_results=skipped_results,
end_cursor=cursor_as_bytes,
more_results=more_results_enum,
entity_results=[
query_pb2.EntityResult(entity=entity)
for entity in entity_pbs
],
),
)
def _make_datastore_api(result=None):
run_query = mock.Mock(return_value=result, spec=[])
return mock.Mock(run_query=run_query, spec=['run_query'])
|
from ._query import query
|
import re
import sys
import socket
import libvirt
import argparse
import traceback
import jsonpickle
import subprocess
from xml.etree import ElementTree
VERSION = 'check_virsh_domains v1.0'
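# Map of libvirt virDomainState enum values (0-7) to human-readable labels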
DOMAIN_STATES = {
0: 'None',
1: 'Running',
2: 'Blocked on resource',
3: 'Paused by user',
4: 'Being shut down',
5: 'Shut off',
6: 'Crashed',
7: 'Suspended by guest power management'
}
ZABBIX_CONF = '/opt/zabbix/etc/zabbix_agentd.conf'
ZABBIX_SENDER = '/opt/zabbix/bin/zabbix_sender'
class Domain(object):
def __init__(self, vir_dom):
try:
# Get the domain's network interface device list
if_devices = self.get_if_devices(vir_dom)
# Get the domain's block device list
blk_devices = self.get_blk_devices(vir_dom)
# Get the domain's information
dom_info = vir_dom.info()
# Get the domain's memory stats
mem_stats = vir_dom.memoryStats()
# Get the domain's UUID
self.uuid = vir_dom.UUIDString()
# Compile the network interface stats for each network interface device
for if_num, if_dev in enumerate(if_devices):
# Get the interface stats
if_stats = vir_dom.interfaceStats(if_dev)
# Set class attributes using the interface index number (not the name)
setattr(self, 'if_%s_rx_bytes' % if_num, int(if_stats[0]))
setattr(self, 'if_%s_rx_packets' % if_num, int(if_stats[1]))
setattr(self, 'if_%s_rx_errors' % if_num, int(if_stats[2]))
setattr(self, 'if_%s_rx_drop' % if_num, int(if_stats[3]))
setattr(self, 'if_%s_tx_bytes' % if_num, int(if_stats[4]))
setattr(self, 'if_%s_tx_packets' % if_num, int(if_stats[5]))
setattr(self, 'if_%s_tx_errors' % if_num, int(if_stats[6]))
setattr(self, 'if_%s_tx_drop' % if_num, int(if_stats[7]))
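                # e.g. the first interface produces attributes
                # if_0_rx_bytes ... if_0_tx_drop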
# Compile the block device stats for each block device
for blk_dev in blk_devices:
#Get the block device stats
blk_stats = vir_dom.blockStats(blk_dev)
# Set class attributes using the device name
setattr(self, 'blk_%s_rd_req' % blk_dev, int(blk_stats[0]))
setattr(self, 'blk_%s_rd_bytes' % blk_dev, int(blk_stats[1]))
setattr(self, 'blk_%s_wr_req' % blk_dev, int(blk_stats[2]))
setattr(self, 'blk_%s_wr_bytes' % blk_dev, int(blk_stats[3]))
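                # e.g. a device named 'vda' would produce attributes
                # blk_vda_rd_req ... blk_vda_wr_bytes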
            # Get the memory stats in kB and convert to B for consistency
self.mem_max_bytes = int(dom_info[1]) * 1024
self.mem_used_bytes = int(dom_info[2]) * 1024
# Get the number of vCPU's and the usage time in nanoseconds
self.cpu_count = int(dom_info[3])
self.cpu_time = int(dom_info[4])
# Get the state of the domain
self.state = DOMAIN_STATES[dom_info[0]]
# Note:
# To calculate %CPU utilization you need to have a time period. We're expecting that the
# %CPU calculation is done externally by a system that knows the time period between measurements.
#
# For reference:
# http://people.redhat.com/~rjones/virt-top/faq.html#calccpu
# cpu_time_diff = cpuTime_now - cpuTime_t_seconds_ago
# %CPU = 100 * cpu_time_diff / (t * host_cpus * 10^9)
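            # Worked example with made-up numbers: if cpu_time grew by 12e9 ns
            # over t=60s on a host with 4 CPUs,
            # %CPU = 100 * 12e9 / (60 * 4 * 1e9) = 5.0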
# There may not be anything in mem_stats (support is limited), but let's add any values there may be
for key, value in mem_stats.iteritems():
value_bytes = int(value) * 1024
setattr(self, 'mem_%s' % key, value_bytes)
        except libvirt.libvirtError:
            # libvirt raises libvirtError on failure, so catch that rather than OSError
            print 'Failed to get domain information'
def get_if_devices(self, vir_dom):
#Function to return a list of network devices used
#Create a XML tree from the domain XML description
dom_tree = ElementTree.fromstring(vir_dom.XMLDesc(0))
#The list of device names
devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/interface/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in devices:
devices.append(dev)
#Completed device name list
return devices
def get_blk_devices(self, vir_dom):
#Function to return a list of block devices used
#Create a XML tree from the domain XML description
dom_tree = ElementTree.fromstring(vir_dom.XMLDesc(0))
#The list of device names
devices = []
        #Iterate through all disk target elements of the domain
for target in dom_tree.findall("devices/disk/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in devices:
devices.append(dev)
#Completed device name list
return devices
def health(self):
output = {'errorlevel': 0, 'errors': []}
# Check whether there are network interface errors or drops
for key in vars(self):
if re.match('if_.*_errors', key):
if vars(self)[key] > 0:
output['errors'].append('Domain has network interface errors.')
output['errorlevel'] = set_errorlevel(output['errorlevel'], 1)
if re.match('if_.*_drop', key):
if vars(self)[key] > 0:
output['errors'].append('Domain has network interface drops.')
output['errorlevel'] = set_errorlevel(output['errorlevel'], 1)
# Check whether the domain is in a 'blocked' or 'crashed' state
if self.state == 'Blocked on resource' or self.state == 'Crashed':
output['errors'].append('Domain is %s!' % self.state)
output['errorlevel'] = set_errorlevel(output['errorlevel'], 2)
return output
def inventory(self):
output = {}
output['mem_max_bytes'] = '%i' % self.mem_max_bytes
output['cpu_count'] = '%i' % self.cpu_count
output['state'] = '%s' % self.state
output['uuid'] = '%s' % self.uuid
return output
def perfdata(self):
output = {}
# Loop through all attributes and add the if and blk data
for key in vars(self):
if re.match('if_.*', key) or re.match('blk_.*', key):
output[key] = vars(self)[key]
output['mem_used_bytes'] = self.mem_used_bytes
output['cpu_time'] = self.cpu_time
return output
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--discovery', action='store_true', help='Only output discovery data')
ap.add_argument('-i', '--inventory', action='store_true', help='Include inventory data in output')
ap.add_argument('-o', '--output', default='stdout', choices=['stdout', 'nagios', 'zabbix'], help='Output format')
ap.add_argument('-p', '--perfdata', action='store_true', help='Include performance data in output')
ap.add_argument('-v', '--verbose', default=0, action='count', help='Verbose output')
ap.add_argument('-V', '--version', action='store_true', help='Show script version')
return ap.parse_args()
def set_errorlevel(current, target):
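    # Escalate to 'target' unless the current level is already higher;
    # 'Unknown' (3) always wins here, and the callers filter it back out
    # whenever any WARNING/CRITICAL levels are present.
    # e.g. set_errorlevel(2, 1) -> 2, set_errorlevel(1, 3) -> 3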
    if target == 3:
        return 3
    elif target > current:
        return target
    else:
        return current
def output_status(item_name, check_type, errorlevel):
if errorlevel == 0:
return '%s %s OK' % (item_name, check_type)
elif errorlevel == 1:
return '%s %s WARNING' % (item_name, check_type)
elif errorlevel == 2:
return '%s %s CRITICAL' % (item_name, check_type)
else:
return '%s %s UNKNOWN' % (item_name, check_type)
def output_stdout(args):
domains = domain_list()
errorlevels = []
for domain in domains:
print output_status('Domain %s' % domain.uuid, 'Health', domain.health()['errorlevel'])
errorlevels.append(domain.health()['errorlevel'])
if args.verbose > 0:
for error in domain.health()['errors']:
print ' - %s' % error
if args.perfdata:
for key, value in domain.perfdata().iteritems():
print ' - %s = %s' % (key, value)
if args.inventory:
for key, value in domain.inventory().iteritems():
print ' - %s = %s' % (key, value)
# filter out 'unknown' errorlevels if there are any 'warning' or 'critical' errorlevels
if (1 in errorlevels or 2 in errorlevels) and max(errorlevels) == 3:
errorlevels = filter(lambda item: item != 3, errorlevels)
sys.exit(max(errorlevels))
def output_nagios(args):
domains = domain_list()
output_line = ''
output_perfdata = ' |'
errorlevels = []
for domain in domains:
if output_line != '':
output_line += '; '
output_line += output_status('Dom %s' % domain.uuid, 'Health',
domain.health()['errorlevel'])
errorlevels.append(domain.health()['errorlevel'])
if args.verbose > 0:
for error in domain.health()['errors']:
output_line += ' %s' % error
if args.perfdata:
for key, value in domain.perfdata().iteritems():
output_perfdata += " %s='%s'" % (key, value)
if args.perfdata:
output_line += output_perfdata
print output_line
# filter out 'unknown' errorlevels if there are any 'warning' or 'critical' errorlevels
if (1 in errorlevels or 2 in errorlevels) and max(errorlevels) == 3:
errorlevels = filter(lambda item: item != 3, errorlevels)
sys.exit(max(errorlevels))
def output_zabbix(args):
domains = domain_list()
output_line = ''
errorlevels = []
for domain in domains:
output_line += '%s virsh.domain[%s,health] %s\n' % (socket.gethostname(), domain.uuid, output_status(domain.uuid,'Health', domain.health()['errorlevel']))
errorlevels.append(domain.health()['errorlevel'])
if args.verbose > 0 and len(domain.health()['errors']) > 0:
output_line += '%s virsh.domain[%s,errors] %s\n' % (socket.gethostname(), domain.uuid, ";".join(domain.health()['errors']))
elif args.verbose > 0 and len(domain.health()['errors']) == 0:
output_line += '%s virsh.domain[%s,errors] None\n' % (socket.gethostname(), domain.uuid)
if args.perfdata:
for key, value in domain.perfdata().iteritems():
output_line += '%s virsh.domain[%s,%s] %s\n' % (socket.gethostname(), domain.uuid, key, value)
if args.inventory:
for key, value in domain.inventory().iteritems():
output_line += '%s virsh.domain[%s,%s] %s\n' % (socket.gethostname(), domain.uuid, key, value)
# filter out 'unknown' errorlevels if there are any 'warning' or 'critical' errorlevels
if (1 in errorlevels or 2 in errorlevels) and max(errorlevels) == 3:
errorlevels = filter(lambda item: item != 3, errorlevels)
#TODO: This should really have exception handling
cmd = '%s -c %s -v -i -' % (ZABBIX_SENDER, ZABBIX_CONF)
cmd = cmd.split(' ')
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # communicate() writes stdin, waits for the process to exit and returns
    # its output; calling poll() before the process finished would always
    # return None and mask failures
    stdout, stderr = p.communicate(output_line)
    status = p.returncode
if not status:
print 'zabbix_sender output: %s' % stdout
else:
print 'zabbix_sender error: %s' % stdout
print output_status('Overall','Health', max(errorlevels))
sys.exit(max(errorlevels))
def output_zabbix_discovery(args):
#TODO: Sort this mess out.
#Using the objects was too slow - the discovery would keep failing when requested by the Zabbix Server
try:
# Connect to the local hypervisor (read only)
conn = libvirt.openReadOnly(None)
# Prepare the lists and dict objects
dom_list = []
return_dict = {}
# Loop through the running domains and retrieve the appropriate discovery information
for dom_id in conn.listDomainsID():
dom_dict = {}
vir_dom = conn.lookupByID(dom_id)
dom_dict['{#VIRSH_DOMAIN_UUID}'] = vir_dom.UUIDString()
if args.perfdata:
dom_tree = ElementTree.fromstring(vir_dom.XMLDesc(0))
#The list of device names
if_devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/interface/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in if_devices:
if_devices.append(dev)
#Put the final device list into the domain's return dict
for if_num, if_dev in enumerate(if_devices):
dom_dict['{#VIRSH_DOMAIN_NIC}'] = str(if_num)
#The list of device names
blk_devices = []
                #Iterate through all disk target elements of the domain
for target in dom_tree.findall("devices/disk/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in blk_devices:
blk_devices.append(dev)
#Put the final device list into the domain's return dict
for blk_dev in blk_devices:
dom_dict['{#VIRSH_DOMAIN_DISK}'] = blk_dev
dom_list.append(dom_dict)
# Loop through the offline domains and retrieve the appropriate discovery information
for name in conn.listDefinedDomains():
dom_dict = {}
            vir_dom = conn.lookupByName(name)
dom_dict['{#VIRSH_DOMAIN_UUID}'] = vir_dom.UUIDString()
if args.perfdata:
dom_tree = ElementTree.fromstring(vir_dom.XMLDesc(0))
#The list of device names
if_devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/interface/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in if_devices:
if_devices.append(dev)
#Put the final device list into the domain's return dict
for if_num, if_dev in enumerate(if_devices):
dom_dict['{#VIRSH_DOMAIN_NIC}'] = str(if_num)
#The list of device names
blk_devices = []
                #Iterate through all disk target elements of the domain
for target in dom_tree.findall("devices/disk/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in blk_devices:
blk_devices.append(dev)
#Put the final device list into the domain's return dict
for blk_dev in blk_devices:
dom_dict['{#VIRSH_DOMAIN_DISK}'] = blk_dev
dom_list.append(dom_dict)
return_dict['data'] = dom_list
# return the data encoded as json
print jsonpickle.encode(return_dict)
    except libvirt.libvirtError:
        print 'Failed to get domain list'
def domain_list():
try:
# Connect to the local hypervisor (read only)
conn = libvirt.openReadOnly(None)
# Prepare the list of domains to return
dom_list = []
# Loop through the running domains, create and store the objects
        for dom_id in conn.listDomainsID():
            vir_dom = conn.lookupByID(dom_id)
dom_obj = Domain(vir_dom)
dom_list.append(dom_obj)
# Loop through the offline domains, create and store the objects
for name in conn.listDefinedDomains():
vir_dom = conn.lookupByName(name)
dom_obj = Domain(vir_dom)
dom_list.append(dom_obj)
return dom_list
    except libvirt.libvirtError:
        print 'Failed to get domain list'
        return []
if __name__ == '__main__':
args = parse_args()
try:
if args.version:
print VERSION
elif args.output == 'stdout':
output_stdout(args)
elif args.output == 'nagios':
output_nagios(args)
elif args.output == 'zabbix' and not args.discovery:
output_zabbix(args)
elif args.output == 'zabbix' and args.discovery:
output_zabbix_discovery(args)
sys.exit(0)
except Exception, err:
#print("ERROR: %s" % err)
ex, val, tb = sys.exc_info()
traceback.print_exception(ex, val, tb)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
|
import keystonemiddleware.audit as audit_middleware
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
import pecan
from ironic.api import config
from ironic.api.controllers import base
from ironic.api import hooks
from ironic.api import middleware
from ironic.api.middleware import auth_token
from ironic.common import exception
from ironic.conf import CONF
class IronicCORS(cors_middleware.CORS):
"""Ironic-specific CORS class
We're adding the Ironic-specific version headers to the list of simple
headers in order that a request bearing those headers might be accepted by
the Ironic REST API.
"""
simple_headers = cors_middleware.CORS.simple_headers + [
'X-Auth-Token',
base.Version.max_string,
base.Version.min_string,
base.Version.string
]
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None, extra_hooks=None):
    # Resolve the pecan config before it is used to build the hook list
    if not pecan_config:
        pecan_config = get_pecan_config()
    app_hooks = [hooks.ConfigHook(),
                 hooks.DBHook(),
                 hooks.ContextHook(pecan_config.app.acl_public_routes),
                 hooks.RPCHook(),
                 hooks.NoExceptionTracebackHook(),
                 hooks.PublicUrlHook()]
    if extra_hooks:
        app_hooks.extend(extra_hooks)
    pecan.configuration.set_config(dict(pecan_config), overwrite=True)
app = pecan.make_app(
pecan_config.app.root,
debug=CONF.pecan_debug,
static_root=pecan_config.app.static_root if CONF.pecan_debug else None,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
if CONF.audit.enabled:
try:
app = audit_middleware.AuditMiddleware(
app,
audit_map_file=CONF.audit.audit_map_file,
ignore_req_list=CONF.audit.ignore_req_list
)
except (EnvironmentError, OSError,
audit_middleware.PycadfAuditApiConfigError) as e:
raise exception.InputFileError(
file_name=CONF.audit.audit_map_file,
reason=e
)
if CONF.auth_strategy == "keystone":
app = auth_token.AuthTokenMiddleware(
app, dict(cfg.CONF),
public_api_routes=pecan_config.app.acl_public_routes)
# Create a CORS wrapper, and attach ironic-specific defaults that must be
# included in all CORS responses.
app = IronicCORS(app, CONF)
cors_middleware.set_defaults(
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=[base.Version.max_string, base.Version.min_string,
base.Version.string]
)
return app
class VersionSelectorApplication(object):
def __init__(self):
pc = get_pecan_config()
self.v1 = setup_app(pecan_config=pc)
def __call__(self, environ, start_response):
return self.v1(environ, start_response)
|
import os
from oslo_policy import opts
from oslo_service import wsgi
from manila.common import config
CONF = config.CONF
def set_defaults(conf):
_safe_set_of_opts(conf, 'verbose', True)
_safe_set_of_opts(conf, 'state_path', os.path.abspath(
os.path.join(os.path.dirname(__file__),
'..',
'..')))
_safe_set_of_opts(conf, 'connection', "sqlite://", group='database')
_safe_set_of_opts(conf, 'sqlite_synchronous', False)
_POLICY_PATH = os.path.abspath(os.path.join(CONF.state_path,
'manila/tests/policy.json'))
opts.set_defaults(conf, policy_file=_POLICY_PATH)
_safe_set_of_opts(conf, 'share_export_ip', '0.0.0.0')
_safe_set_of_opts(conf, 'service_instance_user', 'fake_user')
_API_PASTE_PATH = os.path.abspath(os.path.join(CONF.state_path,
'etc/manila/api-paste.ini'))
wsgi.register_opts(conf)
_safe_set_of_opts(conf, 'api_paste_config', _API_PASTE_PATH)
_safe_set_of_opts(conf, 'share_driver',
'manila.tests.fake_driver.FakeShareDriver')
_safe_set_of_opts(conf, 'auth_strategy', 'noauth')
_safe_set_of_opts(conf, 'zfs_share_export_ip', '1.1.1.1')
_safe_set_of_opts(conf, 'zfs_service_ip', '2.2.2.2')
_safe_set_of_opts(conf, 'zfs_zpool_list', ['foo', 'bar'])
_safe_set_of_opts(conf, 'zfs_share_helpers', 'NFS=foo.bar.Helper')
_safe_set_of_opts(conf, 'zfs_replica_snapshot_prefix', 'foo_prefix_')
_safe_set_of_opts(conf, 'hitachi_hsp_host', '172.24.47.190')
_safe_set_of_opts(conf, 'hitachi_hsp_username', 'hsp_user')
_safe_set_of_opts(conf, 'hitachi_hsp_password', 'hsp_password')
_safe_set_of_opts(conf, 'qnap_management_url', 'http://1.2.3.4:8080')
_safe_set_of_opts(conf, 'qnap_share_ip', '1.2.3.4')
_safe_set_of_opts(conf, 'qnap_nas_login', 'admin')
_safe_set_of_opts(conf, 'qnap_nas_password', 'qnapadmin')
_safe_set_of_opts(conf, 'qnap_poolname', 'Storage Pool 1')
def _safe_set_of_opts(conf, *args, **kwargs):
try:
conf.set_default(*args, **kwargs)
except config.cfg.NoSuchOptError:
# Assumed that opt is not imported and not used
pass
|
import contextlib
import copy
import os
import mock
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.volume.drivers import smbfs
class SmbFsTestCase(test.TestCase):
_FAKE_SHARE = '//1.2.3.4/share1'
_FAKE_MNT_BASE = '/mnt'
_FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
_FAKE_TOTAL_SIZE = '2048'
_FAKE_TOTAL_AVAILABLE = '1024'
_FAKE_TOTAL_ALLOCATED = 1024
_FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
'size': 1,
'provider_location': _FAKE_SHARE,
'name': _FAKE_VOLUME_NAME,
'status': 'available'}
_FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash')
_FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME)
_FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba'
_FAKE_SNAPSHOT = {'id': _FAKE_SNAPSHOT_ID,
'volume': _FAKE_VOLUME,
'status': 'available',
'volume_size': 1}
_FAKE_SNAPSHOT_PATH = (
_FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID)
_FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
_FAKE_OPTIONS_DICT = {'username': 'Administrator',
'password': '12345'}
_FAKE_LISTDIR = [_FAKE_VOLUME_NAME, _FAKE_VOLUME_NAME + '.vhd',
_FAKE_VOLUME_NAME + '.vhdx', 'fake_folder']
_FAKE_SMBFS_CONFIG = mock.MagicMock()
_FAKE_SMBFS_CONFIG.smbfs_oversub_ratio = 2
_FAKE_SMBFS_CONFIG.smbfs_used_ratio = 0.5
_FAKE_SMBFS_CONFIG.smbfs_shares_config = '/fake/config/path'
_FAKE_SMBFS_CONFIG.smbfs_default_volume_format = 'raw'
_FAKE_SMBFS_CONFIG.smbfs_sparsed_volumes = False
def setUp(self):
super(SmbFsTestCase, self).setUp()
smbfs.SmbfsDriver.__init__ = lambda x: None
self._smbfs_driver = smbfs.SmbfsDriver()
self._smbfs_driver._remotefsclient = mock.Mock()
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver._execute = mock.Mock()
self._smbfs_driver.base = self._FAKE_MNT_BASE
def test_delete_volume(self):
drv = self._smbfs_driver
fake_vol_info = self._FAKE_VOLUME_PATH + '.info'
drv._ensure_share_mounted = mock.MagicMock()
fake_ensure_mounted = drv._ensure_share_mounted
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
drv._delete = mock.Mock()
drv._local_path_volume_info = mock.Mock(
return_value=fake_vol_info)
with mock.patch('os.path.exists', lambda x: True):
drv.delete_volume(self._FAKE_VOLUME)
fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
drv._delete.assert_any_call(
self._FAKE_VOLUME_PATH)
drv._delete.assert_any_call(fake_vol_info)
def _test_setup(self, config, share_config_exists=True):
fake_exists = mock.Mock(return_value=share_config_exists)
fake_ensure_mounted = mock.MagicMock()
self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
self._smbfs_driver.configuration = config
with mock.patch('os.path.exists', fake_exists):
if not (config.smbfs_shares_config and share_config_exists and
config.smbfs_oversub_ratio > 0 and
0 <= config.smbfs_used_ratio <= 1):
self.assertRaises(exception.SmbfsException,
self._smbfs_driver.do_setup,
None)
else:
self._smbfs_driver.do_setup(None)
self.assertEqual(self._smbfs_driver.shares, {})
fake_ensure_mounted.assert_called_once()
def test_setup_missing_shares_config_option(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_shares_config = None
        self._test_setup(fake_config, share_config_exists=False)
def test_setup_missing_shares_config_file(self):
self._test_setup(self._FAKE_SMBFS_CONFIG, False)
    def test_setup_invalid_oversub_ratio(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_oversub_ratio = -1
self._test_setup(fake_config)
def test_setup_invalid_used_ratio(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_used_ratio = -1
self._test_setup(fake_config)
def _test_create_volume(self, volume_exists=False, volume_format=None):
fake_method = mock.MagicMock()
self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
self._smbfs_driver._set_rw_permissions_for_all = mock.MagicMock()
fake_set_permissions = self._smbfs_driver._set_rw_permissions_for_all
self._smbfs_driver.get_volume_format = mock.MagicMock()
windows_image_format = False
fake_vol_path = self._FAKE_VOLUME_PATH
self._smbfs_driver.get_volume_format.return_value = volume_format
if volume_format:
if volume_format in ('vhd', 'vhdx'):
windows_image_format = volume_format
if volume_format == 'vhd':
windows_image_format = 'vpc'
method = '_create_windows_image'
fake_vol_path += '.' + volume_format
else:
method = '_create_%s_file' % volume_format
if volume_format == 'sparsed':
self._smbfs_driver.configuration.smbfs_sparsed_volumes = (
True)
else:
method = '_create_regular_file'
setattr(self._smbfs_driver, method, fake_method)
with mock.patch('os.path.exists', new=lambda x: volume_exists):
if volume_exists:
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._do_create_volume,
self._FAKE_VOLUME)
return
self._smbfs_driver._do_create_volume(self._FAKE_VOLUME)
if windows_image_format:
fake_method.assert_called_once_with(
fake_vol_path,
self._FAKE_VOLUME['size'],
windows_image_format)
else:
fake_method.assert_called_once_with(
fake_vol_path, self._FAKE_VOLUME['size'])
fake_set_permissions.assert_called_once_with(fake_vol_path)
def test_create_existing_volume(self):
self._test_create_volume(volume_exists=True)
def test_create_vhdx(self):
self._test_create_volume(volume_format='vhdx')
def test_create_qcow2(self):
self._test_create_volume(volume_format='qcow2')
def test_create_sparsed(self):
self._test_create_volume(volume_format='sparsed')
def test_create_regular(self):
self._test_create_volume()
def _test_find_share(self, existing_mounted_shares=True,
eligible_shares=True):
if existing_mounted_shares:
mounted_shares = ('fake_share1', 'fake_share2', 'fake_share3')
else:
mounted_shares = None
self._smbfs_driver._mounted_shares = mounted_shares
self._smbfs_driver._is_share_eligible = mock.Mock(
return_value=eligible_shares)
fake_capacity_info = ((2, 1, 5), (2, 1, 4), (2, 1, 1))
self._smbfs_driver._get_capacity_info = mock.Mock(
side_effect=fake_capacity_info)
if not mounted_shares:
self.assertRaises(exception.SmbfsNoSharesMounted,
self._smbfs_driver._find_share,
self._FAKE_VOLUME['size'])
elif not eligible_shares:
self.assertRaises(exception.SmbfsNoSuitableShareFound,
self._smbfs_driver._find_share,
self._FAKE_VOLUME['size'])
else:
ret_value = self._smbfs_driver._find_share(
self._FAKE_VOLUME['size'])
# The eligible share with the minimum allocated space
# will be selected
self.assertEqual(ret_value, 'fake_share3')
def test_find_share(self):
self._test_find_share()
def test_find_share_missing_mounted_shares(self):
self._test_find_share(existing_mounted_shares=False)
def test_find_share_missing_eligible_shares(self):
self._test_find_share(eligible_shares=False)
def _test_is_share_eligible(self, capacity_info, volume_size):
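        # capacity_info is (total, available, allocated) in GiB; shifting each
        # value left by 30 bits converts it to bytes for the driver.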
self._smbfs_driver._get_capacity_info = mock.Mock(
return_value=[float(x << 30) for x in capacity_info])
self._smbfs_driver.configuration = self._FAKE_SMBFS_CONFIG
return self._smbfs_driver._is_share_eligible(self._FAKE_SHARE,
volume_size)
def test_share_volume_above_used_ratio(self):
fake_capacity_info = (4, 1, 1)
fake_volume_size = 2
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_eligible_share(self):
fake_capacity_info = (4, 4, 0)
fake_volume_size = 1
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, True)
def test_share_volume_above_oversub_ratio(self):
fake_capacity_info = (4, 4, 7)
fake_volume_size = 2
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_share_reserved_above_oversub_ratio(self):
fake_capacity_info = (4, 4, 10)
fake_volume_size = 1
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_parse_options(self):
(opt_list,
opt_dict) = self._smbfs_driver.parse_options(
self._FAKE_SHARE_OPTS)
expected_ret = ([], self._FAKE_OPTIONS_DICT)
self.assertEqual(expected_ret, (opt_list, opt_dict))
def test_parse_credentials(self):
fake_smb_options = r'-o user=MyDomain\Administrator,noperm'
expected_flags = '-o username=Administrator,noperm'
flags = self._smbfs_driver.parse_credentials(fake_smb_options)
self.assertEqual(expected_flags, flags)
def test_get_volume_path(self):
self._smbfs_driver.get_volume_format = mock.Mock(
return_value='vhd')
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
expected = self._FAKE_VOLUME_PATH + '.vhd'
ret_val = self._smbfs_driver.local_path(self._FAKE_VOLUME)
self.assertEqual(expected, ret_val)
def test_initialize_connection(self):
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
self._smbfs_driver._get_mount_point_base = mock.Mock(
return_value=self._FAKE_MNT_BASE)
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format='raw'))
fake_data = {'export': self._FAKE_SHARE,
'format': 'raw',
'name': self._FAKE_VOLUME_NAME,
'options': self._FAKE_SHARE_OPTS}
expected = {
'driver_volume_type': 'smbfs',
'data': fake_data,
'mount_point_base': self._FAKE_MNT_BASE}
ret_val = self._smbfs_driver.initialize_connection(
self._FAKE_VOLUME, None)
self.assertEqual(expected, ret_val)
def _test_extend_volume(self, extend_failed=False, image_format='raw'):
drv = self._smbfs_driver
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv._check_extend_volume_support = mock.Mock(
return_value=True)
drv._is_file_size_equal = mock.Mock(
return_value=not extend_failed)
drv._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format=image_format))
with contextlib.nested(
mock.patch.object(image_utils, 'resize_image'),
mock.patch.object(image_utils, 'convert_image')) as (
fake_resize, fake_convert):
if extend_failed:
self.assertRaises(exception.ExtendVolumeError,
drv._extend_volume,
self._FAKE_VOLUME, mock.sentinel.new_size)
else:
drv._extend_volume(
self._FAKE_VOLUME,
mock.sentinel.new_size)
if image_format in (drv._DISK_FORMAT_VHDX,
drv._DISK_FORMAT_VHD_LEGACY):
fake_tmp_path = self._FAKE_VOLUME_PATH + '.tmp'
fake_convert.assert_any_call(self._FAKE_VOLUME_PATH,
fake_tmp_path, 'raw')
fake_resize.assert_called_once_with(
fake_tmp_path, mock.sentinel.new_size)
fake_convert.assert_any_call(fake_tmp_path,
self._FAKE_VOLUME_PATH,
image_format)
else:
fake_resize.assert_called_once_with(
self._FAKE_VOLUME_PATH, mock.sentinel.new_size)
def test_extend_volume(self):
self._test_extend_volume()
def test_extend_volume_failed(self):
self._test_extend_volume(extend_failed=True)
def test_extend_vhd_volume(self):
self._test_extend_volume(image_format='vpc')
def _test_check_extend_support(self, has_snapshots=False,
is_eligible=True):
self._smbfs_driver.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
if has_snapshots:
active_file_path = self._FAKE_SNAPSHOT_PATH
else:
active_file_path = self._FAKE_VOLUME_PATH
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=active_file_path)
self._smbfs_driver._is_share_eligible = mock.Mock(
return_value=is_eligible)
if has_snapshots:
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._check_extend_volume_support,
self._FAKE_VOLUME, 2)
elif not is_eligible:
self.assertRaises(exception.ExtendVolumeError,
self._smbfs_driver._check_extend_volume_support,
self._FAKE_VOLUME, 2)
else:
self._smbfs_driver._check_extend_volume_support(
self._FAKE_VOLUME, 2)
self._smbfs_driver._is_share_eligible.assert_called_once_with(
self._FAKE_SHARE, 1)
def test_check_extend_support(self):
self._test_check_extend_support()
def test_check_extend_volume_with_snapshots(self):
self._test_check_extend_support(has_snapshots=True)
    def test_check_extend_volume_ineligible_share(self):
self._test_check_extend_support(is_eligible=False)
def test_create_volume_from_in_use_snapshot(self):
fake_snapshot = {'status': 'in-use'}
self.assertRaises(
exception.InvalidSnapshot,
self._smbfs_driver.create_volume_from_snapshot,
self._FAKE_VOLUME, fake_snapshot)
def test_copy_volume_from_snapshot(self):
drv = self._smbfs_driver
fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
fake_img_info = mock.MagicMock()
fake_img_info.backing_file = self._FAKE_VOLUME_NAME
drv.get_volume_format = mock.Mock(
return_value='raw')
drv._local_path_volume_info = mock.Mock(
return_value=self._FAKE_VOLUME_PATH + '.info')
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv._read_info_file = mock.Mock(
return_value=fake_volume_info)
drv._qemu_img_info = mock.Mock(
return_value=fake_img_info)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH[:-1])
drv._extend_volume = mock.Mock()
drv._set_rw_permissions_for_all = mock.Mock()
with mock.patch.object(image_utils, 'convert_image') as (
fake_convert_image):
drv._copy_volume_from_snapshot(
self._FAKE_SNAPSHOT, self._FAKE_VOLUME,
self._FAKE_VOLUME['size'])
drv._extend_volume.assert_called_once_with(
self._FAKE_VOLUME, self._FAKE_VOLUME['size'])
fake_convert_image.assert_called_once_with(
self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw')
def test_ensure_mounted(self):
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
self._FAKE_SHARE, self._FAKE_SHARE_OPTS.split())
def _test_copy_image_to_volume(self, unsupported_qemu_version=False,
wrong_size_after_fetch=False):
drv = self._smbfs_driver
vol_size_bytes = self._FAKE_VOLUME['size'] << 30
fake_image_service = mock.MagicMock()
fake_image_service.show.return_value = (
{'id': 'fake_image_id', 'disk_format': 'raw'})
fake_img_info = mock.MagicMock()
if wrong_size_after_fetch:
fake_img_info.virtual_size = 2 * vol_size_bytes
else:
fake_img_info.virtual_size = vol_size_bytes
if unsupported_qemu_version:
qemu_version = [1, 5]
else:
qemu_version = [1, 7]
drv.get_volume_format = mock.Mock(
return_value=drv._DISK_FORMAT_VHDX)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv.get_qemu_version = mock.Mock(
return_value=qemu_version)
drv._do_extend_volume = mock.Mock()
drv.configuration = mock.MagicMock()
drv.configuration.volume_dd_blocksize = (
mock.sentinel.block_size)
exc = None
with contextlib.nested(
mock.patch.object(image_utils,
'fetch_to_volume_format'),
mock.patch.object(image_utils,
'qemu_img_info')) as (
fake_fetch,
fake_qemu_img_info):
if wrong_size_after_fetch:
exc = exception.ImageUnacceptable
elif unsupported_qemu_version:
exc = exception.InvalidVolume
fake_qemu_img_info.return_value = fake_img_info
if exc:
self.assertRaises(
exc, drv.copy_image_to_volume,
mock.sentinel.context, self._FAKE_VOLUME,
fake_image_service,
mock.sentinel.image_id)
else:
drv.copy_image_to_volume(
mock.sentinel.context, self._FAKE_VOLUME,
fake_image_service,
mock.sentinel.image_id)
fake_fetch.assert_called_once_with(
mock.sentinel.context, fake_image_service,
mock.sentinel.image_id, self._FAKE_VOLUME_PATH,
drv._DISK_FORMAT_VHDX,
mock.sentinel.block_size)
drv._do_extend_volume.assert_called_once_with(
self._FAKE_VOLUME_PATH, self._FAKE_VOLUME['size'])
def test_copy_image_to_volume(self):
self._test_copy_image_to_volume()
def test_copy_image_to_volume_wrong_size_after_fetch(self):
self._test_copy_image_to_volume(wrong_size_after_fetch=True)
def test_copy_image_to_volume_unsupported_qemu_version(self):
self._test_copy_image_to_volume(unsupported_qemu_version=True)
def test_get_capacity_info(self):
fake_block_size = 4096.0
fake_total_blocks = 1024
fake_avail_blocks = 512
fake_total_allocated = fake_total_blocks * fake_block_size
fake_df = ('%s %s %s' % (fake_block_size, fake_total_blocks,
fake_avail_blocks), None)
fake_du = (str(fake_total_allocated), None)
self._smbfs_driver._get_mount_point_for_share = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver._execute = mock.Mock(
side_effect=(fake_df, fake_du))
ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
expected = (fake_block_size * fake_total_blocks,
fake_block_size * fake_avail_blocks,
fake_total_allocated)
self.assertEqual(expected, ret_val)
|
"""Tests for tink.python.tink._keyset_reader."""
from typing import cast
from absl.testing import absltest
from tink.proto import tink_pb2
import tink
from tink import core
class JsonKeysetReaderTest(absltest.TestCase):
def test_read(self):
json_keyset = """
{
"primaryKeyId": 42,
"key": [
{
"keyData": {
"typeUrl": "type.googleapis.com/google.crypto.tink.AesGcmKey",
"keyMaterialType": "SYMMETRIC",
"value": "GhCS/1+ejWpx68NfGt6ziYHd"
},
"outputPrefixType": "TINK",
"keyId": 42,
"status": "ENABLED"
}
]
}"""
reader = tink.JsonKeysetReader(json_keyset)
keyset = reader.read()
self.assertEqual(keyset.primary_key_id, 42)
self.assertLen(keyset.key, 1)
def test_read_invalid(self):
reader = tink.JsonKeysetReader('not json')
with self.assertRaises(core.TinkError):
reader.read()
def test_read_encrypted(self):
# encryptedKeyset is a base64-encoding of 'some ciphertext with keyset'
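    # (base64.b64decode(b'c29tZSBjaXBoZXJ0ZXh0IHdpdGgga2V5c2V0')
    #  == b'some ciphertext with keyset')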
json_encrypted_keyset = """
{
"encryptedKeyset": "c29tZSBjaXBoZXJ0ZXh0IHdpdGgga2V5c2V0",
"keysetInfo": {
"primaryKeyId": 42,
"keyInfo": [
{
"typeUrl": "type.googleapis.com/google.crypto.tink.AesGcmKey",
"outputPrefixType": "TINK",
"keyId": 42,
"status": "ENABLED"
}
]
}
}"""
reader = tink.JsonKeysetReader(json_encrypted_keyset)
enc_keyset = reader.read_encrypted()
self.assertEqual(enc_keyset.encrypted_keyset,
b'some ciphertext with keyset')
self.assertLen(enc_keyset.keyset_info.key_info, 1)
self.assertEqual(enc_keyset.keyset_info.key_info[0].type_url,
'type.googleapis.com/google.crypto.tink.AesGcmKey')
def test_read_encrypted_invalid(self):
reader = tink.JsonKeysetReader('not json')
with self.assertRaises(core.TinkError):
reader.read_encrypted()
class BinaryKeysetReaderTest(absltest.TestCase):
def test_read(self):
keyset = tink_pb2.Keyset()
keyset.primary_key_id = 42
key = keyset.key.add()
key.key_data.type_url = 'type.googleapis.com/google.crypto.tink.AesGcmKey'
key.key_data.key_material_type = tink_pb2.KeyData.SYMMETRIC
key.key_data.value = b'GhCS/1+ejWpx68NfGt6ziYHd'
key.output_prefix_type = tink_pb2.TINK
key.key_id = 42
key.status = tink_pb2.ENABLED
reader = tink.BinaryKeysetReader(keyset.SerializeToString())
self.assertEqual(keyset, reader.read())
def test_read_none(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(cast(bytes, None))
reader.read()
def test_read_empty(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'')
reader.read()
def test_read_invalid(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'some weird data')
reader.read()
def test_read_encrypted(self):
encrypted_keyset = tink_pb2.EncryptedKeyset()
encrypted_keyset.encrypted_keyset = b'c29tZSBjaXBoZXJ0ZXh0IHdpdGgga2V5c2V0'
encrypted_keyset.keyset_info.primary_key_id = 42
key_info = encrypted_keyset.keyset_info.key_info.add()
key_info.type_url = 'type.googleapis.com/google.crypto.tink.AesGcmKey'
key_info.output_prefix_type = tink_pb2.TINK
key_info.key_id = 42
key_info.status = tink_pb2.ENABLED
reader = tink.BinaryKeysetReader(
encrypted_keyset.SerializeToString())
self.assertEqual(encrypted_keyset, reader.read_encrypted())
def test_read_encrypted_none(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(cast(bytes, None))
reader.read_encrypted()
def test_read_encrypted_empty(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'')
reader.read_encrypted()
def test_read_encrypted_invalid(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'some weird data')
reader.read_encrypted()
if __name__ == '__main__':
absltest.main()
|
import math
print("Digite os termos da equacao ax2+bx+c")
a = float(input("Digite o valor de A:\n"))
if(a==0):
print("Nao e uma equacao de segundo grau")
else:
b = float(input("Valor de B:\n"))
c = float(input("Valor de C:\n"))
delta = (math.pow(b,2) - (4*a*c))
if(delta<0):
print("A equacao nao possui raizes reais")
elif(delta == 0):
raiz = ((-1)*b + math.sqrt(delta))/(2*a)
print("A equacao possui apenas uma raiz",raiz)
else:
raiz1 = ((-1)*b + math.sqrt(delta))/(2*a)
raiz2 = ((-1)*b - math.sqrt(delta))/(2*a)
print("A equacao possui duas raizes")
print("Primeira raiz:",raiz1)
print("Segunda raiz:",raiz2)
|
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class FullInspiration(object):
"""
NOTE: This class is auto generated by the systran code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
"""
self.systran_types = {
'id': 'str',
'location': 'FullLocation',
'type': 'str',
'title': 'str',
'introduction': 'str',
'content': 'str',
'photos': 'list[Photo]',
'videos': 'list[Video]'
}
self.attribute_map = {
'id': 'id',
'location': 'location',
'type': 'type',
'title': 'title',
'introduction': 'introduction',
'content': 'content',
'photos': 'photos',
'videos': 'videos'
}
# Inspiration Identifier
self.id = None # str
# Location
self.location = None # FullLocation
# Inspiration type
self.type = None # str
# Title
self.title = None # str
# Introduction
self.introduction = None # str
# Content
self.content = None # str
# Array of Photos
self.photos = None # list[Photo]
# Array of Videos
self.videos = None # list[Video]
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'systran_types' and p != 'attribute_map':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
        return '<{name} {props}>'.format(name=self.__class__.__name__, props=' '.join(properties))
|
def load_GPS_EXIF(fname, python = True):
# Load sub-functions ...
from .load_GPS_EXIF1 import load_GPS_EXIF1
from .load_GPS_EXIF2 import load_GPS_EXIF2
# Check what the user wants ...
if python:
# Will use the Python module "exifread" ...
return load_GPS_EXIF1(fname)
else:
# Will use the binary "exiftool" ...
return load_GPS_EXIF2(fname)
|
'''
Copyright 2013 George Caley
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import codecs
import os
import re
import sqlite3
PREREQS_RE = re.compile(r"Pre-?req(?:uisites?)?:(.*?)(?:</p>|;)")
EXCLUSIONS_RE = re.compile(r"((?:Excluded|Exclusion|Exclusions|(?:and )?Excludes)[: ](.*?))(?:</p>|<br />)", re.IGNORECASE)
COREQS_RE = re.compile(r"Co-?requisite:(.*?)</p>", re.IGNORECASE)
NAME_RE = re.compile(r"<title>UNSW Handbook Course - (.*?) - [A-Z]{4}[0-9]{4}</title>", re.DOTALL)
DESC_RE = re.compile(r"<!-- Start Course Description -->(.*?)<!-- End Course description -->", re.DOTALL | re.IGNORECASE)
GENED_RE = re.compile(r"Available for General Education:")
OUTLINE_RE = re.compile(r"Course Outline:.*?<a .*?href=[\"'](.*?)[\"']")
UOC_RE = re.compile(r"Units of Credit:.*?([0-9]+)")
COURSE_RE = re.compile(r"[A-Z]{4}[0-9]{4}", re.IGNORECASE)
BR_RE = re.compile(r"<br ?/?>", re.IGNORECASE)
TAG_RE = re.compile(r"</?.*?>")
TYPE_PREREQUISITE = "prerequisite"
TYPE_COREQUISITE = "corequisite"
TYPE_EXCLUSION = "exclusion"
DATABASE_FILENAME = "courses.db"
COURSE_DIR = "courses"
if os.path.exists(DATABASE_FILENAME):
print "Deleting existing database"
os.unlink(DATABASE_FILENAME)
print "Creating new database"
conn = sqlite3.connect(DATABASE_FILENAME)
cur = conn.cursor()
print "Creating tables"
cur.execute("CREATE TABLE courses (code text primary key, name text, description text, prerequisites text, corequisites text, exclusions text, gened integer, outline text, uoc integer)")
cur.execute("CREATE TABLE relationships (source text, destination text, type text)")
print "Loading course list"
print
filenames = os.listdir(COURSE_DIR)
i = 0
for filename in filenames:
i += 1
    # slice off the ".html" extension (rstrip() strips a *set* of characters,
    # not a suffix, so it could eat trailing letters of the course code)
    code = filename[:-len(".html")]
    print "Reading %s (%d/%d)" % (code, i, len(filenames))
# open with unicode support
f = codecs.open("%s/%s" % (COURSE_DIR, filename), encoding="utf-8", mode="r")
data = f.read()
f.close()
    # strip &nbsp;'s and <strong> tags
    data = data.replace("&nbsp;", " ")
data = data.replace("<strong>", "")
data = data.replace("</strong>", "")
# find name
match = re.search(NAME_RE, data)
if match:
name = match.group(1).strip().replace("\n", "")
print "Found name:", name
else:
name = None
print "Couldn't find name"
print "Fatal error!"
quit()
# find exclusions. all of them.
exclusions = ""
exclusions_list = []
while True:
match = re.search(EXCLUSIONS_RE, data)
if match:
exclusions = match.group(2).strip()
print "Found exclusions:", exclusions
data = data.replace(match.group(1), "")
exclusions_list = re.findall(COURSE_RE, exclusions)
print "Exclusions list:", exclusions_list
else:
#exclusions = None
#exclusions_list = []
#print "Couldn't find exclusions"
break
# find corequisites
match = re.search(COREQS_RE, data)
if match:
coreqs = match.group(1).strip()
print "Found corequisites:", coreqs
data = data.replace(match.group(0), "")
coreqs_list = map(unicode.upper, re.findall(COURSE_RE, coreqs))
print "Corequisites list:", coreqs_list
else:
coreqs = None
coreqs_list = []
print "Couldn't find corequisites"
# find prerequisites
match = re.search(PREREQS_RE, data)
if match:
prereqs = match.group(1).strip()
print "Found prerequisites:", prereqs
data = data.replace(match.group(0), "")
prereqs_list = map(unicode.upper, re.findall(COURSE_RE, prereqs))
print "Prerequisites list:", prereqs_list
else:
prereqs = None
prereqs_list = []
print "Couldn't find prerequisites"
# find description
match = re.search(DESC_RE, data)
if match:
desc = match.group(1).strip()
# change <br>'s
#desc = re.sub(BR_RE, "\n", desc)
# strip tags
#desc = re.sub(TAG_RE, "", desc)
#print "Found description:", desc
print "Found description"
else:
desc = None
print "Couldn't find description"
# find general education statement
match = re.search(GENED_RE, data)
if match:
gened = 1
else:
gened = 0
# find course outline
match = re.search(OUTLINE_RE, data)
if match:
outline = match.group(1).strip()
print "Found course outline:", outline
else:
outline = None
print "Couldn't find course outline"
# find uoc
match = re.search(UOC_RE, data)
if match:
uoc = match.group(1).strip()
try:
uoc = int(uoc)
print "Found UoC:", uoc
        except ValueError:
print "UoC was not an integer: '%s'" % uoc
uoc = None
else:
uoc = None
print "Couldn't find UoC"
print "Writing to database"
cur.execute("INSERT INTO courses (code, name, description, prerequisites, corequisites, exclusions, gened, outline, uoc) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", (code, name, desc, prereqs, coreqs, exclusions, gened, outline, uoc))
for prereq in prereqs_list:
cur.execute("INSERT INTO relationships (source, destination, type) VALUES (?, ?, ?)", (code, prereq, TYPE_PREREQUISITE))
for coreq in coreqs_list:
cur.execute("INSERT INTO relationships (source, destination, type) VALUES (?, ?, ?)", (code, coreq, TYPE_COREQUISITE))
for exclusion in exclusions_list:
cur.execute("INSERT INTO relationships (source, destination, type) VALUES (?, ?, ?)", (code, exclusion, TYPE_EXCLUSION))
print
conn.commit()
conn.close()
|
import inspect
import json
import os
import random
import subprocess
import time
import requests
import ast
import paramiko
import rancher
from rancher import ApiError
from lib.aws import AmazonWebServices
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
CLUSTER_NAME_2 = os.environ.get("RANCHER_CLUSTER_NAME_2", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
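# ast.literal_eval turns the "True"/"False" string from the environment into
# a real boolean rather than a truthy non-empty string.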
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_client_for_token(token):
return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
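    # Note: this duplicates get_cluster_client_for_token above; it is left in
    # place in case other tests call it under this name.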
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
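    # Reload `resource` until check_function(resource) is truthy, raising
    # after `timeout` seconds; fail_handler(resource), if given, is appended
    # to the exception message.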
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
client = get_client_for_token(token)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster):
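    # Fetch the cluster's kubeconfig through the Rancher API and write it to
    # kube_fname, which every execute_kubectl_cmd call below points kubectl at.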
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
    with open(kube_fname, "w") as f:
        f.write(generateKubeConfigOutput.config)
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == pod_count
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == pod_count
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= pod_count
return
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
return pods_result["items"]
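# A minimal usage sketch for validate_workload (names are illustrative;
# p_client/ns would come from create_project_and_ns and
# get_project_client_for_token above):
#
#     con = [{"name": "c0", "image": TEST_IMAGE}]
#     wl = p_client.create_workload(name=random_test_name(),
#                                   containers=con, namespaceId=ns.id)
#     validate_workload(p_client, wl, "deployment", ns.name)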
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
command = 'kubectl --kubeconfig {0} {1}'.format(
kube_fname, cmd)
if json_out:
command += ' -o json'
if stderr:
result = run_command_with_stderr(command)
else:
result = run_command(command)
if json_out:
result = json.loads(result)
print(result)
return result
def run_command(command):
return subprocess.check_output(command, shell=True, text=True)
def run_command_with_stderr(command):
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
print(returncode)
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster):
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker:
schedulable_nodes.append(node)
return schedulable_nodes
def get_role_nodes(cluster, role):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
    if insecure_redirect:
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster)
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = node.externalIpAddress
cmd = curl_args + " http://" + host_ip + path
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name):
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
def validate_http_response(cmd, target_name_list, client_pod=None):
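    # Hit the endpoint repeatedly (5 requests per expected target) and assert
    # that every pod name in target_name_list shows up in a response at least
    # once, i.e. traffic is spread across all backing pods.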
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
result = run_command(curl_cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
print("cmd: \t" + cmd)
print("result: \t" + result)
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version=""):
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonset
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd")))
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
if not skipIngresscheck:
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
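    # `kubectl get cs` should list the scheduler, controller-manager and one
    # etcd-N component per etcd node, each reporting a Healthy/True condition.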
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
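    # Build the registration command for a custom-cluster node: reuse (or
    # create) a cluster registration token, then append the requested role
    # flags and the node's public/internal addresses to its nodeCommand.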
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete Cluster
client.delete(cluster)
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
    if len(nodes) > 0:
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
nodes = client.list_node(clusterId=cluster.id).data
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststess*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
ip_list.append(node.externalIpAddress)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
print("Actual ping Response from " + pod1.name + ":" + str(response))
if allow_connectivity:
assert pod_ip in str(response) and " 0% packet loss" in str(response)
else:
assert pod_ip in str(response) and " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
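    # Run `cmd` inside the pod via `kubectl exec`; stderr is requested so a
    # failing command's output is still returned instead of raising.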
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
        for x in range(numofpods):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
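    # Reload the object with capped exponential backoff; a 403 from the API
    # is treated as "not visible yet" and retried until the timeout elapses.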
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
    with open(env_file, "w") as f:
        f.write(env_details)
def validate_hostPort(p_client, workload, source_port, cluster):
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload):
url = get_endpoint_url_for_workload(p_client, workload)
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster):
get_endpoint_url_for_workload(p_client, workload, 60)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port = wl.publicEndpoints[0]["port"]
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
    pvs = c_client.list_persistent_volume(uuid=pv_object.uuid).data
    assert len(pvs) == 1
    pv = pvs[0]
    while pv.state != "available":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
        pvs = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(pvs) == 1
        pv = pvs[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
    pvcs = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
    assert len(pvcs) == 1
    pvc = pvcs[0]
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        pvcs = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
        assert len(pvcs) == 1
        pvc = pvcs[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
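# A minimal sketch of wiring create_wl_with_nfs together with the PVC helpers
# above (the pvc/workload names are illustrative):
#
#     pvc = wait_for_pvc_to_be_bound(p_client, pvc)
#     wl = create_wl_with_nfs(p_client, ns.id, pvc.name, "wl-nfs",
#                             mount_path="/var/nfs", sub_path="sub")
#     validate_workload(p_client, wl, "deployment", ns.name)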
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
app_data = client.list_app(name=app_id).data
start = time.time()
assert len(app_data) == 1, "Cannot find app"
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
app = client.list_app(name=app_id).data
assert len(app) == 1
application = app[0]
return application
def validate_response_app_endpoint(p_client, appId):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
try:
r = requests.head(url)
assert r.status_code == 200, \
"Http response is not 200. Failed to launch the app"
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
|
__author__ = 'mpetyx'
from tastypie.authorization import DjangoAuthorization
from .models import OpeniQuestion
from OPENiapp.APIS.OpeniGenericResource import GenericResource
from OPENiapp.APIS.OPENiAuthorization import Authorization
from OPENiapp.APIS.OPENiAuthentication import Authentication
class QuestionResource(GenericResource):
class Meta:
queryset = OpeniQuestion.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
resource_name = 'question'
authentication = Authentication()
authorization = Authorization()
# filtering = {
# 'slug': ALL,
# 'user': ALL_WITH_RELATIONS,
# 'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
# }
extra_actions = [
{
"name": "comments",
"http_method": "GET",
"resource_type": "list",
"description": "comments from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
},
{
"name": "likes",
"http_method": "GET",
"resource_type": "list",
"description": "likes from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
},
{
"name": "dislikes",
"http_method": "GET",
"resource_type": "list",
"description": "dislikes from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
}
]
|
"""
Views for managing Images and Snapshots.
"""
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import api
from horizon import exceptions
from horizon import tables
from horizon import tabs
from .images.tables import ImagesTable
from .snapshots.tables import SnapshotsTable
from .volume_snapshots.tables import VolumeSnapshotsTable
from .volume_snapshots.tabs import SnapshotDetailTabs
LOG = logging.getLogger(__name__)
class IndexView(tables.MultiTableView):
table_classes = (ImagesTable, SnapshotsTable, VolumeSnapshotsTable)
template_name = 'project/images_and_snapshots/index.html'
def has_more_data(self, table):
return getattr(self, "_more_%s" % table.name, False)
def get_images_data(self):
marker = self.request.GET.get(ImagesTable._meta.pagination_param, None)
try:
# FIXME(gabriel): The paging is going to be strange here due to
# our filtering after the fact.
(all_images,
self._more_images) = api.image_list_detailed(self.request,
marker=marker)
images = [im for im in all_images
if im.container_format not in ['aki', 'ari'] and
im.properties.get("image_type", '') != "snapshot"]
        except Exception:
images = []
exceptions.handle(self.request, _("Unable to retrieve images."))
return images
def get_snapshots_data(self):
req = self.request
marker = req.GET.get(SnapshotsTable._meta.pagination_param, None)
try:
snaps, self._more_snapshots = api.snapshot_list_detailed(req,
marker=marker)
        except Exception:
snaps = []
exceptions.handle(req, _("Unable to retrieve snapshots."))
return snaps
def get_volume_snapshots_data(self):
try:
snapshots = api.volume_snapshot_list(self.request)
        except Exception:
snapshots = []
exceptions.handle(self.request, _("Unable to retrieve "
"volume snapshots."))
return snapshots
class DetailView(tabs.TabView):
tab_group_class = SnapshotDetailTabs
template_name = 'project/images_and_snapshots/snapshots/detail.html'
|
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
import mock
from requests import models
import uuid
LOG = logging.getLogger(__name__)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
class DellSCSanAPITestCase(test.TestCase):
'''DellSCSanAPITestCase
Class to test the Storage Center API using Mock.
'''
SC = {u'IPv6ManagementIPPrefix': 128,
u'connectionError': u'',
u'instanceId': u'64702',
u'scSerialNumber': 64702,
u'dataProgressionRunning': False,
u'hostOrIpAddress': u'192.168.0.80',
u'userConnected': True,
u'portsBalanced': True,
u'managementIp': u'192.168.0.80',
u'version': u'6.5.1.269',
u'location': u'',
u'objectType': u'StorageCenter',
u'instanceName': u'Storage Center 64702',
u'statusMessage': u'',
u'status': u'Up',
u'flashOptimizedConfigured': False,
u'connected': True,
u'operationMode': u'Normal',
u'userName': u'Admin',
u'nonFlashOptimizedConfigured': True,
u'name': u'Storage Center 64702',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'serialNumber': 64702,
u'raidRebalanceRunning': False,
u'userPasswordExpired': False,
u'contact': u'',
u'IPv6ManagementIP': u'::'}
VOLUME = {u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
INACTIVE_VOLUME = \
{u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': False,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
# ScServer where deletedAllowed=False (not allowed to be deleted)
SCSERVER_NO_DEL = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': False,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem':
{u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
SCSERVERS = [{u'scName': u'Storage Center 64702',
u'volumeCount': 5,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack4',
u'instanceId': u'64702.1',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Up',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0,
u'name': u'openstack4',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.3',
u'instanceName': u'Other Multipath',
u'objectType': u'ScServerOperatingSystem'}},
{u'scName': u'Storage Center 64702',
u'volumeCount': 1,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack5',
u'instanceId': u'64702.2',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Up',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0, u'name': u'openstack5',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.2',
u'instanceName': u'Other Singlepath',
u'objectType': u'ScServerOperatingSystem'}}]
# ScServers list where status = Down
SCSERVERS_DOWN = \
[{u'scName': u'Storage Center 64702',
u'volumeCount': 5,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack4',
u'instanceId': u'64702.1',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Down',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0,
u'name': u'openstack4',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.3',
u'instanceName': u'Other Multipath',
u'objectType': u'ScServerOperatingSystem'}}]
MAP_PROFILES = [{u'instanceId': u'64702.2941',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'lunUsed': [1],
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'connectivity': u'Up',
u'readOnly': False,
u'objectType': u'ScMappingProfile',
u'hostCache': False,
u'mappedVia': u'Server',
u'mapCount': 3,
u'instanceName': u'6025-47',
u'lunRequested': u'N/A'}]
MAP_PROFILE = {u'instanceId': u'64702.2941',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'lunUsed': [1],
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'connectivity': u'Up',
u'readOnly': False,
u'objectType': u'ScMappingProfile',
u'hostCache': False,
u'mappedVia': u'Server',
u'mapCount': 3,
u'instanceName': u'6025-47',
u'lunRequested': u'N/A'}
MAPPINGS = [{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
# Multiple mappings to test find_iscsi_properties with multiple portals
MAPPINGS_MULTI_PORTAL = \
[{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
MAPPINGS_READ_ONLY = \
[{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': True,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName':
u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
FC_MAPPINGS = [{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7639.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218607',
u'instanceName': u'21000024FF30441C',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64703.27.73',
u'instanceName':
u'21000024FF30441C-5000D31000FCBE36',
u'objectType': u'ScServerHbaPath'},
u'controllerPort':
{u'instanceId': u'64702.5764839588723736118.50',
u'instanceName': u'5000D31000FCBE36',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7639',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7640.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218606',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'},
u'path':
{u'instanceId': u'64702.64702.64703.27.78',
u'instanceName': u'21000024FF30441D-5000D31000FCBE36',
u'objectType': u'ScServerHbaPath'},
u'controllerPort':
{u'instanceId': u'64702.5764839588723736118.50',
u'instanceName': u'5000D31000FCBE36',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7640',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7638.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218606',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'},
u'path':
{u'instanceId': u'64702.64702.64703.28.76',
u'instanceName': u'21000024FF30441D-5000D31000FCBE3E',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736126.60',
u'instanceName': u'5000D31000FCBE3E',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7638',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'}]
RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-46-250',
u'description': u'Cinder Clone Replay',
u'parent': {u'instanceId': u'64702.46.249',
u'instanceName': u'64702-46-249',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.46.250',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'12/09/2014 03:52:08 PM',
u'createVolume': {u'instanceId': u'64702.46',
u'instanceName':
u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b',
u'objectType': u'ScVolume'},
u'expireTime': u'12/09/2014 04:52:08 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7910,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'12/09/2014 03:52:08 PM',
u'size': u'0.0 Bytes'
}
RPLAYS = [{u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-5',
u'description': u'Manually Created',
u'parent': {u'instanceId': u'64702.6025.4',
u'instanceName': u'64702-6025-4',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.5',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:55 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:55 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7889,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:55 PM',
u'size': u'0.0 Bytes'},
{u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-4',
u'description': u'Cinder Test Replay012345678910',
u'parent': {u'instanceId': u'64702.6025.3',
u'instanceName': u'64702-6025-3',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.4',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:47 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:47 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7869,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:47 PM',
u'size': u'0.0 Bytes'}]
TST_RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-4',
u'description': u'Cinder Test Replay012345678910',
u'parent': {u'instanceId': u'64702.6025.3',
u'instanceName': u'64702-6025-3',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.4',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:47 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:47 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7869,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:47 PM',
u'size': u'0.0 Bytes'}
FLDR = {u'status': u'Up',
u'instanceName': u'opnstktst',
u'name': u'opnstktst',
u'parent':
{u'instanceId': u'64702.0',
u'instanceName': u'Volumes',
u'objectType': u'ScVolumeFolder'},
u'instanceId': u'64702.43',
u'scName': u'Storage Center 64702',
u'notes': u'Folder for OpenStack Cinder Driver',
u'scSerialNumber': 64702,
u'parentIndex': 0,
u'okToDelete': True,
u'folderPath': u'',
u'root': False,
u'statusMessage': u'',
u'objectType': u'ScVolumeFolder'}
SVR_FLDR = {u'status': u'Up',
u'instanceName': u'devstacksrv',
u'name': u'devstacksrv',
u'parent': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'instanceId': u'64702.4',
u'scName': u'Storage Center 64702',
u'notes': u'Folder for OpenStack Cinder Driver',
u'scSerialNumber': 64702,
u'parentIndex': 0,
u'okToDelete': False,
u'folderPath': u'',
u'root': False,
u'statusMessage': u'',
u'objectType': u'ScServerFolder'}
ISCSI_HBA = {u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 1,
u'name': u'iqn.1993-08.org.debian:01:52332b70525',
u'connectivity': u'Down',
u'instanceId': u'64702.3786433166',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server':
{u'instanceId': u'64702.38',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:52332b70525',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'Iscsi',
u'instanceName': u'iqn.1993-08.org.debian:01:52332b70525',
u'objectType': u'ScServerHba'}
FC_HBAS = [{u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 2,
u'name': u'21000024FF30441C',
u'connectivity': u'Up',
u'instanceId': u'64702.3282218607',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441C',
u'objectType': u'ScServerHba'},
{u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 3,
u'name': u'21000024FF30441D',
u'connectivity': u'Partial',
u'instanceId': u'64702.3282218606',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'}]
FC_HBA = {u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 3,
u'name': u'21000024FF30441D',
u'connectivity': u'Partial',
u'instanceId': u'64702.3282218606',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'}
SVR_OS_S = [{u'allowsLunGaps': True,
u'product': u'Red Hat Linux',
u'supportsActiveMappingDeletion': True,
u'version': u'6.x',
u'requiresLunZero': False,
u'scName': u'Storage Center 64702',
u'virtualMachineGuest': True,
u'virtualMachineHost': False,
u'allowsCrossTransportMapping': False,
u'objectType': u'ScServerOperatingSystem',
u'instanceId': u'64702.38',
u'lunCanVaryAcrossPaths': False,
u'scSerialNumber': 64702,
u'maximumVolumeSize': u'0.0 Bytes',
u'multipath': True,
u'instanceName': u'Red Hat Linux 6.x',
u'supportsActiveMappingCreation': True,
u'name': u'Red Hat Linux 6.x'}]
ISCSI_FLT_DOMAINS = [{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}]
# For testing find_iscsi_properties where multiple portals are found
ISCSI_FLT_DOMAINS_MULTI_PORTALS = \
[{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'},
{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.25',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}]
ISCSI_FLT_DOMAIN = {u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}
CTRLR_PORT = {u'status': u'Up',
u'iscsiIpAddress': u'0.0.0.0',
u'WWN': u'5000D31000FCBE06',
u'name': u'5000D31000FCBE06',
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736070.51',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'FibreChannel',
u'virtual': False,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'iscsiName': u'',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.4.3',
u'instanceName': u'Domain 1',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE06',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
ISCSI_CTRLR_PORT = {u'preferredParent':
{u'instanceId': u'64702.5764839588723736074.69',
u'instanceName': u'5000D31000FCBE0A',
u'objectType': u'ScControllerPort'},
u'status': u'Up',
u'iscsiIpAddress': u'10.23.8.235',
u'WWN': u'5000D31000FCBE43',
u'name': u'5000D31000FCBE43',
u'parent':
{u'instanceId': u'64702.5764839588723736074.69',
u'instanceName': u'5000D31000FCBE0A',
u'objectType': u'ScControllerPort'},
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736131.91',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'Iscsi',
u'virtual': True,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.6.5',
u'instanceName': u'iSCSI 10G 2',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE43',
u'childStatus': u'Up',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
FC_CTRLR_PORT = {u'preferredParent':
{u'instanceId': u'64702.5764839588723736093.57',
u'instanceName': u'5000D31000FCBE1D',
u'objectType': u'ScControllerPort'},
u'status': u'Up',
u'iscsiIpAddress': u'0.0.0.0',
u'WWN': u'5000D31000FCBE36',
u'name': u'5000D31000FCBE36',
u'parent':
{u'instanceId': u'64702.5764839588723736093.57',
u'instanceName': u'5000D31000FCBE1D',
u'objectType': u'ScControllerPort'},
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736118.50',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'FibreChannel',
u'virtual': True,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'iscsiName': u'',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.1.0',
u'instanceName': u'Domain 0',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE36',
u'childStatus': u'Up',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
STRG_USAGE = {u'systemSpace': u'7.38197504E8 Bytes',
u'freeSpace': u'1.297659461632E13 Bytes',
u'oversubscribedSpace': u'0.0 Bytes',
u'instanceId': u'64702',
u'scName': u'Storage Center 64702',
u'savingVsRaidTen': u'1.13737990144E11 Bytes',
u'allocatedSpace': u'1.66791217152E12 Bytes',
u'usedSpace': u'3.25716017152E11 Bytes',
u'configuredSpace': u'9.155796533248E12 Bytes',
u'alertThresholdSpace': u'1.197207956992E13 Bytes',
u'availableSpace': u'1.3302310633472E13 Bytes',
u'badSpace': u'0.0 Bytes',
u'time': u'02/02/2015 02:23:39 PM',
u'scSerialNumber': 64702,
u'instanceName': u'Storage Center 64702',
u'storageAlertThreshold': 10,
u'objectType': u'StorageCenterStorageUsage'}
IQN = 'iqn.2002-03.com.compellent:5000D31000000001'
WWN = u'21000024FF30441C'
WWNS = [u'21000024FF30441C',
u'21000024FF30441D']
FLDR_PATH = 'StorageCenter/ScVolumeFolder/'
# Create a Response object that indicates OK
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates created
response_created = models.Response()
response_created.status_code = 201
response_created.reason = u'created'
RESPONSE_201 = response_created
# Create a Response object that indicates a failure (no content)
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
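# Note: throughout this suite a 200/201 response stands in for a
# successful REST call and a 204 stands in for a failed one.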
def setUp(self):
super(DellSCSanAPITestCase, self).setUp()
# self.configuration is a mock, and a mock is pretty much a
# blank slate, so we set up the driver config values the tests
# rely on here rather than patching them in every test.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "mmm"
self.configuration.dell_sc_ssn = 12345
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
self.configuration.dell_sc_api_port = 3033
self.configuration.iscsi_ip_address = '192.168.1.1'
self.configuration.iscsi_port = 3260
self._context = context.get_admin_context()
# Set up the StorageCenterApi
self.scapi = dell_storagecenter_api.StorageCenterApi(
self.configuration.san_ip,
self.configuration.dell_sc_api_port,
self.configuration.san_login,
self.configuration.san_password)
self.volid = str(uuid.uuid4())
self.volume_name = "volume" + self.volid
def test_path_to_array(self,
mock_close_connection,
mock_open_connection,
mock_init):
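# _path_to_array should split a '/'-delimited folder path
# into its component folder names.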
res = self.scapi._path_to_array(u'folder1/folder2/folder3')
expected = [u'folder1', u'folder2', u'folder3']
self.assertEqual(expected, res, 'Unexpected folder path')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=SC)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_sc(self,
mock_get,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
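# find_sc should GET StorageCenter/StorageCenter and return
# the SSN of the matching Storage Center.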
res = self.scapi.find_sc(64702)
mock_get.assert_called_once_with('StorageCenter/StorageCenter')
mock_get_result.assert_called()
self.assertEqual(u'64702', res, 'Unexpected SSN')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=None)
def test_find_sc_failure(self,
mock_get_result,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_sc, 12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_folder(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
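# Basic folder creation: the POST succeeds (201) and
# _first_result returns the new ScVolumeFolder.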
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder', 12345, '',
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_folder_with_parent(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where parent folder name is specified
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder', 12345, 'parentFolder',
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_folder_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder', 12345, '',
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Test Create folder - None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path(self,
mock_path_to_array,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called()
self.assertEqual(self.FLDR, res, 'Unexpected ScFolder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path_create_fldr(self,
mock_path_to_array,
mock_find_folder,
mock_create_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where folder is not found and must be created
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called()
mock_create_folder.assert_called()
self.assertEqual(self.FLDR, res, 'Unexpected ScFolder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path_failure(self,
mock_path_to_array,
mock_find_folder,
mock_create_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where folder is not found, must be created
# and creation fails
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called()
mock_create_folder.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=u'devstackvol/fcvm/')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_folder(self,
mock_post,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
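# _find_folder issues a GetList POST; _get_result is mocked
# to return the matching folder path.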
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_get_result.assert_called()
self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=u'devstackvol/fcvm/')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_folder_multi_fldr(self,
mock_post,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case for folder path with multiple folders
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder', 12345,
u'testParentFolder/opnstktst')
mock_post.assert_called()
mock_get_result.assert_called()
self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_folder_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Test find folder - None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder_path',
return_value=FLDR)
def test_create_volume_folder_path(self,
mock_create_vol_fldr_path,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_volume_folder_path(
12345,
self.configuration.dell_sc_volume_folder)
mock_create_vol_fldr_path.assert_called_once_with(
'StorageCenter/ScVolumeFolder',
12345,
self.configuration.dell_sc_volume_folder)
self.assertEqual(self.FLDR, res, 'Unexpected ScFolder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=FLDR)
def test_find_volume_folder(self,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_volume_folder(
12345,
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScVolumeFolder/GetList',
12345,
self.configuration.dell_sc_volume_folder)
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCSERVERS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_init_volume(self,
mock_post,
mock_get_json,
mock_map_volume,
mock_unmap_volume,
mock_close_connection,
mock_open_connection,
mock_init):
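# _init_volume maps the volume to a ScServer and unmaps it
# again; both calls are verified below.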
self.scapi._init_volume(self.VOLUME)
mock_map_volume.assert_called()
mock_unmap_volume.assert_called()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_init_volume_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScServer list fails
self.scapi._init_volume(self.VOLUME)
mock_post.assert_called()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCSERVERS_DOWN)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_init_volume_servers_down(self,
mock_post,
mock_get_json,
mock_map_volume,
mock_unmap_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScServer Status = Down
self.scapi._init_volume(self.VOLUME)
mock_map_volume.assert_called()
mock_unmap_volume.assert_called()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_volume(self,
mock_post,
mock_find_volume_folder,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
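# create_volume POSTs a new ScVolume into the existing
# volume folder.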
res = self.scapi.create_volume(
self.volume_name,
1,
12345,
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_get_json.assert_called()
mock_find_volume_folder.assert_called_once_with(
12345, self.configuration.dell_sc_volume_folder)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_volume_folder_path',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_vol_and_folder(self,
mock_post,
mock_find_volume_folder,
mock_create_vol_folder_path,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling create_volume where volume folder has to be created
res = self.scapi.create_volume(
self.volume_name,
1,
12345,
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_get_json.assert_called()
mock_create_vol_folder_path.assert_called_once_with(
12345,
self.configuration.dell_sc_volume_folder)
mock_find_volume_folder.assert_called_once_with(
12345, self.configuration.dell_sc_volume_folder)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_volume_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_vol_folder_fail(self,
mock_post,
mock_find_volume_folder,
mock_create_vol_folder_path,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling create_volume where volume folder does not exist and
# fails to be created
res = self.scapi.create_volume(
self.volume_name,
1,
12345,
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_get_json.assert_called()
mock_create_vol_folder_path.assert_called_once_with(
12345,
self.configuration.dell_sc_volume_folder)
mock_find_volume_folder.assert_called_once_with(
12345, self.configuration.dell_sc_volume_folder)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_volume_failure(self,
mock_post,
mock_find_volume_folder,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_volume(
self.volume_name,
1,
12345,
self.configuration.dell_sc_volume_folder)
mock_find_volume_folder.assert_called_once_with(
12345, self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_volume_by_name(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find volume by name
res = self.scapi.find_volume(12345,
self.volume_name)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_volume_by_instanceid(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find volume by instanceId
res = self.scapi.find_volume(12345,
None,
'64702.3494')
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected volume')
def test_find_volume_no_name_or_instance(self,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling find_volume with no name or instanceid
res = self.scapi.find_volume(12345)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_volume_not_found(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling find_volume with result of no volume found
res = self.scapi.find_volume(12345,
self.volume_name)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=True)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
def test_delete_volume(self,
mock_find_volume,
mock_delete,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
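# delete_volume looks up the ScVolume, issues the DELETE,
# and returns the JSON result (True here).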
res = self.scapi.delete_volume(12345,
self.volume_name)
mock_delete.assert_called()
mock_find_volume.assert_called_once_with(12345, self.volume_name, None)
mock_get_json.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_204)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
def test_delete_volume_failure(self,
mock_find_volume,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.delete_volume, 12345, self.volume_name)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
def test_delete_volume_no_vol_found(self,
mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume to be deleted does not exist
res = self.scapi.delete_volume(12345,
self.volume_name)
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder_path',
return_value=SVR_FLDR)
def test_create_server_folder_path(self,
mock_create_svr_fldr_path,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_server_folder_path(
12345,
self.configuration.dell_sc_server_folder)
mock_create_svr_fldr_path.assert_called_once_with(
'StorageCenter/ScServerFolder',
12345,
self.configuration.dell_sc_server_folder)
self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=SVR_FLDR)
def test_find_server_folder(self,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_server_folder(
12345,
self.configuration.dell_sc_server_folder)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScServerFolder/GetList',
12345,
self.configuration.dell_sc_server_folder)
self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_add_hba(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
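# Add an iSCSI initiator (IQN) to the ScServer; the final
# flag distinguishes FC (True) from iSCSI (False).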
res = self.scapi._add_hba(self.SCSERVER,
self.IQN,
False)
mock_post.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_add_hba_fc(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._add_hba(self.SCSERVER,
self.WWN,
True)
mock_post.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_add_hba_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._add_hba(self.SCSERVER,
self.IQN,
False)
mock_post.assert_called()
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SVR_OS_S)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serveros(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_serveros(12345, 'Red Hat Linux 6.x')
mock_get_json.assert_called()
mock_post.assert_called()
self.assertEqual('64702.38', res, 'Wrong InstanceId')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SVR_OS_S)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serveros_not_found(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test requesting a Server OS that will not be found
res = self.scapi._find_serveros(12345, 'Non existent OS')
mock_get_json.assert_called()
mock_post.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_serveros_failed(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_serveros(12345, 'Red Hat Linux 6.x')
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=FC_HBA)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=SCSERVER)
def test_create_server_multiple_hbas(self,
mock_create_server,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
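# create_server_multiple_hbas creates a single ScServer and
# adds each WWN in the list as an HBA.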
res = self.scapi.create_server_multiple_hbas(
12345,
self.configuration.dell_sc_server_folder,
self.WWNS)
mock_create_server.assert_called()
mock_add_hba.assert_called()
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
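# Happy path: the server OS and server folder are found, the
# ScServer is created, and the HBA is added.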
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
mock_find_serveros.assert_called()
mock_find_server_folder.assert_called()
mock_first_result.assert_called()
mock_add_hba.assert_called()
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_os_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
mock_find_serveros.assert_called()
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_server_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_fldr_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_create_svr_fldr_path,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
mock_find_server_folder.assert_called()
mock_create_svr_fldr_path.assert_called()
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_server_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_server_failure(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_create_svr_fldr_path,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_server_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_create_svr_fldr_path,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test create server where _first_result is None
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_delete_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_addhba_fail(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_delete_server,
mock_close_connection,
mock_open_connection,
mock_init):
# Tests create server where add hba fails
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
mock_delete_server.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_server(self,
mock_post,
mock_find_serverhba,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
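# find_server resolves the ScServerHba for the IQN, then the
# owning ScServer via _first_result.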
res = self.scapi.find_server(12345,
self.IQN)
mock_find_serverhba.assert_called()
mock_first_result.assert_called()
self.assertIsNotNone(res, 'Expected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_server_no_hba(self,
mock_post,
mock_find_serverhba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where a ScServer HBA does not exist with the specified IQN
# or WWN
res = self.scapi.find_server(12345,
self.IQN)
mock_find_serverhba.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_server_failure(self,
mock_post,
mock_find_serverhba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where a ScServer does not exist with the specified
# ScServerHba
res = self.scapi.find_server(12345,
self.IQN)
mock_find_serverhba.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serverhba(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
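# Exercises the ScServerHba lookup through the public
# find_server call rather than calling _find_serverhba
# directly.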
res = self.scapi.find_server(12345,
self.IQN)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertIsNotNone(res, 'Expected ScServerHba')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_serverhba_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where no ScServerHba exists for the specified IQN
res = self.scapi.find_server(12345,
self.IQN)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_domains(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
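# _find_domains GETs the fault domain list for a controller
# port instanceId.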
res = self.scapi._find_domains(u'64702.5764839588723736074.69')
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(
self.ISCSI_FLT_DOMAINS, res, 'Unexpected ScIscsiFaultDomain')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_domains_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScControllerPort FaultDomainList fails
res = self.scapi._find_domains(u'64702.5764839588723736074.69')
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_domain(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_domain(u'64702.5764839588723736074.69',
u'192.168.0.21')
mock_get.assert_called()
mock_get_json.assert_called()
self.assertIsNotNone(res, 'Expected ScIscsiFaultDomain')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_domain_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScControllerPort FaultDomainList fails
res = self.scapi._find_domain(u'64702.5764839588723736074.69',
u'192.168.0.21')
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_domain_not_found(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where domainip does not equal any WellKnownIpAddress
# of the fault domains
res = self.scapi._find_domain(u'64702.5764839588723736074.69',
u'192.168.0.22')
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=FC_HBAS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_fc_initiators(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
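# _find_fc_initiators gathers the WWNs of the server's FC HBAs.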
res = self.scapi._find_fc_initiators(self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertIsNotNone(res, 'Expected WWN list')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_fc_initiators_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScServer HbaList fails
res = self.scapi._find_fc_initiators(self.SCSERVER)
self.assertListEqual([], res, 'Expected empty list')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_volume_count(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
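# get_volume_count reports the number of entries in the
# server's MappingList.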
res = self.scapi.get_volume_count(self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(len(self.MAPPINGS), res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_get_volume_count_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the get of ScServer MappingList fails
res = self.scapi.get_volume_count(self.SCSERVER)
mock_get.assert_called()
self.assertEqual(-1, res, 'Mapping count not -1')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_volume_count_no_volumes(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_volume_count(self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(0, res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_mappings(self.VOLUME)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(self.MAPPINGS, res, 'Mapping mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings_inactive_vol(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test getting volume mappings on inactive volume
res = self.scapi._find_mappings(self.INACTIVE_VOLUME)
mock_get.assert_called()
self.assertEqual([], res, 'No mappings expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_mappings_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the get of ScVolume MappingList fails
res = self.scapi._find_mappings(self.VOLUME)
mock_get.assert_called()
self.assertEqual([], res, 'Mapping count not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings_no_mappings(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScVolume has no mappings
res = self.scapi._find_mappings(self.VOLUME)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual([], res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_controller_port(self,
mock_get,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_controller_port(u'64702.5764839588723736070.51')
mock_get.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.CTRLR_PORT, res, 'ScControllerPort mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_controller_port_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the get of the ScControllerPort fails
res = self.scapi._find_controller_port(self.VOLUME)
mock_get.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=FC_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_fc_initiators',
return_value=WWNS)
def test_find_wwns(self,
mock_find_fc_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
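# find_wwns returns the LUN, the target WWN list, and the
# initiator-target map for an FC attach.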
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
mock_find_fc_initiators.assert_called()
mock_find_mappings.assert_called()
mock_find_controller_port.assert_called()
# _find_controller_port is mocked, so every mapping pair
# reports the same WWN for the ScControllerPort
itmapCompare = {u'21000024FF30441C': [u'5000D31000FCBE36'],
u'21000024FF30441D':
[u'5000D31000FCBE36', u'5000D31000FCBE36']}
self.assertEqual(1, lun, 'Incorrect LUN')
self.assertIsNotNone(wwns, 'WWNs is None')
self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_fc_initiators',
return_value=FC_HBAS)
def test_find_wwns_no_mappings(self,
mock_find_fc_initiators,
mock_find_mappings,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScMapping(s)
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
mock_find_fc_initiators.assert_called()
mock_find_mappings.assert_called()
self.assertIsNone(lun, 'Incorrect LUN')
self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_fc_initiators',
return_value=WWNS)
def test_find_wwns_no_ctlr_port(self,
mock_find_fc_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScControllerPort is none
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
mock_find_fc_initiators.assert_called()
mock_find_mappings.assert_called()
mock_find_controller_port.assert_called()
self.assertIsNone(lun, 'Incorrect LUN')
self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_mappings(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
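# Builds the iSCSI target properties (IQNs, LUNs, portals)
# from the mocked mappings, fault domains, and controller port.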
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_by_address(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find iSCSI mappings by IP Address & port
res = self.scapi.find_iscsi_properties(
self.VOLUME, '192.168.0.21', 3260)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_by_address_not_found(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where no iSCSI mappings match the given IP address
# & port
res = self.scapi.find_iscsi_properties(
self.VOLUME, '192.168.1.21', 3260)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[])
def test_find_iscsi_properties_no_mapping(self,
mock_find_mappings,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScMapping(s)
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
self.assertEqual(expected, res, 'Expected empty Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_no_domain(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScFaultDomain(s)
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
self.assertEqual(expected, res, 'Expected empty Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_no_ctrl_port(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there is no ScControllerPort
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
self.assertEqual(expected, res, 'Expected empty Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_READ_ONLY)
def test_find_iscsi_properties_ro(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where Read Only mappings are found
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'ro',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_MULTI_PORTAL)
def test_find_iscsi_properties_multi_portals(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are multiple portals
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals':
[u'192.168.0.21:3260', u'192.168.0.25:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=MAP_PROFILE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_map_volume(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_map_volume_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where mapping volume to server fails
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
mock_post.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAP_PROFILES)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_unmap_volume(self,
mock_get,
mock_get_json,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
mock_delete.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_unmap_volume_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
mock_get.assert_called()
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_unmap_volume_no_map_profile(self,
mock_get,
mock_get_json,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
mock_delete.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_204)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAP_PROFILES)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_unmap_volume_del_fail(self,
mock_get,
mock_get_json,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
mock_delete.assert_called()
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=STRG_USAGE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_storage_usage(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_storage_usage(64702)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(self.STRG_USAGE, res, 'Unexpected ScStorageUsage')
def test_get_storage_usage_no_ssn(self,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where SSN is none
res = self.scapi.get_storage_usage(None)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
    def test_get_storage_usage_failure(self,
                                       mock_get,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        # Test case where the GET for storage usage fails
res = self.scapi.get_storage_usage(64702)
mock_get.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
60)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_init_volume')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_inact_vol(self,
mock_post,
mock_init_volume,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the specified volume is inactive
res = self.scapi.create_replay(self.INACTIVE_VOLUME,
'Test Replay',
60)
mock_post.assert_called()
mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME)
mock_first_result.assert_called()
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_no_expire(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
0)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_no_volume(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where no ScVolume is specified
res = self.scapi.create_replay(None,
'Test Replay',
60)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_replay_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where create ScReplay fails
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
60)
mock_post.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
    def test_find_replay(self,
                         mock_get,
                         mock_get_json,
                         mock_close_connection,
                         mock_open_connection,
                         mock_init):
        res = self.scapi.find_replay(self.VOLUME,
                                     u'Cinder Test Replay012345678910')
        mock_get.assert_called()
        mock_get_json.assert_called()
        self.assertEqual(self.TST_RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
    def test_find_replay_no_replays(self,
                                    mock_get,
                                    mock_get_json,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        # Test case where no replays are found
        res = self.scapi.find_replay(self.VOLUME,
                                     u'Cinder Test Replay012345678910')
        mock_get.assert_called()
        mock_get_json.assert_called()
        self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
    def test_find_replay_failure(self,
                                 mock_get,
                                 mock_get_json,
                                 mock_close_connection,
                                 mock_open_connection,
                                 mock_init):
        # Test case where the replay query fails and None is returned
        res = self.scapi.find_replay(self.VOLUME,
                                     u'Cinder Test Replay012345678910')
        mock_get.assert_called()
        mock_get_json.assert_called()
        self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_delete_replay(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
mock_post.assert_called()
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_delete_replay_no_replay(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where specified ScReplay does not exist
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
mock_post.assert_called()
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=TST_RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_delete_replay_failure(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where delete ScReplay results in an error
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
mock_post.assert_called()
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume(self,
mock_post,
mock_find_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.TST_RPLAY)
mock_post.assert_called()
mock_find_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_volume_folder_path',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume_create_fldr(self,
mock_post,
mock_find_volume_folder,
mock_create_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume folder does not exist and must be created
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.TST_RPLAY)
mock_post.assert_called()
mock_find_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_create_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_volume_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume_no_vol_fldr(self,
mock_post,
mock_find_volume_folder,
mock_create_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume folder does not exist and cannot be created
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.TST_RPLAY)
mock_post.assert_called()
mock_find_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_create_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_view_volume_failure(self,
mock_post,
mock_find_volume_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where view volume create fails
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.TST_RPLAY)
mock_post.assert_called()
mock_find_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=RPLAY)
def test_create_cloned_volume(self,
mock_create_replay,
mock_create_view_volume,
mock_close_connection,
mock_open_connection,
mock_init):
vol_name = u'Test_create_clone_vol'
res = self.scapi.create_cloned_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.VOLUME)
mock_create_replay.assert_called_once_with(self.VOLUME,
'Cinder Clone Replay',
60)
mock_create_view_volume.assert_called_once_with(
vol_name,
self.configuration.dell_sc_volume_folder,
self.RPLAY)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=None)
def test_create_cloned_volume_failure(self,
mock_create_replay,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where create cloned volumes fails because create_replay
# fails
vol_name = u'Test_create_clone_vol'
res = self.scapi.create_cloned_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.VOLUME)
mock_create_replay.assert_called_once_with(self.VOLUME,
'Cinder Clone Replay',
60)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_expand_volume(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.expand_volume(self.VOLUME, 550)
mock_post.assert_called()
mock_get_json.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_expand_volume_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.expand_volume(self.VOLUME, 550)
mock_post.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
def test_delete_server(self,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._delete_server(self.SCSERVER)
mock_delete.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
def test_delete_server_del_not_allowed(self,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where delete of ScServer not allowed
res = self.scapi._delete_server(self.SCSERVER_NO_DEL)
mock_delete.assert_called()
self.assertIsNone(res, 'Expected None')
class DellSCSanAPIConnectionTestCase(test.TestCase):
'''DellSCSanAPIConnectionTestCase
Class to test the Storage Center API connection using Mock.
'''
# Create a Response object that indicates OK
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates a failure (no content)
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
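    # Note: the tests below consistently use RESPONSE_204 (no content) as
    # the failure case for API calls that are expected to return data.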
def setUp(self):
super(DellSCSanAPIConnectionTestCase, self).setUp()
        # Configuration is a mock. A mock is pretty much a blank slate, so
        # the driver configuration values the tests rely on are set up here.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "mmm"
self.configuration.dell_sc_ssn = 12345
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
self.configuration.dell_sc_api_port = 3033
self.configuration.iscsi_ip_address = '192.168.1.1'
self.configuration.iscsi_port = 3260
self._context = context.get_admin_context()
# Set up the StorageCenterApi
self.scapi = dell_storagecenter_api.StorageCenterApi(
self.configuration.san_ip,
self.configuration.dell_sc_api_port,
self.configuration.san_login,
self.configuration.san_password)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_open_connection(self,
mock_post):
self.scapi.open_connection()
mock_post.assert_called()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_open_connection_failure(self,
mock_post):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.open_connection)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_close_connection(self,
mock_post):
self.scapi.close_connection()
mock_post.assert_called()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_close_connection_failure(self,
mock_post):
self.scapi.close_connection()
mock_post.assert_called()
|
import click
@click.command('config', short_help='Display remote client config')
@click.pass_obj
def cli(obj):
"""Display client config downloaded from API server."""
for k, v in obj.items():
if isinstance(v, list):
v = ', '.join(v)
click.echo(f'{k:20}: {v}')
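# Usage sketch (hypothetical wiring): assuming this command is registered on
# a click group whose context object is the config dict downloaded from the
# API server, invoking it would print one padded `key: value` line per entry:
#
#     $ remote config
#     endpoint            : https://api.example.org
#     features            : alpha, beta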
|
import random
import sys
"""class schema:
files=[]
def __init__(self):
pass
def addFile(self,file):
self.files.append(file)
def setForeignKey(self,primaryFile,theOtherOne):
pass"""
class JoinReq:
    def __init__(self, R, S, m, n, fing):
        self.cost = 0
        self.first_req = True
        # The initial seek cost is intentionally not counted.
        tC, self.t1 = R.getFirst(m)
        tC, self.t2 = S.getFirst(n)
        self.first_req = False
        self.R = R
        self.S = S
        self.m = m
        self.n = n
        self.fing = fing
    def pull(self):
        if self.fing == False:
            # Plain nested-loop join: scan S for each tuple of R,
            # returning one matching pair per call.
            temp = ""
            while self.t1 is not None:
                while self.t2 is not None:
                    if self.t1[self.m] == self.t2[self.n]:
                        temp = (self.t1, self.t2)
                        self.t2 = self.S.getNext(self.n)
                        self.cost += 1
                        return temp
                    self.t2 = self.S.getNext(self.n)
                    self.cost += 1
                self.t1 = self.R.getNext(self.m)
                self.cost += 1
                tC, self.t2 = self.S.getFirst(self.n)
                self.cost += tC
            return "eoo"
        else:
            # Finger-based variant: advance whichever side has the smaller
            # key, and only rewind S when R's key moves backwards (a new
            # run begins), as tracked by savedLastKey.
            savedLastKey = -1
            while self.t1 is not None:
                while self.t1 is not None:
                    while self.t2 is not None and self.t1[self.m] >= self.t2[self.n]:
                        if self.t1[self.m] == self.t2[self.n]:
                            temp = (self.t1, self.t2)
                            self.t2 = self.S.getNext(self.n)
                            self.cost += 1
                            return temp
                        self.t2 = self.S.getNext(self.n)
                        self.cost += 1
                    if self.t2 is None:
                        # S exhausted: advance R until its key drops below
                        # the last joined key, then rewind S and resume.
                        while self.t1 is not None:
                            self.t1 = self.R.getNext(self.m)
                            self.cost += 1
                            if self.t1 is not None and savedLastKey > self.t1[self.m]:
                                tC, self.t2 = self.S.getFirst(self.n)
                                self.cost += tC
                                break
                    if self.t1 is None or self.t2 is None:
                        # Both sides exhausted.
                        return "eoo"
                    if self.t2[self.n] > self.t1[self.m]:
                        break
                while self.t2 is not None:
                    while self.t1 is not None and self.t2[self.n] >= self.t1[self.m]:
                        if self.t1[self.m] == self.t2[self.n]:
                            temp = (self.t1, self.t2)
                            self.t2 = self.S.getNext(self.n)
                            self.cost += 1
                            return temp
                        savedLastKey = self.t1[self.m]
                        self.t1 = self.R.getNext(self.m)
                        self.cost += 1
                        if self.t1 is None:
                            return "eoo"
                        if savedLastKey > self.t1[self.m]:
                            tC, self.t2 = self.S.getFirst(self.n)
                            self.cost += tC
                    if self.t2 is None or self.t1[self.m] > self.t2[self.n]:
                        break
            return "eoo"
def getCost(self):
return self.cost
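# A minimal usage sketch, assuming two populated Xf relations r and s
# (see the demo driver at the bottom of this file):
#
#     jr = JoinReq(r, s, 0, 0, fing=True)
#     pair = jr.pull()
#     while pair != "eoo":
#         print(pair)
#         pair = jr.pull()
#     print("cost:", jr.getCost())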
class Xf:
    '''A column-oriented in-memory relation.
    self.stats holds: size, columns, runs (sorted runs per column),
    fingers (per-column cursors; -1 disables a finger), ordered,
    keyCol (primary key column) and max (key value range).
    '''
    def __init__(self, name):
        self.stats = {}
        self.max = 25
        self.keyCol = None
        self.stats["Name"] = name
        self.data = {}
        self.setStats(0, 0, 0, 0, 0, 0, 0)
    def setStats(self, size, columns, runs, fingers, ordered, pkey, max):
        # Set the relation's statistics and reset the per-column cursors.
        self.stats["size"] = size
        self.stats["keyCol"] = pkey
        self.stats["max"] = max
        self.stats["columns"] = columns
        self.stats["runs"] = runs
        self.stats["cursors"] = [0 for x in range(columns)]
        self.keyCol = self.stats["keyCol"]
        self.max = self.stats["max"]
        self.stats["fingers"] = fingers
        self.stats["ordered"] = ordered
        self.fingers = fingers
def sortCol(self):
pass
def reset(self):
self.stats["fingers"]=[0 if x!=-1 else x for x in self.stats["fingers"]]
def getSize(self):
return int(self.stats["size"])
def getRuns(self,col):
return int(self.stats["runs"][col])
    def getFirst(self, col):
        # Return (seek_cost, first_tuple) and reset the finger for `col`.
        # The seek cost is the distance the finger travels back to 0.
        tuple1 = []
        for c in range(self.stats["columns"]):
            tuple1.append(self.data[str(c)][0])
        tCost = self.stats["fingers"][col]
        self.stats["fingers"][col] = 0
        return tCost, tuple1
    def getNext(self, col):
        # Advance the finger for `col` and return the tuple at its previous
        # position, or None when the column is exhausted.
        fingerPos = self.stats["fingers"][col]
        if int(fingerPos) >= (len(self.data[str(col)]) - 2):
            return None
        if self.stats["fingers"][col] != -1:
            self.stats["fingers"][col] += 1
        tuple1 = []
        for c in range(self.stats["columns"]):
            tuple1.append(self.data[str(c)][fingerPos])
        return tuple1
    def getFinger(self, col):
        return self.stats["fingers"][col]
    def emit(self, x):
        # Sink for joined tuples; intentionally a no-op here.
        pass
    def eJoin(self, S, m, n):
        # Nested-loop equi-join of self (as R) with S on columns m and n,
        # emitting all matches and returning the total access cost.
        cost = 0
        tC, t1 = self.getFirst(m)
        cost += tC
        tC, t2 = S.getFirst(n)
        cost += tC
        while t1 is not None:
            while t2 is not None:
                if t1[m] == t2[n]:
                    self.emit((t1, t2))
                t2 = S.getNext(n)
                cost += 1
            t1 = self.getNext(m)
            cost += 1
            tC, t2 = S.getFirst(n)
            cost += tC
        return cost
def __repr__(self):
t1=""
for key in self.data.keys():
t1 = t1 + str(key) + " : " + str(self.data[key]) +"\n"
t1= str(t1) + "\nprimary key: " + str(self.keyCol)
return t1
    def setConstraints(self, key, max):
        # Set the primary-key column and the key range.
        # (This duplicates state also held in self.stats.)
        self.stats["keyCol"] = key
        self.keyCol = key
        self.max = max
        self.stats["max"] = max
    def printStats(self):
        print(self.stats)
    def replaceDupandSum(self, list1, list2):
        # Merge two sorted runs, nudging values in list2 that collide with
        # list1 so the key column stays free of duplicates.
        for i in range(len(list1)):
            for j in range(len(list2)):
                if list2[j] == list1[i] and j + 1 < len(list2):
                    list2[j] = (list2[j] + list2[j + 1]) // 2
        return list1 + list2
    def FormData(self):
        # Populate the relation according to the stats set via setStats().
        self.Generate(self.stats["columns"], self.stats["runs"],
                      self.stats["size"])
    def Generate(self, cols, runs, size):
        # Build each column out of runs[col] sorted runs of random values.
        # The key column is sampled without replacement so keys stay unique.
        for col in range(cols):
            if col == self.keyCol:
                for r in range(runs[col]):
                    temp = sorted(random.sample(range(self.max),
                                                size // runs[col]))
                    self.data[str(col)] = self.replaceDupandSum(
                        self.data.get(str(col), []), temp)
            else:
                for r in range(runs[col]):
                    temp = sorted([random.randrange(self.max)
                                   for x in range(size // runs[col])])
                    self.data[str(col)] = self.data.get(str(col), []) + temp
            if self.stats["ordered"][col]:
                self.data[str(col)] = sorted(self.data[str(col)])
    def write2File(self, fileName):
        # Write one whitespace-separated line of values per column.
        with open(fileName, 'w') as fp:
            for col in range(self.stats["columns"]):
                stringD = ""
                for x in self.data[str(col)]:
                    stringD = stringD + " " + str(x)
                fp.write(stringD + "\n")
    def readFile(self, fileName):
        # Inverse of write2File: parse one line of integers per column.
        lines = open(fileName).read().splitlines()
        for x in range(self.stats["columns"]):
            self.data[str(x)] = [int(v) for v in lines[x].split()]
def nJoin(R, S, m, n):
    # Stand-alone nested-loop join used for tracing; prints each comparison.
    tC, t1 = R.getFirst(m)
    tC, t2 = S.getFirst(n)
    while t1 is not None:
        while t2 is not None:
            print(str(t1[m]) + "=" + str(t2[n]))
            if t1[m] == t2[n]:
                R.emit((t1, t2))
            t2 = S.getNext(n)
        t1 = R.getNext(m)
        tC, t2 = S.getFirst(n)
"""inst= file()
if len(sys.argv)>1:
cols = int(sys.argv[1])
runs = [int(x) for x in sys.argv[2:(len(sys.argv)-1)]]
size = int(sys.argv[len(sys.argv)-1])
inst.setConstraints(0,200000)
inst.Generate(cols,runs,size)
inst.write2File("file.txt")
"""
"""
inst3=Xf("r")
inst3.setStats(10,2,(2,3),[-1,0],0,40)
inst3.FormData()
inst4=Xf("s")
inst4.setStats(20,2,(2,3),[-1,0],0,40)
inst4.FormData()
print inst3
print inst4
"""
"""
inst3.printStats()
print inst3.getFirst()
print inst3.getNext(1)
print inst3.getNext(1)
print inst3.getNext(1)
print inst3.getNext(1)
print inst3.getNext(1)"""
|