import os
import csv
from datetime import datetime, date
import alcide.settings
import django.core.management
django.core.management.setup_environ(alcide.settings)
from django.db import transaction
from alcide.dossiers.models import PatientRecord
from alcide.personnes.models import Worker
from alcide.ressources.models import Service
from alcide.ressources.models import ActType
from alcide.actes.models import Act, ActValidationState
from import_dossiers import map_cs
db_path = "./scripts/20130104-213225"
dbs = ["F_ST_ETIENNE_SESSAD_TED", "F_ST_ETIENNE_CMPP", "F_ST_ETIENNE_CAMSP", "F_ST_ETIENNE_SESSAD"]
def _to_datetime(str_date):
if not str_date:
return None
return datetime.strptime(str_date[:19], "%Y-%m-%d %H:%M:%S")
def _to_date(str_date):
dt = _to_datetime(str_date)
return dt and dt.date()
def _to_time(str_date):
dt = _to_datetime(str_date)
return dt and dt.time()
def _to_duration(str_date):
dt = _to_datetime(str_date)
if dt is None:
return None
    return dt - datetime(1900, 1, 1, 0, 0)
def _to_int(str_int):
if not str_int:
return None
return int(str_int)
def _get_dict(cols, line):
""""""
res = {}
for i, data in enumerate(line):
res[cols[i]] = data.decode('utf-8')
return res
def batch_delete(qs, limit):
count = qs.count()
i = 0
while i < count:
ids = qs[i:i+limit].values_list('pk', flat=True)
qs.filter(pk__in=ids).delete()
i += limit
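# Illustrative usage (a sketch, not from the original script): delete a large
# queryset in chunks of 500 rows so no single DELETE statement grows too big:
#   batch_delete(Act.objects.filter(validation_locked=False), 500)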
PERIOD_FAURE_NOUS = {
    1: 1,
    2: 2,
    3: 3,
    4: 4,
    5: 6,
    6: 7,
    7: 8,
    8: 9,
    9: None,
    10: 10,
    12: 11,
    13: 12,
}
JOURS = {1: 'lundi',
2: 'mardi',
3: 'mercredi',
4: 'jeudi',
5: 'vendredi'
}
dic_worker = {}
def load_csv2(db, name, offset=0, limit=9999999, id_column=0):
    csvfile = open(os.path.join(db_path, db, name + '.csv'), 'rb')
    csvlines = csv.reader(csvfile, delimiter=';', quotechar='|')
    cols = csvlines.next()
    yield cols
    for line in csvlines:
        if not (offset <= int(line[id_column]) < offset+limit):
            continue
        yield _get_dict(cols, line)
    csvfile.close()
def load_csv(db, name, offset=0, limit=9999999, id_column=0):
records = []
idx = {}
csvfile = open(os.path.join(db_path, db, name + '.csv'), 'rb')
csvlines = csv.reader(csvfile, delimiter=';', quotechar='|')
cols = csvlines.next()
i = 0
for line in csvlines:
if not (offset <= int(line[id_column]) < offset+limit):
continue
data = _get_dict(cols, line)
records.append(data)
idx[data['id']] = i
i += 1
csvfile.close()
return records, idx, cols
def add_invalid(d, reason):
d.setdefault('invalid', '')
if d['invalid']:
d['invalid'] += ', '
d['invalid'] += reason
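# For example, add_invalid(row, 'no date') followed by add_invalid(row, 'no type')
# leaves row['invalid'] == 'no date, no type'.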
log = open('import_some_acts-%s.log' % datetime.now().isoformat(), 'w+')
@transaction.commit_on_success
def main():
""" """
workers = Worker.objects.all()
for db in dbs:
workers_idx = {}
act_types_idx = {}
if "F_ST_ETIENNE_CMPP" == db:
service = Service.objects.get(name="CMPP")
elif "F_ST_ETIENNE_CAMSP" == db:
service = Service.objects.get(name="CAMSP")
elif "F_ST_ETIENNE_SESSAD_TED" == db:
service = Service.objects.get(name="SESSAD TED")
elif "F_ST_ETIENNE_SESSAD" == db:
service = Service.objects.get(name="SESSAD DYS")
print '===', service.name, '==='
print datetime.now()
print >>log, datetime.now(), '===', service.name, '==='
# load workers mapping
worker_reverse_idx = {}
for i, worker in enumerate(workers):
if service.name == 'CMPP':
j = worker.old_cmpp_id
elif service.name == 'CAMSP':
j = worker.old_camsp_id
elif service.name == 'SESSAD DYS':
j = worker.old_sessad_dys_id
elif service.name == 'SESSAD TED':
j = worker.old_sessad_ted_id
            else:
                print "unknown service!!!"
                exit(1)
if j:
workers_idx[j] = worker
worker_reverse_idx[worker] = j
# load act_type mapping
act_types = ActType.objects.for_service(service)
act_type_id_not_found = set()
for i, act_type in enumerate(act_types):
j = act_type.old_id
if j:
act_types_idx[j] = act_type
else:
act_type_id_not_found.add(act_type)
def set_act_type(row, not_found=None):
act_type_id = row['type_acte']
if act_type_id == '0':
add_invalid(row, 'no act_id=>not importable')
elif act_type_id in act_types_idx:
row['act_type'] = act_types_idx[act_type_id]
else:
add_invalid(row, 'act_type not found %s' % act_type_id)
if not_found:
not_found.add(act_type_id)
def handle_details2(data, idx, details, id_key):
for detail in details:
i = int(detail[id_key])
thera_id = detail['thera_id']
if i not in idx:
continue
row = data[idx[i]]
if thera_id in workers_idx:
ws = row.setdefault('workers', set())
theras = row.setdefault('theras', set())
ws.add(workers_idx[thera_id])
theras.add(thera_id)
else:
add_invalid(row, 'unknown thera_id %s' % thera_id)
print "%s - Nombre de types d'actes : %d" % (service.name, len(act_types))
print "%s - Liste des types d'actes sans id : %s" % (service.name, str(act_type_id_not_found))
# loading dossiers idx
enfant_idx = {}
for enfant in PatientRecord.objects.filter(service=service):
enfant_idx[enfant.old_id] = enfant
def set_enfant(row, not_found=None):
# connect enfant
enfant_id = row['enfant_id']
if enfant_id == '0':
add_invalid(row, 'no enfant_id=>not an appointment')
row['event'] = True
elif enfant_id in enfant_idx:
row['enfant'] = enfant_idx[enfant_id]
else:
add_invalid(row, 'enfant_id not found %s' % enfant_id)
if not_found:
not_found.add(enfant_id)
acts_ids = set([
193815,
173334,
171872,
193506,
182039,
166806,
183054,
181050,
172301,
193914,
186876,
161111,
])
        rows = load_csv2(db, 'actes')
        rows.next()  # skip the column-name row yielded first by load_csv2
loaded_rows = []
for row in rows:
if int(row['id']) not in acts_ids:
continue
row.setdefault('invalid', '')
row.setdefault('workers', set())
row.setdefault('theras', set())
row['date'] = _to_date(row['date_acte'])
row['time'] = _to_time(row['heure'])
row['duration'] = _to_duration(row['duree'])
row['is_billed'] = row['marque'] == '1'
row['validation_locked'] = row['date'] < date(2013, 1, 3)
set_enfant(row)
set_act_type(row)
row['state'] = map_cs[service.name].get(row['cs'], 'VALIDE')
loaded_rows.append(row)
total = 0
for row in loaded_rows:
if row['invalid']:
print >>log, datetime.now(), 'row invalid', row
continue
print >>log, datetime.now(), 'act', row['id'], 'imported'
total += 1
print >>log, datetime.now(), 'created', total, 'new acts'
    raise Exception()  # force commit_on_success to roll back the whole import (dry run)
if __name__ == "__main__":
main()
|
execfile("eval.py")
|
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt
def execute(filters=None):
if not filters: filters = {}
columns = get_columns()
data = get_tracking_details(filters)
return columns, data
def get_columns():
    return [
        "Franchise:Link/Franchise:130",
        "Device ID:data:100",
        "Sub Franchise:Link/Sub Franchise:120",
        "Scheduled Visiting Date:date:150",
        "Visited (Yes/No):data:120",
        "Visited Date Time:datetime:150",
        "Reason:data:300",
    ]
def get_tracking_details(filters):
#conditions = get_conditions(filters)
return webnotes.conn.sql("""select account_id,device_id,sf_name,visiting_date,visited,visited_date,reason from `tabSub Franchise Visiting Schedule` ORDER BY sf_name,visiting_date """)
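# A hypothetical get_conditions helper (the call above is commented out and the
# function is not defined in this file) might follow the usual pattern of
# appending SQL fragments per filter, e.g.:
#   def get_conditions(filters):
#       conditions = ""
#       if filters.get("sub_franchise"):
#           conditions += " where sf_name = %(sub_franchise)s"
#       return conditions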
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('upload', '0002_uploadedpdf_num_pages'),
]
operations = [
migrations.AlterModelOptions(
name='uploadedpdf',
options={'verbose_name': 'Uploaded PDF'},
),
]
|
from functools import partial
from operator import itemgetter
from django.conf.urls import url
from django.contrib import admin, messages
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _, pgettext, ugettext, ungettext
from django_ace import AceWidget
from judge.models import Submission, SubmissionTestCase, ContestSubmission, ContestParticipation, ContestProblem, \
Profile
class SubmissionStatusFilter(admin.SimpleListFilter):
parameter_name = title = 'status'
__lookups = (('None', _('None')), ('NotDone', _('Not done')), ('EX', _('Exceptional'))) + Submission.STATUS
__handles = set(map(itemgetter(0), Submission.STATUS))
def lookups(self, request, model_admin):
return self.__lookups
def queryset(self, request, queryset):
if self.value() == 'None':
return queryset.filter(status=None)
elif self.value() == 'NotDone':
return queryset.exclude(status__in=['D', 'IE', 'CE', 'AB'])
elif self.value() == 'EX':
return queryset.exclude(status__in=['D', 'CE', 'G', 'AB'])
elif self.value() in self.__handles:
return queryset.filter(status=self.value())
class SubmissionResultFilter(admin.SimpleListFilter):
parameter_name = title = 'result'
__lookups = (('None', _('None')), ('BAD', _('Unaccepted'))) + Submission.RESULT
__handles = set(map(itemgetter(0), Submission.RESULT))
def lookups(self, request, model_admin):
return self.__lookups
def queryset(self, request, queryset):
if self.value() == 'None':
return queryset.filter(result=None)
elif self.value() == 'BAD':
return queryset.exclude(result='AC')
elif self.value() in self.__handles:
return queryset.filter(result=self.value())
class SubmissionTestCaseInline(admin.TabularInline):
fields = ('case', 'batch', 'status', 'time', 'memory', 'points', 'total')
readonly_fields = ('case', 'batch', 'total')
model = SubmissionTestCase
can_delete = False
max_num = 0
class ContestSubmissionInline(admin.StackedInline):
fields = ('problem', 'participation', 'points')
model = ContestSubmission
def get_formset(self, request, obj=None, **kwargs):
kwargs['formfield_callback'] = partial(self.formfield_for_dbfield, request=request, obj=obj)
return super(ContestSubmissionInline, self).get_formset(request, obj, **kwargs)
def formfield_for_dbfield(self, db_field, **kwargs):
submission = kwargs.pop('obj', None)
label = None
if submission:
if db_field.name == 'participation':
kwargs['queryset'] = ContestParticipation.objects.filter(user=submission.user,
contest__problems=submission.problem) \
.only('id', 'contest__name')
label = lambda obj: obj.contest.name
elif db_field.name == 'problem':
kwargs['queryset'] = ContestProblem.objects.filter(problem=submission.problem) \
.only('id', 'problem__name', 'contest__name')
label = lambda obj: pgettext('contest problem', '%(problem)s in %(contest)s') % {
'problem': obj.problem.name, 'contest': obj.contest.name
}
field = super(ContestSubmissionInline, self).formfield_for_dbfield(db_field, **kwargs)
if label is not None:
field.label_from_instance = label
return field
class SubmissionAdmin(admin.ModelAdmin):
readonly_fields = ('user', 'problem', 'date')
fields = ('user', 'problem', 'date', 'time', 'memory', 'points', 'language', 'source', 'status', 'result',
'case_points', 'case_total', 'judged_on', 'error')
actions = ('judge', 'recalculate_score')
list_display = ('id', 'problem_code', 'problem_name', 'user_column', 'execution_time', 'pretty_memory',
'points', 'language', 'status', 'result', 'judge_column')
list_filter = ('language', SubmissionStatusFilter, SubmissionResultFilter)
search_fields = ('problem__code', 'problem__name', 'user__user__username', 'user__name')
actions_on_top = True
actions_on_bottom = True
inlines = [SubmissionTestCaseInline, ContestSubmissionInline]
date_hierarchy = 'date'
def get_queryset(self, request):
queryset = Submission.objects.only(
'problem__code', 'problem__name', 'user__user__username', 'user__name', 'language__name',
'time', 'memory', 'points', 'status', 'result'
)
if not request.user.has_perm('judge.edit_all_problem'):
id = request.user.profile.id
queryset = queryset.filter(Q(problem__authors__id=id) | Q(problem__curators__id=id)).distinct()
return queryset
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
if not request.user.has_perm('judge.edit_own_problem'):
return False
if request.user.has_perm('judge.edit_all_problem') or obj is None:
return True
return obj.problem.is_editor(request.user.profile)
def judge(self, request, queryset):
if not request.user.has_perm('judge.rejudge_submission') or not request.user.has_perm('judge.edit_own_problem'):
self.message_user(request, ugettext('You do not have the permission to rejudge submissions.'),
level=messages.ERROR)
return
queryset = queryset.order_by('id')
if queryset.count() > 10 and not request.user.has_perm('judge.rejudge_submission_lot'):
self.message_user(request, ugettext('You do not have the permission to rejudge THAT many submissions.'),
level=messages.ERROR)
return
if not request.user.has_perm('judge.edit_all_problem'):
id = request.user.profile.id
queryset = queryset.filter(Q(problem__authors__id=id) | Q(problem__curators__id=id))
judged = len(queryset)
for model in queryset:
model.judge(rejudge=True)
        self.message_user(request, ungettext('%d submission was successfully scheduled for rejudging.',
                                             '%d submissions were successfully scheduled for rejudging.',
                                             judged) % judged)
judge.short_description = _('Rejudge the selected submissions')
def recalculate_score(self, request, queryset):
if not request.user.has_perm('judge.rejudge_submission'):
self.message_user(request, ugettext('You do not have the permission to rejudge submissions.'),
level=messages.ERROR)
return
submissions = list(queryset.select_related('problem').only('points', 'case_points', 'case_total',
'problem__partial', 'problem__points'))
for submission in submissions:
submission.points = round(submission.case_points / submission.case_total * submission.problem.points
if submission.case_total else 0, 1)
if not submission.problem.partial and submission.points < submission.problem.points:
submission.points = 0
submission.save()
if hasattr(submission, 'contest'):
contest = submission.contest
contest.points = round(submission.case_points / submission.case_total * contest.problem.points
if submission.case_total > 0 else 0, 1)
if not contest.problem.partial and contest.points < contest.problem.points:
contest.points = 0
contest.save()
for profile in Profile.objects.filter(id__in=queryset.values_list('user_id', flat=True).distinct()):
profile.calculate_points()
cache.delete('user_complete:%d' % profile.id)
cache.delete('user_attempted:%d' % profile.id)
for participation in ContestParticipation.objects.filter(
id__in=queryset.values_list('contest__participation_id')):
participation.recalculate_score()
        self.message_user(request, ungettext('%d submission was successfully rescored.',
                                             '%d submissions were successfully rescored.',
                                             len(submissions)) % len(submissions))
recalculate_score.short_description = _('Rescore the selected submissions')
def problem_code(self, obj):
return obj.problem.code
problem_code.short_description = _('Problem code')
problem_code.admin_order_field = 'problem__code'
def problem_name(self, obj):
return obj.problem.name
problem_name.short_description = _('Problem name')
problem_name.admin_order_field = 'problem__name'
def user_column(self, obj):
return format_html(u'<span title="{display}">{username}</span>',
username=obj.user.user.username,
display=obj.user.name)
user_column.admin_order_field = 'user__user__username'
user_column.short_description = _('User')
def execution_time(self, obj):
return round(obj.time, 2) if obj.time is not None else 'None'
execution_time.short_description = _('Time')
execution_time.admin_order_field = 'time'
def pretty_memory(self, obj):
memory = obj.memory
if memory is None:
return ugettext('None')
if memory < 1000:
return ugettext('%d KB') % memory
else:
return ugettext('%.2f MB') % (memory / 1024.)
pretty_memory.admin_order_field = 'memory'
pretty_memory.short_description = _('Memory')
def judge_column(self, obj):
return '<input type="button" value="Rejudge" onclick="location.href=\'%s/judge/\'" />' % obj.id
judge_column.short_description = ''
judge_column.allow_tags = True
def get_urls(self):
return [url(r'^(\d+)/judge/$', self.judge_view, name='judge_submission_rejudge')] + \
super(SubmissionAdmin, self).get_urls()
def judge_view(self, request, id):
if not request.user.has_perm('judge.rejudge_submission') or not request.user.has_perm('judge.edit_own_problem'):
raise PermissionDenied()
submission = get_object_or_404(Submission, id=id)
if not request.user.has_perm('judge.edit_all_problem') and \
not submission.problem.is_editor(request.user.profile):
raise PermissionDenied()
submission.judge(rejudge=True)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def get_form(self, request, obj=None, **kwargs):
form = super(SubmissionAdmin, self).get_form(request, obj, **kwargs)
if obj is not None:
form.base_fields['source'].widget = AceWidget(obj.language.ace, request.user.profile.ace_theme)
return form
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('menjobe', '0002_product_name'),
]
operations = [
migrations.CreateModel(
name='RetailPoint',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
],
options={
},
bases=(models.Model,),
),
]
|
import os
from waflib.Errors import ConfigurationError
def options(self):
self.add_option(
'--with-amba',
type='string',
help='Basedir of your AmbaKit installation',
dest='ambadir',
default=os.environ.get("AMBA_HOME")
)
def find(self, path = None):
if path:
incpath = [os.path.abspath(os.path.expanduser(os.path.expandvars(path))),
os.path.join(os.path.abspath(os.path.expanduser(os.path.expandvars(path))), "dependencies", "AMBA-PV", "include")]
else:
incpath = []
self.check_cxx(
header_name = "amba.h",
uselib_store = 'AMBA',
mandatory = True,
includes = incpath,
use = 'BOOST SYSTEMC TLM GREENSOCS',
okmsg = "ok",
        errmsg = 'AMBAKit not found, please give the location with --with-amba=',
fragment = '''
#include <systemc.h>
#include <tlm.h>
#include <amba.h>
extern "C" {
int sc_main(int argc, char** argv) {
return 0;
};
}
'''
)
# Check for AMBAKit Headers
self.check_cxx(
msg = "Checking for AMBAKit Version > 1.0.5",
uselib_store = 'AMBA',
mandatory = True,
execute = True,
rpath = self.env.LIBPATH_SYSTEMC,
fragment = """
#include <amba.h>
int sc_main(int argc, char *argv[]) {
return !((AMBA_TLM_VERSION_MAJOR >= 1) && (AMBA_TLM_VERSION_MINOR >= 0) && (AMBA_TLM_VERSION_REVISION >= 6));
}
""",
use = 'BOOST SYSTEMC TLM GREENSOCS AMBA',
okmsg = "ok",
)
def configure(self):
try:
if self.options.ambadir:
find(self, self.options.ambadir)
else:
find(self)
except ConfigurationError as e:
name = "amba_socket"
version = "1.0.15"
self.dep_fetch(
name = name,
version = version,
tar_url = "https://git.greensocs.com/chef/amba-kit/raw/master/files/default/amba_socket-1.0.15.tgz",
base = name,
patch = [os.path.join(self.path.abspath(), "core", "waf", "ambakit-2015-10-16-rmeyer.patch")]
)
find(self, self.dep_path(name, version).rstrip("-"+version))
|
"""Defines fixtures available to all tests."""
from pytest import fixture, yield_fixture
from webtest import TestApp
from tegenaria.app import create_app
from tegenaria.database import db as _db
from tegenaria.settings import TestConfig
@yield_fixture(scope="function")
def app():
"""Yield a new app."""
_app = create_app(TestConfig)
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@fixture(scope="function")
def testapp(app):
"""A Webtest app."""
return TestApp(app)
@yield_fixture(scope="function")
def db(app):
"""Yield an empty database."""
_db.app = app
with app.app_context():
_db.create_all()
yield _db
_db.drop_all()
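# Illustrative test using these fixtures (the route and assertion are assumptions):
#   def test_home_page(testapp, db):
#       response = testapp.get('/')
#       assert response.status_code == 200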
|
import os
import csv
from pprint import pprint
from collections import OrderedDict
from .data_templates import make_record
def atomize_to_mongo(source_path, mongo):
"""
Accepts the path name of the input data file and the MongoWrapper
instance to be used for writing the atomized records.
"""
if not os.path.exists(source_path):
raise FileNotFoundError(source_path)
with open(source_path, encoding='utf-8') as csvfile:
csvreader = csv.DictReader(csvfile)
        # generate standardized keys for each CSV field
field_keys = OrderedDict()
for field in csvreader.fieldnames:
field_keys[field] = field.lower().replace(' ','_')
# turn csv rows into documents
for i, row in enumerate(csvreader):
doc = make_record()
for field, key in field_keys.items():
doc['source'][key] = row[field]
mongo.add_doc(doc, doc_id=i)
#print(i)
#pprint(doc)
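# Illustrative call (the MongoWrapper constructor arguments are an assumption):
#   atomize_to_mongo('data/source.csv', MongoWrapper('atomized'))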
|
from django.contrib.auth.models import User
from django.core.validators import (
MaxValueValidator,
MinValueValidator
)
from django.db import models
from django.utils.translation import ugettext_lazy as _
class WeightEntry(models.Model):
"""
Model for a weight point
"""
date = models.DateField(verbose_name=_('Date'))
weight = models.DecimalField(verbose_name=_('Weight'),
max_digits=5,
decimal_places=2,
validators=[MinValueValidator(30), MaxValueValidator(600)])
user = models.ForeignKey(User,
verbose_name=_('User'),
on_delete=models.CASCADE)
"""
The user the weight entry belongs to.
NOTE: this field is neither marked as editable false nor is it excluded in
the form. This is done intentionally because otherwise it's *very* difficult
and ugly to validate the uniqueness of unique_together fields and one field
is excluded from the form. This does not pose any security risk because the
value from the form is ignored and the request's user always used.
"""
class Meta:
"""
Metaclass to set some other properties
"""
verbose_name = _('Weight entry')
ordering = ["date", ]
get_latest_by = "date"
unique_together = ("date", "user")
def __str__(self):
"""
Return a more human-readable representation
"""
return "{0}: {1:.2f} kg".format(self.date, self.weight)
def get_owner_object(self):
"""
Returns the object that has owner information
"""
return self
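# Illustrative usage (a sketch; assumes `from datetime import date` and a
# request user):
#   entry = WeightEntry(date=date.today(), weight=80.50, user=request.user)
#   entry.full_clean()  # enforces the 30-600 validators on weight
#   entry.save()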
|
import quotation_followup
|
__version__ = '0.1.0'
|
from .array import ARRAY
from ...sql import elements
from ...sql import expression
from ...sql import functions
from ...sql.schema import ColumnCollectionConstraint
class aggregate_order_by(expression.ColumnElement):
"""Represent a PostgreSQL aggregate order by expression.
E.g.::
from sqlalchemy.dialects.postgresql import aggregate_order_by
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
would represent the expression::
SELECT array_agg(a ORDER BY b DESC) FROM table;
Similarly::
expr = func.string_agg(
table.c.a,
aggregate_order_by(literal_column("','"), table.c.a)
)
stmt = select([expr])
Would represent::
SELECT string_agg(a, ',' ORDER BY a) FROM table;
.. versionadded:: 1.1
.. versionchanged:: 1.2.13 - the ORDER BY argument may be multiple terms
.. seealso::
:class:`.array_agg`
"""
__visit_name__ = "aggregate_order_by"
def __init__(self, target, *order_by):
self.target = elements._literal_as_binds(target)
_lob = len(order_by)
if _lob == 0:
raise TypeError("at least one ORDER BY element is required")
elif _lob == 1:
self.order_by = elements._literal_as_binds(order_by[0])
else:
self.order_by = elements.ClauseList(
*order_by, _literal_as_text=elements._literal_as_binds
)
def self_group(self, against=None):
return self
def get_children(self, **kwargs):
return self.target, self.order_by
def _copy_internals(self, clone=elements._clone, **kw):
self.target = clone(self.target, **kw)
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return self.target._from_objects + self.order_by._from_objects
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `postgres
documentation`__.
__ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
""" # noqa
__visit_name__ = "exclude_constraint"
where = None
@elements._document_text_coercion(
"where",
":class:`.ExcludeConstraint`",
":paramref:`.ExcludeConstraint.where`",
)
def __init__(self, *elements, **kw):
r"""
Create an :class:`.ExcludeConstraint` object.
E.g.::
const = ExcludeConstraint(
(Column('period'), '&&'),
(Column('group'), '='),
where=(Column('group') != 'some group')
)
The constraint is normally embedded into the :class:`.Table` construct
directly, or added later using :meth:`.append_constraint`::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('period', TSRANGE()),
Column('group', String)
)
some_table.append_constraint(
ExcludeConstraint(
(some_table.c.period, '&&'),
(some_table.c.group, '='),
where=some_table.c.group != 'some group',
name='some_table_excl_const'
)
)
:param \*elements:
A sequence of two tuples of the form ``(column, operator)`` where
"column" is a SQL expression element or a raw SQL string, most
typically a :class:`.Column` object, and "operator" is a string
containing the operator to use. In order to specify a column name
when a :class:`.Column` object is not available, while ensuring
that any necessary quoting rules take effect, an ad-hoc
:class:`.Column` or :func:`.sql.expression.column` object should be
used.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param using:
Optional string. If set, emit USING <index_method> when issuing DDL
for this constraint. Defaults to 'gist'.
:param where:
Optional SQL expression construct or literal SQL string.
If set, emit WHERE <predicate> when issuing DDL
for this constraint.
"""
columns = []
render_exprs = []
self.operators = {}
expressions, operators = zip(*elements)
for (expr, column, strname, add_element), operator in zip(
self._extract_col_expression_collection(expressions), operators
):
if add_element is not None:
columns.append(add_element)
name = column.name if column is not None else strname
if name is not None:
# backwards compat
self.operators[name] = operator
expr = expression._literal_as_column(expr)
render_exprs.append((expr, name, operator))
self._render_exprs = render_exprs
ColumnCollectionConstraint.__init__(
self,
*columns,
name=kw.get("name"),
deferrable=kw.get("deferrable"),
initially=kw.get("initially")
)
self.using = kw.get("using", "gist")
where = kw.get("where")
if where is not None:
self.where = expression._literal_as_text(
where, allow_coercion_to_text=True
)
def copy(self, **kw):
elements = [(col, self.operators[col]) for col in self.columns.keys()]
c = self.__class__(
*elements,
name=self.name,
deferrable=self.deferrable,
initially=self.initially,
where=self.where,
using=self.using
)
c.dispatch._update(self.dispatch)
return c
def array_agg(*arg, **kw):
"""PostgreSQL-specific form of :class:`.array_agg`, ensures
return type is :class:`.postgresql.ARRAY` and not
the plain :class:`.types.ARRAY`, unless an explicit ``type_``
is passed.
.. versionadded:: 1.1
"""
kw["_default_array_type"] = ARRAY
return functions.func.array_agg(*arg, **kw)
|
"""
XBlock Courseware Components
"""
import warnings
import xblock.core
import xblock.fields
class XBlockMixin(xblock.core.XBlockMixin):
"""
A wrapper around xblock.core.XBlockMixin that provides backwards compatibility for the old location.
Deprecated.
"""
def __init__(self, *args, **kwargs):
warnings.warn("Please use xblock.core.XBlockMixin", DeprecationWarning, stacklevel=2)
super(XBlockMixin, self).__init__(*args, **kwargs)
xblock.fields.XBlockMixin = XBlockMixin
__version__ = "0.4.7"
|
"""
Unit tests for email feature flag in new instructor dashboard.
Additionally tests that bulk email is always disabled for
non-Mongo backed courses, regardless of email feature flag, and
that the view is conditionally available when Course Auth is turned on.
"""
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from mock import patch
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from bulk_email.models import CourseAuthorization
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_TOY_MODULESTORE, ModuleStoreTestCase
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.factories import CourseFactory
class TestNewInstructorDashboardEmailViewMongoBacked(ModuleStoreTestCase):
"""
Check for email view on the new instructor dashboard
for Mongo-backed courses
"""
def setUp(self):
super(TestNewInstructorDashboardEmailViewMongoBacked, self).setUp()
self.course = CourseFactory.create()
# Create instructor account
instructor = AdminFactory.create()
self.client.login(username=instructor.username, password="test")
# URL for instructor dash
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
# URL for email view
self.email_link = '<a href="" data-section="send_email">Email</a>'
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
# In order for bulk email to work, we must have both the ENABLE_INSTRUCTOR_EMAIL_FLAG
# set to True and for the course to be Mongo-backed.
# The flag is enabled and the course is Mongo-backed (should work)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_true_mongo_true(self):
# Assert that instructor email is enabled for this course - since REQUIRE_COURSE_EMAIL_AUTH is False,
# all courses should be authorized to use email.
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view is in the response
response = self.client.get(self.url)
self.assertIn(self.email_link, response.content)
send_to_label = '<label for="id_to">Send to:</label>'
self.assertTrue(send_to_label in response.content)
self.assertEqual(response.status_code, 200)
# The course is Mongo-backed but the flag is disabled (should not work)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': False})
def test_email_flag_false_mongo_true(self):
# Assert that the URL for the email view is not in the response
response = self.client.get(self.url)
self.assertFalse(self.email_link in response.content)
# Flag is enabled, but we require course auth and haven't turned it on for this course
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_course_not_authorized(self):
# Assert that instructor email is not enabled for this course
self.assertFalse(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view is not in the response
response = self.client.get(self.url)
self.assertFalse(self.email_link in response.content)
# Flag is enabled, we require course auth and turn it on for this course
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_course_authorized(self):
# Assert that instructor email is not enabled for this course
self.assertFalse(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view is not in the response
response = self.client.get(self.url)
self.assertFalse(self.email_link in response.content)
# Authorize the course to use email
cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
cauth.save()
# Assert that instructor email is enabled for this course
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view is in the response
response = self.client.get(self.url)
self.assertTrue(self.email_link in response.content)
# Flag is disabled, but course is authorized
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': False, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_course_authorized_feature_off(self):
# Authorize the course to use email
cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
cauth.save()
# Assert that instructor email IS enabled for this course
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view IS NOT in the response
response = self.client.get(self.url)
self.assertFalse(self.email_link in response.content)
class TestNewInstructorDashboardEmailViewXMLBacked(ModuleStoreTestCase):
"""
Check for email view on the new instructor dashboard
"""
MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE
def setUp(self):
super(TestNewInstructorDashboardEmailViewXMLBacked, self).setUp()
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
# Create instructor account
instructor = AdminFactory.create()
self.client.login(username=instructor.username, password="test")
# URL for instructor dash
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course_key.to_deprecated_string()})
# URL for email view
self.email_link = '<a href="" data-section="send_email">Email</a>'
# The flag is enabled, and since REQUIRE_COURSE_EMAIL_AUTH is False, all courses should
# be authorized to use email. But the course is not Mongo-backed (should not work)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_true_mongo_false(self):
response = self.client.get(self.url)
self.assertFalse(self.email_link in response.content)
# The flag is disabled and the course is not Mongo-backed (should not work)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': False, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_false_mongo_false(self):
response = self.client.get(self.url)
self.assertFalse(self.email_link in response.content)
|
"""
PYBOSSA api module for domain object Category via an API.
This package adds GET, POST, PUT and DELETE methods for:
* categories
"""
from werkzeug.exceptions import BadRequest
from api_base import APIBase
from pybossa.model.category import Category
class CategoryAPI(APIBase):
"""Class API for domain object Category."""
reserved_keys = set(['id', 'created'])
__class__ = Category
def _forbidden_attributes(self, data):
for key in data.keys():
if key in self.reserved_keys:
msg = "Reserved keys in payload: %s" % key
raise BadRequest(msg)
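# For example, a POST payload such as {"id": 5, "name": "Science"} would raise
# BadRequest("Reserved keys in payload: id") before the Category is created.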
|
import frappe
import unittest
test_records = frappe.get_test_records('Chart of Accounts')
class TestChartofAccounts(unittest.TestCase):
pass
|
from course_material.models import CourseMaterial
from rest_framework import serializers
class CourseMaterialSerializer(serializers.ModelSerializer):
class Meta:
model = CourseMaterial
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0002_inventorysettings'),
]
operations = [
migrations.AlterField(
model_name='useditem',
name='timestamp_returned',
field=models.DateTimeField(blank=True, null=True),
),
]
|
import datetime
import io
import uuid
from .sqlfilter import SQLFilter
from lib.exceptions import Core400Exception, Core404Exception
from lib.orm.fields import TimeField, BoolField, ImageField, BinaryField
from lib.orm.binary import Binary
class Sql(object):
""" Generate SQL requests. """
def _replaceSQL(self):
f, v = self._prepareData()
sql = "REPLACE INTO {}.{} ({})\n".format(self._dbname, self.__class__.__name__.lower(), ", ".join(f))
sql += "VALUES ({});".format(", ".join(['%s' for i in range(0, len(f))]))
return sql, v
def _updateSQL(self, data2save, domain):
f, v = self._prepareData(data2save)
sqlFilter = SQLFilter(domain)
if sqlFilter:
sql = "UPDATE {}.{} ".format(self._dbname, self.__class__.__name__.lower())
sql += "SET {} = %s ".format(" = %s, ".join(f))
sql += "WHERE "
sql += sqlFilter.condition
sql += ";"
v.extend(sqlFilter.data)
return sql, v
else:
raise Core400Exception("Condition needed to process update")
@classmethod
def _deleteSQL(cls, domain):
sqlFilter = SQLFilter(domain)
if sqlFilter:
sql = "DELETE FROM {}.{} ".format(cls._dbname, cls.__name__.lower())
sql += "WHERE "
sql += sqlFilter.condition
sql += ";"
return sql, sqlFilter.data
else:
raise Core400Exception("Condition needed to process delete")
@classmethod
def _selectSQL(cls, domain, fields=None, count=None, offset=None, sort=None):
if fields and type(fields) is list:
fields = ", ".join(set(cls._overrideColName(fields + cls._identifiers))) # Always return identifiers
else:
fields = '*'
if domain:
sqlFilter = SQLFilter(domain)
condition = sqlFilter.condition
data = sqlFilter.data
else:
condition = "1"
data = tuple()
sql = "SELECT {} FROM {}.{} ".format(fields, cls._dbname, cls.__name__.lower())
sql += "WHERE "
sql += condition
if sort:
sql += " ORDER BY {}".format(", ".join(cls._overrideColName(sort)))
if count and offset:
sql += " LIMIT {}, {}".format(offset, count)
elif count:
sql += " LIMIT {}".format(count)
sql += ";"
return sql, data
@classmethod
def _createDatabaseSQL(cls):
sql = "CREATE DATABASE IF NOT EXISTS {};".format(cls._dbname)
return sql
@classmethod
def _dropDatabaseSQL(cls):
sql = "DROP DATABASE IF EXISTS {};".format(cls._dbname)
return sql
@classmethod
def _createTableSQL(cls):
        # TODO: remove computed fields from CREATE TABLE (result of cls.__getColumnsSQL())
sql = "CREATE TABLE IF NOT EXISTS {}.{} (\n".format(cls._dbname, cls.__name__.lower())
sql += cls.__getColumnsSQL()
sql += ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;"
return sql
@classmethod
def _dropTableSQL(cls):
return "DROP TABLE IF EXISTS {}.{};".format(cls._dbname, cls.__name__.lower())
@classmethod
def _overrideColName(cls, fields):
f = list()
for colname in fields:
col = getattr(cls, "_{}_field".format(colname), None)
if not col:
raise Core404Exception("Attribute '" + colname + "' doesn't exist.")
if not col.compute:
f.append(col.fieldName or colname)
return f
@classmethod
def _reverseData(cls, fields):
"""
Replace SQL field name by python field name.
Replace SQL value by python value.
"""
for colname in cls._columns:
col = getattr(cls, "_{}_field".format(colname), None)
# Change field name
name = col.fieldName
if name in fields:
fields[colname] = fields.pop(name)
# Adapt Value
if isinstance(col, TimeField) and colname in fields:
fields[colname] = (datetime.datetime.min + fields[colname]).time()
elif isinstance(col, BoolField) and colname in fields:
                fields[colname] = fields[colname] == 1
elif isinstance(col, BinaryField) and colname in fields and fields[colname]:
identifiers = list()
for identifier in cls._identifiers:
ids = getattr(cls, "_{}_field".format(identifier))
ids = ids.fieldName or identifier
identifiers.append(str(fields[ids]))
if col.backendFS:
metadata = fields[colname].split("\n")
fields[colname] = Binary(cls.__name__.lower(), colname, metadata[1], metadata[2], uuid=metadata[0])
fields[colname].loadStreamFromFS(identifiers)
else:
if type(col) is ImageField:
fields[colname] = Binary(cls.__name__.lower(), colname, col.mimeType, col.extension, io.BytesIO(fields[colname]))
fields[colname].loadStreamFromDB(identifiers)
else:
data = fields[colname].split("\n", 2)
fields[colname] = Binary(cls.__name__.lower(), colname, data[0], data[1], io.BytesIO(data[2]))
fields[colname].loadStreamFromDB(identifiers)
def _prepareData(self, data2save=None):
"""
Field : Convert field name to SQL column name
Value : Convert python type to SQL type
"""
k = list()
v = list()
for colname in self._columns:
col = getattr(self, "_{}_field".format(colname), None)
if not col:
raise Core404Exception("Attribute '" + colname + "' doesn't exist.")
val = getattr(self, colname)
# Use for UPDATE (partial fields), not for INSERT/REPLACE
            if data2save and (colname not in data2save):
                proceed = False
            else:
                proceed = True
            if not col.compute and val and proceed:
# Colname
k.append(col.fieldName or colname)
# Value
if type(val) is list:
v.append(",".join(val))
elif type(val) is Binary:
# Save ressource's identifiers in Binary object to get URL in hooks
identifiers = list()
for identifier in self._identifiers:
ids = getattr(self, "_{}_field".format(identifier))
ids = ids.fieldName or identifier
identifiers.append(str(getattr(self, ids)))
val.loadStreamFromDB(identifiers)
if col.backendFS:
# Remove old file on FS before saving new one
if val.uuid:
val.removeStreamFromFS()
val.uuid = uuid.uuid4().hex
# data to FS
val.save()
# metadata to DB
v.append(val.uuid + "\n" + val.mimetype + "\n" + val.extension)
else:
if type(col) is ImageField:
v.append(val.stream.getvalue())
else:
data = io.BytesIO()
# metadata
data.write(str(val.mimetype))
data.write("\x0A")
data.write(str(val.extension))
data.write("\x0A")
# data
data.write(val.stream.getvalue())
v.append(data.getvalue())
else:
v.append(val)
return (k, v)
@classmethod
def __getColumnsSQL(cls):
sql = ""
for colname in cls._columns:
col = getattr(cls, '_' + colname + '_field')
name = col.fieldName or colname
sql += getattr(Sql, "_{}__get{}SQL".format(Sql.__name__, col.__class__.__name__))(name, col)
# Indexes
sql += cls.__getIndexesSQL()
        # TODO: handle foreign key creation
"""
# Foreign keys
for fk in lib.orm.one2many[sqlcls.__name__]:
if fk.cascade:
cascade = "ON UPDATE CASCADE ON DELETE CASCADE"
else:
cascade = "ON UPDATE NO ACTION ON DELETE NO ACTION"
if fk.table.connectionInfos:
db = fk.table.connectionInfos.databaseName
else:
db = self._database
sql += "FOREIGN KEY ({}) REFERENCES {}.{}({}) {},\n".format(",".join(fk.table._Identifiers), db, fk.table._tableName, ",".join(fk.table._Identifiers), cascade)
"""
# Primary keys
sql += "PRIMARY KEY ({})\n".format(",".join(cls._identifiers))
return sql
@classmethod
def __getIndexesSQL(cls):
if not cls._indexes:
return ""
sql = ""
for idxname in cls._indexes:
            # Retrieve the index's column names
index = getattr(cls, idxname)
if type(index.columns) is not list:
cols = [index.columns]
else:
cols = index.columns
# Check if column's name is different from SQL column's name
columns = list()
for colname in cols:
col = getattr(cls, "_" + colname + "_field")
columns.append(col.fieldName or colname)
sql += "INDEX {} ({}),\n".format(idxname, ",".join(columns))
return sql
@classmethod
def __getColStruct(cls, col):
default = col.default
if type(col.default) is list:
default = ",".join(col.default)
elif type(col.default) is bool:
if col.default:
default = '1'
else:
default = '0'
elif type(col.default) is unicode:
default = col.default.encode('utf-8')
elif col.default:
default = str(col.default)
if default:
default = " DEFAULT '{}'".format(default.replace("'", "''"))
else:
default = ""
if col.require:
null = " NOT NULL"
else:
null = " NULL"
if col.unique:
unique = " UNIQUE KEY"
null = " NOT NULL"
default = ""
else:
unique = ""
if col.identifier:
null = " NOT NULL"
return (default, null, unique)
@classmethod
def __getStringFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
if col.length is None or col.length > 2000:
t = "TEXT"
default = ""
else:
t = "VARCHAR"
sqltype = col.backendType or t
if col.length:
length = "({})".format(col.length)
else:
length = ""
if sqltype == "TEXT":
default = ""
length = ""
return "{} {}{}{}{}{},\n".format(name, sqltype, length, null, default, unique)
@classmethod
def __getUrlFieldSQL(cls, name, col):
return cls.__getStringFieldSQL(name, col)
@classmethod
def __getEmailFieldSQL(cls, name, col):
return cls.__getStringFieldSQL(name, col)
@classmethod
def __getColorFieldSQL(cls, name, col):
return cls.__getStringFieldSQL(name, col)
@classmethod
def __getPhoneFieldSQL(cls, name, col):
return cls.__getStringFieldSQL(name, col)
@classmethod
def __getSetFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
sqltype = "SET"
values = "('{}')".format("','".join(col.values))
return "{} {}{}{}{}{},\n".format(name, sqltype, values, null, default, unique)
@classmethod
def __getEnumFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
sqltype = "ENUM"
values = "('{}')".format("','".join(col.values).encode('utf-8'))
return "{} {}{}{}{}{},\n".format(name, sqltype, values, null, default, unique)
@classmethod
def __getDecimalFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
sqltype = "DECIMAL"
return "{} {}({},{}){}{}{},\n".format(name, sqltype, col.size, col.precision, null, default, unique)
@classmethod
def __getCurrencyFieldSQL(cls, name, col):
return cls.__getDecimalFieldSQL(name, col)
@classmethod
def __getBinaryFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
if col.backendFS:
            # Only metadata are stored in the DB
sql = "{} {}{},\n".format(name, "TEXT", null)
else:
            # Everything is stored in the DB
sql = "{} {}{},\n".format(name, "BLOB", null)
return sql
@classmethod
def __getImageFieldSQL(cls, name, col):
return cls.__getBinaryFieldSQL(name, col)
@classmethod
def __getIntFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
        if col.size is None or col.size < 12:
t = "INT"
else:
t = "BIGINT"
sqltype = col.backendType or t
if col.size:
size = "({})".format(col.size)
else:
size = ""
if col.zerofill:
zerofill = " ZEROFILL"
else:
zerofill = ""
if col.unsigned:
unsigned = " UNSIGNED"
else:
unsigned = ""
if col.autoIncrement:
autoIncrement = " AUTO_INCREMENT"
null = " NOT NULL"
default = ""
unique = ""
else:
autoIncrement = ""
return "{} {}{}{}{}{}{}{}{},\n".format(name, sqltype, size, unsigned, zerofill, null, autoIncrement, default, unique)
@classmethod
def __getBoolFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
sqltype = "TINYINT(1)"
return "{} {}{}{}{},\n".format(name, sqltype, " NOT NULL", default, unique)
@classmethod
def __getDateFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
sqltype = "DATE"
return "{} {}{}{}{},\n".format(name, sqltype, null, default, unique)
@classmethod
def __getTimeFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
sqltype = "TIME"
return "{} {}{}{}{},\n".format(name, sqltype, null, default, unique)
@classmethod
def __getDatetimeFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
sqltype = "DATETIME"
return "{} {}{}{}{},\n".format(name, sqltype, null, default, unique)
@classmethod
def __getListFieldSQL(cls, name, col):
default, null, unique = cls.__getColStruct(col)
sqltype = "VARCHAR({})".format(col.length)
return "{} {}{}{},\n".format(name, sqltype, null, default)
|
from . import partner, stock_picking
|
from .mapinfo import MapInfo
from .map import Map
from .address import Address
mapinfo = MapInfo()
_map = Map()
address = Address()
|
from datetime import datetime, timedelta
import logging
import math
import re
from babel import Locale
from pylons import config
from sqlalchemy import Table, Column, ForeignKey, func, or_
from sqlalchemy import DateTime, Integer, Float, Boolean, Unicode, UnicodeText
from sqlalchemy.orm import reconstructor
import adhocracy.model
from adhocracy.model import meta
log = logging.getLogger(__name__)
instance_table = Table(
'instance', meta.data,
Column('id', Integer, primary_key=True),
Column('key', Unicode(20), nullable=False, unique=True),
Column('label', Unicode(255), nullable=False),
Column('description', UnicodeText(), nullable=True),
Column('required_majority', Float, nullable=False),
Column('activation_delay', Integer, nullable=False),
Column('create_time', DateTime, default=func.now()),
Column('access_time', DateTime, default=func.now(),
onupdate=func.now()),
Column('delete_time', DateTime, nullable=True),
Column('creator_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('default_group_id', Integer, ForeignKey('group.id'),
nullable=True),
Column('allow_adopt', Boolean, default=True),
Column('allow_delegate', Boolean, default=True),
Column('allow_propose', Boolean, default=True),
Column('allow_index', Boolean, default=True),
Column('hidden', Boolean, default=False),
Column('locale', Unicode(7), nullable=True),
Column('css', UnicodeText(), nullable=True),
Column('frozen', Boolean, default=False),
Column('milestones', Boolean, default=False),
Column('use_norms', Boolean, nullable=True, default=True),
Column('require_selection', Boolean, nullable=True, default=False),
Column('is_authenticated', Boolean, nullable=True, default=False),
Column('hide_global_categories', Boolean, nullable=True, default=False),
Column('editable_comments_default', Boolean, nullable=True, default=True),
Column('require_valid_email', Boolean, nullable=True, default=True),
)
class Instance(meta.Indexable):
__tablename__ = 'instance'
INSTANCE_KEY = re.compile("^[a-zA-Z][a-zA-Z0-9-]{2,18}$")
# Special purpose instances
SPECIAL_KEYS = [u'test', u'feedback']
def __init__(self, key, label, creator, description=None):
self.key = key
self.label = label
self.creator = creator
self.description = description
self.required_majority = 0.66
self.activation_delay = 7
self.allow_adopt = True
self.allow_delegate = True
self.allow_propose = True
self.allow_index = True
self.hidden = False
self.frozen = False
self.require_selection = False
self._required_participation = None
@reconstructor
def _reconstruct(self):
self._required_participation = None
def _get_locale(self):
if not self._locale:
return None
return Locale.parse(self._locale)
def _set_locale(self, locale):
self._locale = unicode(locale)
locale = property(_get_locale, _set_locale)
def current_memberships(self):
return [m for m in self.memberships if not m.is_expired()]
def members(self):
'''
return all users that are members of this instance through
global or local membership
'''
from adhocracy.model.permission import Permission
members = [membership.user for membership in
self.current_memberships()]
global_membership = Permission.find('global.member')
for group in global_membership.groups:
for membership in group.memberships:
                if membership.instance is None and not membership.expire_time:
members.append(membership.user)
return list(set(members))
def _get_required_participation(self):
if self._required_participation is None:
from adhocracy.lib.democracy import Decision
avg = Decision.average_decisions(self)
required = int(math.ceil(max(2, avg * self.required_majority)))
self._required_participation = required
return self._required_participation
required_participation = property(_get_required_participation)
def _get_activation_timedelta(self):
return timedelta(days=self.activation_delay)
#return timedelta(minutes=self.activation_delay)
activation_timedelta = property(_get_activation_timedelta)
def _get_num_proposals(self):
from proposal import Proposal
q = meta.Session.query(Proposal)
q = q.filter(Proposal.instance == self)
q = q.filter(or_(Proposal.delete_time == None,
Proposal.delete_time >= datetime.utcnow()))
return q.count()
num_proposals = property(_get_num_proposals)
def _get_num_members(self):
from membership import Membership
q = meta.Session.query(Membership)
q = q.filter(Membership.instance == self)
q = q.filter(or_(Membership.expire_time == None,
Membership.expire_time >= datetime.utcnow()))
return q.count()
num_members = property(_get_num_members)
@classmethod
#@meta.session_cached
def find(cls, key, instance_filter=True, include_deleted=False):
key = unicode(key).lower()
try:
q = meta.Session.query(Instance)
try:
q = q.filter(Instance.id == int(key))
except ValueError:
q = q.filter(Instance.key == unicode(key))
if not include_deleted:
q = q.filter(or_(Instance.delete_time == None,
Instance.delete_time > datetime.utcnow()))
return q.limit(1).first()
except Exception, e:
log.warn("find(%s): %s" % (key, e))
return None
def is_deleted(self, at_time=None):
if at_time is None:
at_time = datetime.utcnow()
return (self.delete_time is not None) and \
self.delete_time <= at_time
def delete(self, delete_time=None):
if delete_time is None:
delete_time = datetime.utcnow()
for delegateable in self.delegateables:
delegateable.delete(delete_time)
for membership in self.memberships:
membership.expire(delete_time)
if not self.is_deleted(delete_time):
self.delete_time = delete_time
def is_shown(self, at_time=None):
if at_time is None:
at_time = datetime.utcnow()
return not (self.is_deleted(at_time) or self.hidden)
_index_id_attr = 'key'
@classmethod
def all_q(cls):
return meta.Session.query(Instance)
@classmethod
def all(cls, limit=None, include_deleted=False, include_hidden=False):
        q = cls.all_q()
if not include_deleted:
q = q.filter(or_(Instance.delete_time == None,
Instance.delete_time > datetime.utcnow()))
if not include_hidden:
q = q.filter(or_(Instance.hidden == None,
Instance.hidden == False))
if limit is not None:
q = q.limit(limit)
return q.all()
@classmethod
def create(cls, key, label, user, description=None, locale=None):
from group import Group
from membership import Membership
from page import Page
instance = Instance(unicode(key).lower(), label, user)
instance.description = description
instance.default_group = Group.by_code(Group.INSTANCE_DEFAULT)
if locale is not None:
instance.locale = locale
meta.Session.add(instance)
supervisor_group = Group.by_code(Group.CODE_SUPERVISOR)
membership = Membership(user, instance, supervisor_group,
approved=True)
meta.Session.add(membership)
Page.create(instance, label, u"", user)
        # Auto-join all users into this instance when configured
config_autojoin = config.get('adhocracy.instances.autojoin')
if (config_autojoin and
(config_autojoin == 'ALL' or
key in (k.strip() for k in config_autojoin.split(',')))):
users = adhocracy.model.User.all()
for u in users:
autojoin_membership = Membership(u, instance,
instance.default_group)
meta.Session.add(autojoin_membership)
meta.Session.flush()
return instance
def to_dict(self):
from adhocracy.lib import helpers as h
d = dict(id=self.id,
key=self.key,
label=self.label,
creator=self.creator.user_name,
required_majority=self.required_majority,
activation_delay=self.activation_delay,
allow_adopt=self.allow_adopt,
allow_delegate=self.allow_delegate,
allow_propose=self.allow_propose,
allow_index=self.allow_index,
hidden=self.hidden,
url=h.entity_url(self),
instance_url=h.instance.url(self),
default_group=self.default_group.code,
create_time=self.create_time)
if self.description:
d['description'] = self.description
return d
def to_index(self):
from adhocracy.lib.event import stats as estats
index = super(Instance, self).to_index()
if self.hidden:
index['skip'] = True
index.update(dict(
instance=self.key,
title=self.label,
tags=[],
body=self.description,
user=self.creator.user_name,
activity=estats.instance_activity(self)
))
return index
def __repr__(self):
return u"<Instance(%d,%s)>" % (self.id, self.key)
|
from .default import default
import os
import re
class image(default):
def __init__(self, key, stat):
default.__init__(self, key, stat)
self.data = {}
def compile(self, prop):
if not os.path.exists(prop['value']):
print("Image '{}' not found.".format(prop['value']))
return repr(prop['value'])
def stat_value(self, prop):
if prop['value'] is None:
return prop['value']
if os.path.exists(prop['value']):
from wand.image import Image
img = Image(filename=prop['value'])
self.data[prop['value']] = img.size
if not prop['key'] in self.stat['global_data']:
self.stat['global_data'][prop['key']] = {}
self.stat['global_data'][prop['key']][prop['value']] = img.size
return prop['value']
def get_global_data(self):
self.stat.property_values(self.key)
return self.data
|
from odoo import models, api
class BaseConfigSettings(models.TransientModel):
_inherit = 'base.config.settings'
def _partner_names_order_selection(self):
options = super(
BaseConfigSettings, self)._partner_names_order_selection()
new_labels = {
'last_first': 'Lastname SecondLastname Firstname',
'last_first_comma': 'Lastname SecondLastname, Firstname',
'first_last': 'Firstname Lastname SecondLastname',
}
return [(k, new_labels[k]) if k in new_labels else (k, v)
for k, v in options]
@api.multi
def _partners_for_recalculating(self):
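        # The domain below uses Odoo's prefix notation with an implicit AND
        # between top-level terms: it selects individual partners that have
        # at least two of the three name fields (firstname, lastname,
        # lastname2) set.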
return self.env['res.partner'].search([
('is_company', '=', False),
'|', '&', ('firstname', '!=', False), ('lastname', '!=', False),
'|', '&', ('firstname', '!=', False), ('lastname2', '!=', False),
'&', ('lastname', '!=', False), ('lastname2', '!=', False),
])
|
import calendar
from datetime import datetime, date
from dateutil import relativedelta
from lxml import etree
import json
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.addons.resource.faces import task as Task
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'description': fields.text('Description'),
'sequence': fields.integer('Sequence'),
'case_default': fields.boolean('Default for New Projects',
help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
        'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when '
                                    'there are no records in that stage to display.'),
}
    def _get_default_project_ids(self, cr, uid, ctx=None):
        project_id = self.pool['project.task']._get_default_project_id(cr, uid, context=ctx)
if project_id:
return [project_id]
return None
_defaults = {
'sequence': 1,
'project_ids': _get_default_project_ids,
}
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
_period_number = 5
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
'project.task', self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if user == 1:
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
if context and context.get('user_preference'):
cr.execute("""SELECT project.id FROM project_project project
LEFT JOIN account_analytic_account account ON account.id = project.analytic_account_id
LEFT JOIN project_user_rel rel ON rel.project_id = project.id
                          WHERE (account.user_id = %s or rel.uid = %s)""", (user, user))
return [(r[0]) for r in cr.fetchall()]
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
val = {}
if not part:
return {'value': val}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def _get_projects_from_tasks(self, cr, uid, task_ids, context=None):
tasks = self.pool.get('project.task').browse(cr, uid, task_ids, context=context)
project_ids = [task.project_id.id for task in tasks if task.project_id]
return self.pool.get('project.project')._get_project_and_parents(cr, uid, project_ids, context)
def _get_project_and_parents(self, cr, uid, ids, context=None):
""" return the project ids and all their parent projects """
res = set(ids)
while ids:
cr.execute("""
SELECT DISTINCT parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND project.id IN %s
""", (tuple(ids),))
ids = [t[0] for t in cr.fetchall()]
res.update(ids)
return list(res)
def _get_project_and_children(self, cr, uid, ids, context=None):
""" retrieve all children projects of project ids;
return a dictionary mapping each project to its parent project (or None)
"""
res = dict.fromkeys(ids, None)
while ids:
cr.execute("""
SELECT project.id, parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND parent.id IN %s
""", (tuple(ids),))
dic = dict(cr.fetchall())
res.update(dic)
ids = dic.keys()
return res
def _progress_rate(self, cr, uid, ids, names, arg, context=None):
child_parent = self._get_project_and_children(cr, uid, ids, context)
# compute planned_hours, total_hours, effective_hours specific to each project
cr.execute("""
SELECT project_id, COALESCE(SUM(planned_hours), 0.0),
COALESCE(SUM(total_hours), 0.0), COALESCE(SUM(effective_hours), 0.0)
FROM project_task
LEFT JOIN project_task_type ON project_task.stage_id = project_task_type.id
WHERE project_task.project_id IN %s AND project_task_type.fold = False
GROUP BY project_id
""", (tuple(child_parent.keys()),))
# aggregate results into res
res = dict([(id, {'planned_hours':0.0, 'total_hours':0.0, 'effective_hours':0.0}) for id in ids])
for id, planned, total, effective in cr.fetchall():
# add the values specific to id to all parent projects of id in the result
while id:
if id in ids:
res[id]['planned_hours'] += planned
res[id]['total_hours'] += total
res[id]['effective_hours'] += effective
id = child_parent[id]
# compute progress rates
for id in ids:
if res[id]['total_hours']:
res[id]['progress_rate'] = round(100.0 * res[id]['effective_hours'] / res[id]['total_hours'], 2)
else:
res[id]['progress_rate'] = 0.0
return res
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
analytic_account_to_delete = set()
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise osv.except_osv(_('Invalid Action!'),
_('You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
elif proj.alias_id:
alias_ids.append(proj.alias_id.id)
if proj.analytic_account_id and not proj.analytic_account_id.line_ids:
analytic_account_to_delete.add(proj.analytic_account_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
self.pool['account.analytic.account'].unlink(cr, uid, list(analytic_account_to_delete), context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.project'), ('res_id', '=', id)], context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)], context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
res={}
for tasks in self.browse(cr, uid, ids, context):
res[tasks.id] = len(tasks.task_ids)
return res
def _get_alias_models(self, cr, uid, context=None):
""" Overriden in project_issue to offer more options """
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('public', 'Public project'),
('employees', 'Internal project: all employees can access'),
                ('followers', 'Private project: followers only')]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, context=None):
""" Generic method to generate data for bar chart values using SparklineBarWidget.
This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).
:param obj: the target model (i.e. crm_lead)
:param domain: the domain applied to the read_group
:param list read_fields: the list of fields to read in the read_group
:param str value_field: the field used to compute the value of the bar slice
:param str groupby_field: the fields used to group
:return list section_result: a list of dicts: [
{ 'value': (int) bar_column_value,
                  'tooltip': (str) bar_column_tooltip,
}
]
"""
month_begin = date.today().replace(day=1)
section_result = [{
'value': 0,
'tooltip': (month_begin + relativedelta.relativedelta(months=-i)).strftime('%B'),
} for i in range(self._period_number - 1, -1, -1)]
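        # one slot per month, oldest first; groups found below overwrite the
        # matching slot, so months without records keep their zero value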
group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
pattern = tools.DEFAULT_SERVER_DATE_FORMAT if obj.fields_get(cr, uid, groupby_field)[groupby_field]['type'] == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
for group in group_obj:
group_begin_date = datetime.strptime(group['__domain'][0][2], pattern)
month_delta = relativedelta.relativedelta(month_begin, group_begin_date)
section_result[self._period_number - (month_delta.months + 1)] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field, 0)}
return section_result
def _get_project_task_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['project.task']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
res = {}
for id in ids:
created_domain = [('project_id', '=', id), ('create_date', '>=', date_begin ), ('create_date', '<=', date_end ), ('stage_id.fold', '=', False)]
res[id] = json.dumps(self.__get_bar_values(cr, uid, obj, created_domain, [ 'create_date'], 'create_date_count', 'create_date', context=context))
return res
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the project without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
'analytic_account_id': fields.many2one(
'account.analytic.account', 'Contract/Analytic',
help="Link this project to an analytic account if you need financial management on projects. "
"It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.",
ondelete="cascade", required=True, auto_join=True),
'members': fields.many2many('res.users', 'project_user_rel', 'project_id', 'uid', 'Project Members',
help="Project's members are users who can have an access to the tasks related to this project.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'tasks': fields.one2many('project.task', 'project_id', "Task Activities"),
'planned_hours': fields.function(_progress_rate, multi="progress", string='Planned Time', help="Sum of planned hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'effective_hours': fields.function(_progress_rate, multi="progress", string='Time Spent', help="Sum of spent hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'total_hours': fields.function(_progress_rate, multi="progress", string='Total Time', help="Sum of total hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'progress_rate': fields.function(_progress_rate, multi="progress", string='Progress', type='float', group_operator="avg", help="Percent of tasks closed according to the total of tasks todo.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time', help="Timetable working hours to adjust the gantt diagram report", states={'close':[('readonly',True)]} ),
'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', 'Tasks Stages', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'task_count': fields.function(_task_count, type='integer', string="Tasks",),
'task_ids': fields.one2many('project.task', 'project_id',
domain=[('stage_id.fold', '=', False)]),
'color': fields.integer('Color Index'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized"
"with Tasks (or optionally Issues if the Issue Tracker module is installed)."),
'alias_model': fields.selection(_alias_models, "Alias Model", select=True, required=True,
help="The kind of document created when an email is received on this project's email alias"),
'privacy_visibility': fields.selection(_visibility_selection, 'Privacy / Visibility', required=True,
help="Holds visibility of the tasks or issues that belong to the current project:\n"
"- Public: everybody sees everything; if portal is activated, portal users\n"
" see all tasks or issues; if anonymous portal is activated, visitors\n"
" see all tasks or issues\n"
"- Portal (only available if Portal is installed): employees see everything;\n"
" if portal is activated, portal users see the tasks or issues followed by\n"
" them or by someone of their company\n"
"- Employees Only: employees see all tasks or issues\n"
"- Followers Only: employees see only the followed tasks or issues; if portal\n"
" is activated, portal users see the followed tasks or issues."),
'state': fields.selection([('template', 'Template'),
('draft','New'),
('open','In Progress'),
('cancelled', 'Cancelled'),
('pending','Pending'),
('close','Closed')],
'Status', required=True, copy=False),
'monthly_tasks': fields.function(_get_project_task_data, type='char', readonly=True,
string='Project Task By Month'),
'doc_count': fields.function(
_get_attached_docs, string="Number of documents attached", type='integer'
)
}
def _get_type_common(self, cr, uid, context):
ids = self.pool.get('project.task.type').search(cr, uid, [('case_default','=',1)], context=context)
return ids
_order = "sequence, id"
_defaults = {
'active': True,
'type': 'contract',
'state': 'open',
'sequence': 10,
'type_ids': _get_type_common,
'alias_model': 'project.task',
'privacy_visibility': 'employees',
}
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(project, self).message_get_suggested_recipients(cr, uid, ids, context=context)
for data in self.browse(cr, uid, ids, context=context):
if data.partner_id:
reason = _('Customer Email') if data.partner_id.email else _('Customer')
self._message_add_suggested_recipient(cr, uid, recipients, data, partner=data.partner_id, reason= '%s' % reason)
return recipients
    # TODO: why not use a SQL constraint?
def _check_dates(self, cr, uid, ids, context=None):
for leave in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
if leave['date_start'] and leave['date']:
if leave['date_start'] > leave['date']:
return False
return True
_constraints = [
        (_check_dates, 'Error! Project start-date must be lower than project end-date.', ['date_start', 'date'])
]
def set_template(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=False, context=context)
def set_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
def set_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'pending'}, context=context)
def set_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def reset_project(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=True, context=context)
def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
""" copy and map tasks from old to new project """
if context is None:
context = {}
map_task_id = {}
task_obj = self.pool.get('project.task')
proj = self.browse(cr, uid, old_project_id, context=context)
for task in proj.tasks:
# preserve task name and stage, normally altered during copy
defaults = {'stage_id': task.stage_id.id,
'name': task.name}
map_task_id[task.id] = task_obj.copy(cr, uid, task.id, defaults, context=context)
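        # (6, 0, ids) is the one2many 'replace' command: the new project's
        # task list becomes exactly the set of copied tasks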
self.write(cr, uid, [new_project_id], {'tasks':[(6,0, map_task_id.values())]})
task_obj.duplicate_task(cr, uid, map_task_id, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
context = dict(context or {})
context['active_test'] = False
proj = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default.update(name=_("%s (copy)") % (proj.name))
res = super(project, self).copy(cr, uid, id, default, context)
self.map_tasks(cr, uid, id, res, context=context)
return res
def duplicate_template(self, cr, uid, ids, context=None):
context = dict(context or {})
data_obj = self.pool.get('ir.model.data')
result = []
for proj in self.browse(cr, uid, ids, context=context):
parent_id = context.get('parent_id', False)
context.update({'analytic_project_copy': True})
new_date_start = time.strftime('%Y-%m-%d')
new_date_end = False
if proj.date_start and proj.date:
start_date = date(*time.strptime(proj.date_start,'%Y-%m-%d')[:3])
end_date = date(*time.strptime(proj.date,'%Y-%m-%d')[:3])
new_date_end = (datetime(*time.strptime(new_date_start,'%Y-%m-%d')[:3])+(end_date-start_date)).strftime('%Y-%m-%d')
context.update({'copy':True})
new_id = self.copy(cr, uid, proj.id, default = {
'name':_("%s (copy)") % (proj.name),
'state':'open',
'date_start':new_date_start,
'date':new_date_end,
'parent_id':parent_id}, context=context)
result.append(new_id)
child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)], context=context)
parent_id = self.read(cr, uid, new_id, ['analytic_account_id'])['analytic_account_id'][0]
if child_ids:
self.duplicate_template(cr, uid, child_ids, context={'parent_id': parent_id})
        if result:
res_id = result[0]
form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
return {
'name': _('Projects'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.project',
'view_id': False,
'res_id': res_id,
'views': [(form_view['res_id'],'form'),(tree_view['res_id'],'tree')],
'type': 'ir.actions.act_window',
'search_view_id': search_view['res_id'],
'nodestroy': True
}
# set active value for a project, its sub projects and its tasks
def setActive(self, cr, uid, ids, value=True, context=None):
task_obj = self.pool.get('project.task')
        for proj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [proj.id], {'state': value and 'open' or 'template'}, context)
cr.execute('select id from project_task where project_id=%s', (proj.id,))
tasks_id = [x[0] for x in cr.fetchall()]
if tasks_id:
task_obj.write(cr, uid, tasks_id, {'active': value}, context=context)
child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)])
if child_ids:
                self.setActive(cr, uid, child_ids, value, context=context)
return True
def _schedule_header(self, cr, uid, ids, force_members=True, context=None):
context = context or {}
        if isinstance(ids, (long, int)):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
for project in projects:
if (not project.members) and force_members:
raise osv.except_osv(_('Warning!'),_("You must assign members on the project '%s'!") % (project.name,))
resource_pool = self.pool.get('resource.resource')
result = "from openerp.addons.resource.faces import *\n"
result += "import datetime\n"
for project in self.browse(cr, uid, ids, context=context):
u_ids = [i.id for i in project.members]
if project.user_id and (project.user_id.id not in u_ids):
u_ids.append(project.user_id.id)
for task in project.tasks:
if task.user_id and (task.user_id.id not in u_ids):
u_ids.append(task.user_id.id)
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
resource_objs = resource_pool.generate_resources(cr, uid, u_ids, calendar_id, context=context)
for key, vals in resource_objs.items():
result +='''
class User_%s(Resource):
efficiency = %s
''' % (key, vals.get('efficiency', False))
result += '''
def Project():
'''
return result
def _schedule_project(self, cr, uid, project, context=None):
resource_pool = self.pool.get('resource.resource')
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
working_days = resource_pool.compute_working_calendar(cr, uid, calendar_id, context=context)
# TODO: check if we need working_..., default values are ok.
puids = [x.id for x in project.members]
if project.user_id:
puids.append(project.user_id.id)
result = """
def Project_%d():
start = \'%s\'
working_days = %s
resource = %s
""" % (
project.id,
project.date_start or time.strftime('%Y-%m-%d'), working_days,
'|'.join(['User_'+str(x) for x in puids]) or 'None'
)
vacation = calendar_id and tuple(resource_pool.compute_vacation(cr, uid, calendar_id, context=context)) or False
if vacation:
result+= """
vacation = %s
""" % ( vacation, )
return result
#TODO: DO Resource allocation and compute availability
    def compute_allocation(self, cr, uid, ids, start_date, end_date, context=None):
        if context is None:
            context = {}
allocation = {}
return allocation
def schedule_tasks(self, cr, uid, ids, context=None):
context = context or {}
        if isinstance(ids, (long, int)):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
result = self._schedule_header(cr, uid, ids, False, context=context)
for project in projects:
result += self._schedule_project(cr, uid, project, context=context)
result += self.pool.get('project.task')._generate_task(cr, uid, project.tasks, ident=4, context=context)
local_dict = {}
exec result in local_dict
projects_gantt = Task.BalancedProject(local_dict['Project'])
for project in projects:
project_gantt = getattr(projects_gantt, 'Project_%d' % (project.id,))
for task in project.tasks:
if task.stage_id and task.stage_id.fold:
continue
p = getattr(project_gantt, 'Task_%d' % (task.id,))
self.pool.get('project.task').write(cr, uid, [task.id], {
'date_start': p.start.strftime('%Y-%m-%d %H:%M:%S'),
'date_end': p.end.strftime('%Y-%m-%d %H:%M:%S')
}, context=context)
if (not task.user_id) and (p.booked_resource):
self.pool.get('project.task').write(cr, uid, [task.id], {
'user_id': int(p.booked_resource[0].name[5:]),
}, context=context)
return True
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# Prevent double project creation when 'use_tasks' is checked + alias management
create_context = dict(context, project_creation_in_progress=True,
alias_model_name=vals.get('alias_model', 'project.task'),
alias_parent_model_name=self._name)
if vals.get('type', False) not in ('template', 'contract'):
vals['type'] = 'contract'
project_id = super(project, self).create(cr, uid, vals, context=create_context)
project_rec = self.browse(cr, uid, project_id, context=context)
ir_values = self.pool.get('ir.values').get_default( cr, uid, 'project.config.settings', 'generate_project_alias' )
values = { 'alias_parent_thread_id': project_id, 'alias_defaults': {'project_id': project_id}}
if ir_values:
values = dict(values, alias_name=vals['name'])
self.pool.get('mail.alias').write(cr, uid, [project_rec.alias_id.id], values, context=context)
return project_id
def write(self, cr, uid, ids, vals, context=None):
# if alias_model has been changed, update alias_model_id accordingly
if vals.get('alias_model'):
model_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', vals.get('alias_model', 'project.task'))])
vals.update(alias_model_id=model_ids[0])
return super(project, self).write(cr, uid, ids, vals, context=context)
class task(osv.osv):
_name = "project.task"
_description = "Task"
_date_name = "date_start"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
            # this is only a heuristic; depending on your particular stage configuration it may not match all 'new' stages
'project.mt_task_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
'project.mt_task_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id.sequence > 1,
},
'user_id': {
'project.mt_task_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project.mt_task_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project.mt_task_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return (self._resolve_project_id_from_context(cr, uid, context=context) or False)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
        if isinstance(context.get('default_project_id'), (int, long)):
return context['default_project_id']
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return project_ids[0][0]
return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _read_group_user_id(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
res_users = self.pool.get('res.users')
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
access_rights_uid = access_rights_uid or uid
if project_id:
ids += self.pool.get('project.project').read(cr, access_rights_uid, project_id, ['members'], context=context)['members']
order = res_users._order
# lame way to allow reverting search, should just work in the trivial case
if read_group_order == 'user_id desc':
order = '%s desc' % order
# de-duplicate and apply search order
ids = res_users._search(cr, uid, [('id','in',ids)], order=order, access_rights_uid=access_rights_uid, context=context)
result = res_users.name_get(cr, access_rights_uid, ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(ids.index(x[0]), ids.index(y[0])))
return result, {}
_group_by_full = {
'stage_id': _read_group_stage_ids,
'user_id': _read_group_user_id,
}
def _str_get(self, task, level=0, border='***', context=None):
return border+' '+(task.user_id and task.user_id.name.upper() or '')+(level and (': L'+str(level)) or '')+(' - %.1fh / %.1fh'%(task.effective_hours or 0.0,task.planned_hours))+' '+border+'\n'+ \
border[0]+' '+(task.name or '')+'\n'+ \
(task.description or '')+'\n\n'
# Compute: effective_hours, total_hours, progress
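    # effective_hours = SUM(project_task_work.hours); total_hours = effective +
    # remaining; progress = effective / total, capped at 99.99% until the
    # task reaches a folded (closed) stage, where it is forced to 100%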
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
res = {}
cr.execute("SELECT task_id, COALESCE(SUM(hours),0) FROM project_task_work WHERE task_id IN %s GROUP BY task_id",(tuple(ids),))
hours = dict(cr.fetchall())
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = {'effective_hours': hours.get(task.id, 0.0), 'total_hours': (task.remaining_hours or 0.0) + hours.get(task.id, 0.0)}
res[task.id]['delay_hours'] = res[task.id]['total_hours'] - task.planned_hours
res[task.id]['progress'] = 0.0
if (task.remaining_hours + hours.get(task.id, 0.0)):
res[task.id]['progress'] = round(min(100.0 * hours.get(task.id, 0.0) / res[task.id]['total_hours'], 99.99),2)
# TDE CHECK: if task.state in ('done','cancelled'):
if task.stage_id and task.stage_id.fold:
res[task.id]['progress'] = 100.0
return res
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned=0.0):
if remaining and not planned:
return {'value': {'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned=0.0, effective=0.0):
return {'value': {'remaining_hours': planned - effective}}
def onchange_project(self, cr, uid, id, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def onchange_user_id(self, cr, uid, ids, user_id, context=None):
vals = {}
if user_id:
vals['date_start'] = fields.datetime.now()
return {'value': vals}
def duplicate_task(self, cr, uid, map_ids, context=None):
mapper = lambda t: map_ids.get(t.id, t.id)
for task in self.browse(cr, uid, map_ids.values(), context):
new_child_ids = set(map(mapper, task.child_ids))
new_parent_ids = set(map(mapper, task.parent_ids))
if new_child_ids or new_parent_ids:
task.write({'parent_ids': [(6,0,list(new_parent_ids))],
'child_ids': [(6,0,list(new_child_ids))]})
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name'):
current = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % current.name
return super(task, self).copy_data(cr, uid, id, default, context)
def _is_template(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = True
if task.project_id:
if task.project_id.active == False or task.project_id.state == 'template':
res[task.id] = False
return res
def _get_task(self, cr, uid, ids, context=None):
result = {}
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id: result[work.task_id.id] = True
return result.keys()
_columns = {
        'active': fields.function(_is_template, store=True, string='Not a Template Task', type='boolean', help="This field is computed automatically and has the same behavior as the boolean 'active' field: if the task is linked to a template or deactivated project, it will be hidden unless specifically requested."),
'name': fields.char('Task Summary', track_visibility='onchange', size=128, required=True, select=True),
'description': fields.html('Description'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'categ_ids': fields.many2many('project.category', string='Tags'),
'kanban_state': fields.selection([('normal', 'In Progress'),('done', 'Ready for next stage'),('blocked', 'Blocked')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
required=False, copy=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True), #not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date', select=True, copy=False),
'date_end': fields.datetime('Ending Date', select=True, copy=False),
'date_deadline': fields.date('Deadline', select=True, copy=False),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True, copy=False),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select=True, track_visibility='onchange', change_default=True),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'effective_hours': fields.function(_hours_get, string='Hours Spent', multi='hours', help="Computed using the sum of the task work done.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'total_hours': fields.function(_hours_get, string='Total', multi='hours', help="Computed as: Time Spent + Remaining Time.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'progress': fields.function(_hours_get, string='Working Time Progress (%)', multi='hours', group_operator="avg", help="If the task has a progress of 99.99% you should close the task if it's finished or reevaluate the time",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'delay_hours': fields.function(_hours_get, string='Delay Hours', multi='hours', help="Computed as difference between planned hours by the project manager and the total hours of the task.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'user_id': fields.many2one('res.users', 'Assigned to', select=True, track_visibility='onchange'),
'delegated_user_id': fields.related('child_ids', 'user_id', type='many2one', relation='res.users', string='Delegated To'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'work_ids': fields.one2many('project.task.work', 'task_id', 'Work done'),
'manager_id': fields.related('project_id', 'analytic_account_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': _get_default_project_id,
'date_last_stage_update': fields.datetime.now,
'kanban_state': 'normal',
'priority': '0',
'progress': 0,
'sequence': 10,
'active': True,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
'date_start': fields.datetime.now,
}
_order = "priority desc, sequence, date_start, name, id"
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
        if id in visited_branch: # cycle detected
            return False
        if id in visited_node: # already visited; no need to re-check this subtree
return True
visited_branch.add(id)
visited_node.add(id)
#visit child using DFS
task = self.browse(cr, uid, id, context=context)
for child in task.child_ids:
res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
if not res:
return False
visited_branch.remove(id)
return True
    def _check_dates(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        for obj_task in self.browse(cr, uid, ids, context=context):
            start = obj_task.date_start or False
            end = obj_task.date_end or False
            if start and end and start > end:
                return False
        return True
_constraints = [
        (_check_recursion, 'Error! You cannot create recursive tasks.', ['parent_ids']),
        (_check_dates, 'Error! Task end-date must be greater than task start-date', ['date_start','date_end'])
]
# Override view according to the company definition
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
users_obj = self.pool.get('res.users')
if context is None: context = {}
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
tm = obj_tm and obj_tm.name or 'Hours'
res = super(task, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu=submenu)
if tm in ['Hours','Hour']:
return res
eview = etree.fromstring(res['arch'])
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
for f in res['fields']:
if 'Hours' in res['fields'][f]['string']:
res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours',tm)
return res
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_document_name'] = _("tasks")
return super(task, self).get_empty_list_help(cr, uid, help, context=context)
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
search_domain = []
if section_ids:
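            # prefix-notation OR chain: n leaves need n - 1 '|' operators,
            # e.g. three sections give ['|', '|', leaf1, leaf2, leaf3]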
search_domain = [('|')] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def _check_child_task(self, cr, uid, ids, context=None):
        if context is None:
context = {}
tasks = self.browse(cr, uid, ids, context=context)
for task in tasks:
if task.child_ids:
for child in task.child_ids:
if child.stage_id and not child.stage_id.fold:
raise osv.except_osv(_("Warning!"), _("Child task still open.\nPlease cancel or complete child task first."))
return True
def _delegate_task_attachments(self, cr, uid, task_id, delegated_task_id, context=None):
attachment = self.pool.get('ir.attachment')
attachment_ids = attachment.search(cr, uid, [('res_model', '=', self._name), ('res_id', '=', task_id)], context=context)
new_attachment_ids = []
for attachment_id in attachment_ids:
new_attachment_ids.append(attachment.copy(cr, uid, attachment_id, default={'res_id': delegated_task_id}, context=context))
return new_attachment_ids
def do_delegate(self, cr, uid, ids, delegate_data=None, context=None):
"""
Delegate Task to another users.
"""
if delegate_data is None:
delegate_data = {}
assert delegate_data['user_id'], _("Delegated User should be specified")
delegated_tasks = {}
for task in self.browse(cr, uid, ids, context=context):
delegated_task_id = self.copy(cr, uid, task.id, {
'name': delegate_data['name'],
'project_id': delegate_data['project_id'] and delegate_data['project_id'][0] or False,
'stage_id': delegate_data.get('stage_id') and delegate_data.get('stage_id')[0] or False,
'user_id': delegate_data['user_id'] and delegate_data['user_id'][0] or False,
'planned_hours': delegate_data['planned_hours'] or 0.0,
'parent_ids': [(6, 0, [task.id])],
'description': delegate_data['new_task_description'] or '',
'child_ids': [],
'work_ids': []
}, context=context)
self._delegate_task_attachments(cr, uid, task.id, delegated_task_id, context=context)
newname = delegate_data['prefix'] or ''
task.write({
'remaining_hours': delegate_data['planned_hours_me'],
'planned_hours': delegate_data['planned_hours_me'] + (task.effective_hours or 0.0),
'name': newname,
}, context=context)
delegated_tasks[task.id] = delegated_task_id
return delegated_tasks
def set_remaining_time(self, cr, uid, ids, remaining_time=1.0, context=None):
for task in self.browse(cr, uid, ids, context=context):
if (task.stage_id and task.stage_id.sequence <= 1) or (task.planned_hours == 0.0):
self.write(cr, uid, [task.id], {'planned_hours': remaining_time}, context=context)
self.write(cr, uid, ids, {'remaining_hours': remaining_time}, context=context)
return True
def set_remaining_time_1(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 1.0, context)
def set_remaining_time_2(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 2.0, context)
def set_remaining_time_5(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 5.0, context)
def set_remaining_time_10(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 10.0, context)
def _store_history(self, cr, uid, ids, context=None):
for task in self.browse(cr, uid, ids, context=context):
self.pool.get('project.task.history').create(cr, uid, {
'task_id': task.id,
'remaining_hours': task.remaining_hours,
'planned_hours': task.planned_hours,
'kanban_state': task.kanban_state,
'type_id': task.stage_id.id,
'user_id': task.user_id.id
}, context=context)
return True
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
# for default stage
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# user_id change: update date_start
        if vals.get('user_id') and not vals.get('date_start'):
vals['date_start'] = fields.datetime.now()
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
task_id = super(task, self).create(cr, uid, vals, context=create_context)
self._store_history(cr, uid, [task_id], context=context)
return task_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
# user_id change: update date_start
if vals.get('user_id') and 'date_start' not in vals:
vals['date_start'] = fields.datetime.now()
# Overridden to reset the kanban_state to normal whenever
# the stage (stage_id) of the task changes.
        if vals and 'kanban_state' not in vals and 'stage_id' in vals:
new_stage = vals.get('stage_id')
vals_reset_kstate = dict(vals, kanban_state='normal')
for t in self.browse(cr, uid, ids, context=context):
write_vals = vals_reset_kstate if t.stage_id.id != new_stage else vals
super(task, self).write(cr, uid, [t.id], write_vals, context=context)
result = True
else:
result = super(task, self).write(cr, uid, ids, vals, context=context)
if any(item in vals for item in ['stage_id', 'remaining_hours', 'user_id', 'kanban_state']):
self._store_history(cr, uid, ids, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
        if context is None:
context = {}
self._check_child_task(cr, uid, ids, context=context)
res = super(task, self).unlink(cr, uid, ids, context)
return res
def _generate_task(self, cr, uid, tasks, ident=4, context=None):
context = context or {}
result = ""
ident = ' '*ident
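        # emit one nested function per open task for the faces scheduler;
        # parent tasks constrain the start date via max() of their end dates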
for task in tasks:
if task.stage_id and task.stage_id.fold:
continue
result += '''
%sdef Task_%s():
%s todo = \"%.2fH\"
%s effort = \"%.2fH\"''' % (ident,task.id, ident,task.remaining_hours, ident,task.total_hours)
start = []
for t2 in task.parent_ids:
start.append("up.Task_%s.end" % (t2.id,))
if start:
result += '''
%s start = max(%s)
''' % (ident,','.join(start))
if task.user_id:
result += '''
%s resource = %s
''' % (ident, 'User_'+str(task.user_id.id))
result += "\n"
return result
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
tasks = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([task.project_id.id for task in tasks if task.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
return dict((task.id, aliases.get(task.project_id and task.project_id.id or 0, False)) for task in tasks)
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Override to updates the document according to the email. """
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject'),
'planned_hours': 0.0,
}
defaults.update(custom_values)
res = super(task, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
email_list = tools.email_split(msg.get('to', '') + ',' + msg.get('cc', ''))
new_task = self.browse(cr, uid, res, context=context)
if new_task.project_id and new_task.project_id.alias_name: # check left-part is not already an alias
email_list = filter(lambda x: x.split('@')[0] != new_task.project_id.alias_name, email_list)
partner_ids = filter(lambda x: x, self._find_partner_from_emails(cr, uid, None, email_list, context=context, check_followers=False))
self.message_subscribe(cr, uid, [res], partner_ids, context=context)
return res
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Override to update the task according to the email. """
if update_vals is None:
update_vals = {}
maps = {
'cost': 'planned_hours',
}
for line in msg['body'].split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res:
match = res.group(1).lower()
field = maps.get(match)
if field:
try:
update_vals[field] = float(res.group(2).lower())
except (ValueError, TypeError):
pass
return super(task, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
class project_work(osv.osv):
_name = "project.task.work"
_description = "Project Task Work"
_columns = {
'name': fields.char('Work summary'),
'date': fields.datetime('Date', select="1"),
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select="1"),
'hours': fields.float('Time Spent'),
'user_id': fields.many2one('res.users', 'Done by', required=True, select="1"),
'company_id': fields.related('task_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
_defaults = {
'user_id': lambda obj, cr, uid, context: uid,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S')
}
_order = "date desc"
def create(self, cr, uid, vals, context=None):
if 'hours' in vals and (not vals['hours']):
vals['hours'] = 0.00
if 'task_id' in vals:
cr.execute('update project_task set remaining_hours=remaining_hours - %s where id=%s', (vals.get('hours',0.0), vals['task_id']))
self.pool.get('project.task').invalidate_cache(cr, uid, ['remaining_hours'], [vals['task_id']], context=context)
return super(project_work,self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'hours' in vals and (not vals['hours']):
vals['hours'] = 0.00
if 'hours' in vals:
task_obj = self.pool.get('project.task')
for work in self.browse(cr, uid, ids, context=context):
cr.execute('update project_task set remaining_hours=remaining_hours - %s + (%s) where id=%s', (vals.get('hours',0.0), work.hours, work.task_id.id))
task_obj.invalidate_cache(cr, uid, ['remaining_hours'], [work.task_id.id], context=context)
return super(project_work,self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
task_obj = self.pool.get('project.task')
for work in self.browse(cr, uid, ids):
cr.execute('update project_task set remaining_hours=remaining_hours + %s where id=%s', (work.hours, work.task_id.id))
task_obj.invalidate_cache(cr, uid, ['remaining_hours'], [work.task_id.id], context=context)
return super(project_work,self).unlink(cr, uid, ids, context=context)
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_tasks': fields.boolean('Tasks',help="If checked, this contract will be available in the project menu and you will be able to manage tasks or track issues"),
'company_uom_id': fields.related('company_id', 'project_time_mode_id', type='many2one', relation='product.uom'),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_tasks'] = template.use_tasks
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
'''
        Decide whether a project must be created automatically when an analytic account is created. Return True if so, False otherwise.
'''
if context is None: context = {}
        return vals.get('use_tasks') and 'project_creation_in_progress' not in context
def project_create(self, cr, uid, analytic_account_id, vals, context=None):
'''
        This function is called at the time of analytic account creation and is used to create a project automatically linked to it if the conditions are met.
'''
project_pool = self.pool.get('project.project')
project_id = project_pool.search(cr, uid, [('analytic_account_id','=', analytic_account_id)])
if not project_id and self._trigger_project_creation(cr, uid, vals, context=context):
project_values = {
'name': vals.get('name'),
'analytic_account_id': analytic_account_id,
'type': vals.get('type','contract'),
}
return project_pool.create(cr, uid, project_values, context=context)
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
vals['child_ids'] = []
analytic_account_id = super(account_analytic_account, self).create(cr, uid, vals, context=context)
self.project_create(cr, uid, analytic_account_id, vals, context=context)
return analytic_account_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
vals_for_project = vals.copy()
for account in self.browse(cr, uid, ids, context=context):
if not vals.get('name'):
vals_for_project['name'] = account.name
if not vals.get('type'):
vals_for_project['type'] = account.type
self.project_create(cr, uid, account.id, vals_for_project, context=context)
return super(account_analytic_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
proj_ids = self.pool['project.project'].search(cr, uid, [('analytic_account_id', 'in', ids)])
has_tasks = self.pool['project.task'].search(cr, uid, [('project_id', 'in', proj_ids)], count=True, context=context)
if has_tasks:
raise osv.except_osv(_('Warning!'), _('Please remove existing tasks in the project linked to the accounts you want to delete.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if context is None:
context={}
if context.get('current_model') == 'project.project':
project_ids = self.search(cr, uid, args + [('name', operator, name)], limit=limit, context=context)
return self.name_get(cr, uid, project_ids, context=context)
return super(account_analytic_account, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)
class project_project(osv.osv):
_inherit = 'project.project'
_defaults = {
'use_tasks': True
}
class project_task_history(osv.osv):
"""
Tasks History, used for cumulative flow charts (Lean/Agile)
"""
_name = 'project.task.history'
_description = 'History of Tasks'
_rec_name = 'task_id'
_log_access = False
def _get_date(self, cr, uid, ids, name, arg, context=None):
result = {}
for history in self.browse(cr, uid, ids, context=context):
if history.type_id and history.type_id.fold:
result[history.id] = history.date
continue
cr.execute('''select
date
from
project_task_history
where
task_id=%s and
id>%s
order by id limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
result[history.id] = res and res[0] or False
return result
def _get_related_date(self, cr, uid, ids, context=None):
result = []
for history in self.browse(cr, uid, ids, context=context):
cr.execute('''select
id
from
project_task_history
where
task_id=%s and
id<%s
order by id desc limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
if res:
result.append(res[0])
return result
_columns = {
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
'type_id': fields.many2one('project.task.type', 'Stage'),
'kanban_state': fields.selection([('normal', 'Normal'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], 'Kanban State', required=False),
'date': fields.date('Date', select=True),
'end_date': fields.function(_get_date, string='End Date', type="date", store={
'project.task.history': (_get_related_date, None, 20)
}),
'remaining_hours': fields.float('Remaining Time', digits=(16, 2)),
'planned_hours': fields.float('Planned Time', digits=(16, 2)),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'date': fields.date.context_today,
}
class project_task_history_cumulative(osv.osv):
_name = 'project.task.history.cumulative'
_table = 'project_task_history_cumulative'
_inherit = 'project.task.history'
_auto = False
_columns = {
'end_date': fields.date('End Date'),
'nbr_tasks': fields.integer('# of Tasks', readonly=True),
'project_id': fields.many2one('project.project', 'Project'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_task_history_cumulative')
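        # the view expands every history record into one row per day of its
        # validity interval (generate_series), which is what cumulative flow
        # charts plot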
cr.execute(""" CREATE VIEW project_task_history_cumulative AS (
SELECT
history.date::varchar||'-'||history.history_id::varchar AS id,
history.date AS end_date,
*
FROM (
SELECT
h.id AS history_id,
h.date+generate_series(0, CAST((coalesce(h.end_date, DATE 'tomorrow')::date - h.date) AS integer)-1) AS date,
h.task_id, h.type_id, h.user_id, h.kanban_state,
count(h.task_id) as nbr_tasks,
greatest(h.remaining_hours, 1) AS remaining_hours, greatest(h.planned_hours, 1) AS planned_hours,
t.project_id
FROM
project_task_history AS h
JOIN project_task AS t ON (h.task_id = t.id)
GROUP BY
h.id,
h.task_id,
t.project_id
) AS history
)
""")
class project_category(osv.osv):
""" Category of project's task (or issue) """
_name = "project.category"
_description = "Category of project's task, issue, ..."
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
|
from nextcloudappstore.settings.base import *
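# Development-only settings: DEBUG enabled, console email backend,
# placeholder reCAPTCHA keys, and the Django debug toolbar switched on.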
DEBUG = True
SECRET_KEY = 'secret'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
RECAPTCHA_PUBLIC_KEY = '<RECAPTCHA_PUBLIC_KEY>'
RECAPTCHA_PRIVATE_KEY = '<RECAPTCHA_PRIVATE_KEY>'
EMAIL_HOST = 'localhost'
DEFAULT_FROM_EMAIL = 'Nextcloud App Store <appstore@nextcloud.com>'
INSTALLED_APPS.append('debug_toolbar')
MIDDLEWARE_CLASSES.append('debug_toolbar.middleware.DebugToolbarMiddleware')
INTERNAL_IPS = ('127.0.0.1',)
|
import csv
from pppcemr.models import *
# Import NDC package data: create one DrugPackage record per tab-separated
# row, and close the file when the import finishes.
with open('ndc_database_2016_02_21/package.txt') as package_file:
    dataReader = csv.reader(package_file, delimiter='\t')
    for row in dataReader:
        package = DrugPackage()
        package.product_id = row[0]
        package.product_ndc = row[1]
        package.package_ndc = row[2]
        package.package_name = row[3]
        package.save()
|
from openerp import models, fields
class PassangerType(models.Model):
_name = "fleet.work_order_passanger_type"
_description = "Passanger Type"
name = fields.Char(
string="Name",
required=True,
)
code = fields.Char(
string="Code",
required=True,
)
active = fields.Boolean(
string="Active",
default=True,
)
note = fields.Text(
string="Notes",
)
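# Illustrative usage from another model or a test (hypothetical values):
#   self.env['fleet.work_order_passanger_type'].create(
#       {'name': 'Adult', 'code': 'ADT'})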
|
"""change return type of functions
Revision ID: d2d2f196738b
Revises:
Create Date: 2017-09-03 16:13:31.799366
"""
from alembic import op
import sqlalchemy as sa
revision = 'd2d2f196738b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute('drop function if exists update_departments(jsonb)')
conn.execute(sa.text("""create or replace function update_departments(_university_id numeric, _json jsonb)
returns numeric as $func$
declare
_s_id numeric;
_d_id numeric;
_count numeric := 0;
_abbr varchar;
_name varchar;
_school varchar;
begin
for _abbr, _name, _school in
select
department ->> 'value' as _abbr,
(regexp_matches(department ->> 'label', '.+(?=\()')) [1] as _name,
department ->> 'school' as _school
from jsonb_array_elements(_json -> 'departments') department
loop
-- get the school id
select id
into _s_id
from schools
where abbreviation = _school and university_id = _university_id;
-- get the department id if it exists
select id
into _d_id
from departments
where school_id = _s_id and abbreviation = _abbr;
-- if department does not exist, create it
if _d_id is null
then
insert into departments (abbreviation, name, school_id) values (_abbr, _name, _s_id);
end if;
_count = _count + 1;
end loop;
return _count;
end;
$func$ language plpgsql;"""))
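    # Illustrative call of the new two-argument signature (made-up id and
    # payload):
    #   select update_departments(1, '{"departments": [{"value": "CS",
    #       "label": "Computer Science (CS)", "school": "ENG"}]}'::jsonb);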
def downgrade():
pass
|
from pyledger.server import run
from pyledger.server.contract import SimpleContract
class Hello(SimpleContract):
counter = 0
def say_hello(self, name: str):
if name == 'Guillen':
            raise Exception('You probably misspelled Guillem')
self.counter += 1
return 'Hello {} # {}'.format(name, self.counter)
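# run() starts the pyledger server with this contract registered; counter is
# per-contract state, so the greeting number grows across calls.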
run(Hello)
|
__metaclass__ = type
__all__ = [
'InformationTypePortletMixin',
]
from lazr.restful.interfaces import IJSONRequestCache
from lp.app.enums import PRIVATE_INFORMATION_TYPES
from lp.app.interfaces.informationtype import IInformationType
from lp.app.utilities import json_dump_information_types
class InformationTypePortletMixin:
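    """Mixin exposing the information type (privacy level) of the view's
    context to templates and to the JSON request cache."""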
def _getContext(self):
information_typed = IInformationType(self.context, None)
if information_typed is None:
return self.context
return information_typed
def initialize(self):
context = self._getContext()
if IInformationType.providedBy(context):
cache = IJSONRequestCache(self.request)
json_dump_information_types(
cache,
context.getAllowedInformationTypes(self.user))
@property
def information_type(self):
context = self._getContext()
if IInformationType.providedBy(context):
return context.information_type.title
return None
@property
def information_type_description(self):
context = self._getContext()
if IInformationType.providedBy(context):
return context.information_type.description
return None
@property
def information_type_css(self):
context = self._getContext()
if (IInformationType.providedBy(context) and
context.information_type in PRIVATE_INFORMATION_TYPES):
return 'sprite private'
else:
return 'sprite public'
@property
def privacy_portlet_css(self):
context = self._getContext()
if (IInformationType.providedBy(context) and
context.information_type in PRIVATE_INFORMATION_TYPES):
return 'portlet private'
else:
return 'portlet public'
|
from . import import_statement
|
from openerp.osv import orm, fields
class oehealth_annotation(orm.Model):
_inherit = 'oehealth.annotation'
_columns = {
'person_id' : fields.many2one ('oehealth.person', 'Person'),
}
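# Legacy-style explicit instantiation; orm.Model subclasses auto-register
# since OpenERP 6.1, so this call is redundant but harmless.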
oehealth_annotation()
|
import pytest
import pandas as pd
import numpy as np
import os
from re import sub
from urbanaccess.gtfs import utils_format
from urbanaccess import config
@pytest.fixture
def folder_feed_1():
return r'/data/gtfs_feeds/agency_a'
@pytest.fixture
def folder_feed_2():
return r'/data/gtfs_feeds/agency_b'
@pytest.fixture
def folder_feed_4():
return r'/data/gtfs_feeds/city'
@pytest.fixture()
def agency_txt_w_invalid_values(tmpdir, agency_feed_1):
# create df with col names with spaces
raw_df = agency_feed_1.rename(
columns={'agency_phone': ' agency_phone',
'agency_timezone': 'agency_timezone '})
feed_path = os.path.join(tmpdir.strpath, 'test_agency_invalid_values')
os.makedirs(feed_path)
print('writing test data to dir: {}'.format(feed_path))
feed_file_name = '{}.txt'.format('agency')
raw_df.to_csv(os.path.join(feed_path, feed_file_name), index=False)
return raw_df, agency_feed_1, feed_path
@pytest.fixture()
def stops_txt_w_invalid_values(tmpdir, stops_feed_1):
# create df with col names with spaces
raw_df = stops_feed_1.rename(
columns={'stop_name': ' stop_name',
'location_type': 'location_type '})
feed_path = os.path.join(tmpdir.strpath, 'test_stops_invalid_values')
os.makedirs(feed_path)
print('writing test data to dir: {}'.format(feed_path))
feed_file_name = '{}.txt'.format('stops')
raw_df.to_csv(os.path.join(feed_path, feed_file_name), index=False)
return raw_df, stops_feed_1, feed_path
@pytest.fixture()
def routes_txt_w_invalid_values(tmpdir, routes_feed_1):
# create df with col names with spaces
raw_df = routes_feed_1.rename(
columns={'route_short_name': ' route_short_name',
'route_long_name': 'route_long_name '})
feed_path = os.path.join(tmpdir.strpath, 'test_routes_invalid_values')
os.makedirs(feed_path)
print('writing test data to dir: {}'.format(feed_path))
feed_file_name = '{}.txt'.format('routes')
raw_df.to_csv(os.path.join(feed_path, feed_file_name), index=False)
return raw_df, routes_feed_1, feed_path
@pytest.fixture()
def stop_times_txt_w_invalid_values(tmpdir, stop_times_feed_1):
# create df with col names with spaces
raw_df = stop_times_feed_1.rename(
columns={'pickup_type': ' pickup_type',
'drop_off_type': 'drop_off_type '})
feed_path = os.path.join(tmpdir.strpath, 'test_stop_times_invalid_values')
os.makedirs(feed_path)
print('writing test data to dir: {}'.format(feed_path))
feed_file_name = '{}.txt'.format('stop_times')
raw_df.to_csv(os.path.join(feed_path, feed_file_name), index=False)
return raw_df, stop_times_feed_1, feed_path
@pytest.fixture()
def calendar_txt_w_invalid_values(tmpdir, calendar_feed_1):
# create df with col names with spaces
raw_df = calendar_feed_1.rename(
columns={'monday': ' monday',
'tuesday': 'tuesday '})
feed_path = os.path.join(tmpdir.strpath, 'test_calendar_invalid_values')
os.makedirs(feed_path)
print('writing test data to dir: {}'.format(feed_path))
feed_file_name = '{}.txt'.format('calendar')
raw_df.to_csv(os.path.join(feed_path, feed_file_name), index=False)
return raw_df, calendar_feed_1, feed_path
@pytest.fixture()
def calendar_dates_txt_w_invalid_values(tmpdir, calendar_dates_feed_1):
# create df with col names with spaces
raw_df = calendar_dates_feed_1.rename(
columns={'exception_type': ' exception_type',
'schedule_type': 'schedule_type '})
feed_path = os.path.join(tmpdir.strpath,
'test_calendar_dates_invalid_values')
os.makedirs(feed_path)
print('writing test data to dir: {}'.format(feed_path))
feed_file_name = '{}.txt'.format('calendar_dates')
raw_df.to_csv(os.path.join(feed_path, feed_file_name), index=False)
return raw_df, calendar_dates_feed_1, feed_path
@pytest.fixture()
def trips_txt_w_invalid_values(tmpdir):
# create df with ints instead of str, col names with spaces, and
# values with spaces before and after the value for relational columns
data = {
'route_id': ['10-101', '10-101', '10-101', '10-101',
111, '00111', '12-101', '12-101',
'13-101', '13-101'],
'trip_id': ['a1 ', ' a2', ' a3 ', 'a 4',
'b1', 'b2', 'c1', 'c2', 'd1', 'd2'],
'service_id ': ['weekday -1', 'weekday-1 ', 'weekday-1',
'weekday-1', 'weekday-2', 'weekday-2',
'weekday-3', 'weekday-3', 'weekend-1', 'weekend-1'],
' direction_id ': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
'wheelchair_ accessible': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
'bikes_allowed': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
'shape_id': ['10_0_40', '10_0_40', '10_0_41', '10_0_41',
'11_0_00', '11_0_00', '12_0_40', '12_0_40',
'13_0_40', '13_0_40']
}
index = range(10)
raw_df = pd.DataFrame(data, index)
feed_path = os.path.join(tmpdir.strpath, 'test_trips_invalid_values')
os.makedirs(feed_path)
print('writing test data to dir: {}'.format(feed_path))
feed_file_name = '{}.txt'.format('trips')
raw_df.to_csv(os.path.join(feed_path, feed_file_name), index=False)
data = {
'route_id': ['10-101', '10-101', '10-101', '10-101',
'111', '00111', '12-101', '12-101', '13-101', '13-101'],
'trip_id': ['a1', 'a2', 'a3', 'a 4',
'b1', 'b2', 'c1', 'c2', 'd1', 'd2'],
'service_id': ['weekday -1', 'weekday-1', 'weekday-1',
'weekday-1', 'weekday-2', 'weekday-2',
'weekday-3', 'weekday-3', 'weekend-1', 'weekend-1'],
'direction_id': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
'wheelchair_ accessible': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
'bikes_allowed': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
'shape_id': ['10_0_40', '10_0_40', '10_0_41', '10_0_41',
'11_0_00', '11_0_00', '12_0_40', '12_0_40',
'13_0_40', '13_0_40']
}
index = range(10)
expected_df = pd.DataFrame(data, index)
return raw_df, expected_df, feed_path
@pytest.fixture()
def trips_txt_w_missing_req_col(tmpdir):
# remove the required trip and service ID cols
data = {
'route_id': ['10-101', '10-101', '10-101', '10-101',
'111', '00111', '12-101', '12-101', '13-101', '13-101'],
'direction_id': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
'wheelchair_ accessible': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
'bikes_allowed': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
'shape_id': ['10_0_40', '10_0_40', '10_0_41', '10_0_41',
'11_0_00', '11_0_00', '12_0_40', '12_0_40',
'13_0_40', '13_0_40']
}
index = range(10)
raw_df = pd.DataFrame(data, index)
feed_path = os.path.join(tmpdir.strpath, 'test_trips_invalid_values')
os.makedirs(feed_path)
print('writing test data to dir: {}'.format(feed_path))
feed_file_name = '{}.txt'.format('trips')
raw_df.to_csv(os.path.join(feed_path, feed_file_name), index=False)
return raw_df, feed_path
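# NOTE: the unique_agency_id assertions repeated in the tests below each
# rebuild the expected id from agency_name (whitespace to underscores,
# '&' to 'and', lowercased). A hypothetical helper like this could factor
# that pattern out:
def _expected_unique_agency_id(agency_df):
    # Mirror the agency name normalization applied by utils_format.
    col = agency_df['agency_name'].astype(str)
    col_snake_case = col.str.replace(r'\s+', '_')
    col_snake_no_amps = col_snake_case.str.replace('&', 'and')
    return col_snake_no_amps.str.lower()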
def test_calendar_dates_agencyid_feed_1(calendar_dates_feed_1,
routes_feed_1,
trips_feed_1,
agency_feed_1,
folder_feed_1):
data = {'unique_agency_id': ['agency_a_city_a'] * 4}
index = range(4)
expected_result = pd.concat([calendar_dates_feed_1,
pd.DataFrame(data, index)],
axis=1)
result_df = utils_format._calendar_dates_agencyid(
calendar_dates_df=calendar_dates_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
agency_df=agency_feed_1,
feed_folder=folder_feed_1)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = calendar_dates_feed_1.columns
assert calendar_dates_feed_1.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_1['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_1['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_1['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_calendar_dates_agencyid_feed_2(calendar_dates_feed_2,
routes_feed_2,
trips_feed_2,
agency_feed_2,
folder_feed_2):
data = {'unique_agency_id': ['agency_b_district_1',
'agency_b_district_1',
'agency_b_district_2',
'agency_b_district_2']}
index = range(4)
expected_result = pd.concat([calendar_dates_feed_2,
pd.DataFrame(data, index)],
axis=1)
result_df = utils_format._calendar_dates_agencyid(
calendar_dates_df=calendar_dates_feed_2,
routes_df=routes_feed_2,
trips_df=trips_feed_2,
agency_df=agency_feed_2,
feed_folder=folder_feed_2)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = calendar_dates_feed_2.columns
assert calendar_dates_feed_2.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_2['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_2['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_2['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_calendar_dates_agencyid_feed_4(calendar_dates_feed_4,
routes_feed_4,
trips_feed_4,
agency_feed_4,
folder_feed_4):
data = {'service_id': ['wk-1'] * 3,
'date': [20161224] * 3,
'exception_type': [1] * 3,
'unique_agency_id': ['agency_1_bus', 'agency_2_rail',
'agency_3_metro']}
index = range(3)
expected_result = pd.DataFrame(data, index)
result_df = utils_format._calendar_dates_agencyid(
calendar_dates_df=calendar_dates_feed_4,
routes_df=routes_feed_4,
trips_df=trips_feed_4,
agency_df=agency_feed_4,
feed_folder=folder_feed_4)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = calendar_dates_feed_4.columns
dedup_df = result_df.drop_duplicates(subset='service_id',
keep='first',
inplace=False)
dedup_df.reset_index(inplace=True)
assert calendar_dates_feed_4.equals(dedup_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_4['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_4['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_4['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_calendar_agencyid_feed_1(calendar_feed_1,
routes_feed_1,
trips_feed_1,
agency_feed_1,
folder_feed_1):
data = {'unique_agency_id': ['agency_a_city_a'] * 4}
index = range(4)
expected_result = pd.concat([calendar_feed_1,
pd.DataFrame(data, index)],
axis=1)
result_df = utils_format._calendar_agencyid(calendar_df=calendar_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
agency_df=agency_feed_1,
feed_folder=folder_feed_1)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = calendar_feed_1.columns
assert calendar_feed_1.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_1['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_1['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_1['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_calendar_agencyid_feed_2(calendar_feed_2,
routes_feed_2,
trips_feed_2,
agency_feed_2,
folder_feed_2):
data = {'unique_agency_id': ['agency_b_district_1',
'agency_b_district_1',
'agency_b_district_2',
'agency_b_district_2']}
index = range(4)
expected_result = pd.concat([calendar_feed_2,
pd.DataFrame(data, index)],
axis=1)
result_df = utils_format._calendar_agencyid(calendar_df=calendar_feed_2,
routes_df=routes_feed_2,
trips_df=trips_feed_2,
agency_df=agency_feed_2,
feed_folder=folder_feed_2)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = calendar_feed_2.columns
assert calendar_feed_2.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_2['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_2['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_2['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_calendar_agencyid_feed_4(calendar_feed_4,
routes_feed_4,
trips_feed_4,
agency_feed_4,
folder_feed_4):
data = {'service_id': ['wk-1'] * 3,
'monday': [1] * 3,
'tuesday': [1] * 3,
'wednesday': [1] * 3,
'thursday': [1] * 3,
'friday': [1] * 3,
'saturday': [0] * 3,
'sunday': [0] * 3,
'start_date': [20161224] * 3,
'end_date': [20170318] * 3,
'unique_agency_id': ['agency_1_bus', 'agency_2_rail',
'agency_3_metro']}
index = range(3)
expected_result = pd.DataFrame(data, index)
result_df = utils_format._calendar_agencyid(calendar_df=calendar_feed_4,
routes_df=routes_feed_4,
trips_df=trips_feed_4,
agency_df=agency_feed_4,
feed_folder=folder_feed_4)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = calendar_feed_4.columns
dedup_df = result_df.drop_duplicates(subset='service_id',
keep='first',
inplace=False)
dedup_df.reset_index(inplace=True)
assert calendar_feed_4.equals(dedup_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_4['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_4['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_4['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_trips_agencyid_feed_1(trips_feed_1,
routes_feed_1,
agency_feed_1):
data = {'unique_agency_id': ['agency_a_city_a'] * 10}
index = range(10)
expected_result = pd.concat([trips_feed_1,
pd.DataFrame(data, index)],
axis=1)
result_df = utils_format._trips_agencyid(trips_df=trips_feed_1,
routes_df=routes_feed_1,
agency_df=agency_feed_1)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = trips_feed_1.columns
assert trips_feed_1.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_1['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_1['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_1['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_trips_agencyid_feed_2(trips_feed_2,
routes_feed_2,
agency_feed_2):
data = {'unique_agency_id': ['agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2']}
index = range(10)
expected_result = pd.concat([trips_feed_2,
pd.DataFrame(data, index)],
axis=1)
result_df = utils_format._trips_agencyid(trips_df=trips_feed_2,
routes_df=routes_feed_2,
agency_df=agency_feed_2)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = trips_feed_2.columns
assert trips_feed_2.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_2['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_2['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_2['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_trips_agencyid_feed_4(trips_feed_4,
routes_feed_4,
agency_feed_4):
data = {
'unique_agency_id': ['agency_1_bus', 'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus', 'agency_1_bus',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_3_metro', 'agency_3_metro',
'agency_3_metro', 'agency_3_metro']}
index = range(14)
expected_result = pd.concat(
[trips_feed_4, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._trips_agencyid(
trips_df=trips_feed_4, routes_df=routes_feed_4,
agency_df=agency_feed_4)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = trips_feed_4.columns
assert trips_feed_4.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_4['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_4['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_4['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_stops_agencyid_feed_1(stops_feed_1,
trips_feed_1,
routes_feed_1,
stop_times_feed_1,
agency_feed_1,
folder_feed_1):
data = {'unique_agency_id': ['agency_a_city_a'] * 9}
index = range(9)
expected_result = pd.concat(
[stops_feed_1, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._stops_agencyid(stops_df=stops_feed_1,
trips_df=trips_feed_1,
routes_df=routes_feed_1,
stop_times_df=stop_times_feed_1,
agency_df=agency_feed_1,
feed_folder=folder_feed_1)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = stops_feed_1.columns
assert stops_feed_1.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_1['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_1['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_1['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_stops_agencyid_feed_2(stops_feed_2,
trips_feed_2,
routes_feed_2,
stop_times_feed_2,
agency_feed_2,
folder_feed_2):
data = {'unique_agency_id': ['agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2']}
index = range(16)
expected_result = pd.concat(
[stops_feed_2, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._stops_agencyid(stops_df=stops_feed_2,
trips_df=trips_feed_2,
routes_df=routes_feed_2,
stop_times_df=stop_times_feed_2,
agency_df=agency_feed_2,
feed_folder=folder_feed_2)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = stops_feed_2.columns
assert stops_feed_2.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_2['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_2['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_2['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_stops_agencyid_feed_4(stops_feed_4,
trips_feed_4,
routes_feed_4,
stop_times_feed_4,
agency_feed_4,
folder_feed_4):
data = {
'stop_id': ['70', '70', '71', '71', '72', '72', '73', '73', '74',
'74', '75', '75', '76', '76', '77', '77', '78', '78'],
'stop_name': ['station 1', 'station 1', 'station 2', 'station 2',
'station 3', 'station 3', 'station 4', 'station 4',
'station 5', 'station 5', 'station 6', 'station 6',
'station 7', 'station 7', 'station 8', 'station 8',
'station 9', 'station 9'],
'stop_lat': [20.797484, 20.797484, 20.774963, 20.774963, 20.803664,
20.803664, 20.80787, 20.80787, 20.828415, 20.828415,
20.844601, 20.844601, 20.664174, 20.664174, 20.591208,
20.591208, 20.905628, 20.905628],
'stop_lon': [-100.265609, -100.265609, -100.224274, -100.224274,
-100.271604, -100.271604, -100.269029, -100.269029,
-100.267227, -100.267227, -100.251793, -100.251793,
-100.444116, -100.444116, -100.017867, -100.017867,
-100.067423, -100.067423],
'unique_agency_id': ['agency_1_bus', 'agency_2_rail', 'agency_1_bus',
'agency_2_rail', 'agency_1_bus', 'agency_2_rail',
'agency_1_bus', 'agency_2_rail', 'agency_1_bus',
'agency_2_rail', 'agency_1_bus', 'agency_2_rail',
'agency_2_rail', 'agency_3_metro',
'agency_2_rail', 'agency_3_metro',
'agency_2_rail', 'agency_3_metro']
}
index = range(18)
expected_result = pd.DataFrame(data, index)
result_df = utils_format._stops_agencyid(stops_df=stops_feed_4,
trips_df=trips_feed_4,
routes_df=routes_feed_4,
stop_times_df=stop_times_feed_4,
agency_df=agency_feed_4,
feed_folder=folder_feed_4)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = stops_feed_4.columns
dedup_df = result_df.drop_duplicates(subset='stop_id',
keep='first',
inplace=False)
dedup_df.reset_index(inplace=True)
assert stops_feed_4.equals(dedup_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_4['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_4['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_4['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_routes_agencyid_feed_1(routes_feed_1,
agency_feed_1):
data = {'unique_agency_id': ['agency_a_city_a'] * 4}
index = range(4)
expected_result = pd.concat(
[routes_feed_1, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._routes_agencyid(
routes_df=routes_feed_1, agency_df=agency_feed_1)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = routes_feed_1.columns
assert routes_feed_1.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_1['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_1['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_1['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_routes_agencyid_feed_2(routes_feed_2,
agency_feed_2):
data = {'unique_agency_id': ['agency_b_district_1', 'agency_b_district_1',
'agency_b_district_2', 'agency_b_district_2']}
index = range(4)
expected_result = pd.concat(
[routes_feed_2, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._routes_agencyid(
routes_df=routes_feed_2, agency_df=agency_feed_2)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = routes_feed_2.columns
assert routes_feed_2.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_2['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_2['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_2['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_routes_agencyid_feed_4(routes_feed_4,
agency_feed_4):
data = {'unique_agency_id': ['agency_1_bus', 'agency_1_bus',
'agency_2_rail', 'agency_2_rail',
'agency_3_metro', 'agency_3_metro']}
index = range(6)
expected_result = pd.concat(
[routes_feed_4, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._routes_agencyid(
routes_df=routes_feed_4, agency_df=agency_feed_4)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = routes_feed_4.columns
assert routes_feed_4.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_4['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_4['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_4['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_stop_times_agencyid_feed_1(stop_times_feed_1,
routes_feed_1,
trips_feed_1,
agency_feed_1):
data = {'unique_agency_id': ['agency_a_city_a'] * 54}
index = range(54)
expected_result = pd.concat(
[stop_times_feed_1, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._stop_times_agencyid(
stop_times_df=stop_times_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
agency_df=agency_feed_1)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = stop_times_feed_1.columns
assert stop_times_feed_1.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_1['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_1['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_1['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_stop_times_agencyid_feed_2(stop_times_feed_2,
routes_feed_2,
trips_feed_2,
agency_feed_2):
data = {'unique_agency_id': ['agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_1', 'agency_b_district_1',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2',
'agency_b_district_2', 'agency_b_district_2']}
index = range(54)
expected_result = pd.concat(
[stop_times_feed_2, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._stop_times_agencyid(
stop_times_df=stop_times_feed_2,
routes_df=routes_feed_2,
trips_df=trips_feed_2,
agency_df=agency_feed_2)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = stop_times_feed_2.columns
assert stop_times_feed_2.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_2['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_2['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_2['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_stop_times_agencyid_feed_4(stop_times_feed_4,
routes_feed_4,
trips_feed_4,
agency_feed_4):
data = {'unique_agency_id': ['agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_1_bus', 'agency_1_bus',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_2_rail', 'agency_2_rail',
'agency_3_metro', 'agency_3_metro',
'agency_3_metro', 'agency_3_metro',
'agency_3_metro', 'agency_3_metro',
'agency_3_metro', 'agency_3_metro',
'agency_3_metro', 'agency_3_metro',
'agency_3_metro', 'agency_3_metro']}
index = range(66)
expected_result = pd.concat(
[stop_times_feed_4, pd.DataFrame(data, index)], axis=1)
result_df = utils_format._stop_times_agencyid(
stop_times_df=stop_times_feed_4,
routes_df=routes_feed_4,
trips_df=trips_feed_4,
agency_df=agency_feed_4)
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = stop_times_feed_4.columns
assert stop_times_feed_4.equals(result_df[original_cols])
# test that output df is identical to expected df
# re-sort cols so they are in same order for test
expected_result.sort_index(axis=1, inplace=True)
result_df.sort_index(axis=1, inplace=True)
assert expected_result.equals(result_df)
# test that all output unique_agency_id values match all agency_name
# values in agency file
col = agency_feed_4['agency_name'].astype(str)
col_snake_case = col.str.replace(r'\s+', '_')
col_snake_no_amps = col_snake_case.str.replace('&', 'and')
agency_feed_4['unique_agency_id'] = col_snake_no_amps.str.lower()
assert all(agency_feed_4['unique_agency_id'].unique() == result_df[
'unique_agency_id'].unique())
def test_add_unique_gtfsfeed_id(stops_feed_1, routes_feed_1, trips_feed_1,
stop_times_feed_1, calendar_feed_1,
calendar_dates_feed_1, folder_feed_1):
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_gtfsfeed_id(
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=folder_feed_1,
feed_number=1)
df_dict = {'stops': [stops_df, stops_feed_1],
'routes': [routes_df, routes_feed_1],
'trips': [trips_df, trips_feed_1],
'stop_times': [stop_times_df, stop_times_feed_1],
'calendar': [calendar_df, calendar_feed_1],
'calendar_dates': [calendar_dates_df, calendar_dates_feed_1]}
feed_folder = sub(r'\s+', '_', os.path.split(folder_feed_1)[1]).replace(
'&', 'and').lower()
unique_feed_id = '_'.join([feed_folder, str(1)])
for df in df_dict.keys():
# create new unique_feed_id column based on the name of the feed folder
assert df_dict[df][0]['unique_feed_id'].unique() == unique_feed_id
# test that cols not touched by function in output df are
# identical to the cols in input df
original_cols = df_dict[df][1].columns
assert df_dict[df][1].equals(df_dict[df][0][original_cols])
def test_remove_whitespace_from_values(trips_txt_w_invalid_values):
raw_df, expected_df, feed_path = trips_txt_w_invalid_values
# convert the one int record to str to match dtype of what would be read by
# read_gtfs function
raw_df['route_id'] = raw_df['route_id'].astype('str')
# test when col_list is used
result = utils_format._remove_whitespace(
df=raw_df,
textfile='trips.txt',
col_list=['trip_id', 'service_id', 'route_id'])
# re-sort cols so they are in same order for test
expected_df.sort_index(axis=1, inplace=True)
result.sort_index(axis=1, inplace=True)
assert result.equals(expected_df)
# test when no col_list is used
result_no_col_list = utils_format._remove_whitespace(
df=raw_df,
textfile='trips.txt',
col_list=None)
# spaces in cols should be removed
assert list(result_no_col_list.columns) == list(expected_df.columns)
# spaces in values should remain
assert result_no_col_list['trip_id'].str.len().sum() == raw_df[
'trip_id'].str.len().sum()
def test_read_gtfs_trips_w_invalid_values(trips_txt_w_invalid_values):
raw_df, expected_df, feed_path = trips_txt_w_invalid_values
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='trips.txt')
# re-sort cols so they are in same order for test
expected_df.sort_index(axis=1, inplace=True)
result.sort_index(axis=1, inplace=True)
assert result.equals(expected_df)
def test_read_gtfs_agency(agency_txt_w_invalid_values):
raw_df, expected_df, feed_path = agency_txt_w_invalid_values
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='agency.txt')
assert result.equals(expected_df)
def test_read_gtfs_stops(stops_txt_w_invalid_values):
raw_df, expected_df, feed_path = stops_txt_w_invalid_values
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='stops.txt')
# ensure lat long precision is the same for equals()
result['stop_lat'] = result['stop_lat'].round(6)
result['stop_lon'] = result['stop_lon'].round(6)
expected_df['stop_lat'] = expected_df['stop_lat'].round(6)
expected_df['stop_lon'] = expected_df['stop_lon'].round(6)
assert result.equals(expected_df)
def test_read_gtfs_routes(routes_txt_w_invalid_values):
raw_df, expected_df, feed_path = routes_txt_w_invalid_values
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='routes.txt')
assert result.equals(expected_df)
def test_read_gtfs_trips(trips_txt_w_invalid_values):
raw_df, expected_df, feed_path = trips_txt_w_invalid_values
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='trips.txt')
assert result.equals(expected_df)
def test_read_gtfs_stop_times(stop_times_txt_w_invalid_values):
raw_df, expected_df, feed_path = stop_times_txt_w_invalid_values
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='stop_times.txt')
assert result.equals(expected_df)
def test_read_gtfs_calendar(calendar_txt_w_invalid_values):
raw_df, expected_df, feed_path = calendar_txt_w_invalid_values
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='calendar.txt')
assert result.equals(expected_df)
def test_read_gtfs_calendar_no_data(
capsys,
agency_a_feed_on_disk_w_calendar_and_calendar_dates_empty_txt):
feed_path = agency_a_feed_on_disk_w_calendar_and_calendar_dates_empty_txt
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='calendar.txt')
# check that expected print prints
captured = capsys.readouterr()
assert 'service_ids' in captured.out
def test_read_gtfs_calendar_dates(calendar_dates_txt_w_invalid_values):
raw_df, expected_df, feed_path = calendar_dates_txt_w_invalid_values
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='calendar_dates.txt')
assert result.equals(expected_df)
def test_read_gtfs_calendar_dates_no_data(
capsys,
agency_a_feed_on_disk_w_calendar_and_calendar_dates_empty_txt):
feed_path = agency_a_feed_on_disk_w_calendar_and_calendar_dates_empty_txt
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='calendar_dates.txt')
# check that expected print prints
captured = capsys.readouterr()
assert 'service_ids' in captured.out
def test_read_gtfs_file_general_errors(
agency_a_feed_on_disk_w_calendar_and_calendar_dates_empty_txt,
trips_txt_w_missing_req_col):
feed_path = agency_a_feed_on_disk_w_calendar_and_calendar_dates_empty_txt
with pytest.raises(ValueError) as excinfo:
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='random.txt')
expected_error = (
"random.txt is not a supported GTFS file. Supported files are: "
"['agency.txt', 'stops.txt', 'routes.txt', 'trips.txt', "
"'stop_times.txt', 'calendar.txt', 'calendar_dates.txt'].")
assert expected_error in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='trips.txt')
expected_error = 'trips.txt has no records. This file cannot be empty.'
assert expected_error in str(excinfo.value)
raw_df, feed_path = trips_txt_w_missing_req_col
with pytest.raises(ValueError) as excinfo:
result = utils_format._read_gtfs_file(
textfile_path=feed_path, textfile='trips.txt')
expected_error = ("trips.txt is missing required column(s): ['trip_id', "
"'service_id'].")
assert expected_error in str(excinfo.value)
def test_list_raw_txt_columns(calendar_dates_txt_w_invalid_values):
raw_df, expected_df, feed_path = calendar_dates_txt_w_invalid_values
file = os.path.join(feed_path, 'calendar_dates.txt')
result_cols = utils_format._list_raw_txt_columns(file)
assert isinstance(result_cols, list)
expected_cols = ['service_id', 'date',
' exception_type', 'schedule_type ']
assert sorted(result_cols) == sorted(expected_cols)
def test_timetoseconds(stop_times_feed_1):
# create 1 record that is missing a 0 in the hr position
stop_times_feed_1['departure_time'].iloc[8] = '1:20:00'
result = utils_format._timetoseconds(
stop_times_feed_1, time_cols=['departure_time'])
# check that 'departure_time_sec' was created and is not empty
assert 'departure_time_sec' in result.columns
assert result['departure_time_sec'].empty is False
# remainder of df should not have changed
assert result[stop_times_feed_1.columns].equals(stop_times_feed_1)
# ensure subset of values are correct
# check conversion of 06:15:00 is 22500.0 sec past midnight
assert result.iloc[0]['departure_time_sec'] == 22500.0
# check nans stay as nans
assert pd.isna(result.iloc[2]['departure_time_sec'])
    # check conversion of a time past midnight: 26:20:00 is 94800.0 sec past
    # midnight of the prior day
assert result.iloc[37]['departure_time_sec'] == 94800.0
# check that value that was missing a 0 was fixed and it was converted
# to seconds
assert result.iloc[8]['departure_time'] == '01:20:00'
assert result.iloc[8]['departure_time_sec'] == 4800.0
def test_timetoseconds_invalid_params(stop_times_feed_1):
with pytest.raises(ValueError) as excinfo:
result = utils_format._timetoseconds(
stop_times_feed_1, time_cols='departure_time')
expected_error = 'departure_time is not a list.'
assert expected_error in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
# create 1 record with invalid formatting
stop_times_feed_1['departure_time'].iloc[0] = '100:90:80'
result = utils_format._timetoseconds(
stop_times_feed_1, time_cols=['departure_time'])
expected_error = ('Check formatting of value: 100:90:80 as it is in '
'the incorrect format and should be 8 character '
'string 00:00:00.')
assert expected_error in str(excinfo.value)
def test_timetoseconds_invalid_data(capsys, stop_times_feed_1):
# add 2 records with invalid and large hr, min, sec values
stop_times_feed_1['departure_time'].iloc[0] = '60:90:80'
stop_times_feed_1['departure_time'].iloc[2] = '70:80:70'
result = utils_format._timetoseconds(
stop_times_feed_1, time_cols=['departure_time'])
# test that warning prints were printed
captured = capsys.readouterr()
assert 'hour value(s) are greater' in captured.out
assert 'minute value(s) are greater' in captured.out
assert 'second value(s) are greater' in captured.out
def test_add_txt_definitions_no_cols(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1):
# drop cols that would be used to map values in function for test
trips_feed_1.drop(columns=['bikes_allowed', 'wheelchair_accessible'],
inplace=True)
stop_times_feed_1.drop(columns=['pickup_type', 'drop_off_type'],
inplace=True)
routes_feed_1.drop(columns=['route_type'],
inplace=True)
stops_feed_1.drop(columns=['location_type', 'wheelchair_boarding'],
inplace=True)
stops_df, routes_df, stop_times_df, trips_df = \
utils_format._add_txt_definitions(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1)
# df should be identical to input
assert stops_df.equals(stops_feed_1)
assert routes_df.equals(routes_feed_1)
assert stop_times_df.equals(stop_times_feed_1)
assert trips_df.equals(trips_feed_1)
def test_add_txt_definitions_general(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1):
stops_df, routes_df, stop_times_df, trips_df = \
utils_format._add_txt_definitions(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1)
df_list = [stops_df, routes_df, stop_times_df, trips_df]
# check that there are generated '_desc' cols in result df and they are not
# empty or nulls
for df in df_list:
desc_cols = [col for col in df.columns if '_desc' in col]
assert len(desc_cols) > 0
for col in desc_cols:
assert df[col].empty is False
            assert df[col].isnull().values.any() == False # noqa
def test_add_txt_definitions_stops(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1):
result, routes_df, stop_times_df, trips_df = \
utils_format._add_txt_definitions(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1)
# check that expected columns were created and they are not empty
expected_cols = ['location_type_desc', 'wheelchair_boarding_desc']
for col in expected_cols:
assert col in result.columns
assert result[col].empty is False
# check that mapped values are what is expected
assert all(result['location_type_desc'].isin(
config._STOPS_LOCATION_TYPE_LOOKUP.values()))
assert all(result['wheelchair_boarding_desc'].isin(
config._STOPS_WHEELCHAIR_BOARDINGS.values()))
# check subset of values
assert result.iloc[0]['location_type'] == 1 and result.iloc[0][
'location_type_desc'] == 'station'
assert result.iloc[0]['wheelchair_boarding'] == 1 and result.iloc[0][
'wheelchair_boarding_desc'] == (
'At least some vehicles at this stop can be boarded by a '
'rider in a wheelchair')
# remainder of df should not have changed
assert result[stops_feed_1.columns].equals(stops_feed_1)
def test_add_txt_definitions_routes(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1):
stops_df, result, stop_times_df, trips_df = \
utils_format._add_txt_definitions(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1)
# check that expected columns were created and they are not empty
assert 'route_type_desc' in result.columns
assert result['route_type_desc'].empty is False
# check that mapped values are what is expected
assert all(result['route_type_desc'].isin(
config._ROUTES_MODE_TYPE_LOOKUP.values()))
assert result.iloc[0]['route_type'] == 3 and result.iloc[0][
'route_type_desc'] == 'Bus'
# remainder of df should not have changed
assert result[routes_feed_1.columns].equals(routes_feed_1)
def test_add_txt_definitions_stop_times(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1):
# add optional timepoint col that is missing with nans
stop_times_feed_1['timepoint'] = np.nan
stops_df, routes_df, result, trips_df = \
utils_format._add_txt_definitions(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1)
# check that expected columns were created and they are not empty
expected_cols = ['pickup_type_desc', 'drop_off_type_desc',
'timepoint_desc']
for col in expected_cols:
assert col in result.columns
assert result[col].empty is False
# check that mapped values are what is expected
assert all(result['pickup_type_desc'].isin(
config._STOP_TIMES_PICKUP_TYPE.values()))
assert all(result['drop_off_type_desc'].isin(
config._STOP_TIMES_DROP_OFF_TYPE.values()))
assert all(result['timepoint_desc'].isin(
config._STOP_TIMES_TIMEPOINT.values()))
# check subset of values
assert result.iloc[0]['pickup_type'] == 0 and result.iloc[0][
'pickup_type_desc'] == 'Regularly scheduled pickup'
assert result.iloc[0]['drop_off_type'] == 0 and result.iloc[0][
'drop_off_type_desc'] == 'Regularly Scheduled'
assert pd.isna(result.iloc[0]['timepoint']) and result.iloc[0][
'timepoint_desc'] == 'Times are considered exact'
# remainder of df should not have changed
assert result[stop_times_feed_1.columns].equals(stop_times_feed_1)
def test_add_txt_definitions_trips(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1):
stops_df, routes_df, stop_times_df, result = \
utils_format._add_txt_definitions(
stops_feed_1, routes_feed_1, stop_times_feed_1, trips_feed_1)
# check that expected columns were created and they are not empty
expected_cols = ['bikes_allowed_desc', 'wheelchair_accessible_desc']
for col in expected_cols:
assert col in result.columns
assert result[col].empty is False
# check that mapped values are what is expected
assert all(result['bikes_allowed_desc'].isin(
config._TRIPS_BIKES_ALLOWED.values()))
assert all(result['wheelchair_accessible_desc'].isin(
config._TRIPS_WHEELCHAIR_ACCESSIBLE.values()))
# check subset of values
assert result.iloc[0]['bikes_allowed'] == 1 and result.iloc[0][
'bikes_allowed_desc'] == (
'Vehicle being used on this particular trip can accommodate '
'at least one bicycle.')
assert result.iloc[0]['wheelchair_accessible'] == 1 and result.iloc[0][
'wheelchair_accessible_desc'] == (
'Vehicle being used on this particular trip can accommodate at least '
'one rider in a wheelchair')
# remainder of df should not have changed
assert result[trips_feed_1.columns].equals(trips_feed_1)
def test_apply_gtfs_definition(stops_feed_1):
desc_dict = {'location_type': config._STOPS_LOCATION_TYPE_LOOKUP,
'wheelchair_boarding': config._STOPS_WHEELCHAIR_BOARDINGS}
test_1 = stops_feed_1.copy()
result = utils_format._apply_gtfs_definition(test_1, desc_dict)
desc_cols = [col for col in result.columns if '_desc' in col]
assert len(desc_cols) > 0
for col in desc_cols:
assert result[col].empty is False
        assert result[col].isnull().values.any() == False # noqa
# test when there is mismatch between value in table and value mapping
# config
# add unrecognized value to df
test_2 = stops_feed_1.copy()
test_2['location_type'].loc[0:1] = 40
# test with nans
test_2['location_type'].loc[2:3] = np.nan
result = utils_format._apply_gtfs_definition(test_2, desc_dict)
desc_cols = [col for col in result.columns if '_desc' in col]
assert len(desc_cols) > 0
for col in desc_cols:
assert result[col].empty is False
# location_type that was 40 we expect to be None given there is no
# lookup value for it
assert result.iloc[0]['location_type_desc'] is None
# location_type that was nan we expect to be 'stop' given config maps
# nans to 'stop'
assert result.iloc[2]['location_type_desc'] == 'stop'
def test_append_route_type(stops_feed_1, stop_times_feed_1, routes_feed_1,
trips_feed_1):
result_stops_df = utils_format._append_route_type(
stops_df=stops_feed_1,
stop_times_df=stop_times_feed_1,
routes_df=routes_feed_1[['route_id', 'route_type']],
trips_df=trips_feed_1[['trip_id', 'route_id']],
info_to_append='route_type_to_stops')
result_stop_times_df = utils_format._append_route_type(
stops_df=stops_feed_1,
stop_times_df=stop_times_feed_1,
routes_df=routes_feed_1[['route_id', 'route_type']],
trips_df=trips_feed_1[['trip_id', 'route_id']],
info_to_append='route_type_to_stop_times')
# check that route_type col was added and has no nulls
assert 'route_type' in result_stops_df.columns
assert result_stops_df[
'route_type'].isnull().values.any() == False # noqa
assert 'route_type' in result_stop_times_df.columns
assert result_stop_times_df[
'route_type'].isnull().values.any() == False # noqa
# remainder of df should not have changed
assert result_stops_df[stops_feed_1.columns].equals(stops_feed_1)
assert result_stop_times_df[stop_times_feed_1.columns].equals(
stop_times_feed_1)
# check subset of values
assert result_stops_df.iloc[0]['route_type'] == 3 and \
result_stops_df.iloc[6]['route_type'] == 1
assert result_stop_times_df.iloc[0]['route_type'] == 3 and \
result_stop_times_df.iloc[36]['route_type'] == 1
def test_append_route_type_invalid_param(stops_feed_1, stop_times_feed_1,
routes_feed_1, trips_feed_1):
with pytest.raises(ValueError) as excinfo:
result_stops_df = utils_format._append_route_type(
stops_df=stops_feed_1,
stop_times_df=stop_times_feed_1,
routes_df=routes_feed_1[['route_id', 'route_type']],
trips_df=trips_feed_1[['trip_id', 'route_id']],
info_to_append='route_type_to_stops2')
expected_error = 'route_type_to_stops2 is not a valid parameter.'
assert expected_error in str(excinfo.value)
def test_add_unique_agencyid_case_1(
agency_a_feed_on_disk_wo_agency, stops_feed_1, stop_times_feed_1,
routes_feed_1, trips_feed_1, calendar_feed_1, calendar_dates_feed_1):
# case 1: no agency.txt file so we expect 'unique_agency_id' will be
# generated using the GTFS feed folder name
    # a blank agency df will be generated by a prior function in the
    # workflow, so replicate that df for this test
blank_agency = pd.DataFrame()
# use test GTFS feed that has no agency.txt file in its dir
feed_path = agency_a_feed_on_disk_wo_agency
expected_unique_agency_id = os.path.split(feed_path)[1]
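    # os.path.split(feed_path)[1] keeps only the folder name, e.g. a
    # hypothetical '/tmp/feeds/agency_a' would yield 'agency_a'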
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=blank_agency,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path,
nulls_as_folder=True)
df_list = [stops_df, routes_df, trips_df, stop_times_df, calendar_df,
calendar_dates_df]
for df in df_list:
# check that unique_agency_id column was created and there are no nulls
assert 'unique_agency_id' in df.columns
assert df['unique_agency_id'].isnull().values.any() == False # noqa
# check subset of values
assert set(df['unique_agency_id'].unique()) == \
set([expected_unique_agency_id])
# remainder of df should not have changed
assert stops_df[stops_feed_1.columns].equals(stops_feed_1)
assert routes_df[routes_feed_1.columns].equals(routes_feed_1)
assert trips_df[trips_feed_1.columns].equals(trips_feed_1)
assert stop_times_df[stop_times_feed_1.columns].equals(stop_times_feed_1)
assert calendar_df[calendar_feed_1.columns].equals(calendar_feed_1)
assert calendar_dates_df[calendar_dates_feed_1.columns].equals(
calendar_dates_feed_1)
def test_add_unique_agencyid_case_2(
agency_a_feed_on_disk_wo_calendar_dates, agency_feed_3, stops_feed_1,
stop_times_feed_1, routes_feed_1, trips_feed_1, calendar_feed_1,
calendar_dates_feed_1):
# case 2: has agency.txt but agency_id is missing and has 1 agency
# (agency_feed_3) so we expect 'unique_agency_id' will be generated
# using the agency_name in the agency.txt file
feed_path = agency_a_feed_on_disk_wo_calendar_dates
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_feed_3,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path,
nulls_as_folder=True)
df_list = [stops_df, routes_df, trips_df, stop_times_df, calendar_df,
calendar_dates_df]
for df in df_list:
# check that unique_agency_id column was created and there are no nulls
assert 'unique_agency_id' in df.columns
assert df['unique_agency_id'].isnull().values.any() == False # noqa
# check subset of values
assert set(df['unique_agency_id'].unique()) == set(['agency_c'])
# remainder of df should not have changed
assert stops_df[stops_feed_1.columns].equals(stops_feed_1)
assert routes_df[routes_feed_1.columns].equals(routes_feed_1)
assert trips_df[trips_feed_1.columns].equals(trips_feed_1)
assert stop_times_df[stop_times_feed_1.columns].equals(stop_times_feed_1)
assert calendar_df[calendar_feed_1.columns].equals(calendar_feed_1)
assert calendar_dates_df[calendar_dates_feed_1.columns].equals(
calendar_dates_feed_1)
def test_add_unique_agencyid_case_3(
agency_a_feed_on_disk_wo_calendar_dates, agency_feed_2, stops_feed_2,
stop_times_feed_2, routes_feed_2, trips_feed_2, calendar_feed_2,
calendar_dates_feed_2):
# case 3: has agency.txt and has agency_id and has 2 agencies
# (agency_feed_2) so we expect 'unique_agency_id' will be generated using
# the agency_id and agency_name in the agency.txt file
feed_path = agency_a_feed_on_disk_wo_calendar_dates
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_feed_2,
stops_df=stops_feed_2,
routes_df=routes_feed_2,
trips_df=trips_feed_2,
stop_times_df=stop_times_feed_2,
calendar_df=calendar_feed_2,
calendar_dates_df=calendar_dates_feed_2,
feed_folder=feed_path,
nulls_as_folder=True)
df_list = [stops_df, routes_df, trips_df, stop_times_df, calendar_df,
calendar_dates_df]
for df in df_list:
# check that unique_agency_id column was created and there are no nulls
assert 'unique_agency_id' in df.columns
assert df['unique_agency_id'].isnull().values.any() == False # noqa
# check subset of values
assert set(df['unique_agency_id'].unique()) == set(
['agency_b_district_2', 'agency_b_district_1'])
# remainder of df should not have changed
assert stops_df[stops_feed_2.columns].equals(stops_feed_2)
assert routes_df[routes_feed_2.columns].equals(routes_feed_2)
assert trips_df[trips_feed_2.columns].equals(trips_feed_2)
assert stop_times_df[stop_times_feed_2.columns].equals(stop_times_feed_2)
assert calendar_df[calendar_feed_2.columns].equals(calendar_feed_2)
assert calendar_dates_df[calendar_dates_feed_2.columns].equals(
calendar_dates_feed_2)
def test_add_unique_agencyid_case_4(
agency_a_feed_on_disk_wo_calendar_dates, agency_feed_1, stops_feed_1,
stop_times_feed_1, routes_feed_1, trips_feed_1, calendar_feed_1,
calendar_dates_feed_1):
# case 4: has agency.txt and has agency_id and has 1 agency
# (agency_feed_1) so we expect 'unique_agency_id' will be generated using
# the agency_name in the agency.txt file
feed_path = agency_a_feed_on_disk_wo_calendar_dates
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_feed_1,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path,
nulls_as_folder=True)
df_list = [stops_df, routes_df, trips_df, stop_times_df, calendar_df,
calendar_dates_df]
for df in df_list:
# check that unique_agency_id column was created and there are no nulls
assert 'unique_agency_id' in df.columns
assert df['unique_agency_id'].isnull().values.any() == False # noqa
# check subset of values
assert set(df['unique_agency_id'].unique()) == set(['agency_a_city_a'])
# remainder of df should not have changed
assert stops_df[stops_feed_1.columns].equals(stops_feed_1)
assert routes_df[routes_feed_1.columns].equals(routes_feed_1)
assert trips_df[trips_feed_1.columns].equals(trips_feed_1)
assert stop_times_df[stop_times_feed_1.columns].equals(stop_times_feed_1)
assert calendar_df[calendar_feed_1.columns].equals(calendar_feed_1)
assert calendar_dates_df[calendar_dates_feed_1.columns].equals(
calendar_dates_feed_1)
def test_add_unique_agencyid_case_5(
agency_a_feed_on_disk_wo_calendar_dates, agency_feed_1, stops_feed_1,
stop_times_feed_1, routes_feed_1, trips_feed_1, calendar_feed_1):
# case 5: same as case 4 but with no calendar_dates.txt
feed_path = agency_a_feed_on_disk_wo_calendar_dates
    # a blank calendar_dates df will be generated by a prior function in
    # the workflow, so replicate that df for this test
blank_calendar_dates = pd.DataFrame()
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_feed_1,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=blank_calendar_dates,
feed_folder=feed_path,
nulls_as_folder=True)
df_list = [stops_df, routes_df, trips_df, stop_times_df, calendar_df]
for df in df_list:
# check that unique_agency_id column was created and there are no nulls
assert 'unique_agency_id' in df.columns
assert df.empty is False
assert df['unique_agency_id'].isnull().values.any() == False # noqa
# check subset of values
assert set(df['unique_agency_id'].unique()) == set(['agency_a_city_a'])
# remainder of df should not have changed
assert stops_df[stops_feed_1.columns].equals(stops_feed_1)
assert routes_df[routes_feed_1.columns].equals(routes_feed_1)
assert trips_df[trips_feed_1.columns].equals(trips_feed_1)
assert stop_times_df[stop_times_feed_1.columns].equals(stop_times_feed_1)
assert calendar_df[calendar_feed_1.columns].equals(calendar_feed_1)
assert calendar_dates_df.equals(blank_calendar_dates)
def test_add_unique_agencyid_case_6(
agency_a_feed_on_disk_wo_calendar_dates, agency_feed_1, stops_feed_1,
stop_times_feed_1, routes_feed_1, trips_feed_1, calendar_dates_feed_1):
# case 6: same as case 4 but with no calendar.txt
feed_path = agency_a_feed_on_disk_wo_calendar_dates
    # a blank calendar df will be generated by a prior function in the
    # workflow, so replicate that df for this test
blank_calendar = pd.DataFrame()
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_feed_1,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=blank_calendar,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path,
nulls_as_folder=True)
    df_list = [stops_df, routes_df, trips_df, stop_times_df,
               calendar_dates_df]
for df in df_list:
# check that unique_agency_id column was created and there are no nulls
assert 'unique_agency_id' in df.columns
assert df.empty is False
assert df['unique_agency_id'].isnull().values.any() == False # noqa
# check subset of values
assert set(df['unique_agency_id'].unique()) == set(['agency_a_city_a'])
# remainder of df should not have changed
assert stops_df[stops_feed_1.columns].equals(stops_feed_1)
assert routes_df[routes_feed_1.columns].equals(routes_feed_1)
assert trips_df[trips_feed_1.columns].equals(trips_feed_1)
assert stop_times_df[stop_times_feed_1.columns].equals(stop_times_feed_1)
assert calendar_dates_df[calendar_dates_feed_1.columns].equals(
calendar_dates_feed_1)
assert calendar_df.equals(blank_calendar)
def test_add_unique_agencyid_multi_agency_id_mismatch_via_agency_txt(
agency_a_feed_on_disk_wo_calendar_dates, agency_feed_2, stops_feed_2,
stop_times_feed_2, routes_feed_2, trips_feed_2, calendar_feed_2,
calendar_dates_feed_2):
# has agency.txt and has agency_id and has 2 agencies (agency_feed_2)
feed_path = agency_a_feed_on_disk_wo_calendar_dates
# change one agency record 'agency_id' to something that will not
# match existing records in routes.txt for test
    agency_feed_2.loc[1, 'agency_id'] = 'agency missing bus'
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_feed_2,
stops_df=stops_feed_2,
routes_df=routes_feed_2,
trips_df=trips_feed_2,
stop_times_df=stop_times_feed_2,
calendar_df=calendar_feed_2,
calendar_dates_df=calendar_dates_feed_2,
feed_folder=feed_path,
nulls_as_folder=True)
df_list = [stops_df, routes_df, trips_df, stop_times_df, calendar_df,
calendar_dates_df]
for df in df_list:
# check that unique_agency_id column was created and there are no nulls
assert 'unique_agency_id' in df.columns
assert df['unique_agency_id'].isnull().values.any() == False # noqa
# check values
assert set(df['unique_agency_id'].unique()) == set(
['multiple_operators_agency_a_wo_calendar_dates',
'agency_b_district_1'])
# remainder of df should not have changed
assert stops_df[stops_feed_2.columns].equals(stops_feed_2)
assert routes_df[routes_feed_2.columns].equals(routes_feed_2)
assert trips_df[trips_feed_2.columns].equals(trips_feed_2)
assert stop_times_df[stop_times_feed_2.columns].equals(stop_times_feed_2)
assert calendar_df[calendar_feed_2.columns].equals(calendar_feed_2)
assert calendar_dates_df[calendar_dates_feed_2.columns].equals(
calendar_dates_feed_2)
def test_add_unique_agencyid_multi_agency_id_mismatch_via_routes_txt(
agency_a_feed_on_disk_wo_calendar_dates, agency_feed_2, stops_feed_2,
stop_times_feed_2, routes_feed_2, trips_feed_2, calendar_feed_2,
calendar_dates_feed_2):
# has agency.txt and has agency_id and has 2 agencies (agency_feed_2)
feed_path = agency_a_feed_on_disk_wo_calendar_dates
    # change routes.txt records' 'agency_id' to something that will not
    # match existing records in agency.txt for test
    routes_feed_2.loc[0:1, 'agency_id'] = 'agency missing bus'
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_feed_2,
stops_df=stops_feed_2,
routes_df=routes_feed_2,
trips_df=trips_feed_2,
stop_times_df=stop_times_feed_2,
calendar_df=calendar_feed_2,
calendar_dates_df=calendar_dates_feed_2,
feed_folder=feed_path,
nulls_as_folder=True)
df_list = [stops_df, routes_df, trips_df, stop_times_df, calendar_df,
calendar_dates_df]
for df in df_list:
# check that unique_agency_id column was created and there are no nulls
assert 'unique_agency_id' in df.columns
assert df['unique_agency_id'].isnull().values.any() == False # noqa
# check values
assert set(df['unique_agency_id'].unique()) == set(
['multiple_operators_agency_a_wo_calendar_dates',
'agency_b_district_2'])
# remainder of df should not have changed
assert stops_df[stops_feed_2.columns].equals(stops_feed_2)
assert routes_df[routes_feed_2.columns].equals(routes_feed_2)
assert trips_df[trips_feed_2.columns].equals(trips_feed_2)
assert stop_times_df[stop_times_feed_2.columns].equals(stop_times_feed_2)
assert calendar_df[calendar_feed_2.columns].equals(calendar_feed_2)
assert calendar_dates_df[calendar_dates_feed_2.columns].equals(
calendar_dates_feed_2)
def test_add_unique_agencyid_value_errors(
agency_a_feed_on_disk_wo_calendar_dates,
agency_a_feed_on_disk_wo_agency, agency_feed_1, stops_feed_1,
stop_times_feed_1, routes_feed_1, trips_feed_1, calendar_feed_1,
calendar_dates_feed_1):
feed_path_wo_agency = agency_a_feed_on_disk_wo_agency
feed_path_w_agency = agency_a_feed_on_disk_wo_calendar_dates
with pytest.raises(ValueError) as excinfo:
# throw error if nulls_as_folder=False and no agency.txt file is found
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_feed_1,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path_wo_agency,
nulls_as_folder=False)
expected_error = (
'No agency.txt file was found in {}. Add the missing file to '
'folder or set nulls_as_folder to True.'.format(feed_path_wo_agency))
assert expected_error in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
# throw error if nulls_as_folder=False and both 'agency_id' and
# 'agency_name' cols do not exist in agency.txt
data = {
'agency_url': 'http://www.agency_c.org',
'agency_timezone': 'America/Los_Angeles',
'agency_phone': '(000) 000-0000'}
index = range(1)
agency_df = pd.DataFrame(data, index)
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_df,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path_w_agency,
nulls_as_folder=False)
expected_error = (
'Both agency_name and agency_id columns were not found in agency.txt.')
assert expected_error in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
# throw error if nulls_as_folder=False and 'agency_id' and
# 'agency_name' with 1 record = '' in agency.txt
data = {
'agency_id': '',
'agency_name': '',
'agency_url': 'http://www.agency_c.org',
'agency_timezone': 'America/Los_Angeles',
'agency_phone': '(000) 000-0000'}
index = range(1)
agency_df = pd.DataFrame(data, index)
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_df,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path_w_agency,
nulls_as_folder=False)
expected_error = 'agency.txt has no agency_id or agency_name values.'
assert expected_error in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
# throw error if nulls_as_folder=False and both 'agency_id' and
# 'agency_name' with 1 record = ' ' in agency.txt
data = {
'agency_id': ' ',
'agency_name': ' ',
'agency_url': 'http://www.agency_c.org',
'agency_timezone': 'America/Los_Angeles',
'agency_phone': '(000) 000-0000'}
index = range(1)
agency_df = pd.DataFrame(data, index)
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_df,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path_w_agency,
nulls_as_folder=False)
expected_error = 'agency.txt has no agency_id or agency_name values.'
assert expected_error in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
# throw error if nulls_as_folder=False and both 'agency_id' and
# 'agency_name' with 1 record = nan in agency.txt
data = {
'agency_id': np.nan,
'agency_name': np.nan,
'agency_url': 'http://www.agency_c.org',
'agency_timezone': 'America/Los_Angeles',
'agency_phone': '(000) 000-0000'}
index = range(1)
agency_df = pd.DataFrame(data, index)
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_df,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path_w_agency,
nulls_as_folder=False)
expected_error = 'agency.txt has no agency_id or agency_name values.'
assert expected_error in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
        # throw error if nulls_as_folder=False and 'agency_name' is blank
        # while 'agency_id' has a value in agency.txt
data = {
'agency_id': 'agency_a',
'agency_name': '',
'agency_url': 'http://www.agency_c.org',
'agency_timezone': 'America/Los_Angeles',
'agency_phone': '(000) 000-0000'}
index = range(1)
agency_df = pd.DataFrame(data, index)
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_df,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path_w_agency,
nulls_as_folder=False)
expected_error = 'Null value in agency_name was found.'
assert expected_error in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
# throw error if nulls_as_folder=False and both 'agency_id' and
# 'agency_name' with more than 1 record have nulls or blank strs
# in agency.txt
data = {
'agency_id': ['agency_a', ' '],
'agency_name': ['', np.nan],
            'agency_url': ['http://www.agency_c.org',
                           'http://www.agency_c.org'],
'agency_timezone': ['America/Los_Angeles', 'America/Los_Angeles'],
'agency_phone': ['(000) 000-0000', '(000) 000-0000']}
index = range(2)
agency_df = pd.DataFrame(data, index)
stops_df, routes_df, trips_df, stop_times_df, calendar_df, \
calendar_dates_df = utils_format._add_unique_agencyid(
agency_df=agency_df,
stops_df=stops_feed_1,
routes_df=routes_feed_1,
trips_df=trips_feed_1,
stop_times_df=stop_times_feed_1,
calendar_df=calendar_feed_1,
calendar_dates_df=calendar_dates_feed_1,
feed_folder=feed_path_w_agency,
nulls_as_folder=False)
expected_error = 'Null values found in agency_id and agency_name.'
assert expected_error in str(excinfo.value)
|
from unittest import TestCase
from libzsm.datastructures import contains_list
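# contains_list(haystack, needle) is expected to return True when needle
# occurs as a contiguous sublist of haystack (a reading of the cases below).
# A minimal sketch of such a check (not necessarily libzsm's implementation):
#   def contains_list(haystack, needle):
#       n = len(needle)
#       return any(haystack[i:i + n] == needle
#                  for i in range(len(haystack) - n + 1))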
class ListTest(TestCase):
def test_contains_list(self):
self.assertEqual(
True, contains_list(['manage.py', 'runserver', '-v', '3'], ['-v', '3'])
)
self.assertEqual(
True, contains_list(['manage.py', 'runserver', '-v', '3', '-l'], ['-v', '3'])
)
self.assertEqual(
True, contains_list(['-v', '3', '--arg', 'val'], ['-v', '3'])
)
|
"""Class to parse camt files."""
import re
from datetime import datetime
from lxml import etree
from openerp.addons.account_bank_statement_import.parserlib import (
BankStatement)
class CamtParser(object):
"""Parser for camt bank statement import files."""
def parse_amount(self, ns, node):
"""Parse element that contains Amount and CreditDebitIndicator."""
if node is None:
return 0.0
sign = 1
amount = 0.0
sign_node = node.xpath('ns:CdtDbtInd', namespaces={'ns': ns})
if sign_node and sign_node[0].text == 'DBIT':
sign = -1
amount_node = node.xpath('ns:Amt', namespaces={'ns': ns})
if amount_node:
amount = sign * float(amount_node[0].text)
return amount
def add_value_from_node(
self, ns, node, xpath_str, obj, attr_name, join_str=None):
"""Add value to object from first or all nodes found with xpath.
If xpath_str is a list (or iterable), it will be seen as a series
of search path's in order of preference. The first item that results
in a found node will be used to set a value."""
if not isinstance(xpath_str, (list, tuple)):
xpath_str = [xpath_str]
for search_str in xpath_str:
found_node = node.xpath(search_str, namespaces={'ns': ns})
if found_node:
if join_str is None:
attr_value = found_node[0].text
else:
attr_value = join_str.join([x.text for x in found_node])
setattr(obj, attr_name, attr_value)
break
def parse_transaction_details(self, ns, node, transaction):
"""Parse transaction details (message, party, account...)."""
# message
self.add_value_from_node(
ns, node, [
'./ns:RmtInf/ns:Ustrd',
'./ns:AddtlTxInf',
'./ns:AddtlNtryInf',
], transaction, 'message', join_str='\n')
# eref
self.add_value_from_node(
ns, node, [
'./ns:RmtInf/ns:Strd/ns:CdtrRefInf/ns:Ref',
'./ns:Refs/ns:EndToEndId',
],
transaction, 'eref'
)
# remote party values
party_type = 'Dbtr'
party_type_node = node.xpath(
'../../ns:CdtDbtInd', namespaces={'ns': ns})
if party_type_node and party_type_node[0].text != 'CRDT':
party_type = 'Cdtr'
party_node = node.xpath(
'./ns:RltdPties/ns:%s' % party_type, namespaces={'ns': ns})
if party_node:
self.add_value_from_node(
ns, party_node[0], './ns:Nm', transaction, 'remote_owner')
self.add_value_from_node(
ns, party_node[0], './ns:PstlAdr/ns:Ctry', transaction,
'remote_owner_country'
)
address_node = party_node[0].xpath(
'./ns:PstlAdr/ns:AdrLine', namespaces={'ns': ns})
if address_node:
transaction.remote_owner_address = [address_node[0].text]
# Get remote_account from iban or from domestic account:
account_node = node.xpath(
'./ns:RltdPties/ns:%sAcct/ns:Id' % party_type,
namespaces={'ns': ns}
)
if account_node:
iban_node = account_node[0].xpath(
'./ns:IBAN', namespaces={'ns': ns})
if iban_node:
transaction.remote_account = iban_node[0].text
bic_node = node.xpath(
'./ns:RltdAgts/ns:%sAgt/ns:FinInstnId/ns:BIC' % party_type,
namespaces={'ns': ns}
)
if bic_node:
transaction.remote_bank_bic = bic_node[0].text
else:
self.add_value_from_node(
ns, account_node[0], './ns:Othr/ns:Id', transaction,
'remote_account'
)
def parse_transaction(self, ns, node, transaction):
"""Parse transaction (entry) node."""
self.add_value_from_node(
ns, node, './ns:BkTxCd/ns:Prtry/ns:Cd', transaction,
'transfer_type'
)
self.add_value_from_node(
ns, node, './ns:BookgDt/ns:Dt', transaction, 'execution_date')
self.add_value_from_node(
ns, node, './ns:ValDt/ns:Dt', transaction, 'value_date')
transaction.transferred_amount = self.parse_amount(ns, node)
details_node = node.xpath(
'./ns:NtryDtls/ns:TxDtls', namespaces={'ns': ns})
if details_node:
self.parse_transaction_details(ns, details_node[0], transaction)
if not transaction.message:
self.add_value_from_node(
ns, node, './ns:AddtlNtryInf', transaction, 'message')
if not transaction.eref:
self.add_value_from_node(
ns, node, [
'./ns:NtryDtls/ns:Btch/ns:PmtInfId',
],
transaction, 'eref'
)
transaction.data = etree.tostring(node)
return transaction
def get_balance_amounts(self, ns, node):
"""Return opening and closing balance.
Depending on kind of balance and statement, the balance might be in a
different kind of node:
OPBD = OpeningBalance
PRCD = PreviousClosingBalance
ITBD = InterimBalance (first ITBD is start-, second is end-balance)
CLBD = ClosingBalance
"""
start_balance_node = None
end_balance_node = None
for node_name in ['OPBD', 'PRCD', 'CLBD', 'ITBD']:
code_expr = (
'./ns:Bal/ns:Tp/ns:CdOrPrtry/ns:Cd[text()="%s"]/../../..' %
node_name
)
balance_node = node.xpath(code_expr, namespaces={'ns': ns})
if balance_node:
if node_name in ['OPBD', 'PRCD']:
start_balance_node = balance_node[0]
elif node_name == 'CLBD':
end_balance_node = balance_node[0]
else:
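                    # ITBD: use the first interim balance found as the start
                    # balance and the last as the end balance, unless
                    # OPBD/PRCD/CLBD already provided them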
if not start_balance_node:
start_balance_node = balance_node[0]
if not end_balance_node:
end_balance_node = balance_node[-1]
return (
self.parse_amount(ns, start_balance_node),
self.parse_amount(ns, end_balance_node)
)
def parse_statement(self, ns, node):
"""Parse a single Stmt node."""
statement = BankStatement()
self.add_value_from_node(
ns, node, [
'./ns:Acct/ns:Id/ns:IBAN',
'./ns:Acct/ns:Id/ns:Othr/ns:Id',
], statement, 'local_account'
)
self.add_value_from_node(
ns, node, './ns:Id', statement, 'statement_id')
self.add_value_from_node(
ns, node, './ns:Acct/ns:Ccy', statement, 'local_currency')
(statement.start_balance, statement.end_balance) = (
self.get_balance_amounts(ns, node))
transaction_nodes = node.xpath('./ns:Ntry', namespaces={'ns': ns})
for entry_node in transaction_nodes:
transaction = statement.create_transaction()
self.parse_transaction(ns, entry_node, transaction)
if statement['transactions']:
execution_date = statement['transactions'][0].execution_date
statement.date = datetime.strptime(execution_date, "%Y-%m-%d")
            # Prepend date of first transaction to improve id uniqueness
if execution_date not in statement.statement_id:
statement.statement_id = "%s-%s" % (
execution_date, statement.statement_id)
return statement
def check_version(self, ns, root):
"""Validate validity of camt file."""
# Check wether it is camt at all:
re_camt = re.compile(
r'(^urn:iso:std:iso:20022:tech:xsd:camt.'
r'|^ISO:camt.)'
)
if not re_camt.search(ns):
raise ValueError('no camt: ' + ns)
        # Check whether version 052 or 053:
re_camt_version = re.compile(
r'(^urn:iso:std:iso:20022:tech:xsd:camt.053.'
r'|^urn:iso:std:iso:20022:tech:xsd:camt.052.'
r'|^ISO:camt.053.'
r'|^ISO:camt.052.)'
)
if not re_camt_version.search(ns):
raise ValueError('no camt 052 or 053: ' + ns)
# Check GrpHdr element:
root_0_0 = root[0][0].tag[len(ns) + 2:] # strip namespace
if root_0_0 != 'GrpHdr':
raise ValueError('expected GrpHdr, got: ' + root_0_0)
def parse(self, data):
"""Parse a camt.052 or camt.053 file."""
try:
root = etree.fromstring(
data, parser=etree.XMLParser(recover=True))
except etree.XMLSyntaxError:
# ABNAmro is known to mix up encodings
root = etree.fromstring(
data.decode('iso-8859-15').encode('utf-8'))
if root is None:
raise ValueError(
'Not a valid xml file, or not an xml file at all.')
ns = root.tag[1:root.tag.index("}")]
self.check_version(ns, root)
statements = []
for node in root[0][1:]:
statement = self.parse_statement(ns, node)
if len(statement['transactions']):
statements.append(statement)
return statements
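# Minimal usage sketch (the file name here is hypothetical):
#   parser = CamtParser()
#   with open('camt053_example.xml', 'rb') as camt_file:
#       statements = parser.parse(camt_file.read())
#   for statement in statements:
#       print(statement.statement_id,
#             statement.start_balance, statement.end_balance)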
|
import datetime
from decimal import Decimal
import mock
import uuid
from django.test import TestCase
from attribution.models.enums.function import Functions
from base.models.enums.learning_container_year_types import LearningContainerYearType
from base.models.enums.vacant_declaration_type import VacantDeclarationType
from ddd.logic.application.commands import GetChargeSummaryCommand
from ddd.logic.application.domain.builder.applicant_identity_builder import ApplicantIdentityBuilder
from ddd.logic.application.domain.model._allocation_entity import AllocationEntity
from ddd.logic.application.domain.model.applicant import Applicant
from ddd.logic.application.domain.model.application_calendar import ApplicationCalendar, ApplicationCalendarIdentity
from ddd.logic.application.domain.model._attribution import Attribution
from ddd.logic.application.domain.model.vacant_course import VacantCourse, VacantCourseIdentity
from ddd.logic.application.dtos import LearningUnitVolumeFromServiceDTO, ApplicantAttributionChargeSummaryDTO
from ddd.logic.learning_unit.domain.model.learning_unit import LearningUnitIdentity
from ddd.logic.shared_kernel.academic_year.builder.academic_year_identity_builder import AcademicYearIdentityBuilder
from infrastructure.application.repository.applicant_in_memory import ApplicantInMemoryRepository
from infrastructure.application.repository.application_calendar_in_memory import ApplicationCalendarInMemoryRepository
from infrastructure.application.repository.vacant_course_in_memory import VacantCourseInMemoryRepository
from infrastructure.messages_bus import message_bus_instance
class GetChargeSummary(TestCase):
@classmethod
def setUpTestData(cls):
today = datetime.date.today()
cls.application_calendar = ApplicationCalendar(
entity_id=ApplicationCalendarIdentity(uuid=uuid.uuid4()),
authorized_target_year=AcademicYearIdentityBuilder.build_from_year(year=2019),
start_date=today - datetime.timedelta(days=5),
end_date=today + datetime.timedelta(days=10),
)
cls.global_id = '123456789'
cls.applicant = Applicant(
entity_id=ApplicantIdentityBuilder.build_from_global_id(global_id=cls.global_id),
first_name="Thomas",
last_name="Durant",
attributions=[
Attribution(
course_id=LearningUnitIdentity(
code="LDROI1200",
academic_year=AcademicYearIdentityBuilder.build_from_year(year=2019)
),
course_title='Introduction au droit',
course_is_in_suppression_proposal=False,
course_type=LearningContainerYearType.COURSE.name,
function=Functions.CO_HOLDER,
end_year=AcademicYearIdentityBuilder.build_from_year(year=2030),
start_year=AcademicYearIdentityBuilder.build_from_year(year=2016),
lecturing_volume=Decimal(10),
practical_volume=Decimal(15),
is_substitute=False
)
]
)
cls.vacant_course_ldroi1200 = VacantCourse(
entity_id=VacantCourseIdentity(
code='LDROI1200',
academic_year=AcademicYearIdentityBuilder.build_from_year(year=2019)
),
lecturing_volume_available=Decimal(10),
practical_volume_available=Decimal(50),
title='Introduction au droit',
vacant_declaration_type=VacantDeclarationType.RESEVED_FOR_INTERNS,
is_in_team=False,
allocation_entity=AllocationEntity(code='DRT')
)
cls.applicant_repository = ApplicantInMemoryRepository([cls.applicant])
cls.application_calendar_repository = ApplicationCalendarInMemoryRepository([cls.application_calendar])
cls.vacant_course_repository = VacantCourseInMemoryRepository([cls.vacant_course_ldroi1200])
cls.learning_unit_service_mocked = mock.Mock()
cls.learning_unit_service_mocked.search_learning_unit_volumes_dto = mock.Mock(return_value=[
LearningUnitVolumeFromServiceDTO(
code='LDROI1200',
year=2019,
lecturing_volume_total=Decimal(50),
practical_volume_total=Decimal(70),
)
])
cls.learning_unit_service_mocked.search_tutor_attribution_dto = mock.Mock(return_value=[])
def setUp(self) -> None:
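        # Patch the message bus infrastructure so the command handler
        # resolves the in-memory repositories and the mocked learning
        # unit service instead of the real ones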
message_bus_patcher = mock.patch.multiple(
'infrastructure.messages_bus',
ApplicantRepository=lambda: self.applicant_repository,
ApplicationCalendarRepository=lambda: self.application_calendar_repository,
VacantCourseRepository=lambda: self.vacant_course_repository,
LearningUnitTranslator=lambda: self.learning_unit_service_mocked
)
message_bus_patcher.start()
self.addCleanup(message_bus_patcher.stop)
self.message_bus = message_bus_instance
def test_should_return_charge_summary_list_dto(self):
cmd = GetChargeSummaryCommand(global_id=self.global_id)
results = self.message_bus.invoke(cmd)
self.assertIsInstance(results, list)
self.assertEqual(len(results), 1)
self.assertIsInstance(results[0], ApplicantAttributionChargeSummaryDTO)
self.assertEqual(results[0].code, "LDROI1200")
self.assertEqual(results[0].year, 2019)
self.assertEqual(results[0].title, "Introduction au droit")
self.assertEqual(results[0].course_is_in_suppression_proposal, False)
self.assertEqual(results[0].start_year, 2016)
self.assertEqual(results[0].end_year, 2030)
self.assertEqual(results[0].function, Functions.CO_HOLDER)
self.assertEqual(results[0].lecturing_volume, Decimal(10))
self.assertEqual(results[0].practical_volume, Decimal(15))
self.assertEqual(results[0].lecturing_volume_available, Decimal(10))
self.assertEqual(results[0].practical_volume_available, Decimal(50))
self.assertEqual(results[0].total_lecturing_volume_course, Decimal(50))
self.assertEqual(results[0].total_practical_volume_course, Decimal(70))
self.assertEqual(results[0].tutors, [])
|
import datetime
import re
import json
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.core.mail import EmailMultiAlternatives
from django.core.validators import validate_email
from django.conf import settings
from django.template import RequestContext
from django_fixmystreet.fixmystreet.models import (
OrganisationEntity, ReportCategory,
Report, ReportMainCategoryClass)
from django_fixmystreet.fixmystreet.utils import get_current_user, transform_notification_template
from django_fixmystreet.fixmystreet.utils import generate_pdf, JsonHttpResponse
SRID_SPHERICAL_MERCATOR = 3857
SRID_EQUIRECTANGULAR = 4326
SRID_ELLIPTICAL_MERCATOR = 3395
DEFAULT_SRID = SRID_EQUIRECTANGULAR
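# EPSG codes: 3857 is spherical (web) Mercator, 4326 is WGS84 lat/long
# (the default here) and 3395 is elliptical (world) Mercator.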
def saveCategoryConfiguration(request):
categoriesList = request.REQUEST.getlist("category")
groupsList = request.REQUEST.getlist("group")
    # Assign new groups to categories: for each group, add the category to its dispatch_categories
for idx, groupParam in enumerate(groupsList):
newGroup = OrganisationEntity.objects.get(id=groupParam)
category = ReportCategory.objects.get(pk=categoriesList[idx])
        # Before adding, remove this category from the old group.
oldGroups = category.assigned_to_department.filter(dependency=request.user.fmsuser.get_organisation)
for group in oldGroups:
group.dispatch_categories.remove(category)
newGroup.dispatch_categories.add(category)
return HttpResponseRedirect(reverse("category_gestionnaire_configuration"))
def get_report_popup_details(request):
report = Report.objects.all().related_fields().visible().transform(DEFAULT_SRID).get(id=request.REQUEST.get("id"))
response = {
"id": report.id,
"type": report.get_status_for_js_map(),
"latlng": [report.point.x, report.point.y],
"address": {
"street": report.address,
"number": report.address_number,
"postalCode": report.postalcode,
"city": report.get_address_commune_name(),
},
"categories": report.get_category_path(),
"photo": report.thumbnail,
"icons": report.get_icons_for_js_map(pro=True),
"url": reverse("report_show_pro", args=[report.get_slug(), report.id]),
}
response_json = json.dumps(response)
return HttpResponse(response_json, mimetype="application/json")
def secondary_category_for_main_category(request):
main_category_id = int(request.GET["main_category"])
secondary_categories = ReportCategory.objects.filter(category_class=main_category_id)
jsonstring = ReportCategory.listToJSON(secondary_categories)
return HttpResponse(jsonstring, mimetype="application/json")
def update_category_for_report(request, report_id):
main_category_id = int(request.POST["main_category"])
secondary_category_id = int(request.POST["secondary_category"])
report = get_object_or_404(Report, id=report_id)
secondary_category = ReportCategory.objects.get(id=secondary_category_id)
if not report.private and not secondary_category.public:
messages.add_message(request, messages.ERROR, _("Cannot set a private category to a public report"))
else:
report.category = ReportMainCategoryClass.objects.get(id=main_category_id)
report.secondary_category = secondary_category
report.save()
return HttpResponseRedirect(report.get_absolute_url_pro())
def send_pdf(request, report_id):
to_return = {
"status": "success",
"message": "",
"logMessages": [],
}
user = get_current_user()
recipients = request.POST.get('to')
comments = request.POST.get('comments', '')
    # Only render the private (pro) version if the user is a pro and the privacy POST param is 'private'
if request.fmsuser.is_pro() and "private" == request.POST.get('privacy'):
pro_version = True
else:
pro_version = False
report = get_object_or_404(Report, id=report_id)
    # Generate the PDF
pdffile = generate_pdf("reports/pdf.html", {
'report': report,
'files': report.files() if pro_version else report.active_files(),
'comments': report.comments() if pro_version else report.active_comments(),
'activity_list': report.activities.all(),
'privacy': 'private' if pro_version else 'public',
'BACKOFFICE': pro_version,
'base_url': getattr(settings, 'RENDER_PDF_BASE_URL', None),
}, context_instance=RequestContext(request))
subject, html, text = transform_notification_template("mail-pdf", report, user, comment=comments)
    recipient_list = re.compile(r"[\s,;]+").split(recipients)
    for recipient in recipient_list:
        recipient = recipient.strip()
        if not recipient:
            continue
        try:
            validate_email(recipient)
        except ValidationError:
            to_return["status"] = "error"
            to_return["logMessages"].append(_("'{email}' is not a valid email address.").format(email=recipient))
            continue
        msg = EmailMultiAlternatives(subject, text, settings.DEFAULT_FROM_EMAIL, (recipient,))
        if html:
            msg.attach_alternative(html, "text/html")
        # Reset the file position to 0 so the same PDF can be read once per recipient
        pdffile.seek(0)
        name = "export-incident-%s-date-%s.pdf" % (report.id, datetime.date.today().isoformat())
        msg.attach(name, pdffile.read(), 'application/pdf')
        msg.send()
        to_return["logMessages"].append(_("Successfully sent to '{email}'.").format(email=recipient))
if to_return["status"] == "success":
to_return["message"] = _("PDF sent by email.")
else:
to_return["message"] = _("There were errors.")
return JsonHttpResponse(to_return)
|
import sys
import libxml2
libxml2.debugMemory(1)
def foo(ctx, x):
return x + 1
def bar(ctx, x):
return "%d" % (x + 2)
doc = libxml2.parseFile("tst.xml")
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
if len(res) != 2:
print("xpath query: wrong node set size")
sys.exit(1)
if res[0].name != "doc" or res[1].name != "foo":
print("xpath query: wrong node set value")
sys.exit(1)
libxml2.registerXPathFunction(ctxt._o, "foo", None, foo)
libxml2.registerXPathFunction(ctxt._o, "bar", None, bar)
i = 10000
while i > 0:
res = ctxt.xpathEval("foo(1)")
if res != 2:
print("xpath extension failure")
sys.exit(1)
i = i - 1
i = 10000
while i > 0:
res = ctxt.xpathEval("bar(1)")
if res != "3":
print("xpath extension failure got %s expecting '3'")
sys.exit(1)
i = i - 1
doc.freeDoc()
ctxt.xpathFreeContext()
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
|
from . import models
|
from .common import *
class SphericalSurfaceSelectionGate:
def allow(self, doc, obj, sub):
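        # Only spherical faces and vertices may be picked in the 3D view;
        # anything else is filtered out of the selection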
if sub.startswith('Face'):
face = getObjectFaceFromName( obj, sub)
return str( face.Surface ).startswith('Sphere ')
elif sub.startswith('Vertex'):
return True
else:
return False
def parseSelection(selection, objectToUpdate=None):
validSelection = False
if len(selection) == 2:
s1, s2 = selection
if s1.ObjectName != s2.ObjectName:
if ( vertexSelected(s1) or sphericalSurfaceSelected(s1)) \
and ( vertexSelected(s2) or sphericalSurfaceSelected(s2)):
validSelection = True
cParms = [ [s1.ObjectName, s1.SubElementNames[0], s1.Object.Label ],
[s2.ObjectName, s2.SubElementNames[0], s2.Object.Label ] ]
if not validSelection:
        msg = '''To add a spherical surface constraint select two spherical surfaces (or vertices), each from a different part. Selection made:
%s''' % printSelection(selection)
QtGui.QMessageBox.information( QtGui.QApplication.activeWindow(), "Incorrect Usage", msg)
return
    if objectToUpdate is None:
cName = findUnusedObjectName('sphericalSurfaceConstraint')
debugPrint(2, "creating %s" % cName )
c = FreeCAD.ActiveDocument.addObject("App::FeaturePython", cName)
c.addProperty("App::PropertyString","Type","ConstraintInfo").Type = 'sphericalSurface'
c.addProperty("App::PropertyString","Object1","ConstraintInfo").Object1 = cParms[0][0]
c.addProperty("App::PropertyString","SubElement1","ConstraintInfo").SubElement1 = cParms[0][1]
c.addProperty("App::PropertyString","Object2","ConstraintInfo").Object2 = cParms[1][0]
c.addProperty("App::PropertyString","SubElement2","ConstraintInfo").SubElement2 = cParms[1][1]
c.setEditorMode('Type',1)
for prop in ["Object1","Object2","SubElement1","SubElement2"]:
c.setEditorMode(prop, 1)
c.Proxy = ConstraintObjectProxy()
c.ViewObject.Proxy = ConstraintViewProviderProxy( c, ':/assembly2/icons/sphericalSurfaceConstraint.svg', True, cParms[1][2], cParms[0][2])
else:
debugPrint(2, "redefining %s" % objectToUpdate.Name )
c = objectToUpdate
c.Object1 = cParms[0][0]
c.SubElement1 = cParms[0][1]
c.Object2 = cParms[1][0]
c.SubElement2 = cParms[1][1]
updateObjectProperties(c)
recordConstraints( FreeCAD.ActiveDocument, s1, s2 )
c.purgeTouched()
c.Proxy.callSolveConstraints()
repair_tree_view()
selection_text = '''Selection options:
- spherical surface
- vertex'''
class SphericalSurfaceConstraintCommand:
def Activated(self):
selection = FreeCADGui.Selection.getSelectionEx()
if len(selection) == 2:
parseSelection( selection )
else:
FreeCADGui.Selection.clearSelection()
ConstraintSelectionObserver(
SphericalSurfaceSelectionGate(),
parseSelection,
taskDialog_title ='add spherical surface constraint',
taskDialog_iconPath = self.GetResources()['Pixmap'],
taskDialog_text = selection_text
)
def GetResources(self):
return {
'Pixmap' : ':/assembly2/icons/sphericalSurfaceConstraint.svg',
'MenuText': 'Add a spherical surface constraint',
'ToolTip': 'Add a spherical surface constraint between two objects'
}
FreeCADGui.addCommand('assembly2_addSphericalSurfaceConstraint', SphericalSurfaceConstraintCommand())
class RedefineSphericalSurfaceConstraintCommand:
def Activated(self):
self.constObject = FreeCADGui.Selection.getSelectionEx()[0].Object
debugPrint(3,'redefining %s' % self.constObject.Name)
FreeCADGui.Selection.clearSelection()
ConstraintSelectionObserver(
SphericalSurfaceSelectionGate(),
self.UpdateConstraint,
taskDialog_title ='redefine spherical surface constraint',
taskDialog_iconPath = ':/assembly2/icons/sphericalSurfaceConstraint.svg',
taskDialog_text = selection_text
)
def UpdateConstraint(self, selection):
parseSelection( selection, self.constObject)
def GetResources(self):
return { 'MenuText': 'Redefine' }
FreeCADGui.addCommand('assembly2_redefineSphericalSurfaceConstraint', RedefineSphericalSurfaceConstraintCommand())
|
"""
German-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
'achtung': 'attention',
'vorsicht': 'caution',
'gefahr': 'danger',
'fehler': 'error',
'hinweis': 'hint',
'wichtig': 'important',
'notiz': 'note',
'tipp': 'tip',
'warnung': 'warning',
'ermahnung': 'admonition',
'kasten': 'sidebar',
'seitenkasten': 'sidebar',
'thema': 'topic',
'zeilen-block': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubrik': 'rubric',
'epigraph': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote', # kasten too ?
'zusammengesetzt': 'compound',
'verbund': 'compound',
#'fragen': 'questions',
'tabelle': 'table',
'csv-tabelle': 'csv-table',
'list-table (translation required)': 'list-table',
'meta': 'meta',
#'imagemap': 'imagemap',
'bild': 'image',
'abbildung': 'figure',
u'unver\xe4ndert': 'raw',
u'roh': 'raw',
u'einf\xfcgen': 'include',
'ersetzung': 'replace',
'ersetzen': 'replace',
'ersetze': 'replace',
'unicode': 'unicode',
'klasse': 'class',
'rolle': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'inhalt': 'contents',
'kapitel-nummerierung': 'sectnum',
'abschnitts-nummerierung': 'sectnum',
u'linkziel-fu\xdfnoten': 'target-notes',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#u'fu\xdfnoten': 'footnotes',
#'zitate': 'citations',
}
"""German name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'abk\xfcrzung': 'abbreviation',
'akronym': 'acronym',
'index': 'index',
'tiefgestellt': 'subscript',
'hochgestellt': 'superscript',
'titel-referenz': 'title-reference',
'pep-referenz': 'pep-reference',
'rfc-referenz': 'rfc-reference',
'betonung': 'emphasis',
'fett': 'strong',
u'w\xf6rtlich': 'literal',
'benannte-referenz': 'named-reference',
'unbenannte-referenz': 'anonymous-reference',
u'fu\xdfnoten-referenz': 'footnote-reference',
'zitat-referenz': 'citation-reference',
'ersetzungs-referenz': 'substitution-reference',
'ziel': 'target',
'uri-referenz': 'uri-reference',
u'unver\xe4ndert': 'raw',
u'roh': 'raw',}
"""Mapping of German role names to canonical role names for interpreted text.
"""
|
"""Settings manager module. This will load/save user settings from a
defined settings backend."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Consorcio Fernando de los Rios."
__license__ = "LGPL"
import imp
import importlib
import os
from gi.repository import Gio, GLib
from . import debug
from . import orca_i18n
from . import script_manager
from . import settings
from . import pronunciation_dict
from .keybindings import KeyBinding
try:
_proxy = Gio.DBusProxy.new_for_bus_sync(
Gio.BusType.SESSION,
Gio.DBusProxyFlags.NONE,
None,
'org.a11y.Bus',
'/org/a11y/bus',
'org.freedesktop.DBus.Properties',
None)
except:
_proxy = None
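# _proxy talks to the org.a11y.Bus service on the session bus; when it is
# unavailable, the accessibility state helpers below simply return False.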
_scriptManager = script_manager.getManager()
class SettingsManager(object):
"""Settings backend manager. This class manages orca user's settings
using different backends"""
_instance = None
def __new__(cls, *args, **kwargs):
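        # Classic singleton: every instantiation returns the same shared
        # instance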
if '__instance' not in vars(cls):
cls.__instance = object.__new__(cls, *args, **kwargs)
return cls.__instance
def __init__(self, backend='json'):
"""Initialize a SettingsManager Object.
        If backend isn't defined then the default backend is used, in
        this case the json backend.
        The backend parameter can take the following values:
backend='json'
"""
debug.println(debug.LEVEL_FINEST, 'INFO: Initializing settings manager')
self.backendModule = None
self._backend = None
self.profile = None
self.backendName = backend
self._prefsDir = None
        # Dictionaries that store the default values.
        # The keys and values are defined at orca.settings
        #
        ## self.defaultGeneral contains some constant names as values
        self.defaultGeneral = {}
        ## self.defaultGeneralValues contains the actual values, not constants
        self.defaultGeneralValues = {}
self.defaultPronunciations = {}
self.defaultKeybindings = {}
# Dictionaries that store the key:value pairs which values are
# different from the current profile and the default ones
#
self.profileGeneral = {}
self.profilePronunciations = {}
self.profileKeybindings = {}
# Dictionaries that store the current settings.
        # They are the result of overwriting the default values with
        # the ones from the currently active profile
self.general = {}
self.pronunciations = {}
self.keybindings = {}
if not self._loadBackend():
raise Exception('SettingsManager._loadBackend failed.')
self.customizedSettings = {}
self._customizationCompleted = False
# For handling the currently-"classic" application settings
self.settingsPackages = ["app-settings"]
debug.println(debug.LEVEL_FINEST, 'INFO: Settings manager initialized')
def activate(self, prefsDir=None, customSettings={}):
debug.println(debug.LEVEL_FINEST, 'INFO: Activating settings manager')
self.customizedSettings.update(customSettings)
self._prefsDir = prefsDir \
or os.path.join(GLib.get_user_data_dir(), "orca")
# Load the backend and the default values
self._backend = self.backendModule.Backend(self._prefsDir)
self._setDefaultGeneral()
self._setDefaultPronunciations()
self._setDefaultKeybindings()
self.defaultGeneralValues = getRealValues(self.defaultGeneral)
self.general = self.defaultGeneralValues.copy()
if not self.isFirstStart():
self.general.update(self._backend.getGeneral())
self.pronunciations = self.defaultPronunciations.copy()
self.keybindings = self.defaultKeybindings.copy()
        # If this is the first time we launch Orca, there are no user settings
# yet, so we need to create the user config directories and store the
# initial default settings
#
self._createDefaults()
debug.println(debug.LEVEL_FINEST, 'INFO: Settings manager activated')
# Set the active profile and load its stored settings
if self.profile is None:
self.profile = self.general.get('startingProfile')[1]
self.setProfile(self.profile)
def _loadBackend(self):
"""Load specific backend for manage user settings"""
try:
backend = '.backends.%s_backend' % self.backendName
self.backendModule = importlib.import_module(backend, 'orca')
return True
except:
return False
def _createDefaults(self):
"""Let the active backend to create the initial structure
for storing the settings and save the default ones from
orca.settings"""
def _createDir(dirName):
if not os.path.isdir(dirName):
os.makedirs(dirName)
# Set up the user's preferences directory
# ($XDG_DATA_HOME/orca by default).
#
orcaDir = self._prefsDir
_createDir(orcaDir)
# Set up $XDG_DATA_HOME/orca/orca-scripts as a Python package
#
orcaScriptDir = os.path.join(orcaDir, "orca-scripts")
_createDir(orcaScriptDir)
initFile = os.path.join(orcaScriptDir, "__init__.py")
if not os.path.exists(initFile):
os.close(os.open(initFile, os.O_CREAT, 0o700))
# Set up $XDG_DATA_HOME/orca/app-settings as a Python package.
#
orcaSettingsDir = os.path.join(orcaDir, "app-settings")
_createDir(orcaSettingsDir)
initFile = os.path.join(orcaSettingsDir, "__init__.py")
if not os.path.exists(initFile):
os.close(os.open(initFile, os.O_CREAT, 0o700))
# Set up $XDG_DATA_HOME/orca/orca-customizations.py empty file and
# define orcaDir as a Python package.
initFile = os.path.join(orcaDir, "__init__.py")
if not os.path.exists(initFile):
os.close(os.open(initFile, os.O_CREAT, 0o700))
userCustomFile = os.path.join(orcaDir, "orca-customizations.py")
if not os.path.exists(userCustomFile):
os.close(os.open(userCustomFile, os.O_CREAT, 0o700))
if self.isFirstStart():
self._backend.saveDefaultSettings(self.defaultGeneral,
self.defaultPronunciations,
self.defaultKeybindings)
def _setDefaultPronunciations(self):
"""Get the pronunciations by default from orca.settings"""
self.defaultPronunciations = {}
def _setDefaultKeybindings(self):
"""Get the keybindings by default from orca.settings"""
self.defaultKeybindings = {}
def _setDefaultGeneral(self):
"""Get the general settings by default from orca.settings"""
self._getCustomizedSettings()
self.defaultGeneral = {}
for key in settings.userCustomizableSettings:
value = self.customizedSettings.get(key)
            if value is None:
try:
value = getattr(settings, key)
except:
pass
self.defaultGeneral[key] = value
def _getCustomizedSettings(self):
if self._customizationCompleted:
return self.customizedSettings
originalSettings = {}
for key, value in list(settings.__dict__.items()):
originalSettings[key] = value
self._customizationCompleted = self._loadUserCustomizations()
for key, value in list(originalSettings.items()):
customValue = settings.__dict__.get(key)
if value != customValue:
self.customizedSettings[key] = customValue
def _loadUserCustomizations(self):
"""Attempt to load the user's orca-customizations. Returns a boolean
indicating our success at doing so, where success is measured by the
likelihood that the results won't be different if we keep trying."""
success = False
pathList = [self._prefsDir]
try:
msg = "Attempt to load orca-customizations "
(fileHnd, moduleName, desc) = \
imp.find_module("orca-customizations", pathList)
msg += "from %s " % moduleName
imp.load_module("orca-customizations", fileHnd, moduleName, desc)
except ImportError:
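            # No orca-customizations module exists; retrying will not change
            # that, so treat it as success and give up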
success = True
msg += "failed due to ImportError. Giving up."
except AttributeError:
return False
else:
msg += "succeeded."
fileHnd.close()
success = True
debug.println(debug.LEVEL_ALL, msg)
return success
def getPrefsDir(self):
return self._prefsDir
def setSetting(self, settingName, settingValue):
self._setSettingsRuntime({settingName:settingValue})
def getSetting(self, settingName):
return getattr(settings, settingName)
def getVoiceLocale(self, voice='default'):
voices = self.getSetting('voices')
v = voices.get(voice, {})
lang = v.getLocale()
dialect = v.getDialect()
if dialect and len(str(dialect)) == 2:
lang = "%s_%s" % (lang, dialect.upper())
return lang
def _getGeneral(self, profile=None):
"""Get from the active backend the general settings for
the current profile"""
if profile is None:
profile = self.profile
self.general = self._backend.getGeneral(profile)
def _getPronunciations(self, profile=None):
"""Get from the active backend the pronunciations settings for
the current profile"""
if profile is None:
profile = self.profile
self.pronunciations = self._backend.getPronunciations(profile)
def _getKeybindings(self, profile=None):
"""Get from the active backend the keybindings settings for
the current profile"""
if profile is None:
profile = self.profile
self.keybindings = self._backend.getKeybindings(profile)
def _loadProfileSettings(self, profile=None):
"""Get from the active backend all the settings for the current
profile and store them in the object's attributes.
        A profile can be passed as a parameter. This can be useful when
        changing from one profile to another."""
if profile is None:
profile = self.profile
self.profileGeneral = self.getGeneralSettings(profile) or {}
self.profilePronunciations = self.getPronunciations(profile) or {}
self.profileKeybindings = self.getKeybindings(profile) or {}
def _mergeSettings(self):
"""Update the changed values on the profile settings
over the current and active settings"""
profileGeneral = getRealValues(self.profileGeneral) or {}
self.general.update(profileGeneral)
self.pronunciations.update(self.profilePronunciations)
self.keybindings.update(self.profileKeybindings)
def _enableAccessibility(self):
"""Enables the GNOME accessibility flag. Users need to log out and
then back in for this to take effect.
Returns True if an action was taken (i.e., accessibility was not
set prior to this call).
"""
alreadyEnabled = self.isAccessibilityEnabled()
if not alreadyEnabled:
self.setAccessibility(True)
return not alreadyEnabled
def isAccessibilityEnabled(self):
if not _proxy:
return False
return _proxy.Get('(ss)', 'org.a11y.Status', 'IsEnabled')
def setAccessibility(self, enable):
if not _proxy:
return False
vEnable = GLib.Variant('b', enable)
_proxy.Set('(ssv)', 'org.a11y.Status', 'IsEnabled', vEnable)
def isScreenReaderServiceEnabled(self):
"""Returns True if the screen reader service is enabled. Note that
this does not necessarily mean that Orca (or any other screen reader)
is running at the moment."""
if not _proxy:
return False
return _proxy.Get('(ss)', 'org.a11y.Status', 'ScreenReaderEnabled')
def setStartingProfile(self, profile=None):
if profile is None:
profile = settings.profile
self._backend._setProfileKey('startingProfile', profile)
def getProfile(self):
return self.profile
def setProfile(self, profile='default', updateLocale=False):
"""Set a specific profile as the active one.
        The settings from that profile are also loaded and the
        current settings updated with them."""
oldVoiceLocale = self.getVoiceLocale('default')
self.profile = profile
self._loadProfileSettings(profile)
self._mergeSettings()
self._setSettingsRuntime(self.general)
if not updateLocale:
return
newVoiceLocale = self.getVoiceLocale('default')
if oldVoiceLocale != newVoiceLocale:
orca_i18n.setLocaleForNames(newVoiceLocale)
orca_i18n.setLocaleForMessages(newVoiceLocale)
orca_i18n.setLocaleForGUI(newVoiceLocale)
def getPreferences(self, profile='default'):
general = self.getGeneralSettings(profile)
pronunciations = self.getPronunciations(profile)
keybindings = self.getKeybindings(profile)
return (general, pronunciations, keybindings)
def _setSettingsRuntime(self, settingsDict):
for key, value in list(settingsDict.items()):
setattr(settings, str(key), value)
self._getCustomizedSettings()
for key, value in list(self.customizedSettings.items()):
setattr(settings, str(key), value)
self._setPronunciationsRuntime()
def _setPronunciationsRuntime(self):
pronunciation_dict.pronunciation_dict = {}
for pron in self.pronunciations:
key, value = self.pronunciations[pron]
if key and value:
pronunciation_dict.setPronunciation(key, value)
def getGeneralSettings(self, profile='default'):
"""Return the current general settings.
        These settings come from updating the default settings
        with the profile's ones"""
generalDict = self._backend.getGeneral(profile)
self._setSettingsRuntime(generalDict)
return generalDict
def getPronunciations(self, profile='default'):
"""Return the current pronunciations settings.
        These settings come from updating the default settings
        with the profile's ones"""
return self._backend.getPronunciations(profile)
def getKeybindings(self, profile='default'):
"""Return the current keybindings settings.
Those settings comes from updating the default settings
with the profiles' ones"""
return self._backend.getKeybindings(profile)
def _setProfileGeneral(self, general):
"""Set the changed general settings from the defaults' ones
as the profile's."""
self.profileGeneral = {}
for key, value in list(general.items()):
if key in settings.excludeKeys:
continue
elif key == 'profile':
self.profileGeneral[key] = value
elif value != self.defaultGeneralValues.get(key):
self.profileGeneral[key] = value
elif self.general.get(key) != value:
self.profileGeneral[key] = value
def _setProfilePronunciations(self, pronunciations):
"""Set the changed pronunciations settings from the defaults' ones
as the profile's."""
self.profilePronunciations = self.defaultPronunciations.copy()
self.profilePronunciations.update(pronunciations)
def _setProfileKeybindings(self, keybindings):
"""Set the changed keybindings settings from the defaults' ones
as the profile's."""
self.profileKeybindings = self.defaultKeybindings.copy()
self.profileKeybindings.update(keybindings)
def saveSettings(self, general, pronunciations, keybindings):
"""Let the active backend to store the default settings and
the profiles' ones."""
# Assign current profile
_profile = general.get('profile', settings.profile)
currentProfile = _profile[1]
self.profile = currentProfile
# Elements that need to stay updated in main configuration.
self.defaultGeneral['startingProfile'] = general.get('startingProfile',
_profile)
self._setProfileGeneral(general)
self._setProfilePronunciations(pronunciations)
self._setProfileKeybindings(keybindings)
self._backend.saveProfileSettings(self.profile,
self.profileGeneral,
self.profilePronunciations,
self.profileKeybindings)
return self._enableAccessibility()
def _adjustBindingTupleValues(self, bindingTuple):
"""Converts the values of bindingTuple into KeyBinding-ready values."""
keysym, mask, mods, clicks = bindingTuple
if not keysym:
bindingTuple = ('', 0, 0, 0)
else:
bindingTuple = (keysym, int(mask), int(mods), int(clicks))
return bindingTuple
def overrideKeyBindings(self, script, scriptKeyBindings):
keybindingsSettings = self.getKeybindings(self.profile)
for handlerString, bindingTuples in list(keybindingsSettings.items()):
handler = script.inputEventHandlers.get(handlerString)
if not handler:
continue
scriptKeyBindings.removeByHandler(handler)
for bindingTuple in bindingTuples:
bindingTuple = self._adjustBindingTupleValues(bindingTuple)
keysym, mask, mods, clicks = bindingTuple
newBinding = KeyBinding(keysym, mask, mods, handler, clicks)
scriptKeyBindings.add(newBinding)
return scriptKeyBindings
def isFirstStart(self):
"""Check if the firstStart key is True or false"""
return self._backend.isFirstStart()
def setFirstStart(self, value=False):
"""Set firstStart. This user-configurable settting is primarily
intended to serve as an indication as to whether or not initial
configuration is needed."""
self._backend.setFirstStart(value)
def availableProfiles(self):
"""Get available profiles from active backend"""
return self._backend.availableProfiles()
def loadAppSettings(self, script):
"""Load the users application specific settings for an app.
Note that currently the settings manager does not manage
application settings in Orca; instead the old/"classic" files
are used. This is scheduled to change.
Arguments:
- script: the current active script.
"""
self._loadProfileSettings()
script.voices = self.getSetting('voices')
app = script.app
moduleName = _scriptManager.getModuleName(app)
if not moduleName:
return
module = None
for package in self.settingsPackages:
name = '.'.join((package, moduleName))
debug.println(debug.LEVEL_FINEST, "Looking for %s.py" % name)
try:
module = importlib.import_module(name)
except ImportError:
debug.println(
debug.LEVEL_FINEST, "Could not import %s.py" % name)
continue
try:
imp.reload(module)
except:
debug.println(debug.LEVEL_FINEST, "Could not load %s.py" % name)
module = None
else:
debug.println(debug.LEVEL_FINEST, "Loaded %s.py" % name)
break
if not module:
return
if self.profile == 'default':
appVoices = self.getSetting('voices')
for voiceType, voiceDef in list(appVoices.items()):
script.voices[voiceType].update(voiceDef)
else:
self.setSetting('voices', script.voices)
keybindings = getattr(module, 'overrideAppKeyBindings', None)
if keybindings:
script.overrideAppKeyBindings = keybindings
script.keyBindings = keybindings(script, script.keyBindings)
pronunciations = getattr(module, 'overridePronunciations', None)
if pronunciations:
script.overridePronunciations = pronunciations
script.app_pronunciation_dict = \
pronunciations(script, script.app_pronunciation_dict)
def getVoiceKey(voice):
voicesKeys = getattr(settings, 'voicesKeys')
for key in list(voicesKeys.keys()):
if voicesKeys[key] == voice:
return key
return ""
def getValueForKey(prefsDict, key):
need2repr = ['brailleEOLIndicator', 'brailleContractionTable',
'brailleRequiredStateString', 'enabledBrailledTextAttributes',
'enabledSpokenTextAttributes', 'speechRequiredStateString',
'speechServerFactory', 'presentDateFormat',
'presentTimeFormat']
value = None
if key in prefsDict:
if isinstance(prefsDict[key], str):
if key in need2repr:
value = "\'%s\'" % prefsDict[key]
elif key == 'voices':
key = getVoiceKey(key)
value = prefsDict[key]
else:
try:
value = getattr(settings, prefsDict[key])
except:
debug.println(debug.LEVEL_SEVERE,
"Something went wrong with key: %s" % key)
debug.printStack(debug.LEVEL_FINEST)
else:
value = prefsDict[key]
return value
def getRealValues(prefs):
"""Get the actual values for any constant stored on a
general settings dictionary.
prefs is a dictionary with the userCustomizableSettings keys
and values."""
#for key in prefs.keys():
# prefs[key] = getValueForKey(prefs, key)
return prefs
_manager = SettingsManager()
def getManager():
return _manager
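# A minimal usage sketch (hypothetical calls; assumes the full Orca runtime
# and a configured settings backend are available):
#
#   manager = getManager()
#   general, pronunciations, keybindings = manager.getPreferences('default')
#   manager.setProfile('default', updateLocale=True)
#   manager.saveSettings(general, pronunciations, keybindings)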
|
"""
Test that updating an alias saves it to the roster.
"""
import dbus
from servicetest import EventPattern, call_async
from gabbletest import acknowledge_iq, exec_test, make_result_iq
import constants as cs
import ns
def test(q, bus, conn, stream):
conn.Connect()
_, event, event2 = q.expect_many(
EventPattern('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED]),
EventPattern('stream-iq', to=None, query_ns='vcard-temp',
query_name='vCard'),
EventPattern('stream-iq', query_ns=ns.ROSTER))
acknowledge_iq(stream, event.stanza)
acknowledge_iq(stream, event2.stanza)
while True:
event = q.expect('dbus-signal', signal='NewChannel')
path, type, handle_type, handle, suppress_handler = event.args
if type != cs.CHANNEL_TYPE_CONTACT_LIST:
continue
chan_name = conn.InspectHandles(handle_type, [handle])[0]
if chan_name == 'subscribe':
break
# request subscription
chan = bus.get_object(conn.bus_name, path)
group_iface = dbus.Interface(chan, cs.CHANNEL_IFACE_GROUP)
assert group_iface.GetMembers() == []
handle = conn.RequestHandles(1, ['bob@foo.com'])[0]
group_iface.AddMembers([handle], '')
event = q.expect('stream-iq', iq_type='set', query_ns=ns.ROSTER)
item = event.query.firstChildElement()
acknowledge_iq(stream, event.stanza)
call_async(q, conn.Aliasing, 'RequestAliases', [handle])
event = q.expect('stream-iq', iq_type='get',
query_ns='http://jabber.org/protocol/pubsub',
to='bob@foo.com')
result = make_result_iq(stream, event.stanza)
pubsub = result.firstChildElement()
items = pubsub.addElement('items')
items['node'] = 'http://jabber.org/protocol/nick'
item = items.addElement('item')
item.addElement('nick', 'http://jabber.org/protocol/nick',
content='Bobby')
stream.send(result)
event, _ = q.expect_many(
EventPattern('stream-iq', iq_type='set', query_ns=ns.ROSTER),
EventPattern('dbus-return', method='RequestAliases',
value=(['Bobby'],)))
item = event.query.firstChildElement()
assert item['jid'] == 'bob@foo.com'
assert item['name'] == 'Bobby'
if __name__ == '__main__':
exec_test(test)
|
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numpy as np
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim import matutils
from gensim.test.utils import datapath, get_tmpfile
class TestRpModel(unittest.TestCase):
def setUp(self):
self.corpus = MmCorpus(datapath('testcorpus.mm'))
def test_transform(self):
# create the transformation model
# HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
np.random.seed(13)
model = rpmodel.RpModel(self.corpus, num_topics=2)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = np.array([-0.70710677, 0.70710677])
self.assertTrue(np.allclose(vec, expected)) # transformed entries must be equal up to sign
def test_persistence(self):
fname = get_tmpfile('gensim_models.tst')
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def test_persistence_compressed(self):
fname = get_tmpfile('gensim_models.tst.gz')
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
from pycp2k.inputsection import InputSection
class _xwpbe2(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Scale_x = None
self.Scale_x0 = None
self.Omega = None
self._name = "XWPBE"
self._keywords = {'Scale_x0': 'SCALE_X0', 'Omega': 'OMEGA', 'Scale_x': 'SCALE_X'}
self._attributes = ['Section_parameters']
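# Minimal usage sketch (hypothetical values): instantiate the section and
# assign its keywords before generating the CP2K input.
#
#   section = _xwpbe2()
#   section.Section_parameters = "T"
#   section.Scale_x = 1.0
#   section.Scale_x0 = 0.0
#   section.Omega = 0.11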
|
import SocketServer
import threading
import time
class MiTcpHandler(SocketServer.BaseRequestHandler):
# override the handle method
def handle(self):
while self.data[0] != ".salir":
# try to receive data
try:
self.data.append(self.request.recv(1024))
self.request.send("listo")
del self.data[0]
time.sleep(0.1) # wait 0.1 seconds before reading again
# if there was an error, report it and end the handler
except:
print "The client disconnected or an error occurred"
self.data[0]=".salir"
class MiTcpHandler2(SocketServer.BaseRequestHandler): # for use with databases
# override the handle method
def handle(self):
while self.data[0] != ".salir":
# try to receive data
try:
self.data.append(self.request.recv(1024))
self.request.send("listo")
self.data[0]=".salir"
# if there was an error, report it and end the handler
except Exception, e:
print "The client disconnected or an error occurred"
print e
self.data[0]=".salir"
class ThreadServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
def serverSock(host,port,welcome="Server corriendo..",data=[""]):
#host & port
#creo el server
MiTcpHandler.data=data
server = ThreadServer((host,port),MiTcpHandler)
# create a server thread
server_thread = threading.Thread(target=server.serve_forever)
# start the thread
server_thread.start()
print welcome
def serverSock2(host,port,welcome="Server corriendo..",data=[""]):
#host & port
#creo el server
MiTcpHandler2.data=data
server = ThreadServer((host,port),MiTcpHandler2)
# create a server thread
server_thread = threading.Thread(target=server.serve_forever)
# start the thread
server_thread.start()
print welcome
def setCookie(cookie):
print "<script type='text/javascript'>"
print "document.cookie='"+cookie+"'"
print "</script>"
def getCookie():
import os
return os.environ["HTTP_COOKIE"]
def clienteSock(host,port,msj="",welcome="Ingrese un mensaje o salir para terminar"):
import socket
# create a socket and connect
sock= socket.socket()
sock.connect((host,port))
enviar=True
print welcome
while enviar==True:
# try to send the message
try:
sock.send(msj)
time.sleep(0.1)
print sock.recv(1024)
break
# if it fails, bail out
except Exception, e:
print "the message could not be sent"
print e
enviar=False
sock.close() # remember to close the socket
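# Minimal demo pairing the server and client helpers (hypothetical host/port):
#
#   data = [""]
#   serverSock("localhost", 9999, data=data)
#   clienteSock("localhost", 9999, msj="hola")
#   clienteSock("localhost", 9999, msj=".salir")  # stops the handler loop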
def sendEmail(rem,dest,password,mensaje,asunto="", remAlias="",destAlias="",debug=False):
# Send mail through Gmail with Python
# www.pythondiario.com
try:
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# me == my email address
# you == recipient's email address
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = asunto
msg['From'] = rem
msg['To'] = dest
# Create the body of the message (a plain-text and an HTML version).
text = "Hi!\nHow are you?\nHere is the link you wanted:\nhttp://www.python.org"
'''
html = """\
<html>
<head></head>
<body>
<p>Hi!<br>
How are you?<br>
Here is the <a href="http://www.python.org">link</a> you wanted.
</p>
</body>
</html>
"""
'''
html = """\
<html>
<head>
<meta charset="utf-8">
</head>
<body>
"""+mensaje+"""
</p>
</body>
</html>
"""
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
# Send the message via local SMTP server.
mail = smtplib.SMTP('smtp.gmail.com', 587)
mail.ehlo()
mail.starttls()
mail.login(rem, password)
mail.sendmail(rem, dest, msg.as_string())
mail.quit()
except Exception as e:
# fall back to emitting a PHP mail() call for the host to execute
print "<?php mail('"+dest+"','"+asunto+"','"+mensaje+"','From: "+rem+"'); ?>"
if debug==True:
print e, " this may be the server's fault"
print "Attempted the send via PHP instead"
def normalizar(v):
# try to evaluate v as a Python literal; fall back to the raw string
try:
exec("a="+v)
return a
except:
return v
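# Examples: normalizar("3") returns the int 3, normalizar("[1, 2]") returns
# the list [1, 2], and normalizar("hola mundo") returns the string unchanged.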
def zAPI(linea,vars):
# expose each entry of vars as a local variable for the generated code
for elem in vars:
exec(elem+"=vars['"+elem+"']")
if len(linea)<=200:
c=0
mark=0
mark2=0 # for loops and conditionals
codigo=""
nivel=0
condicion=[]
funciones=0
enlace=False
lineas=0
__r=None
while c<len(linea):
# if there are pending conditions
if len(condicion)>1:
if condicion[-1]=="==" or condicion[-1]=="!=" or condicion[-1]==">=" or condicion[-1]=="<=" or condicion[-1]=="<" or condicion[-1]==">" or condicion[-1]=="in" or condicion[-1]=="for " or condicion[-1]=="while ":
if c>0 and c<=3:
if linea[c]!="=" and linea[c]!="!" and linea[c]!="<" and linea[c]!=">" and linea[c]!=" ":
if linea[c-2]=="=" and linea[c-1]=="=":
#if len()
pass
elif linea[c-2]=="!" and linea[c-1]=="=":
pass
elif linea[c-2]==">" and linea[c-1]=="=":
pass
elif linea[c-2]=="<" and linea[c-1]=="=":
pass
elif linea[c-2]=="<" and linea[c-1]!="=":
pass
elif linea[c-2]==">" and linea[c-1]!="=":
pass
# it's an assignment
elif linea[c-4]!=">" and linea[c-3]!="!" and linea[c-2]!="<" and linea[c-1]=="=":
pass
pass
pass
#------------------------------
if c>=3 and c<=5:
if linea[c-1]!=" " and linea[c]!="=" and linea[c]!="!" and linea[c]!="<" and linea[c]!=">" and linea[c]!=" ":
if linea[c-2]=="=" and linea[c-1]=="=":
pass
elif linea[c-2]=="!" and linea[c-1]=="=":
pass
elif linea[c-2]==">" and linea[c-1]=="=":
pass
elif linea[c-2]=="<" and linea[c-1]=="=":
pass
elif linea[c-2]=="<" and linea[c-1]!="=":
pass
elif linea[c-2]==">" and linea[c-1]!="=":
pass
#es una asignacion
elif linea[c-4]!=">" and linea[c-3]!="!" and linea[c-2]!="<" and linea[c-1]=="=":
pass
pass
elif linea[c-1]==" " and linea[c]!="=" and linea[c]!="!" and linea[c]!="<" and linea[c]!=">" and linea[c]!=" ":
if linea[c-3]=="=" and linea[c-2]=="=":
pass
elif linea[c-3]=="!" and linea[c-2]=="=":
pass
elif linea[c-3]==">" and linea[c-2]=="=":
pass
elif linea[c-3]=="<" and linea[c-2]=="=":
pass
elif linea[c-3]=="<" and linea[c-2]!="=":
pass
elif linea[c-3]==">" and linea[c-2]!="=":
pass
pass
# for the above and the while loop
if c>=5 and c<=9:
if linea[c-1]=="=" and linea[c]=="=":
pass
elif linea[c-1]=="!" and linea[c]=="=":
pass
elif linea[c-1]==">" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]!="=":
pass
elif linea[c-1]==">" and linea[c]!="=":
pass
# uses "in"
elif linea[c-3]==" " and linea[c-2]=="i" and linea[c-1]=="n" and linea[c]==" ":
pass
# it's an assignment
elif linea[c-1]!=">" and linea[c-1]!="!" and linea[c-1]!="<" and linea[c]=="=":
pass
pass
# for the "for" loop
elif c>=9:
if linea[c-1]=="=" and linea[c]=="=":
pass
elif linea[c-1]=="!" and linea[c]=="=":
pass
elif linea[c-1]==">" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]!="=":
pass
elif linea[c-1]==">" and linea[c]!="=":
pass
# uses "in"
elif linea[c-3]==" " and linea[c-2]=="i" and linea[c-1]=="n" and linea[c]==" ":
pass
# it's an assignment
elif linea[c-1]!=">" and linea[c-1]!="!" and linea[c-1]!="<" and linea[c]=="=":
pass
pass
# for the "while" and "for" loops
elif c>=11 and c<13:
if linea[c-1]=="=" and linea[c]=="=":
pass
elif linea[c-1]=="!" and linea[c]=="=":
pass
elif linea[c-1]==">" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]!="=":
pass
elif linea[c-1]==">" and linea[c]!="=":
pass
# uses "in"
elif linea[c-3]==" " and linea[c-2]=="i" and linea[c-1]=="n" and linea[c]==" ":
if "for " in linea[:c-3]:
pass
else:
pass
pass
# it's an assignment
elif linea[c-1]!=">" and linea[c-1]!="!" and linea[c-1]!="<" and linea[c]=="=":
if "while " in linea[:c-3]:
pass
else:
pass
# for the "while" loop with "in"
elif c>=13:
if linea[c-1]=="=" and linea[c]=="=":
pass
elif linea[c-1]=="!" and linea[c]=="=":
pass
elif linea[c-1]==">" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]!="=":
pass
elif linea[c-1]==">" and linea[c]!="=":
pass
# uses "in"
elif linea[c-3]==" " and linea[c-2]=="i" and linea[c-1]=="n" and linea[c]==" ":
if "for " in linea[:c-3]:
pass
else:
pass
pass
# it's an assignment
elif linea[c-1]!=">" and linea[c-1]!="!" and linea[c-1]!="<" and linea[c]=="=":
if "while " in linea[:c-3]:
pass
else:
pass
else:
if linea[c]==";":
if nivel==0:
codigo.append(linea[mark:c])
mark=c
lineas+=1
else:
tab=" "*condicion
codigo.append(tab+linea[mark:c])
lineas+=1
elif linea[c]=="]":
pass
elif linea[c]=="}":
pass
elif linea[c]=="]]":
pass
elif linea[c]=="}}":
pass
else:
if linea[c]==";":
pass
elif linea[c]=="]":
pass
elif linea[c]=="}":
pass
elif linea[c]=="]]":
pass
elif linea[c]=="}}":
pass
else:
# for a boolean test
if c>0 and c<=3:
if linea[c]!="=" and linea[c]!="!" and linea[c]!="<" and linea[c]!=">" and linea[c]!=" ":
if linea[c-2]=="=" and linea[c-1]=="=":
if len(linea)<=4:
exec("__r="+linea[mark:c+1])
elif linea[c-2]=="!" and linea[c-1]=="=":
if len(linea)<=4:
exec("__r="+linea[mark:c+1])
elif linea[c-2]==">" and linea[c-1]=="=":
if len(linea)<=4:
exec("__r="+linea[mark:c+1])
elif linea[c-2]=="<" and linea[c-1]=="=":
if len(linea)<=4:
exec("__r="+linea[mark:c+1])
elif linea[c-2]=="<" and linea[c-1]!="=":
if len(linea)<=4:
exec("__r="+linea[mark:c+1])
elif linea[c-2]==">" and linea[c-1]!="=":
if len(linea)<=4:
exec("__r="+linea[mark:c+1])
# it's an assignment
elif linea[c-4]!=">" and linea[c-3]!="!" and linea[c-2]!="<" and linea[c-1]=="=":
if len(linea)<=4:
exec("__r="+linea[mark:c+1])
pass
pass
#------------------------------
if c>=3 and c<=7:
if linea[c-1]!=" " and linea[c]!="=" and linea[c]!="!" and linea[c]!="<" and linea[c]!=">" and linea[c]!=" ":
if linea[c-2]=="=" and linea[c-1]=="=":
exec("__r="+linea[mark:c+1])
elif linea[c-2]=="!" and linea[c-1]=="=":
exec("__r="+linea[mark:c+1])
elif linea[c-2]==">" and linea[c-1]=="=":
exec("__r="+linea[mark:c+1])
elif linea[c-2]=="<" and linea[c-1]=="=":
exec("__r="+linea[mark:c+1])
elif linea[c-2]=="<" and linea[c-1]!="=":
exec("__r="+linea[mark:c+1])
elif linea[c-2]==">" and linea[c-1]!="=":
exec("__r="+linea[mark:c+1])
# it's an assignment
elif linea[c-4]!=">" and linea[c-3]!="!" and linea[c-2]!="<" and linea[c-1]=="=":
exec("__r="+linea[mark:c+1])
pass
# for the above and the while loop
if c>7 and c<=10:
if linea[c-1]=="=" and linea[c]=="=":
pass
elif linea[c-1]=="!" and linea[c]=="=":
pass
elif linea[c-1]==">" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]!="=":
pass
elif linea[c-1]==">" and linea[c]!="=":
pass
# uses "in"
elif " in " in linea[mark:c-2] and linea[c-2]==" " and linea[c]!=" ":
nivel+=1
if condicion==[]:
codigo+="if "+linea[mark:c-2]+":\n"
mark=c
else:
if condicion[-1]=="if":
pass
elif condicion[-1]=="elif":
pass
mark=c
condicion.append("for")
# it's an assignment
elif linea[c-1]!=">" and linea[c-1]!="!" and linea[c-1]!="<" and linea[c]=="=":
pass
else:
if nivel>0:
tab=" "*nivel
if linea[c]==":":
codigo+=tab+linea[mark-1:c]+"("
c+=1
mark=c
funciones+=1
if linea[c]==";":
if funciones>0:
codigo+=tab+linea[mark:c]+")"
else:
codigo+="\n"
else:
pass
# for the "for" loop
elif c>10 and c<=13:
if linea[c-1]=="=" and linea[c]=="=":
pass
elif linea[c-1]=="!" and linea[c]=="=":
pass
elif linea[c-1]==">" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]!="=":
pass
elif linea[c-1]==">" and linea[c]!="=":
pass
# uses "in"
elif linea[c-3]==" " and linea[c-2]=="i" and linea[c-1]=="n" and linea[c]==" ":
pass
# it's an assignment
elif linea[c-1]!=">" and linea[c-1]!="!" and linea[c-1]!="<" and linea[c]=="=":
pass
else:
if nivel>0:
pass
# for the "while" and "for" loops
elif c>13:
if linea[c-1]=="=" and linea[c]=="=":
pass
elif linea[c-1]=="!" and linea[c]=="=":
pass
elif linea[c-1]==">" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]=="=":
pass
elif linea[c-1]=="<" and linea[c]!="=":
pass
elif linea[c-1]==">" and linea[c]!="=":
pass
# uses "in"
elif linea[c-3]==" " and linea[c-2]=="i" and linea[c-1]=="n" and linea[c]==" ":
if "for " in linea[:c-3]:
pass
else:
pass
pass
# it's an assignment
elif linea[c-1]!=">" and linea[c-1]!="!" and linea[c-1]!="<" and linea[c]=="=":
if "while " in linea[:c-3]:
pass
else:
pass
else:
if nivel>0:
pass
# for the "while" loop with "in"
c+=1
if funciones>0:
codigo+=linea[mark:c]+")"
try:
print "-----------"
print codigo
print "-----------"
exec(codigo)
return __r
except Exception,e:
print e
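# Hypothetical invocation sketch: zAPI takes a ';'-separated source string
# plus a dict of variables to expose to the generated code, e.g.
#
#   zAPI("x=1;y=2;", {"z": 10})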
def urlParamDecod(param):
# decode a few common URL-encoded characters
return param.replace("+"," ").replace("%40","@").replace("®","reg")
def redirect(url,tiempo=0):
print "<META HTTP-EQUIV='Refresh' CONTENT='"+str(tiempo)+"; URL="+url+"'>";
def charset(cha="utf-8"):
print "<meta charset='"+cha+"'>"
def zform(db,action,controller="post.py",placeholder="",submit="Send",i=None,style="",display="block",_class="ff pad-1",ignorar=[],confirmar=[],valores={},clases={}):
try:
form=style+"<form name='_FORM"+action+"' id='_FORM"+action+"' action='"+controller+"'"+(" class='text-center "+_class+"'" if display=="block-center" or display=="inline-block-center" else "")+(" class='text-right "+_class+"'" if display=="block-right" or display=="inline-block-right" else "")+(" class='text-justify "+_class+"'" if display=="block-justify" or display=="inline-block-justify" else "")+" method='post'>"
c=0
id_elem=1
d=""
d2=""
script=""
for elem in db.campos[db.seleccion]:
value=""
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
if elem[0] in valores:
value="value='"+str(valores[elem[0]])+"'"
else:
value=""
if elem[0] in clases:
if elem[0] in ignorar:
_class=" class='"+clases[elem[0]]+" hidden'"
else:
_class=" class='"+clases[elem[0]]+"'"
else:
if elem[0] in ignorar:
_class=" class='hidden'"
else:
_class=""
if display=="block" or display=="block-center" or display=="block-rigth" or display=="block-justify":
d="<br "+_class+">"
d2="<br "+_class+">"
elif display=="inline-block" or display=="inline-block-center" or display=="inline-block-rigth" or display=="inline-block-justify":
d2="<br "+_class+">"
if elem[1]==db.str:
if elem[0] in confirmar:
if placeholder!="":
form+="<div id='zform"+action+str(id_elem)+"'><input type='text' "+value+_class+" name='"+elem[0]+"' id='"+action+elem[0]+"'></div>"+d
id_elem+=1
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='text' placeholder='"+placeholder[c]+elem[0]+"'"+value+_class+" name='"+elem[0]+"' id='"+action+elem[0]+"'></div>"+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='text' placeholder='"+placeholder+elem[0]+"'"+value+_class+" name='"+elem[0]+"' id='"+action+elem[0]+"'></div>"+d2
id_elem+=2
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<div id='zform"+action+str(id_elem)+"'><input type='text' "+value+_class+" id='_CONF"+action+elem[0]+"'><span id='_CONFSPAN"+action+elem[0]+" class='hidden bg-ubuntu_green white' >Datos confirmados</span><span id='_CONFSPAN2"+action+elem[0]+" class='hidden bg-ubuntu_red white'> La confirmación de "+elem[0]+" no coincide </span></div>"+d
id_elem+=1
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+"> Confirmar "+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='text' placeholder='"+placeholder[c]+elem[0]+"'"+value+_class+" id='_CONF"+action+elem[0]+"'></div><span id='_CONFSPAN"+action+elem[0]+" class='hidden bg-ubuntu_green white' >Datos confirmados</span><span id='_CONFSPAN2"+action+elem[0]+" class='hidden bg-ubuntu_red white'> La confirmación de "+elem[0]+" no coincide </span>"+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+"> Confirmar "+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='text' placeholder='Confirmar "+placeholder+elem[0]+"'"+value+_class+" id='_CONF"+action+elem[0]+"'></div><span id='_CONFSPAN"+action+elem[0]+" class='hidden bg-ubuntu_green white' >Datos confirmados</span><span id='_CONFSPAN2"+action+elem[0]+" class='hidden bg-ubuntu_red white'> La confirmación de "+elem[0]+" no coincide </span>"+d2
id_elem+=2
script+="""
$('#_CONF"""+action+elem[0]+"""').keyup(function(){
if (document.getElementById('"""+action+elem[0]+"""').value== document.getElementById('_CONF"""+action+elem[0]+"""').value){$('#_CONFSPAN"""+action+elem[0]+"""').removeClass('hidden');$('#_CONFSPAN2"""+action+elem[0]+"""').addClass('hidden');$('#_FORM"""+action+"""').val(true) }
else{
$('#_CONFSPAN2"""+action+elem[0]+"""').removeClass('hidden');$('#_CONFSPAN"""+action+elem[0]+"""').addClass('hidden');$('#_FORM"""+action+"""').val(false) }
})
"""
else:
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<div id='zform"+action+str(id_elem)+"'><input type='text' "+value+_class+" name='"+elem[0]+"'></div>"+d
id_elem+=1
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='text' placeholder='"+placeholder[c]+elem[0]+"'"+value+_class+" name='"+elem[0]+"'></div>"+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='text' placeholder='"+placeholder+elem[0]+"'"+value+_class+" name='"+elem[0]+"'></div>"+d2
id_elem+=2
elif elem[1]==db.password:
if elem[0] in confirmar:
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<div id='zform"+action+str(id_elem)+"'><input type='password' "+value+_class+" name='"+elem[0]+"' id='"+action+elem[0]+"'></div>"+d
id_elem+=1
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='password' placeholder='"+placeholder[c]+elem[0]+"'"+value+_class+" name='"+elem[0]+"' id='"+action+elem[0]+"'></div>"+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='password' placeholder='"+placeholder+elem[0]+"'"+value+_class+" name='"+elem[0]+"' id='"+action+elem[0]+"'></div>"+d2
id_elem+=2
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<div id='zform"+action+str(id_elem)+"'><input type='password' "+value+_class+" id='_CONF"+action+elem[0]+"'><span id='_CONFSPAN"+action+elem[0]+"'' class='hidden bg-ubuntu_green white' >Datos confirmados</span><span id='_CONFSPAN2"+action+elem[0]+"'' class='hidden bg-ubuntu_red white'> La confirmación de "+elem[0]+" no coincide </span></div>"+d
id_elem+=1
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+"> Confirmar "+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='password' placeholder='"+placeholder[c]+elem[0]+"'"+value+_class+" id='_CONF"+action+elem[0]+"'></div><span id='_CONFSPAN"+action+elem[0]+"' class='hidden bg-ubuntu_green white' >Datos confirmados</span><span id='_CONFSPAN2"+action+elem[0]+"' class='hidden bg-ubuntu_red white'> La confirmación de "+elem[0]+" no coincide </span>"+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+"> Confirmar "+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='password' placeholder='Confirmar "+placeholder+elem[0]+"'"+value+_class+" id='_CONF"+action+elem[0]+"'></div><span id='_CONFSPAN"+action+elem[0]+"' class='hidden bg-ubuntu_green white' >Datos confirmados</span><span id='_CONFSPAN2"+action+elem[0]+"' class='hidden bg-ubuntu_red white'> La confirmación de "+elem[0]+" no coincide </span>"+d2
id_elem+=2
script+="""
$('#_CONF"""+action+elem[0]+"""').keyup(function(){
if (document.getElementById('"""+action+elem[0]+"""').value== document.getElementById('_CONF"""+action+elem[0]+"""').value){$('#_CONFSPAN"""+action+elem[0]+"""').removeClass('hidden');$('#_CONFSPAN2"""+action+elem[0]+"""').addClass('hidden');$('#_FORM"""+action+"""').val(true) }
else{
$('#_CONFSPAN2"""+action+elem[0]+"""').removeClass('hidden');$('#_CONFSPAN"""+action+elem[0]+"""').addClass('hidden');$('#_FORM"""+action+"""').val(false) }
})
"""
else:
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<div id='zform"+action+str(id_elem)+"'><input type='text' "+value+_class+" name='"+elem[0]+"'></div>"+d
id_elem+=1
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='text' placeholder='"+placeholder[c]+elem[0]+"'"+value+_class+" name='"+elem[0]+"'></div>"+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<div id='zform"+action+str(id_elem+1)+"'><input type='text' placeholder='"+placeholder+elem[0]+"'"+value+_class+" name='"+elem[0]+"'></div>"+d2
id_elem+=2
elif elem[1]==db.doc:
if placeholder!="":
if i!=None:
value=str(db.obtenerCampo(i,elem[0]))
else:
value=""
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<textarea name='"+elem[0]+"'"+_class+">"+value+"</textarea>"+d2
id_elem+=2
else:
if i!=None:
value=str(db.obtenerCampo(i,elem[0]))
else:
value=""
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<textarea placeholder='"+placeholder[c]+elem[0]+"' name='"+elem[0]+"' id='"+action+elem[0]+"'"+_class+">"+value+"</textarea>"+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+"</label>"+d+"<textarea placeholder='"+placeholder+elem[0]+"' name='"+elem[0]+"' id='"+action+elem[0]+"'"+_class+">"+value+"</textarea>"+d2
id_elem+=2
elif elem[1]==db.email:
if elem[7]!=-1:
maxi=' max="'+str(elem[7])+'"'
else:
maxi=""
if elem[6]!=0:
mini=' min="'+str(elem[6])+'"'
else:
mini=""
if elem[8]!=None:
step=' step="'+str(elem[8])+'"'
else:
step=""
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<div id="zform'+action+str(id_elem+1)+'"><input type="email" '+mini+maxi+step+value+_class+'name="'+elem[0]+'"></div>'+d2
id_elem+=2
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<div id="zform'+action+str(id_elem+1)+'"><input type="email" placeholder="'+placeholder[c]+'"'+mini+maxi+step+value+_class+'name="'+elem[0]+'"></div>'+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<div id="zform'+action+str(id_elem+1)+'"><input type="email" placeholder="'+placeholder+'"'+mini+maxi+step+value+_class+'name="'+elem[0]+'"></div>'+d2
id_elem+=2
elif elem[1]==db.int or elem[1]==db.float or elem[1]==db.long:
if elem[7]!=-1:
maxi=' max="'+str(elem[7])+'"'
else:
maxi=""
if elem[6]!=0:
mini=' min="'+str(elem[6])+'"'
else:
mini=""
if elem[8]!=None:
step=' step="'+str(elem[8])+'"'
else:
step=""
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'" type="number" '+mini+maxi+step+value+_class+'name="'+elem[0]+'">'+d2
id_elem+=2
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'" type="number" placeholder="'+placeholder[c]+'"'+mini+maxi+step+value+_class+'name="'+elem[0]+'">'+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'" type="number" placeholder="'+placeholder+'"'+mini+maxi+step+value+_class+'name="'+elem[0]+'">'+d2
id_elem+=2
elif elem[1]==db.datetime:
if elem[7]!=0:
maxi=' max="'+str(elem[7])+'"'
else:
maxi=""
if elem[6]!=0:
mini=' min="'+str(elem[6])+'"'
else:
mini=""
if elem[8]!=None:
step=' step="'+str(elem[8])+'"'
else:
step=""
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="datetime" onclick="mostrarCalendar(this,'yyyy/mm/dd hh:ii',true)" '''+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="datetime" onclick="mostrarCalendar(this,'yyyy/mm/dd hh:ii',true)" placeholder="'''+placeholder[c]+'"'+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="datetime" onclick="mostrarCalendar(this,'yyyy/mm/dd hh:ii',true)" placeholder="'''+placeholder+'"'+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
elif elem[1]==db.time:
if elem[7]!=0:
maxi=' max="'+str(elem[7])+'"'
else:
maxi=""
if elem[6]!=0:
mini=' min="'+str(elem[6])+'"'
else:
mini=""
if elem[8]!=None:
step=' step="'+str(elem[8])+'"'
else:
step=""
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="time" onclick="mostrarCalendar(this,'hh:ii')" '''+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="time" onclick="mostrarCalendar(this,'hh:ii')" placeholder="'''+placeholder[c]+'"'+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="time" onclick="mostrarCalendar(this,'hh:ii')" placeholder="'''+placeholder+'"'+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
elif elem[1]==db.date:
if elem[7]!=0:
maxi=' max="'+str(elem[7])+'"'
else:
maxi=""
if elem[6]!=0:
mini=' min="'+str(elem[6])+'"'
else:
mini=""
if elem[8]!=None:
step=' step="'+str(elem[8])+'"'
else:
step=""
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="date" class="date" onclick="mostrarCalendar(this,'yyyy/mm/dd')" '''+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="date" class="date" onclick="mostrarCalendar(this,'yyyy/mm/dd')" placeholder="'''+placeholder[c]+'"'+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<input id="'+str(id_elem+1)+'''" type="date" class="date" onclick="mostrarCalendar(this,'yyyy/mm/dd')" placeholder="'''+placeholder+'"'+mini+maxi+step+value+_class+' name="'+elem[0]+'">'+d2
id_elem+=2
elif elem[1]==db.file:
if elem[7]!=0:
maxi=' max="'+str(elem[7])+'"'
else:
maxi=""
if elem[6]!=0:
mini=' min="'+str(elem[6])+'"'
else:
mini=""
if elem[8]!=None:
step=' step="'+str(elem[8])+'"'
else:
step=""
if i!=None:
value=" value='"+str(db.obtenerCampo(i,elem[0]))+"'"
else:
value=""
if placeholder!="":
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<div id="zform'+str(id_elem+1)+'"><input type="file" '+mini+maxi+step+value+_class+'></div>'+d2
id_elem+=2
else:
if type(placeholder) ==list or type(placeholder) ==tuple:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<div id="zform'+str(id_elem+1)+'"><input type="file" placeholder="'+placeholder[c]+'"'+mini+maxi+step+value+_class+' name="'+elem[0]+'"><div>'+d2
id_elem+=2
else:
form+="<label id='zform"+action+str(id_elem)+"' "+_class+">"+elem[0]+'</label>'+d+'<div id="zform'+str(id_elem+1)+'"><input type="file" placeholder="'+placeholder+'"'+mini+maxi+step+value+_class+' name="'+elem[0]+'"></div>'+d2
id_elem+=2
c+=1
return form+"<input type='text' class='hidden' name='action' value='"+action+"'><input type='submit' id='"+str(id_elem+1)+"' class='btn white b-r5' value='"+submit+"'></form><script>"+script+"</script>"
except Exception, e:
print "Error en el zform"
print e
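# Hypothetical call sketch: db is expected to expose db.campos[db.seleccion]
# (a list of field tuples), type constants such as db.str and db.password,
# and an obtenerCampo(i, name) accessor, e.g.
#
#   print zform(users_db, "signup", controller="post.py",
#               confirmar=["password"])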
def INPUT(_type="text",_name="",_id="",_onclick="",_class="",_placeholder="",_value=""):
if _type=="time":
_onclick="mostrarCalendar(this,'hh:ii');"+_onclick
elif _type=="date":
_onclick="mostrarCalendar(this,'yyyy/mm/dd');"+_onclick
elif _type=="datetime":
_onclick="mostrarCalendar(this,'yyyy/mm/dd hh:ii',true);"+_onclick
return "<input "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' value="'''+_value+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"
def DIV(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<div "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</div>"
def FORM(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder="",_action=None,_method="post",_zaction=None):
return "<form "+''' method="'''+_method+'''"'''+''' onclick="'''+_onclick+'''"'''+(''' action="'''+_action+'''"''' if _action!=None else '')+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+(INPUT(_name="action",_class="hidden",_value=_zaction) if _zaction!=None else '')+"</form>"
def P(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<p "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</p>"
def B(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<B "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</B>"
def H1(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<h1 "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</h1>"
def H2(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<h2 "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</h2>"
def H3(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<h3 "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</h3>"
def H4(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<h4 "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</h4>"
def H5(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<h5 "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</h5>"
def H6(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<h6 "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</h6>"
def H7(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<h7 "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</h7>"
def SPAN(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<span "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</span>"
def ARTICLE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<article "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</article>"
def SECTION(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<section "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</section>"
def BODY(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<body "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</body>"
def HEAD(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<head "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</head>"
def HEADER(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<header "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</header>"
def FOOTER(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<footer "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</footer>"
def ASIDE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<aside "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</aside>"
def STYLE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<style "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</style>"
def CANVAS(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<canvas "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</canvas>"
def AUDIO(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder="", _src=""):
return "<audio "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' src="'''+_src+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</audio>"
def VIDEO(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder="", _src=""):
return "<video "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' src="'''+_src+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</video>"
def SCRIPT(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder="", _src=""):
return "<script "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' src="'''+_src+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</script>"
def NOSCRIPT(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder="", _src=""):
return "<noscript "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' src="'''+_src+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</noscript>"
def NAV(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<aside "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</nav>"
def TABLE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<table "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</table>"
def TR(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<tr "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</tr>"
def TD(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<td "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</td>"
def CAPTION(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<caption "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</caption>"
def COL(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<col "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</col>"
def COLGROUP(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<colgroup "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</colgroup>"
def MAIN(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<main "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</main>"
def LI(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<li "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</li>"
def UL(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<ul "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</ul>"
def DL(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<dl"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</dl>"
def DT(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<dt"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</dt>"
def DD(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<dd"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</dd>"
def FIGURE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<figure"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</figure>"
def FIGCAPTION(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<figcaption"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</figcaption>"
def STRONG(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<strong"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</strong>"
def EM(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<em"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</em>"
def S(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<S"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</S>"
def CITE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<cite"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</cite>"
def MENU(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<menu"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</menu>"
def COMMAND(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<command"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</command>"
def DETALIST(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<detalist"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</detalist>"
def METER(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<meter"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</meter>"
def PROGRESS(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<progress"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</progress>"
def OUTPUT(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<output"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</output>"
def KEYGEN(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<keygen"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</keygen>"
def TEXTAREA(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<textarea"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</textarea>"
def OPTION(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<option"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</option>"
def DATALIST(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<datalist"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</datalist>"
def SELECT(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<select"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</select>"
def BUTTON(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<button"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</button>"
def LABEL(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<label"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</label>"
def LEGEND(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<legend"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</legend>"
def FIELDSET(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<fieldset"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</fieldset>"
def MAP(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<map "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</map>"
def AREA(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<area "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</area>"
def A(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder="",_href=""):
return "<a "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' href="'''+_href+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</a>"
def IMG(_type="text",_name="",_id="",_onclick="",_class="",_placeholder="",_src=""):
return "<img "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' src="'''+_src+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"
def BR(_type="text",_name="",_id="",_onclick="",_class="",_placeholder="",_src=""):
return "<br"+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' src="'''+_src+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"
def SOURCE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<source "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</source>"
def PARAM(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<param "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</param>"
def EMBED(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
    # "<embebed>" was a typo; the element is <embed>
    return "<embed "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</embed>"
def IFRAME(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<iframe "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</iframe>"
def DEL(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<del "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</del>"
def INS(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<ins "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</ins>"
def WBR(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<wbr "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</wbr>"
def BDO(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<bdo "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</bdo>"
def BDI(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<bdi "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</bdi>"
def MARK(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<mark "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</mark>"
def U(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<u "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</u>"
def I(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<i "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</i>"
def SUB(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<sub "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</sub>"
def SUP(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<sup "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</sup>"
def KBD(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<kbd "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</kbd>"
def SAMP(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<samp "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</samp>"
def VAR(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<var "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</var>"
def CODE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<code "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</code>"
def TIME(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<time "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</time>"
def DATA(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<data "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</data>"
def ABBR(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<abbr "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</abbr>"
def DFN(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<dfn "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</dfn>"
def Q(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<q "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</q>"
def SMALL(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<small "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</small>"
def PRE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<pre "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</pre>"
def HR(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<hr "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</hr>"
def OL(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<ol "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</ol>"
def BLOCKQUOTE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<blockquote "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</blockquote>"
def ADDRESS(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<address "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</address>"
def LINK(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<link "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</link>"
def BASE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<base "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</base>"
def META(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<meta "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</meta>"
def TITLE(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<title "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</title>"
def TRACK(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<track "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</track>"
def MATH(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<math "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</math>"
def OBJECT(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<object "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</object>"
def RP(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<rp "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</rp>"
def RT(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<rt "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</rt>"
def RUBY(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<ruby "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</ruby>"
def SUMMARY(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<summary "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</summary>"
def LEGEND(_content="",_type="text",_name="",_id="",_onclick="",_class="",_placeholder=""):
return "<legend "+''' onclick="'''+_onclick+'''"'''+''' class="'''+_class+'''"'''+''' id="'''+_id+'''"'''+''' placeholder="'''+_placeholder+'''"'''''' type="'''+_type+'''"'''+">"+_content+"</legend>"
def redirecter(base, app, vista):
    # Factory storing base/app/vista on the function object; the returned
    # helper builds "app=...&vista=...&key=value" query strings and calls
    # `redirect`, which is assumed to be provided by the hosting web framework.
    redirecter.base = base
    redirecter.app = app
    redirecter.vista = vista
    def redireccionador(vista=redirecter.vista, **args):
        if "app" not in args:
            app = "app=" + redirecter.app
        else:
            app = "app=" + args["app"]
        c = ""
        for elem in args:
            if elem != "app" and elem != "vista":
                # str() guards against non-string values breaking concatenation
                c += "&" + elem + "=" + str(args[elem])
        vista = "vista=" + vista
        redirect(redirecter.base + app + "&" + vista + c)
    return redireccionador
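# Usage sketch (hypothetical values; relies on the framework's `redirect`):
#   go = redirecter("https://example.com/index?", "myapp", "home")
#   go(id="1")
# redirects to "https://example.com/index?app=myapp&vista=home&id=1"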
|
"""
Superfunctional builder function & handlers.
The new definition of functionals is based on a dictionary with the following structure
dict = {
"name": "", name of the functional - matched against name.lower() in method lookup
"alias": [""], alternative names for the method in lookup functions, processed with .lower()
"citation": "", citation of the method in the standard indented format, printed in output
"description": "", description of the method, printed in output
"xc_functionals": { definition of a full XC functional from LibXC
"XC_METHOD_NAME": {} must match a LibXC method, see dict_xc_funcs.py for examples
}, if present, the x/c_functionals and x_hf/c_mp2 parameters are not read!
"x_functionals": { definition of X contributions
"X_METHOD_NAME": { must match a LibXC method
"alpha": 1.0, coefficient for (global) GGA exchange, by default 1.0
"omega": 0.0, range-separation parameter
"use_libxc": False whether "x_hf" parameters should be set from LibXC values for this method
"tweak": [], tweak the underlying functional
},
"x_hf": { definition of HF exchange for hybrid functionals
"alpha": 0.0, coefficient for (global) HF exchange, by default none
"beta": 0.0, coefficient for long range HF exchange
"omega": 0.0, range separation parameters
"use_libxc": "X_METHOD_NAME" reads the above 3 values from specified X functional
},
"c_functionals": { definition of C contributions
"C_METHOD_NAME": { must match a LibXC method
"alpha": 1.0, coefficient for (global) GGA correlation, by default 1.0
"tweak": [], tweak the underlying functional
},
"c_mp2": { definition of MP2 correlation double hybrid functionals
"alpha": 0.0, coefficient for MP2 correlation, by default none
"ss": 0.0, coefficient for same spin correlation in SCS methods, forces alpha = 1.0
"os": 0.0, coefficient for opposite spin correlation in SCS methods, forces alpha = 1.0
},
"dispersion": { definition of dispersion corrections
"type": "", dispersion type - "d2", "d3zero", "d3bj" etc., see empirical_dispersion.py
"params": {}, parameters for the dispersion correction
"citation": "", special reference for the dispersion correction, appended to output
},
}
"""
from psi4 import core
from psi4.driver.p4util.exceptions import *
from psi4.driver.procrouting.empirical_dispersion import get_dispersion_aliases
from psi4.driver.qcdb.dashparam import dashcoeff
import copy
from . import dict_xc_funcs
from . import dict_lda_funcs
from . import dict_gga_funcs
from . import dict_mgga_funcs
from . import dict_hyb_funcs
from . import dict_dh_funcs
dict_functionals = {}
dict_functionals.update(dict_xc_funcs.functional_list)
dict_functionals.update(dict_lda_funcs.functional_list)
dict_functionals.update(dict_gga_funcs.functional_list)
dict_functionals.update(dict_mgga_funcs.functional_list)
dict_functionals.update(dict_hyb_funcs.functional_list)
dict_functionals.update(dict_dh_funcs.functional_list)
def get_functional_aliases(functional_dict):
if "alias" in functional_dict:
aliases = [each.lower() for each in functional_dict["alias"]]
aliases.append(functional_dict["name"].lower())
else:
aliases = [functional_dict["name"].lower()]
return aliases
dispersion_names = get_dispersion_aliases()
functionals = {}
for functional_name in dict_functionals:
functional_aliases = get_functional_aliases(dict_functionals[functional_name])
# first create copies for aliases of parent functional
for alias in functional_aliases:
functionals[alias] = dict_functionals[functional_name]
# if the parent functional is already dispersion corrected, skip to next
if "dispersion" in dict_functionals[functional_name]:
continue
# else loop through dispersion types in dashparams (also considering aliases)
# and build dispersion corrected version (applies also for aliases)
for dispersion_name in dispersion_names:
dispersion_type = dispersion_names[dispersion_name]
for dispersion_functional in dashcoeff[dispersion_type]:
if dispersion_functional.lower() in functional_aliases:
func = copy.deepcopy(dict_functionals[functional_name])
func["name"] = func["name"] + "-" + dispersion_type
func["dispersion"] = dict()
# we need to pop the citation as the EmpiricalDispersion class only expects dashparams
if "citation" in dashcoeff[dispersion_type][dispersion_functional]:
func["dispersion"]["citation"] = dashcoeff[dispersion_type][dispersion_functional].pop("citation")
func["dispersion"]["type"] = dispersion_type
func["dispersion"]["params"] = dashcoeff[dispersion_type][dispersion_functional]
# this ensures that M06-2X-D3, M06-2X-D3ZERO, M062X-D3 or M062X-D3ZERO
# all point to the same method (M06-2X-D3ZERO)
for alias in functional_aliases:
alias = alias + "-" + dispersion_name.lower()
functionals[alias] = func
def check_consistency(func_dictionary):
"""
This checks the consistency of the definitions of exchange and correlation components
of the functional, including detecting duplicate requests for LibXC params, inconsistent
requests for HF exchange and missing correlation. It also makes sure that names of methods
passed in using dft_functional={} syntax have a non-implemented name.
"""
# 0a) make sure method name is set:
if "name" not in func_dictionary:
raise ValidationError("SCF: No method name was specified in functional dictionary.")
else:
name = func_dictionary["name"]
# 0b) make sure provided name is unique:
if (name.lower() in functionals.keys()) and (func_dictionary not in functionals.values()):
raise ValidationError("SCF: Provided name for a custom dft_functional matches an already defined one: %s." % (name))
# 1a) sanity checks definition of xc_functionals
if "xc_functionals" in func_dictionary:
if "x_functionals" in func_dictionary or "x_hf" in func_dictionary:
raise ValidationError("SCF: Duplicate specification of exchange (XC + X) in functional %s." % (name))
elif "c_functionals" in func_dictionary or "c_mp2" in func_dictionary:
raise ValidationError("SCF: Duplicate specification of correlation (XC + C) in functional %s." % (name))
# 1b) require at least an empty exchange functional entry or X_HF
elif "x_functionals" not in func_dictionary and "x_hf" not in func_dictionary:
raise ValidationError("SCF: No exchange specified in functional %s." % (name))
# 1c) require at least an empty correlation functional entry or C_MP2
elif "c_functionals" not in func_dictionary and "c_mp2" not in func_dictionary:
raise ValidationError("SCF: No correlation specified in functional %s." % (name))
# 2) use_libxc handling:
use_libxc = 0
if "x_functionals" in func_dictionary:
for item in func_dictionary["x_functionals"]:
if "use_libxc" in func_dictionary["x_functionals"][item] and \
func_dictionary["x_functionals"][item]["use_libxc"]:
use_libxc += 1
# 2a) only 1 component in x_functionals can have "use_libxc": True to prevent libxc conflicts
if use_libxc > 1:
raise ValidationError("SCF: Duplicate request for libxc exchange parameters in functional %s." % (name))
# 2b) if "use_libxc" is defined in x_functionals, there shouldn't be an "x_hf" key
elif use_libxc == 1 and "x_hf" in func_dictionary:
raise ValidationError("SCF: Inconsistent definition of exchange in functional %s." % (name))
# 2c) ensure libxc params requested in "x_hf" are for a functional that is included in "x_functionals"
elif "x_hf" in func_dictionary and "use_libxc" in func_dictionary["x_hf"] \
and func_dictionary["x_hf"]["use_libxc"] not in func_dictionary["x_functionals"]:
raise ValidationError(
"SCF: Libxc parameters requested for an exchange functional not defined as a component of %s." % (name))
def build_superfunctional_from_dictionary(func_dictionary, npoints, deriv, restricted):
"""
This returns a (core.SuperFunctional, dispersion) tuple based on the requested name.
The npoints, deriv and restricted parameters are also respected.
"""
# Sanity check first, raises ValidationError if something is wrong
check_consistency(func_dictionary)
# Either process the "xc_functionals" special case
if "xc_functionals" in func_dictionary:
for xc_key in func_dictionary["xc_functionals"]:
xc_name = "XC_" + xc_key
sup = core.SuperFunctional.XC_build(xc_name, restricted)
descr = " " + func_dictionary["name"] + " "
if sup.is_gga():
if sup.x_alpha() > 0:
descr += "Hyb-GGA "
else:
descr += "GGA "
descr += "Exchange-Correlation Functional\n"
sup.set_description(descr)
# or combine X and C contributions into a blank SuperFunctional
else:
sup = core.SuperFunctional.blank()
descr = []
citation = []
        # Exchange processing - first the GGA part:
        # LibXC uses capital labels for the CAM coefficients; by default we're not using LibXC params
        x_HF = {"ALPHA": 0.0, "OMEGA": 0.0, "BETA": 0.0, "used": False}
if "x_functionals" in func_dictionary:
x_funcs = func_dictionary["x_functionals"]
for x_key in x_funcs:
# Lookup the functional in LibXC
x_name = "XC_" + x_key
x_func = core.LibXCFunctional(x_name, restricted)
x_params = x_funcs[x_key]
# If we're told to use libxc parameters for x_hf from this GGA, do so and set flag
if "use_libxc" in x_params and x_params["use_libxc"]:
x_HF.update(x_func.query_libxc("XC_HYB_CAM_COEF"))
x_HF["used"] = True
x_func.set_alpha(1.0)
if "tweak" in x_params:
x_func.set_tweak(x_params["tweak"])
if "alpha" in x_params:
x_func.set_alpha(x_params["alpha"])
if "omega" in x_params:
x_func.set_omega(x_params["omega"])
sup.add_x_functional(x_func)
# This ensures there is at least some citation for the method
if x_func.citation() not in citation:
citation.append(x_func.citation())
if x_func.description() not in descr:
descr.append(x_func.description())
# Exchange processing - HF part:
# x_HF contains zeroes or "use_libxc" params from a GGA above
if "x_hf" in func_dictionary:
x_params = func_dictionary["x_hf"]
# if "use_libxc" specified here, fetch parameters and set flag
# Duplicate definition of "use_libxc" caught in check_consistency.
if "use_libxc" in x_params:
x_name = "XC_" + x_params["use_libxc"]
x_HF.update(core.LibXCFunctional(x_name, restricted).query_libxc("XC_HYB_CAM_COEF"))
x_HF["used"] = True
if "alpha" in x_params:
sup.set_x_alpha(x_params["alpha"])
else:
x_params["alpha"] = 0.0
if "beta" in x_params:
sup.set_x_beta(x_params["beta"])
if "omega" in x_params:
sup.set_x_omega(x_params["omega"])
# Set LibXC parameters if requested above.
# LibXC uses different nomenclature:
# we need to shuffle the long and short range contributions around
# by default, all 3 are 0.0 - different values are set only if "use_libxc" is specified
if x_HF["used"]:
sup.set_x_alpha(x_HF["ALPHA"])
sup.set_x_beta(x_HF["BETA"])
sup.set_x_omega(x_HF["OMEGA"])
# Correlation processing - GGA part, generally same as above.
if "c_functionals" in func_dictionary:
c_funcs = func_dictionary["c_functionals"]
for c_key in c_funcs:
c_name = "XC_" + c_key
c_func = core.LibXCFunctional(c_name, restricted)
c_params = func_dictionary["c_functionals"][c_key]
if "tweak" in c_params:
c_func.set_tweak(c_params["tweak"])
if "alpha" in c_params:
c_func.set_alpha(c_params["alpha"])
else:
c_func.set_alpha(1.0)
sup.add_c_functional(c_func)
if c_func.citation() not in citation:
citation.append(c_func.citation())
if c_func.description() not in descr:
descr.append(c_func.description())
# Correlation processing - MP2 part
if "c_mp2" in func_dictionary:
c_params = func_dictionary["c_mp2"]
if "alpha" in c_params:
sup.set_c_alpha(c_params["alpha"])
else:
sup.set_c_alpha(0.0)
# The value of alpha is locked to 1.0 C++-side when SCS is detected
if "ss" in c_params:
sup.set_c_ss_alpha(c_params["ss"])
sup.set_c_alpha(1.0)
if "os" in c_params:
sup.set_c_os_alpha(c_params["os"])
sup.set_c_alpha(1.0)
        # Merge the descriptions and citations collected from the LibXC components above, as a fallback.
descr = "\n".join(descr)
citation = "\n".join(citation)
sup.set_citation(citation)
sup.set_description(descr)
    # Here, the joined LibXC fallback is usually overwritten by the proper reference.
if "citation" in func_dictionary:
sup.set_citation(func_dictionary["citation"])
if "description" in func_dictionary:
sup.set_description(func_dictionary["description"])
# Dispersion handling for tuple assembly
dispersion = False
if "dispersion" in func_dictionary:
d_params = func_dictionary["dispersion"]
if "citation" not in d_params:
d_params["citation"] = False
if d_params["type"] == 'nl':
sup.set_vv10_b(d_params["params"]["b"])
sup.set_vv10_c(d_params["params"]["c"])
dispersion = d_params
sup.set_max_points(npoints)
sup.set_deriv(deriv)
sup.set_name(func_dictionary["name"].upper())
sup.allocate()
return (sup, dispersion)
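# Usage sketch (illustrative values): once the module-level registration above
# has run, a named functional can be built for an unrestricted reference:
#   sup, dispersion = build_superfunctional_from_dictionary(
#       functionals["pbe0"], npoints=1000000, deriv=1, restricted=False)
# "pbe0" is assumed here to be among the names registered from dict_hyb_funcs.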
|
# setuptools is required for install_requires below to take effect
# (distutils.core.setup silently ignores it)
from setuptools import setup
setup(
name=u'nmadb-contacts'.encode('utf-8'),
version='0.1',
author=u'Vytautas Astrauskas'.encode('utf-8'),
author_email=u'vastrauskas@gmail.com'.encode('utf-8'),
packages=['nmadb_contacts',],
package_dir={'': 'src'},
#package_data={'nmadb_contacts': []},
# List of data files to be included
# into package.
requires=[
'distribute',
],
install_requires=[ # Dependencies for the package.
'django_db_utils',
'odfpy',
],
scripts=[], # List of python script files.
#data_files=[('/etc/init.d', ['init-script'])]
# List of files, which have to
# be installed into specific
# locations.
#url='', # Home page.
#download_url='', # Page from which package could
# be downloaded.
description=u'nmadb-contacts'.encode('utf-8'),
long_description=(
open('README.rst').read()+open('CHANGES.txt').read()),
# Full list of classifiers could be found at:
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 1 - Planning',
#'Environment :: Console',
#'Framework :: Django',
'Intended Audience :: Developers',
(
'License :: OSI Approved :: '
'GNU Library or Lesser General Public License (LGPL)'),
#'Natural Language :: Lithuanian',
'Operating System :: OS Independent',
'Programming Language :: Python',
(
'Topic :: Software Development :: Libraries :: '
'Python Modules'),
],
license='LGPL'
)
|
"""Module for on/off switches."""
from .api.command_send import CommandSend
from .exception import PyVLXException
from .node import Node
from .parameter import SwitchParameter, SwitchParameterOff, SwitchParameterOn
class OnOffSwitch(Node):
"""Class for controlling on-off switches."""
def __init__(self, pyvlx, node_id, name, serial_number):
"""Initialize opening device."""
super().__init__(
pyvlx=pyvlx, node_id=node_id, name=name, serial_number=serial_number
)
self.parameter = SwitchParameter()
async def set_state(self, parameter):
"""Set switch to desired state."""
command_send = CommandSend(
pyvlx=self.pyvlx, node_id=self.node_id, parameter=parameter
)
await command_send.do_api_call()
if not command_send.success:
raise PyVLXException("Unable to send command")
self.parameter = parameter
await self.after_update()
async def set_on(self):
"""Set switch on."""
await self.set_state(SwitchParameterOn())
async def set_off(self):
"""Set switch off."""
await self.set_state(SwitchParameterOff())
def is_on(self):
"""Return if switch is set to on."""
return self.parameter.is_on()
def is_off(self):
"""Return if switch is set to off."""
return self.parameter.is_off()
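# Usage sketch (assumes a connected PyVLX instance `pyvlx` and a valid node id):
#   switch = OnOffSwitch(pyvlx=pyvlx, node_id=23, name="demo switch", serial_number=None)
#   await switch.set_on()
#   assert switch.is_on()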
|
import time
from fenrirscreenreader.core import debug
outputData = {
'nextFlush': time.time(),
'messageText': '',
'messageOffset': None,
'cursorOffset': None,
}
|
from pyblish import api
from pyblish_bumpybox import inventory
class ExtractGroup(api.InstancePlugin):
""" Extract gizmos from group nodes. """
order = inventory.get_order(__file__, "ExtractGroup")
optional = True
families = ["gizmo", "lut"]
label = "Group"
hosts = ["nuke", "nukeassist"]
def process(self, instance):
import os
import nuke
if not instance.data["publish"]:
return
file_path = instance.data["output_path"]
directory = os.path.dirname(file_path)
# Create workspace if necessary
if not os.path.exists(directory):
os.makedirs(directory)
# Export gizmo
# Deselect all nodes
for node in nuke.selectedNodes():
node["selected"].setValue(False)
instance[0]["selected"].setValue(True)
nuke.nodeCopy(file_path)
data = ""
with open(file_path, "r") as f:
data = f.read()
data = data.replace("set cut_paste_input [stack 0]\n", "")
data = data.replace("push $cut_paste_input\n", "")
data = data.replace("Group {\n", "Gizmo {\n")
with open(file_path, "w") as f:
f.write(data)
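# Sketch of the rewrite above: nuke.nodeCopy() writes clipboard-style text such as
#   set cut_paste_input [stack 0]
#   push $cut_paste_input
#   Group {
#    name MyGizmo
#   }
# and the three replacements strip the clipboard header and retag the node:
#   Gizmo {
#    name MyGizmo
#   }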
|
program = {
'program_author': 'David McAllister',
'program_date': '2013-09-21',
'program_name': './backup.py',
'program_version': '1.0'
}
config_file = ('/dir_1/dir_2/some_user_name/backup_report.py',)
|
from pycraft.service.part.biome import BiomeID, biomes
from pycraft.service.part.block import SaplingType
from ..structure import Tree
from .tree import TreePopulator
static_defs = {
BiomeID.FOREST : TreePopulator(Tree(SaplingType.OAK), 5),
BiomeID.BIRCH_FOREST : TreePopulator(Tree(SaplingType.BIRCH), 5),
BiomeID.TAIGA : TreePopulator(Tree(SaplingType.SPRUCE), 10),
}
def regist():
for biome_id, populator in static_defs.items():
biomes.regist_populator(biome_id, populator)
|
"""
An implementation which send to files data
"""
import bibusinterface
class Bibus2Logs(bibusinterface.BibusInterface):
"""
An implementation which send to files data
"""
def __init__(self, log_file=""):
super().__init__()
def update_data(self, id_, remaining_time=None):
if not remaining_time:
remaining_time = 'UNKNOW'
print("bus id n°", id_, "arrive in nearly", remaining_time, "seconds")
|
'''
This file is part of GEAR_mc.
GEAR_mc is a fork of Jeremie Passerin's GEAR project.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com www.jeremiepasserin.com
Fork Author: Miquel Campos hello@miqueltd.com www.miqueltd.com
Date: 2013 / 08 / 16
'''
import os
from gear.xsi import c
import gear.xsi.plugin as plu
def addLayout(layout, prop):
# Buttons ----------------------
layout.AddRow()
item = layout.AddItem("controlers_01_grp", "", "dscontrol")
item.SetAttribute("class", "button")
item.SetAttribute(c.siUICaption, "Primary")
item.SetAttribute(c.siUIStyle, 0x00001003)
item.SetAttribute(c.siUINoLabel, True)
item = layout.AddItem("controlers_slider_grp", "", "dscontrol")
item.SetAttribute("class", "button")
item.SetAttribute(c.siUICaption, "Slider")
item.SetAttribute(c.siUIStyle, 0x00001003)
item.SetAttribute(c.siUINoLabel, True)
item = layout.AddItem("controlers_facial_grp", "", "dscontrol")
item.SetAttribute("class", "button")
item.SetAttribute(c.siUICaption, "Facial")
item.SetAttribute(c.siUIStyle, 0x00001003)
item.SetAttribute(c.siUINoLabel, True)
layout.EndRow()
# HTML Page ---------------------
path = os.path.join(plu.getPluginPath("gear_Synoptic"), "tabs", "_common", "biped_body", "biped_body.htm")
prop.Parameters("biped_body_path").Value = path
item = layout.AddItem("biped_body_path", "", c.siControlSynoptic)
item.SetAttribute(c.siUINoLabel, True)
item.SetAttribute(c.siUICX, 308)
|
from server.bones import baseBone
from server import db
from random import random, sample, shuffle
from itertools import chain
class randomSliceBone( baseBone ):
"""
Simulates the orderby=random from SQL.
If you sort by this bone, the query will return a random set of elements from that query.
"""
type = "randomslice"
def __init__(self, indexed=True, visible=False, readOnly=True, slices=2, sliceSize=0.5, *args, **kwargs ):
"""
Initializes a new randomSliceBone.
"""
if not indexed or visible or not readOnly:
            raise NotImplementedError("A RandomSliceBone must be indexed, not visible and readonly!")
baseBone.__init__( self, indexed=True, visible=False, readOnly=True, *args, **kwargs )
self.slices = slices
self.sliceSize = sliceSize
def serialize(self, valuesCache, name, entity):
"""
Serializes this bone into something we
can write into the datastore.
This time, we just ignore whatever is set on this bone and write a randomly chosen
float [0..1) as value for this bone.
:param name: The property-name this bone has in its Skeleton (not the description!)
:type name: str
        :returns: The modified datastore entity
"""
entity.set(name, random(), True)
return entity
def buildDBSort( self, name, skel, dbFilter, rawFilter ):
"""
Same as buildDBFilter, but this time its not about filtering
the results, but by sorting them.
Again: rawFilter is controlled by the client, so you *must* expect and safely handle
malformed data!
This function is somewhat special as it doesn't just change in which order the selected
Elements are being returned - but also changes *which* Elements are beeing returned (=>
a random selection)
:param name: The property-name this bone has in its Skeleton (not the description!)
:type name: str
:param skel: The :class:`server.skeleton.Skeleton` instance this bone is part of
:type skel: :class:`server.skeleton.Skeleton`
:param dbFilter: The current :class:`server.db.Query` instance the filters should be applied to
:type dbFilter: :class:`server.db.Query`
:param rawFilter: The dictionary of filters the client wants to have applied
:type rawFilter: dict
:returns: The modified :class:`server.db.Query`
"""
def applyFilterHook(dbfilter, property, value):
"""
Applies dbfilter._filterHook to the given filter if set,
else return the unmodified filter.
Allows orderby=random also be used in relational-queries.
:param dbfilter:
:param property:
:param value:
:return:
"""
if dbFilter._filterHook is None:
return property, value
try:
property, value = dbFilter._filterHook(dbFilter, property, value)
except:
                # Either the filterHook tried to do something special to dbFilter (which won't
                # work as we are currently rewriting the core part of it), or it thinks that the query
                # is unsatisfiable (e.g. because of a missing ref/parent key in relationalBone).
                # In either case we kill the query here - making it return no results
raise RuntimeError()
return property, value
if "orderby" in rawFilter and rawFilter["orderby"] == name:
# We select a random set of elements from that collection
assert not isinstance(dbFilter.datastoreQuery, db.MultiQuery), "Orderby random is not possible on a query that already uses an IN-filter!"
origFilter = dbFilter.datastoreQuery
origKind = dbFilter.getKind()
queries = []
            for unused in range(0, self.slices):  # Fetch `self.slices` slices from the set
rndVal = random() # Choose our Slice center
# Right Side
q = db.DatastoreQuery( kind=origKind )
property, value = applyFilterHook(dbFilter, "%s <=" % name, rndVal)
q[property] = value
q.Order( (name, db.DESCENDING) )
queries.append( q )
# Left Side
q = db.DatastoreQuery( kind=origKind )
property, value = applyFilterHook(dbFilter, "%s >" % name, rndVal)
q[property] = value
queries.append( q )
dbFilter.datastoreQuery = db.MultiQuery(queries, None)
# Map the original filter back in
for k, v in origFilter.items():
dbFilter.datastoreQuery[ k ] = v
dbFilter._customMultiQueryMerge = self.customMultiQueryMerge
dbFilter._calculateInternalMultiQueryAmount = self.calculateInternalMultiQueryAmount
def calculateInternalMultiQueryAmount(self, targetAmount):
"""
Tells :class:`server.db.Query` How much entries should be fetched in each subquery.
:param targetAmount: How many entries shall be returned from db.Query
:type targetAmount: int
:returns: The amount of elements db.Query should fetch on each subquery
:rtype: int
"""
return int(targetAmount*self.sliceSize)
def customMultiQueryMerge(self, dbFilter, result, targetAmount):
"""
Randomly returns 'targetAmount' elements from 'result'
:param dbFilter: The db.Query calling this function
:type: dbFilter: server.db.Query
:param result: The list of results for each subquery we've run
:type result: list of list of :class:`server.db.Entity`
:param targetAmount: How many results should be returned from db.Query
:type targetAmount: int
:return: list of elements which should be returned from db.Query
:rtype: list of :class:`server.db.Entity`
"""
# res is a list of iterators at this point, chain them together
res = chain(*[list(x) for x in result])
# Remove duplicates
tmpDict = {}
for item in res:
tmpDict[ str(item.key()) ] = item
res = list(tmpDict.values())
        # Sample the requested amount of results from our (several times larger) merged set
res = sample(res, min(len(res), targetAmount))
shuffle(res)
return res
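# Numeric sketch of the defaults above: with slices=2 and sliceSize=0.5, a
# query for 10 entities issues 2 slices * 2 sides = 4 subqueries, each fetching
# int(10 * 0.5) = 5 candidates; the merged, de-duplicated pool is then sampled
# down to at most 10 results and shuffled.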
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
os.system("pip install -r requirements.txt")
setup(
name="sleepy",
version="1.2.11",
author="Adam Haney",
author_email="adam.haney@akimbo.io",
    description=("""A RESTful library that is used at retickr on top """
                 """of Django; we use it for a few APIs internally."""),
license="Closed",
keywords="JSON RESTful",
url="http://about.retickr.com",
packages=['sleepy'],
long_description=read('README'),
dependency_links = [],
install_requires=[
"gitpython==0.3.2.RC1"
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Framework",
"License :: OSI Approved :: Closed",
]
)
|
'''
one trade
(it seems position scaling - adding/reducing size - should be considered)
'''
import math
import sys
import os
cwd = os.getcwd()
sys.path.insert(0, os.getcwd())
import data_api
class Trade():
def __init__(self):
self.enter_date = ''
self.enter_price = 0
self.enter_index = -1
self.exit_date = ''
self.exit_price = 0
self.exit_index = -1
self.volume = 0
self.fee = 0
self.profit = 0
self.MFE = 0
self.MAE = 0
def on_enter(self, dataApi, account, index):
self.enter_date = dataApi.date(index)
self.enter_price = dataApi.close(index)
self.enter_index = index
self.volume = math.floor(account.cash * account.percent / (self.enter_price * 100)) * 100
self.fee = self.enter_price * self.volume * account.enter_fee
maxFNF_N = 70
if index >= maxFNF_N:
self.MFE = dataApi.hhv(index - maxFNF_N + 1, maxFNF_N, data_api.KDataType.High) / self.enter_price - 1
self.MAE = 1 - dataApi.llv(index - maxFNF_N + 1, maxFNF_N, data_api.KDataType.Low) / self.enter_price
def on_exit(self, dataApi, account, index):
self.exit_date = dataApi.date(index)
self.exit_price = dataApi.close(index)
self.exit_index = index
self.profit = (self.exit_price - self.enter_price) * self.volume
self.fee += self.exit_price * self.volume * account.exit_fee
account.on_exit(self)
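# Numeric sketch of the excursion metrics above (assuming hhv/llv return the
# highest high / lowest low of the 70-bar window ending at the entry bar):
# with enter_price = 10.0, a window high of 11.5 and a window low of 9.0,
#   MFE = 11.5 / 10.0 - 1 = 0.15
#   MAE = 1 - 9.0 / 10.0 = 0.10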
|
from pycp2k.inputsection import InputSection
from ._each351 import _each351
class _wannier_spreads5(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each351()
self._name = "WANNIER_SPREADS"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
from time import clock
from math import sqrt
from itertools import count
def timer(function):
def wrapper(*args, **kwargs):
start = clock()
print(function(*args, **kwargs))
print("Solution took: %f seconds." % (clock() - start))
return wrapper
@timer
def find_answer():
consecutive = 0
for i in count(647):
if len(distinct_prime_factors(i)) == 4:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return i - 3
def distinct_prime_factors(n):
factors = set()
while n % 2 == 0:
factors.add(2)
n //= 2
limit = sqrt(n + 1)
i = 3
while i <= limit:
if n % i == 0:
factors.add(i)
n //= i
limit = sqrt(n + i)
else:
i += 2
if n != 1:
factors.add(n)
return factors
if __name__ == "__main__":
find_answer()
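# Sanity check for distinct_prime_factors, using the problem statement's example
# of three consecutive numbers with three distinct prime factors each:
#   distinct_prime_factors(644) == {2, 7, 23}   # 644 = 2^2 * 7 * 23
#   distinct_prime_factors(645) == {3, 5, 43}
#   distinct_prime_factors(646) == {2, 17, 19}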
|
import sys
import toolbox
import re
file = sys.argv[1]
def clean_ip(ip):
r = re.search('([0-9]{1,3}\.[0-9]{1,3})(\.[0-9]{1,3}\.[0-9]{1,3})', ip)
firstpart = r.group(1)
secondpart = r.group(2)
return firstpart, secondpart
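# Example: the regex splits an IPv4 address after the second octet, e.g.
#   clean_ip("10.16.1.2") -> ("10.16", ".1.2")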
def compare_to_dictonary(line):
    '''
    make sure line is clean and there are only two comma-separated fields:
    ABCDEFG,1.2.3.4
    spaces in the second field will break this function
    '''
    tid, ip = line.split(',')
    tid = tid.strip()
    ip = ip.strip()
    if not ip:
        # missing IP: emit placeholder columns
        return tid + ",n/a,n/a"
    firstpart, secondpart = clean_ip(ip)
    if firstpart in nat_d:
        nat_ip = nat_d[firstpart] + secondpart
        new_line = tid + "," + ip + "," + nat_ip
    else:
        new_line = tid + "," + ip + ",n/a"
    return new_line
def write_to_file(write_list):
with open('new_ip.csv', 'w') as f:
for line in write_list:
f.write(line + '\n')
def main():
with open(file, 'r') as f:
write_list = [compare_to_dictonary(x) for x in f]
write_to_file(write_list)
if __name__ == '__main__':
# create two list to zip as key, values to a dictionary
real_prefix = toolbox.generate_ip_prefix(10,16,32)
nat_prefix = toolbox.generate_ip_prefix(30,240,256)
# zip two list and return dictionary
nat_d = dict(zip(real_prefix, nat_prefix))
main()
|
def main():
change_owed = get_change()
min_coins = get_min_coins(change_owed)
print(min_coins)
def get_change():
print("O hai! ", end="")
while True:
try:
change = float(input("How much change is owed?\n"))
except ValueError:
continue
if change > 0:
break
return change
def get_min_coins(change):
    min_coins = 0
    # convert dollars to whole cents; round to avoid float artifacts
    # (e.g. 4.2 * 100 == 419.99999999999994, which would skew the greedy loop)
    change = round(change * 100)
quarter = 25
dime = 10
nickel = 5
penny = 1
while change > 0:
if change - quarter >= 0:
change -= quarter
min_coins += 1
elif change - dime >= 0:
change -= dime
min_coins += 1
elif change - nickel >= 0:
change -= nickel
min_coins += 1
else:
change -= penny
min_coins += 1
return min_coins
if __name__ == "__main__":
main()
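# Worked example of the greedy loop above: $0.41 owed -> 41 cents ->
# 25 + 10 + 5 + 1, so get_min_coins(0.41) == 4.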
|
from __future__ import unicode_literals
import base64
import re
import time
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
float_or_none,
remove_end,
struct_unpack,
)
def _decrypt_url(png):
encrypted_data = base64.b64decode(png)
text_index = encrypted_data.find(b'tEXt')
text_chunk = encrypted_data[text_index - 4:]
length = struct_unpack('!I', text_chunk[:4])[0]
# Use bytearray to get integers when iterating in both python 2.x and 3.x
data = bytearray(text_chunk[8:8 + length])
data = [chr(b) for b in data if b != 0]
hash_index = data.index('#')
alphabet_data = data[:hash_index]
url_data = data[hash_index + 1:]
alphabet = []
e = 0
d = 0
for l in alphabet_data:
if d == 0:
alphabet.append(l)
d = e = (e + 1) % 4
else:
d -= 1
url = ''
f = 0
e = 3
b = 1
for letter in url_data:
if f == 0:
l = int(letter) * 10
f = 1
else:
if e == 0:
l += int(letter)
url += alphabet[l]
e = (b + 3) % 4
f = 0
b += 1
else:
e -= 1
return url
class RTVEALaCartaIE(InfoExtractor):
IE_NAME = 'rtve.es:alacarta'
IE_DESC = 'RTVE a la carta'
_VALID_URL = r'http://www\.rtve\.es/(m/)?alacarta/videos/[^/]+/[^/]+/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.rtve.es/alacarta/videos/balonmano/o-swiss-cup-masculina-final-espana-suecia/2491869/',
'md5': '1d49b7e1ca7a7502c56a4bf1b60f1b43',
'info_dict': {
'id': '2491869',
'ext': 'mp4',
'title': 'Balonmano - Swiss Cup masculina. Final: España-Suecia',
'duration': 5024.566,
},
}, {
'note': 'Live stream',
'url': 'http://www.rtve.es/alacarta/videos/television/24h-live/1694255/',
'info_dict': {
'id': '1694255',
'ext': 'flv',
'title': 'TODO',
},
'skip': 'The f4m manifest can\'t be used yet',
}, {
'url': 'http://www.rtve.es/m/alacarta/videos/cuentame-como-paso/cuentame-como-paso-t16-ultimo-minuto-nuestra-vida-capitulo-276/2969138/?media=tve',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
info = self._download_json(
'http://www.rtve.es/api/videos/%s/config/alacarta_videos.json' % video_id,
video_id)['page']['items'][0]
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % video_id
png = self._download_webpage(png_url, video_id, 'Downloading url information')
video_url = _decrypt_url(png)
if not video_url.endswith('.f4m'):
auth_url = video_url.replace(
'resources/', 'auth/resources/'
).replace('.net.rtve', '.multimedia.cdn.rtve')
video_path = self._download_webpage(
auth_url, video_id, 'Getting video url')
# Use mvod1.akcdn instead of flash.akamaihd.multimedia.cdn to get
# the right Content-Length header and the mp4 format
video_url = compat_urlparse.urljoin(
'http://mvod1.akcdn.rtve.es/', video_path)
subtitles = None
if info.get('sbtFile') is not None:
subtitles = self.extract_subtitles(video_id, info['sbtFile'])
return {
'id': video_id,
'title': info['title'],
'url': video_url,
'thumbnail': info.get('image'),
'page_url': url,
'subtitles': subtitles,
'duration': float_or_none(info.get('duration'), scale=1000),
}
def _get_subtitles(self, video_id, sub_file):
subs = self._download_json(
sub_file + '.json', video_id,
'Downloading subtitles info')['page']['items']
return dict(
(s['lang'], [{'ext': 'vtt', 'url': s['src']}])
for s in subs)
class RTVELiveIE(InfoExtractor):
IE_NAME = 'rtve.es:live'
IE_DESC = 'RTVE.es live streams'
_VALID_URL = r'http://www\.rtve\.es/(?:deportes/directo|noticias|television)/(?P<id>[a-zA-Z0-9-]+)'
_TESTS = [{
'url': 'http://www.rtve.es/noticias/directo-la-1/',
'info_dict': {
'id': 'directo-la-1',
'ext': 'flv',
'title': 're:^La 1 de TVE [0-9]{4}-[0-9]{2}-[0-9]{2}Z[0-9]{6}$',
},
'params': {
'skip_download': 'live stream',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
start_time = time.gmtime()
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
player_url = self._search_regex(
r'<param name="movie" value="([^"]+)"/>', webpage, 'player URL')
title = remove_end(self._og_search_title(webpage), ' en directo')
title += ' ' + time.strftime('%Y-%m-%dZ%H%M%S', start_time)
vidplayer_id = self._search_regex(
r' id="vidplayer([0-9]+)"', webpage, 'internal video ID')
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % vidplayer_id
png = self._download_webpage(png_url, video_id, 'Downloading url information')
video_url = _decrypt_url(png)
return {
'id': video_id,
'ext': 'flv',
'title': title,
'url': video_url,
'app': 'rtve-live-live?ovpfv=2.1.2',
'player_url': player_url,
'rtmp_live': True,
}
|
from bitmovin.resources.models import AzureOutput
from ..rest_service import RestService
class Azure(RestService):
BASE_ENDPOINT_URL = 'encoding/outputs/azure'
def __init__(self, http_client):
super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=AzureOutput)
|
import sys
import re
element_match_pattern = re.compile(r'([A-Z][a-z]?)')
final_molecule = str()
while True:
line = sys.stdin.readline()
if line == "\n":
final_molecule = sys.stdin.readline().rstrip()
break
results = re.findall(element_match_pattern, final_molecule)
tokens = len(results)
parenthesis = results.count('Rn') + results.count('Ar')
commas = results.count('Y')
minimum_steps = tokens - parenthesis - 2*commas - 1
print(minimum_steps)
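# Arithmetic sketch of the formula (hypothetical token stream): for element
# tokens C Rn F Y F Ar, tokens = 6, Rn/Ar count = 2, Y count = 1, so
# minimum_steps = 6 - 2 - 2*1 - 1 = 1.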
|
from time import clock
def timer(function):
def wrapper(*args, **kwargs):
start = clock()
print(function(*args, **kwargs))
print("Solution took: %f seconds." % (clock() - start))
return wrapper
@timer
def find_answer():
return sum([i for i in range(1000) if i % 3 == 0 or i % 5 == 0])
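# Cross-check via inclusion-exclusion with A(n) = n*(n+1)//2:
#   3*A(333) + 5*A(199) - 15*A(66) = 166833 + 99500 - 33165 = 233168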
if __name__ == "__main__":
find_answer()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from skflow import *
|
u"""Mapping of rt_params to genesis_params.
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from io import open
from pykern import pkarray
from pykern.pkdebug import pkdc, pkdp
def to_beam(params):
'''Convert beam params to dict with Genesis keys'''
res = dict()
res['NPART']=params['num_particle']
res['GAMMA0']=params['gamma']
res['DELGAM']=params['rms_energy_spread']
res['RXBEAM']=params['rms_horizontal_width']
res['RYBEAM']=params['rms_vertical_width']
res['EMITX']=params['rms_horizontal_emittance']
res['EMITY']=params['rms_vertical_emittance']
res['ALPHAX']=params['horizontal_alpha']
res['ALPHAY']=params['vertical_alpha']
res['XBEAM']=params['horizontal_coord']
res['YBEAM']=params['vertical_coord']
res['PXBEAM']=params['horizontal_angle']
res['PYBEAM']=params['vertical_angle']
res['CURPEAK']=params['current']
res['CUTTAIL']=params['cut_col']
res['BUNCH']=params['bunch_factor']
res['BUNCHPHASE']=params['bunch_phase']
res['EMOD']=params['energy_modulation']
res['EMODPHASE']=params['energy_modulation_phase']
res['CONDITX']=params['conditx']
res['CONDITY']=params['condity']
return res
def to_undulator(params):
'''Convert undulator params to dict with Genesis keys'''
res = dict()
res['AW0']=params['undulator_parameter']
res['XLAMD']=params['period_len']
res['IWITYP']=params['undulator_type']
res['XKX']=params['horizontal_focus']
res['XKY']=params['vertical_focus']
res['FBESS0']=params['coupling_factor']
res['NWIG']=params['num_periods']
res['NSEC']=params['num_section']
res['AWD']=params['virtual_undulator_parameter']
res['WCOEFZ(1)']=params['taper_start']
res['WCOEFZ(2)']=params['taper_field']
res['WCOEFZ(3)']=params['taper_type']
res['IERTYP']=params['error_type']
res['DELAW']=params['error']
res['ISEED']=params['rng_seed']
res['AWX']=params['horizontal_misalignment']
res['AWY']=params['vertical_misalignment']
return res
def to_radiation(params):
'''Convert radiation params to dict with Genesis keys'''
res = dict()
res['XLAMDS']=params['resonant_wavelength']
res['PRAD0']=params['input_power']
res['ZRAYL']=params['rayleigh_length']
res['ZWAIST']=params['rayleigh_waist']
res['NHARM']=params['num_harmonic']
res['IALLHARM']=params['all_harmonic']
res['IHARMSC']=params['harmonic_coupling']
res['PRADH0']=params['harmonic_power']
return res
def to_particle_loading(params):
'''Convert particle loading params to dict with Genesis keys'''
res = dict()
res['ILDPSI']=params['ham_particle_phase']
res['ILDGAM']=params['ham_energy_distribution']
res['ILDX']=params['ham_horizontal_distribution']
res['ILDY']=params['ham_vertical_distribution']
res['ILDPX']=params['ham_horizontal_angle']
res['ILDPY']=params['ham_vertical_angle']
res['IGAMGAUS']=params['energy_profile']
res['ITGAUS']=params['trans_profile']
res['INVERFC']=params['generate_gaus']
res['IALL']=params['ham_all']
res['IPSEED']=params['rng_sn_seed']
res['NBINS']=params['num_bins']
return res
def to_mesh(params):
'''Convert mesh params to dict with Genesis keys'''
res = dict()
res['NCAR']=params['num_grid']
res['LBC']=params['boundary']
res['RMAX0']=params['auto_grid_size']
res['DGRID']=params['direct_grid_size']
res['NSCR']=params['azimuthal_modes']
res['NPTR']=params['rad_grid']
res['RMAX0SC']=params['rad_grid_size']
res['ISCRKUP']=params['sc_calc']
return res
def to_focusing(params):
'''Convert focusing params to dict with Genesis keys'''
res = dict()
res['QUADF']=params['focus_strength']
res['QUADD']=params['defocus_strength']
res['FL']=params['focus_length']
res['DL']=params['defocus_length']
res['DRL']=params['drift_length']
res['F1ST']=params['focus_length']*params['cell_start'].value
res['QFDX']=params['max_horizontal_misalignment']
res['QFDY']=params['max_vertical_misalignment']
res['SOLEN']=params['solenoid_strength']
res['SL']=params['solenoid_length']
return res
def to_time(params):
'''Convert time dependence params to dict with Genesis keys'''
res=dict()
res['ITDP']=params['set_time_dependent']
res['CURLEN']=params['bunch_length']
res['ZSEP']=params['slice_separation']
res['NSLICE']=params['num_slice']
res['NTAIL']=params['start_slice']
res['SHOTNOISE']=params['shotnoise']
res['ISNTYP']=params['shotnoise_algorithm']
return res
def to_sim_control(params):
'''Convert simulation control params to dict with Genesis keys'''
res = dict()
res['DELZ']=params['step_size']
res['ZSTOP']=params['integration_length']
res['IORB']=params['orbit_correct']
res['ISRAVG']=params['energy_loss']
res['ISRSIG']=params['energy_spread']
res['ELOSS']=params['eloss']
return res
def to_scan(params):
'''Convert scan params to dict with Genesis keys'''
res = dict()
res['ISCAN']=params['scan_variable']
res['NSCAN']=params['num_scan']
res['SVAR']=params['scan_range']
return res
def to_io_control(params):
'''Convert io params to dict with Genesis keys'''
res = dict()
lout = []
res['IPHSTY']= params['output_num_step']
res['ISHSTY']=params['output_num_slice']
res['IPPART']=params['particle_dist_num_step']
res['ISPART']=params['particle_dist_num_slice']
res['IPRADI']=params['field_dist_num_step']
res['ISRADI']=params['field_dist_num_slice']
res['IOTAIL']=params['time_window']
res['OUTPUTFILE']=params['output_filename']
res['MAGINFILE']=params['mag_input_filename']
res['MAGOUTFILE']=params['mag_output_filename']
res['IDUMP']=params['dump_all']
res['IDMPFLD']=params['dump_field']
res['IDMPPAR']=params['dump_particle']
res['BEAMFILE']=params['beam_file']
res['RADFILE']=params['rad_file']
res['DISTFILE']=params['phase_file']
res['NDCUT']=params['ndcut']
res['FIELDFILE']=params['field_file']
res['ALIGNRADF']=params['align_rad']
res['OFFSETRADF']=params['offset_rad']
res['PARTFILE']=params['part_dist_file']
res['CONVHARM']=params['convharm']
res['IBFIELD']=params['chicane_field']
res['IMAGL']=params['chicane_mag_length']
res['IDRIL']=params['chicane_drift']
res['ILOG']=params['log']
res['FFSPEC']=params['ff_spectrum']
    #res['TRAMA']=params['trama']
    # ITRAM translator: loop through the 36 elements of the 6x6 transport matrix
    # and emit them in Genesis form ITRAM##=VALUE
    #if bool(res['TRAMA']) is True:
    #    for i, v in enumerate(params['trama']['itram']):
    #        for j, w in enumerate(v):
    #            str_key = 'ITRAM' + str(i) + str(j)
    #            res[str_key] = w
    # Loop through the children of output_parameters to build the LOUT list
for key in params['output_parameters']:
lout.append(int(params['output_parameters'][key]))
res['LOUT']=lout
return res
def to_genesis():
res = dict()
res['NPART']='num_particle'
res['GAMMA0']='gamma'
res['DELGAM']='rms_energy_spread'
res['RXBEAM']='rms_horizontal_width'
res['RYBEAM']='rms_vertical_width'
res['EMITX']='rms_horizontal_emittance'
res['EMITY']='rms_vertical_emittance'
res['ALPHAX']='horizontal_alpha'
res['ALPHAY']='vertical_alpha'
res['XBEAM']='horizontal_coord'
res['YBEAM']='vertical_coord'
res['PXBEAM']='horizontal_angle'
res['PYBEAM']='vertical_angle'
res['CURPEAK']='current'
res['CUTTAIL']='cut_col'
res['BUNCH']='bunch_factor'
res['BUNCHPHASE']='bunch_phase'
res['EMOD']='energy_modulation'
res['EMODPHASE']='energy_modulation_phase'
res['CONDITX']='conditx'
res['CONDITY']='condity'
res['AW0']='undulator_parameter'
res['XLAMD']='period_len'
res['IWITYP']='undulator_type'
res['XKX']='horizontal_focus'
res['XKY']='vertical_focus'
res['FBESS0']='coupling_factor'
res['NWIG']='num_periods'
res['NSEC']='num_section'
res['AWD']='virtual_undulator_parameter'
res['WCOEFZ(1)']='taper_start'
res['WCOEFZ(2)']='taper_field'
res['WCOEFZ(3)']='taper_type'
res['IERTYP']='error_type'
res['DELAW']='error'
res['ISEED']='rng_seed'
res['AWX']='horizontal_misalignment'
res['AWY']='vertical_misalignment'
res['XLAMDS']='resonant_wavelength'
res['PRAD0']='input_power'
res['ZRAYL']='rayleigh_length'
res['ZWAIST']='rayleigh_waist'
res['NHARM']='num_harmonic'
res['IALLHARM']='all_harmonic'
res['IHARMSC']='harmonic_coupling'
res['PRADH0']='harmonic_power'
res['ILDPSI']='ham_particle_phase'
res['ILDGAM']='ham_energy_distribution'
res['ILDX']='ham_horizontal_distribution'
res['ILDY']='ham_vertical_distribution'
res['ILDPX']='ham_horizontal_angle'
res['ILDPY']='ham_vertical_angle'
res['IGAMGAUS']='energy_profile'
res['ITGAUS']='trans_profile'
res['INVERFC']='generate_gaus'
res['IALL']='ham_all'
res['IPSEED']='rng_sn_seed'
res['NBINS']='num_bins'
res['NCAR']='num_grid'
res['LBC']='boundary'
res['RMAX0']='auto_grid_size'
res['DGRID']='direct_grid_size'
res['NSCR']='azimuthal_modes'
res['NPTR']='rad_grid'
res['RMAX0SC']='rad_grid_size'
res['ISCRKUP']='sc_calc'
res['QUADF']='focus_strength'
res['QUADD']='defocus_strength'
res['FL']='focus_length'
res['DL']='defocus_length'
res['DRL']='drift_length'
res['F1ST']='cell_start'
res['QFDX']='max_horizontal_misalignment'
res['QFDY']='max_vertical_misalignment'
res['SOLEN']='solenoid_strength'
res['SL']='solenoid_length'
res['ITDP']='set_time_dependent'
res['CURLEN']='bunch_length'
res['ZSEP']='slice_separation'
res['NSLICE']='num_slice'
res['NTAIL']='start_slice'
res['SHOTNOISE']='shotnoise'
res['ISNTYP']='shotnoise_algorithm'
res['DELZ']='step_size'
res['ZSTOP']='integration_length'
res['IORB']='orbit_correct'
res['ISRAVG']='energy_loss'
res['ISRSIG']='energy_spread'
res['ELOSS']='eloss'
res['ISCAN']='scan_variable'
res['NSCAN']='num_scan'
res['SVAR']='scan_range'
res['IPHSTY']='output_num_step'
res['ISHSTY']='output_num_slice'
res['IPPART']='particle_dist_num_step'
res['ISPART']='particle_dist_num_slice'
res['IPRADI']='field_dist_num_step'
res['ISRADI']='field_dist_num_slice'
res['IOTAIL']='time_window'
res['OUTPUTFILE']='output_filename'
res['MAGINFILE']='mag_input_filename'
res['MAGOUTFILE']='mag_output_filename'
res['IDUMP']='dump_all'
res['IDMPFLD']='dump_field'
res['IDMPPAR']='dump_particle'
res['BEAMFILE']='beam_file'
res['RADFILE']='rad_file'
res['DISTFILE']='phase_file'
res['NDCUT']='ndcut'
res['FIELDFILE']='field_file'
res['ALIGNRADF']='align_rad'
res['OFFSETRADF']='offset_rad'
res['PARTFILE']='part_dist_file'
res['CONVHARM']='convharm'
res['IBFIELD']='chicane_field'
res['IMAGL']='chicane_mag_length'
res['IDRIL']='chicane_drift'
res['ILOG']='log'
res['FFSPEC']='ff_spectrum'
res['LOUT']='output_parameters'
res['TRAMA']='trama'
return res
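# Minimal usage sketch (hypothetical helper, assuming a flat `params` dict
# keyed by the friendly names above): invert the to_genesis() mapping to emit
# "KEY = value" lines for a Genesis input namelist.
def params_to_namelist(params):
    lines = []
    for genesis_key, param_name in to_genesis().items():
        if params.get(param_name) is not None:
            lines.append('%s = %s' % (genesis_key, params[param_name]))
    return lines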
|
"""
This example shows how to reconnect to a model if you encounter an error
1. Connects to current model.
2. Attempts to get an application that doesn't exist.
3. Disconnect then reconnect.
"""
from juju import jasyncio
from juju.model import Model
from juju.errors import JujuEntityNotFoundError
async def main():
model = Model()
retryCount = 3
for i in range(0, retryCount):
await model.connect_current()
try:
model.applications['foo'].relations
except JujuEntityNotFoundError as e:
print(e.entity_name)
finally:
await model.disconnect()
    # Everything worked out, continue onwards.
if __name__ == '__main__':
jasyncio.run(main())
|
from scheduler.views.rest_dispatch import RESTDispatch
from scheduler.utils.validation import Validation
from scheduler.views.api.exceptions import MissingParamException, \
InvalidParamException
from panopto_client import PanoptoAPIException
from panopto_client.remote_recorder import RemoteRecorderManagement
from scheduler.models import RecorderCache, RecorderCacheEntry
from scheduler.utils.recorder import get_api_recorder_details, \
RecorderException
from uw_r25.spaces import get_space_by_id
from restclients_core.exceptions import DataFailureException
import datetime
import logging
import json
import re
import pytz
logger = logging.getLogger(__name__)
class Recorder(RESTDispatch):
def __init__(self, *args, **kwargs):
self._space_list_cache_timeout = 1 # timeout in hours
self._api = RemoteRecorderManagement()
super(Recorder, self).__init__(*args, **kwargs)
def get(self, request, *args, **kwargs):
recorder_id = kwargs.get('recorder_id')
if request.GET.get('timeout'):
self._space_list_cache_timeout = float(request.GET.get('timeout'))
if (recorder_id):
return self._get_recorder_details(recorder_id)
else:
return self._list_recorders()
def put(self, request, *args, **kwargs):
recorder_id = kwargs.get('recorder_id')
try:
Validation().panopto_id(recorder_id)
data = json.loads(request.body)
external_id = data.get('external_id', None)
if external_id is not None:
rv = self._api.updateRemoteRecorderExternalId(recorder_id,
external_id)
try:
cache_entry = RecorderCacheEntry.objects.get(
recorder_id=recorder_id)
cache_entry.recorder_external_id = external_id
cache_entry.save()
except RecorderCacheEntry.DoesNotExist:
pass
return self._get_recorder_details(recorder_id)
except (MissingParamException, InvalidParamException,
PanoptoAPIException) as err:
return self.error_response(400, message="{}".format(err))
def _get_recorder_details(self, recorder_id):
try:
recorders = get_api_recorder_details(self._api, recorder_id)
except (RecorderException, PanoptoAPIException,
MissingParamException, InvalidParamException) as err:
return self.error_response(400, message="{}".format(err))
if recorders is None:
return self.error_response(404, message="No Recorder Found")
return self._recorder_rep(recorders)
def _recorder_rep(self, recorders):
reps = []
for recorder in recorders:
rep = {
'id': recorder.Id,
'external_id': recorder.ExternalId,
'name': recorder.Name,
'settings_url': recorder.SettingsUrl,
'state': recorder.State,
'space': None,
'scheduled_recordings': []
}
if recorder.ScheduledRecordings and hasattr(
recorder.ScheduledRecordings, 'guid'):
for recording in recorder.ScheduledRecordings.guid:
rep['scheduled_recordings'].append(recording)
            if recorder.ExternalId:
                try:
                    # look up the space with this recorder's external id
                    # (not recorders[0], which repeated the first entry)
                    space = get_space_by_id(recorder.ExternalId)
                    rep['space'] = {
                        'space_id': space.space_id,
                        'name': space.name,
                        'formal_name': space.formal_name
                    }
                except DataFailureException as err:
                    logger.error('Cannot get space for id: {}: {}'.format(
                        recorder.ExternalId, err))
reps.append(rep)
return self.json_response(reps)
def _list_recorders(self):
try:
rec_cache = RecorderCache.objects.all()[0]
now = pytz.UTC.localize(datetime.datetime.now())
timeout = datetime.timedelta(hours=self._space_list_cache_timeout)
if (now - timeout) > rec_cache.created_date:
self._scrub_recorder_cache(rec_cache)
except (IndexError, RecorderCache.DoesNotExist):
try:
recorders = self._api.listRecorders()
rec_cache = self._cache_recorders(recorders)
except PanoptoAPIException as err:
return self.error_response(400, message="{}".format(err))
rep = []
for recorder in RecorderCacheEntry.objects.filter(cache=rec_cache):
rep.append({
'id': recorder.recorder_id,
'external_id': recorder.recorder_external_id,
'name': recorder.name,
'scheduled_recordings': []
})
return self.json_response(rep)
def _cache_recorders(self, recorders):
rec_cache = RecorderCache()
rec_cache.save()
for recorder in recorders:
RecorderCacheEntry.objects.create(
cache=rec_cache,
recorder_id=recorder.Id,
recorder_external_id=recorder.ExternalId or '',
name=recorder.Name)
return rec_cache
def _scrub_recorder_cache(self, rec_cache):
RecorderCacheEntry.objects.filter(cache=rec_cache).delete()
rec_cache.delete()
raise RecorderCache.DoesNotExist()
|
"""
Client side of the scheduler manager RPC API.
"""
from nova import flags
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc.proxy
FLAGS = flags.FLAGS
class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
'''Client side of the scheduler rpc API.
API version history:
1.0 - Initial version.
1.1 - Changes to prep_resize():
- remove instance_uuid, add instance
- remove instance_type_id, add instance_type
- remove topic, it was unused
1.2 - Remove topic from run_instance, it was unused
1.3 - Remove instance_id, add instance to live_migration
1.4 - Remove update_db from prep_resize
1.5 - Add reservations argument to prep_resize()
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic,
default_version=self.BASE_RPC_API_VERSION)
def run_instance(self, ctxt, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties, reservations, call=True):
rpc_method = self.call if call else self.cast
return rpc_method(ctxt, self.make_msg('run_instance',
request_spec=request_spec, admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
is_first_time=is_first_time,
filter_properties=filter_properties,
reservations=reservations), version='1.2')
def prep_resize(self, ctxt, instance, instance_type, image,
request_spec, filter_properties, reservations):
instance_p = jsonutils.to_primitive(instance)
instance_type_p = jsonutils.to_primitive(instance_type)
self.cast(ctxt, self.make_msg('prep_resize',
instance=instance_p, instance_type=instance_type_p,
image=image, request_spec=request_spec,
filter_properties=filter_properties,
reservations=reservations), version='1.5')
def show_host_resources(self, ctxt, host):
return self.call(ctxt, self.make_msg('show_host_resources', host=host))
def live_migration(self, ctxt, block_migration, disk_over_commit,
instance, dest, topic):
# NOTE(comstud): Call vs cast so we can get exceptions back, otherwise
# this call in the scheduler driver doesn't return anything.
instance_p = jsonutils.to_primitive(instance)
return self.call(ctxt, self.make_msg('live_migration',
block_migration=block_migration,
disk_over_commit=disk_over_commit, instance=instance_p,
dest=dest, topic=topic), version='1.3')
def update_service_capabilities(self, ctxt, service_name, host,
capabilities):
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
service_name=service_name, host=host,
capabilities=capabilities))
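# Usage sketch (hypothetical values; `ctxt` is a request context): casts are
# fire-and-forget while calls block for a result, so update_service_capabilities
# returns nothing but show_host_resources returns the host's resource report.
# api = SchedulerAPI()
# api.update_service_capabilities(ctxt, 'compute', 'host1', {'vcpus': 8})
# resources = api.show_host_resources(ctxt, 'host1')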
|
__source__ = 'https://leetcode.com/problems/bitwise-ors-of-subarrays/'
import unittest
class Solution(object):
    def subarrayBitwiseORs(self, A):
        # Frontier-set approach (see the Java notes below): cur holds the ORs
        # of all subarrays ending at the current element; it has at most ~32
        # distinct values per step, since values can only gain set bits.
        ans, cur = set(), set()
        for x in A:
            cur = {x | y for y in cur}
            cur.add(x)
            ans |= cur
        return len(ans)
class TestMethods(unittest.TestCase):
    def test_Local(self):
        # ORs over all subarrays of [1, 1, 2] are {1, 2, 3} -> 3 distinct values
        self.assertEqual(3, Solution().subarrayBitwiseORs([1, 1, 2]))
if __name__ == '__main__':
unittest.main()
Java = '''
Approach 1: Frontier Set
Note: 1,
result(i, j) = A[i] | A[i+1] | ... | A[j] then
result(i, j+1) = result(i, j) | A[j+1]
Note: 2,
the number of unique values in this set cur is at most 32,
since the list result(k, k), result(k-1, k), result(k-2, k), ... is monotone increasing,
and any subsequent values that are different must have more 1s in its binary representation (to a maximum of 32 ones).
Complexity Analysis
Time Complexity: O(N log W), where N is the length of A, and W is the maximum size of the elements in A.
Space Complexity: O(N log W), the size of the answer.
class Solution {
public int subarrayBitwiseORs(int[] A) {
Set<Integer> ans = new HashSet();
Set<Integer> cur = new HashSet();
cur.add(0);
for (int x : A) {
Set<Integer> cur2 = new HashSet();
for (int y : cur) cur2.add(x | y);
cur2.add(x);
cur = cur2;
ans.addAll(cur);
}
return ans.size();
}
}
class Solution {
public int subarrayBitwiseORs(int[] A) {
Set<Integer> set = new HashSet<>();
int ors = 0, res = 0, cum = 0;
for (int i = 0; i < A.length; i++) {
int cand = 0;
for (int j = i; j >= 0; j--) {
cand |= A[j];
set.add(cand);
if ((cand & (cand + 1)) == 0 && cand >= cum) break;
}
cum |= A[i];
}
return set.size();
}
}
class Solution {
public int subarrayBitwiseORs(int[] A) {
if (A == null || A.length == 0) return 0;
int n = A.length;
// initialize
Set<Integer> seen = new HashSet<>();
for (int i = 0; i < A.length; i++) seen.add(A[i]);
// join pairs [i] and [i+1]
int end = A.length - 1;
while (end >= 1) {
for (int i = 0; i < end; i++) A[i] |= A[i + 1];
// filter out consecutive duplicates in array and add [i] to the set
int i = 0;
seen.add(A[i]);
int j = 1;
while (j < end) {
if (A[i] != A[j]) {
A[++i] = A[j];
seen.add(A[i]);
}
j++;
}
end = i;
}
return seen.size();
}
}
'''
|
import itertools
from nova.conf import paths
from oslo_config import cfg
LIVE_MIGRATION_DOWNTIME_MIN = 100
LIVE_MIGRATION_DOWNTIME_STEPS_MIN = 3
LIVE_MIGRATION_DOWNTIME_DELAY_MIN = 10
libvirt_group = cfg.OptGroup("libvirt",
title="Libvirt Options",
help="""
Libvirt options allow cloud administrators to configure the libvirt
hypervisor driver to be used within an OpenStack deployment.
""")
libvirt_general_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image. This will not be used if an image id '
'is provided by the user.'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image'),
cfg.StrOpt('virt_type',
default='kvm',
choices=('kvm', 'lxc', 'qemu', 'uml', 'xen', 'parallels'),
help='Libvirt domain type'),
cfg.StrOpt('connection_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on virt_type)'),
cfg.BoolOpt('inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('inject_key',
default=False,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('inject_partition',
default=-2,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_inbound_addr',
help='Live migration target ip or hostname '
'(if this option is set to None, which is the default, '
'the hostname of the migration target '
'compute node will be used)'),
cfg.StrOpt('live_migration_uri',
help='Override the default libvirt live migration target URI '
'(which is dependent on virt_type) '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED',
help='Migration flags to be set for live migration',
deprecated_for_removal=True,
deprecated_reason='The correct live migration flags can be '
'inferred from the new '
'live_migration_tunnelled config option. '
'live_migration_flag will be removed to '
'avoid potential misconfiguration.'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration',
deprecated_for_removal=True,
deprecated_reason='The correct block migration flags can be '
'inferred from the new '
'live_migration_tunnelled config option. '
'block_migration_flag will be removed to '
'avoid potential misconfiguration.'),
cfg.BoolOpt('live_migration_tunnelled',
help='Whether to use tunnelled migration, where migration '
'data is transported over the libvirtd connection. If '
'True, we use the VIR_MIGRATE_TUNNELLED migration flag, '
'avoiding the need to configure the network to allow '
'direct hypervisor to hypervisor communication. If '
'False, use the native transport. If not set, Nova '
'will choose a sensible default based on, for example '
'the availability of native encryption support in the '
'hypervisor.'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
               help='Maximum bandwidth (in MiB/s) to be used during migration. '
'If set to 0, will choose a suitable default. Some '
'hypervisors do not support this feature and will return '
'an error if bandwidth is not 0. Please refer to the '
'libvirt documentation for further details'),
cfg.IntOpt('live_migration_downtime',
default=500,
help='Maximum permitted downtime, in milliseconds, for live '
'migration switchover. Will be rounded up to a minimum '
'of %dms. Use a large value if guest liveness is '
'unimportant.' % LIVE_MIGRATION_DOWNTIME_MIN),
cfg.IntOpt('live_migration_downtime_steps',
default=10,
help='Number of incremental steps to reach max downtime value. '
'Will be rounded up to a minimum of %d steps' %
LIVE_MIGRATION_DOWNTIME_STEPS_MIN),
cfg.IntOpt('live_migration_downtime_delay',
default=75,
help='Time to wait, in seconds, between each step increase '
'of the migration downtime. Minimum delay is %d seconds. '
'Value is per GiB of guest RAM + disk to be transferred, '
'with lower bound of a minimum of 2 GiB per device' %
LIVE_MIGRATION_DOWNTIME_DELAY_MIN),
cfg.IntOpt('live_migration_completion_timeout',
default=800,
help='Time to wait, in seconds, for migration to successfully '
'complete transferring data before aborting the '
'operation. Value is per GiB of guest RAM + disk to be '
'transferred, with lower bound of a minimum of 2 GiB. '
'Should usually be larger than downtime delay * downtime '
'steps. Set to 0 to disable timeouts.'),
cfg.IntOpt('live_migration_progress_timeout',
default=150,
help='Time to wait, in seconds, for migration to make forward '
'progress in transferring data before aborting the '
'operation. Set to 0 to disable timeouts.'),
cfg.StrOpt('snapshot_image_format',
choices=('raw', 'qcow2', 'vmdk', 'vdi'),
help='Snapshot image format. Defaults to same as source image'),
cfg.StrOpt('disk_prefix',
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on virt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.StrOpt('cpu_mode',
choices=('host-model', 'host-passthrough', 'custom', 'none'),
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If virt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('cpu_model',
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'cpu_mode="custom" and virt_type="kvm|qemu"'),
cfg.StrOpt('snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: file=directsync,block=none'),
cfg.StrOpt('rng_dev_path',
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
cfg.ListOpt('hw_machine_type',
help='For qemu or KVM guests, set this option to specify '
'a default machine type per host architecture. '
'You can find a list of supported machine types '
'in your environment by checking the output of '
                     'the "virsh capabilities" command. The format of the '
'value for this config option is host-arch=machine-type. '
'For example: x86_64=machinetype1,armv7l=machinetype2'),
cfg.StrOpt('sysinfo_serial',
default='auto',
choices=('none', 'os', 'hardware', 'auto'),
               help='The data source used to populate the host "serial" '
                    'UUID exposed to guests in the virtual BIOS.'),
cfg.IntOpt('mem_stats_period_seconds',
default=10,
               help='Period, in seconds, for collecting memory usage '
                    'statistics. A zero or negative value disables memory '
                    'usage statistics.'),
cfg.ListOpt('uid_maps',
default=[],
                help='List of uid targets and ranges. '
                     'Syntax is guest-uid:host-uid:count. '
                     'Maximum of 5 allowed.'),
cfg.ListOpt('gid_maps',
default=[],
                help='List of gid targets and ranges. '
                     'Syntax is guest-gid:host-gid:count. '
                     'Maximum of 5 allowed.'),
cfg.IntOpt('realtime_scheduler_priority',
default=1,
               help='In a realtime host context, vCPUs for the guest will '
                    'run at this scheduling priority. The valid priority '
                    'range depends on the host kernel (usually 1-99)')
]
libvirt_imagebackend_opts = [
cfg.StrOpt('images_type',
default='default',
choices=('raw', 'flat', 'qcow2', 'lvm', 'rbd', 'ploop',
'default'),
help='VM Images format. If default is specified, then'
' use_cow_images flag is used instead of this one.'),
cfg.StrOpt('images_volume_group',
help='LVM Volume Group that is used for VM images, when you'
' specify images_type=lvm.'),
cfg.BoolOpt('sparse_logical_volumes',
default=False,
help='Create sparse logical volumes (with virtualsize)'
' if this flag is set to True.'),
cfg.StrOpt('images_rbd_pool',
default='rbd',
help='The RADOS pool in which rbd volumes are stored'),
cfg.StrOpt('images_rbd_ceph_conf',
default='', # default determined by librados
help='Path to the ceph configuration file to use'),
cfg.StrOpt('hw_disk_discard',
choices=('ignore', 'unmap'),
               help='Discard option for nova managed disks. Requires '
                    'libvirt >= 1.0.6 and QEMU >= 1.5 (raw format) or '
                    'QEMU >= 1.6 (qcow2 format)'),
]
libvirt_imagecache_opts = [
cfg.StrOpt('image_info_filename_pattern',
default='$instances_path/$image_cache_subdirectory_name/'
'%(image)s.info',
help='Allows image information files to be stored in '
'non-standard locations',
deprecated_for_removal=True,
deprecated_reason='Image info files are no longer used by the '
'image cache'),
cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
default=3600,
help='Unused resized base images younger than this will not be '
'removed'),
cfg.BoolOpt('checksum_base_images',
default=False,
help='Write a checksum for files in _base to disk',
deprecated_for_removal=True,
deprecated_reason='The image cache no longer periodically '
'calculates checksums of stored images. '
'Data integrity can be checked at the block '
'or filesystem level.'),
cfg.IntOpt('checksum_interval_seconds',
default=3600,
help='How frequently to checksum base images',
deprecated_for_removal=True,
deprecated_reason='The image cache no longer periodically '
'calculates checksums of stored images. '
'Data integrity can be checked at the block '
'or filesystem level.'),
]
libvirt_lvm_opts = [
cfg.StrOpt('volume_clear',
default='zero',
choices=('none', 'zero', 'shred'),
help='Method used to wipe old volumes.'),
cfg.IntOpt('volume_clear_size',
default=0,
help='Size in MiB to wipe at start of old volumes. 0 => all'),
]
libvirt_utils_opts = [
cfg.BoolOpt('snapshot_compression',
default=False,
help='Compress snapshot images when possible. This '
'currently applies exclusively to qcow2 images'),
]
libvirt_vif_opts = [
cfg.BoolOpt('use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
]
libvirt_volume_opts = [
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help='Protocols listed here will be accessed directly '
'from QEMU. Currently supported protocols: [gluster]'),
]
libvirt_volume_aoe_opts = [
cfg.IntOpt('num_aoe_discover_tries',
default=3,
help='Number of times to rediscover AoE target to find volume'),
]
libvirt_volume_glusterfs_opts = [
cfg.StrOpt('glusterfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the glusterfs volume is mounted on the '
'compute node'),
]
libvirt_volume_iscsi_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help='Number of times to rescan iSCSI target to find volume'),
cfg.BoolOpt('iscsi_use_multipath',
default=False,
help='Use multipath connection of the iSCSI or FC volume'),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help='The iSCSI transport iface to use to connect to target in '
'case offload support is desired. Default format is of '
'the form <transport_name>.<hwaddress> where '
'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '
'cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address '
'of the interface and can be generated via the '
'iscsiadm -m iface command. Do not confuse the '
'iscsi_iface parameter to be provided here with the '
'actual transport name.'),
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
libvirt_volume_iser_opts = [
cfg.IntOpt('num_iser_scan_tries',
default=5,
help='Number of times to rescan iSER target to find volume'),
cfg.BoolOpt('iser_use_multipath',
default=False,
help='Use multipath connection of the iSER volume'),
]
libvirt_volume_net_opts = [
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes'),
]
libvirt_volume_nfs_opts = [
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
               help='Mount options passed to the NFS client. See the '
                    'nfs man page for details'),
]
libvirt_volume_quobyte_opts = [
cfg.StrOpt('quobyte_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the Quobyte volume is mounted on the '
'compute node'),
cfg.StrOpt('quobyte_client_cfg',
help='Path to a Quobyte Client configuration file.'),
]
libvirt_volume_scality_opts = [
cfg.StrOpt('scality_sofs_config',
help='Path or URL to Scality SOFS configuration file'),
cfg.StrOpt('scality_sofs_mount_point',
default='$state_path/scality',
help='Base dir where Scality SOFS shall be mounted'),
]
libvirt_volume_smbfs_opts = [
cfg.StrOpt('smbfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the SMBFS shares are mounted on the '
'compute node'),
cfg.StrOpt('smbfs_mount_options',
default='',
help='Mount options passed to the SMBFS client. See '
'mount.cifs man page for details. Note that the '
'libvirt-qemu uid and gid must be specified.'),
]
libvirt_remotefs_opts = [
cfg.StrOpt('remote_filesystem_transport',
default='ssh',
choices=('ssh', 'rsync'),
help='Use ssh or rsync transport for creating, copying, '
'removing files on the remote host.'),
]
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
libvirt_imagebackend_opts,
libvirt_imagecache_opts,
libvirt_lvm_opts,
libvirt_utils_opts,
libvirt_vif_opts,
libvirt_volume_opts,
libvirt_volume_aoe_opts,
libvirt_volume_glusterfs_opts,
libvirt_volume_iscsi_opts,
libvirt_volume_iser_opts,
libvirt_volume_net_opts,
libvirt_volume_nfs_opts,
libvirt_volume_quobyte_opts,
libvirt_volume_scality_opts,
libvirt_volume_smbfs_opts,
libvirt_remotefs_opts
))
def register_opts(conf):
conf.register_group(libvirt_group)
conf.register_opts(ALL_OPTS, group=libvirt_group)
def list_opts():
return {libvirt_group: ALL_OPTS}
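# Usage sketch: register these options on an oslo.config ConfigOpts instance
# (nova uses its global CONF) and read a value from the [libvirt] group.
# from oslo_config import cfg
# register_opts(cfg.CONF)
# assert cfg.CONF.libvirt.virt_type == 'kvm'  # the default defined above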
|
needs_sphinx = '1.6'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Gitcd'
copyright = u'2017, Claudio Walser'
author = u'Claudio Walser'
version = u'1.6'
release = u'1.6.16'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
htmlhelp_basename = 'Gitcddoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
latex_documents = [
(master_doc, 'Gitcd.tex', u'Gitcd Documentation',
u'Claudio Walser', 'manual'),
]
man_pages = [
(master_doc, 'gitcd', u'Gitcd Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'Gitcd', u'Gitcd Documentation',
author, 'Gitcd', 'One line description of project.',
'Miscellaneous'),
]
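# The HTML docs can be built from this configuration with the standard Sphinx
# invocation, e.g.: sphinx-build -b html <sourcedir> <builddir>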
|
"""Script for converting content to Zip/ISO format.
"""
from os import makedirs
from os import stat
from os import walk
from os.path import isdir
from os.path import join
from shutil import copy2
from shutil import copytree
import sys
import click
from scripts.utils import to_iso
from scripts.utils import to_zip
from .verifyconfig import readconfig
def makeiso(size=None):
"""Converts content to ISO format.
Args:
        size: Maximum size (in MiB) of each ISO part; None for a single ISO
"""
try:
click.echo("\nReading and verifying "
"configuration file.....................")
result = readconfig(["division", "destination.main_path",
"output_folder_name"], True)
division = result[0]
dst = result[1]
folder_name = result[2]
    except Exception:
click.echo("Unable to read information from config.yaml. Fix it (or "
"check it out from github) then try again.")
return
if not division:
iso_maker(join(dst, folder_name), dst, folder_name, size)
else:
if not isdir(join(dst, "iso")):
copytree(join(dst, folder_name, "img"),
join(dst, "iso", "img"), symlinks=True)
for div in division:
dst_dir = join(dst, "goc", div)
iso_maker(dst_dir, join(dst, "iso"), div, size)
copy2(join(dst, folder_name, "index.html"), join(dst, "iso"))
def iso_maker(dst_dir, dst, div, size):
    """Converts the content of a directory to ISO format, splitting it
    into multiple ISO files if a maximum size is given.
    Args:
        dst_dir: Directory containing the content to package
        dst: Directory where the ISO file(s) are written
        div: Division name, used for the output file name
        size: Maximum size (in MiB) of each ISO part; None for a single ISO
"""
if not isdir(dst_dir):
click.echo(
"Error: No output for div %s. Have you run the bundle command?" % div)
return
if size:
max_size = int(size)
parts = split(dst_dir, max_size)
for i, part in enumerate(parts):
            click.echo(
                "Packaging content in an ISO file %d / %d" % (i+1, len(parts)))
to_iso(dst_dir, join(dst, "%s%d.iso" % (div, i+1)), filelist=part)
else:
# convert to ISO file
        click.echo("Packaging content in an ISO file..................")
to_iso(dst_dir, join(dst, "%s.iso" % div))
def zip_maker(dst_dir, dst, div, size):
    """Converts the content of a directory to ZIP format, splitting it
    into multiple zip files if a maximum size is given.
    Args:
        dst_dir: Directory containing the content to package
        dst: Directory where the zip file(s) are written
        div: Division name, used for the output file name
        size: Maximum size (in MiB) of each zip part; None for a single zip
"""
if not isdir(dst_dir):
click.echo(
"Error: No output for div %s. Have you run the bundle command?" % div)
return
if size:
max_size = int(size)
parts = split(dst_dir, max_size)
for i, part in enumerate(parts):
click.echo(
"Packaging content in a zip file %d / %d" % (i+1, len(parts)))
to_zip(dst_dir, join(dst, "%s%d.zip" % (div, i+1)), False, filelist=part)
else:
# convert to zip file
click.echo("Packaging content in a zip file..................")
to_zip(dst_dir, join(dst, "%s.zip" % div), False)
def makezip(size=None):
    """Converts content to Zip format, optionally splitting it.
    Args:
        size: Maximum size (in MiB) of each zip part; None for a single zip
"""
try:
click.echo(
"\nReading and verifying configuration file.....................")
result = readconfig(["division", "destination.main_path",
"output_folder_name"], True)
division = result[0]
dst = result[1]
folder_name = result[2]
    except Exception:
click.echo("Unable to read information from config.yaml."
" Fix, then try again.")
return
if not division:
        # convert to a zip file
zip_maker(join(dst, folder_name), dst, folder_name, size)
else:
if not isdir(join(dst, "zip")):
copytree(join(dst, folder_name, "img"),
join(dst, "zip", "img"), symlinks=True)
for div in division:
dst_dir = join(dst, "goc", div)
zip_maker(dst_dir, join(dst, "zip"), div, size)
copy2(join(dst, folder_name, "index.html"), join(dst, "zip"))
def split(dst_dir, max_size):
    """Checks the total size of the content, then splits it into parts.
Args:
dst_dir: Destination directory
max_size: Maximum size of a split
Returns:
List of splits
"""
parts = []
current_part = []
current_free = max_size
for srcpath, dirnames, filenames in walk(dst_dir):
for name in filenames:
src = join(srcpath, name)
info = stat(src)
size = 1.0 * info.st_size / 1024 / 1024
if current_free < size:
# create a new part
parts.append(current_part)
current_part = []
current_free = max_size
current_part.append(src)
current_free -= size
if not dirnames and not filenames:
current_part.append(srcpath)
parts.append(current_part)
return parts
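# Usage sketch (hypothetical paths): split a content tree into parts of at
# most 700 MiB each, then package one ISO per part via the filelist argument,
# mirroring what iso_maker() does above.
# parts = split("/tmp/content", 700)
# for i, part in enumerate(parts):
#     to_iso("/tmp/content", "/tmp/content%d.iso" % (i + 1), filelist=part)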
|
import os
import threading
class LoggerWrapper(threading.Thread):
"""
    Read text messages from a pipe and redirect them
to a logger (see python's logger module),
the object itself is able to supply a file
descriptor to be used for writing
fdWrite ==> fdRead ==> pipeReader
"""
def __init__(self, logger, level):
"""
Setup the object with a logger and a loglevel
and start the thread
"""
# Initialize the superclass
threading.Thread.__init__(self)
# Make the thread a Daemon Thread (program will exit when only daemon
# threads are alive)
self.daemon = True
# Set the logger object where messages will be redirected
self.logger = logger
# Set the log level
self.level = level
# Create the pipe and store read and write file descriptors
self.fdRead, self.fdWrite = os.pipe()
# Create a file-like wrapper around the read file descriptor
# of the pipe, this has been done to simplify read operations
self.pipeReader = os.fdopen(self.fdRead)
# Start the thread
self.start()
def fileno(self):
"""
Return the write file descriptor of the pipe
"""
return self.fdWrite
def run(self):
"""
        This is the method executed by the thread: it
        simply reads from the pipe (using a file-like
        wrapper) and writes the text to the log.
        NB: the trailing newline character of the string
        read from the pipe is removed
"""
        # Endless loop; the method will exit this loop only
        # when the pipe is closed, that is, when a call to
        # self.pipeReader.readline() returns an empty string
while True:
# Read a line of text from the pipe
messageFromPipe = self.pipeReader.readline()
# If the line read is empty the pipe has been
# closed, do a cleanup and exit
# WARNING: I don't know if this method is correct,
# further study needed
if len(messageFromPipe) == 0:
self.pipeReader.close()
os.close(self.fdRead)
return
# end if
            # Remove the trailing newline character(s) from the string
            # before sending it to the logger (os.linesep may be more than
            # one character, e.g. '\r\n', so rstrip is safer than slicing)
            messageToLog = messageFromPipe.rstrip('\r\n')
# Send the text to the logger
self._write(messageToLog)
def _write(self, message):
"""
Utility method to send the message
to the logger with the correct loglevel
"""
self.logger.log(self.level, message)
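# Usage sketch: because LoggerWrapper exposes fileno(), it can be handed
# directly to subprocess so that a child's stdout is streamed into a logger.
# import logging
# import subprocess
# logging.basicConfig(level=logging.INFO)
# wrapper = LoggerWrapper(logging.getLogger('child'), logging.INFO)
# subprocess.call(['echo', 'hello'], stdout=wrapper)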
|
from abc import ABC, abstractmethod
from oscar.agent.commander.base_commander import BaseCommander
from oscar.util.point import Point
from oscar.meta_action import *
class ContextSaveCommander(BaseCommander):
def __init__(self, subordinates: list):
super().__init__(subordinates=subordinates)
self._subordinate_context = {}
self._is_changing_context = False
self.add_shared('env', Env())
self.add_shared('camera', Camera())
def step(self, obs, locked_choice=None):
self._shared['env'].timestamp += 1
if locked_choice is None:
locked_choice = self._locked_choice
        # if we were changing context, do not ask to choose a subordinate
if self._is_changing_context:
self._is_changing_context = False
playing_subordinate = self._playing_subordinate
else:
playing_subordinate = self.choose_subordinate(obs, locked_choice)
        # if we are changing the active subordinate, save and restore context (requires one action)
if playing_subordinate is not self._playing_subordinate and self._playing_subordinate is not None:
self.save_context(obs)
play = self.restore_context(playing_subordinate, obs)
self._playing_subordinate = playing_subordinate
else:
self._playing_subordinate = playing_subordinate
play = self._playing_subordinate.step(obs, locked_choice)
if "locked_choice" in play:
self._locked_choice = play["locked_choice"]
else:
self._locked_choice = False
return play
def restore_context(self, subordinate, obs):
if subordinate not in self._subordinate_context:
context = AgentContext()
location = self._shared['camera'].location(obs=obs, shared=self._shared)
context.camera = location
else:
context = self._subordinate_context[subordinate]
play = {}
play['actions'] = [actions.FunctionCall(MOVE_CAMERA, [context.camera.to_array()])]
play['locked_choice'] = True
self._is_changing_context = True
return play
def save_context(self, obs):
context = AgentContext()
location = self._shared['camera'].location(obs=obs, shared=self._shared)
context.camera = location
self._subordinate_context[self._playing_subordinate] = context
@abstractmethod
def choose_subordinate(self, obs, locked_choice):
"""
Choose a subordinate among the list of subordinates, and make it play.
:return: A subordinate among the list of subordinates.
"""
def reset(self):
super().reset()
self._subordinate_context = {}
self._is_changing_context = False
self.add_shared('env', Env())
self.add_shared('camera', Camera())
class AgentContext:
def __init__(self):
self.camera = Point()
|
__author__ = "OBL"
from sys import maxsize
class Contact:
def __init__(self, firstname=None, lastname=None, address=None, homephone=None, mobilephone=None, workphone=None, phone=None,
mail1=None, mail2=None, mail3=None, id=None, all_phones_from_home_page=None, all_mail_from_home_page=None):
self.firstname = firstname
self.lastname = lastname
self.address = address
self.homephone = homephone
self.mobilephone = mobilephone
self.workphone = workphone
self.phone = phone
self.mail1 = mail1
self.mail2 = mail2
self.mail3 = mail3
self.all_phones_from_home_page = all_phones_from_home_page
self.all_mail_from_home_page = all_mail_from_home_page
self.id = id
def __repr__(self):
return "%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s" % (self.id, self.firstname, self.lastname, self.address, self.homephone,
self.mobilephone, self.workphone, self.phone, self.mail1, self.mail2, self.mail3)
def __eq__(self, other):
return self.id is None or other.id is None or self.id == other.id
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
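# Usage sketch: id_or_max() gives a stable sort key even for contacts without
# an id yet (they sort last), pairing with the id-based __eq__ above.
# contacts.sort(key=Contact.id_or_max)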
|
__authors__ = ['"wei keke" <keke.wei@cs2c.com.cn>']
__version__ = "V0.1"
'''
'''
import unittest
from BaseTestCase import BaseTestCase
from TestAPIs.DataCenterAPIs import DataCenterAPIs,smart_attach_storage_domain, smart_deactive_storage_domain, smart_detach_storage_domain
from TestAPIs.ClusterAPIs import ClusterAPIs, AffinityGroupsAPIs, smart_create_cluster, smart_delete_cluster, \
smart_create_affinitygroups, smart_delete_affinitygroups
from TestAPIs.StorageDomainAPIs import smart_create_storage_domain, smart_del_storage_domain
from TestAPIs.VirtualMachineAPIs import VirtualMachineAPIs, smart_del_vm
from TestAPIs.NetworkAPIs import smart_create_network, smart_delete_network
from Utils.PrintLog import LogPrint
from Utils.Util import DictCompare
from TestData.Cluster import ITC02_Setup as ModuleData
from TestAPIs.HostAPIs import smart_create_host,smart_del_host
import xmltodict
class ITC02_Setup(BaseTestCase):
'''
    @summary: Module-level test case for cluster management; initializes the module test environment.
    @note: (1) Create a data center;
    @note: (2) Create a cluster;
    @note: (3) Create a host and wait until it becomes UP;
    @note: (4) Create 4 storage domains (data1/data2/Export/ISO);
    @note: (5) Attach all 4 storage domains to the data center;
    @note: (6) Create 2 virtual machines.
'''
def setUp(self):
self.dm = super(self.__class__, self).setUp()
def test_CreateModuleTestEnv(self):
dcapi = DataCenterAPIs()
capi = ClusterAPIs()
        # Create a data center
LogPrint().info("Pre-Module-Test-1: Create DataCenter '%s'." % self.dm.dc_name)
self.assertTrue(dcapi.createDataCenter(self.dm.dc_info)['status_code']==self.dm.expected_status_code_create_dc)
        # Create a cluster
LogPrint().info("Pre-Module-Test-2: Create Cluster '%s' in DataCenter '%s'." % (self.dm.cluster_name, self.dm.dc_name))
self.assertTrue(capi.createCluster(self.dm.cluster_info)['status_code']==self.dm.expected_status_code_create_cluster)
        # Create a host in the data center and wait until it is UP.
LogPrint().info("Pre-Module-Test-3: Create Host '%s' in Cluster '%s'." % (self.dm.host1_name, self.dm.cluster_name))
self.assertTrue(smart_create_host(self.dm.host1_name, self.dm.host_info))
        # Create the data storage domains (data1/data2/export/iso) for the data center.
@BaseTestCase.drive_data(self, self.dm.storage_info)
def create_storage_domains(xml_storage_domain_info):
sd_name = xmltodict.parse(xml_storage_domain_info)['storage_domain']['name']
LogPrint().info("Pre-Module-Test-4: Create Data Storage '%s'." % sd_name)
self.assertTrue(smart_create_storage_domain(sd_name, xml_storage_domain_info))
create_storage_domains()
        # Attach the created data1, data2, export and iso domains to the NFS/iSCSI data center.
LogPrint().info("Pre-Module-Test-5: Attach the storages to data centers.")
self.assertTrue(smart_attach_storage_domain(self.dm.dc_name, self.dm.data1_nfs_name))
self.assertTrue(smart_attach_storage_domain(self.dm.dc_name, self.dm.data2_nfs_name))
self.assertTrue(smart_attach_storage_domain(self.dm.dc_name, self.dm.export1_name))
self.assertTrue(smart_attach_storage_domain(self.dm.dc_name, self.dm.iso1_name))
        # Create two vms
LogPrint().info("Pre-Module-Test-6: Create two vms.")
self.vmapi = VirtualMachineAPIs()
r1 = self.vmapi.createVm(self.dm.vm1_info)
if r1['status_code'] == 201:
self.vm_name = r1['result']['vm']['name']
else:
            LogPrint().error("Create vm failed. Status-code is WRONG.")
self.assertTrue(False)
r2 = self.vmapi.createVm(self.dm.vm2_info)
if r2['status_code'] == 201:
self.vm_name = r2['result']['vm']['name']
else:
            LogPrint().error("Create vm failed. Status-code is WRONG.")
self.assertTrue(False)
class ITC020101_GetClustersList(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Basic Operations - 01 Get the clusters list
'''
def test_GetClustersList(self):
'''
        @summary: Test steps
        @note: (1) Get the list of all clusters;
        @note: (2) Verify that the status code returned by the API is correct.
'''
clusterapi = ClusterAPIs()
LogPrint().info("Test: Get all clusters list.")
r = clusterapi.getClustersList()
if r['status_code']==200:
LogPrint().info('PASS: Get Clusters list SUCCESS.')
self.flag = True
else:
LogPrint().error('FAIL: Get Clusters list FAIL. Returned status code "%s" is WRONG.' % r['status_code'])
self.flag = False
self.assertTrue(self.flag)
class ITC020102_GetClusterInfo(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 02 Get info of a specified cluster
'''
def setUp(self):
'''
        @summary: Environment initialization before the test case runs (precondition)
        '''
        # Call the parent method to get the test data module for this case
        self.dm = super(self.__class__, self).setUp()
        # Precondition 1: Create a cluster
LogPrint().info("Pre-Test: Create a cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
def test_GetClusterInfo(self):
'''
        @summary: Test case execution steps
        @note: (1) Get the info of the specified cluster;
        @note: (2) Verify that the returned status code and cluster info are correct.
        '''
        # Test 1: Get the cluster info and compare it with the expected result
self.clusterapi = ClusterAPIs()
LogPrint().info("Test: Get cluster's ('%s') info." % self.dm.cluster_name)
r = self.clusterapi.getClusterInfo(self.dm.cluster_name)
if r['status_code']==self.dm.status_code:
dict_actual = r['result']
dict_expected = xmltodict.parse(self.dm.cluster_info)
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: Get Cluster '%s' info SUCCESS." % self.dm.cluster_name)
self.flag = True
else:
LogPrint().error("FAIL: Get Cluster '%s' info INCORRECT." % self.dm.cluster_name)
self.flag = False
else:
LogPrint().error("FAIL: Get Cluster '%s' info FAILED. Returned status code '%s' is WRONG." % (self.dm.cluster_name, r['status_code']))
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test: Delete cluster '%s'." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC02010301_CreateCluster(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 03 Create a cluster - 01 Creation succeeds
'''
def setUp(self):
'''
        @summary: Environment initialization before the test case runs (precondition)
        '''
        # Initialize test data
self.dm = super(self.__class__, self).setUp()
self.clusterapi = ClusterAPIs()
def test_CreateCluster(self):
'''
        @summary: Test steps
        @note: (1) Create a cluster;
        @note: (2) On success, verify that the returned status code and cluster info are correct.
'''
LogPrint().info("Test: Create a cluster '%s'." % self.dm.cluster_name)
r = self.clusterapi.createCluster(self.dm.cluster_info)
if r['status_code'] == self.dm.status_code:
dict_actual = r['result']
dict_expected = xmltodict.parse(self.dm.cluster_info)
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: Create Cluster '%s' SUCCESS." % self.dm.cluster_name)
self.flag = True
else:
LogPrint().error("FAIL: Create Cluster '%s' INCORRECT." % self.dm.cluster_name)
self.flag = False
else:
LogPrint().error("FAIL: Returned status code '%s' is WRONG." % r['status_code'])
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test: Delete cluster '%s'." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC02010302_CreateCluster_Dup(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 03 Create a cluster - 02 Duplicate name
'''
def setUp(self):
'''
        @summary: Initialize test data and the test environment.
        '''
        # Initialize test data
self.dm = super(self.__class__, self).setUp()
LogPrint().info("Pre-Test: Create a cluster %s"%self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
def test_CreateCluster_Dup(self):
'''
        @summary: Test steps
        @note: (1) Create a Cluster with a duplicate name;
        @note: (2) The operation fails; verify that the returned status code and error message are correct.
'''
clusterapi = ClusterAPIs()
LogPrint().info("Test: Create a cluster with dup name.")
r = clusterapi.createCluster(self.dm.cluster_info)
print r
if r['status_code'] == self.dm.status_code:
dict_actual = r['result']
dict_expected = xmltodict.parse(self.dm.error_info)
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: Returned status code and info are CORRECT while creating cluster with dup name.")
self.flag = True
else:
LogPrint().error("FAIL: Returned info are INCORRECT while creating cluster with dup name.")
self.flag = False
else:
LogPrint().error("FAIL: Returned status code '%s' is WRONG. " % r['status_code'])
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test: Delete cluster '%s'." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC02010303_CreateClusterNoRequired(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 03 Create a cluster - 03 Missing required parameters
    @note: Cluster name, data center and cpu type are required; verify the returned status code and error message.
'''
def setUp(self):
'''
        @summary: Environment initialization before the test case runs (precondition)
        '''
        # Initialize test data
self.dm = super(self.__class__, self).setUp()
def test_CreateClusterNoRequired(self):
'''
        @summary: Test steps
        @note: (1) Create a cluster with required parameters missing;
        @note: (2) The operation fails; verify that the returned status code and error message are correct.
'''
clusterapi = ClusterAPIs()
self.expected_result_index = 0
        # Data-driven: loop over the test data file and try to create multiple clusters
@BaseTestCase.drive_data(self, self.dm.cluster_info)
def do_test(xml_info):
self.flag = True
r = clusterapi.createCluster(xml_info)
if r['status_code']==self.dm.expected_status_code:
dictCompare = DictCompare()
if dictCompare.isSubsetDict(xmltodict.parse(self.dm.expected_info_list[self.expected_result_index]), r['result']):
LogPrint().info("PASS: Returned status code and messages are CORRECT.")
else:
LogPrint().error("FAIL: Returned messages are INCORRECT.")
self.flag = False
else:
LogPrint().error("FAIL: Returned status code is '%s' while it should be '%s'." % (r['status_code'], self.dm.expected_status_code))
self.flag = False
self.assertTrue(self.flag)
self.expected_result_index += 1
do_test()
def tearDown(self):
'''
        @summary: No cleanup needed
'''
pass
class ITC02010401_UpdateCluster_nohost(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 04 Edit a cluster - 01 No hosts in the cluster
'''
def setUp(self):
'''
        @summary: Initialize test data and the test environment.
        '''
        # Initialize test data
        self.dm = super(self.__class__, self).setUp()
        # Precondition 1: Create a cluster
LogPrint().info("Pre-Test: Create cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
def test_UpdateCluster(self):
'''
        @summary: Test steps
'''
clusterapi = ClusterAPIs()
self.expected_result_index = 0
        # Data-driven: loop over the test data file and update the cluster with multiple data sets
@BaseTestCase.drive_data(self, self.dm.cluster_info_new)
def do_test(xml_info):
self.flag = True
r = clusterapi.updateCluster(self.dm.cluster_name, xml_info)
if r['status_code'] == self.dm.status_code:
dict_actual = r['result']
dict_expected = xmltodict.parse(xml_info)
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: ITC02010401_UpdateCluster_nohost SUCCESS." )
self.flag = True
else:
LogPrint().error("FAIL: ITC02010401_UpdateCluster_nohost.Error-info INCORRECT.")
self.flag = False
else:
LogPrint().error("FAIL: ITC02010401_UpdateCluster_nohost FAILED.Status-code WRONG. " )
self.flag = False
self.assertTrue(self.flag)
self.expected_result_index += 1
do_test()
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test: Delete cluster '%s'." % self.dm.cluster_name_new)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name_new))
class ITC0201040201_UpdateCluster_host_cputype(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 04 Edit a cluster - 02 Hosts in the cluster - 01 Change the cluster's cpu type
'''
def setUp(self):
'''
        @summary: Initialize test data and the test environment.
        '''
        # Initialize test data
        self.dm = super(self.__class__, self).setUp()
        # Precondition 1: Create a cluster
        LogPrint().info("Pre-Test-1: Create cluster '%s' for this TC." % self.dm.cluster_name)
        self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
        # Precondition 2: Create a host
LogPrint().info("Pre-Test-2: Create host '%s' for this TC." % self.dm.host_name)
self.assertTrue(smart_create_host(self.dm.host_name, self.dm.host_info))
def test_UpdateCluster_host(self):
'''
        @summary: Test steps
        @note: (1) Change the CPU type of a cluster that contains hosts;
        @note: (2) On success, verify that the returned status code and related info are correct.
'''
clusterapi = ClusterAPIs()
LogPrint().info("Test: Edit cluster's cpu type if there are hosts in this cluster.")
r = clusterapi.updateCluster(self.dm.cluster_name, self.dm.cluster_info_new)
if r['status_code'] == self.dm.status_code:
dict_actual = r['result']
dict_expected = xmltodict.parse(self.dm.expected_info)
print dict_actual
print dict_expected
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: ITC0201040201_test_UpdateCluster_host_cputype SUCCESS." )
self.flag = True
else:
LogPrint().error("FAIL: ITC0201040201_test_UpdateCluster_host_cputype .Error-info INCORRECT.")
self.flag = False
else:
LogPrint().error("FAIL: Returned status code '%s' is WRONG." % r['status_code'])
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete host '%s'." % self.dm.host_name)
self.assertTrue(smart_del_host(self.dm.host_name,self.dm.host_del_option))
LogPrint().info("Post-Test-2: Delete cluster '%s'." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC0201040202_UpdateCluster_host_upcpu(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 04 Edit a cluster - 02 Hosts in the cluster - 02 Raise the cpu level
'''
def setUp(self):
'''
        @summary: Environment initialization before the test case runs (precondition)
        '''
        # Call the parent method to get the test data module for this case
        self.dm = super(self.__class__, self).setUp()
        # Precondition 1: Create a cluster
        self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
        # Precondition 2: Create a host
self.assertTrue(smart_create_host(self.dm.host_name, self.dm.host_info))
def test_UpdateCluster_host(self):
'''
        @summary: Test steps
        @note: (1) Raise the CPU level of a cluster that contains hosts;
        @note: (2) On success, verify that the returned status code and related info are correct.
'''
clusterapi = ClusterAPIs()
LogPrint().info("Test: Improve cluster's CPU level while there are hosts in cluster.")
r = clusterapi.updateCluster(self.dm.cluster_name, self.dm.cluster_info_new)
if r['status_code'] == self.dm.status_code:
dict_actual = r['result']
dict_expected = xmltodict.parse(self.dm.expected_info)
print dict_actual
print dict_expected
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: ITC0201040202_test_UpdateCluster_host_upcpu SUCCESS." )
self.flag = True
else:
LogPrint().error("FAIL: ITC0201040202_test_UpdateCluster_host_upcpu. Error-info INCORRECT.")
self.flag = False
else:
            LogPrint().error("FAIL: Returned status_code '%s' is WRONG." % r['status_code'])
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete host '%s'." % self.dm.host_name)
self.assertTrue(smart_del_host(self.dm.host_name,self.dm.host_del_option))
LogPrint().info("Post-Test-2: Delete cluster '%s'." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC0201040203_UpdateCluster_host_name(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 04 Edit a cluster - 02 Hosts in the cluster - 03 Change the name
'''
def setUp(self):
'''
        @summary: Environment initialization before the test case runs (precondition)
        '''
        # Initialize test data
        self.dm = super(self.__class__, self).setUp()
        # Precondition 1: Create a cluster first
        self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
        # Precondition 2: Create a host
self.assertTrue(smart_create_host(self.dm.host_name, self.dm.host_info))
def test_UpdateCluster_host(self):
'''
        @summary: Test steps
        @note: (1) Change the name of a cluster that contains hosts;
        @note: (2) On success, verify that the returned status code and related info are correct.
'''
clusterapi = ClusterAPIs()
LogPrint().info("Test: Edit cluster's name while there are hosts in cluster.")
r = clusterapi.updateCluster(self.dm.cluster_name, self.dm.cluster_info_new)
if r['status_code'] == self.dm.status_code:
dict_actual = r['result']
dict_expected = xmltodict.parse(self.dm.cluster_info_new)
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: ITC0201040203_test_UpdateCluster_host_name SUCCESS." )
self.flag = True
else:
LogPrint().error("FAIL: ITC0201040203_test_UpdateCluster_host_name. Error-info INCORRECT.")
self.flag = False
else:
            LogPrint().error("FAIL: ITC0201040203_test_UpdateCluster_host_name. Status_code is wrong. ")
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete host '%s'." % self.dm.host_name)
self.assertTrue(smart_del_host(self.dm.host_name,self.dm.host_del_option))
LogPrint().info("Post-Test-2: Delete cluster '%s'." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name_new))
class ITC02010501_DeleteCluster_clear(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 05 Delete a cluster - 01 Clean cluster
'''
def setUp(self):
'''
        @summary: Environment initialization before the test case runs (precondition)
        '''
        # Call the parent method to get the test data module for this case
        self.dm = super(self.__class__, self).setUp()
        # Setup 1: Create a cluster
LogPrint().info("Pre-Test: Create a cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
def test_DeleteCluster(self):
'''
        @summary: Test case execution steps
        @note: (1) Delete a clean cluster;
        @note: (2) On success, verify that the returned status code is correct and that the deleted cluster no longer exists.
        '''
        clusterapi = ClusterAPIs()
        # Test 1: Delete the cluster and verify that it no longer exists
LogPrint().info("Test: Delete the clean cluster '%s'." % self.dm.cluster_name)
r = clusterapi.delCluster(self.dm.cluster_name)
if r['status_code'] == self.dm.status_code:
if not clusterapi.searchClusterByName(self.dm.cluster_name)['result']['clusters']:
                LogPrint().info("PASS: Delete Cluster '%s' SUCCESS." % self.dm.cluster_name)
                self.flag = True
            else:
                LogPrint().error("FAIL: The Cluster '%s' still exists." % self.dm.cluster_name)
self.flag = False
else:
LogPrint().error("FAIL: Returned status code '%s' is WRONG." % r['status_code'])
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test: Delete cluster '%s'." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC02010502_DeleteCluster_host(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 01 Cluster Operations - 05 Delete a cluster - 02 Hosts in the cluster
'''
def setUp(self):
'''
        @summary: Environment initialization before the test case runs (precondition)
        '''
        # Call the parent method to get the test data module for this case
        self.dm = super(self.__class__, self).setUp()
        # Precondition 1: Create a cluster
        LogPrint().info("Pre-Test-1: Create cluster '%s' for this TC." % self.dm.cluster_name)
        self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
        # Precondition 2: Create a host
LogPrint().info("Pre-Test-2: Create host '%s' for this TC." % self.dm.host_name)
self.assertTrue(smart_create_host(self.dm.host_name, self.dm.host_info))
def test_DeleteCluster_host(self):
'''
        @summary: Test case execution steps
        @note: (1) Delete a cluster that contains hosts;
        @note: (2) The operation fails; verify the returned status code and error message.
'''
clusterapi = ClusterAPIs()
LogPrint().info("Test: Delete cluster %s."% self.dm.cluster_name)
r = clusterapi.delCluster(self.dm.cluster_name)
if r['status_code'] == self.dm.status_code:
dict_actual = r['result']
dict_expected = xmltodict.parse(self.dm.expected_info)
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: Returned status code and messages are CORRECT." )
self.flag = True
else:
LogPrint().error("FAIL: Returned message is INCORRECT.")
self.flag = False
else:
LogPrint().error("FAIL: Returned status code is wrong. ")
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete host %s. "% self.dm.host_name)
self.assertTrue(smart_del_host(self.dm.host_name, self.dm.host_del_option))
LogPrint().info("Post-Test-2: Delete cluster %s. " % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC020201_GetClusterNetworkList(BaseTestCase):
'''
    @summary: ITC-02 Cluster Management - 02 Cluster Network Basic Operations - 01 Get the cluster network list
'''
def setUp(self):
'''
        @summary: Environment initialization before the test case runs (precondition)
        '''
        # Call the parent method to get the test data module for this case
        self.dm = super(self.__class__, self).setUp()
        # Setup 1: Create a cluster
LogPrint().info("Pre-Test-1: Create cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
def test_GetClusterNetworkList(self):
'''
        @summary: Test case execution steps
        @note: (1) Get the cluster's network list;
        @note: (2) On success, verify the returned status code.
'''
clusterapi = ClusterAPIs()
LogPrint().info("Test: Get the cluster %s's network list. "% self.dm.cluster_name)
r = clusterapi.getClusterNetworkList(self.dm.cluster_name)
if r['status_code'] == self.dm.status_code:
LogPrint().info('PASS: Get Cluster %s Network list SUCCESS.'% self.dm.cluster_name)
self.flag = True
else:
LogPrint().error('FAIL: Get Cluster %s Network list FAIL.'% self.dm.cluster_name)
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
        @summary: Resource cleanup after the test (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete cluster %s. "% self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC020202_GetClusterNetworkInfo(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-02 Cluster Network Operations-02 Get the network info of a specified cluster
'''
def setUp(self):
'''
@summary: Environment initialization before the test case runs (preconditions)
'''
# Call the parent-class method to get the test data module for this case
self.dm = super(self.__class__, self).setUp()
# Step 1: create a cluster
LogPrint().info("Pre-Test-1: Create cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
# Step 2: create a logical network
LogPrint().info("Pre-Test-2: Create network '%s' for this TC." % self.dm.nw_name)
self.assertTrue(smart_create_network(self.dm.nw_info, self.dm.nw_name))
# Step 3: attach the network to the cluster
LogPrint().info("Pre-Test-3: Attach network '%s' to cluster '%s' for this TC." % (self.dm.nw_name, self.dm.cluster_name))
self.clusterapi = ClusterAPIs()
self.clusterapi.attachNetworkToCluster(self.dm.cluster_name, self.dm.nw_info)
def test_GetClusterNetworkInfo(self):
'''
@summary: Test case steps
@note: The operation succeeds; verify the network info
'''
LogPrint().info("Test: Get the cluster %s's network info. "% self.dm.cluster_name)
self.flag = True
r = self.clusterapi.getClusterNetworkInfo(self.dm.cluster_name, self.dm.nw_name)
dict_actual = r
dict_expected = xmltodict.parse(self.dm.nw_info)
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: Get ClusterNetwork '%s' info SUCCESS." % self.dm.nw_name)
else:
LogPrint().error("FAIL: Returned message is WRONG. ")
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
@summary: Post-test resource cleanup (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete network '%s' for this TC." % self.dm.nw_name)
self.assertTrue(smart_delete_network(self.dm.nw_name, self.dm.dc_name))
LogPrint().info("Post-Test-2: Delete cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC020203_attachNetworktoCluster(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-02 Cluster Network Operations-03 Attach a network to a cluster
'''
def setUp(self):
'''
@summary: Environment initialization before the test case runs (preconditions)
'''
# Call the parent-class method to get the test data module for this case
self.dm = super(self.__class__, self).setUp()
# Step 1: create a cluster
LogPrint().info("Pre-Test-1: Create cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
# Step 2: create a logical network
LogPrint().info("Pre-Test-2: Create network '%s' for this TC." % self.dm.nw_name)
self.assertTrue(smart_create_network(self.dm.nw_info, self.dm.nw_name))
def test_attachNetworktoCluster(self):
'''
@summary: Test case steps
@note: (1) Attach a network to a cluster
@note: (2) The operation succeeds; verify the returned status code and that the network is attached to the cluster
'''
LogPrint().info("Test: Attach Network %s to Cluster %s. "%(self.dm.nw_name, self.dm.cluster_name))
clusterapi = ClusterAPIs()
self.flag = True
r = clusterapi.attachNetworkToCluster(self.dm.cluster_name, self.dm.nw_info)
if r['status_code'] == self.dm.status_code:
cluster_id = r['result']['network']['cluster']['@id']
cluster_name = clusterapi.getClusterNameById(cluster_id)
if cluster_name == self.dm.cluster_name:
LogPrint().info("PASS: Attach Network %s to Cluster %s SUCCESS." %(self.dm.nw_name, self.dm.cluster_name) )
else:
LogPrint().error("FAIL: Attach Network %s to Cluster %s FAIL."%(self.dm.nw_name, self.dm.cluster_name))
self.flag = False
else:
LogPrint().error("FAIL: Returned status code is WRONG. ")
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
@summary: Post-test resource cleanup (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete network '%s' for this TC." % self.dm.nw_name)
self.assertTrue(smart_delete_network(self.dm.nw_name, self.dm.dc_name))
LogPrint().info("Post-Test-2: Delete cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC020204_detachNetworkFromCluster(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-02 Cluster Network Operations-04 Detach a network from a cluster
'''
def setUp(self):
'''
@summary: Environment initialization before the test case runs (preconditions)
'''
# Call the parent-class method to get the test data module for this case
self.dm = super(self.__class__, self).setUp()
# Step 1: create a cluster
LogPrint().info("Pre-Test-1: Create cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
# Step 2: create a logical network
LogPrint().info("Pre-Test-2: Create network '%s' for this TC." % self.dm.nw_name)
self.assertTrue(smart_create_network(self.dm.nw_info, self.dm.nw_name))
# Step 3: attach the network to the cluster
LogPrint().info("Pre-Test-3: Attach network '%s' to cluster '%s' for this TC." % (self.dm.nw_name, self.dm.cluster_name))
self.clusterapi = ClusterAPIs()
self.clusterapi.attachNetworkToCluster(self.dm.cluster_name, self.dm.nw_info)
def test_detachNetworkFromCluster(self):
'''
@summary: Test case steps
@note: Detach a network from the cluster
@note: The operation succeeds; verify the returned status code and that the network is gone from the cluster
'''
LogPrint().info("Test: Detach Network %s from Cluster %s. "%(self.dm.nw_name, self.dm.cluster_name))
r = self.clusterapi.detachNetworkFromCluster(self.dm.cluster_name, self.dm.nw_name)
if r['status_code'] ==self.dm.status_code:
#检查集群中网络是否存在
if not self.clusterapi.getClusterNetworkInfo(self.dm.cluster_name, self.dm.nw_name):
LogPrint().info("PASS: Detach Network %s from Cluster %s SUCCESS. "%(self.dm.nw_name, self.dm.cluster_name) )
else:
LogPrint().info("FAIL: Cluster %s still has Network %s. "%(self.dm.cluster_name, self.dm.nw_name))
else:
LogPrint().info("FAIL: Returned status code is WRONG.")
def tearDown(self):
'''
@summary: Post-test resource cleanup (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete network '%s' for this TC." % self.dm.nw_name)
self.assertTrue(smart_delete_network(self.dm.nw_name, self.dm.dc_name))
LogPrint().info("Post-Test-2: Delete cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC020205_UpdateNetworkofCluster(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-02 Cluster Network Operations-05 Update cluster network info
'''
def setUp(self):
'''
@summary: Environment initialization before the test case runs (preconditions)
'''
# Call the parent-class method to get the test data module for this case
self.dm = super(self.__class__, self).setUp()
# Step 1: create a cluster
LogPrint().info("Pre-Test-1: Create cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_create_cluster(self.dm.cluster_info, self.dm.cluster_name))
# Step 2: create a logical network
LogPrint().info("Pre-Test-2: Create network '%s' for this TC." % self.dm.nw_name)
self.assertTrue(smart_create_network(self.dm.nw_info, self.dm.nw_name))
# Step 3: attach the network to the cluster
LogPrint().info("Pre-Test-3: Attach network '%s' to cluster '%s' for this TC." % (self.dm.nw_name, self.dm.cluster_name))
self.clusterapi = ClusterAPIs()
self.clusterapi.attachNetworkToCluster(self.dm.cluster_name, self.dm.nw_info)
def test_UpdateNetworkofCluster(self):
'''
@summary: Test case steps
@note: Update the cluster network info
@note: The operation succeeds; verify the returned status code and that the updated info is correct
'''
LogPrint().info("Test: Update Network %s of Cluster %s. "%(self.dm.nw_name, self.dm.cluster_name))
r = self.clusterapi.updateNetworkOfCluster(self.dm.cluster_name, self.dm.nw_name, self.dm.nw_info_new)
if r['status_code'] ==self.dm.status_code:
dict_actual = self.clusterapi.getClusterNetworkInfo(self.dm.cluster_name, self.dm.nw_name)
#dict_expected = {'network':xmltodict.parse(self.dm.nw_info_new)['network']}
dict_expected = xmltodict.parse(self.dm.nw_info_new)
dictCompare = DictCompare()
if dictCompare.isSubsetDict(dict_expected, dict_actual):
LogPrint().info("PASS: Detach Network %s from Cluster %s SUCCESS. "%(self.dm.nw_name, self.dm.cluster_name) )
else:
LogPrint().info("FAIL: Returned message is WRONG. ")
else:
LogPrint().info("FAIL: Returned status code is WRONG." )
def tearDown(self):
'''
@summary: Post-test resource cleanup (restore the initial environment)
'''
LogPrint().info("Post-Test-1: Delete network '%s' for this TC." % self.dm.nw_name)
self.assertTrue(smart_delete_network(self.dm.nw_name, self.dm.dc_name))
LogPrint().info("Post-Test-2: Delete cluster '%s' for this TC." % self.dm.cluster_name)
self.assertTrue(smart_delete_cluster(self.dm.cluster_name))
class ITC020301_GetAffinityGroupsList(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-03 Affinity Groups-01 View the affinity group list
'''
def setUp(self):
pass
def test_GetAffinityGroupsList(self):
'''
@summary: Test case steps
@note: Get the affinity group list
@note: Verify that the returned status code is correct
'''
self.affinitygroups_api = AffinityGroupsAPIs()
LogPrint().info("Test: Get AffinityGroups List of cluster %s." % ModuleData.cluster_name)
r = self.affinitygroups_api.getAffinityGroupsList(ModuleData.cluster_name)
if r['status_code']==200:
LogPrint().info('PASS: Get AffinityGroups List Success.')
self.flag = True
else:
LogPrint().error("FAIL: Get AffinityGroups List Fail.")
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
pass
class ITC020302_GetAffinityGroupsInfo(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-03 Affinity Groups-02 View info of a specified affinity group
'''
def setUp(self):
'''
@summary: Test environment initialization
'''
# Call the parent-class method to get the test data module for this case
self.dm = super(self.__class__, self).setUp()
# Create an affinity group
LogPrint().info("Pre-Test-1: Create affinitygroups '%s' for this TC." % self.dm.group_name)
self.assertTrue(smart_create_affinitygroups(ModuleData.cluster_name, self.dm.group_info, self.dm.group_name))
def test_GetAffinityGroupsInfo(self):
'''
@summary: Test case steps
@note: Get the info of the specified affinity group
@note: Verify the returned status code and response body
'''
self.flag = True
self.affinitygroups_api = AffinityGroupsAPIs()
LogPrint().info("Test: Get %s info of %s" % (self.dm.group_name, ModuleData.cluster_name))
r = self.affinitygroups_api.getAffinityGroupsInfo(ModuleData.cluster_name, self.dm.group_name)
if r['status_code'] == self.dm.status_code:
if DictCompare().isSubsetDict(xmltodict.parse(self.dm.group_info), r['result']):
LogPrint().info("PASS: Get AffinityGroups info success.")
else:
LogPrint().error("FAIL: Get AffinityGroups info fail. The info is wrong.")
self.flag = False
else:
LogPrint().error("FAIL: Get AffinityGroups info fail. The status_code is %s." % r['status_code'])
print xmltodict.unparse(r['result'], pretty=True)
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
@summary: Post-test resource cleanup
'''
LogPrint().info("Post-Test-1: Delete affinitygroups %s for this TC." % self.dm.group_name)
self.assertTrue(smart_delete_affinitygroups(ModuleData.cluster_name, self.dm.group_name))
class ITC02030301_CreateAffinityGroups_novms(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-03 Affinity Groups-03 Create Affinity Group-01 Without VMs
'''
def setUp(self):
'''
@summary: Test environment initialization
'''
self.dm = super(self.__class__, self).setUp()
def test_CreateAffinityGroups_novms(self):
'''
@summary: Test case steps
@note: Create an affinity group without adding VMs
@note: Verify the returned status code and response body
'''
self.flag = True
self.affinitygroups_api = AffinityGroupsAPIs()
LogPrint().info("Test: Create affinitygroups without vms in cluster %s." % ModuleData.cluster_name)
r = self.affinitygroups_api.createAffinityGroups(ModuleData.cluster_name, self.dm.group_info)
if r['status_code'] == self.dm.status_code:
if DictCompare().isSubsetDict(xmltodict.parse(self.dm.group_info), r['result']):
LogPrint().info("PASS: Create affinitygroups %s success." % self.dm.group_name)
else:
LogPrint().error("FAIL: Create affinitygroups fail. The info is wrong" )
self.flag = False
else:
LogPrint().error("FAIL: Create affinitygroups fail. The status_code is %s." % r['status_code'])
self.flag = False
print xmltodict.unparse(r['result'], pretty=True)
self.assertTrue(self.flag)
def tearDown(self):
'''
@summary: Post-test resource cleanup
'''
LogPrint().info("Post-Test-1: Delete affinitygroups %s for this TC." % self.dm.group_name)
self.assertTrue(smart_delete_affinitygroups(ModuleData.cluster_name, self.dm.group_name))
class ITC02030302_CreateAffinityGroups_twovms(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-03 Affinity Groups-03 Create Affinity Group-02 With two VMs
'''
def setUp(self):
'''
@summary: Test environment initialization
'''
self.dm = super(self.__class__, self).setUp()
def test_CreateAffinityGroups_twovms(self):
'''
@summary: Test case steps
@note: Create an affinity group and add two VMs
@note: Verify the returned status code
'''
self.flag = True
self.affinitygroups_api = AffinityGroupsAPIs()
LogPrint().info("Test: Create affinitygroups %s in cluster %s and add two vms." % (self.dm.group_name, ModuleData.cluster_name))
r1 = self.affinitygroups_api.createAffinityGroups(ModuleData.cluster_name, self.dm.group_info)
if r1['status_code'] == self.dm.status_code_creategroups:
r2 = self.affinitygroups_api.addVmtoAffinityGroups(ModuleData.cluster_name, self.dm.group_name, ModuleData.vm1_name)
if r2['status_code'] == self.dm.status_code_addvms:
r3 = self.affinitygroups_api.addVmtoAffinityGroups(ModuleData.cluster_name, self.dm.group_name, ModuleData.vm2_name)
if r3['status_code'] == self.dm.status_code_addvms:
LogPrint().info("PASS: Create affinitygroups and add two vms success.")
else:
LogPrint().info("FAIL: Add vm2 to affinitygroups fail. The status_code is %s." % r3['status_code'])
self.flag = False
else:
LogPrint().info("FAIL: Add vm1 to affinitygroups fail. The status_code is %s." % r2['status_code'])
self.flag = False
else:
LogPrint().info("FAIL: Create affinitygroups fail. The status_code is %s." % r1['status_code'])
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
@summary: Post-test resource cleanup
'''
LogPrint().info("Post-Test-1: Delete affinitygroups %s for this TC." % self.dm.group_name)
self.assertTrue(smart_delete_affinitygroups(ModuleData.cluster_name, self.dm.group_name))
class ITC02030401_UpdateAffinityGroups_groupinfo(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-03 Affinity Groups-04 Edit Affinity Group-01 Edit group info
'''
def setUp(self):
'''
@summary: Test environment initialization
'''
self.dm = super(self.__class__, self).setUp()
# Create an affinity group
LogPrint().info("Pre-Test-1: Create affinitygroups %s in cluster %s for this TC." % (self.dm.group_name, ModuleData.cluster_name))
self.assertTrue(smart_create_affinitygroups(ModuleData.cluster_name, self.dm.group_info, self.dm.group_name))
# Add a VM to the affinity group
self.affinitygroups_api = AffinityGroupsAPIs()
LogPrint().info("Pre-Test-2: Add vm %s to affinitygroups %s." % (ModuleData.vm1_name, self.dm.group_name))
self.affinitygroups_api.addVmtoAffinityGroups(ModuleData.cluster_name, self.dm.group_name, ModuleData.vm1_name)
def test_UpdateAffinityGroups_groupinfo(self):
'''
@summary: Test case steps
@note: Edit the info of the specified affinity group (name, description, polarity, enforcing)
@note: Verify the returned status code and response body
'''
self.flag = True
LogPrint().info("Test: Update affinitygroups %s info in cluster %s." % (self.dm.group_name, ModuleData.cluster_name))
r = self.affinitygroups_api.updateAffinityGroups(ModuleData.cluster_name, self.dm.group_name, self.dm.update_info)
if r['status_code'] == self.dm.status_code:
if DictCompare().isSubsetDict(xmltodict.parse(self.dm.update_info), r['result']):
LogPrint().info("PASS: Update affinitygroups info success.")
else:
LogPrint().error("FAIL: Update affinitygroups info fail. The info is wrong.")
self.flag = False
print xmltodict.unparse(r['result'], pretty=True)
else:
LogPrint().info("FAIL: Update affinitygroups info fail. The status_code is %s." % r['status_code'])
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
@summary: Post-test resource cleanup
'''
LogPrint().info("Post-Test-1: Delete affinitygroups %s for this TC." % self.dm.update_name)
self.assertTrue(smart_delete_affinitygroups(ModuleData.cluster_name, self.dm.update_name))
class ITC02030402_UpdateAffinityGroups_vms(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-03 Affinity Groups-04 Edit Affinity Group-02 Remove and add VMs in the group
'''
def setUp(self):
'''
@summary: Test environment initialization
'''
self.dm = super(self.__class__, self).setUp()
# Create an affinity group
LogPrint().info("Pre-Test-1: Create affinitygroups %s in cluster %s for this TC." % (self.dm.group_name, ModuleData.cluster_name))
self.assertTrue(smart_create_affinitygroups(ModuleData.cluster_name, self.dm.group_info, self.dm.group_name))
# Add a VM to the affinity group
self.affinitygroups_api = AffinityGroupsAPIs()
LogPrint().info("Pre-Test-2: Add vm %s to affinitygroups %s." % (ModuleData.vm1_name, self.dm.group_name))
self.affinitygroups_api.addVmtoAffinityGroups(ModuleData.cluster_name, self.dm.group_name, ModuleData.vm1_name)
def test_UpdateAffinityGroups_vms(self):
'''
@summary: Test case steps
@note: Remove the existing VM from the affinity group and add a new one
@note: Verify the returned status code
'''
self.flag = True
LogPrint().info("Test: Update vms in affinitygroups %s." % self.dm.group_name)
r1 = self.affinitygroups_api.removeVmfromAffinityGroups(ModuleData.cluster_name, self.dm.group_name, ModuleData.vm1_name)
if r1['status_code'] == self.dm.status_code:
r2 = self.affinitygroups_api.addVmtoAffinityGroups(ModuleData.cluster_name, self.dm.group_name, ModuleData.vm2_name)
if r2['status_code'] == self.dm.status_code:
LogPrint().info("PASS: Update vms in affinitygroups %s success." % self.dm.group_name)
else:
LogPrint().info("FAIL: Add vm %s to affinitygroups %s fail. The status_code is %s." % (ModuleData.vm2_name,self.dm.group_name,r2['status_code']))
self.flag = False
else:
LogPrint().error("FAIL: Remove vm %s from affinitygroups %s fail. The status_code is %s." % (ModuleData.vm1_name,self.dm.group_name,r1['status_code']))
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
'''
@summary: Post-test resource cleanup
'''
LogPrint().info("Post-Test-1: Delete affinitygroups %s for this TC." % self.dm.group_name)
self.assertTrue(smart_delete_affinitygroups(ModuleData.cluster_name, self.dm.group_name))
class ITC020305_DeleteAffinityGroups(BaseTestCase):
'''
@summary: ITC-02 Cluster Management-03 Affinity Groups-05 Delete affinity group
'''
def setUp(self):
'''
@summary: Test environment initialization
'''
self.dm = super(self.__class__, self).setUp()
# Create an affinity group
LogPrint().info("Pre-Test-1: Create affinitygroups %s in cluster %s for this TC." % (self.dm.group_name, ModuleData.cluster_name))
self.assertTrue(smart_create_affinitygroups(ModuleData.cluster_name, self.dm.group_info, self.dm.group_name))
# Add a VM to the affinity group
self.affinitygroups_api = AffinityGroupsAPIs()
LogPrint().info("Pre-Test-2: Add vm %s to affinitygroups %s." % (ModuleData.vm1_name, self.dm.group_name))
self.affinitygroups_api.addVmtoAffinityGroups(ModuleData.cluster_name, self.dm.group_name, ModuleData.vm1_name)
def test_DeleteAffinityGroups(self):
'''
@summary: Test case steps
@note: Delete the affinity group
@note: Verify the returned status code
'''
LogPrint().info("Test: Delete affinitygroups %s in cluster %s." % (self.dm.group_name, ModuleData.cluster_name))
r = self.affinitygroups_api.deleteAffinityGroups(ModuleData.cluster_name, self.dm.group_name)
if r['status_code'] == self.dm.status_code:
LogPrint().info("PASS: Delete affinitygroups %s success." % self.dm.group_name)
self.flag = True
else:
LogPrint().error("FAIL: Delete affinitygroups %s fail. The status_code is %s." % (self.dm.group_name, r['status_code']))
self.flag = False
self.assertTrue(self.flag)
def tearDown(self):
pass
class ITC02_TearDown(BaseTestCase):
'''
@summary: Test environment cleanup for the "Cluster Management" module (run this case after all test cases in this module to restore the environment)
'''
def setUp(self):
'''
@summary: Module test environment initialization (load test data)
'''
# Get the test data module for this case
self.dm = self.initData('ITC02_Setup')
def test_TearDown(self):
dcapi = DataCenterAPIs()
capi = ClusterAPIs()
# Step 1: delete the VMs
LogPrint().info("Post-Module-Test-1: Delete vm '%s'." % self.dm.vm1_name)
self.assertTrue(smart_del_vm(self.dm.vm1_name))
LogPrint().info("Post-Module-Test-2: Delete vm '%s'." % self.dm.vm2_name)
self.assertTrue(smart_del_vm(self.dm.vm2_name))
# Step 2: set the export and iso storage domains to Maintenance, then detach them from the data center
LogPrint().info("Post-Module-Test-2-1: Deactivate storage domains '%s'." % self.dm.export1_name)
self.assertTrue(smart_deactive_storage_domain(self.dm.dc_name, self.dm.export1_name))
LogPrint().info("Post-Module-Test-2-2: Detach storage domains '%s'." % self.dm.export1_name)
self.assertTrue(smart_detach_storage_domain(self.dm.dc_name, self.dm.export1_name))
LogPrint().info("Post-Module-Test-2-3: Deactivate storage domains '%s'." % self.dm.iso1_name)
self.assertTrue(smart_deactive_storage_domain(self.dm.dc_name, self.dm.iso1_name))
LogPrint().info("Post-Module-Test-2-4: Detach storage domains '%s'." % self.dm.iso1_name)
self.assertTrue(smart_detach_storage_domain(self.dm.dc_name, self.dm.iso1_name))
# Step 3: set the data2 storage domain to Maintenance, then detach it from the data center
LogPrint().info("Post-Module-Test-3-1: Deactivate data storage domains '%s'." % self.dm.data2_nfs_name)
self.assertTrue(smart_deactive_storage_domain(self.dm.dc_name, self.dm.data2_nfs_name))
LogPrint().info("Post-Module-Test-3-2: Detach data storage domains '%s'." % self.dm.data2_nfs_name)
self.assertTrue(smart_detach_storage_domain(self.dm.dc_name, self.dm.data2_nfs_name))
# Step 4: set the data1 storage domain to Maintenance
LogPrint().info("Post-Module-Test-4: Deactivate data storage domains '%s'." % self.dm.data1_nfs_name)
self.assertTrue(smart_deactive_storage_domain(self.dm.dc_name, self.dm.data1_nfs_name))
# Step 5: delete data center dc1 (non-forced; its storage domains become Unattached afterwards)
if dcapi.searchDataCenterByName(self.dm.dc_name)['result']['data_centers']:
LogPrint().info("Post-Module-Test-5: Delete DataCenter '%s'." % self.dm.dc_name)
self.assertTrue(dcapi.delDataCenter(self.dm.dc_name)['status_code']==self.dm.expected_status_code_del_dc)
# Step 6: delete the 4 Unattached storage domains (data1/data2/export1/iso)
LogPrint().info("Post-Module-Test-6: Delete all unattached storage domains.")
sd_list = [self.dm.data1_nfs_name, self.dm.data2_nfs_name, self.dm.iso1_name, self.dm.export1_name]
for sd in sd_list:
smart_del_storage_domain(sd, self.dm.xml_del_sd_option, host_name=self.dm.host1_name)
# Step 7: delete the host (host1)
LogPrint().info("Post-Module-Test-7: Delete host '%s'." % self.dm.host1_name)
self.assertTrue(smart_del_host(self.dm.host1_name, self.dm.xml_del_host_option))
# Step 8: delete cluster1
if capi.searchClusterByName(self.dm.cluster_name)['result']['clusters']:
LogPrint().info("Post-Module-Test-8: Delete Cluster '%s'." % self.dm.cluster_name)
self.assertTrue(capi.delCluster(self.dm.cluster_name)['status_code']==self.dm.expected_status_code_del_cluster)
if __name__ == "__main__":
# Build the test suite and add the test cases
test_cases = ["Cluster.ITC02_TearDown"]
testSuite = unittest.TestSuite()
loader = unittest.TestLoader()
tests = loader.loadTestsFromNames(test_cases)
testSuite.addTests(tests)
unittest.TextTestRunner(verbosity=2).run(testSuite)
|
''' Multi-task, multi-threaded download: start several threads to download multiple files at once, split each file into "segments", and download each segment in its own thread '''
import time,threading,urllib2,Queue
_lock = threading.Lock()
class Downloader(threading.Thread):
def __init__(self, url, Save_F, buffer, queue):
self.url = url
self.buffer = buffer
self.Save_F = Save_F
self.queue = queue
threading.Thread.__init__(self)
def run(self):
self.down_point = self.queue.get()
self.start_size = self.down_point[0]
self.end_size = self.down_point[1]
req = urllib2.Request(self.url)
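# HTTP Range is inclusive on both ends: 'bytes=0-99' requests the first 100 bytes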
req.headers['Range'] = 'bytes=%s-%s' % (self.start_size, self.end_size)
f = urllib2.urlopen(req)
offset = self.start_size
while True:
block = f.read(self.buffer)
if not block:
break
with _lock:
# keep seek+write atomic on the shared file object so that
# segments don't interleave their writes
self.Save_F.seek(offset)
self.Save_F.write(block)
offset = offset + len(block)
print '%s down ok!' %self.name
time.sleep(1)
self.queue.task_done()
class init_thread(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.num_threads = 12
self.url_queue = queue
self.Downpoint_queue = Queue.Queue()
self.buffer = 3072
def run(self):
while True:
url = self.url_queue.get()
file_name = url.split('/')[-1]
print 'Start Download: %s' %(file_name)
req = urllib2.urlopen(url)
size = int(req.info().getheaders('Content-Length')[0])
Save_F = open(file_name, 'wb')
avg_size, pad_size = divmod(size, self.num_threads)
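# e.g. size=10 bytes with 3 threads -> avg_size=3, pad_size=1,
# giving inclusive byte ranges [0,2], [3,5], [6,9]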
for t in range(self.num_threads):
Down_thd = Downloader(url, Save_F, self.buffer, self.Downpoint_queue)
Down_thd.setDaemon(True)
Down_thd.start()
for i in range(self.num_threads):
start_size = i*avg_size
end_size = start_size + avg_size - 1
if i == self.num_threads - 1:
# the last segment absorbs the remainder; Range ends are inclusive, so no extra +1
end_size = end_size + pad_size
self.Downpoint_queue.put([start_size,end_size])
self.Downpoint_queue.join()
Save_F.close()
self.url_queue.task_done()
time.sleep(1)
if __name__ == '__main__':
Threads = 3
ck_queue = Queue.Queue()
u1 = 'http://downmini.kugou.com/kugou7695.exe'
u2 = 'http://dldir1.qq.com/weixin/Windows/WeChat1.1.exe'
u3 = 'http://dldir1.qq.com/qqfile/qq/QQ7.1/14522/QQ7.1.exe'
u4 = 'http://yydl.duowan.com/4/setup/YYSetup-7.5.0.0-zh-CN.exe'
u5 = 'http://dl.liebao.cn/kb/KSbrowser_5.2.91.10096.exe'
url_list=[u1,u2,u3,u4,u5]
for T in range(Threads):
init_thd = init_thread(ck_queue)
init_thd.setDaemon(True)
init_thd.start()
for file_url in url_list:
ck_queue.put(file_url)
ck_queue.join()
|
"""Get GitHub data for the innovation networks data pilot.
Uses https://www.githubarchive.org/ and gets the last 2 years of
activity"""
import logging
import os
import requests
import sys
from datetime import datetime, timedelta
from time import sleep
def get_file_path():
"""Get the path to the current file"""
return os.path.dirname(os.path.realpath(sys.argv[0]))
def make_url(date_stamp=None, year=None, month=None, day=None, hour=None):
"""Return a GitHub Archive URL that will get data for the given
year, month, day and hour. Defaults to the current hour for yesterday,
computed at call time so it is safe across month boundaries."""
base_url = "http://data.githubarchive.org/"
if date_stamp:
return base_url + date_stamp + '.json.gz'
yesterday = datetime.now() - timedelta(days=1)
year = year if year is not None else yesterday.year
month = month if month is not None else yesterday.month
day = day if day is not None else yesterday.day
hour = hour if hour is not None else datetime.now().hour
return (base_url +
'{}-{:02d}-{:02d}-{:02d}'.format(year, month, day, hour) +
'.json.gz')
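# e.g. make_url(date_stamp='2015-01-01-15')
#   -> 'http://data.githubarchive.org/2015-01-01-15.json.gz'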
def urls():
"""Returns a list of formatted GitHubArchive URLs"""
return [make_url(single_date.strftime("%Y-%m-%d-%H")) for
single_date in daterange()]
def daterange(start_date=datetime.now() - timedelta(731),
end_date=datetime.now()):
"""yields dates for last two years, counting from yesterday"""
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
def out_file_name(out_path):
"""Formatted file name"""
file_name = '{}_github_event_data.json.gz'.format(
datetime.now().strftime("%Y%m%d%S"))
return os.path.join(out_path, file_name)
def write_data(file_obj, url_list):
"""Work through url_list, using requests to stream each file and
write it to disk in chunks. Index 0 is only dropped after a successful
flush, so a retry after an exception resumes from the URL that failed."""
while url_list:
req = requests.get(url_list[0], stream=True)
for chunk in req.iter_content(chunk_size=1024):
if chunk:
file_obj.write(chunk)
file_obj.flush()
del url_list[0]
sleep(2)
def main():
logging.basicConfig(level=logging.DEBUG, filename='/tmp/github.get_data.log')
# Set the cwd to this file's
os.chdir(get_file_path())
# All the urls for the json data, as a list that is
# consumed from the front (index 0)
url_list = urls()
# Standard data folder
out_path = "../../data/"
# make the path if it doesn't exist
if not os.path.exists(out_path):
os.mkdir(out_path)
out_file = out_file_name(out_path)
try:
fp = open(out_file, 'wb')
write_data(fp, url_list)
fp.close()
except Exception as e:
logging.error(e, exc_info=True)
sleep(10)
# Restart, the list will still hold the relevant url,
# as the 0 index isn't deleted until after file is flushed
write_data(fp, url_list)
if __name__ == "__main__":
main()
|
"""Tests for kfp.dsl.serialization_utils module."""
import unittest
from kfp.deprecated.dsl import serialization_utils
_DICT_DATA = {
'int1': 1,
'str1': 'helloworld',
'float1': 1.11,
'none1': None,
'dict1': {
'int2': 2,
'list2': ['inside the list', None, 42]
}
}
_EXPECTED_YAML_LITERAL = """\
dict1:
int2: 2
list2:
- inside the list
-
- 42
float1: 1.11
int1: 1
none1:
str1: helloworld
"""
class SerializationUtilsTest(unittest.TestCase):
def testDumps(self):
self.assertEqual(_EXPECTED_YAML_LITERAL,
serialization_utils.yaml_dump(_DICT_DATA))
|
from ..core.experts import *
from ..core.harness import *
from ..core.transforms import *
from ..contraction.definitions import *
fun_name = 'row_reduction_2d'
op_name = 'linalg.generic'
all_names = [ \
# Note: tried small 1, 2 and 4 sizes, never resulted in better perf. \
"Tile4x16", \
"Tile4x16FusedOutput", \
"Tile8x16", \
"Tile8x16FusedOutput", \
"Tile4x64", \
"Tile4x64FusedOutput", \
"Tile8x64", \
"Tile8x64FusedOutput", \
"Tile4x128", \
"Tile4x128FusedOutput", \
"Tile8x128", \
"Tile8x128FusedOutput", \
]
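# Keep this list in sync with all_experts below: each tile size yields a
# plain expert followed by its FusedOutput variant, in this order.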
def all_experts(problem_sizes: List[int]):
tile_sizes = [
[4, 16], [8, 16], \
[4, 64], [8, 64], \
[4, 128], [8, 128]
]
res = []
for ts in tile_sizes:
res.append(
Tile(fun_name=fun_name, op_name=op_name, tile_sizes=[ts[0], ts[1]])
.then(Vectorize(fun_name, op_name))
.then(LoweringOnlyExpert(fun_name,
op_name,
multi_reduction_lowering='innerreduction')),
)
res.append(
Tile(fun_name=fun_name, op_name=op_name, tile_sizes=[ts[0], ts[1]])
.then(ExperimentalFuseFillIntoTiledReductionOutput(fun_name, op_name))
.then(Vectorize(fun_name, op_name))
.then(Vectorize(fun_name, 'linalg.fill', vectorize_only_tiled=True))
.then(LoweringOnlyExpert(fun_name,
op_name,
multi_reduction_lowering='innerreduction')),
)
return [e.print_ir(after_all=False, at_begin=False, llvm=False) for e in res]
keys = ['m', 'n']
def main():
# Specify default configuration and parse command line.
# Note: `\` char at the end of next line prevents formatter reflows, keep it.
args = test_argparser( \
"row reduction 2d fused fill benchmark",
default_n_iters=100,
default_problem_sizes_list=[
[128, 256],
[112, 128],
[256, 256],
[1008, 1024],
[8096, 6144],
],
default_expert_list=all_names,
default_dynamic_at_compile_time_list=[[]],
default_spec_list=[])
def numpy_kernel(args, sizes, types):
A, B = args
B.fill(0.)
np.sum(A, axis=1, out=B)
def pytorch_kernel(args, sizes, types):
A, B = args
B.fill_(0.)
torch.sum(A, dim=1, out=B)
for problem_sizes in args.problem_sizes_list:
test_harness(lambda s, t: EinsumProblem('mn->m', 'mn', 1),
[[np.float32] * 2],
test_sizes(keys, [problem_sizes]),
test_experts(all_experts(problem_sizes), all_names,
args.expert_list),
n_iters=args.n_iters,
dynamic_at_compile_time_sizes=[],
function_name=fun_name,
zero_at_each_iteration=True,
dump_ir_to_file='/tmp/abcd.mlir',
dump_obj_to_file='/tmp/abcd.o',
dump_data_to_file=args.dump_data)
if __name__ == '__main__':
main()
|
from oslo.config import cfg
from nova.tests.unit.integrated.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
class ShelveJsonTest(test_servers.ServersSampleBase):
extension_name = "os-shelve"
def setUp(self):
super(ShelveJsonTest, self).setUp()
# Don't offload instance, so we can test the offload call.
CONF.set_override('shelved_offload_time', -1)
def _test_server_action(self, uuid, template, action):
response = self._do_post('servers/%s/action' % uuid,
template, {'action': action})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_shelve(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
def test_shelve_offload(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')
def test_unshelve(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
self._test_server_action(uuid, 'os-unshelve', 'unshelve')
|
from action_tutorials_interfaces.action import Fibonacci
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
class FibonacciActionClient(Node):
def __init__(self):
super().__init__('fibonacci_action_client')
self._action_client = ActionClient(self, Fibonacci, 'fibonacci')
def send_goal(self, order):
goal_msg = Fibonacci.Goal()
goal_msg.order = order
self._action_client.wait_for_server()
self._send_goal_future = self._action_client.send_goal_async(
goal_msg,
feedback_callback=self.feedback_callback)
self._send_goal_future.add_done_callback(self.goal_response_callback)
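# Callback chain: send_goal_async -> goal_response_callback ->
# get_result_async -> get_result_callback (which shuts rclpy down)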
def goal_response_callback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected :(')
return
self.get_logger().info('Goal accepted :)')
self._get_result_future = goal_handle.get_result_async()
self._get_result_future.add_done_callback(self.get_result_callback)
def get_result_callback(self, future):
result = future.result().result
self.get_logger().info('Result: {0}'.format(result.sequence))
rclpy.shutdown()
def feedback_callback(self, feedback_msg):
feedback = feedback_msg.feedback
self.get_logger().info('Received feedback: {0}'.format(feedback.partial_sequence))
def main(args=None):
rclpy.init(args=args)
action_client = FibonacciActionClient()
action_client.send_goal(10)
rclpy.spin(action_client)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from caffe2.python import core
from caffe2.python.dataio import Reader
from caffe2.python.dataset import Dataset
from caffe2.python.pipeline import pipe
from caffe2.python.task import Cluster, TaskGroup
class CachedReader(Reader):
"""
Reader with persistent in-file cache.
Example usage:
cached_reader = CachedReader(reader)
build_cache_step = cached_reader.build_cache('/tmp/cache.db')
with LocalSession() as session:
session.run(build_cache_step)
Every time new reader is created, it's expected that build_cache will be
called before setup_ex and usage of the reader. build_cache will check
existence of provided file path and in case it's missing will initialize it
by reading data from original reader. All consequent attempts to read will
ignore original reader (i.e. no additional data will be read from it).
"""
def __init__(self, reader, db_type='leveldb', name='cached_reader'):
super(CachedReader, self).__init__(reader.schema())
self.original_reader = reader
self.cache_path = None
self.ds_reader = None
self.ds = Dataset(self._schema, name)
self.db_type = db_type
self.name = name
self.field_names = self._schema.field_names()
def setup_ex(self, init_net, finish_net):
assert self.cache_path, 'build_cache must be called first'
self._init_dataset(init_net)
self._load_from_file(init_net)
self.ds_reader = self.ds.reader(init_net, batch_size=100)
def read(self, read_net):
assert self.ds_reader, 'setup must be called first'
return self.ds_reader.read(read_net)
def has_cache(self):
return self.cache_path and os.path.exists(self.cache_path)
def build_cache(self, cache_path, overwrite=False):
if not self.has_cache() or overwrite:
self.cache_path = cache_path
if self.has_cache() and not overwrite:
# cache already exists, no need to rebuild it
return core.execution_step('build_step', [])
init_net = core.Net('init')
self._init_dataset(init_net)
with Cluster(), core.NameScope(self.name), TaskGroup() as copy_tg:
pipe(self.original_reader, self.ds.writer(), num_threads=16)
copy_step = copy_tg.to_task().get_step()
save_net = core.Net('save')
self._save_to_file(save_net)
return core.execution_step('build_cache', [init_net, copy_step, save_net])
def _init_dataset(self, init_net):
with core.NameScope(self.name):
self.ds.init_empty(init_net)
def _save_to_file(self, net):
net.Save(
self.ds.content().field_blobs(),
[],
db=self.cache_path,
db_type=self.db_type,
blob_name_overrides=self.field_names,
absolute_path=True,
)
def _load_from_file(self, net):
net.Load(
[],
self.ds.content().field_blobs(),
db=self.cache_path,
db_type=self.db_type,
absolute_path=True,
source_blob_names=self.field_names,
)
|
"""Unittest runner for quantum Linux Bridge plugin
This file should be run from the top dir in the quantum directory
To run all tests::
PLUGIN_DIR=quantum/plugins/linuxbridge ./run_tests.sh
"""
import os
import sys
from nose import config
from nose import core
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(__file__))
from quantum.api.api_common import OperationalStatus
from quantum.common.test_lib import run_tests, test_config
import quantum.tests.unit
if __name__ == '__main__':
exit_status = False
# if a single test case was specified,
# we should only invoke the tests once
invoke_once = len(sys.argv) > 1
test_config['plugin_name'] = "LinuxBridgePlugin.LinuxBridgePlugin"
test_config['plugin_name_v2'] = "lb_quantum_plugin.LinuxBridgePluginV2"
test_config['default_net_op_status'] = OperationalStatus.UP
test_config['default_port_op_status'] = OperationalStatus.DOWN
cwd = os.getcwd()
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3,
includeExe=True,
traverseNamespace=True,
plugins=core.DefaultPluginManager())
c.configureWhere(quantum.tests.unit.__path__)
exit_status = run_tests(c)
if invoke_once:
sys.exit(exit_status)
os.chdir(cwd)
working_dir = os.path.abspath("quantum/plugins/linuxbridge")
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3,
workingDir=working_dir)
exit_status = exit_status or run_tests(c)
sys.exit(exit_status)
|
"""
Resource class and its manager for floating IPs in Compute API v2
"""
from osclient2 import base
from osclient2 import mapper
from osclient2 import utils
ATTRIBUTE_MAPPING = [
('id', 'id', mapper.Noop),
('fixed_ip', 'fixed_ip', mapper.Noop),
('server', 'instance_id', mapper.Resource('nova.server')),
('ip', 'ip', mapper.Noop),
('pool', 'pool', mapper.Noop),
]
class Resource(base.Resource):
"""Resource class for floating IPs in Compute API v2"""
def associate(self, server=None):
"""
Associate a floating IP
@keyword server: Server
@type server: osclient2.nova.v2.server.Resource
@rtype: None
"""
self._http.post('/servers/%s/action' % server.get_id(),
data=utils.get_json_body(
'addFloatingIp', address=self.ip))
def disassociate(self):
"""
Disassociate a floating IP
@rtype: None
"""
self._http.post('/servers/%s/action' % self.server.id,
data=utils.get_json_body(
'removeFloatingIp', address=self.ip))
def deallocate(self):
"""
Deallocate a floating IP
@rtype: None
"""
super(Resource, self).delete()
class Manager(base.Manager):
"""Manager class for floating IPs in Compute API v2"""
resource_class = Resource
service_type = 'compute'
_attr_mapping = ATTRIBUTE_MAPPING
_hidden_methods = ["create", "update", "delete"]
_json_resource_key = 'floating_ip'
_json_resources_key = 'floating_ips'
_url_resource_path = '/os-floating-ips'
def allocate(self, pool=None):
"""Allocate a new floating IP
@keyword pool: Pool name
@type pool: str
@return: Allocated floating IP address
@rtype: osclient2.nova.v2.floating_ip.Resource
"""
return super(Manager, self).create(pool=pool)
def list_pools(self):
"""
List floating IP pools
@return: Floating IP pool list
@rtype: [str]
"""
ret = self._http.get('/os-floating-ip-pools')
return [x["name"] for x in ret.get("floating_ip_pools", [])]
|
import os
os.environ.setdefault("DJANGO_CONFIGURATION", "Prod")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "manager.settings")
from django.core.asgi import get_asgi_application # noqa: E402
application = get_asgi_application()
|
""" This file contains different utility functions that are not connected
in anyway to the networks presented in the tutorials, but rather help in
processing the outputs into a more understandable way.
For example ``tile_raster_images`` helps in generating an easy-to-grasp
image from a set of samples or weights.
"""
from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
import numpy
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= old_div(1.0, (ndar.max() + eps))
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [
(ishp + tsp) * tshp - tsp
for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)
]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in range(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = numpy.zeros(
out_shape,
dtype=dt
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = numpy.zeros(out_shape, dtype=dt)
for tile_row in range(tile_shape[0]):
for tile_col in range(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
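# Usage sketch (hypothetical data): tile 100 flattened 28x28 images into a
# 10x10 grid with 1-pixel spacing, then view the result with PIL:
#   img = tile_raster_images(X, img_shape=(28, 28), tile_shape=(10, 10),
#                            tile_spacing=(1, 1))
#   Image.fromarray(img).show()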
|
"""RFC2462 style IPv6 address generation."""
import netaddr
from jacket.i18n import _
def to_global(prefix, mac, project_id):
try:
mac64 = netaddr.EUI(mac).eui64().words
int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
mac64_addr = netaddr.IPAddress(int_addr)
maskIP = netaddr.IPNetwork(prefix).ip
return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') |
maskIP).format()
except netaddr.AddrFormatError:
raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
except TypeError:
raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
def to_mac(ipv6_address):
address = netaddr.IPAddress(ipv6_address)
mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff')
mask2 = netaddr.IPAddress('::0200:0:0:0')
mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words
return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]])
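# Round-trip sketch (hypothetical values):
#   to_global('2001:db8::/64', '02:16:3e:33:44:55', None)
#     -> '2001:db8::16:3eff:fe33:4455' (EUI-64 with the universal/local bit flipped)
#   to_mac('2001:db8::16:3eff:fe33:4455') -> '02:16:3e:33:44:55'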
|
"""
Molotov-based executor for Taurus.
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
from math import ceil
from bzt import ToolError
from bzt.engine import ScenarioExecutor
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.modules.console import ExecutorWidget
from bzt.modules.services import RequiredTool
from bzt.utils import unicode_decode
from bzt.utils import shutdown_process, dehumanize_time, get_full_path, LDJSONReader
class MolotovExecutor(ScenarioExecutor):
def __init__(self):
super(MolotovExecutor, self).__init__()
self.process = None
self.report_file_name = None
self.stdout = None
self.stderr = None
self.molotov = None
self.scenario = None
self.launch_cmdline = None
self.user_tool_path = None
def prepare(self):
super(MolotovExecutor, self).prepare()
self.install_required_tools()
self.stdout = open(self.engine.create_artifact("molotov", ".out"), 'w')
self.stderr = open(self.engine.create_artifact("molotov", ".err"), 'w')
self.report_file_name = self.engine.create_artifact("molotov-report", ".ldjson")
self.reader = MolotovReportReader(self.report_file_name, self.log)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
def install_required_tools(self):
self.molotov = self._get_tool(Molotov, path=self.settings.get('path', None))
if not self.molotov.check_if_installed():
self.molotov.install()
def get_widget(self):
if not self.widget:
label = "%s" % self
self.widget = ExecutorWidget(self, "Molotov: " + label.split('/')[1])
return self.widget
def startup(self):
load = self.get_load()
cmdline = [self.molotov.tool_path]
if load.concurrency is not None:
cmdline += ['--workers', str(load.concurrency)]
if 'processes' in self.execution:
cmdline += ['--processes', str(self.execution['processes'])]
duration = 0
if load.ramp_up:
ramp_up = int(ceil(dehumanize_time(load.ramp_up)))
duration += ramp_up
cmdline += ['--ramp-up', str(ramp_up)]
if load.hold:
hold = int(ceil(dehumanize_time(load.hold)))
duration += hold
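# molotov's --duration spans the whole run, so ramp-up and hold are summed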
cmdline += ['--duration', str(duration)]
think_time = self.get_scenario().get("think-time", None)
if think_time:
cmdline += ['--delay', str(dehumanize_time(think_time))]
user_cmd = self.settings.get("cmdline")
if user_cmd:
cmdline += user_cmd.split(" ")
cmdline += ['--use-extension=bzt.resources.molotov_ext']
cmdline += [self.get_script_path(required=True)]
self.env.set({"MOLOTOV_TAURUS_REPORT": self.report_file_name})
self.env.add_path({"PYTHONPATH": get_full_path(__file__, step_up=3)})
self.process = self._execute(cmdline)
def check(self):
ret_code = self.process.poll()
if ret_code is None:
return False
if ret_code != 0:
raise ToolError("Molotov exited with non-zero code: %s" % ret_code, self.get_error_diagnostics())
return True
def shutdown(self):
shutdown_process(self.process, self.log)
def get_error_diagnostics(self):
diagnostics = []
if self.stdout is not None:
with open(self.stdout.name) as fds:
contents = fds.read().strip()
if contents:
diagnostics.append("molotov STDOUT:\n" + contents)
if self.stderr is not None:
with open(self.stderr.name) as fds:
contents = fds.read().strip()
if contents:
diagnostics.append("molotov STDERR:\n" + contents)
return diagnostics
def resource_files(self):
return [self.get_script_path(required=True)]
class Molotov(RequiredTool):
def __init__(self, path=None, **kwargs):
super(Molotov, self).__init__(installable=False, **kwargs)
self.tool_path = path or shutil.which(self.tool_name.lower())
class MolotovReportReader(ResultsReader):
def __init__(self, filename, parent_logger):
super(MolotovReportReader, self).__init__()
self.is_distributed = False
self.log = parent_logger.getChild(self.__class__.__name__)
self.ldjson_reader = LDJSONReader(filename, self.log)
self.read_records = 0
self._concurrency = 0
def _read(self, final_pass=False):
for row in self.ldjson_reader.read(final_pass):
self.read_records += 1
if row.get("type") == "workers":
self._concurrency = row.get("value", self._concurrency)
elif row.get("type") == "scenario_success":
label = unicode_decode(row["name"])
tstmp = int(float(row["ts"]))
rtm = float(row["duration"])
rcd = "200"
error = None
cnn = ltc = byte_count = 0
trname = ''
yield tstmp, label, self._concurrency, rtm, cnn, ltc, rcd, error, trname, byte_count
elif row.get("type") == "scenario_failure":
label = unicode_decode(row["name"])
tstmp = int(float(row["ts"]))
rtm = float(row["duration"])
rcd = row["exception"]
error = row["errorMessage"]
cnn = ltc = byte_count = 0
trname = ''
yield tstmp, label, self._concurrency, rtm, cnn, ltc, rcd, error, trname, byte_count
elif row.get("type") == "request":
label = unicode_decode(row["label"])
tstmp = int(float(row["ts"]))
rtm = float(row["elapsed"])
rcd = row["responseCode"]
error = None
if int(rcd) >= 400:
error = row["responseMessage"]
cnn = 0
ltc = 0
trname = ''
byte_count = 0
yield tstmp, label, self._concurrency, rtm, cnn, ltc, rcd, error, trname, byte_count
|
"""
DNS - main dnslib module
Contains core DNS packet handling code
"""
from __future__ import print_function
import base64,binascii,calendar,collections,copy,os.path,random,socket,\
string,struct,textwrap,time
from itertools import chain
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from dnslib.bit import get_bits,set_bits
from dnslib.bimap import Bimap,BimapError
from dnslib.buffer import Buffer,BufferError
from dnslib.label import DNSLabel,DNSLabelError,DNSBuffer
from dnslib.lex import WordLexer
from dnslib.ranges import BYTES,B,H,I,IP4,IP6,ntuple_range,check_range,\
check_bytes
class DNSError(Exception):
pass
QTYPE = Bimap('QTYPE',
{1:'A', 2:'NS', 5:'CNAME', 6:'SOA', 12:'PTR', 15:'MX',
16:'TXT', 17:'RP', 18:'AFSDB', 24:'SIG', 25:'KEY', 28:'AAAA',
29:'LOC', 33:'SRV', 35:'NAPTR', 36:'KX', 37:'CERT', 38:'A6',
39:'DNAME', 41:'OPT', 42:'APL', 43:'DS', 44:'SSHFP',
45:'IPSECKEY', 46:'RRSIG', 47:'NSEC', 48:'DNSKEY', 49:'DHCID',
50:'NSEC3', 51:'NSEC3PARAM', 52:'TLSA', 55:'HIP', 99:'SPF',
249:'TKEY', 250:'TSIG', 251:'IXFR', 252:'AXFR', 255:'ANY',
257:'TYPE257', 32768:'TA', 32769:'DLV'},
DNSError)
CLASS = Bimap('CLASS',
{1:'IN', 2:'CS', 3:'CH', 4:'Hesiod', 254:'None', 255:'*'},
DNSError)
QR = Bimap('QR',
{0:'QUERY', 1:'RESPONSE'},
DNSError)
RCODE = Bimap('RCODE',
{0:'NOERROR', 1:'FORMERR', 2:'SERVFAIL', 3:'NXDOMAIN',
4:'NOTIMP', 5:'REFUSED', 6:'YXDOMAIN', 7:'YXRRSET',
8:'NXRRSET', 9:'NOTAUTH', 10:'NOTZONE'},
DNSError)
OPCODE = Bimap('OPCODE',{0:'QUERY', 1:'IQUERY', 2:'STATUS', 5:'UPDATE'},
DNSError)
def label(label,origin=None):
if label.endswith("."):
return DNSLabel(label)
else:
return (origin if isinstance(origin,DNSLabel)
else DNSLabel(origin)).add(label)
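# e.g. label("www", "example.com") -> DNSLabel("www.example.com.")
#      label("www.example.com.")   -> DNSLabel("www.example.com.")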
class DNSRecord(object):
"""
Main DNS class - corresponds to DNS packet & comprises DNSHeader,
DNSQuestion and RR sections (answer,ns,ar)
>>> d = DNSRecord()
>>> d.add_question(DNSQuestion("abc.com")) # Or DNSRecord.question("abc.com")
>>> d.add_answer(RR("abc.com",QTYPE.CNAME,ttl=60,rdata=CNAME("ns.abc.com")))
>>> d.add_auth(RR("abc.com",QTYPE.SOA,ttl=60,rdata=SOA("ns.abc.com","admin.abc.com",(20140101,3600,3600,3600,3600))))
>>> d.add_ar(RR("ns.abc.com",ttl=60,rdata=A("1.2.3.4")))
>>> print(d)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 1, ANSWER: 1, AUTHORITY: 1, ADDITIONAL: 1
;; QUESTION SECTION:
;abc.com. IN A
;; ANSWER SECTION:
abc.com. 60 IN CNAME ns.abc.com.
;; AUTHORITY SECTION:
abc.com. 60 IN SOA ns.abc.com. admin.abc.com. 20140101 3600 3600 3600 3600
;; ADDITIONAL SECTION:
ns.abc.com. 60 IN A 1.2.3.4
>>> str(d) == str(DNSRecord.parse(d.pack()))
True
"""
@classmethod
def parse(cls,packet):
"""
Parse DNS packet data and return DNSRecord instance
Recursively parses sections (calling appropriate parse method)
"""
buffer = DNSBuffer(packet)
try:
header = DNSHeader.parse(buffer)
questions = []
rr = []
auth = []
ar = []
for i in range(header.q):
questions.append(DNSQuestion.parse(buffer))
for i in range(header.a):
rr.append(RR.parse(buffer))
for i in range(header.auth):
auth.append(RR.parse(buffer))
for i in range(header.ar):
ar.append(RR.parse(buffer))
return cls(header,questions,rr,auth=auth,ar=ar)
except DNSError:
raise
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking DNSRecord [offset=%d]: %s" % (
buffer.offset,e))
@classmethod
def question(cls,qname,qtype="A",qclass="IN"):
"""
Shortcut to create question
>>> q = DNSRecord.question("www.google.com")
>>> print(q)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;www.google.com. IN A
>>> q = DNSRecord.question("www.google.com","NS")
>>> print(q)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;www.google.com. IN NS
"""
return DNSRecord(q=DNSQuestion(qname,getattr(QTYPE,qtype),
getattr(CLASS,qclass)))
def __init__(self,header=None,questions=None,
rr=None,q=None,a=None,auth=None,ar=None):
"""
Create new DNSRecord
"""
self.header = header or DNSHeader()
self.questions = questions or []
self.rr = rr or []
self.auth = auth or []
self.ar = ar or []
# Shortcuts to add a single Question/Answer
if q:
self.questions.append(q)
if a:
self.rr.append(a)
self.set_header_qa()
def reply(self,ra=1,aa=1):
"""
Create skeleton reply packet
>>> q = DNSRecord.question("abc.com")
>>> a = q.reply()
>>> a.add_answer(RR("abc.com",QTYPE.A,rdata=A("1.2.3.4"),ttl=60))
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com. IN A
;; ANSWER SECTION:
abc.com. 60 IN A 1.2.3.4
"""
return DNSRecord(DNSHeader(id=self.header.id,
bitmap=self.header.bitmap,
qr=1,ra=ra,aa=aa),
q=self.q)
def replyZone(self,zone,ra=1,aa=1):
"""
Create reply with response data in zone-file format
>>> q = DNSRecord.question("abc.com")
>>> a = q.replyZone("abc.com 60 A 1.2.3.4")
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com. IN A
;; ANSWER SECTION:
abc.com. 60 IN A 1.2.3.4
"""
return DNSRecord(DNSHeader(id=self.header.id,
bitmap=self.header.bitmap,
qr=1,ra=ra,aa=aa),
q=self.q,
rr=RR.fromZone(zone))
def add_question(self,*q):
"""
Add question(s)
>>> q = DNSRecord()
>>> q.add_question(DNSQuestion("abc.com"),
... DNSQuestion("abc.com",QTYPE.MX))
>>> print(q)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 2, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com. IN A
;abc.com. IN MX
"""
self.questions.extend(q)
self.set_header_qa()
def add_answer(self,*rr):
"""
Add answer(s)
>>> q = DNSRecord.question("abc.com")
>>> a = q.reply()
>>> a.add_answer(*RR.fromZone("abc.com A 1.2.3.4"))
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com. IN A
;; ANSWER SECTION:
abc.com. 0 IN A 1.2.3.4
"""
self.rr.extend(rr)
self.set_header_qa()
def add_auth(self,*auth):
"""
Add authority records
>>> q = DNSRecord.question("abc.com")
>>> a = q.reply()
>>> a.add_answer(*RR.fromZone("abc.com 60 A 1.2.3.4"))
>>> a.add_auth(*RR.fromZone("abc.com 3600 NS nsa.abc.com"))
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 1, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com. IN A
;; ANSWER SECTION:
abc.com. 60 IN A 1.2.3.4
;; AUTHORITY SECTION:
abc.com. 3600 IN NS nsa.abc.com.
"""
self.auth.extend(auth)
self.set_header_qa()
def add_ar(self,*ar):
"""
Add additional records
>>> q = DNSRecord.question("abc.com")
>>> a = q.reply()
>>> a.add_answer(*RR.fromZone("abc.com 60 CNAME x.abc.com"))
>>> a.add_ar(*RR.fromZone("x.abc.com 3600 A 1.2.3.4"))
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; QUESTION SECTION:
;abc.com. IN A
;; ANSWER SECTION:
abc.com. 60 IN CNAME x.abc.com.
;; ADDITIONAL SECTION:
x.abc.com. 3600 IN A 1.2.3.4
"""
self.ar.extend(ar)
self.set_header_qa()
def set_header_qa(self):
"""
Reset header q/a/auth/ar counts to match number of records
(normally done transparently)
"""
self.header.q = len(self.questions)
self.header.a = len(self.rr)
self.header.auth = len(self.auth)
self.header.ar = len(self.ar)
# Shortcut to get first question
def get_q(self):
return self.questions[0] if self.questions else DNSQuestion()
q = property(get_q)
# Shortcut to get first answer
def get_a(self):
return self.rr[0] if self.rr else RR()
a = property(get_a)
def pack(self):
"""
Pack record into binary packet
(recursively packs each section into buffer)
>>> q = DNSRecord.question("abc.com")
>>> q.header.id = 1234
>>> a = q.replyZone("abc.com A 1.2.3.4")
>>> a.header.aa = 0
>>> pkt = a.pack()
>>> print(DNSRecord.parse(pkt))
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 1234
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com. IN A
;; ANSWER SECTION:
abc.com. 0 IN A 1.2.3.4
"""
self.set_header_qa()
buffer = DNSBuffer()
self.header.pack(buffer)
for q in self.questions:
q.pack(buffer)
for rr in self.rr:
rr.pack(buffer)
for auth in self.auth:
auth.pack(buffer)
for ar in self.ar:
ar.pack(buffer)
return buffer.data
def truncate(self):
"""
Return truncated copy of DNSRecord (with TC flag set)
(removes all Questions & RRs and just returns header)
>>> q = DNSRecord.question("abc.com")
>>> a = q.reply()
>>> a.add_answer(*RR.fromZone('abc.com IN TXT %s' % ('x' * 255)))
>>> a.add_answer(*RR.fromZone('abc.com IN TXT %s' % ('x' * 255)))
>>> a.add_answer(*RR.fromZone('abc.com IN TXT %s' % ('x' * 255)))
>>> len(a.pack())
829
>>> t = a.truncate()
>>> print(t)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa tc rd ra; QUERY: 0, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
"""
return DNSRecord(DNSHeader(id=self.header.id,
bitmap=self.header.bitmap,
tc=1))
def send(self,dest,port=53,tcp=False,timeout=None,ipv6=False):
"""
Send packet to nameserver and return response
"""
data = self.pack()
if ipv6:
inet = socket.AF_INET6
else:
inet = socket.AF_INET
if tcp:
if len(data) > 65535:
raise ValueError("Packet length too long: %d" % len(data))
data = struct.pack("!H",len(data)) + data
sock = socket.socket(inet,socket.SOCK_STREAM)
if timeout is not None:
sock.settimeout(timeout)
sock.connect((dest,port))
sock.sendall(data)
response = sock.recv(8192)
length = struct.unpack("!H",bytes(response[:2]))[0]
while len(response) - 2 < length:
response += sock.recv(8192)
sock.close()
response = response[2:]
else:
sock = socket.socket(inet,socket.SOCK_DGRAM)
if timeout is not None:
sock.settimeout(timeout)
sock.sendto(self.pack(),(dest,port))
response,server = sock.recvfrom(8192)
sock.close()
return response
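    # Usage sketch (a minimal sketch; "8.8.8.8" is an arbitrary resolver
    # chosen for illustration): build a question, send it over UDP, and
    # retry over TCP if the response comes back truncated.
    #
    #   q = DNSRecord.question("example.com")
    #   reply = DNSRecord.parse(q.send("8.8.8.8", 53, timeout=5))
    #   if reply.header.tc:
    #       reply = DNSRecord.parse(q.send("8.8.8.8", 53, tcp=True, timeout=5))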
def format(self,prefix="",sort=False):
"""
Formatted 'repr'-style representation of record
(optionally with prefix and/or sorted RRs)
"""
s = sorted if sort else lambda x:x
sections = [ repr(self.header) ]
sections.extend(s([repr(q) for q in self.questions]))
sections.extend(s([repr(rr) for rr in self.rr]))
sections.extend(s([repr(rr) for rr in self.auth]))
sections.extend(s([repr(rr) for rr in self.ar]))
return prefix + ("\n" + prefix).join(sections)
def toZone(self,prefix=""):
"""
Formatted 'DiG' (zone) style output
(with optional prefix)
"""
z = self.header.toZone().split("\n")
if self.questions:
z.append(";; QUESTION SECTION:")
[ z.extend(q.toZone().split("\n")) for q in self.questions ]
if self.rr:
z.append(";; ANSWER SECTION:")
[ z.extend(rr.toZone().split("\n")) for rr in self.rr ]
if self.auth:
z.append(";; AUTHORITY SECTION:")
[ z.extend(rr.toZone().split("\n")) for rr in self.auth ]
if self.ar:
z.append(";; ADDITIONAL SECTION:")
[ z.extend(rr.toZone().split("\n")) for rr in self.ar ]
return prefix + ("\n" + prefix).join(z)
def short(self):
"""
Just return RDATA
"""
return "\n".join([rr.rdata.toZone() for rr in self.rr])
def __eq__(self,other):
"""
Test for equality by diffing records
"""
if type(other) != type(self):
return False
else:
return self.diff(other) == []
def __ne__(self,other):
return not(self.__eq__(other))
def diff(self,other):
"""
Diff records - recursively diff sections (sorting RRs)
"""
err = []
if self.header != other.header:
err.append((self.header,other.header))
for section in ('questions','rr','auth','ar'):
if section == 'questions':
k = lambda x:tuple(map(str,(x.qname,x.qtype)))
else:
k = lambda x:tuple(map(str,(x.rname,x.rtype,x.rdata)))
a = dict([(k(rr),rr) for rr in getattr(self,section)])
b = dict([(k(rr),rr) for rr in getattr(other,section)])
sa = set(a)
sb = set(b)
for e in sorted(sa.intersection(sb)):
if a[e] != b[e]:
err.append((a[e],b[e]))
for e in sorted(sa.difference(sb)):
err.append((a[e],None))
for e in sorted(sb.difference(sa)):
err.append((None,b[e]))
return err
def __repr__(self):
return self.format()
def __str__(self):
return self.toZone()
class DNSHeader(object):
"""
DNSHeader section
"""
# Ensure attribute values match packet
id = H('id')
bitmap = H('bitmap')
q = H('q')
a = H('a')
auth = H('auth')
ar = H('ar')
@classmethod
def parse(cls,buffer):
"""
Implements parse interface
"""
try:
(id,bitmap,q,a,auth,ar) = buffer.unpack("!HHHHHH")
return cls(id,bitmap,q,a,auth,ar)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking DNSHeader [offset=%d]: %s" % (
buffer.offset,e))
def __init__(self,id=None,bitmap=None,q=0,a=0,auth=0,ar=0,**args):
if id is None:
self.id = random.randint(0,65535)
else:
self.id = id
if bitmap is None:
self.bitmap = 0
self.rd = 1
else:
self.bitmap = bitmap
self.q = q
self.a = a
self.auth = auth
self.ar = ar
for k,v in args.items():
if k.lower() == "qr":
self.qr = v
elif k.lower() == "opcode":
self.opcode = v
elif k.lower() == "aa":
self.aa = v
elif k.lower() == "tc":
self.tc = v
elif k.lower() == "rd":
self.rd = v
elif k.lower() == "ra":
self.ra = v
elif k.lower() == "rcode":
self.rcode = v
# Accessors for header properties (automatically pack/unpack
# into bitmap)
def get_qr(self):
return get_bits(self.bitmap,15)
def set_qr(self,val):
self.bitmap = set_bits(self.bitmap,val,15)
qr = property(get_qr,set_qr)
def get_opcode(self):
return get_bits(self.bitmap,11,4)
def set_opcode(self,val):
self.bitmap = set_bits(self.bitmap,val,11,4)
opcode = property(get_opcode,set_opcode)
def get_aa(self):
return get_bits(self.bitmap,10)
def set_aa(self,val):
self.bitmap = set_bits(self.bitmap,val,10)
aa = property(get_aa,set_aa)
def get_tc(self):
return get_bits(self.bitmap,9)
def set_tc(self,val):
self.bitmap = set_bits(self.bitmap,val,9)
tc = property(get_tc,set_tc)
def get_rd(self):
return get_bits(self.bitmap,8)
def set_rd(self,val):
self.bitmap = set_bits(self.bitmap,val,8)
rd = property(get_rd,set_rd)
def get_ra(self):
return get_bits(self.bitmap,7)
def set_ra(self,val):
self.bitmap = set_bits(self.bitmap,val,7)
ra = property(get_ra,set_ra)
def get_rcode(self):
return get_bits(self.bitmap,0,4)
def set_rcode(self,val):
self.bitmap = set_bits(self.bitmap,val,0,4)
rcode = property(get_rcode,set_rcode)
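    # Bit layout sketch for the accessors above (bit 0 = least significant
    # bit of the 16-bit bitmap, matching the offsets passed to
    # get_bits/set_bits):
    #   15: qr | 11-14: opcode | 10: aa | 9: tc | 8: rd | 7: ra | 0-3: rcode
    # e.g. bitmap 0x8180 decodes as qr=1, rd=1, ra=1 (a typical response).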
def pack(self,buffer):
buffer.pack("!HHHHHH",self.id,self.bitmap,
self.q,self.a,self.auth,self.ar)
def __repr__(self):
f = [ self.aa and 'AA',
self.tc and 'TC',
self.rd and 'RD',
self.ra and 'RA' ]
if OPCODE.get(self.opcode) == 'UPDATE':
f1='zo'
f2='pr'
f3='up'
f4='ad'
else:
f1='q'
f2='a'
f3='ns'
f4='ar'
return "<DNS Header: id=0x%x type=%s opcode=%s flags=%s " \
"rcode='%s' %s=%d %s=%d %s=%d %s=%d>" % (
self.id,
QR.get(self.qr),
OPCODE.get(self.opcode),
",".join(filter(None,f)),
RCODE.get(self.rcode),
f1, self.q, f2, self.a, f3, self.auth, f4, self.ar )
def toZone(self):
f = [ self.qr and 'qr',
self.aa and 'aa',
self.tc and 'tc',
self.rd and 'rd',
self.ra and 'ra' ]
z1 = ';; ->>HEADER<<- opcode: %s, status: %s, id: %d' % (
OPCODE.get(self.opcode),RCODE.get(self.rcode),self.id)
z2 = ';; flags: %s; QUERY: %d, ANSWER: %d, AUTHORITY: %d, ADDITIONAL: %d' % (
" ".join(filter(None,f)),
self.q,self.a,self.auth,self.ar)
return z1 + "\n" + z2
def __str__(self):
return self.toZone()
def __ne__(self,other):
return not(self.__eq__(other))
def __eq__(self,other):
if type(other) != type(self):
return False
else:
# Ignore id
attrs = ('qr','aa','tc','rd','ra','opcode','rcode')
return all([getattr(self,x) == getattr(other,x) for x in attrs])
class DNSQuestion(object):
"""
DNSQuestion section
"""
@classmethod
def parse(cls,buffer):
try:
qname = buffer.decode_name()
qtype,qclass = buffer.unpack("!HH")
return cls(qname,qtype,qclass)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking DNSQuestion [offset=%d]: %s" % (
buffer.offset,e))
def __init__(self,qname=None,qtype=1,qclass=1):
self.qname = qname
self.qtype = qtype
self.qclass = qclass
def set_qname(self,qname):
if isinstance(qname,DNSLabel):
self._qname = qname
else:
self._qname = DNSLabel(qname)
def get_qname(self):
return self._qname
qname = property(get_qname,set_qname)
def pack(self,buffer):
buffer.encode_name(self.qname)
buffer.pack("!HH",self.qtype,self.qclass)
def toZone(self):
return ';%-30s %-7s %s' % (self.qname,CLASS.get(self.qclass),
QTYPE.get(self.qtype))
def __repr__(self):
return "<DNS Question: '%s' qtype=%s qclass=%s>" % (
self.qname, QTYPE.get(self.qtype), CLASS.get(self.qclass))
def __str__(self):
return self.toZone()
def __ne__(self,other):
return not(self.__eq__(other))
def __eq__(self,other):
if type(other) != type(self):
return False
else:
# List of attributes to compare when diffing
attrs = ('qname','qtype','qclass')
return all([getattr(self,x) == getattr(other,x) for x in attrs])
class EDNSOption(object):
"""
EDNSOption pseudo-section
    Very rudimentary support for EDNS0 options; however, this has not been
    tested due to a lack of data (anyone wanting to improve support or
    provide test data please raise an issue)
>>> EDNSOption(1,b"1234")
<EDNS Option: Code=1 Data='31323334'>
>>> EDNSOption(99999,b"1234")
Traceback (most recent call last):
...
ValueError: Attribute 'code' must be between 0-65535 [99999]
>>> EDNSOption(1,None)
Traceback (most recent call last):
...
ValueError: Attribute 'data' must be instance of ...
"""
code = H('code')
data = BYTES('data')
def __init__(self,code,data):
self.code = code
self.data = data
def pack(self,buffer):
buffer.pack("!HH",self.code,len(self.data))
buffer.append(self.data)
def __repr__(self):
return "<EDNS Option: Code=%d Data='%s'>" % (
self.code,binascii.hexlify(self.data).decode())
def toZone(self):
return ";EDNS: code: %s; data: %s" % (
self.code,binascii.hexlify(self.data).decode())
def __str__(self):
return self.toZone()
def __ne__(self,other):
return not(self.__eq__(other))
def __eq__(self,other):
if type(other) != type(self):
return False
else:
# List of attributes to compare when diffing
attrs = ('code','data')
return all([getattr(self,x) == getattr(other,x) for x in attrs])
class RR(object):
"""
DNS Resource Record
Contains RR header and RD (resource data) instance
"""
rtype = H('rtype')
rclass = H('rclass')
ttl = I('ttl')
rdlength = H('rdlength')
@classmethod
def parse(cls,buffer):
try:
rname = buffer.decode_name()
rtype,rclass,ttl,rdlength = buffer.unpack("!HHIH")
if rtype == QTYPE.OPT:
options = []
option_buffer = Buffer(buffer.get(rdlength))
while option_buffer.remaining() > 4:
code,length = option_buffer.unpack("!HH")
data = option_buffer.get(length)
options.append(EDNSOption(code,data))
rdata = options
else:
if rdlength:
rdata = RDMAP.get(QTYPE.get(rtype),RD).parse(
buffer,rdlength)
else:
rdata = ''
return cls(rname,rtype,rclass,ttl,rdata)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking RR [offset=%d]: %s" % (
buffer.offset,e))
@classmethod
def fromZone(cls,zone,origin="",ttl=0):
"""
Parse RR data from zone file and return list of RRs
"""
return list(ZoneParser(zone,origin=origin,ttl=ttl))
def __init__(self,rname=None,rtype=1,rclass=1,ttl=0,rdata=None):
self.rname = rname
self.rtype = rtype
self.rclass = rclass
self.ttl = ttl
self.rdata = rdata
# TODO Add property getters/setters
if self.rtype == QTYPE.OPT:
self.edns_len = self.rclass
self.edns_do = get_bits(self.ttl,15)
self.edns_ver = get_bits(self.ttl,16,8)
self.edns_rcode = get_bits(self.ttl,24,8)
def set_rname(self,rname):
if isinstance(rname,DNSLabel):
self._rname = rname
else:
self._rname = DNSLabel(rname)
def get_rname(self):
return self._rname
rname = property(get_rname,set_rname)
def pack(self,buffer):
buffer.encode_name(self.rname)
buffer.pack("!HHI",self.rtype,self.rclass,self.ttl)
rdlength_ptr = buffer.offset
buffer.pack("!H",0)
start = buffer.offset
if self.rtype == QTYPE.OPT:
for opt in self.rdata:
opt.pack(buffer)
else:
self.rdata.pack(buffer)
end = buffer.offset
buffer.update(rdlength_ptr,"!H",end-start)
def __repr__(self):
if self.rtype == QTYPE.OPT:
s = ["<DNS OPT: edns_ver=%d do=%d ext_rcode=%d udp_len=%d>" % (
self.edns_ver,self.edns_do,self.edns_rcode,self.edns_len)]
s.extend([repr(opt) for opt in self.rdata])
return "\n".join(s)
else:
return "<DNS RR: '%s' rtype=%s rclass=%s ttl=%d rdata='%s'>" % (
self.rname, QTYPE.get(self.rtype), CLASS.get(self.rclass),
self.ttl, self.rdata)
def toZone(self):
if self.rtype == QTYPE.OPT:
edns = [ ";OPT PSEUDOSECTION",
";EDNS: version: %d, flags: %s; udp: %d" % (
self.edns_ver,
"do" if self.edns_do else "",
self.edns_len)
]
edns.extend([str(opt) for opt in self.rdata])
return "\n".join(edns)
else:
return '%-23s %-7s %-7s %-7s %s' % (self.rname,self.ttl,
CLASS.get(self.rclass),
QTYPE.get(self.rtype),
self.rdata.toZone())
def __str__(self):
return self.toZone()
def __ne__(self,other):
return not(self.__eq__(other))
def __eq__(self,other):
# Handle OPT specially as may be different types (RR/EDNS0)
if self.rtype == QTYPE.OPT and getattr(other,"rtype",False) == QTYPE.OPT:
attrs = ('rname','rclass','rtype','ttl','rdata')
return all([getattr(self,x) == getattr(other,x) for x in attrs])
else:
if type(other) != type(self):
return False
else:
# List of attributes to compare when diffing (ignore ttl)
attrs = ('rname','rclass','rtype','rdata')
return all([getattr(self,x) == getattr(other,x) for x in attrs])
class EDNS0(RR):
"""
    EDNS0 pseudo-record
    Wrapper around the EDNS0 support in RR to make it more convenient to
    create an EDNS0 pseudo-record - this just makes it easier to specify
    the EDNS0 parameters directly
EDNS flags should be passed as a space separated string of options
(currently only 'do' is supported)
>>> EDNS0("abc.com",flags="do",udp_len=2048,version=1)
<DNS OPT: edns_ver=1 do=1 ext_rcode=0 udp_len=2048>
>>> print(_)
;OPT PSEUDOSECTION
;EDNS: version: 1, flags: do; udp: 2048
>>> opt = EDNS0("abc.com",flags="do",ext_rcode=1,udp_len=2048,version=1,opts=[EDNSOption(1,b'abcd')])
>>> opt
<DNS OPT: edns_ver=1 do=1 ext_rcode=1 udp_len=2048>
<EDNS Option: Code=1 Data='61626364'>
>>> print(opt)
;OPT PSEUDOSECTION
;EDNS: version: 1, flags: do; udp: 2048
;EDNS: code: 1; data: 61626364
>>> r = DNSRecord.question("abc.com").replyZone("abc.com A 1.2.3.4")
>>> r.add_ar(opt)
>>> print(r)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; QUESTION SECTION:
;abc.com. IN A
;; ANSWER SECTION:
abc.com. 0 IN A 1.2.3.4
;; ADDITIONAL SECTION:
;OPT PSEUDOSECTION
;EDNS: version: 1, flags: do; udp: 2048
;EDNS: code: 1; data: 61626364
>>> DNSRecord.parse(r.pack()) == r
True
"""
def __init__(self,rname=None,rtype=QTYPE.OPT,
ext_rcode=0,version=0,flags="",udp_len=0,opts=None):
check_range('ext_rcode',ext_rcode,0,255)
check_range('version',version,0,255)
edns_flags = { 'do' : 1 << 15 }
flag_bitmap = sum([edns_flags[x] for x in flags.split()])
ttl = (ext_rcode << 24) + (version << 16) + flag_bitmap
if opts and not all([isinstance(o,EDNSOption) for o in opts]):
raise ValueError("Option must be instance of EDNSOption")
super(EDNS0,self).__init__(rname,rtype,udp_len,ttl,opts or [])
class RD(object):
"""
Base RD object - also used as placeholder for unknown RD types
To create a new RD type subclass this and add to RDMAP (below)
    Subclass should implement (as a minimum):
parse (parse from packet data)
__init__ (create class)
__repr__ (return in zone format)
fromZone (create from zone format)
(toZone uses __repr__ by default)
Unknown rdata types default to RD and store rdata as a binary
blob (this allows round-trip encoding/decoding)
"""
@classmethod
def parse(cls,buffer,length):
"""
Unpack from buffer
"""
try:
data = buffer.get(length)
return cls(data)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking RD [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
"""
Create new record from zone format data
RD is a list of strings parsed from DiG output
"""
        # Unknown rdata - assume hexdump in zone format
# (DiG prepends "\\# <len>" to the hexdump so get last item)
return cls(binascii.unhexlify(rd[-1].encode('ascii')))
def __init__(self,data=b""):
# Assume raw bytes
check_bytes('data',data)
self.data = bytes(data)
def pack(self,buffer):
"""
Pack record into buffer
"""
buffer.append(self.data)
def __repr__(self):
"""
Default 'repr' format should be equivalent to RD zone format
"""
# For unknown rdata just default to hex
return binascii.hexlify(self.data).decode()
def toZone(self):
return repr(self)
    # Comparison operations - in most cases only need to override 'attrs'
    # in subclass (__eq__ will automatically compare defined attrs)
# Attributes for comparison
attrs = ('data',)
def __eq__(self,other):
if type(other) != type(self):
return False
else:
return all([getattr(self,x) == getattr(other,x) for x in self.attrs])
def __ne__(self,other):
return not(self.__eq__(other))
def _force_bytes(x):
if isinstance(x,bytes):
return x
else:
return x.encode()
class TXT(RD):
"""
DNS TXT record. Pass in either a single string, or a tuple/list of strings.
>>> TXT('txtvers=1')
"txtvers=1"
>>> TXT(('txtvers=1',))
"txtvers=1"
>>> TXT(['txtvers=1',])
"txtvers=1"
>>> TXT(['txtvers=1','swver=2.5'])
"txtvers=1","swver=2.5"
>>> a = DNSRecord()
>>> a.add_answer(*RR.fromZone('example.com 60 IN TXT "txtvers=1"'))
>>> a.add_answer(*RR.fromZone('example.com 120 IN TXT "txtvers=1" "swver=2.3"'))
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 0, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 0
;; ANSWER SECTION:
example.com. 60 IN TXT "txtvers=1"
example.com. 120 IN TXT "txtvers=1" "swver=2.3"
"""
@classmethod
def parse(cls,buffer,length):
try:
data = list()
start_bo = buffer.offset
now_length = 0
while buffer.offset < start_bo + length:
(txtlength,) = buffer.unpack("!B")
                # Each string is prefixed by a single length byte
                # (an RFC 1035 "character-string")
if now_length + txtlength < length:
now_length += txtlength
data.append(buffer.get(txtlength))
else:
raise DNSError("Invalid TXT record: len(%d) > RD len(%d)" %
(txtlength,length))
return cls(data)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking TXT [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(list(map(lambda x: x.encode(), rd)))
def __init__(self,data):
if type(data) in (tuple,list):
self.data = [ _force_bytes(x) for x in data ]
else:
self.data = [ _force_bytes(data) ]
if any([len(x)>255 for x in self.data]):
raise DNSError("TXT record too long: %s" % self.data)
def pack(self,buffer):
for ditem in self.data:
if len(ditem) > 255:
raise DNSError("TXT record too long: %s" % ditem)
buffer.pack("!B",len(ditem))
buffer.append(ditem)
def toZone(self):
return " ".join([ '"%s"' % x.decode(errors='replace') for x in self.data ])
def __repr__(self):
return ",".join([ '"%s"' % x.decode(errors='replace') for x in self.data ])
class A(RD):
data = IP4('data')
@classmethod
def parse(cls,buffer,length):
try:
data = buffer.unpack("!BBBB")
return cls(data)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking A [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(rd[0])
def __init__(self,data):
if type(data) in (tuple,list):
self.data = tuple(data)
else:
self.data = tuple(map(int,data.rstrip(".").split(".")))
def pack(self,buffer):
buffer.pack("!BBBB",*self.data)
def __repr__(self):
return "%d.%d.%d.%d" % self.data
def _parse_ipv6(a):
"""
Parse IPv6 address. Ideally we would use the ipaddress module in
Python3.3 but can't rely on having this.
Does not handle dotted-quad addresses or subnet prefix
>>> _parse_ipv6("::") == (0,) * 16
True
>>> _parse_ipv6("1234:5678::abcd:0:ff00")
(18, 52, 86, 120, 0, 0, 0, 0, 0, 0, 171, 205, 0, 0, 255, 0)
"""
l,_,r = a.partition("::")
l_groups = list(chain(*[divmod(int(x,16),256) for x in l.split(":") if x]))
r_groups = list(chain(*[divmod(int(x,16),256) for x in r.split(":") if x]))
zeros = [0] * (16 - len(l_groups) - len(r_groups))
return tuple(l_groups + zeros + r_groups)
def _format_ipv6(a):
"""
Format IPv6 address (from tuple of 16 bytes) compressing sequence of
zero bytes to '::'. Ideally we would use the ipaddress module in
Python3.3 but can't rely on having this.
>>> _format_ipv6([0]*16)
'::'
>>> _format_ipv6(_parse_ipv6("::0012:5678"))
'::12:5678'
>>> _format_ipv6(_parse_ipv6("1234:0:5678::ff:0:1"))
'1234:0:5678::ff:0:1'
"""
left = []
right = []
current = 'left'
for i in range(0,16,2):
group = (a[i] << 8) + a[i+1]
if current == 'left':
if group == 0 and i < 14:
if (a[i+2] << 8) + a[i+3] == 0:
current = 'right'
else:
left.append("0")
else:
left.append("%x" % group)
else:
if group == 0 and len(right) == 0:
pass
else:
right.append("%x" % group)
if len(left) < 8:
return ":".join(left) + "::" + ":".join(right)
else:
return ":".join(left)
class AAAA(RD):
"""
Basic support for AAAA record - accepts IPv6 address data as either
a tuple of 16 bytes or in text format
"""
data = IP6('data')
@classmethod
def parse(cls,buffer,length):
try:
data = buffer.unpack("!16B")
return cls(data)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking AAAA [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(rd[0])
def __init__(self,data):
if type(data) in (tuple,list):
self.data = tuple(data)
else:
self.data = _parse_ipv6(data)
def pack(self,buffer):
buffer.pack("!16B",*self.data)
def __repr__(self):
return _format_ipv6(self.data)
class MX(RD):
preference = H('preference')
@classmethod
def parse(cls,buffer,length):
try:
(preference,) = buffer.unpack("!H")
mx = buffer.decode_name()
return cls(mx,preference)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking MX [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(label(rd[1],origin),int(rd[0]))
def __init__(self,label=None,preference=10):
self.label = label
self.preference = preference
def set_label(self,label):
if isinstance(label,DNSLabel):
self._label = label
else:
self._label = DNSLabel(label)
def get_label(self):
return self._label
label = property(get_label,set_label)
def pack(self,buffer):
buffer.pack("!H",self.preference)
buffer.encode_name(self.label)
def __repr__(self):
return "%d %s" % (self.preference,self.label)
attrs = ('preference','label')
class CNAME(RD):
@classmethod
def parse(cls,buffer,length):
try:
label = buffer.decode_name()
return cls(label)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking CNAME [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(label(rd[0],origin))
def __init__(self,label=None):
self.label = label
def set_label(self,label):
if isinstance(label,DNSLabel):
self._label = label
else:
self._label = DNSLabel(label)
def get_label(self):
return self._label
label = property(get_label,set_label)
def pack(self,buffer):
buffer.encode_name(self.label)
def __repr__(self):
return "%s" % (self.label)
attrs = ('label',)
class PTR(CNAME):
pass
class NS(CNAME):
pass
class SOA(RD):
times = ntuple_range('times',5,0,4294967295)
@classmethod
def parse(cls,buffer,length):
try:
mname = buffer.decode_name()
rname = buffer.decode_name()
times = buffer.unpack("!IIIII")
return cls(mname,rname,times)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking SOA [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(label(rd[0],origin),label(rd[1],origin),[parse_time(t) for t in rd[2:]])
def __init__(self,mname=None,rname=None,times=None):
self.mname = mname
self.rname = rname
self.times = tuple(times) if times else (0,0,0,0,0)
def set_mname(self,mname):
if isinstance(mname,DNSLabel):
self._mname = mname
else:
self._mname = DNSLabel(mname)
def get_mname(self):
return self._mname
mname = property(get_mname,set_mname)
def set_rname(self,rname):
if isinstance(rname,DNSLabel):
self._rname = rname
else:
self._rname = DNSLabel(rname)
def get_rname(self):
return self._rname
rname = property(get_rname,set_rname)
def pack(self,buffer):
buffer.encode_name(self.mname)
buffer.encode_name(self.rname)
buffer.pack("!IIIII", *self.times)
def __repr__(self):
return "%s %s %s" % (self.mname,self.rname,
" ".join(map(str,self.times)))
attrs = ('mname','rname','times')
class SRV(RD):
priority = H('priority')
weight = H('weight')
port = H('port')
@classmethod
def parse(cls,buffer,length):
try:
priority,weight,port = buffer.unpack("!HHH")
target = buffer.decode_name()
return cls(priority,weight,port,target)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking SRV [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(int(rd[0]),int(rd[1]),int(rd[2]),rd[3])
def __init__(self,priority=0,weight=0,port=0,target=None):
self.priority = priority
self.weight = weight
self.port = port
self.target = target
def set_target(self,target):
if isinstance(target,DNSLabel):
self._target = target
else:
self._target = DNSLabel(target)
def get_target(self):
return self._target
target = property(get_target,set_target)
def pack(self,buffer):
buffer.pack("!HHH",self.priority,self.weight,self.port)
buffer.encode_name(self.target)
def __repr__(self):
return "%d %d %d %s" % (self.priority,self.weight,self.port,self.target)
attrs = ('priority','weight','port','target')
class NAPTR(RD):
order = H('order')
preference = H('preference')
@classmethod
def parse(cls, buffer, length):
try:
order, preference = buffer.unpack('!HH')
(length,) = buffer.unpack('!B')
flags = buffer.get(length)
(length,) = buffer.unpack('!B')
service = buffer.get(length)
(length,) = buffer.unpack('!B')
regexp = buffer.get(length)
replacement = buffer.decode_name()
return cls(order, preference, flags, service, regexp, replacement)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking NAPTR [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
encode = lambda s : s.encode()
_label = lambda s : label(s,origin)
m = (int,int,encode,encode,encode,_label)
return cls(*[ f(v) for f,v in zip(m,rd)])
def __init__(self,order,preference,flags,service,regexp,replacement=None):
self.order = order
self.preference = preference
self.flags = flags
self.service = service
self.regexp = regexp
self.replacement = replacement
def set_replacement(self,replacement):
if isinstance(replacement,DNSLabel):
self._replacement = replacement
else:
self._replacement = DNSLabel(replacement)
def get_replacement(self):
return self._replacement
replacement = property(get_replacement,set_replacement)
def pack(self, buffer):
buffer.pack('!HH', self.order, self.preference)
buffer.pack('!B', len(self.flags))
buffer.append(self.flags)
buffer.pack('!B', len(self.service))
buffer.append(self.service)
buffer.pack('!B', len(self.regexp))
buffer.append(self.regexp)
buffer.encode_name(self.replacement)
def __repr__(self):
return '%d %d "%s" "%s" "%s" %s' %(
self.order,self.preference,self.flags.decode(),
self.service.decode(),
self.regexp.decode().replace('\\','\\\\'),
self.replacement or '.'
)
attrs = ('order','preference','flags','service','regexp','replacement')
class DNSKEY(RD):
flags = H('flags')
protocol = B('protocol')
algorithm = B('algorithm')
@classmethod
def parse(cls,buffer,length):
try:
(flags,protocol,algorithm) = buffer.unpack("!HBB")
key = buffer.get(length - 4)
return cls(flags,protocol,algorithm,key)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking DNSKEY [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(int(rd[0]),int(rd[1]),int(rd[2]),
base64.b64decode(("".join(rd[3:])).encode('ascii')))
def __init__(self,flags,protocol,algorithm,key):
self.flags = flags
self.protocol = protocol
self.algorithm = algorithm
self.key = key
def pack(self,buffer):
buffer.pack("!HBB",self.flags,self.protocol,self.algorithm)
buffer.append(self.key)
def __repr__(self):
return "%d %d %d %s" % (self.flags,self.protocol,self.algorithm,
base64.b64encode(self.key).decode())
attrs = ('flags','protocol','algorithm','key')
class RRSIG(RD):
covered = H('covered')
algorithm = B('algorithm')
labels = B('labels')
orig_ttl = I('orig_ttl')
sig_exp = I('sig_exp')
sig_inc = I('sig_inc')
key_tag = H('key_tag')
@classmethod
def parse(cls,buffer,length):
try:
start = buffer.offset
(covered,algorithm,labels,
orig_ttl,sig_exp,sig_inc,key_tag) = buffer.unpack("!HBBIIIH")
name = buffer.decode_name()
sig = buffer.get(length - (buffer.offset - start))
return cls(covered,algorithm,labels,orig_ttl,sig_exp,sig_inc,key_tag,
name,sig)
except (BufferError,BimapError) as e:
raise DNSError("Error unpacking DNSKEY [offset=%d]: %s" %
(buffer.offset,e))
@classmethod
def fromZone(cls,rd,origin=None):
return cls(getattr(QTYPE,rd[0]),int(rd[1]),int(rd[2]),int(rd[3]),
int(calendar.timegm(time.strptime(rd[4]+'UTC',"%Y%m%d%H%M%S%Z"))),
int(calendar.timegm(time.strptime(rd[5]+'UTC',"%Y%m%d%H%M%S%Z"))),
int(rd[6]),rd[7],
base64.b64decode(("".join(rd[8:])).encode('ascii')))
def __init__(self,covered,algorithm,labels,orig_ttl,
sig_exp,sig_inc,key_tag,name,sig):
self.covered = covered
self.algorithm = algorithm
self.labels = labels
self.orig_ttl = orig_ttl
self.sig_exp = sig_exp
self.sig_inc = sig_inc
self.key_tag = key_tag
self.name = DNSLabel(name)
self.sig = sig
def pack(self,buffer):
buffer.pack("!HBBIIIH",self.covered,self.algorithm,self.labels,
self.orig_ttl,self.sig_exp,self.sig_inc,
self.key_tag)
buffer.encode_name_nocompress(self.name)
buffer.append(self.sig)
def __repr__(self):
timestamp_fmt = "{0.tm_year}{0.tm_mon:02}{0.tm_mday:02}{0.tm_hour:02}{0.tm_min:02}{0.tm_sec:02}"
return "%s %d %d %d %s %s %d %s %s" % (
QTYPE.get(self.covered),
self.algorithm,
self.labels,
self.orig_ttl,
timestamp_fmt.format(time.gmtime(self.sig_exp)),
timestamp_fmt.format(time.gmtime(self.sig_inc)),
self.key_tag,
self.name,
base64.b64encode(self.sig).decode())
attrs = ('covered','algorithm','labels','orig_ttl','sig_exp','sig_inc',
'key_tag','name','sig')
RDMAP = { 'CNAME':CNAME, 'A':A, 'AAAA':AAAA, 'TXT':TXT, 'MX':MX,
'PTR':PTR, 'SOA':SOA, 'NS':NS, 'NAPTR': NAPTR, 'SRV':SRV,
'DNSKEY':DNSKEY, 'RRSIG':RRSIG,
}
secs = {'s':1,'m':60,'h':3600,'d':86400,'w':604800}
def parse_time(s):
"""
Parse time spec with optional s/m/h/d/w suffix
"""
if s[-1].lower() in secs:
return int(s[:-1]) * secs[s[-1].lower()]
else:
return int(s)
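# Examples of the suffix handling above:
#   parse_time("300") == 300
#   parse_time("2h")  == 7200
#   parse_time("1w")  == 604800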
class ZoneParser:
"""
Zone file parser
>>> z = ZoneParser("www.example.com. 60 IN A 1.2.3.4")
>>> list(z.parse())
[<DNS RR: 'www.example.com.' rtype=A rclass=IN ttl=60 rdata='1.2.3.4'>]
"""
def __init__(self,zone,origin="",ttl=0):
self.l = WordLexer(zone)
self.l.commentchars = ';'
self.l.nltok = ('NL',None)
self.l.spacetok = ('SPACE',None)
self.i = iter(self.l)
if type(origin) is DNSLabel:
self.origin = origin
else:
            self.origin = DNSLabel(origin)
self.ttl = ttl
self.label = DNSLabel("")
self.prev = None
def expect(self,expect):
t,val = next(self.i)
if t != expect:
raise ValueError("Invalid Token: %s (expecting: %s)" % (t,expect))
return val
def parse_label(self,label):
if label.endswith("."):
self.label = DNSLabel(label)
elif label == "@":
self.label = self.origin
elif label == '':
pass
else:
self.label = self.origin.add(label)
return self.label
def parse_rr(self,rr):
label = self.parse_label(rr.pop(0))
ttl = int(rr.pop(0)) if rr[0].isdigit() else self.ttl
rclass = rr.pop(0) if rr[0] in ('IN','CH','HS') else 'IN'
rtype = rr.pop(0)
rdata = rr
rd = RDMAP.get(rtype,RD)
return RR(rname=label,
ttl=ttl,
rclass=getattr(CLASS,rclass),
rtype=getattr(QTYPE,rtype),
rdata=rd.fromZone(rdata,self.origin))
def __iter__(self):
return self.parse()
def parse(self):
rr = []
paren = False
try:
while True:
tok,val = next(self.i)
if tok == 'NL':
if not paren and rr:
self.prev = tok
yield self.parse_rr(rr)
rr = []
elif tok == 'SPACE' and self.prev == 'NL' and not paren:
rr.append('')
elif tok == 'ATOM':
if val == '(':
paren = True
elif val == ')':
paren = False
elif val == '$ORIGIN':
self.expect('SPACE')
origin = self.expect('ATOM')
self.origin = self.label = DNSLabel(origin)
elif val == '$TTL':
self.expect('SPACE')
ttl = self.expect('ATOM')
self.ttl = parse_time(ttl)
else:
rr.append(val)
self.prev = tok
except StopIteration:
if rr:
yield self.parse_rr(rr)
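# Zone-file sketch (illustrative names) of the directives handled by
# parse() above: relative names are resolved against $ORIGIN, and a
# newline inside parentheses does not terminate the record.
#   $ORIGIN example.com.
#   $TTL 1h
#   @   IN SOA ns1 hostmaster ( 1 3600 600 86400 60 )
#   www IN A 1.2.3.4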
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
"""This module has the global key values that are used across framework
modules.
"""
import enum
class Config(enum.Enum):
"""Enum values for test config related lookups.
"""
# Keys used to look up values from test config files.
# These keys define the wording of test configs and their internal
# references.
key_log_path = "logpath"
key_testbed = "testbed"
key_testbed_name = "name"
key_config_path = "configpath"
# Internal keys, used internally, not exposed to user's config files.
ikey_user_param = "user_params"
ikey_testbed_name = "testbed_name"
ikey_logger = "log"
ikey_logpath = "log_path"
ikey_cli_args = "cli_args"
# A list of keys whose values in configs should not be passed to test
# classes without unpacking first.
reserved_keys = (key_testbed, key_log_path)
def get_name_by_value(value):
    """Returns the name of the Config member with the given value, or None."""
    for name, member in Config.__members__.items():
if member.value == value:
return name
return None
def get_internal_value(external_value):
"""Translates the value of an external key to the value of its
corresponding internal key.
"""
return value_to_value(external_value, "i%s")
def get_module_name(name_in_config):
"""Translates the name of a controller in config file to its module name.
"""
return value_to_value(name_in_config, "m_%s")
def value_to_value(ref_value, pattern):
    """Translates the value of one Config member to the value of a related
    member whose name is derived from the first member's name using the
    given name pattern (e.g. "i%s" maps key_foo to ikey_foo).
    """
ref_key_name = get_name_by_value(ref_value)
if not ref_key_name:
return None
target_key_name = pattern % ref_key_name
try:
return getattr(Config, target_key_name).value
except AttributeError:
return None
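# Lookup sketch using the helpers above:
#   get_name_by_value("logpath")  -> "key_log_path"
#   get_name_by_value("missing")  -> None
# value_to_value then derives the target member name from that key name;
# e.g. pattern "i%s" turns "key_testbed" into "ikey_testbed" and returns
# that member's value, or None when no such Config member exists.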
|
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as pmod
from . import templater
import decimal, datetime, string, random
from django.core.mail import send_mail, EmailMultiAlternatives
def process_request(request):
    '''Handles the forgot-password flow: emails the user a link to reset their password.'''
    # Users who are already logged in don't need a reset link; send them to
    # their account page instead.
    if request.user.is_authenticated():
        return HttpResponseRedirect('/shop/account')
    form = RequestForm(initial={
        'username': "",
    })
    # On POST, validate the submitted username and email the user a
    # time-limited link to reset their password.
if request.method == 'POST':
form = RequestForm(request.POST, request=request)
if form.is_valid():
user = pmod.User.objects.get(username=form.cleaned_data['username'])
link = ''.join(random.choice(string.ascii_uppercase) for i in range(15))
date = datetime.datetime.now() + datetime.timedelta(hours=2)
user.resetlink = link
user.resetdate = date
user.save()
html = "<html><body></body>Please click <a href=\"http://www.djuvo.com/shop/resetpassword/" + str(user.id) +"/" + str(user.resetlink) + "\">here</a> to reset your password. This reset link will expire in 2 hours.<br>Thank you!<br>HexPhotos</html>"
message = "/shop/resetpassword/" + str(user.id) +"/" + str(user.resetlink)
message = html
msg = EmailMultiAlternatives('HexPhotos Password Reset', message, 'hexphotos.byu@gmail.com', [user.email])
msg.attach_alternative(html, "text/html")
msg.send()
return HttpResponseRedirect('/shop/emailsent')
tvars = {
'form': form,
}
return templater.render_to_response(request, 'forgotpassword.html', tvars)
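# Note (a suggestion, not part of the original flow): random.choice is not a
# cryptographically secure source for reset tokens; the stdlib 'secrets'
# module is the usual alternative, e.g.:
#   import secrets
#   link = secrets.token_urlsafe(16)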
class RequestForm(forms.Form):
username = forms.CharField(required=False, label='Username', widget=forms.TextInput(attrs={'class':'form-control'}))
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(RequestForm, self).__init__(*args, **kwargs)
    # Raises a validation error if the username does not match any account.
    def clean(self):
        try:
            pmod.User.objects.get(username=self.cleaned_data['username'])
        except pmod.User.DoesNotExist:
            raise forms.ValidationError("That username does not correspond to a user.")
return self.cleaned_data
|
import webob.exc
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket import context as nova_context
from jacket.compute import exception
from jacket.i18n import _
from jacket.compute import servicegroup
from jacket.compute import utils
authorize = extensions.extension_authorizer('cloud', 'services')
class ServiceController(object):
def __init__(self, ext_mgr=None, *args, **kwargs):
self.host_api = cloud.HostAPI()
self.servicegroup_api = servicegroup.API()
self.ext_mgr = ext_mgr
def _get_services(self, req):
api_services = ('cloud-osapi_compute', 'cloud-ec2', 'cloud-metadata')
context = req.environ['compute.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks
nova_context.require_admin_context(context)
services = [
s
for s in self.host_api.service_get_all(context, set_zones=True)
if s['binary'] not in api_services
]
host = ''
if 'host' in req.GET:
host = req.GET['host']
binary = ''
if 'binary' in req.GET:
binary = req.GET['binary']
if host:
services = [s for s in services if s['host'] == host]
if binary:
services = [s for s in services if s['binary'] == binary]
return services
def _get_service_detail(self, svc, detailed):
alive = self.servicegroup_api.service_is_up(svc)
state = (alive and "up") or "down"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
service_detail = {'binary': svc['binary'], 'host': svc['host'],
'zone': svc['availability_zone'],
'status': active, 'state': state,
'updated_at': svc['updated_at']}
if self.ext_mgr.is_loaded('os-extended-services-delete'):
service_detail['id'] = svc['id']
if detailed:
service_detail['disabled_reason'] = svc['disabled_reason']
return service_detail
def _get_services_list(self, req, detailed):
services = self._get_services(req)
svcs = []
for svc in services:
svcs.append(self._get_service_detail(svc, detailed))
return svcs
def _is_valid_as_reason(self, reason):
try:
utils.check_string_length(reason.strip(), 'Disabled reason',
min_length=1, max_length=255)
except exception.InvalidInput:
return False
return True
@wsgi.response(204)
def delete(self, req, id):
"""Deletes the specified service."""
if not self.ext_mgr.is_loaded('os-extended-services-delete'):
raise webob.exc.HTTPMethodNotAllowed()
context = req.environ['compute.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks
nova_context.require_admin_context(context)
try:
utils.validate_integer(id, 'id')
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
try:
self.host_api.service_delete(context, id)
except exception.ServiceNotFound:
explanation = _("Service %s not found.") % id
raise webob.exc.HTTPNotFound(explanation=explanation)
def index(self, req):
"""Return a list of all running services."""
detailed = self.ext_mgr.is_loaded('os-extended-services')
services = self._get_services_list(req, detailed)
return {'services': services}
def update(self, req, id, body):
"""Enable/Disable scheduling for a service."""
context = req.environ['compute.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks
nova_context.require_admin_context(context)
ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
if id == "enable":
disabled = False
status = "enabled"
elif (id == "disable" or
(id == "disable-log-reason" and ext_loaded)):
disabled = True
status = "disabled"
else:
msg = _("Unknown action")
raise webob.exc.HTTPNotFound(explanation=msg)
try:
host = body['host']
binary = body['binary']
ret_value = {
'service': {
'host': host,
'binary': binary,
'status': status,
},
}
status_detail = {
'disabled': disabled,
'disabled_reason': None,
}
if id == "disable-log-reason":
reason = body['disabled_reason']
if not self._is_valid_as_reason(reason):
msg = _('The string containing the reason for disabling '
'the service contains invalid characters or is '
'too long.')
raise webob.exc.HTTPBadRequest(explanation=msg)
status_detail['disabled_reason'] = reason
ret_value['service']['disabled_reason'] = reason
except (TypeError, KeyError):
msg = _('Invalid attribute in the request')
if 'host' in body and 'binary' in body:
msg = _('Missing disabled reason field')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
self.host_api.service_update(context, host, binary, status_detail)
except exception.HostBinaryNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return ret_value
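    # Request sketch (illustrative host/binary values) for update() above,
    # e.g. PUT .../os-services/disable-log-reason with body:
    #   {"host": "compute-01", "binary": "cloud-compute",
    #    "disabled_reason": "maintenance"}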
class Services(extensions.ExtensionDescriptor):
"""Services support."""
name = "Services"
alias = "os-services"
namespace = "http://docs.openstack.org/cloud/ext/services/api/v2"
updated = "2012-10-28T00:00:00Z"
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('os-services',
ServiceController(self.ext_mgr))
resources.append(resource)
return resources
|
"""Liberated Pixel Cup [(LPC)][1] Sprites Dataset.
This file provides logic to download and build a version of the sprites
video sequence dataset as used in the Disentangled Sequential
Autoencoder paper [(Li and Mandt, 2018)][2].
[1]: Liberated Pixel Cup. http://lpc.opengameart.org. Accessed:
2018-07-20.
[2]: Yingzhen Li and Stephan Mandt. Disentangled Sequential Autoencoder.
In _International Conference on Machine Learning_, 2018.
https://arxiv.org/abs/1803.02991
"""
from collections import namedtuple
import os
import random
import zipfile
from absl import flags
from six.moves import urllib
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import lookup_ops # pylint: disable=g-direct-tensorflow-import
__all__ = ["SpritesDataset"]
flags.DEFINE_string(
"data_dir",
default=os.path.join(
os.getenv("TEST_TMPDIR", "/tmp"),
os.path.join("disentangled_vae", "data")),
help="Directory where the dataset is stored.")
DATA_SPRITES_URL = "https://github.com/jrconway3/Universal-LPC-spritesheet/archive/master.zip"
DATA_SPRITES_DIR = "Universal-LPC-spritesheet-master"
WIDTH = 832
HEIGHT = 1344
FRAME_SIZE = 64
CHANNELS = 4
SKIN_COLORS = [
os.path.join("body", "male", "light.png"),
os.path.join("body", "male", "tanned2.png"),
os.path.join("body", "male", "darkelf.png"),
os.path.join("body", "male", "darkelf2.png"),
os.path.join("body", "male", "dark.png"),
os.path.join("body", "male", "dark2.png")
]
HAIRSTYLES = [
os.path.join("hair", "male", "messy2", "green2.png"),
os.path.join("hair", "male", "ponytail", "blue2.png"),
os.path.join("hair", "male", "messy1", "light-blonde.png"),
os.path.join("hair", "male", "parted", "white.png"),
os.path.join("hair", "male", "plain", "ruby-red.png"),
os.path.join("hair", "male", "jewfro", "purple.png")
]
TOPS = [
os.path.join(
"torso", "shirts", "longsleeve", "male", "maroon_longsleeve.png"),
os.path.join(
"torso", "shirts", "longsleeve", "male", "teal_longsleeve.png"),
os.path.join(
"torso", "shirts", "longsleeve", "male", "white_longsleeve.png"),
os.path.join("torso", "plate", "chest_male.png"),
os.path.join("torso", "leather", "chest_male.png"),
os.path.join("formal_male_no_th-sh", "shirt.png")
]
PANTS = [
os.path.join("legs", "pants", "male", "white_pants_male.png"),
os.path.join("legs", "armor", "male", "golden_greaves_male.png"),
os.path.join("legs", "pants", "male", "red_pants_male.png"),
os.path.join("legs", "armor", "male", "metal_pants_male.png"),
os.path.join("legs", "pants", "male", "teal_pants_male.png"),
os.path.join("formal_male_no_th-sh", "pants.png")
]
Action = namedtuple("Action", ["name", "start_row", "frames"])
ACTIONS = [
Action("walk", 8, 9),
Action("spellcast", 0, 7),
Action("slash", 12, 6)
]
Direction = namedtuple("Direction", ["name", "row_offset"])
DIRECTIONS = [
Direction("west", 1),
Direction("south", 2),
Direction("east", 3),
]
FLAGS = flags.FLAGS
def read_image(filepath):
"""Returns an image tensor."""
im_bytes = tf.io.read_file(filepath)
im = tf.image.decode_image(im_bytes, channels=CHANNELS)
im = tf.image.convert_image_dtype(im, tf.float32)
return im
def join_seq(seq):
"""Joins a sequence side-by-side into a single image."""
return tf.concat(tf.unstack(seq), 1)
def download_sprites():
"""Downloads the sprites data and returns the saved filepath."""
filepath = os.path.join(FLAGS.data_dir, DATA_SPRITES_DIR)
if not tf.io.gfile.exists(filepath):
if not tf.io.gfile.exists(FLAGS.data_dir):
tf.io.gfile.makedirs(FLAGS.data_dir)
zip_name = "{}.zip".format(filepath)
urllib.request.urlretrieve(DATA_SPRITES_URL, zip_name)
with zipfile.ZipFile(zip_name, "r") as zip_file:
zip_file.extractall(FLAGS.data_dir)
tf.io.gfile.remove(zip_name)
return filepath
def create_character(skin, hair, top, pants):
"""Creates a character sprite from a set of attribute sprites."""
dtype = skin.dtype
hair_mask = tf.cast(hair[..., -1:] <= 0, dtype)
top_mask = tf.cast(top[..., -1:] <= 0, dtype)
pants_mask = tf.cast(pants[..., -1:] <= 0, dtype)
char = (skin * hair_mask) + hair
char = (char * top_mask) + top
char = (char * pants_mask) + pants
return char
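# Compositing sketch for create_character above: each layer's alpha channel
# acts as a stencil, e.g. for the hair layer
#   char = skin * (hair_alpha == 0) + hair
# so base pixels survive only where the overlay is fully transparent.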
def create_seq(character, action_metadata, direction, length=8, start=0):
"""Creates a sequence.
Args:
character: A character sprite tensor.
action_metadata: An action metadata tuple.
direction: An integer representing the direction, i.e., the row
offset within each action group corresponding to a particular
direction.
length: Desired length of the sequence. If this is longer than
the number of available frames, it will roll over to the
beginning.
    start: Index of the frame at which to start the sequence.
Returns:
A sequence tensor.
"""
sprite_start = (action_metadata[0]+direction) * FRAME_SIZE
sprite_end = (action_metadata[0]+direction+1) * FRAME_SIZE
sprite_line = character[sprite_start:sprite_end, ...]
# Extract 64x64 patches that are side-by-side in the sprite, and limit
# to the actual number of frames for the given action.
  # Each sprite row holds WIDTH // FRAME_SIZE = 13 side-by-side frames.
  frames = tf.stack(tf.split(sprite_line, 13, axis=1))
frames = frames[0:action_metadata[1]]
# Extract a slice of the desired length.
# NOTE: Length could be longer than the number of frames, so tile as needed.
frames = tf.roll(frames, shift=-start, axis=0)
  frames = tf.tile(frames, [2, 1, 1, 1])  # double so >= length frames remain after the roll
frames = frames[:length]
frames = tf.cast(frames, dtype=tf.float32)
frames.set_shape([length, FRAME_SIZE, FRAME_SIZE, CHANNELS])
return frames
def create_random_seq(character, action_metadata, direction, length=8):
"""Creates a random sequence."""
start = tf.random.uniform([], maxval=action_metadata[1], dtype=tf.int32)
return create_seq(character, action_metadata, direction, length, start)
def create_sprites_dataset(characters, actions, directions, channels=3,
length=8, shuffle=False, fake_data=False):
"""Creates a tf.data pipeline for the sprites dataset.
Args:
characters: A list of (skin, hair, top, pants) tuples containing
relative paths to the sprite png image for each attribute.
actions: A list of Actions.
directions: A list of Directions.
channels: Number of image channels to yield.
length: Desired length of the sequences.
shuffle: Whether or not to shuffle the characters and sequences
start frame.
fake_data: Boolean for whether or not to yield synthetic data.
Returns:
A tf.data.Dataset yielding (seq, skin label index, hair label index,
top label index, pants label index, action label index, skin label
name, hair label_name, top label name, pants label name, action
label name) tuples.
"""
if fake_data:
dummy_image = tf.random.normal([HEIGHT, WIDTH, CHANNELS])
else:
basedir = download_sprites()
action_names = [action.name for action in actions]
action_metadata = [(action.start_row, action.frames) for action in actions]
direction_rows = [direction.row_offset for direction in directions]
chars = tf.data.Dataset.from_tensor_slices(characters)
act_names = tf.data.Dataset.from_tensor_slices(action_names).repeat()
acts_metadata = tf.data.Dataset.from_tensor_slices(action_metadata).repeat()
dir_rows = tf.data.Dataset.from_tensor_slices(direction_rows).repeat()
if shuffle:
chars = chars.shuffle(len(characters))
dataset = tf.data.Dataset.zip((chars, act_names, acts_metadata, dir_rows))
skin_table = lookup_ops.index_table_from_tensor(sorted(SKIN_COLORS))
hair_table = lookup_ops.index_table_from_tensor(sorted(HAIRSTYLES))
top_table = lookup_ops.index_table_from_tensor(sorted(TOPS))
pants_table = lookup_ops.index_table_from_tensor(sorted(PANTS))
action_table = lookup_ops.index_table_from_tensor(sorted(action_names))
def process_example(attrs, act_name, act_metadata, dir_row_offset):
"""Processes a dataset row."""
skin_name = attrs[0]
hair_name = attrs[1]
top_name = attrs[2]
pants_name = attrs[3]
if fake_data:
char = dummy_image
else:
skin = read_image(basedir + os.sep + skin_name)
hair = read_image(basedir + os.sep + hair_name)
top = read_image(basedir + os.sep + top_name)
pants = read_image(basedir + os.sep + pants_name)
char = create_character(skin, hair, top, pants)
if shuffle:
seq = create_random_seq(char, act_metadata, dir_row_offset, length)
else:
seq = create_seq(char, act_metadata, dir_row_offset, length)
seq = seq[..., :channels] # limit output channels
skin_idx = skin_table.lookup(skin_name)
hair_idx = hair_table.lookup(hair_name)
top_idx = top_table.lookup(top_name)
pants_idx = pants_table.lookup(pants_name)
act_idx = action_table.lookup(act_name)
return (seq, skin_idx, hair_idx, top_idx, pants_idx, act_idx,
skin_name, hair_name, top_name, pants_name, act_name)
dataset = dataset.map(process_example)
return dataset
class SpritesDataset(object):
"""Liberated Pixel Cup [(LPC)][1] Sprites Dataset.
  This class provides logic to download and build a version of the
  sprites video sequence dataset as used in the Disentangled Sequential
Autoencoder paper [(Li and Mandt, 2018)][2]. The dataset contains
sprites (graphics files used to generate animated sequences) of human
characters wearing a variety of clothing, and performing a variety of
actions. The paper limits the dataset used for training to four
attribute categories (skin color, hairstyles, tops, and pants), each
of which are limited to include six variants. Thus, there are
6^4 = 1296 possible animated characters in this dataset. The
characters are shuffled and deterministically split such that 1000
characters are used for the training set, and 296 are used for the
testing set. The numbers are consistent with the paper, but the exact
split is impossible to match given the currently available paper
details. The actions are limited to three categories (walking,
casting spells, and slashing), each with three viewing angles.
Sequences of length T=8 frames are generated depicting a given
character performing a given action, starting at a random frame in the
sequence.
Attributes:
train: Training dataset with 1000 characters each performing an
action.
test: Testing dataset with 296 characters each performing an action.
#### References:
[1]: Liberated Pixel Cup. http://lpc.opengameart.org. Accessed:
2018-07-20.
[2]: Yingzhen Li and Stephan Mandt. Disentangled Sequential
Autoencoder. In _International Conference on Machine Learning_,
2018. https://arxiv.org/abs/1803.02991
"""
def __init__(self, channels=3, shuffle_train=True, fake_data=False):
"""Creates the SpritesDataset and stores train and test datasets.
The datasets yield (seq, skin label index, hair label index, top
label index, pants label index, action label index, skin label name,
hair label_name, top label name, pants label name, action label
name) tuples.
Args:
channels: Number of image channels to yield.
shuffle_train: Boolean for whether or not to shuffle the training
set.
fake_data: Boolean for whether or not to yield synthetic data.
Raises:
ValueError: If the number of training or testing examples is
        incorrect, or if there is overlap between the two datasets.
"""
super(SpritesDataset, self).__init__()
self.frame_size = FRAME_SIZE
self.channels = channels
self.length = 8
num_train = 1000
num_test = 296
characters = [(skin, hair, top, pants)
for skin in sorted(SKIN_COLORS)
for hair in sorted(HAIRSTYLES)
for top in sorted(TOPS)
for pants in sorted(PANTS)]
random.seed(42)
random.shuffle(characters)
train_chars = characters[:num_train]
test_chars = characters[num_train:]
num_train_actual = len(set(train_chars))
num_test_actual = len(set(test_chars))
num_train_test_overlap = len(set(train_chars) & set(test_chars))
if num_train_actual != num_train:
raise ValueError(
"Unexpected number of training examples: {}.".format(
num_train_actual))
if num_test_actual != num_test:
raise ValueError(
"Unexpected number of testing examples: {}.".format(
num_test_actual))
if num_train_test_overlap > 0: # pylint: disable=g-explicit-length-test
raise ValueError(
"Overlap between train and test datasets detected: {}.".format(
num_train_test_overlap))
self.train = create_sprites_dataset(
train_chars, ACTIONS, DIRECTIONS, self.channels, self.length,
shuffle=shuffle_train, fake_data=fake_data)
self.test = create_sprites_dataset(
test_chars, ACTIONS, DIRECTIONS, self.channels, self.length,
shuffle=False, fake_data=fake_data)
|
from ztag.annotation import *
class VarnishServer(Annotation):
protocol = protocols.HTTP
subprotocol = protocols.HTTP.GET
port = None
def process(self, obj, meta):
vendor = obj["headers"]["server"]
if "varnish" in vendor.lower():
meta.local_metadata.manufacturer = Manufacturer.VARNISH
meta.local_metadata.product = "HTTP Server"
return meta
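    # Matching sketch: a response whose headers include, e.g.,
    #   Server: varnish/6.0
    # is tagged with Manufacturer.VARNISH and product "HTTP Server".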
|