"""
"""
import numpy as np
from numpy import ma
def bin_spike(x, l):
"""
    l is the number of points used for comparison, thus l=2 means that each
    point will be compared only against the previous and following
    measurements. l=2 is probably not a good choice, too small.
Maybe use pstsd instead?
Dummy way to avoid warnings when x[ini:fin] are all masked.
Improve this in the future.
"""
assert x.ndim == 1, "I'm not ready to deal with multidimensional x"
assert l%2 == 0, "l must be an even integer"
N = len(x)
bin = ma.masked_all(N)
# bin_std = ma.masked_all(N)
    half_window = l // 2
idx = (i for i in range(half_window, N - half_window) if np.isfinite(x[i]))
for i in idx:
ini = max(0, i - half_window)
fin = min(N, i + half_window)
# At least 3 valid points
if ma.compressed(x[ini:fin]).size >= 3:
bin[i] = x[i] - ma.median(x[ini:fin])
# bin_std[i] = (np.append(x[ini:i], x[i+1:fin+1])).std()
bin[i] /= (np.append(x[ini:i], x[i+1:fin+1])).std()
return bin
class Bin_Spike(object):
def __init__(self, data, varname, cfg, autoflag=True):
self.data = data
self.varname = varname
self.cfg = cfg
self.set_features()
if autoflag:
self.test()
def keys(self):
        return list(self.features.keys()) + \
            ["flag_%s" % f for f in self.flags.keys()]
def set_features(self):
self.features = {'bin_spike': bin_spike(self.data[self.varname],
self.cfg['l'])}
def test(self):
self.flags = {}
try:
threshold = self.cfg['threshold']
        except (KeyError, TypeError):
print("Deprecated cfg format. It should contain a threshold item.")
threshold = self.cfg
try:
flag_good = self.cfg['flag_good']
flag_bad = self.cfg['flag_bad']
        except (KeyError, TypeError):
print("Deprecated cfg format. It should contain flag_good & flag_bad.")
flag_good = 1
flag_bad = 3
assert (np.size(threshold) == 1) and \
(threshold is not None) and \
(np.isfinite(threshold))
flag = np.zeros(self.data[self.varname].shape, dtype='i1')
flag[np.nonzero(self.features['bin_spike'] > threshold)] = flag_bad
flag[np.nonzero(self.features['bin_spike'] <= threshold)] = flag_good
flag[ma.getmaskarray(self.data[self.varname])] = 9
self.flags['bin_spike'] = flag
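# Hedged usage sketch (not part of the original module). The cfg keys below are
# inferred from the class above; the profile values and the threshold are
# illustrative assumptions only.
if __name__ == "__main__":
    profile = {"TEMP": ma.masked_invalid([1.0, 1.1, 1.2, 5.0, 1.3, 1.4, 1.2, 1.1])}
    cfg = {"l": 4, "threshold": 4.0, "flag_good": 1, "flag_bad": 3}
    qc = Bin_Spike(profile, "TEMP", cfg)
    print(qc.features["bin_spike"])  # normalized deviation from the running median
    print(qc.flags["bin_spike"])     # per the test() above: 1 = good, 3 = bad, 9 = masked input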
|
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import rfpipe, rfpipe.candidates
import pytest
from astropy import time
from numpy import degrees, nan, argmax, abs
tparams = [(0, 0, 0, 5e-3, 0.3, 0.0001, 0.0),]
inprefs = [({'flaglist': [], 'chans': list(range(32)), 'sigma_image1': None,
'spw': [0], 'savecandcollection': True, 'savenoise': True,
'savecanddata': True, 'returncanddata': True, 'saveplots': True,
'fftmode': 'fftw', 'searchtype': 'imagek'}, 1),
({'simulated_transient': tparams, 'dmarr': [0, 1, 2], 'dtarr': [1, 2],
'savecanddata': True, 'savenoise': True, 'saveplots': True,
'returncanddata': True, 'savecandcollection': True,
'timesub': 'mean', 'fftmode': 'fftw', 'searchtype': 'imagek',
'sigma_image1': 10, 'sigma_kalman': 1,
'clustercands': True, 'flaglist': []}, 2),
({'simulated_transient': tparams, 'dmarr': [0, 1, 2], 'dtarr': [1, 2],
'savecanddata': True, 'savenoise': True, 'saveplots': True,
'returncanddata': True, 'savecandcollection': True,
'timesub': 'cs', 'fftmode': 'fftw', 'searchtype': 'imagek',
'sigma_image1': 10, 'sigma_kalman': 1,
'clustercands': True, 'flaglist': []}, 2),]
@pytest.fixture(scope="module", params=inprefs)
def mockstate(request):
inprefs, scan = request.param
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=scan, datasource='sim',
antconfig='D')
return rfpipe.state.State(inmeta=meta, inprefs=inprefs)
@pytest.fixture(scope="module")
def mockdata(mockstate):
segment = 0
data = rfpipe.source.read_segment(mockstate, segment)
data[0, 0, 0, 0] = nan
return rfpipe.source.data_prep(mockstate, segment, data)
@pytest.fixture(scope="module")
def mockcc(mockstate):
cc = rfpipe.pipeline.pipeline_scan(mockstate)
return cc
def test_dataprep(mockstate, mockdata):
assert mockdata.shape == mockstate.datashape
def test_noise(mockstate, mockdata):
for noises in rfpipe.candidates.iter_noise(mockstate.noisefile):
assert len(noises)
def test_pipelinescan(mockcc):
if mockcc.prefs.simulated_transient is not None:
rfpipe.candidates.makesummaryplot(mockcc)
assert mockcc is not None
def test_voevent(mockcc):
if mockcc.prefs.simulated_transient is not None:
name = rfpipe.candidates.make_voevent(mockcc)
assert name is not None
def test_candids(mockcc):
if mockcc.prefs.simulated_transient is not None:
assert len(mockcc.candids)
def test_cc(mockcc):
if mockcc.prefs.returncanddata:
assert isinstance(mockcc.canddata, list)
assert len(mockcc.canddata) == len(mockcc)
if mockcc.prefs.savecandcollection:
ccs = rfpipe.candidates.iter_cands(mockcc.state.candsfile)
cc = sum(ccs)
assert len(cc) == len(mockcc)
if cc.prefs.returncanddata:
assert isinstance(cc.canddata, list)
assert len(cc.canddata) == len(cc)
assert len(cc.canddata) == len(mockcc.canddata)
def test_phasecenter_detection():
inprefs = {'simulated_transient': [(0, 1, 0, 5e-3, 0.3, -0.001, 0.),
(0, 9, 0, 5e-3, 0.3, 0., 0.),
(0, 19, 0, 5e-3, 0.3, 0.001, 0.)],
'dmarr': [0], 'dtarr': [1], 'timesub': None, 'fftmode': 'fftw', 'searchtype': 'image',
'sigma_image1': 10, 'flaglist': [], 'uvres': 60, 'npix_max': 128, 'max_candfrac': 0}
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=1, datasource='sim',
antconfig='D')
st = rfpipe.state.State(inmeta=meta, inprefs=inprefs)
cc = rfpipe.pipeline.pipeline_scan(st)
assert cc.array['l1'][0] <= 0.
assert cc.array['l1'][1] == 0.
assert cc.array['l1'][2] >= 0.
assert all(abs(cc.array['m1']) <= 0.0003)
def test_phasecenter_detection_shift():
inprefs = {'simulated_transient': [(0, 1, 0, 5e-3, 0.3, -0.001, 0.),
(0, 9, 0, 5e-3, 0.3, 0., 0.),
(0, 19, 0, 5e-3, 0.3, 0.001, 0.)],
'dmarr': [0], 'dtarr': [1], 'timesub': None, 'fftmode': 'fftw', 'searchtype': 'image',
'sigma_image1': 10, 'flaglist': [], 'uvres': 60, 'npix_max': 128, 'max_candfrac': 0}
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=1, datasource='sim',
antconfig='D')
meta['phasecenters'] = [(t0, t0+0.01/(24*3600), degrees(0.001), 0.),
(t0+0.01/(24*3600), t0+0.05/(24*3600), 0., 0.),
(t0+0.05/(24*3600), t0+0.1/(24*3600), degrees(-0.001), 0.)]
st = rfpipe.state.State(inmeta=meta, inprefs=inprefs)
cc = rfpipe.pipeline.pipeline_scan(st)
assert all(cc.array['l1'] == 0.)
assert all(cc.array['m1'] == 0.)
def test_wide_transient():
print("Try injecting a transient of width 40ms at integration 8")
inprefs = {'simulated_transient': [(0, 8, 0, 40e-3, 0.3, 0., 0.)],
'dmarr': [0], 'dtarr': [1,2,4,8], 'timesub': None, 'fftmode': 'fftw', 'searchtype': 'image',
'sigma_image1': 10, 'flaglist': [], 'uvres': 60, 'npix_max': 128, 'max_candfrac': 0}
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=1, datasource='sim',
antconfig='D')
st = rfpipe.state.State(inmeta=meta, inprefs=inprefs)
cc = rfpipe.pipeline.pipeline_scan(st)
ind = argmax(cc.array['snr1'])
assert cc.array['dtind'][ind] == 3
assert cc.array['integration'][ind]*2**cc.array['dtind'][ind] == 8
print("Try injecting a transient of width 20ms at integration 8")
inprefs['simulated_transient'] = [(0, 8, 0, 20e-3, 0.3, 0., 0.)]
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=1, datasource='sim',
antconfig='D')
st = rfpipe.state.State(inmeta=meta, inprefs=inprefs)
cc = rfpipe.pipeline.pipeline_scan(st)
ind = argmax(cc.array['snr1'])
assert cc.array['dtind'][ind] == 2
assert cc.array['integration'][ind]*2**cc.array['dtind'][ind] == 8
|
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized
from corehq import privileges
from corehq.apps.accounting.models import BillingAccount
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.filters.users import (
ChangeActionFilter,
ChangedByUserFilter,
EnterpriseUserFilter,
)
from corehq.apps.reports.filters.users import \
ExpandedMobileWorkerFilter as EMWF
from corehq.apps.reports.generic import GenericTabularReport, GetParamsMixin, PaginatedReportMixin
from corehq.apps.reports.standard import DatespanMixin, ProjectReport
from corehq.apps.users.audit.change_messages import (
ASSIGNED_LOCATIONS_FIELD,
CHANGE_MESSAGES_FIELDS,
DOMAIN_FIELD,
LOCATION_FIELD,
PHONE_NUMBERS_FIELD,
ROLE_FIELD,
TWO_FACTOR_FIELD,
get_messages,
)
from corehq.apps.users.models import UserHistory
from corehq.const import USER_DATETIME_FORMAT
from corehq.util.timezones.conversions import ServerTime
class UserHistoryReport(GetParamsMixin, DatespanMixin, GenericTabularReport, ProjectReport, PaginatedReportMixin):
slug = 'user_history'
name = ugettext_lazy("User History")
section_name = ugettext_lazy("User Management")
dispatcher = UserManagementReportDispatcher
fields = [
'corehq.apps.reports.filters.users.AffectedUserFilter',
'corehq.apps.reports.filters.users.ChangedByUserFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'corehq.apps.reports.filters.users.ChangeActionFilter',
'corehq.apps.reports.filters.users.UserPropertyFilter',
'corehq.apps.reports.filters.users.UserUploadRecordFilter',
]
description = ugettext_lazy("History of user updates")
ajax_pagination = True
default_sort = {'changed_at': 'desc'}
@classmethod
def get_primary_properties(cls, domain):
"""
Get slugs and human-friendly names for the properties that are available
for filtering and/or displayed by default in the report, without
needing to click "See More".
"""
if domain_has_privilege(domain, privileges.APP_USER_PROFILES):
user_data_label = _("profile or user data")
else:
user_data_label = _("user data")
return {
"username": _("username"),
ROLE_FIELD: _("role"),
"email": _("email"),
DOMAIN_FIELD: _("project"),
"is_active": _("is active"),
"language": _("language"),
PHONE_NUMBERS_FIELD: _("phone numbers"),
LOCATION_FIELD: _("primary location"),
"user_data": user_data_label,
TWO_FACTOR_FIELD: _("two factor authentication disabled"),
ASSIGNED_LOCATIONS_FIELD: _("assigned locations"),
}
@property
def headers(self):
h = [
DataTablesColumn(_("Affected User"), sortable=False),
DataTablesColumn(_("Modified by User"), sortable=False),
DataTablesColumn(_("Action"), prop_name='action'),
DataTablesColumn(_("Via"), prop_name='changed_via'),
DataTablesColumn(_("Changes"), sortable=False),
DataTablesColumn(_("Change Message"), sortable=False),
DataTablesColumn(_("Timestamp"), prop_name='changed_at'),
]
return DataTablesHeader(*h)
@property
def total_records(self):
return self._get_queryset().count()
@memoized
def _get_queryset(self):
user_slugs = self.request.GET.getlist(EMWF.slug)
user_ids = self._get_user_ids(user_slugs)
# return empty queryset if no matching users were found
if user_slugs and not user_ids:
return UserHistory.objects.none()
changed_by_user_slugs = self.request.GET.getlist(ChangedByUserFilter.slug)
changed_by_user_ids = self._get_user_ids(changed_by_user_slugs)
# return empty queryset if no matching users were found
if changed_by_user_slugs and not changed_by_user_ids:
return UserHistory.objects.none()
user_property = self.request.GET.get('user_property')
actions = self.request.GET.getlist('action')
user_upload_record_id = self.request.GET.get('user_upload_record')
query = self._build_query(user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id)
return query
def _get_user_ids(self, slugs):
es_query = self._get_users_es_query(slugs)
return es_query.values_list('_id', flat=True)
def _get_users_es_query(self, slugs):
return EnterpriseUserFilter.user_es_query(
self.domain,
slugs,
self.request.couch_user,
)
def _build_query(self, user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id):
filters = Q(for_domain__in=self._for_domains())
if user_ids:
filters = filters & Q(user_id__in=user_ids)
if changed_by_user_ids:
filters = filters & Q(changed_by__in=changed_by_user_ids)
if user_property:
filters = filters & self._get_property_filters(user_property)
if actions and ChangeActionFilter.ALL not in actions:
filters = filters & Q(action__in=actions)
if user_upload_record_id:
filters = filters & Q(user_upload_record_id=user_upload_record_id)
if self.datespan:
filters = filters & Q(changed_at__lt=self.datespan.enddate_adjusted,
changed_at__gte=self.datespan.startdate)
return UserHistory.objects.filter(filters)
def _for_domains(self):
return BillingAccount.get_account_by_domain(self.domain).get_domains()
@staticmethod
def _get_property_filters(user_property):
if user_property in CHANGE_MESSAGES_FIELDS:
query_filters = Q(change_messages__has_key=user_property)
# to include CommCareUser creation from UI where a location can be assigned as a part of user creation
# which is tracked only under "changes" and not "change messages"
if user_property == LOCATION_FIELD:
query_filters = query_filters | Q(changes__has_key='location_id')
else:
query_filters = Q(changes__has_key=user_property)
return query_filters
@property
def rows(self):
records = self._get_queryset().order_by(self.ordering)[
self.pagination.start:self.pagination.start + self.pagination.count
]
for record in records:
yield self._user_history_row(record, self.domain, self.timezone)
@property
def ordering(self):
by, direction = list(self.get_sorting_block()[0].items())[0]
return '-' + by if direction == 'desc' else by
@memoized
def _get_location_name(self, location_id):
from corehq.apps.locations.models import SQLLocation
if not location_id:
return None
try:
location_object = SQLLocation.objects.get(location_id=location_id)
except ObjectDoesNotExist:
return None
return location_object.display_name
def _user_history_row(self, record, domain, timezone):
return [
record.user_repr,
record.changed_by_repr,
_get_action_display(record.action),
record.changed_via,
self._user_history_details_cell(record.changes, domain),
self._html_list(list(get_messages(record.change_messages))),
ServerTime(record.changed_at).user_time(timezone).ui_string(USER_DATETIME_FORMAT),
]
def _html_list(self, changes):
items = []
if isinstance(changes, dict):
for key, value in changes.items():
if isinstance(value, dict):
value = self._html_list(value)
elif isinstance(value, list):
value = format_html(", ".join(value))
else:
value = format_html(str(value))
items.append("<li>{}: {}</li>".format(key, value))
elif isinstance(changes, list):
items = ["<li>{}</li>".format(format_html(change)) for change in changes]
return mark_safe(f"<ul class='list-unstyled'>{''.join(items)}</ul>")
def _user_history_details_cell(self, changes, domain):
properties = UserHistoryReport.get_primary_properties(domain)
properties.pop("user_data", None)
primary_changes = {}
all_changes = {}
for key, value in changes.items():
if key == 'location_id':
value = self._get_location_name(value)
primary_changes[properties[LOCATION_FIELD]] = value
all_changes[properties[LOCATION_FIELD]] = value
elif key == 'user_data':
for user_data_key, user_data_value in changes['user_data'].items():
all_changes[f"user data: {user_data_key}"] = user_data_value
elif key in properties:
primary_changes[properties[key]] = value
all_changes[properties[key]] = value
more_count = len(all_changes) - len(primary_changes)
return render_to_string("reports/standard/partials/user_history_changes.html", {
"primary_changes": self._html_list(primary_changes),
"all_changes": self._html_list(all_changes),
"more_count": more_count,
})
def _get_action_display(logged_action):
action = ugettext_lazy("Updated")
if logged_action == UserHistory.CREATE:
action = ugettext_lazy("Added")
elif logged_action == UserHistory.DELETE:
action = ugettext_lazy("Deleted")
return action
|
import threading
import numpy as np
def ros_ensure_valid_name(name):
return name.replace('-','_')
def lineseg_box(xmin, ymin, xmax, ymax):
return [ [xmin,ymin,xmin,ymax],
[xmin,ymax,xmax,ymax],
[xmax,ymax,xmax,ymin],
[xmax,ymin,xmin,ymin],
]
def lineseg_circle(x,y,radius,N=64):
draw_linesegs = []
theta = np.arange(N)*2*np.pi/N
xdraw = x+np.cos(theta)*radius
ydraw = y+np.sin(theta)*radius
for i in range(N-1):
draw_linesegs.append(
(xdraw[i],ydraw[i],xdraw[i+1],ydraw[i+1]))
draw_linesegs.append(
(xdraw[-1],ydraw[-1],xdraw[0],ydraw[0]))
return draw_linesegs
class SharedValue:
def __init__(self):
self.evt = threading.Event()
self._val = None
def set(self,value):
# called from producer thread
self._val = value
self.evt.set()
def is_new_value_waiting(self):
        return self.evt.is_set()
def get(self,*args,**kwargs):
# called from consumer thread
self.evt.wait(*args,**kwargs)
val = self._val
self.evt.clear()
return val
def get_nowait(self):
# XXX TODO this is not atomic and is thus dangerous.
# (The value could get read, then another thread could set it,
# and only then might it get flagged as clear by this thread,
# even though a new value is waiting.)
val = self._val
self.evt.clear()
return val
class SharedValue1(object):
def __init__(self,initial_value):
self._val = initial_value
self.lock = threading.Lock()
def get(self):
self.lock.acquire()
try:
val = self._val
finally:
self.lock.release()
return val
def set(self,new_value):
self.lock.acquire()
try:
self._val = new_value
finally:
self.lock.release()
|
import numpy as nm
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/square_unit_tri.mesh'
def get_pars(ts, coors, mode=None, region=None, ig=None, extra_arg=None):
if mode == 'special':
if extra_arg == 'hello!':
ic = 0
else:
ic = 1
return {('x_%s' % ic) : coors[:,ic]}
def get_p_edge(ts, coors, bc=None):
if bc.name == 'p_left':
return nm.sin(nm.pi * coors[:,1])
else:
return nm.cos(nm.pi * coors[:,1])
def get_circle(coors, domain=None):
r = nm.sqrt(coors[:,0]**2.0 + coors[:,1]**2.0)
return nm.where(r < 0.2)[0]
functions = {
'get_pars1' : (lambda ts, coors, mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig, extra_arg='hello!'),),
'get_p_edge' : (get_p_edge,),
'get_circle' : (get_circle,),
}
function_1 = {
'name' : 'get_pars2',
'function' : lambda ts, coors,mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig, extra_arg='hi!'),
}
materials = {
'mf1' : (None, 'get_pars1'),
'mf2' : 'get_pars2',
# Dot denotes a special value, that is not propagated to all QP.
'mf3' : ({'a' : 10.0, 'b' : 2.0, '.c' : 'ahoj'},),
}
fields = {
'pressure' : (nm.float64, 1, 'Omega', 2),
}
variables = {
'p' : ('unknown field', 'pressure', 0),
'q' : ('test field', 'pressure', 'p'),
}
wx = 0.499
regions = {
'Omega' : ('all', {}),
'Left' : ('nodes in (x < -%.3f)' % wx, {}),
'Right' : ('nodes in (x > %.3f)' % wx, {}),
'Circle' : ('nodes by get_circle', {}),
}
ebcs = {
'p_left' : ('Left', {'p.all' : 'get_p_edge'}),
'p_right' : ('Right', {'p.all' : 'get_p_edge'}),
}
equations = {
'e1' : """dw_laplace.2.Omega( mf3.a, q, p ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
}
fe = {
'chunk_size' : 1000
}
from sfepy.base.testing import TestCommon, assert_
from sfepy.base.base import pause, debug
class Test( TestCommon ):
def from_conf( conf, options ):
from sfepy.fem import ProblemDefinition
problem = ProblemDefinition.from_conf(conf)
test = Test(problem = problem, conf = conf, options = options)
return test
from_conf = staticmethod( from_conf )
def test_material_functions(self):
problem = self.problem
ts = problem.get_default_ts(step=0)
problem.materials.time_update(ts,
problem.domain,
problem.equations)
coors = problem.domain.get_mesh_coors()
mat1 = problem.materials['mf1']
assert_(nm.all(coors[:,0] == mat1.get_data(None, None, 'x_0')))
mat2 = problem.materials['mf2']
assert_(nm.all(coors[:,1] == mat2.get_data(None, None, 'x_1')))
mat3 = problem.materials['mf3']
key = mat3.get_keys(region_name='Omega')[0]
assert_(nm.all(mat3.get_data(key, 0, 'a') == 10.0))
assert_(nm.all(mat3.get_data(key, 0, 'b') == 2.0))
assert_(mat3.get_data(None, None, 'c') == 'ahoj')
return True
def test_ebc_functions(self):
import os.path as op
problem = self.problem
problem.set_equations(self.conf.equations)
problem.time_update()
vec = problem.solve()
name = op.join(self.options.out_dir,
op.splitext(op.basename(__file__))[0] + '_ebc.vtk')
problem.save_state(name, vec)
ok = True
domain = problem.domain
iv = domain.regions['Left'].get_vertices(0)
coors = domain.get_mesh_coors()[iv]
ok = ok and self.compare_vectors(vec[iv], nm.sin(nm.pi * coors[:,1]),
label1='state_left', label2='bc_left')
iv = domain.regions['Right'].get_vertices(0)
coors = domain.get_mesh_coors()[iv]
ok = ok and self.compare_vectors(vec[iv], nm.cos(nm.pi * coors[:,1]),
label1='state_right', label2='bc_right')
return ok
def test_region_functions(self):
import os.path as op
problem = self.problem
name = op.join(self.options.out_dir,
op.splitext(op.basename(__file__))[0])
problem.save_regions(name, ['Circle'])
return True
|
__version__ = '0.5.0'
request_post_identifier = 'current_aldryn_blog_entry'
|
"""Helper utilities and decorators."""
from flask import flash, render_template, current_app
def flash_errors(form, category="warning"):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash("{0} - {1}"
.format(getattr(form, field).label.text, error), category)
def render_extensions(template_path, **kwargs):
"""
Wraps around the standard render template method and shoves in some other stuff out of the config.
:param template_path:
:param kwargs:
:return:
"""
return render_template(template_path,
_GOOGLE_ANALYTICS=current_app.config['GOOGLE_ANALYTICS'],
**kwargs)
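# Hedged usage sketch (illustrative; assumes the Flask app defines a
# GOOGLE_ANALYTICS config key and an "index.html" template):
#
#   @app.route("/")
#   def index():
#       return render_extensions("index.html", page_title="Home")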
|
"""
pypm.common.util
~~~~~~~~~~~~~~~~
Assorted utility code
"""
import os
from os import path as P
import sys
import re
from contextlib import contextmanager
import logging
import time
import textwrap
from datetime import datetime
from pkg_resources import Requirement
from pkg_resources import resource_filename
import six
import pypm
from zclockfile import LockFile
LOG = logging.getLogger(__name__)
def wrapped(txt, prefix='', **options):
"""Return wrapped text suitable for printing to terminal"""
MAX_WIDTH=70 # textwrap.wrap's default
return '\n'.join([
'{0}{1}'.format(prefix, line)
for line in textwrap.wrap(txt, width=MAX_WIDTH-len(prefix), **options)])
def lazyproperty(func):
"""A property decorator for lazy evaluation"""
cache = {}
def _get(self):
"""Return the property value from cache once it is calculated"""
try:
return cache[self]
except KeyError:
cache[self] = value = func(self)
return value
return property(_get)
def memoize(fn):
"""Memoize functions that take simple arguments
    The arguments of this function must be 'hashable'.
Keywords are not supported
"""
memo = {}
def wrapper(*args):
key = tuple(args)
if key not in memo:
memo[key] = fn(*args)
return memo[key]
return wrapper
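# Hedged usage sketch (illustrative only): memoize caches results per distinct
# positional-argument tuple, so each fib(k) below is computed a single time.
#
#   @memoize
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # fast: every subproblem is evaluated once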
class ConfigParserNamedLists(object):
"""Parse a named mapping from the configuration file.
Example input (config file):
[packages]
free = http://pypm-free.as.com
be = http://pypm-be.as.com
staging = http://pypm-staging.as.com
default = be free
QA = staging default
What this class produces (self.mapping):
{
'free': [factory('free', 'http://pypm-free.as.com')],
'be': [factory('be', 'http://pypm-be.as.com')],
'staging': [factory('staging', 'http://pypm-staging.as.com')],
'default': [factory('be', 'http://pypm-be.as.com'),
factory('free', 'http://pypm-free.as.com')],
'QA': [factory('staging', 'http://pypm-staging.as.com'),
factory('be', 'http://pypm-be.as.com'),
factory('free', 'http://pypm-free.as.com')],
}
"""
    VALUE_SEP = re.compile(r'[\s,]+')
def __init__(self, option_items, factory, is_sentinel):
"""
- option_items: ConfigParser.items('yoursection')
- factory: a function that produces the value object
- sentinel_p: a function that returns True for sentinels
"""
self.option_items = option_items
self.factory = factory
self.is_sentinel = is_sentinel
self.mapping = {}
self._init()
def _init(self):
for name, value in self.option_items:
if name in self.mapping:
raise ValueError('duplicate option key found: {0}'.format(name))
else:
self.mapping[name] = value
# substitute references
_processed = set()
for name in self.mapping:
self._expand_rvalue(name, _processed)
def _expand_rvalue(self, name, processed):
if name in processed:
return
value = self.mapping[name]
if isinstance(value, list):
processed.add(name)
return
if name not in self.mapping:
raise ValueError('unknown option reference: {0}'.format(name))
if self.is_sentinel(value):
self.mapping[name] = [self.factory(name, value)]
else:
self.mapping[name] = []
for part in self.VALUE_SEP.split(value):
self._expand_rvalue(part, processed)
self.mapping[name].extend(self.mapping[part])
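# Hedged usage sketch mirroring the docstring example above (the factory and the
# sentinel test are illustrative assumptions, not part of pypm's API):
#
#   items = [('free', 'http://pypm-free.as.com'),
#            ('be', 'http://pypm-be.as.com'),
#            ('default', 'be free')]
#   named = ConfigParserNamedLists(
#       items,
#       factory=lambda name, url: (name, url),
#       is_sentinel=lambda value: value.startswith('http'))
#   # named.mapping['default'] == [('be', 'http://pypm-be.as.com'),
#   #                              ('free', 'http://pypm-free.as.com')]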
@contextmanager
def locked(lockfile):
"""'with' context to lock a file"""
lock = LockFile(lockfile)
try:
yield
finally:
lock.close()
@contextmanager
def dlocked(directory):
"""Lock based on a directory
    You need this function if you do not want more than one process to be
    operating on a directory.
"""
if not P.exists(directory):
os.makedirs(directory)
lockfile = P.join(directory, '.lock')
with locked(lockfile):
yield
def get_user_agent(default):
"""Return an user agent string representing PyPM
Retain the default user-agent for backward-compat
"""
return '{0} (PyPM {1.__version__})'.format(default, pypm)
def existing(path):
"""Return path, but assert its presence first"""
assert isinstance(path, (six.string_types, six.text_type)), \
'not of string type: %s <%s>' % (path, type(path))
assert P.exists(path), 'file/directory not found: %s' % path
return path
def concise_path(pth):
"""Return a concise, but human-understandable, version of ``pth``
Compresses %HOME% and %APPDATA%
"""
aliases = [
('%APPDATA%', os.getenv('APPDATA', None)),
('~', P.expanduser('~')),
]
for alias, pthval in aliases:
if pthval and pth.startswith(pthval):
return P.join(alias, P.relpath(pth, pthval))
return pth
def abs2rel(absolute_path):
"""Convert an absolute path to relative path assuming the topmost directory
is the bast dir.
>>> strip_abs_root('/opt/ActivePython/')
'opt/ActivePython/'
>>> strip_abs_root('/opt/ActivePython')
'opt/ActivePython'
"""
    assert os.path.isabs(absolute_path), \
        '`%s` is not an absolute path' % absolute_path
if sys.platform.startswith('win'):
assert absolute_path[1:3] == ':\\'
return absolute_path[3:] # remove the DRIVE
else:
assert absolute_path[0] == '/'
return absolute_path[1:] # remove the '/'
def url_join(url, components):
"""Join URL components .. always with a forward slash"""
assert type(components) is list
    assert '\\' not in url, \
        'URL is not supposed to contain backslashes. Is this a Windows path? ' + url
return url + '/' + '/'.join(components)
def path_to_url(path):
"""Convert local path to remote url
"""
if sys.platform.startswith('win'):
assert '/' not in path, \
'windows path cannot contain forward slash: '+path
drive, path = os.path.splitdrive(path)
return url_join('file:///' + drive,
path.split('\\'))
else:
return 'file://' + P.abspath(path)
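# Hedged examples (illustrative):
#   url_join('http://host', ['a', 'b'])  -> 'http://host/a/b'
#   path_to_url('/tmp/pkg')              -> 'file:///tmp/pkg'  (on POSIX systems)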
def pypm_file(*paths):
"""Return absolute path to a file residing inside the pypm package using
pkg_resources API"""
return resource_filename(Requirement.parse('pypm'), P.join(*paths))
class BareDateTime(six.text_type):
"""Wrapper around the DateTime object with our own standard string
representation
"""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
FORMAT = DATE_FORMAT + ' ' + TIME_FORMAT
@classmethod
def to_string(cls, dt):
"""Convert the datetime object `dt` to a string
        with the format defined by this class
"""
return dt.strftime(cls.FORMAT)
@classmethod
def to_datetime(cls, dt_string):
"""Convert dt_string, formatted by `to_string()` method above"""
ts = time.mktime(time.strptime(dt_string, cls.FORMAT))
return datetime.fromtimestamp(ts)
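# Hedged round-trip sketch (illustrative):
#   BareDateTime.to_string(datetime(2020, 1, 2, 3, 4, 5))  -> '2020-01-02 03:04:05'
#   BareDateTime.to_datetime('2020-01-02 03:04:05')        -> datetime(2020, 1, 2, 3, 4, 5)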
|
"""
Read in the output from the trace-inputlocator script and create a GraphViz file.
Pass as input the path to the yaml output of the trace-inputlocator script via config file.
The output is written to the trace-inputlocator location.
WHY? because the trace-inputlocator only has the GraphViz output of the last call to the script. This
version re-creates the trace-data from the (merged) yaml file (the yaml output is merged if pre-existing in the output
file).
"""
import yaml
import cea.config
from cea.tests.trace_inputlocator import create_graphviz_output
def main(config):
with open(config.trace_inputlocator.yaml_output_file, 'r') as f:
yaml_data = yaml.safe_load(f)
trace_data = []
for script in yaml_data.keys():
for direction in ('input', 'output'):
for locator, file in yaml_data[script][direction]:
trace_data.append((direction, script, locator, file))
create_graphviz_output(trace_data, config.trace_inputlocator.graphviz_output_file)
if __name__ == '__main__':
main(cea.config.Configuration())
|
from gmusicapi import Mobileclient
import getpass
class GpmSession(object):
# Private Variables
# Public Variables
api = None
logged_in = False
songs = None
playlists = None
    # Constructor with optionally passed credentials.
    # Pass credentials to log in directly; omit them to be prompted by this module.
def __init__(self, email=None, pw=None):
self.api = Mobileclient()
if not email and not pw:
email = input("Please enter an email address tied to a GPM account: ")
pw = getpass.getpass("Please enter the password associated with %s: " % email)
self.logged_in = self.api.login(email, pw, Mobileclient.FROM_MAC_ADDRESS) # As per api protocol
if self.logged_in:
print("Google Play Music login successful")
else:
print("Google Play Music login failed")
def init(self, songs = True, playlists = True):
if songs:
self.songs = self.api.get_all_songs()
if playlists:
self.playlists = self.api.get_all_playlists()
def get_song_stream(self, title, artist=None):
print(not self.songs)
if not self.songs:
self.init(True, False)
song = next(iter((track for track in self.songs if self._filter_condition(track, title, artist)) or []), None)
if song:
return self.api.get_stream_url(song["id"])
else:
return None
def _filter_condition(self, song_obj, search_title, search_artist):
result = True
if search_title:
result = result & (song_obj["title"].lower().strip() == search_title.lower().strip())
if search_artist:
result = result & (song_obj["artist"].lower().strip() == search_artist.lower().strip())
return result
def main():
session = GpmSession()
while not session.logged_in:
session = GpmSession()
session.init()
print(session.get_song_stream("Dirty Laundry", "Bitter Sweet"))
print(session.get_song_stream("1940"))
if __name__ == "__main__":
main()
|
"""
Sql support for multilingual models
"""
|
import sublime
from .Base import Base
from ...utils import Debug
from ...utils.uiutils import get_prefix
class Outline(Base):
regions = {}
ts_view = None
def __init__(self, t3sviews):
super(Outline, self).__init__('Typescript : Outline View', t3sviews)
# SET TEXT
def set_text(self, edit_token, members, ts_view):
"""
This function takes the tss.js members structure instead of a string.
"""
# this will process the outline, even if the view is closed
self.ts_view = ts_view
if type(members) == list:
self._tssjs_2_outline_format(members)
elif type(members) == str:
self.text = members
super(Outline, self).set_text(edit_token, self.text)
def is_current_ts(self, ts_view):
        if ts_view is None or self.ts_view is None:
            return False
return ts_view.id() == self.ts_view.id()
def _tssjs_2_outline_format(self, members):
text = []
line = 0
self.regions = {}
for member in members:
start_line = member['min']['line']
end_line = member['lim']['line']
left = member['min']['character']
right = member['lim']['character']
a = self.ts_view.text_point(start_line-1, left-1)
b = self.ts_view.text_point(end_line-1, right-1)
region = sublime.Region(a, b)
kind = get_prefix(member['loc']['kind'])
container_kind = get_prefix(member['loc']['containerKind'])
if member['loc']['kindModifiers'] != "":
member['loc']['kindModifiers'] = " " + member['loc']['kindModifiers']
if member['loc']['kind'] != 'class' and member['loc']['kind'] != 'interface':
t = "%s %s %s %s" % (kind, member['loc']['kindModifiers'], member['loc']['kind'], member['loc']['name'])
text.append('\n\t')
text.append(t.strip())
line += 1
self.regions[line] = region
else:
t = "%s %s %s %s {" % (container_kind, member['loc']['kindModifiers'], member['loc']['kind'], member['loc']['name'])
if len(text) == 0:
text.append('\n%s\n' % t.strip())
line += 2
self.regions[line - 1] = region
else:
text.append('\n\n}\n\n%s\n' % t.strip())
line += 5
self.regions[line - 1] = region
if len(members) == 0:
text.append("\n\nno members found\n")
self.text = ''.join(text)
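    # Assumed shape of a single entry in `members` (inferred from the code above;
    # the concrete values are illustrative only):
    #
    #   {'min': {'line': 12, 'character': 5},
    #    'lim': {'line': 12, 'character': 20},
    #    'loc': {'kind': 'method', 'containerKind': 'class',
    #            'kindModifiers': 'public', 'name': 'render'}}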
is_focusing_ts_view = False
def on_click(self,line):
if self.is_focusing_ts_view:
Debug('focus', 'Outline.on_click: is just focusing other view > ignore')
return
if line in self.regions:
draw = sublime.DRAW_NO_FILL
self.ts_view.add_regions('typescript-definition', [self.regions[line]], 'comment', 'dot', draw)
self._focus_member_in_view(self.regions[line])
def _focus_member_in_view(self, region):
if self.ts_view.is_loading():
return
else:
Debug('focus', "_focus_member_in_view, Region @pos %i" % (region.begin()))
self.is_focusing_ts_view = True
self.ts_view.show(region)
self.ts_view.window().focus_view(self.ts_view)
self.is_focusing_ts_view = False
|
from PIL import Image, ImageChops, ImageDraw
from django.contrib.auth.models import User
from filer.models.foldermodels import Folder
from filer.models.clipboardmodels import Clipboard, ClipboardItem
def create_superuser():
superuser = User.objects.create_superuser('admin',
'admin@free.fr',
'secret')
return superuser
def create_folder_structure(depth=2, sibling=2, parent=None):
"""
This method creates a folder structure of the specified depth.
* depth: is an integer (default=2)
* sibling: is an integer (default=2)
* parent: is the folder instance of the parent.
"""
if depth > 0 and sibling > 0:
        depth_range = list(range(1, depth + 1))
        depth_range.reverse()
for d in depth_range:
for s in range(1,sibling+1):
name = "folder: %s -- %s" %(str(d), str(s))
folder = Folder(name=name, parent=parent)
folder.save()
create_folder_structure(depth=d-1, sibling=sibling, parent=folder)
def create_clipboard_item(user, file):
clipboard, was_clipboard_created = Clipboard.objects.get_or_create(user=user)
clipboard_item = ClipboardItem(clipboard=clipboard, file=file)
return clipboard_item
def create_image(mode='RGB', size=(800, 600)):
image = Image.new(mode, size)
draw = ImageDraw.Draw(image)
x_bit, y_bit = size[0] // 10, size[1] // 10
draw.rectangle((x_bit, y_bit * 2, x_bit * 7, y_bit * 3), 'red')
draw.rectangle((x_bit * 2, y_bit, x_bit * 3, y_bit * 8), 'red')
return image
|
import os
import numpy as np
import MeshToolkit as mtk
def ReadObj( filename ) :
# Initialisation
vertices = []
faces = []
normals = []
colors = []
texcoords = []
material = ""
# Read each line in the file
for line in open( filename, "r" ) :
# Empty line / Comment
if line.isspace() or line.startswith( '#' ) : continue
# Split values in the line
values = line.split()
# Vertex
if values[0] == 'v' :
vertices.append( list( map( float, values[1:4] ) ) )
# Face (index starts at 1)
elif values[0] == 'f' :
faces.append( list( map( int, [ (v.split('/'))[0] for v in values[1:4] ] ) ) )
# Normal
elif values[0] == 'vn' :
normals.append( list( map( float, values[1:4] ) ) )
# Color
elif values[0] == 'c' :
colors.append( list( map( float, values[1:4] ) ) )
# Texture
elif values[0] == 'vt' :
texcoords.append( list( map( float, values[1:3] ) ) )
# Texture filename
elif values[0] == 'mtllib' :
material = values[1]
# Remap face indices
faces = np.array( faces ) - 1
# Return the final mesh
return mtk.Mesh( os.path.splitext(os.path.basename(filename))[0], vertices, faces, colors, material, texcoords, [], normals )
|
import sys
MOD = 123 # type: int
YES = "yes" # type: str
NO = "NO" # type: str
def solve(N: int, M: int, H: "List[List[str]]", A: "List[int]", B: "List[float]", Q: int, X: "List[int]"):
print(N, M)
assert len(H) == N - 1
for i in range(N - 1):
assert len(H[i]) == M - 2
print(*H[i])
assert len(A) == N - 1
assert len(B) == N - 1
for i in range(N - 1):
print(A[i], B[i])
print(Q)
assert len(X) == M + Q
for i in range(M + Q):
print(X[i])
print(YES)
print(NO)
print(MOD)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
M = int(next(tokens)) # type: int
H = [[next(tokens) for _ in range(M - 1 - 2 + 1)] for _ in range(N - 2 + 1)] # type: "List[List[str]]"
A = [int()] * (N - 2 + 1) # type: "List[int]"
B = [float()] * (N - 2 + 1) # type: "List[float]"
for i in range(N - 2 + 1):
A[i] = int(next(tokens))
B[i] = float(next(tokens))
Q = int(next(tokens)) # type: int
X = [int(next(tokens)) for _ in range(M + Q)] # type: "List[int]"
solve(N, M, H, A, B, Q, X)
if __name__ == '__main__':
main()
|
"""
MoinMoin - fullsearch action
This is the backend of the search form. Search pages and print results.
@copyright: 2001 by Juergen Hermann <jh@web.de>
@license: GNU GPL, see COPYING for details.
"""
import re, time
from MoinMoin.Page import Page
from MoinMoin import wikiutil
from parsedatetime.parsedatetime import Calendar
from MoinMoin.web.utils import check_surge_protect
def checkTitleSearch(request):
""" Return 1 for title search, 0 for full text search, -1 for idiot spammer
who tries to press all buttons at once.
When used in FullSearch macro, we have 'titlesearch' parameter with
'0' or '1'. In standard search, we have either 'titlesearch' or
'fullsearch' with localized string. If both missing, default to
True (might happen with Safari) if this isn't an advanced search.
"""
form = request.values
if 'titlesearch' in form and 'fullsearch' in form:
ret = -1 # spammer / bot
else:
try:
ret = int(form['titlesearch'])
except ValueError:
ret = 1
except KeyError:
ret = ('fullsearch' not in form and not isAdvancedSearch(request)) and 1 or 0
return ret
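# Illustrative outcomes for checkTitleSearch (assumed request form values):
#   {'titlesearch': '1'}                     -> 1   (title search)
#   {'fullsearch': 'Text'}                   -> 0   (full text search)
#   {'titlesearch': '1', 'fullsearch': 'x'}  -> -1  (both buttons pressed)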
def isAdvancedSearch(request):
""" Return True if advanced search is requested """
try:
return int(request.values['advancedsearch'])
except KeyError:
return False
def searchHints(f, hints):
""" Return a paragraph showing hints for a search
@param f: the formatter to use
@param hints: list of hints (as strings) to show
"""
return ''.join([
f.paragraph(1, attr={'class': 'searchhint'}),
# this is illegal formatter usage anyway, so we can directly use a literal
"<br>".join(hints),
f.paragraph(0),
])
def execute(pagename, request, fieldname='value', titlesearch=0, statistic=0):
_ = request.getText
titlesearch = checkTitleSearch(request)
if titlesearch < 0:
check_surge_protect(request, kick=True) # get rid of spammer
return
advancedsearch = isAdvancedSearch(request)
form = request.values
# context is relevant only for full search
if titlesearch:
context = 0
elif advancedsearch:
context = 180 # XXX: hardcoded context count for advancedsearch
else:
context = int(form.get('context', 0))
# Get other form parameters
needle = form.get(fieldname, '')
case = int(form.get('case', 0))
regex = int(form.get('regex', 0)) # no interface currently
hitsFrom = int(form.get('from', 0))
highlight_titles = int(form.get('highlight_titles', 1))
highlight_pages = int(form.get('highlight_pages', 1))
mtime = None
msg = ''
historysearch = 0
# if advanced search is enabled we construct our own search query
if advancedsearch:
and_terms = form.get('and_terms', '').strip()
or_terms = form.get('or_terms', '').strip()
not_terms = form.get('not_terms', '').strip()
#xor_terms = form.get('xor_terms', '').strip()
categories = form.getlist('categories') or ['']
timeframe = form.get('time', '').strip()
language = form.getlist('language') or ['']
mimetype = form.getlist('mimetype') or [0]
excludeunderlay = form.get('excludeunderlay', 0)
nosystemitems = form.get('nosystemitems', 0)
historysearch = form.get('historysearch', 0)
mtime = form.get('mtime', '')
if mtime:
mtime_parsed = None
# get mtime from known date/time formats
for fmt in (request.user.datetime_fmt,
request.cfg.datetime_fmt, request.user.date_fmt,
request.cfg.date_fmt):
try:
mtime_parsed = time.strptime(mtime, fmt)
except ValueError:
continue
else:
break
if mtime_parsed:
mtime = time.mktime(mtime_parsed)
else:
# didn't work, let's try parsedatetime
cal = Calendar()
mtime_parsed, parsed_what = cal.parse(mtime)
# XXX it is unclear if usage of localtime here and in parsedatetime module is correct.
# time.localtime is the SERVER's local time and of no relevance to the user (being
# somewhere in the world)
# mktime is reverse function for localtime, so this maybe fixes it again!?
if parsed_what > 0 and mtime_parsed <= time.localtime():
mtime = time.mktime(mtime_parsed)
else:
mtime_parsed = None # we don't use invalid stuff
# show info
if mtime_parsed:
# XXX mtime_msg is not shown in some cases
mtime_msg = _("(!) Only pages changed since '''%s''' are being displayed!",
wiki=True) % request.user.getFormattedDateTime(mtime)
else:
mtime_msg = _('/!\\ The modification date you entered was not '
'recognized and is therefore not considered for the '
'search results!', wiki=True)
else:
mtime_msg = None
word_re = re.compile(r'(\"[\w\s]+"|\w+)', re.UNICODE)
needle = ''
if categories[0]:
needle += 'category:%s ' % ','.join(categories)
if language[0]:
needle += 'language:%s ' % ','.join(language)
if mimetype[0]:
needle += 'mimetype:%s ' % ','.join(mimetype)
if excludeunderlay:
needle += '-domain:underlay '
if nosystemitems:
needle += '-domain:system '
if and_terms:
needle += '(%s) ' % and_terms
if not_terms:
needle += '(%s) ' % ' '.join(['-%s' % t for t in word_re.findall(not_terms)])
if or_terms:
needle += '(%s) ' % ' or '.join(word_re.findall(or_terms))
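        # Illustrative example of the needle built above (assumed form input):
        #   categories=['CategoryHelp'], and_terms='foo bar', not_terms='baz'
        # yields roughly: 'category:CategoryHelp (foo bar) (-baz) '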
# check for sensible search term
stripped = needle.strip()
if len(stripped) == 0:
request.theme.add_msg(_('Please use a more selective search term instead '
'of {{{"%s"}}}', wiki=True) % wikiutil.escape(needle), "error")
Page(request, pagename).send_page()
return
needle = stripped
# Setup for type of search
if titlesearch:
title = _('Title Search: "%s"')
sort = 'page_name'
else:
if advancedsearch:
title = _('Advanced Search: "%s"')
else:
title = _('Full Text Search: "%s"')
sort = 'weight'
# search the pages
from MoinMoin.search import searchPages, QueryParser, QueryError
try:
query = QueryParser(case=case, regex=regex,
titlesearch=titlesearch).parse_query(needle)
except QueryError: # catch errors in the search query
request.theme.add_msg(_('Your search query {{{"%s"}}} is invalid. Please refer to '
'HelpOnSearching for more information.', wiki=True, percent=True) % wikiutil.escape(needle), "error")
Page(request, pagename).send_page()
return
results = searchPages(request, query, sort, mtime, historysearch)
# directly show a single hit for title searches
# this is the "quick jump" functionality if you don't remember
# the pagename exactly, but just some parts of it
if titlesearch and len(results.hits) == 1:
page = results.hits[0]
if not page.attachment: # we did not find an attachment
page = Page(request, page.page_name)
querydict = {}
if highlight_pages:
highlight = query.highlight_re()
if highlight:
querydict.update({'highlight': highlight})
url = page.url(request, querystr=querydict)
request.http_redirect(url)
return
if not results.hits: # no hits?
f = request.formatter
querydict = dict(wikiutil.parseQueryString(request.query_string))
querydict.update({'titlesearch': 0})
request.theme.add_msg(_('Your search query {{{"%s"}}} didn\'t return any results. '
'Please change some terms and refer to HelpOnSearching for '
'more information.%s', wiki=True, percent=True) % (wikiutil.escape(needle),
titlesearch and ''.join([
'<br>',
_('(!) Consider performing a', wiki=True), ' ',
f.url(1, href=request.page.url(request, querydict, escape=0)),
_('full-text search with your search terms'),
f.url(0), '.',
]) or ''), "error")
Page(request, pagename).send_page()
return
# This action generates data using the user language
request.setContentLanguage(request.lang)
request.theme.send_title(title % needle, pagename=pagename)
# Start content (important for RTL support)
request.write(request.formatter.startContent("content"))
# Hints
f = request.formatter
hints = []
if titlesearch:
querydict = dict(wikiutil.parseQueryString(request.query_string))
querydict.update({'titlesearch': 0})
hints.append(''.join([
_("(!) You're performing a title search that might not include"
' all related results of your search query in this wiki. <<BR>>', wiki=True),
' ',
f.url(1, href=request.page.url(request, querydict, escape=0)),
f.text(_('Click here to perform a full-text search with your '
'search terms!')),
f.url(0),
]))
if advancedsearch and mtime_msg:
hints.append(mtime_msg)
if hints:
request.write(searchHints(f, hints))
# Search stats
request.write(results.stats(request, request.formatter, hitsFrom))
# Then search results
info = not titlesearch
if context:
output = results.pageListWithContext(request, request.formatter,
info=info, context=context, hitsFrom=hitsFrom, hitsInfo=1,
highlight_titles=highlight_titles,
highlight_pages=highlight_pages)
else:
output = results.pageList(request, request.formatter, info=info,
hitsFrom=hitsFrom, hitsInfo=1,
highlight_titles=highlight_titles,
highlight_pages=highlight_pages)
request.write(output)
request.write(request.formatter.endContent())
request.theme.send_footer(pagename)
request.theme.send_closing_html()
|
import unittest
from tests.test_basic import BaseTestCase
from datetime import timedelta, datetime, tzinfo
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
class UtilTestCase(BaseTestCase):
"""
Tests utils
"""
def test_parse_iso_8601_time_str(self):
"""
At times, Amazon hands us a timestamp with no microseconds.
"""
import datetime
from route53.util import parse_iso_8601_time_str
self.assertEqual(parse_iso_8601_time_str('2013-07-28T01:00:01Z'),
datetime.datetime(2013, 7, 28, 1, 0, 1, 0, \
tzinfo=UTC()))
self.assertEqual(parse_iso_8601_time_str('2013-07-28T01:00:01.001Z'),
datetime.datetime(2013, 7, 28, 1, 0, 1, 1000, \
tzinfo=UTC()))
|
from .tornadoconnection import TornadoLDAPConnection
|
from __future__ import print_function, unicode_literals, division, absolute_import
from enocean.protocol.eep import EEP
eep = EEP()
def test_first_range():
offset = -40
values = range(0x01, 0x0C)
for i in range(len(values)):
minimum = float(i * 10 + offset)
maximum = minimum + 40
profile = eep.find_profile([], 0xA5, 0x02, values[i])
assert minimum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert maximum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
def test_second_range():
offset = -60
values = range(0x10, 0x1C)
for i in range(len(values)):
minimum = float(i * 10 + offset)
maximum = minimum + 80
profile = eep.find_profile([], 0xA5, 0x02, values[i])
assert minimum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert maximum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
def test_rest():
profile = eep.find_profile([], 0xA5, 0x02, 0x20)
assert -10 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert +41.2 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
profile = eep.find_profile([], 0xA5, 0x02, 0x30)
assert -40 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert +62.3 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
|
def powers_of_two(limit):
value = 1
while value < limit:
yield value
value += value
for i in powers_of_two(70):
print(i)
g = powers_of_two(100)
assert str(type(powers_of_two)) == "<class 'function'>"
assert str(type(g)) == "<class 'generator'>"
assert g.__next__() == 1
assert g.__next__() == 2
assert next(g) == 4
assert next(g) == 8
|
from core.himesis import Himesis, HimesisPostConditionPattern
import cPickle as pickle
from uuid import UUID
class HReconnectMatchElementsRHS(HimesisPostConditionPattern):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HReconnectMatchElementsRHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HReconnectMatchElementsRHS, self).__init__(name='HReconnectMatchElementsRHS', num_nodes=3, edges=[])
# Add the edges
self.add_edges([(2, 0), (0, 1)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_post__GM2AUTOSAR_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_action__"] = """#===============================================================================
pass
"""
self["name"] = """"""
self["GUID__"] = UUID('ce9c5429-6e4c-4782-a83a-17e240381cb6')
# Set the node attributes
self.vs[0]["mm__"] = """MT_post__match_contains"""
self.vs[0]["MT_label__"] = """3"""
self.vs[0]["GUID__"] = UUID('789662d8-ab7d-4640-a710-abbc847de320')
self.vs[1]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_post__classtype"] = """
return attr_value
"""
self.vs[1]["MT_post__name"] = """
return attr_value
"""
self.vs[1]["GUID__"] = UUID('7e5e306f-cb65-40df-9e60-63b9fe83b79b')
self.vs[2]["mm__"] = """MT_post__MatchModel"""
self.vs[2]["MT_label__"] = """1"""
self.vs[2]["GUID__"] = UUID('3c85bf70-be4a-40d8-9bcb-c138195ad20e')
from HReconnectMatchElementsLHS import HReconnectMatchElementsLHS
self.pre = HReconnectMatchElementsLHS()
def action(self, PostNode, graph):
"""
Executable constraint code.
@param PostNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
def execute(self, packet, match):
"""
Transforms the current match of the packet according to the rule %s.
Pivots are also assigned, if any.
@param packet: The input packet.
@param match: The match to rewrite.
"""
graph = packet.graph
# Build a dictionary {label: node index} mapping each label of the pattern to a node in the graph to rewrite.
# Because of the uniqueness property of labels in a rule, we can store all LHS labels
# and subsequently add the labels corresponding to the nodes to be created.
labels = match.copy()
#===============================================================================
# Update attribute values
#===============================================================================
#===============================================================================
# Create new nodes
#===============================================================================
# match_contains3
new_node = graph.add_node()
labels['3'] = new_node
graph.vs[new_node][Himesis.Constants.META_MODEL] = 'match_contains'
#===============================================================================
# Create new edges
#===============================================================================
# MatchModel1 -> match_contains3
graph.add_edges([(labels['1'], labels['3'])])
# match_contains3 -> MetaModelElement_S2
graph.add_edges([(labels['3'], labels['2'])])
#===============================================================================
# Set the output pivots
#===============================================================================
#===============================================================================
# Perform the post-action
#===============================================================================
try:
self.action(lambda i: graph.vs[labels[i]], graph)
        except Exception as e:
raise Exception('An error has occurred while applying the post-action', e)
#===============================================================================
# Finally, delete nodes (this will automatically delete the adjacent edges)
#===============================================================================
|
# coding: utf-8
import unittest
from azure.mgmt.containerregistry.v2017_03_01.models import (
RegistryCreateParameters,
RegistryUpdateParameters,
StorageAccountParameters,
Sku,
SkuTier,
ProvisioningState,
PasswordName
)
import azure.mgmt.storage
from devtools_testutils import (
AzureMgmtTestCase, FakeStorageAccount,
ResourceGroupPreparer, StorageAccountPreparer
)
FAKE_STORAGE = FakeStorageAccount(
name='pyacr',
id=''
)
DEFAULT_LOCATION = 'westcentralus'
DEFAULT_SKU_NAME = 'Basic'
DEFAULT_KEY_VALUE_PAIR = {
'key': 'value'
}
class MgmtACRTest20170301(AzureMgmtTestCase):
def setUp(self):
super(MgmtACRTest20170301, self).setUp()
self.client = self.create_mgmt_client(
azure.mgmt.containerregistry.ContainerRegistryManagementClient,
api_version='2017-03-01'
)
@ResourceGroupPreparer(location=DEFAULT_LOCATION)
@StorageAccountPreparer(name_prefix='pyacr', location=DEFAULT_LOCATION, playback_fake_resource=FAKE_STORAGE)
def test_basic_registry(self, resource_group, location, storage_account, storage_account_key):
registry_name = self.get_resource_name('pyacr')
name_status = self.client.registries.check_name_availability(registry_name)
self.assertTrue(name_status.name_available)
# Create a Basic registry
registry = self.client.registries.create(
resource_group_name=resource_group.name,
registry_name=registry_name,
registry_create_parameters=RegistryCreateParameters(
location=location,
sku=Sku(
name=DEFAULT_SKU_NAME
),
storage_account=StorageAccountParameters(
name=storage_account.name,
access_key=storage_account_key
)
)
).result()
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.location, location)
self.assertEqual(registry.sku.name, DEFAULT_SKU_NAME)
self.assertEqual(registry.sku.tier, SkuTier.basic.value)
self.assertEqual(registry.provisioning_state.value, ProvisioningState.succeeded.value)
self.assertEqual(registry.admin_user_enabled, False)
registries = list(self.client.registries.list_by_resource_group(resource_group.name))
self.assertEqual(len(registries), 1)
# Update the registry with new tags and enable admin user
registry = self.client.registries.update(
resource_group_name=resource_group.name,
registry_name=registry_name,
registry_update_parameters=RegistryUpdateParameters(
tags=DEFAULT_KEY_VALUE_PAIR,
admin_user_enabled=True
)
)
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.tags, DEFAULT_KEY_VALUE_PAIR)
self.assertEqual(registry.admin_user_enabled, True)
registry = self.client.registries.get(resource_group.name, registry_name)
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.tags, DEFAULT_KEY_VALUE_PAIR)
self.assertEqual(registry.admin_user_enabled, True)
credentials = self.client.registries.list_credentials(resource_group.name, registry_name)
self.assertEqual(len(credentials.passwords), 2)
credentials = self.client.registries.regenerate_credential(
resource_group.name, registry_name, PasswordName.password)
self.assertEqual(len(credentials.passwords), 2)
self.client.registries.delete(resource_group.name, registry_name)
if __name__ == '__main__':
unittest.main()
|
"""
Absorption chillers
"""
import cea.config
import cea.inputlocator
import pandas as pd
import numpy as np
from math import log, ceil
import sympy
from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK
from cea.analysis.costs.equations import calc_capex_annualized, calc_opex_annualized
__author__ = "Shanshan Hsieh"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Shanshan Hsieh"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def calc_chiller_main(mdot_chw_kgpers, T_chw_sup_K, T_chw_re_K, T_hw_in_C, T_ground_K, absorption_chiller):
"""
This model calculates the operation conditions of the absorption chiller given the chilled water loads in
evaporators and the hot water inlet temperature in the generator (desorber).
This is an empirical model using characteristic equation method developed by _[Kuhn A. & Ziegler F., 2005].
The parameters of each absorption chiller can be derived from experiments or performance curves from manufacturer's
catalog, more details are described in _[Puig-Arnavat M. et al, 2010].
Assumptions: constant external flow rates (chilled water at the evaporator, cooling water at the condenser and
absorber, hot water at the generator).
:param mdot_chw_kgpers: required chilled water flow rate
:type mdot_chw_kgpers: float
:param T_chw_sup_K: required chilled water supply temperature (outlet from the evaporator)
:type T_chw_sup_K: float
:param T_chw_re_K: required chilled water return temperature (inlet to the evaporator)
:type T_chw_re_K: float
:param T_hw_in_C: hot water inlet temperature to the generator
:type T_hw_in_C: float
:param T_ground_K: ground temperature
:type T_ground_K: float
    :param absorption_chiller: AbsorptionChiller object holding the chiller properties and external flow rates
:return:
..[Kuhn A. & Ziegler F., 2005] Operational results of a 10kW absorption chiller and adaptation of the characteristic
    equation. In: Proceedings of the international conference solar air conditioning. Bad Staffelstein, Germany: 2005.
..[Puig-Arnavat M. et al, 2010] Analysis and parameter identification for characteristic equations of single- and
double-effect absorption chillers by means of multivariable regression. Int J Refrig: 2010.
"""
chiller_prop = absorption_chiller.chiller_prop # get data from the class
# create a dict of input operating conditions
input_conditions = {'T_chw_sup_K': T_chw_sup_K,
'T_chw_re_K': T_chw_re_K,
'T_hw_in_C': T_hw_in_C,
'T_ground_K': T_ground_K}
mcp_chw_WperK = mdot_chw_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK
q_chw_total_W = mcp_chw_WperK * (T_chw_re_K - T_chw_sup_K)
if np.isclose(q_chw_total_W, 0.0):
wdot_W = 0.0
q_cw_W = 0.0
q_hw_W = 0.0
T_hw_out_C = np.nan
EER = 0.0
input_conditions['q_chw_W'] = 0.0
else:
min_chiller_size_W = min(chiller_prop['cap_min'].values)
max_chiller_size_W = max(chiller_prop['cap_max'].values)
# get chiller properties and input conditions according to load
if q_chw_total_W < min_chiller_size_W:
# get chiller property according to load
chiller_prop = chiller_prop[chiller_prop['cap_min'] == min_chiller_size_W]
# operate at minimum load
number_of_chillers_activated = 1.0 # only activate one chiller
input_conditions['q_chw_W'] = chiller_prop['cap_min'].values # minimum load
elif q_chw_total_W <= max_chiller_size_W:
# get chiller property according to load
chiller_prop = chiller_prop[(chiller_prop['cap_min'] <= q_chw_total_W) &
(chiller_prop['cap_max'] >= q_chw_total_W)]
# operate one chiller at the cooling load
number_of_chillers_activated = 1.0 # only activate one chiller
input_conditions['q_chw_W'] = q_chw_total_W # operate at the chilled water load
else:
# get chiller property according to load
chiller_prop = chiller_prop[chiller_prop['cap_max'] == max_chiller_size_W]
# distribute loads to multiple chillers
number_of_chillers_activated = q_chw_total_W / max_chiller_size_W
# operate at maximum load
input_conditions['q_chw_W'] = max(chiller_prop['cap_max'].values)
absorption_chiller.update_data(chiller_prop)
operating_conditions = calc_operating_conditions(absorption_chiller, input_conditions)
# calculate chiller outputs
wdot_W = calc_power_demand(input_conditions['q_chw_W'], chiller_prop) * number_of_chillers_activated
q_cw_W = operating_conditions['q_cw_W'] * number_of_chillers_activated
q_hw_W = operating_conditions['q_hw_W'] * number_of_chillers_activated
T_hw_out_C = operating_conditions['T_hw_out_C']
EER = q_chw_total_W / (q_hw_W + wdot_W)
if T_hw_out_C < 0.0 :
print ('T_hw_out_C = ', T_hw_out_C, ' incorrect condition, check absorption chiller script.')
chiller_operation = {'wdot_W': wdot_W, 'q_cw_W': q_cw_W, 'q_hw_W': q_hw_W, 'T_hw_out_C': T_hw_out_C,
'q_chw_W': q_chw_total_W, 'EER': EER}
return chiller_operation
def calc_operating_conditions(absorption_chiller, input_conditions):
"""
Calculates chiller operating conditions at given input conditions by solving the characteristic equations and the
energy balance equations. This method is adapted from _[Kuhn A. & Ziegler F., 2005].
The heat rejection to cooling tower is approximated with the energy balance:
Q(condenser) + Q(absorber) = Q(generator) + Q(evaporator)
:param AbsorptionChiller chiller_prop: parameters in the characteristic equations and the external flow rates.
:param input_conditions:
:type input_conditions: dict
    :return: a dict with operating conditions of the chilled water, cooling water and hot water loops in an absorption
chiller.
To improve speed, the system of equations was solved using sympy for the output variable ``q_hw_kW`` which is
then used to compute the remaining output variables. The following code was used to create the expression to
calculate ``q_hw_kW`` with::
# use symbolic computation to derive a formula for q_hw_kW:
# first, make sure all the variables are sympy symbols:
T_chw_in_C, T_chw_out_C, T_cw_in_C, T_hw_in_C, mcp_cw_kWperK, mcp_hw_kWperK, q_chw_kW = sympy.symbols(
"T_chw_in_C, T_chw_out_C, T_cw_in_C, T_hw_in_C, mcp_cw_kWperK, mcp_hw_kWperK, q_chw_kW")
T_hw_out_C, T_cw_out_C, q_hw_kW = sympy.symbols('T_hw_out_C, T_cw_out_C, q_hw_kW')
a_e, a_g, e_e, e_g, r_e, r_g, s_e, s_g = sympy.symbols("a_e, a_g, e_e, e_g, r_e, r_g, s_e, s_g")
ddt_e, ddt_g = sympy.symbols("ddt_e, ddt_g")
# the system of equations:
eq_e = s_e * ddt_e + r_e - q_chw_kW
eq_ddt_e = ((T_hw_in_C + T_hw_out_C) / 2.0
+ a_e * (T_cw_in_C + T_cw_out_C) / 2.0
+ e_e * (T_chw_in_C + T_chw_out_C) / 2.0
- ddt_e)
eq_g = s_g * ddt_g + r_g - q_hw_kW
        eq_ddt_g = ((T_hw_in_C + T_hw_out_C) / 2.0
                    + a_g * (T_cw_in_C + T_cw_out_C) / 2.0
                    + e_g * (T_chw_in_C + T_chw_out_C) / 2.0
                    - ddt_g)
eq_bal_g = (T_hw_in_C - T_hw_out_C) - q_hw_kW / mcp_hw_kWperK
# solve the system of equations with sympy
eq_sys = [eq_e, eq_g, eq_bal_g, eq_ddt_e, eq_ddt_g]
unknown_variables = (T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g)
a, b = sympy.linear_eq_to_matrix(eq_sys, unknown_variables)
T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g = tuple(*sympy.linsolve(eq_sys, unknown_variables))
q_hw_kW.simplify()
..[Kuhn A. & Ziegler F., 2005] Operational results of a 10kW absorption chiller and adaptation of the characteristic
    equation. In: Proceedings of the international conference solar air conditioning. Bad Staffelstein, Germany: 2005.
"""
# external water circuits (e: chilled water, ac: cooling water, d: hot water)
T_hw_in_C = input_conditions['T_hw_in_C']
T_cw_in_C = input_conditions['T_ground_K'] - 273.0 # condenser water inlet temperature
T_chw_in_C = input_conditions['T_chw_re_K'] - 273.0 # inlet to the evaporator
T_chw_out_C = input_conditions['T_chw_sup_K'] - 273.0 # outlet from the evaporator
    q_chw_kW = input_conditions['q_chw_W'] / 1000 # cooling load at the evaporator
m_cw_kgpers = absorption_chiller.m_cw_kgpers # external flow rate of cooling water at the condenser and absorber
m_hw_kgpers = absorption_chiller.m_hw_kgpers # external flow rate of hot water at the generator
mcp_cw_kWperK = m_cw_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK / 1000
mcp_hw_kWperK = m_hw_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK / 1000
# chiller_props (these are constants from the Absorption_chiller sheet in systems.xls)
s_e = absorption_chiller.s_e
r_e = absorption_chiller.r_e
s_g = absorption_chiller.s_g
r_g = absorption_chiller.r_g
a_e = absorption_chiller.a_e
e_e = absorption_chiller.e_e
a_g = absorption_chiller.a_g
e_g = absorption_chiller.e_g
# variables to solve
# T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g = sympy.symbols('T_hw_out_C T_cw_out_C q_hw_kW , ddt_e, ddt_g')
#
# # systems of equations to solve
# eq_e = s_e * ddt_e + r_e - q_chw_kW
# eq_ddt_e = ((T_hw_in_C + T_hw_out_C) / 2.0 + a_e * (T_cw_in_C + T_cw_out_C) / 2.0 + e_e * (T_chw_in_C + T_chw_out_C) / 2.0 - ddt_e)
# eq_g = s_g * ddt_g + r_g - q_hw_kW
# eq_ddt_g = ((T_hw_in_C + T_hw_out_C) / 2.0 + a_g * (T_cw_in_C + T_cw_out_C) / 2.0 + e_g * (T_chw_in_C + T_chw_out_C) / 2.0- ddt_g)
# eq_bal_g = (T_hw_in_C - T_hw_out_C) - q_hw_kW / mcp_hw_kWperK
#
# # solve the system of equations with sympy
# eq_sys = [eq_e, eq_g, eq_bal_g, eq_ddt_e, eq_ddt_g]
# unknown_variables = (T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g)
# (T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g) = tuple(*sympy.linsolve(eq_sys, unknown_variables))
# a = np.array([
# [0, 0, 0, s_e, 0],
# [0, 0, -1, 0, s_g],
# [-1, 0, -1 / mcp_hw_kWperK, 0, 0],
# [0.5, 0, 0, -1, 0],
# [0.5, 0, 0, 0, -1]])
# b = np.array([
# [q_chw_kW - r_e],
# [-r_g],
# [-T_hw_in_C],
# [-0.5 * T_hw_in_C - 0.5 * e_e * (T_chw_in_C + T_chw_out_C)],
# [-0.5 * T_hw_in_C - 0.5 * e_g * (T_chw_in_C + T_chw_out_C)]])
# the below equation for q_hw_kW was created with sympy.linsolve using symbols for all the variables.
q_hw_kW = ((r_g * s_e * (0.5 * a_e * mcp_hw_kWperK + 0.25 * s_g * (a_e - a_g))
+ s_g * (0.5 * a_g * mcp_hw_kWperK * (q_chw_kW - r_e)
+ s_e * (0.5 * mcp_hw_kWperK
* (a_e * (0.5 * T_chw_in_C * e_g
+ 0.5 * T_chw_out_C * e_g
+ 0.5 * T_cw_in_C * a_g
+ 1.0 * T_hw_in_C)
- a_g * (0.5 * T_chw_in_C * e_e
+ 0.5 * T_chw_out_C * e_e
+ 0.5 * T_cw_in_C * a_e
+ 1.0 * T_hw_in_C))
- 0.25 * r_g * (a_e - a_g))))
/ (s_e * (0.5 * a_e * mcp_hw_kWperK + 0.25 * s_g * (a_e - a_g))))
# calculate results
q_cw_kW = q_hw_kW + q_chw_kW # Q(condenser) + Q(absorber)
T_hw_out_C = T_hw_in_C - q_hw_kW / mcp_hw_kWperK
T_cw_out_C = T_cw_in_C + q_cw_kW / mcp_cw_kWperK # TODO: set upper bound of the chiller operation
return {'T_hw_out_C': T_hw_out_C,
'T_cw_out_C': T_cw_out_C,
'q_chw_W': q_chw_kW * 1000,
'q_hw_W': q_hw_kW * 1000,
'q_cw_W': q_cw_kW * 1000}
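# Hypothetical cross-check (not part of the original CEA code): the closed-form
# expression for q_hw_kW above can be reproduced by solving the five linear
# equations listed in the docstring of calc_operating_conditions directly with
# numpy. All chiller coefficients below are made-up illustrative numbers, not
# catalogue data.
def _cross_check_q_hw_closed_form():
    class _FakeChiller(object):
        m_cw_kgpers, m_hw_kgpers = 2.0, 1.0
        s_e, r_e, s_g, r_g = 0.42, -1.2, 0.9, 1.5
        a_e, e_e, a_g, e_g = -2.5, 1.8, -2.1, 1.1
    chiller = _FakeChiller()
    input_conditions = {'T_chw_sup_K': 280.0, 'T_chw_re_K': 286.0, 'T_hw_in_C': 85.0,
                        'T_ground_K': 300.0, 'q_chw_W': 10000.0}
    q_hw_closed_form_kW = calc_operating_conditions(chiller, input_conditions)['q_hw_W'] / 1000
    # rebuild the same inputs in the units used inside calc_operating_conditions
    T_hw_in_C = input_conditions['T_hw_in_C']
    T_cw_in_C = input_conditions['T_ground_K'] - 273.0
    T_chw_sum_C = (input_conditions['T_chw_re_K'] - 273.0) + (input_conditions['T_chw_sup_K'] - 273.0)
    q_chw_kW = input_conditions['q_chw_W'] / 1000
    mcp_hw_kWperK = chiller.m_hw_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK / 1000
    c = chiller
    # unknowns: x = [T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g]
    A = np.array([[0.0, 0.0, 0.0, c.s_e, 0.0],                  # eq_e
                  [0.0, 0.0, -1.0, 0.0, c.s_g],                 # eq_g
                  [-1.0, 0.0, -1.0 / mcp_hw_kWperK, 0.0, 0.0],  # eq_bal_g
                  [0.5, 0.5 * c.a_e, 0.0, -1.0, 0.0],           # eq_ddt_e
                  [0.5, 0.5 * c.a_g, 0.0, 0.0, -1.0]])          # eq_ddt_g
    b = np.array([q_chw_kW - c.r_e,
                  -c.r_g,
                  -T_hw_in_C,
                  -0.5 * T_hw_in_C - 0.5 * c.a_e * T_cw_in_C - 0.5 * c.e_e * T_chw_sum_C,
                  -0.5 * T_hw_in_C - 0.5 * c.a_g * T_cw_in_C - 0.5 * c.e_g * T_chw_sum_C])
    q_hw_linear_solve_kW = np.linalg.solve(A, b)[2]
    assert np.isclose(q_hw_closed_form_kW, q_hw_linear_solve_kW)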
def calc_power_demand(q_chw_W, chiller_prop):
"""
Calculates the power demand of the solution and refrigeration pumps in absorption chillers.
Linear equations derived from manufacturer's catalog _[Broad Air Conditioning, 2018].
:param q_chw_W:
:param ACH_type:
:return:
..[Broad Air Conditioning, 2018] BROAD XII NON-ELECTRIC CHILLER. (2018).
    Retrieved from https://www.broadusa.net/en/wp-content/uploads/2018/12/BROAD-XII-US-Catalog2018-12.pdf
"""
ach_type = chiller_prop['type'].values[0]
if ach_type == 'single':
        w_dot_W = 0.0028 * q_chw_W + 2941
else:
w_dot_W = 0.0021 * q_chw_W + 2757 # assuming the same for double and triple effect chillers
return w_dot_W
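# Illustrative back-of-envelope check (not from the original source): with the
# linear fits above, a 100 kW single-effect chiller draws roughly
# 0.0028 * 100e3 + 2941 ~= 3.2 kW of pump power, and a double/triple-effect
# unit about 0.0021 * 100e3 + 2757 ~= 3.0 kW.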
def calc_Cinv_ACH(Q_nom_W, Absorption_chiller_cost_data, ACH_type):
"""
Annualized investment costs for the vapor compressor chiller
:type Q_nom_W : float
:param Q_nom_W: peak cooling demand in [W]
:returns InvCa: annualized chiller investment cost in CHF/a
:rtype InvCa: float
"""
Capex_a_ACH_USD = 0
Opex_fixed_ACH_USD = 0
Capex_ACH_USD = 0
if Q_nom_W > 0:
Absorption_chiller_cost_data = Absorption_chiller_cost_data[Absorption_chiller_cost_data['type'] == ACH_type]
max_chiller_size = max(Absorption_chiller_cost_data['cap_max'].values)
Q_nom_W = Absorption_chiller_cost_data['cap_min'].values.min() if Q_nom_W < Absorption_chiller_cost_data[
'cap_min'].values.min() else Q_nom_W # minimum technology size
if Q_nom_W <= max_chiller_size:
Absorption_chiller_cost_data = Absorption_chiller_cost_data[
(Absorption_chiller_cost_data['cap_min'] <= Q_nom_W) & (
Absorption_chiller_cost_data[
'cap_max'] > Q_nom_W)] # keep properties of the associated capacity
Inv_a = Absorption_chiller_cost_data.iloc[0]['a']
Inv_b = Absorption_chiller_cost_data.iloc[0]['b']
Inv_c = Absorption_chiller_cost_data.iloc[0]['c']
Inv_d = Absorption_chiller_cost_data.iloc[0]['d']
Inv_e = Absorption_chiller_cost_data.iloc[0]['e']
Inv_IR = Absorption_chiller_cost_data.iloc[0]['IR_%']
Inv_LT = Absorption_chiller_cost_data.iloc[0]['LT_yr']
Inv_OM = Absorption_chiller_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (Q_nom_W) ** Inv_c + (Inv_d + Inv_e * Q_nom_W) * log(Q_nom_W)
Capex_a_ACH_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Opex_fixed_ACH_USD = InvC * Inv_OM
Capex_ACH_USD = InvC
else:
number_of_chillers = int(ceil(Q_nom_W / max_chiller_size))
Q_nom_each_chiller = Q_nom_W / number_of_chillers
for i in range(number_of_chillers):
Absorption_chiller_cost_data = Absorption_chiller_cost_data[
(Absorption_chiller_cost_data['cap_min'] <= Q_nom_each_chiller) & (
Absorption_chiller_cost_data[
'cap_max'] > Q_nom_each_chiller)] # keep properties of the associated capacity
Inv_a = Absorption_chiller_cost_data.iloc[0]['a']
Inv_b = Absorption_chiller_cost_data.iloc[0]['b']
Inv_c = Absorption_chiller_cost_data.iloc[0]['c']
Inv_d = Absorption_chiller_cost_data.iloc[0]['d']
Inv_e = Absorption_chiller_cost_data.iloc[0]['e']
Inv_IR = Absorption_chiller_cost_data.iloc[0]['IR_%']
Inv_LT = Absorption_chiller_cost_data.iloc[0]['LT_yr']
Inv_OM = Absorption_chiller_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (Q_nom_each_chiller) ** Inv_c + (Inv_d + Inv_e * Q_nom_each_chiller) * log(Q_nom_each_chiller)
Capex_a1 = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Capex_a_ACH_USD = Capex_a_ACH_USD + Capex_a1
Opex_fixed_ACH_USD = Opex_fixed_ACH_USD + InvC * Inv_OM
Capex_ACH_USD = Capex_ACH_USD + InvC
return Capex_a_ACH_USD, Opex_fixed_ACH_USD, Capex_ACH_USD
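# Minimal stand-alone sketch to make the annualization step in calc_Cinv_ACH
# explicit (assumption: calc_capex_annualized in cea.analysis.costs.equations
# applies the standard capital-recovery/annuity factor; its exact handling of
# the IR_% units may differ).
def _annualized_capex_sketch(inv_c_usd, interest_rate_fraction, lifetime_yr):
    """Annualize an investment cost with the capital recovery factor."""
    crf = (interest_rate_fraction * (1.0 + interest_rate_fraction) ** lifetime_yr
           / ((1.0 + interest_rate_fraction) ** lifetime_yr - 1.0))
    return inv_c_usd * crf
# e.g. _annualized_capex_sketch(100000, 0.05, 20) is roughly 8024 USD/a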
class AbsorptionChiller(object):
__slots__ = ["code", "chiller_prop", "m_cw_kgpers", "m_hw_kgpers",
"s_e", "r_e", "s_g", "r_g", "a_e", "e_e", "a_g", "e_g"]
def __init__(self, chiller_prop, ACH_type):
self.chiller_prop = chiller_prop[chiller_prop['type'] == ACH_type]
# copy first row to self for faster lookup (avoid pandas __getitem__ in tight loops)
self.code = chiller_prop['code'].values[0]
# external flow rate of cooling water at the condenser and absorber
self.m_cw_kgpers = chiller_prop['m_cw'].values[0]
# external flow rate of hot water at the generator
self.m_hw_kgpers = chiller_prop['m_hw'].values[0]
self.s_e = chiller_prop['s_e'].values[0]
self.r_e = chiller_prop['r_e'].values[0]
self.s_g = chiller_prop['s_g'].values[0]
self.r_g = chiller_prop['r_g'].values[0]
self.a_e = chiller_prop['a_e'].values[0]
self.e_e = chiller_prop['e_e'].values[0]
self.a_g = chiller_prop['a_g'].values[0]
self.e_g = chiller_prop['e_g'].values[0]
def update_data(self, chiller_prop):
"""Due to how AbsorptionChiller is currently used (FIXME: can we fix this?), we somedimes need to update
the instance variables from the databaframe chiller_prop.
"""
if self.code != chiller_prop['code'].values[0]:
# only update if new code...
# print("Updating chiller_prop data! old code: {0}, new code: {1}".format(self.code, chiller_prop['code'].values[0]))
self.code = chiller_prop['code'].values[0]
self.m_cw_kgpers = chiller_prop['m_cw'].values[0]
self.m_hw_kgpers = chiller_prop['m_hw'].values[0]
self.s_e = chiller_prop['s_e'].values[0]
self.r_e = chiller_prop['r_e'].values[0]
self.s_g = chiller_prop['s_g'].values[0]
self.r_g = chiller_prop['r_g'].values[0]
self.a_e = chiller_prop['a_e'].values[0]
self.e_e = chiller_prop['e_e'].values[0]
self.a_g = chiller_prop['a_g'].values[0]
self.e_g = chiller_prop['e_g'].values[0]
def main(config):
"""
    run sample test cases for the absorption chiller model
test case 1) q_hw_W = 24213, q_chw_W = 20088, EER = 0.829, T_hw_out_C = 67.22 _[Kuhn, 2011]
test case 2) q_hw_W = 824105, q_chw_W = 1163011, EER = 1.41, T_hw_out_C = 165.93 _[Shirazi, 2016]
test case 3) q_hw_W = 623379, q_chw_W = 1163430, EER = 1.87, T_hw_out_C = 195.10 _[Shirazi, 2016]
..[Kuhn A., Ozgur-Popanda C., & Ziegler F., 2011] A 10kW Indirectly Fired Absorption Heat Pump: Concepts for a
reversible operation. 10th International Heat Pump Conference, 2011.
..[Shirazi A., Taylor R.A., White S.D., Morrison G.L.] A systematic parametric study and feasibility assessment
of solar-assisted single-effect, double-effect, and triple-effect absorption chillers for heating and cooling
applications. Energy Conversion and Management, 2016
"""
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
# Input parameters for test cases
case_1_dict = {'mdot_chw_kgpers':0.8, 'T_chw_sup_K': 280.0, 'T_chw_re_K': 286.0, 'T_hw_in_C': 84.6, 'ACH_type': 'single'}
case_2_dict = {'mdot_chw_kgpers': 39.7, 'T_chw_sup_K': 280.0, 'T_chw_re_K': 287.0, 'T_hw_in_C': 180,
'ACH_type': 'double'}
case_3_dict = {'mdot_chw_kgpers': 55.6, 'T_chw_sup_K': 280.0, 'T_chw_re_K': 285.0, 'T_hw_in_C': 210,
'ACH_type': 'triple'}
# Unpack parameters
case_dict = case_1_dict
mdot_chw_kgpers = case_dict['mdot_chw_kgpers']
T_chw_sup_K = case_dict['T_chw_sup_K']
T_chw_re_K = case_dict['T_chw_re_K']
T_hw_in_C = case_dict['T_hw_in_C']
T_ground_K = 300
ach_type = case_dict['ACH_type']
chiller_prop = AbsorptionChiller(pd.read_excel(locator.get_database_conversion_systems(), sheet_name="Absorption_chiller"), ach_type)
chiller_operation = calc_chiller_main(mdot_chw_kgpers, T_chw_sup_K, T_chw_re_K, T_hw_in_C, T_ground_K, chiller_prop)
print(chiller_operation)
    print('test_decentralized_buildings_cooling() succeeded. Please double check results in the description.')
if __name__ == '__main__':
main(cea.config.Configuration())
|
"""
Copyright (c) 2013 Timon Wong
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
import locale
import os
import subprocess
import sys
import tempfile
PY3K = sys.version_info >= (3, 0, 0)
class MarkupRenderer(object):
def __init__(self):
self.renderer_options = {}
def load_settings(self, global_setting, renderer_options):
self.renderer_options = renderer_options
@classmethod
def is_enabled(cls, filename, syntax):
return False
def render(self, text, **kwargs):
raise NotImplementedError()
class InputMethod(object):
STDIN = 1
TEMPFILE = 2
FILE = 3
class CommandlineRenderer(MarkupRenderer):
def __init__(self, input_method=InputMethod.STDIN, executable=None, args=[]):
super(CommandlineRenderer, self).__init__()
self.input_method = input_method
self.executable = executable
self.args = args
def pre_process_encoding(self, text, **kwargs):
return text.encode('utf-8')
def pre_process(self, text, **kwargs):
return text
def post_process(self, rendered_text, **kwargs):
return rendered_text
def post_process_encoding(self, rendered_text, **kwargs):
return rendered_text.decode('utf-8')
def render(self, text, **kwargs):
text = self.pre_process_encoding(text, **kwargs)
text = self.pre_process(text, **kwargs)
text = self.executable_check(text, kwargs['filename'])
text = self.post_process_encoding(text, **kwargs)
return self.post_process(text, **kwargs)
def executable_check(self, text, filename):
tempfile_ = None
result = ''
try:
args = [self.get_executable()]
if self.input_method == InputMethod.STDIN:
args.extend(self.get_args())
elif self.input_method == InputMethod.TEMPFILE:
_, ext = os.path.splitext(filename)
tempfile_ = tempfile.NamedTemporaryFile(suffix=ext)
tempfile_.write(text)
tempfile_.flush()
                args.extend(self.get_args(filename=tempfile_.name))
text = None
elif self.input_method == InputMethod.FILE:
args.extend(self.get_args(filename=filename))
text = None
else:
return u''
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=self.get_startupinfo())
result, errdata = proc.communicate(text)
if len(errdata) > 0:
print(errdata)
finally:
if tempfile_ is not None:
tempfile_.close() # Also delete file
return result.strip()
def get_executable(self):
if not PY3K and os.name == 'nt':
# [PY2K] On Windows, popen won't support unicode args
if isinstance(self.executable, unicode):
encoding = locale.getpreferredencoding()
return self.executable.encode(encoding)
return self.executable
def get_args(self, filename=None):
if not PY3K and os.name == 'nt':
# [PY2K] On Windows, popen won't support unicode args
encoding = locale.getpreferredencoding()
args = [arg if isinstance(arg, str) else arg.encode(encoding) for arg in self.args]
else:
args = self.args
return [arg.format(filename=filename) for arg in args]
def get_startupinfo(self):
if os.name != 'nt':
return None
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
return info
def renderer(renderer_type):
renderer_type.IS_VALID_RENDERER__ = True
return renderer_type
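# Hypothetical example (not part of the original plugin): a concrete renderer
# would subclass CommandlineRenderer, point it at an external executable, and
# opt in via the @renderer decorator. The 'markdown_py' executable and the
# file-extension check below are illustrative assumptions only.
@renderer
class ExampleMarkdownRenderer(CommandlineRenderer):
    def __init__(self):
        super(ExampleMarkdownRenderer, self).__init__(
            input_method=InputMethod.STDIN,
            executable='markdown_py',
            args=[])
    @classmethod
    def is_enabled(cls, filename, syntax):
        # enable for Markdown files or when a Markdown syntax is active
        return filename.endswith(('.md', '.markdown')) or 'Markdown' in syntax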
|
"""
Test that no StopIteration is raised inside a generator
"""
import asyncio
class RebornStopIteration(StopIteration):
"""
A class inheriting from StopIteration exception
"""
def gen_ok():
yield 1
yield 2
yield 3
def gen_stopiter():
yield 1
yield 2
yield 3
raise StopIteration # [stop-iteration-return]
def gen_stopiterchild():
yield 1
yield 2
yield 3
raise RebornStopIteration # [stop-iteration-return]
def gen_next_raises_stopiter():
g = gen_ok()
while True:
yield next(g) # [stop-iteration-return]
def gen_next_inside_try_except():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
return
def gen_next_inside_wrong_try_except():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
return
def gen_next_inside_wrong_try_except2():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
raise StopIteration # [stop-iteration-return]
def gen_in_for():
for el in gen_ok():
yield el
def gen_yield_from():
yield from gen_ok()
def gen_dont_crash_on_no_exception():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
raise
def gen_dont_crash_on_uninferable():
# https://github.com/PyCQA/pylint/issues/1779
yield from iter()
raise asyncio.TimeoutError()
def gen_next_with_sentinel():
yield next([], 42) # No bad return
from itertools import count
def generator_using_next():
counter = count()
number = next(counter)
yield number * 2
class SomeClassWithNext:
def next(self):
return iter([1, 2, 3])
def some_gen(self):
for value in self.next():
yield value
SomeClassWithNext().some_gen()
def something_invalid():
raise Exception('cannot iterate this')
def invalid_object_passed_to_next():
yield next(something_invalid()) # [stop-iteration-return]
|
import _plotly_utils.basevalidators
class RangebreaksValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="rangebreaks", parent_name="layout.xaxis", **kwargs):
super(RangebreaksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Rangebreak"),
data_docs=kwargs.pop(
"data_docs",
"""
bounds
Sets the lower and upper bounds of this axis
rangebreak. Can be used with `pattern`.
dvalue
Sets the size of each `values` item. The
default is one day in milliseconds.
enabled
Determines whether this axis rangebreak is
enabled or disabled. Please note that
`rangebreaks` only work for "date" axis type.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
pattern
Determines a pattern on the time line that
generates breaks. If *day of week* - days of
the week in English e.g. 'Sunday' or `sun`
(matching is case-insensitive and considers
only the first three characters), as well as
Sunday-based integers between 0 and 6. If
"hour" - hour (24-hour clock) as decimal
numbers between 0 and 24. for more info.
Examples: - { pattern: 'day of week', bounds:
[6, 1] } or simply { bounds: ['sat', 'mon'] }
breaks from Saturday to Monday (i.e. skips the
weekends). - { pattern: 'hour', bounds: [17, 8]
} breaks from 5pm to 8am (i.e. skips non-work
hours).
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
values
Sets the coordinate values corresponding to the
rangebreaks. An alternative to `bounds`. Use
`dvalue` to set the size of the values along
the axis.
""",
),
**kwargs
)
|
import unittest
import numpy as np
from bsym import ColourOperation, Configuration
from unittest.mock import patch
class ColourOperationTestCase( unittest.TestCase ):
"""Tests for colour operation methods"""
def test_symmetry_operation_is_initialised_from_a_matrix( self ):
matrix = np.array( [ [ 1, 0 ], [ 0, 1 ] ] )
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 } ]
co = ColourOperation( matrix, colour_mapping=mapping )
np.testing.assert_array_equal( co.matrix, matrix )
self.assertEqual( co.colour_mapping, mapping )
def test_from_vector( self ):
vector = [ 2, 3, 1 ]
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 }, { 1: 1, 0: 0 } ]
co = ColourOperation.from_vector( vector, mapping )
np.testing.assert_array_equal( co.matrix, np.array( [ [ 0, 0, 1 ], [ 1, 0, 0 ], [ 0, 1, 0 ] ] ) )
self.assertEqual( co.colour_mapping, mapping )
def test_from_vector_with_label( self ):
vector = [ 2, 3, 1 ]
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 } ]
label = 'A'
co = ColourOperation.from_vector( vector, mapping, label=label )
np.testing.assert_array_equal( co.matrix, np.array( [ [ 0, 0, 1 ], [ 1, 0, 0 ], [ 0, 1, 0 ] ] ) )
self.assertEqual( co.label, label )
self.assertEqual( co.colour_mapping, mapping )
def test_symmetry_operation_is_initialised_with_label( self ):
matrix = np.array( [ [ 1, 0 ], [ 0, 1 ] ] )
label = 'E'
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 } ]
co = ColourOperation( matrix, mapping, label=label )
self.assertEqual( co.label, label )
self.assertEqual( co.colour_mapping, mapping )
def test_from_vector_counting_from_zero( self ):
vector = [ 1, 2, 0 ]
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 } ]
co = ColourOperation.from_vector( vector, mapping, count_from_zero=True )
np.testing.assert_array_equal( co.matrix, np.array( [ [ 0, 0, 1 ], [ 1, 0, 0 ], [ 0, 1, 0 ] ] ) )
self.assertEqual( co.colour_mapping, mapping )
def test_operate_on( self ):
matrix = np.array( [ [ 0, 1, 0 ], [ 0, 0, 1 ], [ 1, 0, 0 ] ] )
colour_mapping = [ { 1:1, 2:2, 3:3 },
{ 1:2, 2:3, 3:1 },
{ 1:3, 2:2, 3:1 } ]
co = ColourOperation( matrix, colour_mapping )
configuration = Configuration( [ 1, 2, 3 ] )
co.operate_on( configuration )
np.testing.assert_array_equal( co.operate_on( configuration ).vector, np.array( [ 2, 1, 3 ] ) )
def test_mul( self ):
matrix_a = np.array( [ [ 1, 0 ], [ 0, 1 ] ] )
colour_mapping_a = [ { 0:1, 1:0 }, { 0:1, 1:0 } ]
matrix_b = np.array( [ [ 0, 1 ], [ 1, 0 ] ] )
colour_mapping_b = [ { 0:1, 1:0 }, { 0:1, 1:0 } ]
co_a = ColourOperation( matrix_a, colour_mapping_a )
co_b = ColourOperation( matrix_b, colour_mapping_b )
co_c = co_a * co_b
np.testing.assert_array_equal( co_c.matrix , np.array( [ [ 0, 1 ], [ 1, 0 ] ] ) )
self.assertEqual( co_c.colour_mapping, [ { 0:0, 1:1 }, { 0:0, 1:1 } ] )
if __name__ == '__main__':
unittest.main()
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/weapon/shared_wpn_heavy_blaster.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","wpn_heavy_blaster")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from .humannum import (
K,
M,
G,
T,
P,
E,
Z,
Y,
humannum,
parsenum,
parseint,
value_to_unit,
unit_to_value,
)
__all__ = [
'K',
'M',
'G',
'T',
'P',
'E',
'Z',
'Y',
'humannum',
'parsenum',
'parseint',
'value_to_unit',
'unit_to_value',
]
|
import datetime
from django.contrib import admin
from django.db.models import Q
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from models import Invoice, InvoiceItem
class InvoiceItemInline(admin.TabularInline):
fieldsets = (
(
None,
{
'fields': ('title', 'quantity', 'unit', 'unit_price', 'tax_rate', 'weight')
}
),
)
model = InvoiceItem
extra = 0
class OverdueFilter(admin.SimpleListFilter):
title = _('overdue')
parameter_name = 'overdue'
def lookups(self, request, model_admin):
return (
('no', _('no')),
('yes', _('yes')),
)
def queryset(self, request, queryset):
if self.value() == 'no':
return queryset.filter(Q(date_due__gt=datetime.datetime.combine(now().date(), datetime.time.max))|Q(status=Invoice.STATUS.PAID))
if self.value() == 'yes':
return queryset.filter(date_due__lt=datetime.datetime.combine(now().date(), datetime.time.max)).exclude(status=Invoice.STATUS.PAID)
class InvoiceAdmin(admin.ModelAdmin):
date_hierarchy = 'date_issue'
list_display = ['pk', 'type', 'full_number', 'status', 'customer_name', 'customer_country',
'subtotal', 'vat', 'total', 'currency', 'date_issue', 'payment_term', 'is_overdue_boolean', 'is_paid']
list_editable = ['status']
list_filter = ['type', 'status', 'payment_method', OverdueFilter,
#'language', 'currency'
]
search_fields = ['number', 'subtitle', 'note', 'supplier_name', 'customer_name', 'shipping_name']
inlines = (InvoiceItemInline, )
fieldsets = (
(_(u'General information'), {
'fields': (
'type', 'number', 'full_number', 'status', 'subtitle', 'language', 'note',
'date_issue', 'date_tax_point', 'date_due', 'date_sent'
)
}),
(_(u'Contact details'), {
'fields': (
'issuer_name', 'issuer_email', 'issuer_phone'
)
}),
(_(u'Payment details'), {
'fields': (
'currency', 'discount', 'credit',
#'already_paid',
'payment_method', 'constant_symbol', 'variable_symbol', 'specific_symbol', 'reference',
'bank_name', 'bank_country', 'bank_city', 'bank_street', 'bank_zip', 'bank_iban', 'bank_swift_bic'
)
}),
(_(u'Supplier details'), {
'fields': (
'supplier_name', 'supplier_street', 'supplier_zip', 'supplier_city', 'supplier_country',
'supplier_registration_id', 'supplier_tax_id', 'supplier_vat_id', 'supplier_additional_info'
)
}),
(_(u'Customer details'), {
'fields': (
'customer_name', 'customer_street', 'customer_zip', 'customer_city', 'customer_country',
'customer_registration_id', 'customer_tax_id', 'customer_vat_id', 'customer_additional_info',
)
}),
(_(u'Shipping details'), {
'fields': (
'shipping_name', 'shipping_street', 'shipping_zip', 'shipping_city', 'shipping_country'
)
})
)
def is_overdue_boolean(self, invoice):
return invoice.is_overdue
is_overdue_boolean.boolean = True
is_overdue_boolean.short_description = _(u'Is overdue')
def is_paid(self, invoice):
return invoice.status == Invoice.STATUS.PAID
is_paid.boolean = True
is_paid.short_description = _(u'Is paid')
admin.site.register(Invoice, InvoiceAdmin)
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('recipe', '0010_auto_20171114_1443'),
]
operations = [
migrations.RemoveField(
model_name='direction',
name='recipe',
),
migrations.DeleteModel(
name='Direction',
),
]
|
__author__ = 'Anubhav Jain'
__copyright__ = 'Copyright 2014, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Oct 03, 2014'
|
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'agile-analytics'
copyright = u'2016, Chris Heisel'
author = u'Chris Heisel'
version = u'0.1'
release = u'0.1'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = True
html_theme = 'alabaster'
html_static_path = ['_static']
htmlhelp_basename = 'agile-analyticsdoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
latex_documents = [
(master_doc, 'agile-analytics.tex', u'agile-analytics Documentation',
u'Chris Heisel', 'manual'),
]
man_pages = [
(master_doc, 'agile-analytics', u'agile-analytics Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'agile-analytics', u'agile-analytics Documentation',
author, 'agile-analytics', 'One line description of project.',
'Miscellaneous'),
]
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/event_perk/shared_fed_dub_2x10_honorguard_deed.iff"
result.attribute_template_id = 2
result.stfName("event_perk","fed_dub_2x10_honorguard_deed_name")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/container/drum/shared_pob_ship_loot_box.iff"
result.attribute_template_id = -1
result.stfName("space/space_interaction","pob_loot")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
import TransferErrors as TE
import cPickle as pickle
with open('stuck.pkl','rb') as pklfile:
stuck = pickle.load(pklfile)
TE.makeBasicTable(stuck,TE.workdir+'html/table.html',TE.webdir+'table.html')
TE.makeCSV(stuck,TE.webdir+'data.csv')
for basis in [-6,-5,-4,-3,-1,1,2]:
TE.makeJson(stuck,TE.webdir+('stuck_%i'%basis).replace('-','m')+'.json',basis)
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/naboo/shared_waterfall_naboo_falls_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from workshop.views import WorkshopRegistrationListView, WorkshopDetailView, WorkshopRegistrationUpdateView, \
WorkshopRegisterFormView, WorkshopListView, WorkshopFeedbackCreateView, WorkshopGalleryCreateView, \
WorkshopGalleryListView, WorkshopGalleryDeleteView, WorkshopCreateView, WorkshopUpdateView, WorkshopDeleteView
urlpatterns = [
url(r'^$', WorkshopListView.as_view(), name='workshop_list'),
url(r'^create/$', login_required(WorkshopCreateView.as_view()), name='workshop_create'),
url(r'^(?P<workshop_id>[0-9]+)/$', WorkshopDetailView.as_view(), name='workshop_detail'),
# TODO(2) Fix update and uncomment
# url(r'^(?P<pk>[0-9]+)/update/$', login_required(WorkshopUpdateView.as_view()), name='workshopdetail_update'),
url(r'^(?P<pk>[0-9]+)/delete/$', login_required(WorkshopDeleteView.as_view()), name='workshop_delete'),
url(r'^(?P<workshop_id>[0-9]+)/register/$', WorkshopRegisterFormView.as_view(), name='workshop_register'),
url(r'^(?P<workshop_id>[0-9]+)/register/list/$',
login_required(WorkshopRegistrationListView.as_view()), name='workshop_registration_list'),
url(r'^(?P<workshop_id>[0-9]+)/register/update/$',
login_required(WorkshopRegistrationUpdateView.as_view()), name='workshop_update'),
url(r'^success/$',
TemplateView.as_view(template_name='workshop/success.html'), name='workshop_registration_success'),
url(r'^(?P<workshop_id>[0-9]+)/feedback/$', WorkshopFeedbackCreateView.as_view(), name='workshop_feedback'),
url(r'^feedback/success/$',
TemplateView.as_view(template_name='workshop/success_feedback.html'), name='feedback_success'),
url(r'^(?P<pk>[0-9]+)/add-image/$', login_required(WorkshopGalleryCreateView.as_view()), name='image_create'),
url(r'^(?P<pk>[0-9]+)/gallery/$', WorkshopGalleryListView.as_view(), name='image_list'),
url(r'^image/(?P<pk>[0-9]+)/delete/$', login_required(WorkshopGalleryDeleteView.as_view()), name='image_delete'),
]
|
EAST = None
NORTH = None
WEST = None
SOUTH = None
class Robot:
def __init__(self, direction=NORTH, x_pos=0, y_pos=0):
pass
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("hordak", "0005_account_currencies")]
operations = [
migrations.RunSQL(
"""
CREATE OR REPLACE FUNCTION check_leg()
RETURNS trigger AS
$$
DECLARE
tx_id INT;
non_zero RECORD;
BEGIN
IF (TG_OP = 'DELETE') THEN
tx_id := OLD.transaction_id;
ELSE
tx_id := NEW.transaction_id;
END IF;
SELECT ABS(SUM(amount)) AS total, amount_currency AS currency
INTO non_zero
FROM hordak_leg
WHERE transaction_id = tx_id
GROUP BY amount_currency
HAVING ABS(SUM(amount)) > 0
LIMIT 1;
IF FOUND THEN
RAISE EXCEPTION 'Sum of transaction amounts in each currency must be 0. Currency % has non-zero total %',
non_zero.currency, non_zero.total;
END IF;
RETURN NEW;
END;
$$
LANGUAGE plpgsql;
"""
)
]
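# Illustration (not part of the migration): once this function is installed as a
# trigger on hordak_leg, a transaction whose legs are +100.00 USD and -100.00 USD
# sums to zero per currency and is accepted, whereas legs of +100.00 USD and
# -90.00 USD leave a non-zero total of 10.00 USD and raise the exception above.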
|
from thriftpy.protocol import TJSONProtocol
from thriftpy.thrift import TPayload, TType
from thriftpy.transport import TMemoryBuffer
from thriftpy._compat import u
import thriftpy.protocol.json as proto
class TItem(TPayload):
thrift_spec = {
1: (TType.I32, "id"),
2: (TType.LIST, "phones", (TType.STRING)),
}
default_spec = [("id", None), ("phones", None)]
def test_map_to_obj():
val = [{"key": "ratio", "value": "0.618"}]
spec = [TType.STRING, TType.DOUBLE]
obj = proto.map_to_obj(val, spec)
assert {"ratio": 0.618} == obj
def test_map_to_json():
obj = {"ratio": 0.618}
spec = [TType.STRING, TType.DOUBLE]
json = proto.map_to_json(obj, spec)
assert [{"key": "ratio", "value": 0.618}] == json
def test_list_to_obj():
val = [4, 8, 4, 12, 67]
spec = TType.I32
obj = proto.list_to_obj(val, spec)
assert [4, 8, 4, 12, 67] == obj
def test_list_to_json():
val = [4, 8, 4, 12, 67]
spec = TType.I32
json = proto.list_to_json(val, spec)
assert [4, 8, 4, 12, 67] == json
def test_struct_to_json():
obj = TItem(id=13, phones=["5234", "12346456"])
json = proto.struct_to_json(obj)
assert {"id": 13, "phones": ["5234", "12346456"]} == json
def test_struct_to_obj():
json = {"id": 13, "phones": ["5234", "12346456"]}
obj = TItem()
obj = proto.struct_to_obj(json, obj)
assert obj.id == 13 and obj.phones == ["5234", "12346456"]
def test_json_proto_api_write():
obj = TItem(id=13, phones=["5234", "12346456"])
trans = TMemoryBuffer()
p = TJSONProtocol(trans)
p.write_struct(obj)
data = trans.getvalue().decode("utf-8")
length = data[0:4]
import json
data = json.loads(data[4:])
assert length == "\x00\x00\x00S" and data == {
"metadata": {"version": 1},
"payload": {"phones": ["5234", "12346456"], "id": 13}}
def test_json_proto_api_read():
obj = TItem(id=13, phones=["5234", "12346456"])
trans = TMemoryBuffer()
p = TJSONProtocol(trans)
p.write_struct(obj)
obj2 = TItem()
obj2 = p.read_struct(obj2)
assert obj.id == 13 and obj.phones == ["5234", "12346456"]
def test_unicode_string():
class Foo(TPayload):
thrift_spec = {
1: (TType.STRING, "name")
}
default_spec = [("name", None)]
trans = TMemoryBuffer()
p = TJSONProtocol(trans)
foo = Foo(name=u('pão de açúcar'))
foo.write(p)
foo2 = Foo()
foo2.read(p)
assert foo == foo2
|
"""Customizations for the cloudsearchdomain command.
This module customizes the cloudsearchdomain command:
* Add validation that --endpoint-url is required.
"""
def register_cloudsearchdomain(cli):
cli.register_last('calling-command.cloudsearchdomain',
validate_endpoint_url)
def validate_endpoint_url(parsed_globals, **kwargs):
if parsed_globals.endpoint_url is None:
        raise ValueError(
"--endpoint-url is required for cloudsearchdomain commands")
|
from http import HTTPStatus
from typing import Callable
from typing import Dict
from typing import List
from typing import Union
import demistomock as demisto
import requests
from CommonServerPython import *
from CommonServerUserPython import *
from intezer_sdk import consts
from intezer_sdk.analysis import Analysis
from intezer_sdk.analysis import get_analysis_by_id
from intezer_sdk.analysis import get_latest_analysis
from intezer_sdk.api import IntezerApi
from intezer_sdk.errors import AnalysisIsAlreadyRunning
from intezer_sdk.errors import AnalysisIsStillRunning
from intezer_sdk.errors import FamilyNotFoundError
from intezer_sdk.errors import HashDoesNotExistError
from intezer_sdk.errors import InvalidApiKey
from intezer_sdk.family import Family
from intezer_sdk.sub_analysis import SubAnalysis
from requests import HTTPError
''' CONSTS '''
requests.packages.urllib3.disable_warnings()
IS_AVAILABLE_URL = 'is-available'
dbot_score_by_verdict = {
'malicious': 3,
'suspicious': 2,
'trusted': 1,
'neutral': 1,
'no_threats': 1
}
''' HELPER FUNCTIONS '''
def _get_missing_file_result(file_hash: str) -> CommandResults:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
return CommandResults(
readable_output=f'The Hash {file_hash} was not found on Intezer genome database',
outputs={
outputPaths['dbotscore']: dbot
}
)
def _get_missing_analysis_result(analysis_id: str, sub_analysis_id: str = None) -> CommandResults:
if not sub_analysis_id:
output = f'The Analysis {analysis_id} was not found on Intezer Analyze'
else:
output = f'Could not find the analysis \'{analysis_id}\' or the sub analysis \'{sub_analysis_id}\''
return CommandResults(
readable_output=output
)
def _get_missing_family_result(family_id: str) -> CommandResults:
return CommandResults(
readable_output=f'The Family {family_id} was not found on Intezer Analyze'
)
def _get_analysis_running_result(analysis_id: str = None, response: requests.Response = None) -> CommandResults:
if response:
analysis_id = response.json()['result_url'].split('/')[2]
context_json = {
'ID': analysis_id,
'Status': 'InProgress'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
readable_output='Analysis is still in progress',
outputs=context_json
)
''' COMMANDS '''
def check_is_available(intezer_api: IntezerApi, args: dict) -> str:
try:
response = intezer_api.get_url_result(f'/{IS_AVAILABLE_URL}')
return 'ok' if response else 'Empty response from intezer service'
except InvalidApiKey as error:
return f'Invalid API key received.\n{error}'
except HTTPError as error:
return f'Error occurred when reaching Intezer Analyze. Please check Analyze Base URL. \n{error}'
except ConnectionError as error:
return f'Error connecting to Analyze Base url.\n{error}'
def analyze_by_hash_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
file_hash = args.get('file_hash')
if not file_hash:
raise ValueError('Missing file hash')
analysis = Analysis(file_hash=file_hash, api=intezer_api)
try:
analysis.send()
analysis_id = analysis.analysis_id
context_json = {
'ID': analysis.analysis_id,
'Status': 'Created',
'type': 'File'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
outputs=context_json,
readable_output='Analysis created successfully: {}'.format(analysis_id)
)
except HashDoesNotExistError:
return _get_missing_file_result(file_hash)
except AnalysisIsAlreadyRunning as error:
return _get_analysis_running_result(response=error.response)
def get_latest_result_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
file_hash = args.get('file_hash')
if not file_hash:
raise ValueError('Missing file hash')
latest_analysis = get_latest_analysis(file_hash=file_hash, api=intezer_api)
if not latest_analysis:
return _get_missing_file_result(file_hash)
return enrich_dbot_and_display_file_analysis_results(latest_analysis.result())
def analyze_by_uploaded_file_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
file_id = args.get('file_entry_id')
file_data = demisto.getFilePath(file_id)
try:
analysis = Analysis(file_path=file_data['path'], api=intezer_api)
analysis.send()
context_json = {
'ID': analysis.analysis_id,
'Status': 'Created',
'type': 'File'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
outputs=context_json,
readable_output='Analysis created successfully: {}'.format(analysis.analysis_id)
)
except AnalysisIsAlreadyRunning as error:
return _get_analysis_running_result(response=error.response)
def check_analysis_status_and_get_results_command(intezer_api: IntezerApi, args: dict) -> List[CommandResults]:
analysis_type = args.get('analysis_type', 'File')
analysis_ids = argToList(args.get('analysis_id'))
indicator_name = args.get('indicator_name')
command_results = []
for analysis_id in analysis_ids:
try:
if analysis_type == 'Endpoint':
response = intezer_api.get_url_result(f'/endpoint-analyses/{analysis_id}')
analysis_result = response.json()['result']
else:
analysis = get_analysis_by_id(analysis_id, api=intezer_api)
analysis_result = analysis.result()
if analysis_result and analysis_type == 'Endpoint':
command_results.append(
enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name))
else:
command_results.append(enrich_dbot_and_display_file_analysis_results(analysis_result))
except HTTPError as http_error:
if http_error.response.status_code == HTTPStatus.CONFLICT:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
elif http_error.response.status_code == HTTPStatus.NOT_FOUND:
command_results.append(_get_missing_analysis_result(analysis_id))
else:
raise http_error
except AnalysisIsStillRunning:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
return command_results
def get_analysis_sub_analyses_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
try:
analysis = get_analysis_by_id(analysis_id, api=intezer_api)
    except HTTPError as error:
        if error.response.status_code == HTTPStatus.NOT_FOUND:
            return _get_missing_analysis_result(analysis_id=str(analysis_id))
        raise
except AnalysisIsStillRunning:
return _get_analysis_running_result(analysis_id=str(analysis_id))
sub_analyses: List[SubAnalysis] = analysis.get_sub_analyses()
all_sub_analyses_ids = [sub.analysis_id for sub in sub_analyses]
sub_analyses_table = tableToMarkdown('Sub Analyses', all_sub_analyses_ids, headers=['Analysis IDs'])
context_json = {
'ID': analysis.analysis_id,
'SubAnalysesIDs': all_sub_analyses_ids
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
readable_output=sub_analyses_table,
outputs=context_json,
raw_response=all_sub_analyses_ids
)
def get_analysis_code_reuse_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
sub_analysis_id = args.get('sub_analysis_id', 'root')
try:
sub_analysis: SubAnalysis = SubAnalysis(analysis_id=sub_analysis_id,
composed_analysis_id=analysis_id,
sha256='',
source='',
api=intezer_api)
sub_analysis_code_reuse = sub_analysis.code_reuse
    except HTTPError as error:
        if error.response.status_code == HTTPStatus.NOT_FOUND:
            return _get_missing_analysis_result(analysis_id=str(analysis_id))
        elif error.response.status_code == HTTPStatus.CONFLICT:
            return _get_analysis_running_result(analysis_id=str(analysis_id))
        raise
if not sub_analysis_code_reuse:
return CommandResults(
readable_output='No code reuse for this analysis'
)
families = sub_analysis_code_reuse.pop('families') if 'families' in sub_analysis_code_reuse else None
readable_output = tableToMarkdown('Code Reuse', sub_analysis_code_reuse)
if families:
readable_output += '\nFamilies:\n'
readable_output += '\n'.join(tableToMarkdown(family['family_name'], family) for family in families)
is_root = sub_analysis_id == 'root'
if is_root:
context_json = {
'Intezer.Analysis(obj.ID == val.ID)': {
'ID': analysis_id,
'CodeReuse': sub_analysis_code_reuse,
'CodeReuseFamilies': families
}
}
else:
context_json = {
'Intezer.Analysis(obj.RootAnalysis == val.ID).SubAnalyses(obj.ID == val.ID)': {
'ID': sub_analysis_id,
'RootAnalysis': analysis_id,
'CodeReuse': sub_analysis_code_reuse,
'CodeReuseFamilies': families
}
}
return CommandResults(
readable_output=readable_output,
outputs=context_json,
raw_response=sub_analysis.code_reuse
)
def get_analysis_metadata_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
sub_analysis_id = args.get('sub_analysis_id', 'root')
try:
sub_analysis: SubAnalysis = SubAnalysis(analysis_id=sub_analysis_id,
composed_analysis_id=analysis_id,
sha256='',
source='',
api=intezer_api)
sub_analysis_metadata = sub_analysis.metadata
    except HTTPError as error:
        if error.response.status_code == HTTPStatus.NOT_FOUND:
            return _get_missing_analysis_result(analysis_id=str(analysis_id))
        elif error.response.status_code == HTTPStatus.CONFLICT:
            return _get_analysis_running_result(analysis_id=str(analysis_id))
        raise
metadata_table = tableToMarkdown('Analysis Metadata', sub_analysis_metadata)
is_root = sub_analysis_id == 'root'
if is_root:
context_json = {
'Intezer.Analysis(obj.ID == val.ID)': {
'ID': analysis_id,
'Metadata': sub_analysis_metadata
}
}
else:
context_json = {
'Intezer.Analysis(obj.RootAnalysis == val.ID).SubAnalyses(obj.ID == val.ID)': {
'ID': sub_analysis_id,
'RootAnalysis': analysis_id,
'Metadata': sub_analysis_metadata
}
}
return CommandResults(
readable_output=metadata_table,
outputs=context_json,
raw_response=sub_analysis_metadata
)
def get_family_info_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
family_id = args.get('family_id')
family = Family(family_id, api=intezer_api)
try:
family.fetch_info()
except FamilyNotFoundError:
return _get_missing_family_result(str(family_id))
output = {
'ID': family_id,
'Name': family.name,
'Type': family.type
}
markdown = tableToMarkdown('Family Info', output)
return CommandResults(
readable_output=markdown,
outputs_prefix='Intezer.Family',
outputs=output
)
def enrich_dbot_and_display_file_analysis_results(intezer_result):
verdict = intezer_result.get('verdict')
sha256 = intezer_result.get('sha256')
analysis_id = intezer_result.get('analysis_id')
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': sha256,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
file = {'SHA256': sha256, 'Metadata': intezer_result, 'ExistsInIntezer': True}
if verdict == 'malicious':
file['Malicious'] = {'Vendor': 'Intezer'}
md = tableToMarkdown('Analysis Report', intezer_result)
presentable_result = '## Intezer File analysis result\n'
presentable_result += f' SHA256: {sha256}\n'
presentable_result += f' Verdict: **{verdict}** ({intezer_result["sub_verdict"]})\n'
if 'family_name' in intezer_result:
presentable_result += f'Family: **{intezer_result["family_name"]}**\n'
presentable_result += f'[Analysis Link]({intezer_result["analysis_url"]})\n'
presentable_result += md
return CommandResults(
readable_output=presentable_result,
raw_response=intezer_result,
outputs={
outputPaths['dbotscore']: dbot,
outputPaths['file']: file,
'Intezer.Analysis(val.ID && val.ID == obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
}
)
def enrich_dbot_and_display_endpoint_analysis_results(intezer_result, indicator_name=None) -> CommandResults:
verdict = intezer_result['verdict']
computer_name = intezer_result['computer_name']
analysis_id = intezer_result['analysis_id']
dbot = {
'Vendor': 'Intezer',
'Type': 'hostname',
'Indicator': indicator_name if indicator_name else computer_name,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
endpoint = {'Metadata': intezer_result}
presentable_result = '## Intezer Endpoint analysis result\n'
presentable_result += f'Host Name: {computer_name}\n'
presentable_result += f' Verdict: **{verdict}**\n'
if intezer_result.get('families') is not None:
presentable_result += f'Families: **{intezer_result["families"]}**\n'
presentable_result += f' Scan Time: {intezer_result["scan_start_time"]}\n'
presentable_result += f'[Analysis Link]({intezer_result["analysis_url"]})\n'
return CommandResults(
readable_output=presentable_result,
raw_response=intezer_result,
outputs={
outputPaths['dbotscore']: dbot,
'Endpoint': endpoint,
'Intezer.Analysis(val.ID && val.ID == obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
}
)
''' EXECUTION CODE '''
def main():
command = None
try:
handle_proxy()
intezer_api_key = demisto.getParam('APIKey')
intezer_base_url_param = demisto.getParam('AnalyzeBaseURL')
use_ssl = not demisto.params().get('insecure', False)
analyze_base_url = intezer_base_url_param or consts.BASE_URL
intezer_api = IntezerApi(consts.API_VERSION, intezer_api_key, analyze_base_url, use_ssl)
command_handlers: Dict[str, Callable[[IntezerApi, dict], Union[List[CommandResults], CommandResults, str]]] = {
'test-module': check_is_available,
'intezer-analyze-by-hash': analyze_by_hash_command,
'intezer-analyze-by-file': analyze_by_uploaded_file_command,
'intezer-get-latest-report': get_latest_result_command,
'intezer-get-analysis-result': check_analysis_status_and_get_results_command,
'intezer-get-sub-analyses': get_analysis_sub_analyses_command,
'intezer-get-analysis-code-reuse': get_analysis_code_reuse_command,
'intezer-get-analysis-metadata': get_analysis_metadata_command,
'intezer-get-family-info': get_family_info_command
}
command = demisto.command()
command_handler = command_handlers[command]
command_results = command_handler(intezer_api, demisto.args())
return_results(command_results)
except Exception as e:
return_error(f'Failed to execute {command} command. Error: {str(e)}')
if __name__ == "__builtin__" or __name__ == "builtins":
main()
|
from schema import *
|
"""Test CLR field support."""
import System
import pytest
from Python.Test import FieldTest
def test_public_instance_field():
"""Test public instance fields."""
ob = FieldTest()
assert ob.PublicField == 0
ob.PublicField = 1
assert ob.PublicField == 1
with pytest.raises(TypeError):
del FieldTest().PublicField
def test_public_static_field():
"""Test public static fields."""
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
FieldTest.PublicStaticField = 1
assert FieldTest.PublicStaticField == 1
assert ob.PublicStaticField == 1
ob.PublicStaticField = 0
assert ob.PublicStaticField == 0
with pytest.raises(TypeError):
del FieldTest.PublicStaticField
with pytest.raises(TypeError):
del FieldTest().PublicStaticField
def test_protected_instance_field():
"""Test protected instance fields."""
ob = FieldTest()
assert ob.ProtectedField == 0
ob.ProtectedField = 1
assert ob.ProtectedField == 1
with pytest.raises(TypeError):
del FieldTest().ProtectedField
def test_protected_static_field():
"""Test protected static fields."""
ob = FieldTest()
assert FieldTest.ProtectedStaticField == 0
FieldTest.ProtectedStaticField = 1
assert FieldTest.ProtectedStaticField == 1
assert ob.ProtectedStaticField == 1
ob.ProtectedStaticField = 0
assert ob.ProtectedStaticField == 0
with pytest.raises(TypeError):
del FieldTest.ProtectedStaticField
with pytest.raises(TypeError):
del FieldTest().ProtectedStaticField
def test_read_only_instance_field():
"""Test readonly instance fields."""
assert FieldTest().ReadOnlyField == 0
with pytest.raises(TypeError):
FieldTest().ReadOnlyField = 1
with pytest.raises(TypeError):
del FieldTest().ReadOnlyField
def test_read_only_static_field():
"""Test readonly static fields."""
ob = FieldTest()
assert FieldTest.ReadOnlyStaticField == 0
assert ob.ReadOnlyStaticField == 0
with pytest.raises(TypeError):
FieldTest.ReadOnlyStaticField = 1
with pytest.raises(TypeError):
FieldTest().ReadOnlyStaticField = 1
with pytest.raises(TypeError):
del FieldTest.ReadOnlyStaticField
with pytest.raises(TypeError):
del FieldTest().ReadOnlyStaticField
def test_constant_field():
"""Test const fields."""
ob = FieldTest()
assert FieldTest.ConstField == 0
assert ob.ConstField == 0
with pytest.raises(TypeError):
FieldTest().ConstField = 1
with pytest.raises(TypeError):
FieldTest.ConstField = 1
with pytest.raises(TypeError):
del FieldTest().ConstField
with pytest.raises(TypeError):
del FieldTest.ConstField
def test_internal_field():
"""Test internal fields."""
with pytest.raises(AttributeError):
_ = FieldTest().InternalField
with pytest.raises(AttributeError):
_ = FieldTest().InternalStaticField
with pytest.raises(AttributeError):
_ = FieldTest.InternalStaticField
def test_private_field():
"""Test private fields."""
with pytest.raises(AttributeError):
_ = FieldTest().PrivateField
with pytest.raises(AttributeError):
_ = FieldTest().PrivateStaticField
with pytest.raises(AttributeError):
_ = FieldTest.PrivateStaticField
def test_field_descriptor_get_set():
"""Test field descriptor get / set."""
# This test ensures that setting an attribute implemented with
# a descriptor actually goes through the descriptor (rather than
    # silently replacing the descriptor in the instance or type dict).
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
assert ob.PublicStaticField == 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
ob.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
FieldTest.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
def test_field_descriptor_wrong_type():
"""Test setting a field using a value of the wrong type."""
with pytest.raises(ValueError):
FieldTest().PublicField = "spam"
def test_field_descriptor_abuse():
"""Test field descriptor abuse."""
desc = FieldTest.__dict__['PublicField']
with pytest.raises(TypeError):
desc.__get__(0, 0)
with pytest.raises(TypeError):
desc.__set__(0, 0)
def test_boolean_field():
"""Test boolean fields."""
# change this to true / false later for Python 2.3?
ob = FieldTest()
assert ob.BooleanField is False
ob.BooleanField = True
assert ob.BooleanField is True
ob.BooleanField = False
assert ob.BooleanField is False
ob.BooleanField = 1
assert ob.BooleanField is True
ob.BooleanField = 0
assert ob.BooleanField is False
def test_sbyte_field():
"""Test sbyte fields."""
ob = FieldTest()
assert ob.SByteField == 0
ob.SByteField = 1
assert ob.SByteField == 1
def test_byte_field():
"""Test byte fields."""
ob = FieldTest()
assert ob.ByteField == 0
ob.ByteField = 1
assert ob.ByteField == 1
def test_char_field():
"""Test char fields."""
ob = FieldTest()
assert ob.CharField == u'A'
assert ob.CharField == 'A'
ob.CharField = 'B'
assert ob.CharField == u'B'
assert ob.CharField == 'B'
ob.CharField = u'C'
assert ob.CharField == u'C'
assert ob.CharField == 'C'
def test_int16_field():
"""Test int16 fields."""
ob = FieldTest()
assert ob.Int16Field == 0
ob.Int16Field = 1
assert ob.Int16Field == 1
def test_int32_field():
"""Test int32 fields."""
ob = FieldTest()
assert ob.Int32Field == 0
ob.Int32Field = 1
assert ob.Int32Field == 1
def test_int64_field():
"""Test int64 fields."""
ob = FieldTest()
assert ob.Int64Field == 0
ob.Int64Field = 1
assert ob.Int64Field == 1
def test_uint16_field():
"""Test uint16 fields."""
ob = FieldTest()
assert ob.UInt16Field == 0
ob.UInt16Field = 1
assert ob.UInt16Field == 1
def test_uint32_field():
"""Test uint32 fields."""
ob = FieldTest()
assert ob.UInt32Field == 0
ob.UInt32Field = 1
assert ob.UInt32Field == 1
def test_uint64_field():
"""Test uint64 fields."""
ob = FieldTest()
assert ob.UInt64Field == 0
ob.UInt64Field = 1
assert ob.UInt64Field == 1
def test_single_field():
"""Test single fields."""
ob = FieldTest()
assert ob.SingleField == 0.0
ob.SingleField = 1.1
assert ob.SingleField == 1.1
def test_double_field():
"""Test double fields."""
ob = FieldTest()
assert ob.DoubleField == 0.0
ob.DoubleField = 1.1
assert ob.DoubleField == 1.1
def test_decimal_field():
"""Test decimal fields."""
ob = FieldTest()
assert ob.DecimalField == System.Decimal(0)
ob.DecimalField = System.Decimal(1)
assert ob.DecimalField == System.Decimal(1)
def test_string_field():
"""Test string fields."""
ob = FieldTest()
assert ob.StringField == "spam"
ob.StringField = "eggs"
assert ob.StringField == "eggs"
def test_interface_field():
"""Test interface fields."""
from Python.Test import Spam, ISpam
ob = FieldTest()
assert ISpam(ob.SpamField).GetValue() == "spam"
assert ob.SpamField.GetValue() == "spam"
ob.SpamField = Spam("eggs")
assert ISpam(ob.SpamField).GetValue() == "eggs"
assert ob.SpamField.GetValue() == "eggs"
def test_object_field():
"""Test ob fields."""
ob = FieldTest()
assert ob.ObjectField is None
ob.ObjectField = System.String("spam")
assert ob.ObjectField == "spam"
ob.ObjectField = System.Int32(1)
assert ob.ObjectField == 1
ob.ObjectField = None
assert ob.ObjectField is None
def test_enum_field():
"""Test enum fields."""
from Python.Test import ShortEnum
ob = FieldTest()
assert ob.EnumField == ShortEnum.Zero
ob.EnumField = ShortEnum.One
assert ob.EnumField == ShortEnum.One
def test_nullable_field():
"""Test nullable fields."""
ob = FieldTest()
ob.StringField = None
assert ob.StringField is None
ob.ObjectField = None
assert ob.ObjectField is None
ob.SpamField = None
assert ob.SpamField is None
# Primitive types and enums should not be set to null.
with pytest.raises(TypeError):
FieldTest().Int32Field = None
with pytest.raises(TypeError):
FieldTest().EnumField = None
|
from msrest.serialization import Model
class Usage(Model):
"""Describes Storage Resource Usage.
:param unit: The unit of measurement. Possible values include: 'Count',
'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond'
:type unit: str or :class:`UsageUnit
<azure.mgmt.storage.v2015_06_15.models.UsageUnit>`
:param current_value: The current count of the allocated resources in the
subscription.
:type current_value: int
:param limit: The maximum count of the resources that can be allocated in
the subscription.
:type limit: int
:param name: The name of the type of usage.
:type name: :class:`UsageName
<azure.mgmt.storage.v2015_06_15.models.UsageName>`
"""
_validation = {
'unit': {'required': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'UsageUnit'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(self, unit, current_value, limit, name):
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
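# Hedged usage sketch, not part of the generated model: the attribute map above
# translates the snake_case Python attributes to the camelCase wire keys
# (current_value <-> currentValue). The unit string and numbers below are
# invented example values; in the SDK these objects are built from the service
# response rather than by hand, and name would normally be a UsageName.
if __name__ == '__main__':
    example = Usage(unit='Count', current_value=5, limit=250, name=None)
    print('%s: %s of %s used' % (example.unit, example.current_value, example.limit))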
|
import pangloss
import sys,getopt,cPickle,numpy
import scipy.stats as stats
def Calibrate(argv):
"""
NAME
Calibrate.py
PURPOSE
Transform the results of the lightcone reconstruction process,
Pr(kappah|D), into our target PDF, Pr(kappa|D).
COMMENTS
All PDF input is provided as a list of samples. There are two
modes of operation:
1) The Pr(kappah|C) for an ensemble of calibration lightcones are
compressed into a single number (currently the
median), and then combined with the true kappa values to make
Pr(kappa,kappah|C). This is written out as a 2D sample list.
2) The Pr(kappah|D) for a single observed lightcone is compressed
into a single number (currently the median). This is then used
to take a slice from Pr(kappa,kappah|C) to make Pr(kappa|D,C).
Both 1 and 2 can be carried out in series if desired (Mode=3).
FLAGS
-h Print this message [0]
INPUTS
configfile Plain text file containing Pangloss configuration
OPTIONAL INPUTS
--mode Operating mode 1,2 or 3. See COMMENTS above.
OUTPUTS
stdout Useful information
samples From 1) Pr(kappa,kappah|C) or 2) Pr(kappa|D,C)
EXAMPLE
Calibrate.py example.config
BUGS
AUTHORS
This file is part of the Pangloss project, distributed under the
GPL v2, by Tom Collett (IoA) and Phil Marshall (Oxford).
Please cite: Collett et al 2013, http://arxiv.org/abs/1303.6564
HISTORY
2013-03-21 started Collett & Marshall (Oxford)
"""
# --------------------------------------------------------------------
try:
opts, args = getopt.getopt(argv,"hm:",["help","mode="])
except getopt.GetoptError, err:
print str(err) # will print something like "option -a not recognized"
print Calibrate.__doc__ # will print the big comment above.
return
Mode=3
for o,a in opts:
if o in ("-h", "--help"):
print Calibrate.__doc__
return
elif o in ("-m", "--mode"):
Mode = int(a)
assert Mode < 4 and Mode >0, "unhandled Mode"
else:
assert False, "unhandled option"
# Check for setup file in array args:
if len(args) == 1:
configfile = args[0]
print pangloss.doubledashedline
print pangloss.hello
print pangloss.doubledashedline
print "Calibrate: transforming Pr(kappah|D) to Pr(kappa|D)"
print "Calibrate: taking instructions from",configfile
else:
print Calibrate.__doc__
return
# --------------------------------------------------------------------
# Read in configuration, and extract the ones we need:
experiment = pangloss.Configuration(configfile)
EXP_NAME = experiment.parameters['ExperimentName']
Nc = experiment.parameters['NCalibrationLightcones']
comparator=experiment.parameters['Comparator']
comparatorType=experiment.parameters['ComparatorType']
comparatorWidth=experiment.parameters['ComparatorWidth']
# Figure out which mode is required:
ModeName = experiment.parameters['CalibrateMode']
if ModeName=='Joint': Mode = 1
if ModeName=='Slice': Mode = 2
if ModeName=='JointAndSlice': Mode = 3
CALIB_DIR = experiment.parameters['CalibrationFolder'][0]
jointdistfile= CALIB_DIR+'/'+comparator+'_'+comparatorType+'.pickle'
jointdistasPDFfile= CALIB_DIR+'/'+comparator+'_'+comparatorType+'_asPDF.pickle'
# Final result is PDF for kappa:
x = experiment.parameters['ObservedCatalog'][0]
resultfile = x.split('.')[0]+"_"+EXP_NAME+"_PofKappa.pickle"
# --------------------------------------------------------------------
# Mode 1: generate a joint distribution, eg Pr(kappah,kappa)
# from the calibration dataset:
if Mode==1 or Mode==3:
print pangloss.dashedline
# First find the calibration pdfs for kappa_h:
calpickles = []
for i in range(Nc):
calpickles.append(experiment.getLightconePickleName('simulated',pointing=i))
calresultpickles=[]
if comparator=="Kappah" and comparatorType=="median":
for i in range(Nc):
x = calpickles[i]
pfile = x.split('.')[0].split("_lightcone")[0]+"_"+EXP_NAME+"_KappaHilbert_Kappah_median.pickle"
calresultpickles.append(pfile)
elif comparator=="Kappah" and comparatorType!="median":
for i in range(Nc):
x = calpickles[i]
pfile = x.split('.')[0].split("_lightcone")[0]+"_"+EXP_NAME+"_KappaHilbert_Kappah_"+comparatorType+".pickle"
calresultpickles.append(pfile)
else:
print "Calibrate: Unrecognised comparator "+Comparator
print "Calibrate: If you want to use a comparator other than kappa_h, "
print "Calibrate: you'll need to code it up!"
print "Calibrate: (This should be easy, but you can ask tcollett@ast.cam.uk for help)."
exit()
# Now calculate comparators:
callist=numpy.empty((Nc,2))
jd=pangloss.PDF(["kappa_ext",comparator+'_'+comparatorType])
for i in range(Nc):
C = calresultpickles[i]
pdf = pangloss.readPickle(C)
if comparator=="Kappah":
if comparatorType=="median":
# Recall that we created a special file for this
# choice of comparator and comparator type, in
# Reconstruct. You could also use the
# comparatortype=="mean" code, swapping mean for median.
callist[i,0]=pdf[0]
callist[i,1]=pdf[1][0]
elif comparatorType=="mean":
callist[i,0] = pdf.truth[0]
callist[i,1] = numpy.mean(pdf.samples)
else:
print "Calibrate: Unrecognised comparatorType "+comparatorType
print "Calibrate: If you want to use a comparatorType other than median "
print "Calibrate: or mean, you'll need to code it up!"
print "Calibrate: (This should be easy, but you can ask tcollett@ast.cam.uk for help)."
exit()
jd.append(callist[i])
pangloss.writePickle(callist,jointdistfile)
# Also store the joint dist as a pangloss pdf:
pangloss.writePickle(jd,jointdistasPDFfile)
# Plot:
plotfile = jointdistasPDFfile.split('.')[0]+'.png'
jd.plot("Kappah_median","kappa_ext",weight=None,output=plotfile,title="The joint distribution of $\kappa_{\mathrm{ext}}$ and calibrator \n\n (more correlated means a better calibrator!)")
print "Calibrate: calibration joint PDF saved in:"
print "Calibrate: "+jointdistfile
print "Calibrate: and "+jointdistasPDFfile
print "Calibrate: you can view this PDF in "+plotfile
# --------------------------------------------------------------------
# Mode 2: calibrate a real line of sight's Pr(kappah|D) using the
# joint distribution Pr(kappa,<kappah>|D)
if Mode==2 or Mode==3:
print pangloss.dashedline
callibguide = pangloss.readPickle(jointdistfile)
obspickle = experiment.getLightconePickleName('real')
pfile = obspickle.split('.')[0].split("_lightcone")[0]+'_'+EXP_NAME+"_PofKappah.pickle"
pdf=pangloss.readPickle(pfile)
if comparator=="Kappah":
if comparatorType=="median":# note we created a special file for this choice of comparator and comparator type. You could also use the comparatortype=="mean" code swapping mean for median.
RealComparator=numpy.median(pdf.samples)
elif comparatorType=="mean":
RealComparator=numpy.mean(pdf.samples)
else:
print "I don't know that comparatorType. exiting"
exit()
pdf = pangloss.PDF(["kappa_ext","weight"])
#print RealComparator
#print numpy.median(callibguide[:,1]),numpy.std(callibguide[:,1])
dif=(callibguide[:,1]-RealComparator)
weights=dif*0.0
weights[numpy.abs(dif)<comparatorWidth]=1.
weights/=numpy.sum(weights)
samples=callibguide[:,0]
samplesandweights=callibguide.copy()
samplesandweights[:,1]=weights
pdf.samples=(samplesandweights)
plotfile = resultfile.split('.')[0]+".png"
pdf.plot('kappa_ext',weight='weight',output=plotfile)
average = numpy.average(samples, weights=weights)
variance = numpy.dot(weights, (samples-average)**2)/weights.sum()
average,std=average, variance**.5
# If the weights are a step function, the 68% confidence interval is easy to calculate:
included=samples[weights>0]
onesigconfidence=numpy.abs(\
stats.scoreatpercentile(included,84)-
stats.scoreatpercentile(included,16)\
)/2.
pangloss.writePickle(pdf,resultfile)
print "Calibrate: your reconstructed lightcone has been calibrated,"
print "Calibrate: suggesting it has a kappa_ext of",\
"%.3f +\- %.3f"%(average,onesigconfidence)
print "Calibrate: the PDF for kappa_ext has been output to "+resultfile
print "Calibrate: in the form of sample kappa_ext values, and their weights."
print "Calibrate: you can view this PDF in "+plotfile
print
print "Calibrate: To read and process this file, try:"
print
print " import pangloss"
print " pdf = pangloss.readPickle(\"%s\")"%resultfile
print " kappa_samples = pdf.getParameter(\"kappa_ext\")"
print " kappa_weights = pdf.getParameter(\"weight\")"
# --------------------------------------------------------------------
print
print pangloss.doubledashedline
return resultfile,jointdistasPDFfile
if __name__ == '__main__':
Calibrate(sys.argv[1:])
|
from pulp.bindings import auth, consumer, consumer_groups, repo_groups, repository
from pulp.bindings.actions import ActionsAPI
from pulp.bindings.content import OrphanContentAPI, ContentSourceAPI, ContentCatalogAPI
from pulp.bindings.event_listeners import EventListenerAPI
from pulp.bindings.server_info import ServerInfoAPI, ServerStatusAPI
from pulp.bindings.tasks import TasksAPI, TaskSearchAPI
from pulp.bindings.upload import UploadAPI
class Bindings(object):
def __init__(self, pulp_connection):
"""
@type pulp_connection: pulp.bindings.server.PulpConnection
"""
# Please keep the following in alphabetical order to ease reading
self.actions = ActionsAPI(pulp_connection)
self.bind = consumer.BindingsAPI(pulp_connection)
self.bindings = consumer.BindingSearchAPI(pulp_connection)
self.profile = consumer.ProfilesAPI(pulp_connection)
self.consumer = consumer.ConsumerAPI(pulp_connection)
self.consumer_content = consumer.ConsumerContentAPI(pulp_connection)
self.consumer_content_schedules = consumer.ConsumerContentSchedulesAPI(pulp_connection)
self.consumer_group = consumer_groups.ConsumerGroupAPI(pulp_connection)
self.consumer_group_search = consumer_groups.ConsumerGroupSearchAPI(pulp_connection)
self.consumer_group_actions = consumer_groups.ConsumerGroupActionAPI(pulp_connection)
self.consumer_group_bind = consumer_groups.ConsumerGroupBindAPI(pulp_connection)
self.consumer_group_content = consumer_groups.ConsumerGroupContentAPI(pulp_connection)
self.consumer_history = consumer.ConsumerHistoryAPI(pulp_connection)
self.consumer_search = consumer.ConsumerSearchAPI(pulp_connection)
self.content_orphan = OrphanContentAPI(pulp_connection)
self.content_source = ContentSourceAPI(pulp_connection)
self.content_catalog = ContentCatalogAPI(pulp_connection)
self.event_listener = EventListenerAPI(pulp_connection)
self.permission = auth.PermissionAPI(pulp_connection)
self.repo = repository.RepositoryAPI(pulp_connection)
self.repo_actions = repository.RepositoryActionsAPI(pulp_connection)
self.repo_distributor = repository.RepositoryDistributorAPI(pulp_connection)
self.repo_group = repo_groups.RepoGroupAPI(pulp_connection)
self.repo_group_actions = repo_groups.RepoGroupActionAPI(pulp_connection)
self.repo_group_distributor = repo_groups.RepoGroupDistributorAPI(pulp_connection)
self.repo_group_distributor_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_group_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_history = repository.RepositoryHistoryAPI(pulp_connection)
self.repo_importer = repository.RepositoryImporterAPI(pulp_connection)
self.repo_publish_schedules = repository.RepositoryPublishSchedulesAPI(pulp_connection)
self.repo_search = repository.RepositorySearchAPI(pulp_connection)
self.repo_sync_schedules = repository.RepositorySyncSchedulesAPI(pulp_connection)
self.repo_unit = repository.RepositoryUnitAPI(pulp_connection)
self.role = auth.RoleAPI(pulp_connection)
self.server_info = ServerInfoAPI(pulp_connection)
self.server_status = ServerStatusAPI(pulp_connection)
self.tasks = TasksAPI(pulp_connection)
self.tasks_search = TaskSearchAPI(pulp_connection)
self.uploads = UploadAPI(pulp_connection)
self.user = auth.UserAPI(pulp_connection)
self.user_search = auth.UserSearchAPI(pulp_connection)
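# Hedged usage sketch (comments only; the PulpConnection constructor arguments
# vary between Pulp releases, so the host below is an assumption):
#
#   from pulp.bindings.server import PulpConnection
#   connection = PulpConnection('pulp.example.com')
#   api = Bindings(connection)
#   # Every Pulp REST area is then reachable from one object, e.g. api.repo,
#   # api.tasks, api.user, following the alphabetical attributes set above.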
|
import re
WHITE_LIST = {
'names': {
'eno': {},
'evo': {},
'ii': {},
'li': {'alias': 'Ii'},
'utö': {},
'usa': {}
},
'patterns': [
{
'find': re.compile('([A-ZÄÖa-zäö-]*)(mlk)'),
'replace': r'\1 mlk'
}
]
}
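# Hedged example, not part of the original module: the helper below is
# hypothetical and only illustrates how the 'patterns' rules above could be
# applied, e.g. turning the invented input "Porimlk" into "Pori mlk".
def _apply_whitelist_patterns(name):
    for rule in WHITE_LIST['patterns']:
        name = rule['find'].sub(rule['replace'], name)
    return name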
|
from meh import Config
from meh.handler import ExceptionHandler
from meh.dump import ReverseExceptionDump
from pyanaconda import iutil, kickstart
import sys
import os
import shutil
import time
import re
import errno
import glob
import traceback
import blivet.errors
from pyanaconda.errors import CmdlineError
from pyanaconda.ui.communication import hubQ
from pyanaconda.constants import THREAD_EXCEPTION_HANDLING_TEST, IPMI_FAILED
from pyanaconda.threads import threadMgr
from pyanaconda.i18n import _
from pyanaconda import flags
from pyanaconda import startup_utils
from gi.repository import GLib
import logging
log = logging.getLogger("anaconda")
class AnacondaExceptionHandler(ExceptionHandler):
def __init__(self, confObj, intfClass, exnClass, tty_num, gui_lock, interactive):
"""
:see: python-meh's ExceptionHandler
:param tty_num: the number of tty the interface is running on
"""
ExceptionHandler.__init__(self, confObj, intfClass, exnClass)
self._gui_lock = gui_lock
self._intf_tty_num = tty_num
self._interactive = interactive
def _main_loop_handleException(self, dump_info):
"""
Helper method with one argument only so that it can be registered
with GLib.idle_add() to run on idle or called from a handler.
:type dump_info: an instance of the meh.DumpInfo class
"""
ty = dump_info.exc_info.type
value = dump_info.exc_info.value
if (issubclass(ty, blivet.errors.StorageError) and value.hardware_fault) \
or (issubclass(ty, OSError) and value.errno == errno.EIO):
# hardware fault or '[Errno 5] Input/Output error'
hw_error_msg = _("The installation was stopped due to what "
"seems to be a problem with your hardware. "
"The exact error message is:\n\n%s.\n\n "
"The installer will now terminate.") % str(value)
self.intf.messageWindow(_("Hardware error occurred"), hw_error_msg)
sys.exit(0)
elif isinstance(value, blivet.errors.UnusableConfigurationError):
sys.exit(0)
else:
super(AnacondaExceptionHandler, self).handleException(dump_info)
return False
def handleException(self, dump_info):
"""
Our own handleException method doing some additional stuff before
calling the original python-meh's one.
:type dump_info: an instance of the meh.DumpInfo class
:see: python-meh's ExceptionHandler.handleException
"""
log.debug("running handleException")
exception_lines = traceback.format_exception(*dump_info.exc_info)
log.critical("\n".join(exception_lines))
ty = dump_info.exc_info.type
value = dump_info.exc_info.value
try:
from gi.repository import Gtk
# XXX: Gtk stopped raising RuntimeError if it fails to
# initialize. Hooray! But will it stay like this? Let's be
# cautious and raise the exception on our own to work in both
# cases
initialized = Gtk.init_check(None)[0]
if not initialized:
raise RuntimeError()
# Attempt to grab the GUI initializing lock, do not block
if not self._gui_lock.acquire(False):
# the graphical interface is running, don't crash it by
# running another one potentially from a different thread
log.debug("Gtk running, queuing exception handler to the "
"main loop")
GLib.idle_add(self._main_loop_handleException, dump_info)
else:
log.debug("Gtk not running, starting Gtk and running "
"exception handler in it")
self._main_loop_handleException(dump_info)
except (RuntimeError, ImportError):
log.debug("Gtk cannot be initialized")
# X not running (Gtk cannot be initialized)
if threadMgr.in_main_thread():
log.debug("In the main thread, running exception handler")
if issubclass(ty, CmdlineError) or not self._interactive:
if issubclass(ty, CmdlineError):
cmdline_error_msg = _("\nThe installation was stopped due to "
"incomplete spokes detected while running "
"in non-interactive cmdline mode. Since there "
"cannot be any questions in cmdline mode, "
"edit your kickstart file and retry "
"installation.\nThe exact error message is: "
"\n\n%s.\n\nThe installer will now terminate.") % str(value)
else:
cmdline_error_msg = _("\nRunning in cmdline mode, no interactive debugging "
"allowed.\nThe exact error message is: "
"\n\n%s.\n\nThe installer will now terminate.") % str(value)
# since there is no UI in cmdline mode and it is completely
# non-interactive, we can't show a message window asking the user
# to acknowledge the error; instead, print the error out and sleep
# for a few seconds before exiting the installer
print(cmdline_error_msg)
time.sleep(10)
sys.exit(1)
else:
print("\nAn unknown error has occured, look at the "
"/tmp/anaconda-tb* file(s) for more details")
# in the main thread, run exception handler
self._main_loop_handleException(dump_info)
else:
log.debug("In a non-main thread, sending a message with "
"exception data")
# not in the main thread, just send message with exception
# data and let message handler run the exception handler in
# the main thread
exc_info = dump_info.exc_info
hubQ.send_exception((exc_info.type,
exc_info.value,
exc_info.stack))
def postWriteHook(self, dump_info):
anaconda = dump_info.object
# See if there is a /root present in the root path and put exception there as well
if os.access(iutil.getSysroot() + "/root", os.X_OK):
try:
dest = iutil.getSysroot() + "/root/%s" % os.path.basename(self.exnFile)
shutil.copyfile(self.exnFile, dest)
except (shutil.Error, IOError):
log.error("Failed to copy %s to %s/root", self.exnFile, iutil.getSysroot())
# run kickstart traceback scripts (if necessary)
try:
kickstart.runTracebackScripts(anaconda.ksdata.scripts)
# pylint: disable=bare-except
except:
pass
iutil.ipmi_report(IPMI_FAILED)
def runDebug(self, exc_info):
if flags.can_touch_runtime_system("switch console") \
and self._intf_tty_num != 1:
iutil.vtActivate(1)
iutil.eintr_retry_call(os.open, "/dev/console", os.O_RDWR) # reclaim stdin
iutil.eintr_retry_call(os.dup2, 0, 1) # reclaim stdout
iutil.eintr_retry_call(os.dup2, 0, 2) # reclaim stderr
# ^
# |
# +------ dup2 is magic, I tells ya!
# bring back the echo
import termios
si = sys.stdin.fileno()
attr = termios.tcgetattr(si)
attr[3] = attr[3] | termios.ECHO
termios.tcsetattr(si, termios.TCSADRAIN, attr)
print("\nEntering debugger...")
print("Use 'continue' command to quit the debugger and get back to "\
"the main window")
import pdb
pdb.post_mortem(exc_info.stack)
if flags.can_touch_runtime_system("switch console") \
and self._intf_tty_num != 1:
iutil.vtActivate(self._intf_tty_num)
def initExceptionHandling(anaconda):
fileList = ["/tmp/anaconda.log", "/tmp/packaging.log",
"/tmp/program.log", "/tmp/storage.log", "/tmp/ifcfg.log",
"/tmp/dnf.log", "/tmp/dnf.rpm.log",
"/tmp/yum.log", iutil.getSysroot() + "/root/install.log",
"/proc/cmdline"]
if os.path.exists("/tmp/syslog"):
fileList.extend(["/tmp/syslog"])
if anaconda.opts and anaconda.opts.ksfile:
fileList.extend([anaconda.opts.ksfile])
conf = Config(programName="anaconda",
programVersion=startup_utils.get_anaconda_version_string(),
programArch=os.uname()[4],
attrSkipList=["_intf._actions",
"_intf._currentAction._xklwrapper",
"_intf._currentAction._spokes[\"KeyboardSpoke\"]._xkl_wrapper",
"_intf._currentAction._storage_playground",
"_intf._currentAction._spokes[\"CustomPartitioningSpoke\"]._storage_playground",
"_intf._currentAction.language.translations",
"_intf._currentAction.language.locales",
"_intf._currentAction._spokes[\"PasswordSpoke\"]._oldweak",
"_intf._currentAction._spokes[\"PasswordSpoke\"]._password",
"_intf._currentAction._spokes[\"UserSpoke\"]._password",
"_intf._currentAction._spokes[\"UserSpoke\"]._oldweak",
"_intf.storage.bootloader.password",
"_intf.storage.data",
"_intf.storage.encryptionPassphrase",
"_bootloader.encrypted_password",
"_bootloader.password",
"payload._groups"],
localSkipList=["passphrase", "password", "_oldweak", "_password"],
fileList=fileList)
conf.register_callback("lsblk_output", lsblk_callback, attchmnt_only=True)
conf.register_callback("nmcli_dev_list", nmcli_dev_list_callback,
attchmnt_only=True)
conf.register_callback("type", lambda: "anaconda", attchmnt_only=True)
conf.register_callback("addons", list_addons_callback, attchmnt_only=False)
if "/tmp/syslog" not in fileList:
# no syslog, grab output from journalctl and put it also to the
# anaconda-tb file
conf.register_callback("journalctl", journalctl_callback, attchmnt_only=False)
interactive = not anaconda.displayMode == 'c'
handler = AnacondaExceptionHandler(conf, anaconda.intf.meh_interface,
ReverseExceptionDump, anaconda.intf.tty_num,
anaconda.gui_initialized, interactive)
handler.install(anaconda)
return conf
def lsblk_callback():
"""Callback to get info about block devices."""
return iutil.execWithCapture("lsblk", ["--perms", "--fs", "--bytes"])
def nmcli_dev_list_callback():
"""Callback to get info about network devices."""
return iutil.execWithCapture("nmcli", ["device", "show"])
def journalctl_callback():
"""Callback to get logs from journalctl."""
# regex to filter log messages from anaconda's process (we have that in our
# logs)
anaconda_log_line = re.compile(r"\[%d\]:" % os.getpid())
ret = ""
for line in iutil.execReadlines("journalctl", ["-b"]):
if anaconda_log_line.search(line) is None:
# not one of anaconda's messages
ret += line + "\n"
return ret
def list_addons_callback():
"""
Callback to get info about the addons potentially affecting Anaconda's
behaviour.
"""
# list available addons and take their package names
addon_pkgs = glob.glob("/usr/share/anaconda/addons/*")
return ", ".join(addon.rsplit("/", 1)[1] for addon in addon_pkgs)
def test_exception_handling():
"""
Function that can be used for testing exception handling in anaconda. It
tries to prepare a worst case scenario designed from bugs seen so far.
"""
# XXX: this is a huge hack, but probably the only way, how we can get
# "unique" stack and thus unique hash and new bugreport
def raise_exception(msg, non_ascii):
timestamp = str(time.time()).split(".", 1)[0]
code = """
def f%s(msg, non_ascii):
raise RuntimeError(msg)
f%s(msg, non_ascii)
""" % (timestamp, timestamp)
eval(compile(code, "str_eval", "exec"))
# test non-ascii characters dumping
non_ascii = u'\u0159'
msg = "NOTABUG: testing exception handling"
# raise exception from a separate thread
from pyanaconda.threads import AnacondaThread
threadMgr.add(AnacondaThread(name=THREAD_EXCEPTION_HANDLING_TEST,
target=raise_exception,
args=(msg, non_ascii)))
|
from __future__ import absolute_import
from __future__ import division
import marshal
import pickle
from testlib import VdsmTestCase
from testlib import expandPermutations, permutations
from vdsm.common.compat import json
from vdsm.common.password import (
ProtectedPassword,
protect_passwords,
unprotect_passwords,
)
class ProtectedPasswordTests(VdsmTestCase):
def test_str(self):
p = ProtectedPassword("12345678")
self.assertNotIn("12345678", str(p))
def test_repr(self):
p = ProtectedPassword("12345678")
self.assertNotIn("12345678", repr(p))
def test_value(self):
p = ProtectedPassword("12345678")
self.assertEqual("12345678", p.value)
def test_eq(self):
p1 = ProtectedPassword("12345678")
p2 = ProtectedPassword("12345678")
self.assertEqual(p1, p2)
def test_ne(self):
p1 = ProtectedPassword("12345678")
p2 = ProtectedPassword("12345678")
self.assertFalse(p1 != p2)
def test_pickle_copy(self):
p1 = ProtectedPassword("12345678")
p2 = pickle.loads(pickle.dumps(p1))
self.assertEqual(p1, p2)
def test_no_marshal(self):
p1 = ProtectedPassword("12345678")
self.assertRaises(ValueError, marshal.dumps, p1)
def test_no_json(self):
p1 = ProtectedPassword("12345678")
self.assertRaises(TypeError, json.dumps, p1)
@expandPermutations
class ProtectTests(VdsmTestCase):
@permutations([[list()], [dict()], [tuple()]])
def test_protect_empty(self, params):
self.assertEqual(params, protect_passwords(params))
@permutations([[list()], [dict()], [tuple()]])
def test_unprotect_empty(self, result):
self.assertEqual(result, unprotect_passwords(result))
def test_protect_dict(self):
unprotected = dict_unprotected()
protected = dict_protected()
self.assertEqual(protected, protect_passwords(unprotected))
def test_unprotect_dict(self):
protected = dict_protected()
unprotected = dict_unprotected()
self.assertEqual(unprotected, unprotect_passwords(protected))
def test_protect_nested_dicts(self):
unprotected = nested_dicts_unprotected()
protected = nested_dicts_protected()
self.assertEqual(protected, protect_passwords(unprotected))
def test_unprotect_nested_dicts(self):
protected = nested_dicts_protected()
unprotected = nested_dicts_unprotected()
self.assertEqual(unprotected, unprotect_passwords(protected))
def test_protect_lists_of_dicts(self):
unprotected = lists_of_dicts_unprotected()
protected = lists_of_dicts_protected()
self.assertEqual(protected, protect_passwords(unprotected))
def test_unprotect_lists_of_dicts(self):
protected = lists_of_dicts_protected()
unprotected = lists_of_dicts_unprotected()
self.assertEqual(unprotected, unprotect_passwords(protected))
def test_protect_nested_lists_of_dicts(self):
unprotected = nested_lists_of_dicts_unprotected()
protected = nested_lists_of_dicts_protected()
self.assertEqual(protected, protect_passwords(unprotected))
def test_unprotect_nested_lists_of_dicts(self):
protected = nested_lists_of_dicts_protected()
unprotected = nested_lists_of_dicts_unprotected()
self.assertEqual(unprotected, unprotect_passwords(protected))
def dict_unprotected():
return {
"key": "value",
"_X_key": "secret",
"password": "12345678"
}
def dict_protected():
return {
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("12345678")
}
def nested_dicts_unprotected():
return {
"key": "value",
"_X_key": "secret",
"nested": {
"password": "12345678",
"nested": {
"key": "value",
"_X_key": "secret",
"password": "87654321",
}
}
}
def nested_dicts_protected():
return {
"key": "value",
"_X_key": ProtectedPassword("secret"),
"nested": {
"password": ProtectedPassword("12345678"),
"nested": {
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("87654321"),
}
}
}
def lists_of_dicts_unprotected():
return [
{
"key": "value",
"_X_key": "secret",
"password": "12345678",
},
{
"key": "value",
"_X_key": "secret",
"password": "87654321",
}
]
def lists_of_dicts_protected():
return [
{
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("12345678"),
},
{
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("87654321"),
}
]
def nested_lists_of_dicts_unprotected():
return {
"key": "value",
"nested": [
{
"key": "value",
"nested": [
{
"key": "value",
"_X_key": "secret",
"password": "12345678",
}
]
}
]
}
def nested_lists_of_dicts_protected():
return {
"key": "value",
"nested": [
{
"key": "value",
"nested": [
{
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("12345678"),
}
]
}
]
}
|
from ij import IJ
from ij.gui import NonBlockingGenericDialog
from ij import WindowManager
from ij.gui import WaitForUserDialog
from ij import ImageStack
from ij import ImagePlus
from ij.plugin import ChannelSplitter
theImage = IJ.getImage()
sourceImages = []
if theImage.getNChannels() == 1:
IJ.run("8-bit")
sourceImages.append(theImage)
else:
sourceImages = ChannelSplitter.split(theImage)
sourceNames = []
for im in sourceImages:
im.show()
sourceNames.append(im.getTitle())
gd0 = NonBlockingGenericDialog("Select source image...")
gd0.addChoice("Source image",sourceNames,sourceNames[0])
gd0.showDialog()
if (gd0.wasOKed()):
chosenImage = gd0.getNextChoice()
theImage = WindowManager.getImage(chosenImage)
IJ.selectWindow(chosenImage)
else:
theImage = sourceImages[0]
IJ.selectWindow(sourceNames[0])
gd = NonBlockingGenericDialog("Set slice params...")
gd.addNumericField("Slice start:",1,0)
gd.addNumericField("Slice end:",theImage.getNSlices(),0)
gd.showDialog()
if (gd.wasOKed()):
## Selecting the ROI over the stack
startSlice = int(gd.getNextNumber())
endSlice = int(gd.getNextNumber())
width = theImage.getWidth()
height = theImage.getHeight()
roiArray = []
for i in range(startSlice,endSlice+1):
theImage.setSlice(i)
bp = theImage.getProcessor().duplicate()
bp.setColor(0)
doStaySlice = True
while doStaySlice:
waiter = WaitForUserDialog("Draw ROI","Draw ROI, then hit OK")
waiter.show()
roi = theImage.getRoi()
if roi is None:
doStaySlice = True
else:
doStaySlice = False
roiArray.append(roi)
## Applying the ROI to each channel
newStacks = []
castImages = []
for procImage in sourceImages:
newStacks.append(ImageStack(width,height))
ns = newStacks[-1]
for i in range(startSlice,endSlice+1):
procImage.setSliceWithoutUpdate(i)
bp = procImage.getProcessor().duplicate()
bp.fillOutside(roiArray[i-startSlice])
ns.addSlice(bp)
castImages.append(ImagePlus(procImage.getShortTitle()+"_cast",ns))
## Displays the output
for castImage in castImages:
castImage.show()
## Cleans up the windows
for sourceImage in sourceImages:
sourceImage.close()
|
"""Add data_migration table
Revision ID: 2e171e6198e6
Revises: 15d3fad78656
Create Date: 2016-08-03 11:11:55.680872
"""
revision = '2e171e6198e6'
down_revision = '15d3fad78656'
from alembic import op
from sqlalchemy import Column, Integer, Unicode, DateTime
def upgrade():
op.create_table('data_migration',
Column('id', Integer, primary_key=True),
Column('name', Unicode(255), nullable=False, unique=True),
Column('finish_time', DateTime),
mysql_engine='InnoDB')
def downgrade():
op.drop_table('data_migration')
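# Hedged note, not part of the migration itself: with this revision on the
# version path, "alembic upgrade head" creates the data_migration table and
# "alembic downgrade 15d3fad78656" returns to the previous revision.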
|
import codecs, glob, os, sys
if __name__ == "__main__":
import FileGenerator
else:
from . import FileGenerator
continuationLineEnd = " \\"
def FindPathToHeader(header, includePath):
for incDir in includePath:
relPath = os.path.join(incDir, header)
if os.path.exists(relPath):
return relPath
return ""
fhifCache = {} # Remember the includes in each file. ~5x speed up.
def FindHeadersInFile(filePath):
if filePath not in fhifCache:
headers = []
with codecs.open(filePath, "r", "utf-8") as f:
for line in f:
if line.strip().startswith("#include"):
parts = line.split()
if len(parts) > 1:
header = parts[1]
if header[0] != '<': # No system headers
headers.append(header.strip('"'))
fhifCache[filePath] = headers
return fhifCache[filePath]
def FindHeadersInFileRecursive(filePath, includePath, renames):
headerPaths = []
for header in FindHeadersInFile(filePath):
if header in renames:
header = renames[header]
relPath = FindPathToHeader(header, includePath)
if relPath and relPath not in headerPaths:
headerPaths.append(relPath)
subHeaders = FindHeadersInFileRecursive(relPath, includePath, renames)
headerPaths.extend(sh for sh in subHeaders if sh not in headerPaths)
return headerPaths
def RemoveStart(relPath, start):
if relPath.startswith(start):
return relPath[len(start):]
return relPath
def ciKey(f):
return f.lower()
def FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames={}):
deps = []
for sourceGlob in sourceGlobs:
sourceFiles = glob.glob(sourceGlob)
# Sorting the files minimizes deltas as order returned by OS may be arbitrary
sourceFiles.sort(key=ciKey)
for sourceName in sourceFiles:
objName = os.path.splitext(os.path.basename(sourceName))[0]+objExt
headerPaths = FindHeadersInFileRecursive(sourceName, includePath, renames)
depsForSource = [sourceName] + headerPaths
depsToAppend = [RemoveStart(fn.replace("\\", "/"), startDirectory) for
fn in depsForSource]
deps.append([objName, depsToAppend])
return deps
def PathStem(p):
""" Return the stem of a filename: "CallTip.o" -> "CallTip" """
return os.path.splitext(os.path.basename(p))[0]
def InsertSynonym(dependencies, current, additional):
""" Insert a copy of one object file with dependencies under a different name.
Used when one source file is used to create two object files with different
preprocessor definitions. """
result = []
for dep in dependencies:
result.append(dep)
if (dep[0] == current):
depAdd = [additional, dep[1]]
result.append(depAdd)
return result
def ExtractDependencies(input):
""" Create a list of dependencies from input list of lines
Each element contains the name of the object and a list of
files that it depends on.
Dependencies that contain "/usr/" are removed as they are system headers. """
deps = []
for line in input:
headersLine = line.startswith(" ") or line.startswith("\t")
line = line.strip()
isContinued = line.endswith("\\")
line = line.rstrip("\\ ")
fileNames = line.strip().split(" ")
if not headersLine:
# it's a source file line; there may be headers too
sourceLine = fileNames[0].rstrip(":")
fileNames = fileNames[1:]
deps.append([sourceLine, []])
deps[-1][1].extend(header for header in fileNames if "/usr/" not in header)
return deps
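# Hedged illustration, not part of the original module: the invented lines
# below mimic compiler "-MM" style dependency output. ExtractDependencies
# would turn them into [["CallTip.o", ["CallTip.cxx", "Scintilla.h", "CallTip.h"]]],
# dropping the /usr/ system header.
_EXAMPLE_DEPENDENCY_LINES = [
    "CallTip.o: CallTip.cxx \\",
    " Scintilla.h CallTip.h /usr/include/stdio.h",
]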
def TextFromDependencies(dependencies):
""" Convert a list of dependencies to text. """
text = ""
indentHeaders = "\t"
joinHeaders = continuationLineEnd + os.linesep + indentHeaders
for dep in dependencies:
object, headers = dep
text += object + ":"
for header in headers:
text += joinHeaders
text += header
if headers:
text += os.linesep
return text
def UpdateDependencies(filepath, dependencies, comment=""):
""" Write a dependencies file if different from dependencies. """
FileGenerator.UpdateFile(os.path.abspath(filepath), comment.rstrip() + os.linesep +
TextFromDependencies(dependencies))
def WriteDependencies(output, dependencies):
""" Write a list of dependencies out to a stream. """
output.write(TextFromDependencies(dependencies))
if __name__ == "__main__":
""" Act as a filter that reformats input dependencies to one per line. """
inputLines = sys.stdin.readlines()
deps = ExtractDependencies(inputLines)
WriteDependencies(sys.stdout, deps)
|
"""HEPData module test cases."""
def test_version():
"""Test version import."""
from hepdata import __version__
assert __version__
|
import sys
import os
args = sys.argv[1:]
files = args[0:-1]
newdir = args[-1]
for file in files:
cmd = "svn mv %s %s/" % (file,newdir)
print cmd
os.system(cmd)
|
from ubi.block import sort
class ubi_file(object):
"""UBI image file object
Arguments:
Str:path -- Path to file to parse
Int:block_size -- Erase block size of NAND in bytes.
Int:start_offset -- (optional) Where to start looking in the file for
UBI data.
Int:end_offset -- (optional) Where to stop looking in the file.
Methods:
seek -- Put file head to specified byte offset.
Int:offset
read -- Read specified bytes from file handle.
Int:size
tell -- Returns byte offset of current file location.
read_block -- Returns complete PEB data of provided block
description.
Obj:block
read_block_data -- Returns LEB data only from provided block.
Obj:block
reader -- Generator that returns data from file.
reset -- Reset file position to start_offset
Handles all the actual file interactions, read, seek,
extract blocks, etc.
"""
def __init__(self, path, block_size, start_offset=0, end_offset=None):
self._fhandle = open(path, 'rb')
self._start_offset = start_offset
if end_offset:
self._end_offset = end_offset
else:
self._fhandle.seek(0, 2)
self._end_offset = self.tell()
self._block_size = block_size
if start_offset >= self._end_offset:
raise Exception('Start offset larger than file size!')
self._fhandle.seek(self._start_offset)
def _set_start(self, i):
self._start_offset = i
def _get_start(self):
return self._start_offset
start_offset = property(_get_start, _set_start)
def _get_end(self):
return self._end_offset
end_offset = property(_get_end)
def _get_block_size(self):
return self._block_size
block_size = property(_get_block_size)
def seek(self, offset):
self._fhandle.seek(offset)
def read(self, size):
return self._fhandle.read(size)
def tell(self):
return self._fhandle.tell()
def reset(self):
self._fhandle.seek(self.start_offset)
def reader(self):
self.reset()
while True:
cur_loc = self._fhandle.tell()
if self.end_offset and cur_loc > self.end_offset:
break
elif self.end_offset and self.end_offset - cur_loc < self.block_size:
chunk_size = self.end_offset - cur_loc
else:
chunk_size = self.block_size
buf = self.read(chunk_size)
if not buf:
break
yield buf
def read_block(self, block):
"""Read complete PEB data from file.
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset)
return self._fhandle.read(block.size)
def read_block_data(self, block):
"""Read LEB data from file
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset + block.ec_hdr.data_offset)
buf = self._fhandle.read(block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad)
return buf
class leb_virtual_file():
def __init__(self, ubi, volume):
self._ubi = ubi
self._volume = volume
self._blocks = sort.by_leb(self._volume.get_blocks(self._ubi.blocks))
self._seek = 0
self.leb_data_size = len(self._blocks) * self._ubi.leb_size
self._last_leb = -1
self._last_buf = ''
def read(self, i):
buf = ''
leb = int(self.tell() / self._ubi.leb_size)
offset = self.tell() % self._ubi.leb_size
if leb == self._last_leb:
self.seek(self.tell() + i)
return self._last_buf[offset:offset + i]
else:
buf = self._ubi.file.read_block_data(self._ubi.blocks[self._blocks[leb]])
self._last_buf = buf
self._last_leb = leb
self.seek(self.tell() + i)
return buf[offset:offset + i]
def reset(self):
self.seek(0)
def seek(self, offset):
self._seek = offset
def tell(self):
return self._seek
def reader(self):
last_leb = 0
for block in self._blocks:
while 0 != (self._ubi.blocks[block].leb_num - last_leb):
last_leb += 1
yield '\xff' * self._ubi.leb_size
last_leb += 1
yield self._ubi.file.read_block_data(self._ubi.blocks[block])
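# Hedged usage sketch, not part of the original module: walk a raw NAND dump
# PEB by PEB using the reader generator. The image path and the 128 KiB erase
# block size are invented example values.
if __name__ == '__main__':
    image = ubi_file('/tmp/nand.img', block_size=128 * 1024)
    for index, peb in enumerate(image.reader()):
        print('PEB %d: %d bytes' % (index, len(peb)))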
|
"""Helpers for git extensions written in python
"""
import inspect
import os
import subprocess
import sys
import traceback
config = {}
def __extract_name_email(info, type_):
"""Extract a name and email from a string in the form:
User Name <user@example.com> tstamp offset
Stick that into our config dict for either git committer or git author.
"""
val = ' '.join(info.split(' ')[:-2])
angle = val.find('<')
if angle > -1:
config['GIT_%s_NAME' % type_] = val[:angle - 1]
config['GIT_%s_EMAIL' % type_] = val[angle + 1:-1]
else:
config['GIT_%s_NAME' % type_] = val
def __create_config():
"""Create our configuration dict from git and the env variables we're given.
"""
devnull = file('/dev/null', 'w')
# Stick all our git variables in our dict, just in case anyone needs them
gitvar = subprocess.Popen(['git', 'var', '-l'], stdout=subprocess.PIPE,
stderr=devnull)
for line in gitvar.stdout:
k, v = line.split('=', 1)
if k == 'GIT_COMMITTER_IDENT':
__extract_name_email(v, 'COMMITTER')
elif k == 'GIT_AUTHOR_IDENT':
__extract_name_email(v, 'AUTHOR')
elif v == 'true':
v = True
elif v == 'false':
v = False
else:
try:
v = int(v)
except:
pass
config[k] = v
gitvar.wait()
# Find out where git's sub-exes live
gitexec = subprocess.Popen(['git', '--exec-path'], stdout=subprocess.PIPE,
stderr=devnull)
config['GIT_LIBEXEC'] = gitexec.stdout.readlines()[0].strip()
gitexec.wait()
# Figure out the git dir in our repo, if applicable
gitdir = subprocess.Popen(['git', 'rev-parse', '--git-dir'],
stdout=subprocess.PIPE, stderr=devnull)
lines = gitdir.stdout.readlines()
if gitdir.wait() == 0:
config['GIT_DIR'] = lines[0].strip()
# Figure out the top level of our repo, if applicable
gittoplevel = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
stdout=subprocess.PIPE, stderr=devnull)
lines = gittoplevel.stdout.readlines()
if gittoplevel.wait() == 0:
config['GIT_TOPLEVEL'] = lines[0].strip()
# We may have been called by a wrapper that passes us some info through the
# environment. Use it if it's there
for k, v in os.environ.iteritems():
if k.startswith('PY_GIT_'):
config[k[3:]] = v
elif k == 'PGL_OK':
config['PGL_OK'] = True
# Make sure our git dir and toplevel are fully-qualified
if 'GIT_DIR' in config and not os.path.isabs(config['GIT_DIR']):
git_dir = os.path.join(config['GIT_TOPLEVEL'], config['GIT_DIR'])
config['GIT_DIR'] = os.path.abspath(git_dir)
def warn(msg):
"""Print a warning
"""
sys.stderr.write('%s\n' % (msg,))
def die(msg):
"""Print an error message and exit the program
"""
sys.stderr.write('%s\n' % (msg,))
sys.exit(1)
def do_checks():
"""Check to ensure we've got everything we expect
"""
try:
import argparse
except:
die('Your python must support the argparse module')
def main(_main):
"""Mark a function as the main function for our git subprogram. Based
very heavily on automain by Gerald Kaszuba, but with modifications to make
it work better for our purposes.
"""
parent = inspect.stack()[1][0]
name = parent.f_locals.get('__name__', None)
if name == '__main__':
__create_config()
if 'PGL_OK' not in config:
do_checks()
rval = 1
try:
rval = _main()
except Exception, e:
sys.stdout.write('%s\n' % str(e))
f = file('pygit.tb', 'w')
traceback.print_tb(sys.exc_info()[2], None, f)
f.close()
sys.exit(rval)
return _main
if __name__ == '__main__':
"""If we get run as a script, check to make sure it's all ok and exit with
an appropriate error code
"""
do_checks()
sys.exit(0)
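# Hedged usage sketch (comments only, since running it here would re-invoke the
# checks above): a git extension script would typically do
#
#   import pgl
#
#   @pgl.main
#   def run():
#       pgl.warn('running as %s' % pgl.config.get('GIT_COMMITTER_NAME', '?'))
#       return 0
#
# The module name "pgl" is an assumption; use whatever name this helper file is
# saved under on your path.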
|
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml=False
ContentHandler=object
else:
has_xml=True
import os,sys
from waflib.Tools import cxx
from waflib import Task,Utils,Options,Errors,Context
from waflib.TaskGen import feature,after_method,extension
from waflib.Configure import conf
from waflib import Logs
MOC_H=['.h','.hpp','.hxx','.hh']
EXT_RCC=['.qrc']
EXT_UI=['.ui']
EXT_QT5=['.cpp','.cc','.cxx','.C']
QT5_LIBS='''
qtmain
Qt5Bluetooth
Qt5CLucene
Qt5Concurrent
Qt5Core
Qt5DBus
Qt5Declarative
Qt5DesignerComponents
Qt5Designer
Qt5Gui
Qt5Help
Qt5MultimediaQuick_p
Qt5Multimedia
Qt5MultimediaWidgets
Qt5Network
Qt5Nfc
Qt5OpenGL
Qt5Positioning
Qt5PrintSupport
Qt5Qml
Qt5QuickParticles
Qt5Quick
Qt5QuickTest
Qt5Script
Qt5ScriptTools
Qt5Sensors
Qt5SerialPort
Qt5Sql
Qt5Svg
Qt5Test
Qt5WebKit
Qt5WebKitWidgets
Qt5Widgets
Qt5WinExtras
Qt5X11Extras
Qt5XmlPatterns
Qt5Xml'''
class qxx(Task.classes['cxx']):
def __init__(self,*k,**kw):
Task.Task.__init__(self,*k,**kw)
self.moc_done=0
def runnable_status(self):
if self.moc_done:
return Task.Task.runnable_status(self)
else:
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
self.add_moc_tasks()
return Task.Task.runnable_status(self)
def create_moc_task(self,h_node,m_node):
try:
moc_cache=self.generator.bld.moc_cache
except AttributeError:
moc_cache=self.generator.bld.moc_cache={}
try:
return moc_cache[h_node]
except KeyError:
tsk=moc_cache[h_node]=Task.classes['moc'](env=self.env,generator=self.generator)
tsk.set_inputs(h_node)
tsk.set_outputs(m_node)
if self.generator:
self.generator.tasks.append(tsk)
gen=self.generator.bld.producer
gen.outstanding.insert(0,tsk)
gen.total+=1
return tsk
else:
delattr(self,'cache_sig')
def moc_h_ext(self):
try:
ext=Options.options.qt_header_ext.split()
except AttributeError:
pass
if not ext:
ext=MOC_H
return ext
def add_moc_tasks(self):
node=self.inputs[0]
bld=self.generator.bld
try:
self.signature()
except KeyError:
pass
else:
delattr(self,'cache_sig')
include_nodes=[node.parent]+self.generator.includes_nodes
moctasks=[]
mocfiles=set([])
for d in bld.raw_deps.get(self.uid(),[]):
if not d.endswith('.moc'):
continue
if d in mocfiles:
continue
mocfiles.add(d)
h_node=None
base2=d[:-4]
for x in include_nodes:
for e in self.moc_h_ext():
h_node=x.find_node(base2+e)
if h_node:
break
if h_node:
m_node=h_node.change_ext('.moc')
break
else:
for k in EXT_QT5:
if base2.endswith(k):
for x in include_nodes:
h_node=x.find_node(base2)
if h_node:
break
if h_node:
m_node=h_node.change_ext(k+'.moc')
break
if not h_node:
raise Errors.WafError('No source found for %r which is a moc file'%d)
task=self.create_moc_task(h_node,m_node)
moctasks.append(task)
self.run_after.update(set(moctasks))
self.moc_done=1
class trans_update(Task.Task):
run_str='${QT_LUPDATE} ${SRC} -ts ${TGT}'
color='BLUE'
Task.update_outputs(trans_update)
class XMLHandler(ContentHandler):
def __init__(self):
self.buf=[]
self.files=[]
def startElement(self,name,attrs):
if name=='file':
self.buf=[]
def endElement(self,name):
if name=='file':
self.files.append(str(''.join(self.buf)))
def characters(self,cars):
self.buf.append(cars)
@extension(*EXT_RCC)
def create_rcc_task(self,node):
rcnode=node.change_ext('_rc.cpp')
self.create_task('rcc',node,rcnode)
cpptask=self.create_task('cxx',rcnode,rcnode.change_ext('.o'))
try:
self.compiled_tasks.append(cpptask)
except AttributeError:
self.compiled_tasks=[cpptask]
return cpptask
@extension(*EXT_UI)
def create_uic_task(self,node):
uictask=self.create_task('ui5',node)
uictask.outputs=[self.path.find_or_declare(self.env['ui_PATTERN']%node.name[:-3])]
@extension('.ts')
def add_lang(self,node):
self.lang=self.to_list(getattr(self,'lang',[]))+[node]
@feature('qt5')
@after_method('apply_link')
def apply_qt5(self):
if getattr(self,'lang',None):
qmtasks=[]
for x in self.to_list(self.lang):
if isinstance(x,str):
x=self.path.find_resource(x+'.ts')
qmtasks.append(self.create_task('ts2qm',x,x.change_ext('.qm')))
if getattr(self,'update',None)and Options.options.trans_qt5:
cxxnodes=[a.inputs[0]for a in self.compiled_tasks]+[a.inputs[0]for a in self.tasks if getattr(a,'inputs',None)and a.inputs[0].name.endswith('.ui')]
for x in qmtasks:
self.create_task('trans_update',cxxnodes,x.inputs)
if getattr(self,'langname',None):
qmnodes=[x.outputs[0]for x in qmtasks]
rcnode=self.langname
if isinstance(rcnode,str):
rcnode=self.path.find_or_declare(rcnode+'.qrc')
t=self.create_task('qm2rcc',qmnodes,rcnode)
k=create_rcc_task(self,t.outputs[0])
self.link_task.inputs.append(k.outputs[0])
lst=[]
for flag in self.to_list(self.env['CXXFLAGS']):
if len(flag)<2:continue
f=flag[0:2]
if f in('-D','-I','/D','/I'):
if(f[0]=='/'):
lst.append('-'+flag[1:])
else:
lst.append(flag)
self.env.append_value('MOC_FLAGS',lst)
@extension(*EXT_QT5)
def cxx_hook(self,node):
return self.create_compiled_task('qxx',node)
class rcc(Task.Task):
color='BLUE'
run_str='${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}'
ext_out=['.h']
def rcname(self):
return os.path.splitext(self.inputs[0].name)[0]
def scan(self):
if not has_xml:
Logs.error('no xml support was found, the rcc dependencies will be incomplete!')
return([],[])
parser=make_parser()
curHandler=XMLHandler()
parser.setContentHandler(curHandler)
fi=open(self.inputs[0].abspath(),'r')
try:
parser.parse(fi)
finally:
fi.close()
nodes=[]
names=[]
root=self.inputs[0].parent
for x in curHandler.files:
nd=root.find_resource(x)
if nd:nodes.append(nd)
else:names.append(x)
return(nodes,names)
class moc(Task.Task):
color='BLUE'
run_str='${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}'
class ui5(Task.Task):
color='BLUE'
run_str='${QT_UIC} ${SRC} -o ${TGT}'
ext_out=['.h']
class ts2qm(Task.Task):
color='BLUE'
run_str='${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
color='BLUE'
after='ts2qm'
def run(self):
txt='\n'.join(['<file>%s</file>'%k.path_from(self.outputs[0].parent)for k in self.inputs])
code='<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>'%txt
self.outputs[0].write(code)
def configure(self):
self.find_qt5_binaries()
self.set_qt5_libs_to_check()
self.set_qt5_defines()
self.find_qt5_libraries()
self.add_qt5_rpath()
self.simplify_qt5_libs()
@conf
def find_qt5_binaries(self):
env=self.env
opt=Options.options
qtdir=getattr(opt,'qtdir','')
qtbin=getattr(opt,'qtbin','')
paths=[]
if qtdir:
qtbin=os.path.join(qtdir,'bin')
if not qtdir:
qtdir=os.environ.get('QT5_ROOT','')
qtbin=os.environ.get('QT5_BIN',None)or os.path.join(qtdir,'bin')
if qtbin:
paths=[qtbin]
if not qtdir:
paths=os.environ.get('PATH','').split(os.pathsep)
paths.append('/usr/share/qt5/bin/')
try:
lst=Utils.listdir('/usr/local/Trolltech/')
except OSError:
pass
else:
if lst:
lst.sort()
lst.reverse()
qtdir='/usr/local/Trolltech/%s/'%lst[0]
qtbin=os.path.join(qtdir,'bin')
paths.append(qtbin)
cand=None
prev_ver=['5','0','0']
for qmk in('qmake-qt5','qmake5','qmake'):
try:
qmake=self.find_program(qmk,path_list=paths)
except self.errors.ConfigurationError:
pass
else:
try:
version=self.cmd_and_log(qmake+['-query','QT_VERSION']).strip()
except self.errors.WafError:
pass
else:
if version:
new_ver=version.split('.')
if new_ver>prev_ver:
cand=qmake
prev_ver=new_ver
if cand:
self.env.QMAKE=cand
else:
self.fatal('Could not find qmake for qt5')
qtbin=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_BINS']).strip()+os.sep
def find_bin(lst,var):
if var in env:
return
for f in lst:
try:
ret=self.find_program(f,path_list=paths)
except self.errors.ConfigurationError:
pass
else:
env[var]=ret
break
find_bin(['uic-qt5','uic'],'QT_UIC')
if not env.QT_UIC:
self.fatal('cannot find the uic compiler for qt5')
self.start_msg('Checking for uic version')
uicver=self.cmd_and_log(env.QT_UIC+["-version"],output=Context.BOTH)
uicver=''.join(uicver).strip()
uicver=uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt','')
self.end_msg(uicver)
if uicver.find(' 3.')!=-1 or uicver.find(' 4.')!=-1:
self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path')
find_bin(['moc-qt5','moc'],'QT_MOC')
find_bin(['rcc-qt5','rcc'],'QT_RCC')
find_bin(['lrelease-qt5','lrelease'],'QT_LRELEASE')
find_bin(['lupdate-qt5','lupdate'],'QT_LUPDATE')
env['UIC_ST']='%s -o %s'
env['MOC_ST']='-o'
env['ui_PATTERN']='ui_%s.h'
env['QT_LRELEASE_FLAGS']=['-silent']
env.MOCCPPPATH_ST='-I%s'
env.MOCDEFINES_ST='-D%s'
@conf
def find_qt5_libraries(self):
qtlibs=getattr(Options.options,'qtlibs',None)or os.environ.get("QT5_LIBDIR",None)
if not qtlibs:
try:
qtlibs=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_LIBS']).strip()
except Errors.WafError:
qtdir=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_PREFIX']).strip()+os.sep
qtlibs=os.path.join(qtdir,'lib')
self.msg('Found the Qt5 libraries in',qtlibs)
qtincludes=os.environ.get("QT5_INCLUDES",None)or self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_HEADERS']).strip()
env=self.env
if not'PKG_CONFIG_PATH'in os.environ:
os.environ['PKG_CONFIG_PATH']='%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib'%(qtlibs,qtlibs)
try:
if os.environ.get("QT5_XCOMPILE",None):
raise self.errors.ConfigurationError()
self.check_cfg(atleast_pkgconfig_version='0.1')
except self.errors.ConfigurationError:
for i in self.qt5_vars:
uselib=i.upper()
if Utils.unversioned_sys_platform()=="darwin":
frameworkName=i+".framework"
qtDynamicLib=os.path.join(qtlibs,frameworkName,i)
if os.path.exists(qtDynamicLib):
env.append_unique('FRAMEWORK_'+uselib,i)
self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
else:
self.msg('Checking for %s'%i,False,'YELLOW')
env.append_unique('INCLUDES_'+uselib,os.path.join(qtlibs,frameworkName,'Headers'))
elif env.DEST_OS!="win32":
qtDynamicLib=os.path.join(qtlibs,"lib"+i+".so")
qtStaticLib=os.path.join(qtlibs,"lib"+i+".a")
if os.path.exists(qtDynamicLib):
env.append_unique('LIB_'+uselib,i)
self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
elif os.path.exists(qtStaticLib):
env.append_unique('LIB_'+uselib,i)
self.msg('Checking for %s'%i,qtStaticLib,'GREEN')
else:
self.msg('Checking for %s'%i,False,'YELLOW')
env.append_unique('LIBPATH_'+uselib,qtlibs)
env.append_unique('INCLUDES_'+uselib,qtincludes)
env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
else:
for k in("lib%s.a","lib%s5.a","%s.lib","%s5.lib"):
lib=os.path.join(qtlibs,k%i)
if os.path.exists(lib):
env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
self.msg('Checking for %s'%i,lib,'GREEN')
break
else:
self.msg('Checking for %s'%i,False,'YELLOW')
env.append_unique('LIBPATH_'+uselib,qtlibs)
env.append_unique('INCLUDES_'+uselib,qtincludes)
env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
uselib=i.upper()+"_debug"
for k in("lib%sd.a","lib%sd5.a","%sd.lib","%sd5.lib"):
lib=os.path.join(qtlibs,k%i)
if os.path.exists(lib):
env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
self.msg('Checking for %s'%i,lib,'GREEN')
break
else:
self.msg('Checking for %s'%i,False,'YELLOW')
env.append_unique('LIBPATH_'+uselib,qtlibs)
env.append_unique('INCLUDES_'+uselib,qtincludes)
env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
else:
for i in self.qt5_vars_debug+self.qt5_vars:
self.check_cfg(package=i,args='--cflags --libs',mandatory=False)
@conf
def simplify_qt5_libs(self):
env=self.env
def process_lib(vars_,coreval):
for d in vars_:
var=d.upper()
if var=='QTCORE':
continue
value=env['LIBPATH_'+var]
if value:
core=env[coreval]
accu=[]
for lib in value:
if lib in core:
continue
accu.append(lib)
env['LIBPATH_'+var]=accu
process_lib(self.qt5_vars,'LIBPATH_QTCORE')
process_lib(self.qt5_vars_debug,'LIBPATH_QTCORE_DEBUG')
@conf
def add_qt5_rpath(self):
env=self.env
if getattr(Options.options,'want_rpath',False):
def process_rpath(vars_,coreval):
for d in vars_:
var=d.upper()
value=env['LIBPATH_'+var]
if value:
core=env[coreval]
accu=[]
for lib in value:
if var!='QTCORE':
if lib in core:
continue
accu.append('-Wl,--rpath='+lib)
env['RPATH_'+var]=accu
process_rpath(self.qt5_vars,'LIBPATH_QTCORE')
process_rpath(self.qt5_vars_debug,'LIBPATH_QTCORE_DEBUG')
@conf
def set_qt5_libs_to_check(self):
if not hasattr(self,'qt5_vars'):
self.qt5_vars=QT5_LIBS
self.qt5_vars=Utils.to_list(self.qt5_vars)
if not hasattr(self,'qt5_vars_debug'):
self.qt5_vars_debug=[a+'_debug'for a in self.qt5_vars]
self.qt5_vars_debug=Utils.to_list(self.qt5_vars_debug)
@conf
def set_qt5_defines(self):
if sys.platform!='win32':
return
for x in self.qt5_vars:
y=x[2:].upper()
self.env.append_unique('DEFINES_%s'%x.upper(),'QT_%s_LIB'%y)
self.env.append_unique('DEFINES_%s_DEBUG'%x.upper(),'QT_%s_LIB'%y)
def options(opt):
opt.add_option('--want-rpath',action='store_true',default=False,dest='want_rpath',help='enable the rpath for qt libraries')
opt.add_option('--header-ext',type='string',default='',help='header extension for moc files',dest='qt_header_ext')
for i in'qtdir qtbin qtlibs'.split():
opt.add_option('--'+i,type='string',default='',dest=i)
opt.add_option('--translate',action="store_true",help="collect translation strings",dest="trans_qt5",default=False)
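# Illustrative only (not part of the original tool): the options registered
# above are consumed at configure time. A typical invocation, with
# placeholder paths, might look like:
#   ./waf configure --qtdir=/opt/qt5 --want-rpath --translate
#   ./waf build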
|
"""Tests for json encoding/decoding."""
import json
import logging
from rekall import testlib
from rekall.ui import json_renderer
class JsonTest(testlib.RekallBaseUnitTestCase):
"""Test the Json encode/decoder."""
PLUGIN = "json_render"
def setUp(self):
self.session = self.MakeUserSession()
self.renderer = json_renderer.JsonRenderer(session=self.session)
self.encoder = self.renderer.encoder
self.decoder = self.renderer.decoder
def testObjectRenderer(self):
cases = [
('\xff\xff\x00\x00', {'mro': u'str:basestring:object',
'b64': u'//8AAA=='}),
("hello", u'hello'), # A string is converted into unicode if
# possible.
(1, 1), # Ints are already JSON serializable.
(dict(foo=2), {'foo': 2}),
(set([1, 2, 3]), {'mro': u'set:object', 'data': [1, 2, 3]}),
([1, 2, 3], [1, 2, 3]),
([1, "\xff\xff\x00\x00", 3], [1, {'mro': u'str:basestring:object',
'b64': u'//8AAA=='}, 3]),
]
for case in cases:
encoded = self.encoder.Encode(case[0])
self.assertEqual(encoded, case[1])
def testProperSerialization(self):
"""Test that serializing simple python objects with json works.
        NOTE: JSON is not intrinsically a fully functional serialization format
        - it cannot round-trip many common Python values (e.g. byte strings,
        sets, dicts with numeric keys). This tests that our wrapping around the
        JSON format allows the correct serialization of Python primitives.
"""
for case in [
[1, 2],
[1, "hello"],
["1", "2"],
["hello", u'Gr\xfcetzi'],
"hello",
u'Gr\xfcetzi',
dict(a="hello"),
dict(b=dict(a="hello")), # Nested dict.
]:
self.encoder.flush()
data = self.encoder.Encode(case)
logging.debug("%s->%s" % (case, data))
# Make sure the data is JSON serializable.
self.assertEqual(data, json.loads(json.dumps(data)))
self.decoder.SetLexicon(self.encoder.GetLexicon())
self.assertEqual(case, self.decoder.Decode(data))
    def testObjectSerialization(self):
"""Serialize _EPROCESS objects.
We check that the deserialized object is an exact replica of the
original - this includes the same address spaces, profile and offset.
Having the objects identical allows us to dereference object members
seamlessly.
"""
for task in self.session.plugins.pslist().filter_processes():
self.encoder.flush()
data = self.encoder.Encode(task)
logging.debug("%r->%s" % (task, data))
# Make sure the data is JSON serializable.
self.assertEqual(data, json.loads(json.dumps(data)))
self.decoder.SetLexicon(self.encoder.GetLexicon())
decoded_task = self.decoder.Decode(data)
self.assertEqual(task.obj_offset, decoded_task.obj_offset)
self.assertEqual(task.obj_name, decoded_task.obj_name)
self.assertEqual(task.obj_vm.name, decoded_task.obj_vm.name)
# Check the process name is the same - this tests subfield
# dereferencing.
self.assertEqual(task.name, decoded_task.name)
self.assertEqual(task.pid, decoded_task.pid)
def testAllObjectSerialization(self):
for vtype in self.session.profile.vtypes:
obj = self.session.profile.Object(vtype)
self.CheckObjectSerization(obj)
self.CheckObjectSerization(self.session.profile)
self.CheckObjectSerization(self.session.kernel_address_space)
self.CheckObjectSerization(self.session.physical_address_space)
# Some native types.
self.CheckObjectSerization(set([1, 2, 3]))
self.CheckObjectSerization(dict(a=1, b=dict(a=1)))
def CheckObjectSerization(self, obj):
object_renderer_cls = json_renderer.JsonObjectRenderer.ForTarget(
obj, "JsonRenderer")
renderer = json_renderer.JsonRenderer(session=self.session)
object_renderer = object_renderer_cls(
session=self.session, renderer=renderer)
encoded = object_renderer.EncodeToJsonSafe(obj, strict=True)
# Make sure it is json safe.
json.dumps(encoded)
# Now decode it.
decoding_object_renderer_cls = json_renderer.JsonObjectRenderer.FromEncoded(
encoded, "JsonRenderer")
self.assertEqual(decoding_object_renderer_cls, object_renderer_cls)
decoded = object_renderer.DecodeFromJsonSafe(encoded, {})
self.assertEqual(decoded, obj)
# Now check the DataExportRenderer.
object_renderer_cls = json_renderer.JsonObjectRenderer.ForTarget(
obj, "DataExportRenderer")
object_renderer = object_renderer_cls(session=self.session,
renderer="DataExportRenderer")
encoded = object_renderer.EncodeToJsonSafe(obj, strict=True)
# Make sure it is json safe.
json.dumps(encoded)
# Data Export is not decodable.
|
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY,
SYSTEM, SYSTEM_ONLY, CURRENT_USER,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
return Monotonic((_datetime.timedelta(microseconds=m[0]),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
def _convert_trivial(x):
return x
if _sys.version_info >= (3,):
def _convert_uuid(s):
return _uuid.UUID(s.decode())
else:
_convert_uuid = _uuid.UUID
DEFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_PID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'__CURSOR': _convert_trivial,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_LETTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_')
def _valid_field_name(s):
return not (set(s) - _IDENT_LETTER)
class Reader(_Reader):
"""Reader allows the access and filtering of systemd journal
entries. Note that in order to access the system journal, a
non-root user must be in the `systemd-journal` group.
Example usage to print out all informational or higher level
messages for systemd-udevd for this boot:
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j:
... print(entry['MESSAGE'])
See systemd.journal-fields(7) for more info on typical fields
found in the journal.
"""
def __init__(self, flags=0, path=None, files=None, converters=None):
"""Create an instance of Reader, which allows filtering and
return of journal entries.
        Argument `flags` sets the open flags of the journal, which can be
        one of, or an ORed combination of, the constants: LOCAL_ONLY
        (default) opens the journal on the local machine only; RUNTIME_ONLY opens only
volatile journal files; and SYSTEM_ONLY opens only
journal files of system services and the kernel.
Argument `path` is the directory of journal files. Note that
`flags` and `path` are exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field
names are used as keys into this dictionary. The values must
be single argument functions, which take a `bytes` object and
return a converted value. When there's no entry for a field
name, then the default UTF-8 decoding will be attempted. If
        the conversion fails with a ValueError, the unconverted bytes
        object will be returned. (Note that ValueError is a superclass
        of UnicodeDecodeError.)
Reader implements the context manager protocol: the journal
will be closed when exiting the block.
"""
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3,3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key]
If `key` is not present in self.converters, a standard unicode
decoding will be attempted. If the conversion (either
key-specific or the default one) fails with a ValueError, the
original bytes object will be returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _covert_field"""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Part of iterator protocol.
Returns self.
"""
return self
def __next__(self):
"""Part of iterator protocol.
Returns self.get_next() or raises StopIteration.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined in a logical AND,
and matches of the same field are automatically combined in a
logical OR.
Matches can be passed as strings of form "FIELD=value", or
keyword arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
"""Return the next log entry as a mapping type, currently
a standard dictionary of fields.
        Optional `skip` value will return the `skip`-th log entry.
Entries will be processed with converters specified during
Reader creation.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
"""Return the previous log entry as a mapping type,
currently a standard dictionary of fields.
        Optional `skip` value will return the -`skip`-th log entry.
Entries will be processed with converters specified during
Reader creation.
Equivalent to get_next(-skip).
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return unique values appearing in the journal for given `field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal. `timeout` is the maximum
time in seconds to wait, or None, to wait forever.
Returns one of NOP (no change), APPEND (new entries have been
added to the end of the journal), or INVALIDATE (journal files
have been added or removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `realtime` time.
Argument `realtime` must be either an integer unix timestamp
or datetime.datetime instance.
"""
if isinstance(realtime, _datetime.datetime):
realtime = float(realtime.strftime("%s.%f")) * 1000000
return super(Reader, self).seek_realtime(int(realtime))
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either
seconds or a datetime.timedelta instance. Argument `bootid`
is a string or UUID representing which boot the monotonic time
is reference to. Defaults to current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
            monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
            bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
        `messageid` can be a string of hexadecimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
            messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID equal to current boot ID or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
def get_catalog(mid):
if isinstance(mid, _uuid.UUID):
        mid = mid.hex
return _get_catalog(mid)
def _make_line(field, value):
if isinstance(value, bytes):
return field.encode('utf-8') + b'=' + value
elif isinstance(value, int):
return field + '=' + str(value)
else:
return field + '=' + value
def send(MESSAGE, MESSAGE_ID=None,
CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
**kwargs):
r"""Send a message to the journal.
>>> journal.send('Hello world')
>>> journal.send('Hello, again, world', FIELD2='Greetings!')
>>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')
Value of the MESSAGE argument will be used for the MESSAGE=
field. MESSAGE must be a string and will be sent as UTF-8 to
the journal.
MESSAGE_ID can be given to uniquely identify the type of
message. It must be a string or a uuid.UUID object.
CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to
    identify the caller. Unless at least one of the three is given,
values are extracted from the stack frame of the caller of
send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE
must be an integer.
Additional fields for the journal entry can only be specified
as keyword arguments. The payload can be either a string or
bytes. A string will be sent as UTF-8, and bytes will be sent
as-is to the journal.
Other useful fields include PRIORITY, SYSLOG_FACILITY,
SYSLOG_IDENTIFIER, SYSLOG_PID.
"""
args = ['MESSAGE=' + MESSAGE]
if MESSAGE_ID is not None:
id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
args.append('MESSAGE_ID=' + id)
    if CODE_LINE is None and CODE_FILE is None and CODE_FUNC is None:
CODE_FILE, CODE_LINE, CODE_FUNC = \
_traceback.extract_stack(limit=2)[0][:3]
if CODE_FILE is not None:
args.append('CODE_FILE=' + CODE_FILE)
if CODE_LINE is not None:
args.append('CODE_LINE={:d}'.format(CODE_LINE))
if CODE_FUNC is not None:
args.append('CODE_FUNC=' + CODE_FUNC)
args.extend(_make_line(key, val) for key, val in kwargs.items())
return sendv(*args)
def stream(identifier, priority=LOG_DEBUG, level_prefix=False):
r"""Return a file object wrapping a stream to journal.
    Log messages written to this file as simple newline-separated
text strings are written to the journal.
The file will be line buffered, so messages are actually sent
after a newline character is written.
>>> stream = journal.stream('myapp')
>>> stream
<open file '<fdopen>', mode 'w' at 0x...>
>>> stream.write('message...\n')
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
    Using the interface with print might be more convenient:
>>> from __future__ import print_function
>>> print('message...', file=stream)
priority is the syslog priority, one of `LOG_EMERG`,
`LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`,
`LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority
level prefixes (such as '<1>') are interpreted. See
sd-daemon(3) for more information.
"""
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1)
class JournalHandler(_logging.Handler):
"""Journal handler class for the Python logging framework.
Please see the Python logging module documentation for an
overview: http://docs.python.org/library/logging.html.
To create a custom logger whose messages go only to journal:
>>> log = logging.getLogger('custom_logger_name')
>>> log.propagate = False
>>> log.addHandler(journal.JournalHandler())
>>> log.warn("Some message: %s", detail)
Note that by default, message levels `INFO` and `DEBUG` are
ignored by the logging framework. To enable those log levels:
>>> log.setLevel(logging.DEBUG)
To redirect all logging messages to journal regardless of where
they come from, attach it to the root logger:
>>> logging.root.addHandler(journal.JournalHandler())
For more complex configurations when using `dictConfig` or
`fileConfig`, specify `systemd.journal.JournalHandler` as the
handler class. Only standard handler configuration options
are supported: `level`, `formatter`, `filters`.
To attach journal MESSAGE_ID, an extra field is supported:
>>> import uuid
>>> mid = uuid.UUID('0123456789ABCDEF0123456789ABCDEF')
>>> log.warn("Message with ID", extra={'MESSAGE_ID': mid})
Fields to be attached to all messages sent through this
handler can be specified as keyword arguments. This probably
makes sense only for SYSLOG_IDENTIFIER and similar fields
which are constant for the whole program:
>>> journal.JournalHandler(SYSLOG_IDENTIFIER='my-cool-app')
The following journal fields will be sent:
`MESSAGE`, `PRIORITY`, `THREAD_NAME`, `CODE_FILE`, `CODE_LINE`,
`CODE_FUNC`, `LOGGER` (name as supplied to getLogger call),
`MESSAGE_ID` (optional, see above), `SYSLOG_IDENTIFIER` (defaults
to sys.argv[0]).
"""
def __init__(self, level=_logging.NOTSET, **kwargs):
super(JournalHandler, self).__init__(level)
for name in kwargs:
if not _valid_field_name(name):
raise ValueError('Invalid field name: ' + name)
if 'SYSLOG_IDENTIFIER' not in kwargs:
kwargs['SYSLOG_IDENTIFIER'] = _sys.argv[0]
self._extra = kwargs
def emit(self, record):
"""Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present.
"""
try:
msg = self.format(record)
pri = self.mapPriority(record.levelno)
mid = getattr(record, 'MESSAGE_ID', None)
send(msg,
MESSAGE_ID=mid,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**self._extra)
except Exception:
self.handleError(record)
@staticmethod
def mapPriority(levelno):
"""Map logging levels to journald priorities.
Since Python log level numbers are "sparse", we have
to map numbers in between the standard levels too.
"""
if levelno <= _logging.DEBUG:
return LOG_DEBUG
elif levelno <= _logging.INFO:
return LOG_INFO
elif levelno <= _logging.WARNING:
return LOG_WARNING
elif levelno <= _logging.ERROR:
return LOG_ERR
elif levelno <= _logging.CRITICAL:
return LOG_CRIT
else:
return LOG_ALERT
|
from twisted.internet import defer
from twisted.python import components, failure
from twisted.cred import error, credentials
class ICredentialsChecker(components.Interface):
"""I check sub-interfaces of ICredentials.
    @cvar credentialInterfaces: A list of sub-interfaces of ICredentials
    specifying which kinds of credentials I may check.
"""
def requestAvatarId(self, credentials):
"""
@param credentials: something which implements one of the interfaces in
self.credentialInterfaces.
@return: a Deferred which will fire a string which identifies an
avatar, an empty tuple to specify an authenticated anonymous user
(provided as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).
A note on anonymity - We do not want None as the value for anonymous
because it is too easy to accidentally return it. We do not want the
empty string, because it is too easy to mistype a password file. For
example, an .htpasswd file may contain the lines: ['hello:asdf',
'world:asdf', 'goodbye', ':world']. This misconfiguration will have an
ill effect in any case, but accidentally granting anonymous access is a
worse failure mode than simply granting access to an untypeable
username. We do not want an instance of 'object', because that would
create potential problems with persistence.
"""
ANONYMOUS = ()
class AllowAnonymousAccess:
__implements__ = ICredentialsChecker
credentialInterfaces = credentials.IAnonymous,
def requestAvatarId(self, credentials):
return defer.succeed(ANONYMOUS)
class InMemoryUsernamePasswordDatabaseDontUse:
credentialInterfaces = credentials.IUsernamePassword,
__implements__ = ICredentialsChecker
def __init__(self):
self.users = {}
def addUser(self, username, password):
self.users[username] = password
def _cbPasswordMatch(self, matched, username):
if matched:
return username
else:
return failure.Failure(error.UnauthorizedLogin())
def requestAvatarId(self, credentials):
        if credentials.username in self.users:
return defer.maybeDeferred(
credentials.checkPassword,
self.users[credentials.username]).addCallback(
self._cbPasswordMatch, credentials.username)
else:
return defer.fail(error.UnauthorizedLogin())
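# Usage sketch (illustrative, not part of the original module):
# checker = InMemoryUsernamePasswordDatabaseDontUse()
# checker.addUser('alice', 'secret')
# d = checker.requestAvatarId(credentials.UsernamePassword('alice', 'secret'))
# d.addCallback(lambda avatarId: avatarId)  # fires with 'alice' on success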
|
import sys
import os
import json
username = None
password = None
webhdfsurl = None
srcfile = sys.argv[1]
destfile = sys.argv[2]
if "VCAP_SERVICES" in os.environ:
vcaps = json.loads(os.environ["VCAP_SERVICES"])
if "Analytics for Apache Hadoop" in vcaps:
username = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["userid"]
password = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["password"]
webhdfsurl = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["WebhdfsUrl"]
else:
if "WEBHDFS_USER" in os.environ:
username=os.environ["WEBHDFS_USER"]
if "WEBHDFS_PASSWORD" in os.environ:
password=os.environ["WEBHDFS_PASSWORD"]
if "WEBHDFS_URL" in os.environ:
webhdfsurl=os.environ["WEBHDFS_URL"]
if (username is not None and password is not None and webhdfsurl is not None):
filedata = None
with open (srcfile,'r') as file:
filedata = file.read()
filedata = filedata.replace('%instance_user%', username)
filedata = filedata.replace('%instance_user_password%', password)
filedata = filedata.replace('%webhdfs_url%', webhdfsurl)
with open (destfile,'w') as file:
file.write(filedata)
sys.exit(0)
else:
sys.stderr.write('Fatal error: cannot find Web HDFS credentials and/or endpoint\n')
if username is None:
sys.stderr.write('username missing\n')
if password is None:
sys.stderr.write('password missing\n')
if webhdfsurl is None:
sys.stderr.write('URL endpoint missing\n')
sys.exit(1)
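# Invocation sketch (script and file names are placeholders, not from this
# repository); outside Bluemix the credentials come from the environment:
#   WEBHDFS_USER=alice WEBHDFS_PASSWORD=secret \
#   WEBHDFS_URL=https://host:50070/webhdfs/v1 \
#   python substitute_credentials.py template.xml core-site.xml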
|
"""
Program that parses standard-format results,
computes statistics and checks for regression bugs.
:copyright: Red Hat 2011-2012
:author: Amos Kong <akong@redhat.com>
"""
import os
import sys
import re
import commands
import warnings
import ConfigParser
import MySQLdb
def exec_sql(cmd, conf="../../global_config.ini"):
config = ConfigParser.ConfigParser()
config.read(conf)
user = config.get("AUTOTEST_WEB", "user")
passwd = config.get("AUTOTEST_WEB", "password")
db = config.get("AUTOTEST_WEB", "database")
db_type = config.get("AUTOTEST_WEB", "db_type")
if db_type != 'mysql':
print "regression.py: only support mysql database!"
sys.exit(1)
conn = MySQLdb.connect(host="localhost", user=user,
passwd=passwd, db=db)
cursor = conn.cursor()
cursor.execute(cmd)
rows = cursor.fetchall()
lines = []
for row in rows:
line = []
for c in row:
line.append(str(c))
lines.append(" ".join(line))
cursor.close()
conn.close()
return lines
def get_test_keyval(jobid, keyname, default=''):
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
test_idx = exec_sql('select test_idx from tko_tests where job_idx=%s'
% idx)[3]
try:
return exec_sql('select value from tko_test_attributes'
' where test_idx=%s and attribute="%s"'
% (test_idx, keyname))[-1]
except:
return default
class Sample(object):
""" Collect test results in same environment to a sample """
def __init__(self, sample_type, arg):
def generate_raw_table(test_dict):
ret_dict = []
tmp = []
sample_type = category = None
for i in test_dict:
line = i.split('|')[1:]
if not sample_type:
sample_type = line[0:2]
if sample_type != line[0:2]:
ret_dict.append('|'.join(sample_type + tmp))
sample_type = line[0:2]
tmp = []
if "e+" in line[-1]:
tmp.append("%f" % float(line[-1]))
elif 'e-' in line[-1]:
tmp.append("%f" % float(line[-1]))
elif not (re.findall("[a-zA-Z]", line[-1]) or is_int(line[-1])):
tmp.append("%f" % float(line[-1]))
else:
tmp.append(line[-1])
if category != i.split('|')[0]:
category = i.split('|')[0]
ret_dict.append("Category:" + category.strip())
ret_dict.append(self.categories)
ret_dict.append('|'.join(sample_type + tmp))
return ret_dict
if sample_type == 'filepath':
files = arg.split()
self.files_dict = []
for i in range(len(files)):
fd = open(files[i], "r")
f = []
for l in fd.readlines():
l = l.strip()
if re.findall("^### ", l):
if "kvm-userspace-ver" in l:
self.kvmver = l.split(':')[-1]
elif "kvm_version" in l:
self.hostkernel = l.split(':')[-1]
elif "guest-kernel-ver" in l:
self.guestkernel = l.split(':')[-1]
elif "session-length" in l:
self.len = l.split(':')[-1]
else:
f.append(l.strip())
self.files_dict.append(f)
fd.close()
sysinfodir = os.path.join(os.path.dirname(files[0]), "../../sysinfo/")
sysinfodir = os.path.realpath(sysinfodir)
cpuinfo = commands.getoutput("cat %s/cpuinfo" % sysinfodir)
lscpu = commands.getoutput("cat %s/lscpu" % sysinfodir)
meminfo = commands.getoutput("cat %s/meminfo" % sysinfodir)
lspci = commands.getoutput("cat %s/lspci_-vvnn" % sysinfodir)
partitions = commands.getoutput("cat %s/partitions" % sysinfodir)
fdisk = commands.getoutput("cat %s/fdisk_-l" % sysinfodir)
status_path = os.path.join(os.path.dirname(files[0]), "../status")
status_file = open(status_path, 'r')
content = status_file.readlines()
self.testdata = re.findall("localtime=(.*)\t", content[-1])[-1]
cpunum = len(re.findall("processor\s+: \d", cpuinfo))
cpumodel = re.findall("Model name:\s+(.*)", lscpu)
socketnum = int(re.findall("Socket\(s\):\s+(\d+)", lscpu)[0])
corenum = int(re.findall("Core\(s\) per socket:\s+(\d+)", lscpu)[0]) * socketnum
threadnum = int(re.findall("Thread\(s\) per core:\s+(\d+)", lscpu)[0]) * corenum
numanodenum = int(re.findall("NUMA node\(s\):\s+(\d+)", lscpu)[0])
memnum = float(re.findall("MemTotal:\s+(\d+)", meminfo)[0]) / 1024 / 1024
nicnum = len(re.findall("\d+:\d+\.0 Ethernet", lspci))
disknum = re.findall("sd\w+\S", partitions)
fdiskinfo = re.findall("Disk\s+(/dev/sd.*\s+GiB),", fdisk)
elif sample_type == 'database':
jobid = arg
self.kvmver = get_test_keyval(jobid, "kvm-userspace-ver")
self.hostkernel = get_test_keyval(jobid, "kvm_version")
self.guestkernel = get_test_keyval(jobid, "guest-kernel-ver")
self.len = get_test_keyval(jobid, "session-length")
self.categories = get_test_keyval(jobid, "category")
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
data = exec_sql("select test_idx,iteration_key,iteration_value"
" from tko_perf_view where job_idx=%s" % idx)
testidx = None
job_dict = []
test_dict = []
for l in data:
s = l.split()
if not testidx:
testidx = s[0]
if testidx != s[0]:
job_dict.append(generate_raw_table(test_dict))
test_dict = []
testidx = s[0]
test_dict.append(' | '.join(s[1].split('--')[0:] + s[-1:]))
job_dict.append(generate_raw_table(test_dict))
self.files_dict = job_dict
self.version = " userspace: %s\n host kernel: %s\n guest kernel: %s" % (
self.kvmver, self.hostkernel, self.guestkernel)
nrepeat = len(self.files_dict)
if nrepeat < 2:
print "`nrepeat' should be larger than 1!"
sys.exit(1)
self.desc = """<hr>Machine Info:
o CPUs(%s * %s), Cores(%s), Threads(%s), Sockets(%s),
o NumaNodes(%s), Memory(%.1fG), NICs(%s)
o Disks(%s | %s)
Please check sysinfo directory in autotest result to get more details.
(eg: http://autotest-server.com/results/5057-autotest/host1/sysinfo/)
<hr>""" % (cpunum, cpumodel, corenum, threadnum, socketnum, numanodenum, memnum, nicnum, fdiskinfo, disknum)
self.desc += """ - Every Avg line represents the average value based on *%d* repetitions of the same test,
and the following SD line represents the Standard Deviation between the *%d* repetitions.
- The Standard deviation is displayed as a percentage of the average.
- The significance of the differences between the two averages is calculated using unpaired T-test that
takes into account the SD of the averages.
- The paired t-test is computed for the averages of same category.
""" % (nrepeat, nrepeat)
def getAvg(self, avg_update=None):
return self._process_files(self.files_dict, self._get_list_avg,
avg_update=avg_update)
def getAvgPercent(self, avgs_dict):
return self._process_files(avgs_dict, self._get_augment_rate)
def getSD(self):
return self._process_files(self.files_dict, self._get_list_sd)
def getSDRate(self, sds_dict):
return self._process_files(sds_dict, self._get_rate)
def getTtestPvalue(self, fs_dict1, fs_dict2, paired=None, ratio=None):
"""
scipy lib is used to compute p-value of Ttest
scipy: http://www.scipy.org/
t-test: http://en.wikipedia.org/wiki/Student's_t-test
"""
try:
from scipy import stats
import numpy as np
except ImportError:
print "No python scipy/numpy library installed!"
return None
ret = []
s1 = self._process_files(fs_dict1, self._get_list_self, merge=False)
s2 = self._process_files(fs_dict2, self._get_list_self, merge=False)
        # s*[line][col] contains items (line*col) of all sample files
for line in range(len(s1)):
tmp = []
if type(s1[line]) != list:
tmp = s1[line]
else:
if len(s1[line][0]) < 2:
continue
for col in range(len(s1[line])):
avg1 = self._get_list_avg(s1[line][col])
avg2 = self._get_list_avg(s2[line][col])
sample1 = np.array(s1[line][col])
sample2 = np.array(s2[line][col])
warnings.simplefilter("ignore", RuntimeWarning)
if (paired):
if (ratio):
(_, p) = stats.ttest_rel(np.log(sample1), np.log(sample2))
else:
(_, p) = stats.ttest_rel(sample1, sample2)
else:
(_, p) = stats.ttest_ind(sample1, sample2)
flag = "+"
if float(avg1) > float(avg2):
flag = "-"
tmp.append(flag + "%f" % (1 - p))
tmp = "|".join(tmp)
ret.append(tmp)
return ret
def _get_rate(self, data):
""" num2 / num1 * 100 """
result = "0.0"
if len(data) == 2 and float(data[0]) != 0:
result = float(data[1]) / float(data[0]) * 100
if result > 100:
result = "%.2f%%" % result
else:
result = "%.4f%%" % result
return result
def _get_augment_rate(self, data):
""" (num2 - num1) / num1 * 100 """
result = "+0.0"
if len(data) == 2 and float(data[0]) != 0:
result = (float(data[1]) - float(data[0])) / float(data[0]) * 100
if result > 100:
result = "%+.2f%%" % result
else:
result = "%+.4f%%" % result
return result
def _get_list_sd(self, data):
"""
sumX = x1 + x2 + ... + xn
avgX = sumX / n
sumSquareX = x1^2 + ... + xn^2
SD = sqrt([sumSquareX - (n * (avgX ^ 2))] / (n - 1))
"""
o_sum = sqsum = 0.0
n = len(data)
for i in data:
o_sum += float(i)
sqsum += float(i) ** 2
avg = o_sum / n
if avg == 0 or n == 1 or sqsum - (n * avg ** 2) <= 0:
return "0.0"
return "%f" % (((sqsum - (n * avg ** 2)) / (n - 1)) ** 0.5)
def _get_list_avg(self, data):
""" Compute the average of list entries """
o_sum = 0.0
for i in data:
o_sum += float(i)
return "%f" % (o_sum / len(data))
def _get_list_self(self, data):
""" Use this to convert sample dicts """
return data
def _process_lines(self, files_dict, row, func, avg_update, merge):
""" Use unified function to process same lines of different samples """
lines = []
ret = []
for i in range(len(files_dict)):
lines.append(files_dict[i][row].split("|"))
for col in range(len(lines[0])):
data_list = []
for i in range(len(lines)):
tmp = lines[i][col].strip()
if is_int(tmp):
data_list.append(int(tmp))
else:
data_list.append(float(tmp))
ret.append(func(data_list))
if avg_update:
for i in avg_update.split('|'):
l = i.split(',')
ret[int(l[0])] = "%f" % (float(ret[int(l[1])]) /
float(ret[int(l[2])]))
if merge:
return "|".join(ret)
return ret
def _process_files(self, files_dict, func, avg_update=None, merge=True):
"""
        Process the dicts of sample files with the assigned function;
        func takes a single list argument.
"""
ret_lines = []
for i in range(len(files_dict[0])):
if re.findall("[a-zA-Z]", files_dict[0][i]):
ret_lines.append(files_dict[0][i].strip())
else:
line = self._process_lines(files_dict, i, func, avg_update,
merge)
ret_lines.append(line)
return ret_lines
def display(lists, rates, allpvalues, f, ignore_col, o_sum="Augment Rate",
prefix0=None, prefix1=None, prefix2=None, prefix3=None):
"""
    Display list data in a standard (HTML table) format
    param lists: row data lists
    param rates: augment rate lists
    param allpvalues: per-category p-value lists
    param f: result output filepath
    param ignore_col: number of leading columns to blank out
    param o_sum: comparison result summary (table heading)
    param prefix0: output prefix for header lines
    param prefix1: output prefix for Avg/SD lines
    param prefix2: output prefix for Diff Avg/P-value lines
    param prefix3: output prefix for the total Significance line
"""
def str_ignore(out, split=False):
out = out.split("|")
for i in range(ignore_col):
out[i] = " "
if split:
return "|".join(out[ignore_col:])
return "|".join(out)
def tee_line(content, filepath, n=None):
fd = open(filepath, "a")
print content
out = ""
out += "<TR ALIGN=CENTER>"
content = content.split("|")
for i in range(len(content)):
if not is_int(content[i]) and is_float(content[i]):
if "+" in content[i] or "-" in content[i]:
if float(content[i]) > 100:
content[i] = "%+.2f" % float(content[i])
else:
content[i] = "%+.4f" % float(content[i])
elif float(content[i]) > 100:
content[i] = "%.2f" % float(content[i])
else:
content[i] = "%.4f" % float(content[i])
if n and i >= 2 and i < ignore_col + 2:
out += "<TD ROWSPAN=%d WIDTH=1%% >%.0f</TD>" % (n, float(content[i]))
else:
out += "<TD WIDTH=1%% >%s</TD>" % content[i]
out += "</TR>"
fd.write(out + "\n")
fd.close()
for l in range(len(lists[0])):
if not re.findall("[a-zA-Z]", lists[0][l]):
break
tee("<TABLE BORDER=1 CELLSPACING=1 CELLPADDING=1 width=10%><TBODY>",
f)
tee("<h3>== %s " % o_sum + "==</h3>", f)
category = 0
for i in range(len(lists[0])):
for n in range(len(lists)):
is_diff = False
for j in range(len(lists)):
if lists[0][i] != lists[j][i]:
is_diff = True
if len(lists) == 1 and not re.findall("[a-zA-Z]", lists[j][i]):
is_diff = True
pfix = prefix1[0]
if len(prefix1) != 1:
pfix = prefix1[n]
if is_diff:
if n == 0:
tee_line(pfix + lists[n][i], f, n=len(lists) + len(rates))
else:
tee_line(pfix + str_ignore(lists[n][i], True), f)
if not is_diff and n == 0:
if '|' in lists[n][i]:
tee_line(prefix0 + lists[n][i], f)
elif "Category:" in lists[n][i]:
if category != 0 and prefix3:
if len(allpvalues[category - 1]) > 0:
tee_line(prefix3 + str_ignore(
allpvalues[category - 1][0]), f)
tee("</TBODY></TABLE>", f)
tee("<br>", f)
tee("<TABLE BORDER=1 CELLSPACING=1 CELLPADDING=1 "
"width=10%><TBODY>", f)
category += 1
tee("<TH colspan=3 >%s</TH>" % lists[n][i], f)
else:
tee("<TH colspan=3 >%s</TH>" % lists[n][i], f)
for n in range(len(rates)):
if lists[0][i] != rates[n][i] and (not re.findall("[a-zA-Z]",
rates[n][i]) or "nan" in rates[n][i]):
tee_line(prefix2[n] + str_ignore(rates[n][i], True), f)
if prefix3 and len(allpvalues[-1]) > 0:
tee_line(prefix3 + str_ignore(allpvalues[category - 1][0]), f)
tee("</TBODY></TABLE>", f)
def analyze(test, sample_type, arg1, arg2, configfile):
""" Compute averages/p-vales of two samples, print results nicely """
config = ConfigParser.ConfigParser()
config.read(configfile)
ignore_col = int(config.get(test, "ignore_col"))
avg_update = config.get(test, "avg_update")
desc = config.get(test, "desc")
def get_list(directory):
result_file_pattern = config.get(test, "result_file_pattern")
cmd = 'find %s|grep "%s.*/%s"' % (directory, test, result_file_pattern)
print cmd
return commands.getoutput(cmd)
if sample_type == 'filepath':
arg1 = get_list(arg1)
arg2 = get_list(arg2)
commands.getoutput("rm -f %s.*html" % test)
s1 = Sample(sample_type, arg1)
avg1 = s1.getAvg(avg_update=avg_update)
sd1 = s1.getSD()
s2 = Sample(sample_type, arg2)
avg2 = s2.getAvg(avg_update=avg_update)
sd2 = s2.getSD()
sd1 = s1.getSDRate([avg1, sd1])
sd2 = s1.getSDRate([avg2, sd2])
avgs_rate = s1.getAvgPercent([avg1, avg2])
navg1 = []
navg2 = []
allpvalues = []
tmp1 = []
tmp2 = []
for i in range(len(avg1)):
if not re.findall("[a-zA-Z]", avg1[i]):
tmp1.append([avg1[i]])
tmp2.append([avg2[i]])
elif 'Category' in avg1[i] and i != 0:
navg1.append(tmp1)
navg2.append(tmp2)
tmp1 = []
tmp2 = []
navg1.append(tmp1)
navg2.append(tmp2)
for i in range(len(navg1)):
allpvalues.append(s1.getTtestPvalue(navg1[i], navg2[i], True, True))
pvalues = s1.getTtestPvalue(s1.files_dict, s2.files_dict, False)
rlist = [avgs_rate]
if pvalues:
# p-value list isn't null
rlist.append(pvalues)
desc = desc % s1.len
tee("<pre>####1. Description of setup#1\n%s\n test data: %s</pre>"
% (s1.version, s1.testdata), "%s.html" % test)
tee("<pre>####2. Description of setup#2\n%s\n test data: %s</pre>"
% (s2.version, s2.testdata), "%s.html" % test)
tee("<pre>" + '\n'.join(desc.split('\\n')) + "</pre>", test + ".html")
tee("<pre>" + s1.desc + "</pre>", test + ".html")
display([avg1, sd1, avg2, sd2], rlist, allpvalues, test + ".html",
ignore_col, o_sum="Regression Testing: %s" % test, prefix0="#|Tile|",
prefix1=["1|Avg|", " |%SD|", "2|Avg|", " |%SD|"],
prefix2=["-|%Diff between Avg|", "-|Significance|"],
prefix3="-|Total Significance|")
display(s1.files_dict, [avg1], [], test + ".avg.html", ignore_col,
o_sum="Raw data of sample 1", prefix0="#|Tile|",
prefix1=[" | |"],
prefix2=["-|Avg |"], prefix3="")
display(s2.files_dict, [avg2], [], test + ".avg.html", ignore_col,
o_sum="Raw data of sample 2", prefix0="#|Tile|",
prefix1=[" | |"],
prefix2=["-|Avg |"], prefix3="")
def is_int(n):
try:
int(n)
return True
except ValueError:
return False
def is_float(n):
try:
float(n)
return True
except ValueError:
return False
def tee(content, filepath):
""" Write content to standard output and filepath """
fd = open(filepath, "a")
fd.write(content + "\n")
fd.close()
print content
if __name__ == "__main__":
if len(sys.argv) != 5:
this = os.path.basename(sys.argv[0])
print 'Usage: %s <testname> filepath <dir1> <dir2>' % this
print ' or %s <testname> db <jobid1> <jobid2>' % this
sys.exit(1)
analyze(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], 'perf.conf')
|
import unittest, tempfile, sys, os.path
datadir = os.environ.get('APPORT_DATA_DIR', '/usr/share/apport')
sys.path.insert(0, os.path.join(datadir, 'general-hooks'))
import parse_segv
regs = '''eax 0xffffffff -1
ecx 0xbfc6af40 -1077498048
edx 0x1 1
ebx 0x26eff4 2551796
esp 0xbfc6af24 0xbfc6af24
ebp 0xbfc6af28 0xbfc6af28
esi 0x826bb60 136756064
edi 0x8083480 134755456
eip 0x808354e 0x808354e <main+14>
eflags 0x200286 [ PF SF IF ID ]
cs 0x73 115
ss 0x7b 123
ds 0x7b 123
es 0x7b 123
fs 0x4 4
gs 0x33 51
'''
regs64 = '''rax 0xffffffffffffffff -1
rbx 0x26eff4 2551796
rcx 0xffffffffffffffff -1
rdx 0xffffffffff600180 -10485376
rsi 0x0 0
rdi 0x7fffffffe3b0 140737488348080
rbp 0x0 0x0
rsp 0x0000bfc6af24 0x0000bfc6af24
r8 0x0 0
r9 0x0 0
r10 0x7fffffffe140 140737488347456
r11 0x246 582
r12 0x7fffffffe400 140737488348160
r13 0x7fffffffe468 140737488348264
r14 0x1 1
r15 0x7fffffffe460 140737488348256
rip 0x7ffff790be10 0x7ffff790be10 <nanosleep+16>
eflags 0x246 [ PF ZF IF ]
cs 0x33 51
ss 0x2b 43
ds 0x0 0
es 0x0 0
fs 0x0 0
gs 0x0 0
fctrl 0x37f 895
fstat 0x0 0
ftag 0xffff 65535
fiseg 0x0 0
fioff 0x40303a 4206650
foseg 0x0 0
fooff 0x0 0
fop 0x5d8 1496
mxcsr 0x1f80 [ IM DM ZM OM UM PM ]
'''
maps = '''00110000-0026c000 r-xp 00000000 08:06 375131 /lib/tls/i686/cmov/libc-2.9.so
0026c000-0026d000 ---p 0015c000 08:06 375131 /lib/tls/i686/cmov/libc-2.9.so
0026d000-0026f000 r--p 0015c000 08:06 375131 /lib/tls/i686/cmov/libc-2.9.so
0026f000-00270000 rw-p 0015e000 08:06 375131 /lib/tls/i686/cmov/libc-2.9.so
00270000-00273000 rw-p 00000000 00:00 0
002c1000-002e5000 r-xp 00000000 08:06 375135 /lib/tls/i686/cmov/libm-2.9.so
002e5000-002e6000 r--p 00023000 08:06 375135 /lib/tls/i686/cmov/libm-2.9.so
002e6000-002e7000 rw-p 00024000 08:06 375135 /lib/tls/i686/cmov/libm-2.9.so
00318000-00334000 r-xp 00000000 08:06 977846 /lib/ld-2.9.so
00334000-00335000 r--p 0001b000 08:06 977846 /lib/ld-2.9.so
00335000-00336000 rw-p 0001c000 08:06 977846 /lib/ld-2.9.so
0056e000-005a1000 r-xp 00000000 08:06 65575 /lib/libncurses.so.5.7
005a1000-005a3000 r--p 00033000 08:06 65575 /lib/libncurses.so.5.7
005a3000-005a4000 rw-p 00035000 08:06 65575 /lib/libncurses.so.5.7
00b67000-00b68000 r-xp 00000000 00:00 0 [vdso]
00bb6000-00bcb000 r-xp 00000000 08:06 375202 /lib/tls/i686/cmov/libpthread-2.9.so
00bcb000-00bcc000 r--p 00014000 08:06 375202 /lib/tls/i686/cmov/libpthread-2.9.so
00bcc000-00bcd000 rw-p 00015000 08:06 375202 /lib/tls/i686/cmov/libpthread-2.9.so
00bcd000-00bcf000 rw-p 00000000 00:00 0
00beb000-00bed000 r-xp 00000000 08:06 375134 /lib/tls/i686/cmov/libdl-2.9.so
00bed000-00bee000 r--p 00001000 08:06 375134 /lib/tls/i686/cmov/libdl-2.9.so
00bee000-00bef000 rw-p 00002000 08:06 375134 /lib/tls/i686/cmov/libdl-2.9.so
00c56000-00c7a000 r-xp 00000000 08:06 1140420 /usr/lib/libexpat.so.1.5.2
00c7a000-00c7c000 r--p 00023000 08:06 1140420 /usr/lib/libexpat.so.1.5.2
00c7c000-00c7d000 rw-p 00025000 08:06 1140420 /usr/lib/libexpat.so.1.5.2
00dce000-00dfa000 r-xp 00000000 08:06 65612 /lib/libreadline.so.5.2
00dfa000-00dfb000 ---p 0002c000 08:06 65612 /lib/libreadline.so.5.2
00dfb000-00dfc000 r--p 0002c000 08:06 65612 /lib/libreadline.so.5.2
00dfc000-00dff000 rw-p 0002d000 08:06 65612 /lib/libreadline.so.5.2
00dff000-00e00000 rw-p 00000000 00:00 0
08048000-0831c000 r-xp 00000000 08:06 1140349 /usr/bin/gdb
0831c000-0831d000 r--p 002d3000 08:06 1140349 /usr/bin/gdb
0831d000-08325000 rw-p 002d4000 08:06 1140349 /usr/bin/gdb
08325000-0833f000 rw-p 00000000 00:00 0
b8077000-b807a000 rw-p 00000000 00:00 0
b8096000-b8098000 rw-p 00000000 00:00 0
bfc57000-bfc6c000 rw-p 00000000 00:00 0 [stack]
'''
disasm = '''0x08083540 <main+0>: lea 0x4(%esp),%ecx
0x08083544 <main+4>: and $0xfffffff0,%esp
0x08083547 <main+7>: pushl -0x4(%ecx)
0x0808354a <main+10>: push %ebp
0x0808354b <main+11>: mov %esp,%ebp
0x0808354d <main+13>: push %ecx
0x0808354e <main+14>: sub $0x14,%esp
0x08083551 <main+17>: mov (%ecx),%eax
0x08083553 <main+19>: mov 0x4(%ecx),%edx
0x08083556 <main+22>: lea -0x14(%ebp),%ecx
0x08083559 <main+25>: movl $0x0,-0xc(%ebp)
0x08083560 <main+32>: movl $0x826bc68,-0x8(%ebp)
0x08083567 <main+39>: mov %eax,-0x14(%ebp)
0x0808356a <main+42>: mov %edx,-0x10(%ebp)
0x0808356d <main+45>: mov %ecx,(%esp)
0x08083570 <main+48>: call 0x8083580 <gdb_main>
0x08083575 <main+53>: add $0x14,%esp
0x08083578 <main+56>: pop %ecx
0x08083579 <main+57>: pop %ebp
0x0808357a <main+58>: lea -0x4(%ecx),%esp
0x0808357d <main+61>: ret
'''
class T(unittest.TestCase):
'''Test Segfault Parser'''
def test_invalid_00_registers(self):
'''Require valid registers'''
regs = 'a 0x10\nb !!!\n'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, '', '')
try:
segv = parse_segv.ParseSegv(regs, '', '')
except ValueError as e:
self.assertTrue('invalid literal for int()' in str(e), str(e))
regs = 'a 0x10'
disasm = '0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.regs['a'], 0x10, segv)
segv.regs = None
self.assertRaises(ValueError, segv.parse_disassembly, '')
def test_invalid_01_disassembly(self):
'''Require valid disassembly'''
regs = 'a 0x10'
disasm = ''
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, '')
disasm = 'Dump ...'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, '')
disasm = 'Dump ...\nmonkey'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, '')
disasm = 'monkey'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, '')
disasm = '0x1111111111: Cannot access memory at address 0x1111111111\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x1111111111, segv.pc)
self.assertEqual(segv.insn, None, segv.insn)
self.assertEqual(segv.src, None, segv.src)
self.assertEqual(segv.dest, None, segv.dest)
disasm = '0x2111111111: \n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x2111111111, segv.pc)
self.assertEqual(segv.insn, None, segv.insn)
self.assertEqual(segv.src, None, segv.src)
self.assertEqual(segv.dest, None, segv.dest)
disasm = '0x8069ff0 <fopen@plt+132220>: cmpb $0x0,(%eax,%ebx,1)\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x8069ff0, segv.pc)
self.assertEqual(segv.insn, 'cmpb', segv.insn)
self.assertEqual(segv.src, '$0x0', segv.src)
self.assertEqual(segv.dest, '(%eax,%ebx,1)', segv.dest)
disasm = '0xb765bb48 <_XSend+440>: call *0x40(%edi)\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0xb765bb48, segv.pc)
self.assertEqual(segv.insn, 'call', segv.insn)
self.assertEqual(segv.src, '*0x40(%edi)', segv.src)
self.assertEqual(segv.dest, None, segv.dest)
disasm = '0xb7aae5a0: call 0xb7a805af <_Unwind_Find_FDE@plt+111>\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0xb7aae5a0, segv.pc)
self.assertEqual(segv.insn, 'call', segv.insn)
self.assertEqual(segv.src, '0xb7a805af', segv.src)
self.assertEqual(segv.dest, None, segv.dest)
disasm = '0x09083540: mov 0x4(%esp),%es:%ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x09083540, segv.pc)
self.assertEqual(segv.insn, 'mov', segv.insn)
self.assertEqual(segv.src, '0x4(%esp)', segv.src)
self.assertEqual(segv.dest, '%es:%ecx', segv.dest)
disasm = '0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083540, segv.pc)
self.assertEqual(segv.insn, 'lea', segv.insn)
self.assertEqual(segv.src, '0x4(%esp)', segv.src)
self.assertEqual(segv.dest, '%ecx', segv.dest)
disasm = '''0x404127 <exo_mount_hal_device_mount+167>:
repz cmpsb %es:(%rdi),%ds:(%rsi)\n'''
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x0404127, segv.pc)
self.assertEqual(segv.insn, 'repz cmpsb', segv.insn)
self.assertEqual(segv.src, '%es:(%rdi)', segv.src)
self.assertEqual(segv.dest, '%ds:(%rsi)', segv.dest)
disasm = '0xb031765a <hufftab16+570>: add 0x3430433,%eax'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0xb031765a, segv.pc)
self.assertEqual(segv.insn, 'add', segv.insn)
self.assertEqual(segv.src, '0x3430433', segv.src)
self.assertEqual(segv.dest, '%eax', segv.dest)
disasm = 'Dump ...\n0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083540, segv.pc)
self.assertEqual(segv.insn, 'lea', segv.insn)
self.assertEqual(segv.src, '0x4(%esp)', segv.src)
self.assertEqual(segv.dest, '%ecx', segv.dest)
disasm = '0x08083550 <main+0>: nop\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083550, segv.pc)
self.assertEqual(segv.insn, 'nop', segv.insn)
self.assertEqual(segv.src, None, segv.src)
self.assertEqual(segv.dest, None, segv.dest)
regs = 'esp 0x444'
disasm = '0x08083560 <main+0>: push %ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083560, segv.pc)
self.assertEqual(segv.insn, 'push', segv.insn)
self.assertEqual(segv.src, '%ecx', segv.src)
self.assertEqual(segv.dest, '(%esp)', segv.dest)
# GDB 7.1
regs = 'esp 0x444'
disasm = '=> 0x08083560 <main+0>: push %ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083560, segv.pc)
self.assertEqual(segv.insn, 'push', segv.insn)
self.assertEqual(segv.src, '%ecx', segv.src)
self.assertEqual(segv.dest, '(%esp)', segv.dest)
def test_ioport_operation(self):
'''I/O port violations'''
regs = 'rax 0x3 3'
disasm = '''0x4087f1 <snd_pcm_hw_params_set_channels_near@plt+19345>:
out %al,$0xb3
'''
maps = '''00400000-00412000 r-xp 00000000 08:04 10371157 /usr/sbin/pommed
00611000-00614000 rw-p 00011000 08:04 10371157 /usr/sbin/pommed
00614000-00635000 rw-p 00614000 00:00 0 [heap]
'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
self.assertEqual(segv.pc, 0x4087f1, segv.pc)
self.assertEqual(segv.insn, 'out', segv.insn)
self.assertEqual(segv.src, '%al', segv.src)
self.assertEqual(segv.dest, '$0xb3', segv.dest)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('disallowed I/O port operation on port 3' in reason, reason)
def test_invalid_02_maps(self):
'''Require valid maps'''
regs = 'a 0x10'
disasm = 'Dump ...\n0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
maps = 'asdlkfjaadf'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, maps)
maps = '''005a3000-005a4000 rw-p 00035000 08:06 65575 /lib/libncurses.so.5.7
00b67000-00b68000 r-xp 00000000 00:00 0 [vdso]
00c67000-00c68000 r--p 00000000 00:00 0 '''
segv = parse_segv.ParseSegv(regs, disasm, maps)
self.assertEqual(segv.maps[0]['start'], 0x005a3000, segv)
self.assertEqual(segv.maps[0]['end'], 0x005a4000, segv)
self.assertEqual(segv.maps[0]['perms'], 'rw-p', segv)
self.assertEqual(segv.maps[0]['name'], '/lib/libncurses.so.5.7', segv)
self.assertEqual(segv.maps[1]['start'], 0x00b67000, segv)
self.assertEqual(segv.maps[1]['end'], 0x00b68000, segv)
self.assertEqual(segv.maps[1]['perms'], 'r-xp', segv)
self.assertEqual(segv.maps[1]['name'], '[vdso]', segv)
self.assertEqual(segv.maps[2]['start'], 0x00c67000, segv)
self.assertEqual(segv.maps[2]['end'], 0x00c68000, segv)
self.assertEqual(segv.maps[2]['perms'], 'r--p', segv)
self.assertEqual(segv.maps[2]['name'], None, segv)
def test_debug(self):
'''Debug mode works'''
regs = 'a 0x10'
disasm = 'Dump ...\n0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
maps = '''005a3000-005a4000 rw-p 00035000 08:06 65575 /lib/libncurses.so.5.7
00b67000-00b68000 r-xp 00000000 00:00 0 [vdso]
00c67000-00c68000 r--p 00000000 00:00 0 '''
sys.stderr = tempfile.NamedTemporaryFile(prefix='parse_segv-stderr-')
segv = parse_segv.ParseSegv(regs, disasm, maps, debug=True)
self.assertTrue(segv is not None, segv)
def test_register_values(self):
'''Sub-register parsing'''
disasm = '''0x08083540 <main+0>: mov $1,%ecx'''
segv = parse_segv.ParseSegv(regs64, disasm, '')
val = segv.register_value('%rdx')
self.assertEqual(val, 0xffffffffff600180, hex(val))
val = segv.register_value('%edx')
self.assertEqual(val, 0xff600180, hex(val))
val = segv.register_value('%dx')
self.assertEqual(val, 0x0180, hex(val))
val = segv.register_value('%dl')
self.assertEqual(val, 0x80, hex(val))
def test_segv_unknown(self):
'''Handles unknown segfaults'''
disasm = '''0x08083540 <main+0>: mov $1,%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertFalse(understood, details)
# Verify calculations
self.assertEqual(segv.calculate_arg('(%ecx)'), 0xbfc6af40, segv.regs['ecx'])
self.assertEqual(segv.calculate_arg('0x10(%ecx)'), 0xbfc6af50, segv.regs['ecx'])
self.assertEqual(segv.calculate_arg('-0x20(%ecx)'), 0xbfc6af20, segv.regs['ecx'])
self.assertEqual(segv.calculate_arg('%fs:(%ecx)'), 0xbfc6af44, segv.regs['ecx'])
self.assertEqual(segv.calculate_arg('0x3404403'), 0x3404403, '0x3404403')
self.assertEqual(segv.calculate_arg('*0x40(%edi)'), 0x80834c0, segv.regs['edi'])
self.assertEqual(segv.calculate_arg('(%edx,%ebx,1)'), 0x26eff5, segv.regs['ebx'])
self.assertEqual(segv.calculate_arg('(%eax,%ebx,1)'), 0x26eff3, segv.regs['ebx'])
self.assertEqual(segv.calculate_arg('0x10(,%ebx,1)'), 0x26f004, segv.regs['ebx'])
# Again, but 64bit
disasm = '''0x08083540 <main+0>: mov $1,%rcx'''
segv = parse_segv.ParseSegv(regs64, disasm, maps)
understood, reason, details = segv.report()
self.assertFalse(understood, details)
self.assertEqual(segv.calculate_arg('(%rax,%rbx,1)'), 0x26eff3, segv.regs['rbx'])
def test_segv_pc_missing(self):
'''Handles PC in missing VMA'''
disasm = '''0x00083540 <main+0>: lea 0x4(%esp),%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x00083540) not located in a known VMA region' in details, details)
self.assertTrue('executing unknown VMA' in reason, reason)
disasm = '''0x00083544:'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x00083544) not located in a known VMA region' in details, details)
self.assertTrue('executing unknown VMA' in reason, reason)
def test_segv_pc_null(self):
'''Handles PC in NULL VMA'''
disasm = '''0x00000540 <main+0>: lea 0x4(%esp),%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x00000540) not located in a known VMA region' in details, details)
self.assertTrue('executing NULL VMA' in reason, reason)
def test_segv_pc_nx_writable(self):
'''Handles PC in writable NX VMA'''
disasm = '''0x005a3000 <main+0>: lea 0x4(%esp),%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x005a3000) in non-executable VMA region:' in details, details)
self.assertTrue('executing writable VMA /lib/libncurses.so.5.7' in reason, reason)
def test_segv_pc_nx_unwritable(self):
'''Handles PC in non-writable NX VMA'''
disasm = '''0x00dfb000 <main+0>: lea 0x4(%esp),%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x00dfb000) in non-executable VMA region:' in details, details)
self.assertTrue('executing non-writable VMA /lib/libreadline.so.5.2' in reason, reason)
def test_segv_src_missing(self):
'''Handles source in missing VMA'''
reg = regs + 'ecx 0x0006af24 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
# Valid crash
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('source "-0x4(%ecx)" (0x0006af20) not located in a known VMA region' in details, details)
self.assertTrue('reading unknown VMA' in reason, reason)
# Valid crash
disasm = '0x08083547 <main+7>: callq *%ecx'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('source "*%ecx" (0x0006af24) not located in a known VMA region' in details, details)
self.assertTrue('reading unknown VMA' in reason, reason)
def test_segv_src_null(self):
'''Handles source in NULL VMA'''
reg = regs + 'ecx 0x00000024 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('source "-0x4(%ecx)" (0x00000020) not located in a known VMA region' in details, details)
self.assertTrue('reading NULL VMA' in reason, reason)
def test_segv_src_not_readable(self):
'''Handles source not in readable VMA'''
reg = regs + 'ecx 0x0026c080 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('source "-0x4(%ecx)" (0x0026c07c) in non-readable VMA region:' in details, details)
self.assertTrue('reading VMA /lib/tls/i686/cmov/libc-2.9.so' in reason, reason)
self.assertFalse('Stack memory exhausted' in details, details)
self.assertFalse('Stack pointer not within stack segment' in details, details)
def test_segv_dest_missing(self):
        '''Handles destination in missing VMA'''
reg = regs + 'esp 0x0006af24 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0x0006af24) not located in a known VMA region' in details, details)
self.assertTrue('writing unknown VMA' in reason, reason)
def test_segv_dest_null(self):
        '''Handles destination in NULL VMA'''
reg = regs + 'esp 0x00000024 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0x00000024) not located in a known VMA region' in details, details)
self.assertTrue('writing NULL VMA' in reason, reason)
def test_segv_dest_not_writable(self):
'''Handles destination not in writable VMA'''
reg = regs + 'esp 0x08048080 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0x08048080) in non-writable VMA region:' in details, details)
self.assertTrue('writing VMA /usr/bin/gdb' in reason, reason)
def test_segv_crackful_disasm(self):
'''Rejects insane disassemblies'''
disasm = '0x08083547 <main+7>: pushl -0x4(blah)'
segv = parse_segv.ParseSegv(regs, disasm, maps)
self.assertRaises(ValueError, segv.report)
disasm = '0x08083547 <main+7>: pushl -04(%ecx)'
segv = parse_segv.ParseSegv(regs, disasm, maps)
self.assertRaises(ValueError, segv.report)
def test_segv_stack_failure(self):
'''Handles walking off the stack'''
# Triggered via "push"
reg = regs + 'esp 0xbfc56ff0 0xbfc56ff0'
disasm = '0x08083547 <main+7>: push %eax'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0xbfc56ff0) not located in a known VMA region (needed writable region)!' in details, details)
# Triggered via "call"
reg = regs + 'esp 0xbfc56fff 0xbfc56fff'
disasm = '0x08083547 <main+7>: callq 0x08083540'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0xbfc56fff) not located in a known VMA region (needed writable region)!' in details, details)
self.assertTrue('Stack memory exhausted' in details, details)
# Triggered via unknown reason
reg = regs + 'esp 0xdfc56000 0xdfc56000'
disasm = '''0x08083540 <main+0>: mov $1,%rcx'''
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('SP (0xdfc56000) not located in a known VMA region (needed readable region)!' in details, details)
self.assertTrue('Stack pointer not within stack segment' in details, details)
def test_segv_stack_kernel_segfault(self):
'''Handles unknown segfaults in kernel'''
# Crash in valid code path
disasm = '''0x0056e010: ret'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertFalse(understood, details)
self.assertTrue('Reason could not be automatically determined.' in details, details)
self.assertFalse('(Unhandled exception in kernel code?)' in details, details)
# Crash from kernel code path
disasm = '''0x00b67422 <__kernel_vsyscall+2>: ret'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertFalse(understood, details)
self.assertTrue('Reason could not be automatically determined. (Unhandled exception in kernel code?)' in details, details)
unittest.main()
|
from boxbranding import getBoxType, getBrandOEM
from Components.About import about
class HardwareInfo:
device_name = None
device_version = None
def __init__(self):
if HardwareInfo.device_name is not None:
return
HardwareInfo.device_name = "unknown"
try:
file = open("/proc/stb/info/model", "r")
HardwareInfo.device_name = file.readline().strip()
file.close()
try:
file = open("/proc/stb/info/version", "r")
HardwareInfo.device_version = file.readline().strip()
file.close()
except:
pass
except:
print "----------------"
print "you should upgrade to new drivers for the hardware detection to work properly"
print "----------------"
print "fallback to detect hardware via /proc/cpuinfo!!"
try:
rd = open("/proc/cpuinfo", "r").read()
if "Brcm4380 V4.2" in rd:
HardwareInfo.device_name = "dm8000"
print "dm8000 detected!"
elif "Brcm7401 V0.0" in rd:
HardwareInfo.device_name = "dm800"
print "dm800 detected!"
elif "MIPS 4KEc V4.8" in rd:
HardwareInfo.device_name = "dm7025"
print "dm7025 detected!"
except:
pass
def get_device_name(self):
return HardwareInfo.device_name
def get_device_version(self):
return HardwareInfo.device_version
def has_hdmi(self):
return getBrandOEM() in ('xtrend', 'gigablue', 'dags', 'ixuss', 'odin', 'vuplus', 'ini', 'ebox', 'ceryon') or (getBoxType() in ('dm7020hd', 'dm800se', 'dm500hd', 'dm8000') and HardwareInfo.device_version is not None)
def has_deepstandby(self):
return getBoxType() != 'dm800'
def is_nextgen(self):
if about.getCPUString() in ('BCM7346B2', 'BCM7425B2', 'BCM7429B0'):
return True
return False
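# Illustrative usage (a sketch; assumes an Enigma2 runtime where boxbranding,
# Components.About and the /proc/stb/info entries are available):
#
#     hw = HardwareInfo()
#     print(hw.get_device_name(), hw.get_device_version())
#     if hw.has_hdmi() and not hw.is_nextgen():
#         pass  # e.g. fall back to settings suitable for older SoCs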
|
from __future__ import print_function
from datetime import datetime
import re
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import (DataFrame, Series, Index, date_range, compat,
Timestamp)
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReplace(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
tsframe = self.tsframe.copy()
tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)
self.assertRaises(TypeError, self.tsframe.replace, nan)
# mixed type
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
# same as above with inplace=True
# lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
# mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
# dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2})
expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2})
result = df.replace({'Type': {'Q': 0, 'T': 1}})
assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
res = df.replace(0, 'a')
assert_frame_equal(res, expec)
self.assertEqual(res.a.dtype, np.object_)
def test_replace_regex_metachar(self):
        metachars = '[]', '()', r'\d', r'\w', r'\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
# GH 11698
# test for mixed data types.
df = pd.DataFrame([('-', pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
df1 = df.replace('-', np.nan)
expected_df = pd.DataFrame([(np.nan, pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
def test_replace_mixed(self):
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df, expected)
# int block splitting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64'),
'C': Series([1, 2], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64'),
'C': Series([1, 2], dtype='int64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
# to object block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1, 'foo'], dtype='object'),
'B': Series([0, 1], dtype='int64')})
result = df.replace(2, 'foo')
assert_frame_equal(result, expected)
expected = DataFrame({'A': Series(['foo', 'bar'], dtype='object'),
'B': Series([0, 'foo'], dtype='object')})
result = df.replace([1, 2], ['foo', 'bar'])
assert_frame_equal(result, expected)
# test case from
df = DataFrame({'A': Series([3, 0], dtype='int64'),
'B': Series([0, 3], dtype='int64')})
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0, 0] = m[0]
expected.iloc[1, 1] = m[1]
assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
def test_replace_value_is_none(self):
self.assertRaises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
# int
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
self.assertTrue(result.values.all())
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
# datetime blocks
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
def test_replace_input_formats(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# dict to scalar
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
self.assertRaises(ValueError, df.replace, to_rep, values[1:])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
assert_frame_equal(res1, res2)
assert_frame_equal(res2, res3)
assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = pd.read_csv(StringIO(raw), sep=r'\s+')
        res = df.replace({r'\D': 1})
assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
def test_replace_period(self):
d = {
'fname': {
'out_augmented_AUG_2011.json':
pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json':
pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json':
pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json':
pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json':
pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json':
pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json':
pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [0, np.nan, 2]})
result = df.replace(np.nan, 1)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': Series([0, 1, 2], dtype='float64')})
assert_frame_equal(result, expected)
result = df.fillna(1)
assert_frame_equal(result, expected)
result = df.replace(0, np.nan)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [np.nan, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.replace(Timestamp('20130102', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Eastern'))
assert_frame_equal(result, expected)
# coerce to object
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Pacific'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Pacific'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({'A': np.nan}, Timestamp('20130104'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
|
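# Recoding queries for 2000 Census PUMS person records: bin `age` into 10
# `agep` groups (<5, 5-14, ..., 75-84, 85+), copy `sex` into `gender`,
# collapse `race1` into 7 `race` categories, map `esr` into 4 `employment`
# states, extract `person_sample`, then rebuild `hhld_sample` with a
# two-level `hhldrage` taken from the householder's record (relate = 1).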
DEFAULT_PERSON_PUMS2000_QUERIES = [ "alter table person_pums add column agep bigint",
"alter table person_pums add column gender bigint",
"alter table person_pums add column race bigint",
"alter table person_pums add column employment bigint",
"update person_pums set agep = 1 where age < 5",
"update person_pums set agep = 2 where age >= 5 and age < 15",
"update person_pums set agep = 3 where age >= 15 and age < 25",
"update person_pums set agep = 4 where age >= 25 and age < 35",
"update person_pums set agep = 5 where age >= 35 and age < 45",
"update person_pums set agep = 6 where age >= 45 and age < 55",
"update person_pums set agep = 7 where age >= 55 and age < 65",
"update person_pums set agep = 8 where age >= 65 and age < 75",
"update person_pums set agep = 9 where age >= 75 and age < 85",
"update person_pums set agep = 10 where age >= 85",
"update person_pums set gender = sex",
"update person_pums set race = 1 where race1 = 1",
"update person_pums set race = 2 where race1 = 2",
"update person_pums set race = 3 where race1 >=3 and race1 <= 5",
"update person_pums set race = 4 where race1 = 6",
"update person_pums set race = 5 where race1 = 7",
"update person_pums set race = 6 where race1 = 8",
"update person_pums set race = 7 where race1 = 9",
"update person_pums set employment = 1 where esr = 0",
"update person_pums set employment = 2 where esr = 1 or esr = 2 or esr = 4 or esr = 5",
"update person_pums set employment = 3 where esr = 3",
"update person_pums set employment = 4 where esr = 6",
"drop table person_sample",
"create table person_sample select state, pumano, hhid, serialno, pnum, agep, gender, race, employment, relate from person_pums",
"alter table person_sample add index(serialno, pnum)",
"drop table hhld_sample_temp",
"alter table hhld_sample drop column hhldrage",
"alter table hhld_sample rename to hhld_sample_temp",
"drop table hhld_sample",
"create table hhld_sample select hhld_sample_temp.*, agep as hhldrage from hhld_sample_temp left join person_sample using(serialno) where relate = 1",
"alter table hhld_sample add index(serialno)",
"update hhld_sample set hhldrage = 1 where hhldrage <=7 ",
"update hhld_sample set hhldrage = 2 where hhldrage >7"]
DEFAULT_PERSON_PUMSACS_QUERIES = ["alter table person_pums change agep age bigint",
"alter table person_pums change puma pumano bigint",
"alter table person_pums change rac1p race1 bigint",
"alter table person_pums change st state bigint",
"alter table person_pums change sporder pnum bigint",
"alter table person_pums change rel relate bigint",
"alter table person_pums add column agep bigint",
"alter table person_pums add column gender bigint",
"alter table person_pums add column race bigint",
"alter table person_pums add column employment bigint",
"update person_pums set agep = 1 where age < 5",
"update person_pums set agep = 2 where age >= 5 and age < 15",
"update person_pums set agep = 3 where age >= 15 and age < 25",
"update person_pums set agep = 4 where age >= 25 and age < 35",
"update person_pums set agep = 5 where age >= 35 and age < 45",
"update person_pums set agep = 6 where age >= 45 and age < 55",
"update person_pums set agep = 7 where age >= 55 and age < 65",
"update person_pums set agep = 8 where age >= 65 and age < 75",
"update person_pums set agep = 9 where age >= 75 and age < 85",
"update person_pums set agep = 10 where age >= 85",
"update person_pums set gender = sex",
"update person_pums set race = 1 where race1 = 1",
"update person_pums set race = 2 where race1 = 2",
"update person_pums set race = 3 where race1 >=3 and race1 <= 5",
"update person_pums set race = 4 where race1 = 6",
"update person_pums set race = 5 where race1 = 7",
"update person_pums set race = 6 where race1 = 8",
"update person_pums set race = 7 where race1 = 9",
"update person_pums set employment = 1 where esr = 0",
"update person_pums set employment = 2 where esr = 1 or esr = 2 or esr = 4 or esr = 5",
"update person_pums set employment = 3 where esr = 3",
"update person_pums set employment = 4 where esr = 6",
"alter table person_pums add index(serialno)",
"create table person_pums1 select person_pums.*, hhid from person_pums left join serialcorr using(serialno)",
"update person_pums1 set serialno = hhid",
"drop table person_sample",
"create table person_sample select state, pumano, hhid, serialno, pnum, agep, gender, race, employment, relate from person_pums1",
"alter table person_sample add index(serialno, pnum)",
"drop table hhld_sample_temp",
"alter table hhld_sample drop column hhldrage",
"alter table hhld_sample rename to hhld_sample_temp",
"drop table hhld_sample",
"create table hhld_sample select hhld_sample_temp.*, agep as hhldrage from hhld_sample_temp left join person_sample using(serialno) where relate = 0",
"alter table hhld_sample add index(serialno)",
"update hhld_sample set hhldrage = 1 where hhldrage <=7 ",
"update hhld_sample set hhldrage = 2 where hhldrage >7",
"drop table hhld_sample_temp",
"drop table person_pums1"]
DEFAULT_HOUSING_PUMS2000_QUERIES = ["alter table housing_pums add index(serialno)",
"alter table housing_pums add column hhtype bigint",
"alter table housing_pums add column hhldtype bigint",
"alter table housing_pums add column hhldinc bigint",
"alter table housing_pums add column hhldtenure bigint",
"alter table housing_pums add column hhldsize bigint",
"alter table housing_pums add column childpresence bigint",
"alter table housing_pums add column groupquarter bigint",
"alter table housing_pums add column hhldfam bigint",
"update housing_pums set hhtype = 1 where unittype = 0",
"update housing_pums set hhtype = 2 where unittype = 1 or unittype = 2",
"update housing_pums set hhldtype = 1 where hht = 1",
"update housing_pums set hhldtype = 2 where hht = 2",
"update housing_pums set hhldtype = 3 where hht = 3",
"update housing_pums set hhldtype = 4 where hht = 4 or hht = 5",
"update housing_pums set hhldtype = 5 where hht = 6 or hht = 7",
"update housing_pums set hhldtype = -99 where hht = 0",
"update housing_pums set hhldinc = 1 where hinc <15000",
"update housing_pums set hhldinc = 2 where hinc >= 15000 and hinc < 25000",
"update housing_pums set hhldinc = 3 where hinc >= 25000 and hinc < 35000",
"update housing_pums set hhldinc = 4 where hinc >= 35000 and hinc < 45000",
"update housing_pums set hhldinc = 5 where hinc >= 45000 and hinc < 60000",
"update housing_pums set hhldinc = 6 where hinc >= 60000 and hinc < 100000",
"update housing_pums set hhldinc = 7 where hinc >= 100000 and hinc < 150000",
"update housing_pums set hhldinc = 8 where hinc >= 150000",
"update housing_pums set hhldinc = -99 where hht = 0",
#"update housing_pums set hhldtenure = 1 where tenure = 1 or tenure = 2",
#"update housing_pums set hhldtenure = 2 where tenure = 3 or tenure = 4",
#"update housing_pums set hhldtenure = -99 where tenure = 0",
"update housing_pums set hhldsize = persons where persons < 7",
"update housing_pums set hhldsize = 7 where persons >= 7",
"update housing_pums set hhldsize = -99 where hht = 0",
"update housing_pums set childpresence = 1 where noc > 0",
"update housing_pums set childpresence = 2 where noc = 0",
"update housing_pums set childpresence = -99 where hht = 0",
"update housing_pums set groupquarter = unittype where unittype >0",
"update housing_pums set groupquarter = -99 where unittype =0",
"update housing_pums set hhldfam = 1 where hhldtype <=3",
"update housing_pums set hhldfam = 2 where hhldtype > 3",
"delete from housing_pums where persons = 0",
"drop table hhld_sample",
"drop table gq_sample",
"create table hhld_sample select state, pumano, hhid, serialno, hhtype, hhldtype, hhldinc, hhldsize, childpresence, hhldfam from housing_pums where hhtype = 1",
"create table gq_sample select state, pumano, hhid, serialno, hhtype, groupquarter from housing_pums where hhtype = 2",
"alter table hhld_sample add index(serialno)",
"alter table gq_sample add index(serialno)"]
DEFAULT_HOUSING_PUMSACS_QUERIES = ["alter table housing_pums add index(serialno)",
"alter table housing_pums change hincp hinc bigint",
"alter table housing_pums change np persons bigint",
"alter table housing_pums change hupaoc noc bigint",
"alter table housing_pums change type unittype bigint",
"alter table housing_pums change st state bigint",
"alter table housing_pums change puma pumano bigint",
"alter table housing_pums add column hhtype bigint",
"alter table housing_pums add column hhldtype bigint",
"alter table housing_pums add column hhldinc bigint",
"alter table housing_pums add column hhldtenure bigint",
"alter table housing_pums add column hhldsize bigint",
"alter table housing_pums add column childpresence bigint",
"alter table housing_pums add column groupquarter bigint",
"alter table housing_pums add column hhldfam bigint",
"update housing_pums set hhtype = 1 where unittype = 1",
"update housing_pums set hhtype = 2 where unittype = 2 or unittype = 3",
"update housing_pums set hhldtype = 1 where hht = 1",
"update housing_pums set hhldtype = 2 where hht = 2",
"update housing_pums set hhldtype = 3 where hht = 3",
"update housing_pums set hhldtype = 4 where hht = 4 or hht = 6",
"update housing_pums set hhldtype = 5 where hht = 5 or hht = 7",
"update housing_pums set hhldtype = -99 where hht = 0",
"update housing_pums set hhldinc = 1 where hinc <15000",
"update housing_pums set hhldinc = 2 where hinc >= 15000 and hinc < 25000",
"update housing_pums set hhldinc = 3 where hinc >= 25000 and hinc < 35000",
"update housing_pums set hhldinc = 4 where hinc >= 35000 and hinc < 45000",
"update housing_pums set hhldinc = 5 where hinc >= 45000 and hinc < 60000",
"update housing_pums set hhldinc = 6 where hinc >= 60000 and hinc < 100000",
"update housing_pums set hhldinc = 7 where hinc >= 100000 and hinc < 150000",
"update housing_pums set hhldinc = 8 where hinc >= 150000",
"update housing_pums set hhldinc = -99 where hht = 0",
#"update housing_pums set hhldtenure = 1 where tenure = 1 or tenure = 2",
#"update housing_pums set hhldtenure = 2 where tenure = 3 or tenure = 4",
#"update housing_pums set hhldtenure = -99 where tenure = 0",
"update housing_pums set hhldsize = persons where persons < 7",
"update housing_pums set hhldsize = 7 where persons >= 7",
"update housing_pums set hhldsize = -99 where hht = 0",
"update housing_pums set childpresence = 1 where noc =1 or noc = 2 or noc = 3",
"update housing_pums set childpresence = 2 where noc = 4",
"update housing_pums set childpresence = -99 where hht = 0",
"update housing_pums set groupquarter = 1 where unittype >1",
"update housing_pums set groupquarter = -99 where unittype =1",
"update housing_pums set hhldfam = 1 where hhldtype <=3",
"update housing_pums set hhldfam = 2 where hhldtype > 3",
"delete from housing_pums where persons = 0",
"drop table serialcorr",
"create table serialcorr select state, pumano, serialno from housing_pums group by serialno",
"alter table serialcorr add column hhid bigint primary key auto_increment not null",
"alter table serialcorr add index(serialno)",
"drop table hhld_sample",
"drop table gq_sample",
"alter table housing_pums add index(serialno)",
"create table housing_pums1 select housing_pums.*, hhid from housing_pums left join serialcorr using(serialno)",
"update housing_pums1 set serialno = hhid",
"create table hhld_sample select state, pumano, hhid, serialno, hhtype, hhldtype, hhldinc, hhldsize, childpresence, hhldfam from housing_pums1 where hhtype = 1",
"create table gq_sample select state, pumano, hhid, serialno, hhtype, groupquarter from housing_pums1 where hhtype = 2",
"alter table hhld_sample add index(serialno)",
"alter table gq_sample add index(serialno)",
"drop table housing_pums1"]
DEFAULT_SF2000_QUERIES = ["alter table %s add column agep1 bigint",
"alter table %s add column agep2 bigint",
"alter table %s add column agep3 bigint",
"alter table %s add column agep4 bigint",
"alter table %s add column agep5 bigint",
"alter table %s add column agep6 bigint",
"alter table %s add column agep7 bigint",
"alter table %s add column agep8 bigint",
"alter table %s add column agep9 bigint",
"alter table %s add column agep10 bigint",
"alter table %s add column gender1 bigint",
"alter table %s add column gender2 bigint",
"alter table %s add column race1 bigint",
"alter table %s add column race2 bigint",
"alter table %s add column race3 bigint",
"alter table %s add column race4 bigint",
"alter table %s add column race5 bigint",
"alter table %s add column race6 bigint",
"alter table %s add column race7 bigint",
"alter table %s add column employment1 bigint",
"alter table %s add column employment2 bigint",
"alter table %s add column employment3 bigint",
"alter table %s add column employment4 bigint",
"alter table %s add column childpresence1 bigint",
"alter table %s add column childpresence2 bigint",
"alter table %s add column groupquarter1 bigint",
"alter table %s add column groupquarter2 bigint",
"alter table %s add column hhldinc1 bigint",
"alter table %s add column hhldinc2 bigint",
"alter table %s add column hhldinc3 bigint",
"alter table %s add column hhldinc4 bigint",
"alter table %s add column hhldinc5 bigint",
"alter table %s add column hhldinc6 bigint",
"alter table %s add column hhldinc7 bigint",
"alter table %s add column hhldinc8 bigint",
"alter table %s add column hhldsize1 bigint",
"alter table %s add column hhldsize2 bigint",
"alter table %s add column hhldsize3 bigint",
"alter table %s add column hhldsize4 bigint",
"alter table %s add column hhldsize5 bigint",
"alter table %s add column hhldsize6 bigint",
"alter table %s add column hhldsize7 bigint",
"alter table %s add column hhldtype1 bigint",
"alter table %s add column hhldtype2 bigint",
"alter table %s add column hhldtype3 bigint",
"alter table %s add column hhldtype4 bigint",
"alter table %s add column hhldtype5 bigint",
"alter table %s add column hhldrage1 bigint",
"alter table %s add column hhldrage2 bigint",
"alter table %s add column hhldfam1 bigint",
"alter table %s add column hhldfam2 bigint",
"update %s set agep1 = (P008003+P008004+P008005+P008006+P008007) + (P008042+P008043+P008044+P008045+P008046)",
"update %s set agep2 = (P008008+P008009+P008010+P008011+P008012+P008013+P008014+P008015+P008016+P008017 ) + (P008047+P008048+P008049+P008050+P008051+P008052+P008053+P008054+P008055+P008056)",
"update %s set agep3 = (P008018+P008019+P008020+P008021+P008022+P008023+P008024+P008025 ) + (P008057+P008058+P008059+P008060+P008061+P008062+P008063+P008064)",
"update %s set agep4 = (P008026+P008027) + (P008065+P008066)",
"update %s set agep5 = (P008028+P008029) + (P008067+P008068)",
"update %s set agep6 = (P008030+P008031) + (P008069+P008070)",
"update %s set agep7 = (P008032+P008033+P008034) + (P008071+P008072+P008073)",
"update %s set agep8 = (P008035+P008036+P008037) + (P008074+P008075+P008076)",
"update %s set agep9 = (P008038+P008039) + (P008077+P008078)",
"update %s set agep10 = (P008040) + (P008079)",
"update %s set gender1 = P008002",
"update %s set gender2 = P008041",
"update %s set race1 = P006002",
"update %s set race2 = P006003",
"update %s set race3 = P006004",
"update %s set race4 = P006005",
"update %s set race5 = P006006",
"update %s set race6 = P006007",
"update %s set race7 = P006008",
"update %s set employment1 = agep1+agep2+P008018+P008057",
"update %s set employment2 = P043004+P043006+P043011+P043013",
"update %s set employment3 = P043007+P043014",
"update %s set employment4 = P043008+P043015",
"update %s set childpresence1 = P010008 + P010012 + P010015",
"update %s set childpresence2 = P010009 + P010013 + P010016 + P010017 + P010002",
"update %s set groupquarter1 = P009026",
"update %s set groupquarter2 = P009027",
"update %s set hhldinc1 = P052002 + P052003",
"update %s set hhldinc2 = P052004 + P052005",
"update %s set hhldinc3 = P052006 + P052007",
"update %s set hhldinc4 = P052008 + P052009",
"update %s set hhldinc5 = P052010 + P052011",
"update %s set hhldinc6 = P052012 + P052013",
"update %s set hhldinc7 = P052014 + P052015",
"update %s set hhldinc8 = P052016 + P052017",
"update %s set hhldsize1 = P014010 ",
"update %s set hhldsize2 = P014003+P014011 ",
"update %s set hhldsize3 = P014004+P014012 ",
"update %s set hhldsize4 = P014005+P014013 ",
"update %s set hhldsize5 = P014006+P014014 ",
"update %s set hhldsize6 = P014007+P014015 ",
"update %s set hhldsize7 = P014008+P014016 ",
"update %s set hhldtype1 = P010007",
"update %s set hhldtype2 = P010011 ",
"update %s set hhldtype3 = P010014",
"update %s set hhldtype4 = P010002",
"update %s set hhldtype5 = P010017",
"update %s set hhldrage1 = P012002",
"update %s set hhldrage2 = P012017",
"update %s set hhldfam1 = hhldtype1 + hhldtype2 + hhldtype3",
"update %s set hhldfam2 = hhldtype4 + hhldtype5",
"drop table hhld_marginals",
"drop table gq_marginals",
"drop table person_marginals",
"""create table hhld_marginals select state, county, tract, bg, hhldinc1, hhldinc2, hhldinc3, hhldinc4, hhldinc5, hhldinc6, hhldinc7, hhldinc8,"""
"""hhldsize1, hhldsize2, hhldsize3, hhldsize4, hhldsize5, hhldsize6, hhldsize7, hhldtype1, hhldtype2, hhldtype3, hhldtype4, hhldtype5,"""
"""childpresence1, childpresence2, hhldrage1, hhldrage2, hhldfam1, hhldfam2 from %s""",
"create table gq_marginals select state, county, tract, bg, groupquarter1, groupquarter2 from %s",
"""create table person_marginals select state, county, tract, bg, agep1, agep2, agep3, agep4, agep5, agep6, agep7, agep8, agep9, agep10,"""
"""gender1, gender2, race1, race2, race3, race4, race5, race6, race7, employment1, employment2, employment3, employment4 from"""
""" %s"""]
DEFAULT_SFACS_QUERIES = ["alter table %s add column agep1 bigint",
"alter table %s add column agep2 bigint",
"alter table %s add column agep3 bigint",
"alter table %s add column agep4 bigint",
"alter table %s add column agep5 bigint",
"alter table %s add column agep6 bigint",
"alter table %s add column agep7 bigint",
"alter table %s add column agep8 bigint",
"alter table %s add column agep9 bigint",
"alter table %s add column agep10 bigint",
"alter table %s add column gender1 bigint",
"alter table %s add column gender2 bigint",
"alter table %s add column race1 bigint",
"alter table %s add column race2 bigint",
"alter table %s add column race3 bigint",
"alter table %s add column race4 bigint",
"alter table %s add column race5 bigint",
"alter table %s add column race6 bigint",
"alter table %s add column race7 bigint",
"alter table %s add column race11 bigint",
"alter table %s add column race12 bigint",
"alter table %s add column race13 bigint",
"alter table %s add column race14 bigint",
"alter table %s add column race15 bigint",
"alter table %s add column race16 bigint",
"alter table %s add column race17 bigint",
"alter table %s add column race21 bigint",
"alter table %s add column race22 bigint",
"alter table %s add column race23 bigint",
"alter table %s add column race24 bigint",
"alter table %s add column race25 bigint",
"alter table %s add column race26 bigint",
"alter table %s add column race27 bigint",
"alter table %s add column employment1 bigint",
"alter table %s add column employment2 bigint",
"alter table %s add column employment3 bigint",
"alter table %s add column employment4 bigint",
"alter table %s add column childpresence1 bigint",
"alter table %s add column childpresence2 bigint",
"alter table %s add column groupquarter1 bigint",
"alter table %s add column hhldinc1 bigint",
"alter table %s add column hhldinc2 bigint",
"alter table %s add column hhldinc3 bigint",
"alter table %s add column hhldinc4 bigint",
"alter table %s add column hhldinc5 bigint",
"alter table %s add column hhldinc6 bigint",
"alter table %s add column hhldinc7 bigint",
"alter table %s add column hhldinc8 bigint",
"alter table %s add column hhldsize1 bigint",
"alter table %s add column hhldsize2 bigint",
"alter table %s add column hhldsize3 bigint",
"alter table %s add column hhldsize4 bigint",
"alter table %s add column hhldsize5 bigint",
"alter table %s add column hhldsize6 bigint",
"alter table %s add column hhldsize7 bigint",
"alter table %s add column hhldtype1 bigint",
"alter table %s add column hhldtype2 bigint",
"alter table %s add column hhldtype3 bigint",
"alter table %s add column hhldtype4 bigint",
"alter table %s add column hhldtype5 bigint",
"alter table %s add column hhldrage1 bigint",
"alter table %s add column hhldrage2 bigint",
"alter table %s add column hhldfam1 bigint",
"alter table %s add column hhldfam2 bigint",
"alter table %s add column check_gender bigint",
"alter table %s add column check_age bigint",
"alter table %s add column check_race bigint",
"alter table %s add column check_race1 bigint",
"alter table %s add column check_race2 bigint",
"alter table %s add column check_employment bigint",
"alter table %s add column check_type bigint",
"alter table %s add column check_size bigint",
"alter table %s add column check_fam bigint",
"alter table %s add column check_hhldrage bigint",
"alter table %s add column check_inc bigint",
"alter table %s add column check_child bigint",
"update %s set agep1 = (B01001000003)+(B01001000027)",
"update %s set agep2 = (B01001000004+B01001000005) + (B01001000028+B01001000029)",
"update %s set agep3 = (B01001000006+B01001000007+B01001000008+B01001000009+B01001000010) + (B01001000030+B01001000031+B01001000032+B01001000033+B01001000034)",
"update %s set agep4 = (B01001000011+B01001000012) + (B01001000035+B01001000036)",
"update %s set agep5 = (B01001000013+B01001000014) + (B01001000037+B01001000038)",
"update %s set agep6 = (B01001000015+B01001000016) + (B01001000039+B01001000040)",
"update %s set agep7 = (B01001000017+B01001000018+B01001000019) + (B01001000041+B01001000042+B01001000043)",
"update %s set agep8 = (B01001000020+B01001000021+B01001000022) + (B01001000044+B01001000045+B01001000046)",
"update %s set agep9 = (B01001000023+B01001000024) + (B01001000047+B01001000048)",
"update %s set agep10 = (B01001000025) + (B01001000049)",
"update %s set gender1 = B01001000002",
"update %s set gender2 = B01001000026",
"update %s set race1 = B02001000002",
"update %s set race2 = B02001000003",
"update %s set race3 = B02001000004",
"update %s set race4 = B02001000005",
"update %s set race5 = B02001000006",
"update %s set race6 = B02001000007",
"update %s set race7 = B02001000009+B02001000010",
"update %s set race11 = C01001A00001",
"update %s set race12 = C01001B00001",
"update %s set race13 = C01001C00001",
"update %s set race14 = C01001D00001",
"update %s set race15 = C01001E00001",
"update %s set race16 = C01001F00001",
"update %s set race17 = C01001G00001",
"update %s set race21 = B01001A00001",
"update %s set race22 = B01001B00001",
"update %s set race23 = B01001C00001",
"update %s set race24 = B01001D00001",
"update %s set race25 = B01001E00001",
"update %s set race26 = B01001F00001",
"update %s set race27 = B01001G00001",
"""update %s set employment2 = (B23001000005 + B23001000007) + (B23001000012 + B23001000014) + """
"""(B23001000019 + B23001000021) + (B23001000026 + B23001000028) + (B23001000033 + B23001000035) + """
"""(B23001000040 + B23001000042) + (B23001000047 + B23001000049) + (B23001000054 + B23001000056) + """
"""(B23001000061 + B23001000063) + (B23001000068 + B23001000070) + (B23001000075 + B23001000080 + B23001000085) + """
"""(B23001000091 + B23001000093) + (B23001000098 + B23001000100) + """
"""(B23001000105 + B23001000107) + (B23001000112 + B23001000114) + (B23001000119 + B23001000121) + """
"""(B23001000126 + B23001000128) + (B23001000133 + B23001000135) + (B23001000140 + B23001000142) + """
"""(B23001000147 + B23001000149) + (B23001000154 + B23001000156) + (B23001000161 + B23001000166 + B23001000171)""",
"""update %s set employment3 = (B23001000008 + B23001000015 + B23001000022 + """
"""B23001000029 + B23001000036 + B23001000043 + B23001000050 + B23001000057 + B23001000064 +"""
"""B23001000071 + B23001000076 + B23001000081 + B23001000086 + B23001000094 + B23001000101 +"""
"""B23001000108 + B23001000115 + B23001000122 + B23001000129 + B23001000136 + B23001000143 +"""
"""B23001000150 + B23001000157 + B23001000162 + B23001000167 + B23001000172) """,
"""update %s set employment4 = (B23001000009 + B23001000016 + B23001000023 + """
"""B23001000030 + B23001000037 + B23001000044 + B23001000051 + B23001000058 + B23001000065 +"""
"""B23001000072 + B23001000077 + B23001000082 + B23001000087 + B23001000095 + B23001000102 +"""
"""B23001000109 + B23001000116 + B23001000123 + B23001000130 + B23001000137 + B23001000144 +"""
"""B23001000151 + B23001000158 + B23001000163 + B23001000168 + B23001000173) """,
"update %s set employment1 = gender1 + gender2 - employment2 - employment3 - employment4",
"update %s set groupquarter1 = B26001000001",
"update %s set hhldinc1 = B19001000002 + B19001000003",
"update %s set hhldinc2 = B19001000004 + B19001000005",
"update %s set hhldinc3 = B19001000006 + B19001000007",
"update %s set hhldinc4 = B19001000008 + B19001000009",
"update %s set hhldinc5 = B19001000010 + B19001000011",
"update %s set hhldinc6 = B19001000012 + B19001000013",
"update %s set hhldinc7 = B19001000014 + B19001000015",
"update %s set hhldinc8 = B19001000016 + B19001000017",
"update %s set hhldsize1 = B25009000003+B25009000011",
"update %s set hhldsize2 = B25009000004+B25009000012",
"update %s set hhldsize3 = B25009000005+B25009000013",
"update %s set hhldsize4 = B25009000006+B25009000014",
"update %s set hhldsize5 = B25009000007+B25009000015",
"update %s set hhldsize6 = B25009000008+B25009000016",
"update %s set hhldsize7 = B25009000009+B25009000017",
"update %s set hhldtype1 = B11001000003",
"update %s set hhldtype2 = B11001000005",
"update %s set hhldtype3 = B11001000006",
"update %s set hhldtype4 = B11001000008",
"update %s set hhldtype5 = B11001000009",
"""update %s set hhldrage1 = (B25007000003+B25007000004+B25007000005+B25007000006+B25007000007+B25007000008)+"""
"""(B25007000013+B25007000014+B25007000015+B25007000016+B25007000017+B25007000018)""",
"update %s set hhldrage2 = (B25007000009+ B25007000010+B25007000011)+(B25007000019+ B25007000020+B25007000021)",
"update %s set hhldfam1 = hhldtype1 + hhldtype2 + hhldtype3",
"update %s set hhldfam2 = hhldtype4 + hhldtype5",
"update %s set childpresence1 = C23007000002",
"update %s set childpresence2 = C23007000017 + hhldtype4 + hhldtype5",
"update %s set check_gender = gender1 + gender2",
"update %s set check_age = agep1+agep2+agep3+agep4+agep5+agep6+agep7+agep8+agep9+agep10",
"update %s set check_race = race1+race2+race3+race4+race5+race6+race7",
"update %s set check_race1 = race11+race12+race13+race14+race15+race16+race17",
"update %s set check_race2 = race21+race22+race23+race24+race25+race26+race27",
"update %s set check_employment = employment1 + employment2 + employment3 + employment4",
"update %s set check_type = hhldtype1+hhldtype2+hhldtype3+hhldtype4+hhldtype5",
"update %s set check_size = hhldsize1+hhldsize2+hhldsize3+hhldsize4+hhldsize5+hhldsize6+hhldsize7",
"update %s set check_hhldrage = hhldrage1+hhldrage2",
"update %s set check_inc = hhldinc1+hhldinc2+hhldinc3+hhldinc4+hhldinc5+hhldinc6+hhldinc7+hhldinc8",
"update %s set check_fam = hhldfam1+hhldfam2",
"update %s set check_child = childpresence1+childpresence2",
"drop table hhld_marginals",
"drop table gq_marginals",
"drop table person_marginals",
"""create table hhld_marginals select state, county, tract, bg, hhldinc1, hhldinc2, hhldinc3, hhldinc4, hhldinc5, hhldinc6, hhldinc7, hhldinc8,"""
"""hhldsize1, hhldsize2, hhldsize3, hhldsize4, hhldsize5, hhldsize6, hhldsize7, hhldtype1, hhldtype2, hhldtype3, hhldtype4, hhldtype5,"""
"""childpresence1, childpresence2, hhldrage1, hhldrage2, hhldfam1, hhldfam2 from %s""",
"create table gq_marginals select state, county, tract, bg, groupquarter1 from %s",
"""create table person_marginals select state, county, tract, bg, agep1, agep2, agep3, agep4, agep5, agep6, agep7, agep8, agep9, agep10,"""
"""gender1, gender2, race1, race2, race3, race4, race5, race6, race7 from %s"""]
|
"""
Tests for error handling
"""
import unittest
import nest
@nest.ll_api.check_stack
class ErrorTestCase(unittest.TestCase):
"""Tests if errors are handled correctly"""
def test_Raise(self):
"""Error raising"""
def raise_custom_exception(exc, msg):
raise exc(msg)
message = "test"
exception = nest.kernel.NESTError
self.assertRaisesRegex(
exception, message, raise_custom_exception, exception, message)
def test_StackUnderFlow(self):
"""Stack underflow"""
nest.ResetKernel()
self.assertRaisesRegex(
nest.kernel.NESTError, "StackUnderflow", nest.ll_api.sr, 'clear ;')
def test_DivisionByZero(self):
"""Division by zero"""
nest.ResetKernel()
self.assertRaisesRegex(
nest.kernel.NESTError, "DivisionByZero", nest.ll_api.sr, '1 0 div')
def test_UnknownNode(self):
"""Unknown node"""
nest.ResetKernel()
self.assertRaisesRegex(
nest.kernel.NESTError, "UnknownNode", nest.Connect, (99, ), (99, ))
def test_UnknownModel(self):
"""Unknown model name"""
nest.ResetKernel()
self.assertRaisesRegex(
nest.kernel.NESTError, "UnknownModelName", nest.Create, -1)
def suite():
suite = unittest.makeSuite(ErrorTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
import os
import opus_matsim.sustain_city.tests as test_dir
from opus_core.tests import opus_unittest
from opus_core.store.csv_storage import csv_storage
from urbansim.datasets.travel_data_dataset import TravelDataDataset
from numpy import *
import numpy
from opus_core.logger import logger
class MatrixTest(opus_unittest.OpusTestCase):
""" Testing access to travel data values stored in numpy arrays
"""
def setUp(self):
print "Entering setup"
# get sensitivity test path
self.test_dir_path = test_dir.__path__[0]
# get location to travel data table
self.input_directory = os.path.join( self.test_dir_path, 'data', 'travel_cost')
logger.log_status("input_directory: %s" % self.input_directory)
# check source file
if not os.path.exists( self.input_directory ):
            raise IOError('File not found! %s' % self.input_directory)
print "Leaving setup"
def test_run(self):
print "Entering test run"
        # This test loads existing travel data as a TravelDataSet (numpy array)
# and accesses single (pre-known) values to validate the conversion process
# (numpy array into standard python list).
#
# Here an example:
# my_list = [[1,2,3],
# [4,5,6],
# [7,8,9]]
#
# my_list[0][1] should be = 2
# my_list[2][2] should be = 9
table_name = 'travel_data'
travel_data_attribute = 'single_vehicle_to_work_travel_cost'
# location of pre-calculated MATSim travel costs
in_storage = csv_storage(storage_location = self.input_directory)
# create travel data set (travel costs)
travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )
travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=31)
# converting from numpy array into a 2d list
travel_list = numpy.atleast_2d(travel_data_attribute_mat).tolist()
# get two values for validation
value1 = int(travel_list[1][1]) # should be = 0
value2 = int(travel_list[2][1]) # should be = 120
logger.log_status('First validation value should be 0. Current value is %i' % value1)
logger.log_status('Second validation value should be 120. Current value is %i' % value2)
self.assertTrue( value1 == 0 )
self.assertTrue( value2 == 120 )
# self.dump_travel_list(travel_list) # for debugging
print "Leaving test run"
def dump_travel_list(self, travel_list):
''' Dumping travel_list for debugging reasons...
'''
dest = os.path.join( os.environ['OPUS_HOME'], 'opus_matsim', 'tmp')
if not os.path.exists(dest):
os.makedirs(dest)
travel = os.path.join(dest, 'travelFile.txt')
f = open(travel, "w")
f.write( str(travel_list) )
f.close()
if __name__ == "__main__":
#mt = MatrixTest() # for debugging
#mt.test_run() # for debugging
opus_unittest.main()
|
import pytest
class TestNcftp:
@pytest.mark.complete("ncftp ")
def test_1(self, completion):
assert completion
@pytest.mark.complete("ncftp -", require_cmd=True)
def test_2(self, completion):
assert completion
|
"""
FILE: nsistags.py
AUTHOR: Cody Precord
LANGUAGE: Python
SUMMARY:
Generate a DocStruct object that captures the structure of a NSIS Script. It
currently supports generating tags for Sections, Functions, and Macro defs.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: nsistags.py 52675 2008-03-22 03:34:38Z CJP $"
__revision__ = "$Revision: 52675 $"
import taglib
import parselib
def GenerateTags(buff):
"""Create a DocStruct object that represents a NSIS Script
@param buff: a file like buffer object (StringIO)
@todo: generate tags for lua tables?
"""
rtags = taglib.DocStruct()
# Set Descriptions of Document Element Types
rtags.SetElementDescription('variable', "Defines")
rtags.SetElementDescription('section', "Section Definitions")
rtags.SetElementDescription('macro', "Macro Definitions")
rtags.SetElementDescription('function', "Function Definitions")
rtags.SetElementPriority('variable', 4)
rtags.SetElementPriority('section', 3)
rtags.SetElementPriority('function', 2)
rtags.SetElementPriority('macro', 1)
# Parse the lines for code objects
for lnum, line in enumerate(buff):
line = line.strip()
llen = len(line)
# Skip comment and empty lines
if line.startswith(u"#") or line.startswith(u";") or not line:
continue
# Look for functions and sections
if parselib.IsToken(line, 0, u'Function'):
parts = line.split()
if len(parts) > 1:
rtags.AddFunction(taglib.Function(parts[1], lnum))
elif parselib.IsToken(line, 0, u'Section'):
parts = line.split()
if len(parts) > 1 and parts[1][0] not in ['"', "'", "`"]:
rtags.AddElement('section', taglib.Section(parts[1], lnum))
else:
for idx, part in enumerate(parts[1:]):
if parts[idx][-1] in ['"', "'", "`"]:
rtags.AddElement('section', taglib.Section(part, lnum))
break
elif parselib.IsToken(line, 0, u'!macro'):
parts = line.split()
if len(parts) > 1:
rtags.AddElement('macro', taglib.Macro(parts[1], lnum))
elif parselib.IsToken(line, 0, u'!define'):
parts = line.split()
if len(parts) > 1 and parts[1][0].isalpha():
rtags.AddVariable(taglib.Variable(parts[1], lnum))
else:
continue
return rtags
if __name__ == '__main__':
import sys
import StringIO
fhandle = open(sys.argv[1])
txt = fhandle.read()
fhandle.close()
tags = GenerateTags(StringIO.StringIO(txt))
print "\n\nElements:"
for element in tags.GetElements():
print "\n%s:" % element.keys()[0]
for val in element.values()[0]:
print "%s [%d]" % (val.GetName(), val.GetLine())
print "END"
|
import os
import sys
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pyfilm as pf
from skimage.measure import label
from skimage import filters
plt.rcParams.update({'figure.autolayout': True})
mpl.rcParams['axes.unicode_minus'] = False
from run import Run
import plot_style
plot_style.white()
pal = sns.color_palette('deep')
def structure_analysis(run, perc_thresh, create_film=False):
"""
Calculates the number of structures as a function of time for a given
percentile cut-off. Writes results and plots to an appropriate directory.
Parameters
----------
run : object
Run object calculated by the Run class.
perc_thresh : int
Percentile threshold at which to cut off fluctuations.
create_film : bool
Determines whether a film of the labelled structures is produced.
"""
run.read_ntot()
make_results_dir(run, perc_thresh)
labelled_image, nlabels = label_structures(run, perc_thresh)
no_structures = count_structures(run, labelled_image, nlabels)
plot_no_structures(run, no_structures, perc_thresh)
save_results(run, no_structures, perc_thresh)
if create_film:
make_film(run, no_structures, labelled_image, perc_thresh)
def make_results_dir(run, perc_thresh):
os.system('mkdir -p ' + run.run_dir + 'analysis/structures_' +
str(perc_thresh))
def label_structures(run, perc_thresh):
nlabels = np.empty(run.nt, dtype=int)
labelled_image = np.empty([run.nt, run.nx, run.ny], dtype=int)
for it in range(run.nt):
tmp = run.ntot_i[it,:,:].copy()
# Apply Gaussian filter
tmp = filters.gaussian(tmp, sigma=1)
thresh = np.percentile(tmp, perc_thresh,
interpolation='nearest')
tmp_max = np.max(tmp)
tmp_thresh = thresh/tmp_max
tmp /= tmp_max
tmp[tmp <= tmp_thresh] = 0
tmp[tmp > tmp_thresh] = 1
# Label the resulting structures
labelled_image[it,:,:], nlabels[it] = label(tmp, return_num=True,
background=0)
return(labelled_image, nlabels)
def count_structures(run, labelled_image, nlabels):
"""
Remove any structures which are too small and count structures.
"""
nblobs = np.empty(run.nt, dtype=int)
for it in range(run.nt):
hist = np.histogram(np.ravel(labelled_image[it]),
bins=range(1,nlabels[it]+1))[0]
smallest_struc = np.mean(hist)*0.1
hist = hist[hist > smallest_struc]
nblobs[it] = len(hist)
return(nblobs)
def plot_no_structures(run, no_structures, perc_thresh):
"""
Plot number of structures as a function of time.
"""
plt.clf()
plt.plot(no_structures)
plt.xlabel('Time index')
plt.ylabel('Number of structures')
plt.ylim(0)
plt.savefig(run.run_dir + 'analysis/structures_' + str(perc_thresh) +
'/nblobs.pdf')
def save_results(run, no_structures, perc_thresh):
"""
Save the number of structures as a function of time in a file.
"""
np.savetxt(run.run_dir + 'analysis/structures_' + str(perc_thresh) +
'/nblobs.csv', np.transpose((range(run.nt), no_structures)),
delimiter=',', fmt='%d', header='t_index,nblobs')
def make_film(run, no_structures, labelled_image, perc_thresh):
titles = []
for it in range(run.nt):
titles.append('No. of structures = {}'.format(no_structures[it]))
plot_options = {'cmap':'gist_rainbow',
'levels':np.arange(-1,np.max(labelled_image))
}
options = {'file_name':'structures',
'film_dir':run.run_dir + 'analysis/structures_' +
str(perc_thresh) ,
'frame_dir':run.run_dir + 'analysis/structures_' +
str(perc_thresh) + '/film_frames',
'nprocs':None,
'aspect':'equal',
'xlabel':r'$x$ (m)',
'ylabel':r'$y$ (m)',
'cbar_ticks':np.arange(-1,np.max(labelled_image),2),
'cbar_label':r'Label',
'fps':10,
'bbox_inches':'tight',
'title':titles
}
pf.make_film_2d(run.r, run.z, labelled_image,
plot_options=plot_options, options=options)
if __name__ == '__main__':
run = Run(sys.argv[1])
structure_analysis(run, 75, create_film=False)
structure_analysis(run, 95, create_film=False)
|
import exceptions
class PlexError(exceptions.Exception):
message = ""
class PlexTypeError(PlexError, TypeError):
pass
class PlexValueError(PlexError, ValueError):
pass
class InvalidRegex(PlexError):
pass
class InvalidToken(PlexError):
def __init__(self, token_number, message):
PlexError.__init__(self, "Token number %d: %s" % (token_number, message))
class InvalidScanner(PlexError):
pass
class AmbiguousAction(PlexError):
message = "Two tokens with different actions can match the same string"
def __init__(self):
pass
class UnrecognizedInput(PlexError):
scanner = None
position = None
state_name = None
def __init__(self, scanner, state_name):
self.scanner = scanner
self.position = scanner.position()
self.state_name = state_name
def __str__(self):
return ("'%s', line %d, char %d: Token not recognised in state %s"
% (self.position + (repr(self.state_name),)))
|
from miasm2.core.asmblock import disasmEngine
from miasm2.arch.msp430.arch import mn_msp430
class dis_msp430(disasmEngine):
def __init__(self, bs=None, **kwargs):
super(dis_msp430, self).__init__(mn_msp430, None, bs, **kwargs)
|
from django.contrib import admin
from .models import ZoteroExtractorLog
class ZoteroExtractorLogAdmin(admin.ModelAdmin):
model = ZoteroExtractorLog
list_display = ['item_key', 'version', 'timestamp', 'publication']
search_fields = ['item_key', 'version', 'publication__title', 'publication__slug']
admin.site.register(ZoteroExtractorLog, ZoteroExtractorLogAdmin)
|
__VERSION__="ete2-2.2rev1026"
from clustertree import *
__all__ = clustertree.__all__
|
from __future__ import unicode_literals
__author__ = "mozman <mozman@gmx.at>"
from ..entity import GenericWrapper
from ..tags import DXFTag
from ..classifiedtags import ClassifiedTags
from ..dxfattr import DXFAttr, DXFAttributes, DefSubclass
_LAYERTEMPLATE = """ 0
LAYER
5
0
2
LAYERNAME
70
0
62
7
6
CONTINUOUS
"""
class Layer(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_LAYERTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'color': DXFAttr(62, None), # dxf color index, if < 0 layer is off
'linetype': DXFAttr(6, None),
}))
LOCK = 0b00000100
UNLOCK = 0b11111011
def is_locked(self):
return self.dxf.flags & Layer.LOCK > 0
def lock(self):
self.dxf.flags = self.dxf.flags | Layer.LOCK
def unlock(self):
self.dxf.flags = self.dxf.flags & Layer.UNLOCK
def is_off(self):
return self.dxf.color < 0
def is_on(self):
return not self.is_off()
def on(self):
self.dxf.color = abs(self.dxf.color)
def off(self):
self.dxf.color = -abs(self.dxf.color)
def get_color(self):
return abs(self.dxf.color)
def set_color(self, color):
color = abs(color) if self.is_on() else -abs(color)
self.dxf.color = color
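# Hedged sketch, not part of the original module: the Layer wrapper above
# drives its state purely through the 'flags' bitmask (LOCK/UNLOCK) and the
# sign of the 'color' attribute, so a helper operating on an existing Layer
# instance could look like this; `layer` is assumed to come from a drawing's
# layer table.
def _toggle_layer_state(layer):
    layer.lock()
    assert layer.is_locked()
    layer.off()
    assert layer.is_off()
    layer.on()
    layer.unlock()
    return layer.get_color()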
_STYLETEMPLATE = """ 0
STYLE
5
0
2
STYLENAME
70
0
40
0.0
41
1.0
50
0.0
71
0
42
1.0
3
arial.ttf
4
"""
class Style(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_STYLETEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'height': DXFAttr(40, None), # fixed height, 0 if not fixed
'width': DXFAttr(41, None), # width factor
'oblique': DXFAttr(50, None), # oblique angle in degree, 0 = vertical
'text_generation_flags': DXFAttr(71, None), # 2 = backward, 4 = mirrored in Y
'last_height': DXFAttr(42, None), # last height used
'font': DXFAttr(3, None), # primary font file name
'bigfont': DXFAttr(4, None), # big font name, blank if none
}))
_LTYPETEMPLATE = """ 0
LTYPE
5
0
2
LTYPENAME
70
0
3
LTYPEDESCRIPTION
72
65
"""
class Linetype(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_LTYPETEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'description': DXFAttr(3, None),
'length': DXFAttr(40, None),
'items': DXFAttr(73, None),
}))
@classmethod
def new(cls, handle, dxfattribs=None, dxffactory=None):
if dxfattribs is not None:
pattern = dxfattribs.pop('pattern', [0.0])
else:
pattern = [0.0]
entity = super(Linetype, cls).new(handle, dxfattribs, dxffactory)
entity._setup_pattern(pattern)
return entity
def _setup_pattern(self, pattern):
self.tags.noclass.append(DXFTag(73, len(pattern) - 1))
self.tags.noclass.append(DXFTag(40, float(pattern[0])))
self.tags.noclass.extend((DXFTag(49, float(p)) for p in pattern[1:]))
_VPORTTEMPLATE = """ 0
VPORT
5
0
2
VPORTNAME
70
0
10
0.0
20
0.0
11
1.0
21
1.0
12
70.0
22
50.0
13
0.0
23
0.0
14
0.5
24
0.5
15
0.5
25
0.5
16
0.0
26
0.0
36
1.0
17
0.0
27
0.0
37
0.0
40
70.
41
1.34
42
50.0
43
0.0
44
0.0
50
0.0
51
0.0
71
0
72
1000
73
1
74
3
75
0
76
0
77
0
78
0
"""
class Viewport(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_VPORTTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'lower_left': DXFAttr(10, 'Point2D'),
'upper_right': DXFAttr(11, 'Point2D'),
'center_point': DXFAttr(12, 'Point2D'),
'snap_base': DXFAttr(13, 'Point2D'),
'snap_spacing': DXFAttr(14, 'Point2D'),
'grid_spacing': DXFAttr(15, 'Point2D'),
'direction_point': DXFAttr(16, 'Point3D'),
'target_point': DXFAttr(17, 'Point3D'),
'height': DXFAttr(40, None),
'aspect_ratio': DXFAttr(41, None),
'lens_length': DXFAttr(42, None),
'front_clipping': DXFAttr(43, None),
'back_clipping': DXFAttr(44, None),
'snap_rotation': DXFAttr(50, None),
'view_twist': DXFAttr(51, None),
'status': DXFAttr(68, None),
'id': DXFAttr(69, None),
'view_mode': DXFAttr(71, None),
'circle_zoom': DXFAttr(72, None),
'fast_zoom': DXFAttr(73, None),
'ucs_icon': DXFAttr(74, None),
'snap_on': DXFAttr(75, None),
'grid_on': DXFAttr(76, None),
'snap_style': DXFAttr(77, None),
'snap_isopair': DXFAttr(78, None),
}))
_UCSTEMPLATE = """ 0
UCS
5
0
2
UCSNAME
70
0
10
0.0
20
0.0
30
0.0
11
1.0
21
0.0
31
0.0
12
0.0
22
1.0
32
0.0
"""
class UCS(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_UCSTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'origin': DXFAttr(10, 'Point3D'),
'xaxis': DXFAttr(11, 'Point3D'),
'yaxis': DXFAttr(12, 'Point3D'),
}))
_APPIDTEMPLATE = """ 0
APPID
5
0
2
APPNAME
70
0
"""
class AppID(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_APPIDTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
}))
_VIEWTEMPLATE = """ 0
VIEW
5
0
2
VIEWNAME
70
0
10
0.0
20
0.0
11
1.0
21
1.0
31
1.0
12
0.0
22
0.0
32
0.0
40
70.
41
1.0
42
50.0
43
0.0
44
0.0
50
0.0
71
0
"""
class View(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_VIEWTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'height': DXFAttr(40, None),
'width': DXFAttr(41, None),
'center_point': DXFAttr(10, 'Point2D'),
'direction_point': DXFAttr(11, 'Point3D'),
'target_point': DXFAttr(12, 'Point3D'),
'lens_length': DXFAttr(42, None),
'front_clipping': DXFAttr(43, None),
'back_clipping': DXFAttr(44, None),
'view_twist': DXFAttr(50, None),
'view_mode': DXFAttr(71, None),
}))
_DIMSTYLETEMPLATE = """ 0
DIMSTYLE
105
0
2
DIMSTYLENAME
70
0
3
4
5
6
7
40
1.0
41
3.0
42
2.0
43
9.0
44
5.0
45
0.0
46
0.0
47
0.0
48
0.0
140
3.0
141
2.0
142
0.0
143
25.399999999999999
144
1.0
145
0.0
146
1.0
147
2.0
71
0
72
0
73
1
74
1
75
0
76
0
77
0
78
0
170
0
171
2
172
0
173
0
174
0
175
0
176
0
177
0
178
0
"""
class DimStyle(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_DIMSTYLETEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(105, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'dimpost': DXFAttr(3, None),
'dimapost': DXFAttr(4, None),
'dimblk': DXFAttr(5, None),
'dimblk1': DXFAttr(6, None),
'dimblk2': DXFAttr(7, None),
'dimscale': DXFAttr(40, None),
'dimasz': DXFAttr(41, None),
'dimexo': DXFAttr(42, None),
'dimdli': DXFAttr(43, None),
'dimexe': DXFAttr(44, None),
'dimrnd': DXFAttr(45, None),
'dimdle': DXFAttr(46, None),
'dimtp': DXFAttr(47, None),
'dimtm': DXFAttr(48, None),
'dimtxt': DXFAttr(140, None),
'dimcen': DXFAttr(141, None),
'dimtsz': DXFAttr(142, None),
'dimaltf': DXFAttr(143, None),
'dimlfac': DXFAttr(144, None),
'dimtvp': DXFAttr(145, None),
'dimtfac': DXFAttr(146, None),
'dimgap': DXFAttr(147, None),
'dimtol': DXFAttr(71, None),
'dimlim': DXFAttr(72, None),
'dimtih': DXFAttr(73, None),
'dimtoh': DXFAttr(74, None),
'dimse1': DXFAttr(75, None),
'dimse2': DXFAttr(76, None),
'dimtad': DXFAttr(77, None),
'dimzin': DXFAttr(78, None),
'dimalt': DXFAttr(170, None),
'dimaltd': DXFAttr(171, None),
'dimtofl': DXFAttr(172, None),
'dimsah': DXFAttr(173, None),
'dimtix': DXFAttr(174, None),
'dimsoxd': DXFAttr(175, None),
'dimclrd': DXFAttr(176, None),
'dimclre': DXFAttr(177, None),
'dimclrt': DXFAttr(178, None),
}))
|
import serial
BAUD = 38400
PORT = "/dev/ttyAMA0"
TIMEOUT = 0.5 # I needed a longer timeout than ladyada's 0.2 value
SERIALNUM = 0 # start with 0, each camera should have a unique ID.
COMMANDSEND = 0x56
COMMANDREPLY = 0x76
COMMANDEND = 0x00
CMD_GETVERSION = 0x11
CMD_RESET = 0x26
CMD_TAKEPHOTO = 0x36
CMD_READBUFF = 0x32
CMD_GETBUFFLEN = 0x34
FBUF_CURRENTFRAME = 0x00
FBUF_NEXTFRAME = 0x01
FBUF_STOPCURRENTFRAME = 0x00
getversioncommand = [COMMANDSEND, SERIALNUM, CMD_GETVERSION, COMMANDEND]
resetcommand = [COMMANDSEND, SERIALNUM, CMD_RESET, COMMANDEND]
takephotocommand = [COMMANDSEND, SERIALNUM, CMD_TAKEPHOTO, 0x01, FBUF_STOPCURRENTFRAME]
getbufflencommand = [COMMANDSEND, SERIALNUM, CMD_GETBUFFLEN, 0x01, FBUF_CURRENTFRAME]
def checkreply(r, b):
r = map( ord, r )
if( r[0] == COMMANDREPLY and r[1] == SERIALNUM and r[2] == b and r[3] == 0x00):
return True
return False
def reset():
cmd = ''.join( map( chr, resetcommand ) )
s.write(cmd)
reply = s.read(100)
r = list(reply)
if checkreply( r, CMD_RESET ):
return True
return False
def getversion():
cmd = ''.join( map( chr, getversioncommand ))
s.write(cmd)
reply = s.read(16)
r = list(reply)
# print r
if checkreply( r, CMD_GETVERSION ):
print r
return True
return False
def takephoto():
cmd = ''.join( map( chr, takephotocommand ))
s.write(cmd)
reply = s.read(5)
r = list(reply)
# print r
if( checkreply( r, CMD_TAKEPHOTO) and r[3] == chr(0x0)):
return True
return False
def getbufferlength():
cmd = ''.join( map( chr, getbufflencommand ))
s.write(cmd)
reply = s.read(9)
r = list(reply)
if( checkreply( r, CMD_GETBUFFLEN) and r[4] == chr(0x4)):
l = ord(r[5])
l <<= 8
l += ord(r[6])
l <<= 8
l += ord(r[7])
l <<= 8
l += ord(r[8])
return l
return 0
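# Note (added commentary): the shift-and-add steps above assemble the 32-bit
# big-endian frame-buffer length from reply bytes 5..8; the same value could
# be obtained with struct.unpack('>I', reply[5:9])[0].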
readphotocommand = [COMMANDSEND, SERIALNUM, CMD_READBUFF, 0x0c, FBUF_CURRENTFRAME, 0x0a]
def readbuffer(bytes):
addr = 0 # the initial offset into the frame buffer
photo = []
    # bytes to read each time (must be a multiple of 4)
inc = 8192
while( addr < bytes ):
# on the last read, we may need to read fewer bytes.
chunk = min( bytes-addr, inc );
# append 4 bytes that specify the offset into the frame buffer
command = readphotocommand + [(addr >> 24) & 0xff,
(addr>>16) & 0xff,
(addr>>8 ) & 0xff,
addr & 0xff]
# append 4 bytes that specify the data length to read
command += [(chunk >> 24) & 0xff,
(chunk>>16) & 0xff,
(chunk>>8 ) & 0xff,
chunk & 0xff]
# append the delay
command += [1,0]
# print map(hex, command)
print "Reading", chunk, "bytes at", addr
# make a string out of the command bytes.
cmd = ''.join(map(chr, command))
s.write(cmd)
# the reply is a 5-byte header, followed by the image data
# followed by the 5-byte header again.
reply = s.read(5+chunk+5)
# convert the tuple reply into a list
r = list(reply)
if( len(r) != 5+chunk+5 ):
# retry the read if we didn't get enough bytes back.
print "Read", len(r), "Retrying."
continue
if( not checkreply(r, CMD_READBUFF)):
print "ERROR READING PHOTO"
return
# append the data between the header data to photo
photo += r[5:chunk+5]
# advance the offset into the frame buffer
addr += chunk
print addr, "Bytes written"
return photo
s = serial.Serial( PORT, baudrate=BAUD, timeout = TIMEOUT )
reset()
if( not getversion() ):
print "Camera not found"
exit(0)
print "VC0706 Camera found"
if takephoto():
print "Snap!"
bytes = getbufferlength()
print bytes, "bytes to read"
photo = readbuffer( bytes )
f = open( "photo.jpg", 'w' )
photodata = ''.join( photo )
f.write( photodata )
f.close()
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='produce',
name='plu',
field=models.IntegerField(unique=True),
),
]
|
from netfields import InetAddressField, CidrAddressField
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from nodeshot.core.base.models import BaseAccessLevel
from ..managers import NetAccessLevelManager
from choices import IP_PROTOCOLS
class Ip(BaseAccessLevel):
""" IP Address Model """
interface = models.ForeignKey('net.Interface', verbose_name=_('interface'))
address = InetAddressField(verbose_name=_('ip address'), unique=True, db_index=True)
protocol = models.CharField(_('IP Protocol Version'), max_length=4, choices=IP_PROTOCOLS, default=IP_PROTOCOLS[0][0], blank=True)
netmask = CidrAddressField(_('netmask (CIDR, eg: 10.40.0.0/24)'), blank=True, null=True)
objects = NetAccessLevelManager()
class Meta:
app_label = 'net'
permissions = (('can_view_ip', 'Can view ip'),)
verbose_name = _('ip address')
verbose_name_plural = _('ip addresses')
def __unicode__(self):
return '%s: %s' % (self.protocol, self.address)
def clean(self, *args, **kwargs):
""" TODO """
# netaddr.IPAddress('10.40.2.1') in netaddr.IPNetwork('10.40.0.0/24')
pass
def save(self, *args, **kwargs):
"""
Determines ip protocol version automatically.
Stores address in interface shortcuts for convenience.
"""
self.protocol = 'ipv%d' % self.address.version
# save
super(Ip, self).save(*args, **kwargs)
# TODO: do we really need this?
# save shortcut on interfaces
#ip_cached_list = self.interface.ip_addresses
## if not present in interface shorctus add it to the list
#if str(self.address) not in ip_cached_list:
# # recalculate cached_ip_list
# recalculated_ip_cached_list = []
# for ip in self.interface.ip_set.all():
# recalculated_ip_cached_list.append(str(ip.address))
# # rebuild string in format "<ip_1>, <ip_2>"
# self.interface.data['ip_addresses'] = recalculated_ip_cached_list
# self.interface.save()
@property
def owner(self):
return self.interface.owner
if 'grappelli' in settings.INSTALLED_APPS:
@staticmethod
def autocomplete_search_fields():
return ('address__icontains',)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Malware',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alert_id', models.CharField(max_length=90)),
('alert_type', models.CharField(max_length=80)),
('file_name', models.CharField(max_length=80)),
('computer', models.CharField(max_length=80)),
('contact_group', models.CharField(max_length=80)),
('virus', models.CharField(max_length=80)),
('actual_action', models.CharField(max_length=80)),
('comment', models.CharField(max_length=100)),
('numeric_ip', models.GenericIPAddressField(default='0.0.0.0', protocol='ipv4')),
],
),
]
|
"""
Class used for representing tIntermediateCatchEvent of BPMN 2.0 graph
"""
import graph.classes.events.catch_event_type as catch_event
class IntermediateCatchEvent(catch_event.CatchEvent):
"""
Class used for representing tIntermediateCatchEvent of BPMN 2.0 graph
"""
def __init__(self):
"""
Default constructor, initializes object fields with new instances.
"""
super(IntermediateCatchEvent, self).__init__()
|
"""Tests for qutebrowser.misc.autoupdate."""
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.misc import autoupdate, httpclient
INVALID_JSON = ['{"invalid": { "json"}', '{"wrong": "keys"}']
class HTTPGetStub(httpclient.HTTPClient):
"""A stub class for HTTPClient.
Attributes:
url: the last url used by get()
_success: Whether get() will emit a success signal.
"""
def __init__(self, success=True, json=None):
super().__init__()
self.url = None
self._success = success
if json:
self._json = json
else:
self._json = '{"info": {"version": "test"}}'
def get(self, url):
self.url = url
if self._success:
self.success.emit(self._json)
else:
self.error.emit("error")
def test_constructor(qapp):
client = autoupdate.PyPIVersionClient()
assert isinstance(client._client, httpclient.HTTPClient)
def test_get_version_success(qtbot):
"""Test get_version() when success is emitted."""
http_stub = HTTPGetStub(success=True)
client = autoupdate.PyPIVersionClient(client=http_stub)
with qtbot.assertNotEmitted(client.error):
with qtbot.waitSignal(client.success):
client.get_version('test')
assert http_stub.url == QUrl(client.API_URL.format('test'))
def test_get_version_error(qtbot):
"""Test get_version() when error is emitted."""
http_stub = HTTPGetStub(success=False)
client = autoupdate.PyPIVersionClient(client=http_stub)
with qtbot.assertNotEmitted(client.success):
with qtbot.waitSignal(client.error):
client.get_version('test')
@pytest.mark.parametrize('json', INVALID_JSON)
def test_invalid_json(qtbot, json):
"""Test on_client_success() with invalid JSON."""
http_stub = HTTPGetStub(json=json)
client = autoupdate.PyPIVersionClient(client=http_stub)
client.get_version('test')
with qtbot.assertNotEmitted(client.success):
with qtbot.waitSignal(client.error):
client.get_version('test')
|
"""
JWT tokens (for web interface, mostly, as all peer operations function on
public key cryptography)
JWT tokens can be one of:
* Good
* Expired
* Invalid
And granting them should not take database access. They are meant to
figure out if a user is auth'd without using the database to do so.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from ...utils.timing import TimedTestCase
from ..token import token, jwt_get, jwt_use
class test_token(TimedTestCase):
def test_good_token(self):
"""Valid JWT Token"""
self.threshold = .32
bob = token(u'bob')
example = bob.make(u'print')
bob.check(example)
def test_expired_token(self):
"""Expire a token..."""
self.threshold = .1
a = datetime.datetime.now()
assert a != None
def test_invalid_token(self):
"""Invalid Tokens"""
self.threshold = .1
fred = token(u'fred')
alice = token(u'alice')
wrong = fred.make(u'well then')
alice.check(wrong)
class test_jwt(TimedTestCase):
def test_routes(self):
self.threshold = .1
tok = jwt_get(u'ten')
res = jwt_use(tok)
print(res)
|
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
class NSMachPortKnown_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj;
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
self.update();
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture();
# one pointer is the ISA
# then we have one other internal pointer, plus
# 4 bytes worth of flags. hence, these values
def offset(self):
logger = lldb.formatters.Logger.Logger()
if self.sys_params.is_64_bit:
return 20
else:
return 12
def port(self):
logger = lldb.formatters.Logger.Logger()
vport = self.valobj.CreateChildAtOffset("port",
self.offset(),
self.sys_params.types_cache.NSUInteger)
return vport.GetValueAsUnsigned(0)
class NSMachPortUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj;
self.sys_params = params
self.update();
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture();
def port(self):
logger = lldb.formatters.Logger.Logger()
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression("port","(int)[" + stream.GetData() + " machPort]")
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not NSMachPort>'
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data,wrapper =lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(valobj,statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSMachPort':
wrapper = NSMachPortKnown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun',valobj)
else:
wrapper = NSMachPortUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('unknown_class',valobj.GetName() + " seen as " + name_string)
return wrapper;
def NSMachPort_SummaryProvider (valobj,dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj);
if provider != None:
if isinstance(provider,lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.port();
except:
summary = None
logger >> "got summary " + str(summary)
if summary == None:
summary = '<variable is not NSMachPort>'
if isinstance(summary, basestring):
            return summary
return 'mach port: ' + str(summary)
return 'Summary Unavailable'
def __lldb_init_module(debugger,dict):
debugger.HandleCommand("type summary add -F NSMachPort.NSMachPort_SummaryProvider NSMachPort")
|
__version__=''' $Id$ '''
__doc__="""The Reportlab PDF generation library."""
Version = "2.7"
import sys
if sys.version_info[0:2] < (2, 7):
warning = """The trunk of reportlab currently requires Python 2.7 or higher.
This is being done to let us move forwards with 2.7/3.x compatibility
with the minimum of baggage.
ReportLab 2.7 was the last packaged version to support Python 2.5 and 2.6.
Python 2.3 users may still use ReportLab 2.4 or any other bugfixes
derived from it, and Python 2.4 users may use ReportLab 2.5.
Python 2.2 and below need to use released versions beginning with
1.x (e.g. 1.21), or snapshots or checkouts from our 'version1' branch.
Our current plan is to remove Python 2.5 compatibility on our next release,
allowing us to use the 2to3 tool and work on Python 3.0 compatibility.
If you have a choice, Python 2.7.x is best long term version to use.
"""
raise ImportError("reportlab needs Python 2.5 or higher", warning)
def getStory(context):
"This is a helper for our old autogenerated documentation system"
if context.target == 'UserGuide':
# parse some local file
import os
myDir = os.path.split(__file__)[0]
import yaml
return yaml.parseFile(myDir + os.sep + 'mydocs.yaml')
else:
# this signals that it should revert to default processing
return None
def getMonitor():
import reportlab.monitor
mon = reportlab.monitor.ReportLabToolkitMonitor()
return mon
|
../../../../../../share/pyshared/ubuntuone-storage-protocol/ubuntuone/storageprotocol/delta.py
|
from __future__ import absolute_import
import ROOT
from . import log; log = log[__name__]
from .. import QROOT, asrootpy
from ..base import NamedObject
from ..extern.six import string_types
__all__ = [
'DataSet',
]
class DataSet(NamedObject, QROOT.RooDataSet):
_ROOT = QROOT.RooDataSet
class Entry(object):
def __init__(self, idx, dataset):
self.idx_ = idx
self.dataset_ = dataset
@property
def fields(self):
return asrootpy(self.dataset_.get(self.idx_))
@property
def weight(self):
self.dataset_.get(self.idx_) #set current event
return self.dataset_.weight()
def __len__(self):
return self.numEntries()
def __getitem__(self, idx):
return DataSet.Entry(idx, self)
def __iter__(self):
for idx in range(len(self)):
yield DataSet.Entry(idx, self)
def createHistogram(self, *args, **kwargs):
if args and isinstance(args[0], string_types):
return ROOT.RooAbsData.createHistogram(self, *args, **kwargs)
return super(DataSet, self).createHistogram(*args, **kwargs)
def reduce(self, *args, **kwargs):
return asrootpy(super(DataSet, self).reduce(*args, **kwargs))
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings as django_settings
SLUG = 'macros'
APP_LABEL = 'wiki'
METHODS = getattr(
django_settings,
'WIKI_PLUGINS_METHODS',
('article_list',
'toc',
))
|
if __name__ == "__main__":
try:
from mvc.ui.widgets import Application
except ImportError:
from mvc.ui.console import Application
from mvc.widgets import app
from mvc.widgets import initialize
app.widgetapp = Application()
initialize(app.widgetapp)
|
'''
pysplat is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pysplat is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pysplat. If not, see < http://www.gnu.org/licenses/ >.
(C) 2016 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
'''
|
from django.contrib.auth.models import BaseUserManager
from django.db.models import Q
from django.utils import timezone
from django.utils.lru_cache import lru_cache
from pootle_app.models.permissions import check_user_permission
from pootle_translationproject.models import TranslationProject
from . import utils
__all__ = ('UserManager', )
class UserManager(BaseUserManager):
"""Pootle User manager.
This manager hides the 'nobody' and 'default' users for normal
queries, since they are special users. Code that needs access to these
users should use the methods get_default_user and get_nobody_user.
"""
PERMISSION_USERS = ('default', 'nobody')
META_USERS = ('default', 'nobody', 'system')
def _create_user(self, username, email, password, is_superuser,
**extra_fields):
"""Creates and saves a User with the given username, email,
password and superuser status.
Adapted from the core ``auth.User`` model's ``UserManager``: we
have no use for the ``is_staff`` field.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
utils.validate_email_unique(email)
user = self.model(username=username, email=email,
is_active=True, is_superuser=is_superuser,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
return self._create_user(username, email, password, False,
**extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
return self._create_user(username, email, password, True,
**extra_fields)
@lru_cache()
def get_default_user(self):
return self.get_queryset().get(username='default')
@lru_cache()
def get_nobody_user(self):
return self.get_queryset().get(username='nobody')
@lru_cache()
def get_system_user(self):
return self.get_queryset().get(username='system')
def hide_permission_users(self):
return self.get_queryset().exclude(username__in=self.PERMISSION_USERS)
def hide_meta(self):
return self.get_queryset().exclude(username__in=self.META_USERS)
def meta_users(self):
return self.get_queryset().filter(username__in=self.META_USERS)
def get_users_with_permission(self, permission_code, project, language):
default = self.get_default_user()
directory = TranslationProject.objects.get(
project=project,
language=language
).directory
if check_user_permission(default, permission_code, directory):
return self.hide_meta().filter(is_active=True)
user_filter = Q(
permissionset__positive_permissions__codename=permission_code
)
language_path = language.directory.pootle_path
project_path = project.directory.pootle_path
user_filter &= (
Q(permissionset__directory__pootle_path=directory.pootle_path)
| Q(permissionset__directory__pootle_path=language_path)
| Q(permissionset__directory__pootle_path=project_path)
)
user_filter |= Q(is_superuser=True)
return self.get_queryset().filter(user_filter).distinct()
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream import ElementBase, StanzaBase, ET
from sleekxmpp.xmlstream import register_stanza_plugin
class Failure(StanzaBase):
"""
"""
name = 'failure'
namespace = 'urn:ietf:params:xml:ns:xmpp-sasl'
interfaces = set(('condition', 'text'))
plugin_attrib = name
sub_interfaces = set(('text',))
conditions = set(('aborted', 'account-disabled', 'credentials-expired',
'encryption-required', 'incorrect-encoding', 'invalid-authzid',
        'invalid-mechanism', 'malformed-request', 'mechanism-too-weak',
'not-authorized', 'temporary-auth-failure'))
def setup(self, xml=None):
"""
Populate the stanza object using an optional XML object.
Overrides ElementBase.setup.
Sets a default error type and condition, and changes the
parent stanza's type to 'error'.
Arguments:
xml -- Use an existing XML object for the stanza's values.
"""
# StanzaBase overrides self.namespace
self.namespace = Failure.namespace
if StanzaBase.setup(self, xml):
#If we had to generate XML then set default values.
self['condition'] = 'not-authorized'
self.xml.tag = self.tag_name()
def get_condition(self):
"""Return the condition element's name."""
for child in self.xml.getchildren():
if "{%s}" % self.namespace in child.tag:
cond = child.tag.split('}', 1)[-1]
if cond in self.conditions:
return cond
return 'not-authorized'
def set_condition(self, value):
"""
Set the tag name of the condition element.
Arguments:
value -- The tag name of the condition element.
"""
if value in self.conditions:
del self['condition']
self.xml.append(ET.Element("{%s}%s" % (self.namespace, value)))
return self
def del_condition(self):
"""Remove the condition element."""
for child in self.xml.getchildren():
if "{%s}" % self.condition_ns in child.tag:
tag = child.tag.split('}', 1)[-1]
if tag in self.conditions:
self.xml.remove(child)
return self
|
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
from io import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
from ._compat import string_types
SimpleSet = dns.set.Set
class DifferingCovers(dns.exception.DNSException):
"""An attempt was made to add a DNS SIG/RRSIG whose covered type
is not the same as that of the other rdatas in the rdataset."""
class IncompatibleTypes(dns.exception.DNSException):
"""An attempt was made to add DNS RR data of an incompatible type."""
class Rdataset(dns.set.Set):
"""A DNS rdataset."""
__slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE, ttl=0):
"""Create a new rdataset of the specified class and type.
*rdclass*, an ``int``, the rdataclass.
*rdtype*, an ``int``, the rdatatype.
*covers*, an ``int``, the covered rdatatype.
*ttl*, an ``int``, the TTL.
"""
super(Rdataset, self).__init__()
self.rdclass = rdclass
self.rdtype = rdtype
self.covers = covers
self.ttl = ttl
def _clone(self):
obj = super(Rdataset, self)._clone()
obj.rdclass = self.rdclass
obj.rdtype = self.rdtype
obj.covers = self.covers
obj.ttl = self.ttl
return obj
def update_ttl(self, ttl):
"""Perform TTL minimization.
Set the TTL of the rdataset to be the lesser of the set's current
TTL or the specified TTL. If the set contains no rdatas, set the TTL
to the specified TTL.
*ttl*, an ``int``.
"""
if len(self) == 0:
self.ttl = ttl
elif ttl < self.ttl:
self.ttl = ttl
def add(self, rd, ttl=None):
"""Add the specified rdata to the rdataset.
If the optional *ttl* parameter is supplied, then
``self.update_ttl(ttl)`` will be called prior to adding the rdata.
*rd*, a ``dns.rdata.Rdata``, the rdata
*ttl*, an ``int``, the TTL.
Raises ``dns.rdataset.IncompatibleTypes`` if the type and class
do not match the type and class of the rdataset.
Raises ``dns.rdataset.DifferingCovers`` if the type is a signature
type and the covered type does not match that of the rdataset.
"""
#
# If we're adding a signature, do some special handling to
# check that the signature covers the same type as the
# other rdatas in this rdataset. If this is the first rdata
# in the set, initialize the covers field.
#
if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
raise IncompatibleTypes
if ttl is not None:
self.update_ttl(ttl)
if self.rdtype == dns.rdatatype.RRSIG or \
self.rdtype == dns.rdatatype.SIG:
covers = rd.covers()
if len(self) == 0 and self.covers == dns.rdatatype.NONE:
self.covers = covers
elif self.covers != covers:
raise DifferingCovers
if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
self.clear()
super(Rdataset, self).add(rd)
def union_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).union_update(other)
def intersection_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).intersection_update(other)
def update(self, other):
"""Add all rdatas in other to self.
*other*, a ``dns.rdataset.Rdataset``, the rdataset from which
to update.
"""
self.update_ttl(other.ttl)
super(Rdataset, self).update(other)
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
if not isinstance(other, Rdataset):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype or \
self.covers != other.covers:
return False
return super(Rdataset, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def to_text(self, name=None, origin=None, relativize=True,
override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
See ``dns.name.Name.choose_relativity`` for more information
on how *origin* and *relativize* determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
``to_text()`` method.
*name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with
*name* as the owner name.
*origin*, a ``dns.name.Name`` or ``None``, the origin for relative
names.
*relativize*, a ``bool``. If ``True``, names will be relativized
to *origin*.
"""
if name is not None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' '
else:
ntext = ''
pad = ''
s = StringIO()
if override_rdclass is not None:
rdclass = override_rdclass
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
s.write(u'%s%s%s %s\n' % (ntext, pad,
dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype)))
else:
for rd in self:
s.write(u'%s%s%d %s %s %s\n' %
(ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype),
rd.to_text(origin=origin, relativize=relativize,
**kw)))
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
def to_wire(self, name, file, compress=None, origin=None,
override_rdclass=None, want_shuffle=True):
"""Convert the rdataset to wire format.
*name*, a ``dns.name.Name`` is the owner name to use.
*file* is the file where the name is emitted (typically a
BytesIO file).
*compress*, a ``dict``, is the compression table to use. If
``None`` (the default), names will not be compressed.
*origin* is a ``dns.name.Name`` or ``None``. If the name is
relative and origin is not ``None``, then *origin* will be appended
to it.
*override_rdclass*, an ``int``, is used as the class instead of the
class of the rdataset. This is useful when rendering rdatasets
associated with dynamic updates.
*want_shuffle*, a ``bool``. If ``True``, then the order of the
Rdatas within the Rdataset will be shuffled before rendering.
Returns an ``int``, the number of records emitted.
"""
if override_rdclass is not None:
rdclass = override_rdclass
want_shuffle = False
else:
rdclass = self.rdclass
file.seek(0, 2)
if len(self) == 0:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
file.write(stuff)
return 1
else:
if want_shuffle:
l = list(self)
random.shuffle(l)
else:
l = self
for rd in l:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass,
self.ttl, 0)
file.write(stuff)
start = file.tell()
rd.to_wire(file, compress, origin)
end = file.tell()
assert end - start < 65536
file.seek(start - 2)
stuff = struct.pack("!H", end - start)
file.write(stuff)
file.seek(0, 2)
return len(self)
def match(self, rdclass, rdtype, covers):
"""Returns ``True`` if this rdataset matches the specified class,
type, and covers.
"""
if self.rdclass == rdclass and \
self.rdtype == rdtype and \
self.covers == covers:
return True
return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified list of rdatas in text format.
Returns a ``dns.rdataset.Rdataset`` object.
"""
if isinstance(rdclass, string_types):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
r = Rdataset(rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(rdclass, rdtype, ttl, *text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified rdatas in text format.
Returns a ``dns.rdataset.Rdataset`` object.
"""
return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
"""Create an rdataset with the specified TTL, and with
the specified list of rdata objects.
Returns a ``dns.rdataset.Rdataset`` object.
"""
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = Rdataset(rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
r.add(rd)
return r
def from_rdata(ttl, *rdatas):
"""Create an rdataset with the specified TTL, and with
the specified rdata objects.
Returns a ``dns.rdataset.Rdataset`` object.
"""
return from_rdata_list(ttl, rdatas)
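
# Hedged usage sketch (not part of the original module): demonstrates the
# module-level constructors above together with Rdataset.to_text() and
# Rdataset.to_wire(). The owner name 'www.example.' is illustrative only;
# this assumes the usual dnspython package layout (dns.name, dns.rdata).
if __name__ == '__main__':
    from io import BytesIO
    import dns.name

    rds = from_text('IN', 'A', 300, '10.0.0.1', '10.0.0.2')
    owner = dns.name.from_text('www.example.')
    # Master-file style text: one line per rdata, with the owner name prepended.
    print(rds.to_text(name=owner))
    # Wire format: to_wire() returns the number of records written to the buffer.
    buf = BytesIO()
    emitted = rds.to_wire(owner, buf)
    print(emitted, len(buf.getvalue()))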
|
"""Update vulnerability sources."""
from selinon import StoragePool
from f8a_worker.base import BaseTask
from f8a_worker.enums import EcosystemBackend
from f8a_worker.models import Ecosystem
from f8a_worker.solver import get_ecosystem_solver, OSSIndexDependencyParser
from f8a_worker.workers import CVEcheckerTask
class CVEDBSyncTask(BaseTask):
"""Update vulnerability sources."""
def components_to_scan(self, previous_sync_timestamp, only_already_scanned):
"""Get EPV that were recently updated in OSS Index, so they can contain new vulnerabilities.
Get components (e:p:v) that were recently (since previous_sync_timestamp) updated
in OSS Index, which means that they can contain new vulnerabilities.
:param previous_sync_timestamp: timestamp of previous check
:param only_already_scanned: include already scanned components only
:return: generator of e:p:v
"""
# TODO: reduce cyclomatic complexity
to_scan = []
rdb = StoragePool.get_connected_storage('BayesianPostgres')
for ecosystem in ['nuget']:
ecosystem_solver = get_ecosystem_solver(self.storage.get_ecosystem(ecosystem),
with_parser=OSSIndexDependencyParser())
self.log.debug("Retrieving new %s vulnerabilities from OSS Index", ecosystem)
ossindex_updated_packages = CVEcheckerTask.\
query_ossindex_vulnerability_fromtill(ecosystem=ecosystem,
from_time=previous_sync_timestamp)
for ossindex_updated_package in ossindex_updated_packages:
if Ecosystem.by_name(rdb.session, ecosystem).is_backed_by(EcosystemBackend.maven):
package_name = "{g}:{n}".format(g=ossindex_updated_package['group'],
n=ossindex_updated_package['name'])
else:
package_name = ossindex_updated_package['name']
package_affected_versions = set()
for vulnerability in ossindex_updated_package.get('vulnerabilities', []):
for version_string in vulnerability.get('versions', []):
try:
resolved_versions = ecosystem_solver.\
solve(["{} {}".format(package_name, version_string)],
all_versions=True)
except Exception:
self.log.exception("Failed to resolve %r for %s:%s", version_string,
ecosystem, package_name)
continue
resolved_versions = resolved_versions.get(package_name, [])
if only_already_scanned:
already_scanned_versions =\
[ver for ver in resolved_versions if
self.storage.get_analysis_count(ecosystem, package_name, ver) > 0]
package_affected_versions.update(already_scanned_versions)
else:
package_affected_versions.update(resolved_versions)
for version in package_affected_versions:
to_scan.append({
'ecosystem': ecosystem,
'name': package_name,
'version': version
})
msg = "Components to be {prefix}scanned for vulnerabilities: {components}".\
format(prefix="re-" if only_already_scanned else "",
components=to_scan)
self.log.info(msg)
return to_scan
def execute(self, arguments):
"""Start the task.
:param arguments: optional argument 'only_already_scanned' to run only
on already analysed packages
:return: EPV dict describing which packages should be analysed
"""
only_already_scanned = arguments.pop('only_already_scanned', True) if arguments else True
ignore_modification_time = (arguments.pop('ignore_modification_time', False)
if arguments else False)
CVEcheckerTask.update_victims_cve_db_on_s3()
self.log.debug('Updating sync associated metadata')
s3 = StoragePool.get_connected_storage('S3VulnDB')
previous_sync_timestamp = s3.update_sync_date()
if ignore_modification_time:
previous_sync_timestamp = 0
# get components which might have new vulnerabilities since previous sync
to_scan = self.components_to_scan(previous_sync_timestamp, only_already_scanned)
return {'modified': to_scan}
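
# Hedged sketch (illustrative only, not part of the task): the shape of the
# optional task arguments and of the dict returned by execute(), shown as
# plain data. Actual invocation is handled by the Selinon task dispatcher.
if __name__ == '__main__':
    example_arguments = {'only_already_scanned': True,
                         'ignore_modification_time': False}
    example_result = {'modified': [
        {'ecosystem': 'nuget', 'name': 'Example.Package', 'version': '1.0.0'},
    ]}
    print(example_arguments)
    print(example_result)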
|
import os, StringIO, sys, traceback, tempfile, random, shutil
from status import OutputStatus
from sagenb.misc.format import format_for_pexpect
from worksheet_process import WorksheetProcess
from sagenb.misc.misc import (walltime,
set_restrictive_permissions, set_permissive_permissions)
import pexpect
class WorksheetProcess_ExpectImplementation(WorksheetProcess):
"""
A controlled Python process that executes code using expect.
INPUT:
- ``process_limits`` -- None or a ProcessLimits objects as defined by
the ``sagenb.interfaces.ProcessLimits`` object.
"""
def __init__(self,
process_limits = None,
timeout = 0.05,
python = 'python'):
"""
Initialize this worksheet process.
"""
self._output_status = OutputStatus('', [], True)
self._expect = None
self._is_started = False
self._is_computing = False
self._timeout = timeout
self._prompt = "__SAGE__"
self._filename = ''
self._all_tempdirs = []
self._process_limits = process_limits
self._max_walltime = None
self._start_walltime = None
self._data_dir = None
self._python = python
if process_limits:
u = ''
if process_limits.max_vmem is not None:
u += ' -v %s'%(int(process_limits.max_vmem)*1000)
if process_limits.max_cputime is not None:
u += ' -t %s'%(int(process_limits.max_cputime))
if process_limits.max_processes is not None:
u += ' -u %s'%(int(process_limits.max_processes))
# prepend ulimit options
if u == '':
self._ulimit = u
else:
self._ulimit = 'ulimit %s'%u
else:
self._ulimit = ''
if process_limits and process_limits.max_walltime:
self._max_walltime = process_limits.max_walltime
def command(self):
return self._python
# TODO: The following simply doesn't work -- this is not a valid way to run
# ulimited. Also we should check if ulimit is available before even
# doing this.
return '&&'.join([x for x in [self._ulimit, self._python] if x])
def __del__(self):
try: self._cleanup_tempfiles()
except: pass
try: self._cleanup_data_dir()
except: pass
def _cleanup_data_dir(self):
if self._data_dir is not None:
set_restrictive_permissions(self._data_dir)
def _cleanup_tempfiles(self):
for X in self._all_tempdirs:
try: shutil.rmtree(X, ignore_errors=True)
except: pass
def __repr__(self):
"""
Return string representation of this worksheet process.
"""
return "Pexpect implementation of worksheet process"
###########################################################
# Control the state of the subprocess
###########################################################
def interrupt(self):
"""
Send an interrupt signal to the currently running computation
in the controlled process. This may or may not succeed. Call
``self.is_computing()`` to find out if it did.
"""
if self._expect is None: return
try:
self._expect.sendline(chr(3))
except: pass
def quit(self):
"""
Quit this worksheet process.
"""
if self._expect is None: return
try:
self._expect.sendline(chr(3)) # send ctrl-c
self._expect.sendline('quit_sage()')
except:
pass
try:
os.killpg(self._expect.pid, 9)
os.kill(self._expect.pid, 9)
except OSError:
pass
self._expect = None
self._is_started = False
self._is_computing = False
self._start_walltime = None
self._cleanup_tempfiles()
self._cleanup_data_dir()
def start(self):
"""
Start this worksheet process running.
"""
#print "Starting worksheet with command: '%s'"%self.command()
self._expect = pexpect.spawn(self.command())
self._is_started = True
self._is_computing = False
self._number = 0
self._read()
self._start_walltime = walltime()
def update(self):
"""
This should be called periodically by the server processes.
It does things like checking for timeouts, etc.
"""
self._check_for_walltimeout()
def _check_for_walltimeout(self):
"""
Check if the walltimeout has been reached, and if so, kill
this worksheet process.
"""
if (self._is_started and \
self._max_walltime and self._start_walltime and \
walltime() - self._start_walltime > self._max_walltime):
self.quit()
###########################################################
# Query the state of the subprocess
###########################################################
def is_computing(self):
"""
Return True if a computation is currently running in this worksheet subprocess.
OUTPUT:
- ``bool``
"""
return self._is_computing
def is_started(self):
"""
Return true if this worksheet subprocess has already been started.
OUTPUT:
- ``bool``
"""
return self._is_started
###########################################################
# Sending a string to be executed in the subprocess
###########################################################
def get_tmpdir(self):
"""
Return two strings (local, remote), where local is the name
of a pre-created temporary directory, and remote is the name
of the same directory but on the machine on which the actual
worksheet process is running.
OUTPUT:
- local directory
- remote directory
"""
# In this implementation the remote process is just running
# as the same user on the local machine.
s = tempfile.mkdtemp()
return (s, s)
def execute(self, string, data=None):
"""
Start executing the given string in this subprocess.
INPUT:
- ``string`` -- a string containing code to be executed.
- ``data`` -- a string or None; if given, must specify an
absolute path on the server host filesystem. This may
be ignored by some worksheet process implementations.
"""
if self._expect is None:
self.start()
if self._expect is None:
raise RuntimeError, "unable to start subprocess using command '%s'"%self.command()
self._number += 1
local, remote = self.get_tmpdir()
if data is not None:
# make a symbolic link from the data directory into local tmp directory
self._data = os.path.split(data)[1]
self._data_dir = data
set_permissive_permissions(data)
os.symlink(data, os.path.join(local, self._data))
else:
self._data = ''
self._tempdir = local
sage_input = '_sage_input_%s.py'%self._number
self._filename = os.path.join(self._tempdir, sage_input)
self._so_far = ''
self._is_computing = True
self._all_tempdirs.append(self._tempdir)
open(self._filename,'w').write(format_for_pexpect(string, self._prompt,
self._number))
try:
self._expect.sendline('\nimport os;os.chdir("%s");\nexecfile("%s")'%(
remote, sage_input))
except OSError as msg:
self._is_computing = False
self._so_far = str(msg)
def _read(self):
try:
self._expect.expect(pexpect.EOF, self._timeout)
# got EOF subprocess must have crashed; cleanup
print "got EOF subprocess must have crashed..."
print self._expect.before
self.quit()
except:
pass
###########################################################
# Getting the output so far from a subprocess
###########################################################
def output_status(self):
"""
Return OutputStatus object, which includes output from the
subprocess from the last executed command up until now,
information about files that were created, and whether
computing is now done.
OUTPUT:
- ``OutputStatus`` object.
"""
self._read()
if self._expect is None:
self._is_computing = False
else:
self._so_far = self._expect.before
import re
v = re.findall('START%s.*%s'%(self._number,self._prompt), self._so_far, re.DOTALL)
if len(v) > 0:
self._is_computing = False
s = v[0][len('START%s'%self._number):-len(self._prompt)]
else:
v = re.findall('START%s.*'%self._number, self._so_far, re.DOTALL)
if len(v) > 0:
s = v[0][len('START%s'%self._number):]
else:
s = ''
if s.endswith(self._prompt):
s = s[:-len(self._prompt)]
files = []
if os.path.exists(self._tempdir):
files = [os.path.join(self._tempdir, x) for x in os.listdir(self._tempdir) if x != self._data]
files = [x for x in files if x != self._filename]
return OutputStatus(s, files, not self._is_computing)
class WorksheetProcess_RemoteExpectImplementation(WorksheetProcess_ExpectImplementation):
"""
This worksheet process class implements computation of worksheet
code as another user possibly on another machine, with the
following requirements:
1. ssh keys are setup for passwordless login from the server to the
remote user account, and
2. there is a shared filesystem that both users can write to,
which need not be mounted in the same location.
VULNERABILITIES: It is possible for a malicious user to see code
input by other notebook users whose processes are currently
running. However, the moment any calculation finishes, the file
    results are moved back to the notebook server in a protected
    place, and everything but the input file is deleted, so the
    damage that can be done is limited. In particular, users cannot
    simply browse other users' files.
INPUT:
- ``user_at_host`` -- a string of the form 'username@host'
such that 'ssh user@host' does not require a password, e.g.,
      set up by running ``ssh-keygen`` as both the notebook server and
      worksheet users, then appending the server user's ~/.ssh/id_rsa.pub
      to the worksheet user's ~/.ssh/authorized_keys. You must also set
      correct permissions on these files and directories.
- ``local_directory`` -- (default: None) name of a directory on
the local computer that the notebook server can write to,
which the remote computer also has read/write access to. If
set to ``None``, then first try the environment variable
:envvar:`SAGENB_TMPDIR` if it exists, then :envvar:`TMPDIR`.
Otherwise, fall back to ``/tmp``.
- ``remote_directory`` -- (default: None) if the local_directory is
mounted on the remote machine as a different directory name,
this string is that directory name.
- ``process_limits`` -- None or a ProcessLimits objects as defined by
the ``sagenb.interfaces.ProcessLimits`` object.
"""
def __init__(self,
user_at_host,
remote_python,
local_directory = None,
remote_directory = None,
process_limits = None,
timeout = 0.05):
WorksheetProcess_ExpectImplementation.__init__(self, process_limits, timeout=timeout)
self._user_at_host = user_at_host
if local_directory is None:
local_directory = os.environ.get("SAGENB_TMPDIR")
if local_directory is None:
local_directory = os.environ.get("TMPDIR")
if local_directory is None:
local_directory = "/tmp"
self._local_directory = local_directory
if remote_directory is None:
remote_directory = local_directory
self._remote_directory = remote_directory
self._remote_python = remote_python
def command(self):
if self._ulimit == '':
c = self._remote_python
else:
c = '&&'.join([x for x in [self._ulimit, self._remote_python] if x])
return 'sage-native-execute ssh -t %s "%s"'%(self._user_at_host, c)
def get_tmpdir(self):
"""
Return two strings (local, remote), where local is the name
of a pre-created temporary directory, and remote is the name
of the same directory but on the machine on which the actual
worksheet process is running.
"""
# In this implementation the remote process is just running
# as the same user on the local machine.
local = tempfile.mkdtemp(dir=self._local_directory)
remote = os.path.join(self._remote_directory, local[len(self._local_directory):].lstrip(os.path.sep))
# Make it so local is world read/writable -- so that the remote worksheet
# process can write to it.
set_permissive_permissions(local)
return (local, remote)
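
# Hedged usage sketch (not part of the original module): the typical driving
# loop for a local pexpect-backed worksheet process. The attribute names
# ``done`` and ``output`` on OutputStatus are assumed from how the status
# object is constructed above.
if __name__ == '__main__':
    import time
    W = WorksheetProcess_ExpectImplementation(python='python')
    W.execute("print(2 + 2)")
    while True:
        W.update()                   # enforce walltime limits, if any
        status = W.output_status()   # output so far, files, and done flag
        if status.done:
            break
        time.sleep(0.1)
    print(status.output)
    W.quit()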
|
from numpy import linspace, array, arange, tile, dot, zeros
from .gaussian import Gaussian
from ..utils import rk4
class BasisFunctions(object):
def __init__(self, n_basis, duration, dt, sigma):
self.n_basis = n_basis
means = linspace(0, duration, n_basis)
# FIXME:
variances = duration / (sigma * n_basis)**2
gaussians = [Gaussian(array([means[k]]), array([[variances]]))
for k in range(len(means))]
self.x = arange(0., duration, dt)
y = array([gaussians[k].normal(self.x.reshape(-1, 1)) for k in range(len(means))])
self.z = y / tile(sum(y, 0), (n_basis, 1))
def trajectory(self, weights):
return dot(weights, self.z)
class MovementPrimitive(object):
def __init__(self, duration, n_basis, dt, stiffness=0., damping=0.):
"""
:param float duration: duration of the movement in seconds
        :param float dt: time step (in seconds) used for numerical integration
"""
self.dt = dt
self.duration = duration
self.stiffness = stiffness
self.damping = damping
self.basis = BasisFunctions(n_basis, self.duration, dt, 2.)
        self.traj = zeros((int(self.duration / dt), 3))
        self.acc = zeros(int(self.duration / dt))  # +1 due to ..utils.rk4 implementation
def acceleration(self, t, state):
intrinsic_acc = - self.stiffness*state[0] - self.damping*state[1]
        # Index the sampled acceleration profile by integer time step.
        return array([state[1], self.acc[int(t / self.dt)] + intrinsic_acc])
def trajectory(self, x0, command):
self.acc = self.basis.trajectory(command)
# self.acc[-1] = self.acc[-2] # still due to ..utils.rk4 implementation
t = 0.
self.traj[0, :] = [x0[0], x0[1], self.acc[0]]
i_t = 1
state = x0
while i_t < self.duration / self.dt:
# print i_t, t, self.duration - self.dt
t, state = rk4(t, self.dt, state, self.acceleration)
# print state
self.traj[i_t, :] = [state[0], state[1], self.acc[i_t]]
i_t += 1
return self.traj
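
# Hedged usage sketch (not part of the original module): integrate a primitive
# from rest with unit basis weights. Only meaningful when imported from within
# the package, since the relative imports above must resolve.
if __name__ == '__main__':
    from numpy import ones
    mp = MovementPrimitive(duration=1.0, n_basis=5, dt=0.01)
    traj = mp.trajectory(x0=array([0., 0.]), command=ones(5))
    # One row per time step: position, velocity, commanded acceleration.
    print(traj.shape)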
|
"""Tests for qutebrowser.config.configexc."""
import textwrap
import pytest
from qutebrowser.config import configexc
from qutebrowser.utils import usertypes
def test_validation_error():
e = configexc.ValidationError('val', 'msg')
assert e.option is None
assert str(e) == "Invalid value 'val' - msg"
@pytest.mark.parametrize('deleted, renamed, expected', [
(False, None, "No option 'opt'"),
(True, None, "No option 'opt' (this option was removed from qutebrowser)"),
(False, 'new', "No option 'opt' (this option was renamed to 'new')"),
])
def test_no_option_error(deleted, renamed, expected):
e = configexc.NoOptionError('opt', deleted=deleted, renamed=renamed)
assert e.option == 'opt'
assert str(e) == expected
def test_no_option_error_clash():
with pytest.raises(AssertionError):
configexc.NoOptionError('opt', deleted=True, renamed='foo')
def test_backend_error():
e = configexc.BackendError(usertypes.Backend.QtWebKit)
assert str(e) == "This setting is not available with the QtWebKit backend!"
def test_desc_with_text():
"""Test ConfigErrorDesc.with_text."""
old = configexc.ConfigErrorDesc("Error text", Exception("Exception text"))
new = old.with_text("additional text")
assert str(new) == 'Error text (additional text): Exception text'
@pytest.fixture
def errors():
"""Get a ConfigFileErrors object."""
err1 = configexc.ConfigErrorDesc("Error text 1", Exception("Exception 1"))
err2 = configexc.ConfigErrorDesc("Error text 2", Exception("Exception 2"),
"Fake traceback")
return configexc.ConfigFileErrors("config.py", [err1, err2])
def test_config_file_errors_str(errors):
assert str(errors).splitlines() == [
'Errors occurred while reading config.py:',
' Error text 1: Exception 1',
' Error text 2: Exception 2',
]
def test_config_file_errors_html(errors):
html = errors.to_html()
assert textwrap.dedent(html) == textwrap.dedent("""
Errors occurred while reading config.py:
<ul>
<li>
<b>Error text 1</b>: Exception 1
</li>
<li>
<b>Error text 2</b>: Exception 2
<pre>
Fake traceback
</pre>
</li>
</ul>
""")
# Make sure the traceback is not indented
assert '<pre>\nFake traceback\n' in html
|
"""
Visualize the system cells and MPI domains. Run ESPResSo in parallel
to color particles by node. With OpenMPI, this can be achieved using
``mpiexec -n 4 ./pypresso ../samples/visualization_cellsystem.py``.
Set property ``system.cell_system.node_grid = [i, j, k]`` (with ``i * j * k``
equal to the number of MPI ranks) to change the way the cellsystem is
partitioned. Only the domain of MPI rank 0 will be shown in wireframe.
"""
import espressomd
import espressomd.visualization_opengl
import numpy as np
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
box = [40, 30, 20]
system = espressomd.System(box_l=box)
visualizer = espressomd.visualization_opengl.openGLLive(
system,
window_size=[800, 800],
background_color=[0, 0, 0],
camera_position=[20, 15, 80],
particle_coloring='node',
draw_nodes=True,
draw_cells=True)
system.time_step = 0.0005
system.cell_system.set_regular_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0.4
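# Hedged example (commented out): with 4 MPI ranks one could partition the box
# into 2 x 2 x 1 domains, as described in the module docstring above.
# system.cell_system.node_grid = [2, 2, 1]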
for i in range(100):
system.part.add(pos=box * np.random.random(3))
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=100.0, sigma=1.0, cutoff=3.0, shift="auto")
energy = system.analysis.energy()
print(f"Before Minimization: E_total = {energy['total']:.2e}")
system.integrator.set_steepest_descent(f_max=50, gamma=30.0,
max_displacement=0.001)
system.integrator.run(10000)
system.integrator.set_vv()
energy = system.analysis.energy()
print(f"After Minimization: E_total = {energy['total']:.2e}")
print("Tune skin")
system.cell_system.tune_skin(0.1, 4.0, 1e-1, 1000)
print(system.cell_system.get_state())
system.thermostat.set_langevin(kT=1, gamma=1, seed=42)
visualizer.run(1)
|